blob: df9a6493bcbbb6c52a9782407adf1188840ae326 [file] [log] [blame]
Eric Kunzee5e26762020-10-13 16:11:07 -07001#!/usr/bin/env python3
2
Kevin Cheng3a478572021-01-22 17:21:02 -08003# Copyright (c) 2020-2021, ARM Limited.
Eric Kunzee5e26762020-10-13 16:11:07 -07004#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16
17
18import numpy as np
19import argparse
20import sys
21import re
22import os
23import subprocess
24import shlex
25import json
26import glob
27import math
28import queue
29import threading
30import traceback
31import math
Jeremy Johnsona6185572021-06-21 15:55:35 +010032import itertools
Eric Kunzee5e26762020-10-13 16:11:07 -070033
34from enum import IntEnum, Enum, unique
35
Kevin Cheng550ccc52021-03-03 11:21:43 -080036# Include the ../thirdparty/serialization_lib/python directory in PYTHONPATH
37parent_dir = os.path.dirname(os.path.realpath(__file__))
38sys.path.append(
39 os.path.join(parent_dir, "..", "thirdparty", "serialization_lib", "python")
40)
Eric Kunzee5e26762020-10-13 16:11:07 -070041import tosa_serializer as ts
42from tosa_serializer import *
43import tosa
44
45# Convenience variables to the flatc-generated types that should be enums, but aren't
46DType = tosa.DType.DType()
Kevin Cheng550ccc52021-03-03 11:21:43 -080047Op = tosa.Op.Op()
Eric Kunzee5e26762020-10-13 16:11:07 -070048ResizeMode = tosa.ResizeMode.ResizeMode()
49
Kevin Cheng550ccc52021-03-03 11:21:43 -080050
class TosaQuantGen:
    """QuantizedInfo random generator helper functions.

    Specify with 'qgen': in the operator definition.  Each qg* generator
    returns a ts.TosaSerializerQuantInfo populated with random zero points
    for quantized dtypes and zeros otherwise.
    """

    def __init__(self):
        pass

    @staticmethod
    def needsQinfo(op, dtype):
        """Return True when dtype carries quantization (zero-point) info.

        `op` is currently unused but kept so all callers share a signature.
        """
        return dtype == DType.INT8 or dtype == DType.INT16

    @staticmethod
    def qgUnary(testGen, op, dtype):
        """Build UnaryQuantInfo: input and output zero points."""
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.UnaryQuantInfo(testGen.randInt(), testGen.randInt())
        else:
            qinfo.UnaryQuantInfo(0, 0)
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype):
        """Build ConvQuantInfo: input and weight zero points."""
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.ConvQuantInfo(testGen.randInt(), testGen.randInt())
        else:
            qinfo.ConvQuantInfo(0, 0)
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype):
        """Build MatMulQuantInfo: zero points for the two input operands."""
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.MatMulQuantInfo(testGen.randInt(), testGen.randInt())
        else:
            qinfo.MatMulQuantInfo(0, 0)
        return qinfo

    @staticmethod
    def qgPad(testGen, op, dtype):
        """Build PadQuantInfo: a single input zero point."""
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.PadQuantInfo(testGen.randInt())
        else:
            qinfo.PadQuantInfo(0)
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        """Decompose a floating-point scale into (multiplier, shift).

        Derived from computeMultiplierAndShiftTosaScale32.  The multiplier is
        a 31-bit (scale32=True) or 15-bit fixed-point mantissa such that
        scaleFp ~= multiplier * 2**-shift.
        """
        scaleBits = 31 if scale32 else 15

        # frexp gives scaleFp = m * 2**shift with 0.5 <= |m| < 1.0
        m, shift = math.frexp(scaleFp)

        # Work with the mantissa magnitude for negative scales
        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert multiplier <= (1 << scaleBits)

        # Rounding may have pushed the mantissa to exactly 1.0; renormalize
        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        # Convert the frexp exponent into a right-shift amount
        shift = (-shift) + scaleBits

        assert multiplier <= (1 << scaleBits)
        assert shift >= 0 and shift <= 63

        return multiplier, shift
130
class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator. The actual random data is generated separately for each test."""

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank):
        """Return one random shape of `rank`, copied for every operand."""
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Every placeholder and const operand gets its own copy of the shape
        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank):
        """Rank-4 (NHWC) shapes, identical for every operand; batch dimension
        is capped when --max-batch-size is set."""
        pl, const = opName["operands"]

        assert rank == 4

        shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            shape[0] = (shape[0] % testGen.args.max_batch_size) + 1

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgScatter(testGen, opName, rank):
        """Rank-3 values_in shape plus a second input sharing batch/channel
        dims but with an independent random middle (W) dimension."""
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        assert rank == 3

        values_in_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            values_in_shape[0] = (values_in_shape[0] % testGen.args.max_batch_size) + 1

        # W is drawn independently within the configured tensor shape range
        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        input_shape = [values_in_shape[0], W, values_in_shape[2]]

        shape_list = []
        shape_list.append(values_in_shape.copy())
        shape_list.append(input_shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank):
        """All operands share one shape except a randomly chosen input, which
        has a random dimension forced to 1 to exercise broadcasting."""
        shape = testGen.makeShape(rank)

        pl, const = op["operands"]

        shape_list = []

        # Choose one of the inputs to broadcast
        bcast_idx = testGen.randInt(0, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list

    @staticmethod
    def tgConv2D(testGen, op, rank):
        """Shapes for conv2d: NHWC input, OHWI filter, [OC] bias."""
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank):
        """Shapes for transpose_conv2d.

        The shape requirements (NHWC input, OHWI filter, [OC] bias) and the
        random-draw order are identical to regular conv2d, so delegate to
        tgConv2D rather than duplicating its logic.
        """
        return TosaTensorGen.tgConv2D(testGen, op, rank)

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank):
        """Shapes for depthwise_conv2d: NHWC input, HWCM filter, [C*M] bias."""
        pl, const = op["operands"]

        assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        # Filter is KH, HW, C, M
        filter_hw = op["filter"]

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (
            testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgFullyConnected(testGen, op, rank):
        """Shapes for fully_connected: rank-2 input, [OC, IC] filter, [OC] bias."""
        pl, const = op["operands"]

        assert rank == 2

        input_shape = testGen.makeShape(rank)
        filter_oc = testGen.makeShape(1)[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank):
        """Shapes for matmul: A is (N, H, C), B is (N, C, W) with random W."""
        pl, const = op["operands"]

        assert rank == 3
        assert pl == 2 and const == 0

        a_shape = testGen.makeShape(rank)
        b_oc = testGen.makeShape(1)[0]
        b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])

        return [a_shape, b_shape]
Kevin Cheng550ccc52021-03-03 11:21:43 -0800327
class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for operators that take
    attributes or other parameters. The return value is a list of (descriptive_name, [arglist])
    tuples where the descriptive_name is appended to the test name and the arglist is expanded
    as arguments to the operator build function."""

    def __init__(self):
        pass

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype):
        """A trivial argument generator for operators that don't take any
        non-tensor arguments"""
        return [("", [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype):
        """Build the axis argument for operators that take a single axis"""
        axes = []

        shape = shapeList[0]

        # One test case per dimension of the first input shape
        for a in range(0, len(shape)):
            axes.append(("axis_{}".format(a), [a]))
        return axes

    @staticmethod
    def agConv2D(testGen, opName, shapeList, dtype):
        """Enumerate (stride, padding, dilation) combinations for conv2d.

        Each combination is decoded from a single loop counter: strides and
        dilations as 2-digit base-maxStride/maxDilation numbers, padding as
        four values derived from one counter.
        """
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert len(ifm_shape) == 4
        assert len(filter_shape) == 4

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for padding in range(0, (maxPadding) ** 4):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    # NOTE(review): the divisors below are maxPadding*4/*2/*1
                    # rather than maxPadding**3/**2/**1, so for maxPadding > 2
                    # this does not enumerate every distinct 4-tuple and can
                    # repeat some combinations — confirm whether intentional.
                    p = [
                        (padding // (maxPadding * 4)) % maxPadding,
                        (padding // (maxPadding * 2)) % maxPadding,
                        (padding // (maxPadding * 1)) % maxPadding,
                        padding % maxPadding,
                    ]
                    d = [dilation // maxDilation + 1, dilation % maxDilation + 1]

                    # 4 padding parameters for regular conv2d
                    arg_list.append(
                        (
                            "st{}{}_pad{}{}{}{}_dilat{}{}".format(
                                s[0], s[1], p[0], p[1], p[2], p[3], d[0], d[1]
                            ),
                            [s, p, d],
                        )
                    )
        return arg_list

    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtype):
        """Enumerate (stride, out_padding, dilation) plus the computed output
        shape for transpose_conv2d (only 2 output-padding values here)."""
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert len(ifm_shape) == 4
        assert len(filter_shape) == 4

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for out_padding in range(0, (maxPadding) ** 2):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    p = [
                        (out_padding // (maxPadding * 1)) % maxPadding,
                        out_padding % maxPadding,
                    ]
                    d = [dilation // maxDilation + 1, dilation % maxDilation + 1]

                    # Output height/width from input, filter, dilation,
                    # padding and stride
                    oh = (
                        ifm_shape[1]
                        - filter_shape[1]
                        - (filter_shape[1] - 1) * (d[0] - 1)
                        + 2 * p[0]
                    ) // s[0] + 1

                    ow = (
                        ifm_shape[2]
                        - filter_shape[2]
                        - (filter_shape[2] - 1) * (d[1] - 1)
                        + 2 * p[1]
                    ) // s[1] + 1

                    # Output shape
                    os = [ifm_shape[0], oh, ow, filter_shape[0]]

                    arg_list.append(
                        (
                            "st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}".format(
                                s[0],
                                s[1],
                                p[0],
                                p[1],
                                d[0],
                                d[1],
                                os[0],
                                os[1],
                                os[2],
                                os[3],
                            ),
                            [s, p, d, os],
                        )
                    )

        return arg_list

    @staticmethod
    def agPad(testGen, opName, shapeList, dtype):
        """Enumerate 0/1 padding patterns, one bit per tensor edge."""
        arg_list = []
        rank = len(shapeList[0])

        # Exhaustively test combinations of 0/1 padding on each side of each dimension
        # This process might need some revision for >1 padding, but use rank**2 as a bitmask
        # for now
        # NOTE(review): rank**2 patterns < 2**(rank*2) for rank > 2, so this
        # does not cover every edge combination — matches the "for now" above.
        for v in range(rank ** 2):

            # Create a flat arraypadding4D
            paddings = np.zeros((rank * 2), dtype=np.int32)

            # Fill in the 1's
            for r in range(rank * 2):
                if (v >> r) & 1:
                    paddings[r] = 1

            # Reshape back to a 2D array
            paddings = paddings.reshape((rank, 2))

            arg_list.append(("pad{0:b}".format(v), [paddings]))

        return arg_list

    @staticmethod
    def agPooling(testGen, opName, shapeList, dtype):
        """Enumerate (kernel, stride, padding) combinations for pooling ops.

        Kernel sizes start at 2; strides start at 1.
        """
        arg_list = []

        shape = shapeList[0]
        assert len(shape) == 4

        maxStride = testGen.args.max_pooling_stride
        maxKernel = testGen.args.max_pooling_kernel
        maxPadding = testGen.args.max_pooling_padding + 1

        for kernel in range(0, maxKernel ** 2):
            for stride in range(0, maxStride ** 2):
                for padding in range(0, maxPadding ** 4):
                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    k = [(kernel // maxKernel) + 2, (kernel % maxKernel) + 2]
                    # NOTE(review): same maxPadding*4/*2/*1 divisor pattern as
                    # agConv2D — see the note there.
                    p = [
                        (padding // (maxPadding * 4)) % maxPadding,
                        (padding // (maxPadding * 2)) % maxPadding,
                        (padding // (maxPadding * 1)) % maxPadding,
                        padding % maxPadding,
                    ]

                    arg_list.append(
                        (
                            "st{}{}_kern{}{}_pad{}{}{}{}".format(
                                s[0], s[1], k[0], k[1], p[0], p[1], p[2], p[3]
                            ),
                            [k, s, p],
                        )
                    )
        return arg_list

    @staticmethod
    def agCast(testGen, opName, shapeList, inDtype):
        """Enumerate the legal output dtypes for CAST from `inDtype`."""
        arg_list = []

        # Enumerate the output types here
        if inDtype == DType.INT8:
            dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT16:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT32:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        elif inDtype == DType.BOOL:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif inDtype == DType.FLOAT:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        else:
            raise Exception("Unexpected input dtype: {}".format(inDtype))

        for dtype in dtypeList:
            arg_list.append(("out{}".format(DTypeNames[dtype]), [dtype]))

        return arg_list

    @staticmethod
    def agRescale(testGen, opName, shapeList, inDtype):
        """Enumerate RESCALE output dtype and flag combinations
        (scale32, double_round, per_channel)."""
        arg_list = []

        # Enumerate the output types here
        for dtype in [DType.INT8, DType.INT16, DType.INT32]:
            for scale32 in [False, True]:
                for double_round in [False, True]:
                    for per_channel in [False, True]:

                        if inDtype == DType.INT48 and scale32:
                            # Illegal condition. Must be scale32=False
                            continue

                        arg_list.append(
                            (
                                "out{}_sc{}_dr{}_pc{}".format(
                                    DTypeNames[dtype],
                                    int(scale32),
                                    int(double_round),
                                    int(per_channel),
                                ),
                                [dtype, scale32, double_round, per_channel],
                            )
                        )

        return arg_list

    @staticmethod
    def agMul(testGen, opName, shapeList, dtype):
        """Random result shifts for MUL on INT32; shift 0 otherwise."""
        arg_list = []

        if dtype is DType.INT32:
            for p in range(testGen.args.num_rand_permutations):

                shift = testGen.randInt(0, 32)

                arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
        else:
            arg_list.append(("shift0", [0]))

        return arg_list

    @staticmethod
    def agArithmeticRightShift(testGen, opName, shapeList, dtype):
        """Both values of the round flag for ARITHMETIC_RIGHT_SHIFT."""
        arg_list = []

        arg_list.append(("roundTrue", [True]))
        arg_list.append(("roundFalse", [False]))

        return arg_list

    # Helper function for reshape. Gets some factors of a larger number.
    @staticmethod
    def getFactors(val, start=1):
        # Returns the factors of val in [start, sqrt(val)]; note this is only
        # the lower half of each factor pair.
        factors = []

        for i in range(start, int(np.sqrt(val)) + 1):
            if (val % i) == 0:
                factors.append(i)

        return factors

    @staticmethod
    def agReshape(testGen, opName, shapeList, dtype):
        """Generate random new shapes with the same total element count,
        avoiding duplicates and occasionally inserting a -1 wildcard."""
        arg_list = []

        origShape = shapeList[0]

        totalElements = 1
        for s in origShape:
            totalElements *= s

        # This code is NOT fast. Fortunately, the numbers are fairly small.
        factors = TosaArgGen.getFactors(totalElements)

        for p in range(testGen.args.num_rand_permutations):
            newRank = testGen.randInt(1, 7)
            if len(factors) < newRank:
                continue

            found = True
            # escape_counter breaks while loop if it continues on for too long
            escape_counter = 0
            while found:
                newShape = []
                # Generate newShape ensuring it isn't a duplicate
                remainingElements = totalElements
                shuffledFactors = testGen.rng.permutation(factors)
                for i in range(1, newRank):
                    # pick rank-1 factors
                    newShape.append(shuffledFactors[0])
                    remainingElements = remainingElements // shuffledFactors[0]
                    shuffledFactors = testGen.rng.permutation(
                        TosaArgGen.getFactors(remainingElements)
                    )
                # Last dimension absorbs whatever is left
                newShape.append(remainingElements)

                # Toss in a -1 sometimes
                minusOne = testGen.randInt(0, newRank * 4)
                if minusOne < newRank:
                    newShape[minusOne] = -1

                # Check for duplicates
                found = False
                for name, other_shape in arg_list:
                    if other_shape[0] == newShape:
                        found = True
                        break

                escape_counter += 1
                if escape_counter >= 100:
                    break

            if not found:
                arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))

        return arg_list

    @staticmethod
    def agTranspose(testGen, opName, shapeList, dtype):
        """Pick up to num_rand_permutations distinct axis permutations."""
        arg_list = []

        ifm_shape = shapeList[0]

        # Get all permutations
        permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]

        # Limit to possible permutations from shape dimension or argument setting
        limit = min(len(permutations), testGen.args.num_rand_permutations)

        # Get random permutation generator that uses all permutations
        random_permutations = testGen.rng.permutation(permutations)

        # Create list of required amount of permutations
        arg_list = [("perm{}".format(p), [random_permutations[p].tolist()]) for p in range(limit)]
        return arg_list

    @staticmethod
    def agSlice(testGen, opName, shapeList, dtype):
        """Generate random (begin, size) pairs for SLICE, discarding any
        draw that produces a zero-sized dimension."""
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):
            begin = []
            size = []

            valid = True

            for i in range(rank):
                if ifm_shape[i] > 1:
                    begin.append(testGen.randInt(0, ifm_shape[i]))
                    size.append(testGen.randInt(0, ifm_shape[i] - begin[i]))

                    # Invalid slice size?
                    if size[i] == 0:
                        valid = False
                else:
                    # Size-1 dims are taken whole
                    begin.append(0)
                    size.append(1)

            if valid:
                arg_list.append(("perm{}".format(p), [begin, size]))
        return arg_list

    @staticmethod
    def agTile(testGen, opName, shapeList, dtype):
        """Generate random small multiples (1..3 per dim) for TILE."""
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):

            # Pick a few random, but small multiple values
            # because otherwise this has a tendency to generate
            # enormous tensors
            multiples = []
            for i in range(rank):
                multiples.append(testGen.randInt(1, 4))

            arg_list.append(("perm{}".format(p), [multiples]))

        return arg_list

    @staticmethod
    def agResize(testGen, opName, shapeList, dtype):
        """Generate RESIZE arguments: mode, output dims, and either
        floating-point or fixed-point stride/offset depending on output type."""
        arg_list = []

        ifm_shape = shapeList[0]

        for m in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:

            # Exclude illegal {mode, type} configurations. Pick legal output types
            if m == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif m == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            elif dtype == DType.FLOAT:
                outputDTypeList = [DType.FLOAT]
            else:
                continue

            for outputDType in outputDTypeList:
                for perm in range(testGen.args.num_rand_permutations):

                    # Randomly generate legal output dimensions and shift
                    # and then compute the stride and offset based on them
                    output_dims = [testGen.randInt(1), testGen.randInt(1)]
                    in_center_h = (ifm_shape[1] - 1) / 2.0
                    in_center_w = (ifm_shape[2] - 1) / 2.0
                    out_center_h = (output_dims[0] - 1) / 2.0
                    out_center_w = (output_dims[1] - 1) / 2.0

                    # Center-aligned mapping from output to input coordinates
                    fp_stride_y = float(ifm_shape[1]) / float(output_dims[0])
                    fp_stride_x = float(ifm_shape[2]) / float(output_dims[1])
                    fp_offset_y = in_center_h - fp_stride_y * out_center_h
                    fp_offset_x = in_center_w - fp_stride_x * out_center_w

                    if outputDType == DType.FLOAT:
                        # Float output: use floating-point stride/offset,
                        # zero out the fixed-point fields
                        shift = 0
                        stride = [0, 0]
                        offset = [0, 0]
                        stride_fp = [fp_stride_y, fp_stride_x]
                        offset_fp = [fp_offset_y, fp_offset_x]
                        arg_list.append(
                            (
                                "mode{}_odim{}x{}_out{}_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}".format(
                                    m,
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride_fp[0],
                                    stride_fp[1],
                                    offset_fp[0],
                                    offset_fp[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )
                    else:
                        # Integer output: quantize stride/offset to fixed
                        # point with an 11-bit shift, reducing the shift
                        # until all values fit in signed 16 bits
                        shift = 11
                        unit = float(1 << shift)
                        stride_y = int(round(fp_stride_y * unit))
                        stride_x = int(round(fp_stride_x * unit))
                        offset_y = int(round(fp_offset_y * unit))
                        offset_x = int(round(fp_offset_x * unit))

                        while (
                            stride_y >= 32768
                            or stride_x >= 32768
                            or offset_y >= 32768
                            or offset_x >= 32768
                            or offset_y < -32768
                            or offset_x < -32768
                        ):
                            shift = shift - 1
                            unit = float(1 << shift)
                            stride_y = int(round(fp_stride_y * unit))
                            stride_x = int(round(fp_stride_x * unit))
                            offset_y = int(round(fp_offset_y * unit))
                            offset_x = int(round(fp_offset_x * unit))

                        stride = [stride_y, stride_x]
                        offset = [offset_y, offset_x]

                        stride_fp = [0.0, 0.0]
                        offset_fp = [0.0, 0.0]

                        arg_list.append(
                            (
                                "mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}".format(
                                    m,
                                    shift,
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride[0],
                                    stride[1],
                                    offset[0],
                                    offset[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )

        return arg_list

    # NOTE(review): unlike the other generators, agCondIf/agWhileLoop are not
    # decorated @staticmethod; in Python 3 calling them via the class object
    # still works, but consider adding the decorator for consistency.
    def agCondIf(testGen, opName, shapeList, dtype):
        # CondIf generates the condition values here.
        # Convert to tensors in the build function, along with the
        # then and else blocks
        arg_list = []

        for c in [False, True]:
            arg_list.append(("cond{}".format(int(c)), [c]))

        return arg_list

    def agWhileLoop(testGen, opName, shapeList, dtype):
        # While loop: 0 iterations, 1, more than 1
        arg_list = []

        for iter in [0, 1, 4]:
            arg_list.append(("iter{}".format(iter), [iter]))

        return arg_list
Kevin Cheng550ccc52021-03-03 11:21:43 -0800873
Eric Kunzee5e26762020-10-13 16:11:07 -0700874class TosaTestGen:
    def __init__(self, args):
        """Build a test generator from parsed command-line args.

        args must provide output_dir, random_seed and tensor_shape_range
        (the latter is read by makeShape/build_gather).
        """
        self.args = args
        self.basePath = args.output_dir
        self.random_seed = args.random_seed
        # Serializer is created per-test by createSerializer()
        self.ser = None
        self.rng = np.random.default_rng(self.random_seed)
        # NOTE(review): dynamic op lists appear to be expanded before the
        # defaults are applied — confirm the ordering is intentional.
        self.createDynamicOpLists()
        self.initOpListDefaults()
        self.quantGen = TosaQuantGen()
        # Force makeShape to do a specific starting shape
        self.targetted_shape = None
886
887 def createSerializer(self, opName, testPath):
888 self.testPath = os.path.join(opName, testPath)
889
890 fullPath = os.path.join(self.basePath, self.testPath)
891 os.makedirs(fullPath, exist_ok=True)
892 self.ser = ts.TosaSerializer(fullPath)
893
    def getSerializer(self):
        # Accessor for the serializer created by createSerializer() (None before that)
        return self.ser
896
897 def serialize(self, testName):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800898 with open(
899 os.path.join(self.basePath, self.testPath, "{}.tosa".format(testName)), "wb"
900 ) as fd:
Eric Kunzee5e26762020-10-13 16:11:07 -0700901 fd.write(self.ser.serialize())
902
Kevin Cheng550ccc52021-03-03 11:21:43 -0800903 with open(os.path.join(self.basePath, self.testPath, "desc.json"), "w") as fd:
904 fd.write(self.ser.writeJson("{}.tosa".format(testName)))
Eric Kunzee5e26762020-10-13 16:11:07 -0700905
906 def getRandTensor(self, shape, dtype):
907 RAND_SHIFT_FACTOR = 0.5
908 RAND_SCALE_FACTOR = 4.0
909
910 if dtype == DType.BOOL:
911 np_dt = np.bool
912 return np.bool_(self.rng.choice(a=[False, True], size=shape))
Eric Kunzee5e26762020-10-13 16:11:07 -0700913 elif dtype == DType.INT4:
914 return np.int32(self.rng.integers(low=-7, high=8, size=shape))
915 elif dtype == DType.INT8:
916 return np.int32(self.rng.integers(low=-127, high=128, size=shape))
917 elif dtype == DType.INT16:
918 return np.int32(self.rng.integers(low=-32768, high=32768, size=shape))
919 elif dtype == DType.INT32:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800920 return np.int32(
921 self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape)
922 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700923 elif dtype == DType.INT48:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800924 return np.int64(
925 self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape)
926 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700927 elif dtype == DType.FLOAT:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800928 return np.float32(
929 self.rng.random(size=shape) - RAND_SHIFT_FACTOR * RAND_SCALE_FACTOR
930 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700931 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800932 raise Exception("Unrecognized Dtype: {}".format(dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -0700933
Kevin Cheng989cb052021-04-28 16:29:44 -0700934 def buildPlaceholderTensors(self, shape_list, dtype_list):
Eric Kunzee5e26762020-10-13 16:11:07 -0700935 placeholders = []
936
Kevin Cheng989cb052021-04-28 16:29:44 -0700937 assert len(shape_list) == len(dtype_list)
938
939 for idx, shape in enumerate(shape_list):
940 arr = self.getRandTensor(shape, dtype_list[idx])
941 placeholders.append(self.ser.addPlaceholder(shape, dtype_list[idx], arr))
Eric Kunzee5e26762020-10-13 16:11:07 -0700942
943 return placeholders
944
Kevin Cheng989cb052021-04-28 16:29:44 -0700945 def buildConstTensors(self, shape_list, dtype_list):
Eric Kunzee5e26762020-10-13 16:11:07 -0700946 consts = []
947
Kevin Cheng989cb052021-04-28 16:29:44 -0700948 assert len(shape_list) == len(dtype_list)
949
950 for idx, shape in enumerate(shape_list):
951 arr = self.getRandTensor(shape, dtype_list[idx])
952 consts.append(self.ser.addConst(shape, dtype_list[idx], arr))
Eric Kunzee5e26762020-10-13 16:11:07 -0700953
954 return consts
955
956 def makeShape(self, rank):
957 if self.targetted_shape:
958 return np.int32(self.targetted_shape)
Kevin Cheng550ccc52021-03-03 11:21:43 -0800959 return np.int32(
960 self.rng.integers(
961 low=self.args.tensor_shape_range[0],
962 high=self.args.tensor_shape_range[1],
963 size=rank,
964 )
965 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700966
    def setTargetShape(self, shape):
        # Force makeShape() to return exactly this shape (pass None to
        # restore random shape generation)
        self.targetted_shape = shape
969
970 def randInt(self, low=0, high=256):
971 return np.int32(self.rng.integers(low=low, high=high, size=1))[0]
972
973 def getRandNumberDType(self, dtype):
974 if dtype == DType.FLOAT:
975 return self.rng.random()
976 elif dtype == DType.BOOL:
977 return self.rng.choice([False, True])
978 elif dtype == DType.INT4:
979 low, high = (-7, 8)
Eric Kunzee5e26762020-10-13 16:11:07 -0700980 elif dtype == DType.INT8:
981 low, high = (-127, 128)
982 elif dtype == DType.INT16:
983 low, high = (-32768, 32768)
984 elif dtype == DType.INT32:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800985 low, high = (-(1 << 31), (1 << 31))
Eric Kunzee5e26762020-10-13 16:11:07 -0700986 elif dtype == DType.INT48:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800987 low, high = (-(1 << 47), (1 << 47))
Eric Kunzee5e26762020-10-13 16:11:07 -0700988 # Special size
989 return np.int64(self.rng.integers(low, high, size=1))[0]
990 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800991 raise Exception("Unknown dtype: {}".format(dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -0700992
993 return np.int32(self.rng.integers(low, high, size=1))[0]
994
995 def shapeStr(self, shape):
996
997 sStr = []
998 # Convert to strings
999 for i in shape:
1000 sStr.append(str(i))
1001
Kevin Cheng550ccc52021-03-03 11:21:43 -08001002 return "x".join(sStr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001003
1004 def typeStr(self, t):
Kevin Cheng989cb052021-04-28 16:29:44 -07001005 if isinstance(t, list):
1006 assert len(t) >= 2
1007 return "{}x{}".format(self.typeStr(t[0]), self.typeStr(t[1]))
Eric Kunzee5e26762020-10-13 16:11:07 -07001008 else:
Kevin Cheng989cb052021-04-28 16:29:44 -07001009 if t == DType.BOOL:
1010 return "b"
1011 elif t == DType.INT4:
1012 return "i4"
1013 elif t == DType.INT8:
1014 return "i8"
1015 elif t == DType.UINT8:
1016 return "u8"
1017 elif t == DType.INT16:
1018 return "i16"
1019 elif t == DType.INT32:
1020 return "i32"
1021 elif t == DType.INT48:
1022 return "i48"
1023 elif t == DType.FLOAT:
1024 return "float"
1025 else:
1026 raise Exception("Unknown dtype, cannot convert to string: {}".format(t))
Eric Kunzee5e26762020-10-13 16:11:07 -07001027
1028 def typeWidth(self, t):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001029 """ Get the datatype width for integer types"""
Kevin Cheng3a478572021-01-22 17:21:02 -08001030 if t == DType.INT4:
Eric Kunzee5e26762020-10-13 16:11:07 -07001031 return 4
1032 elif t == DType.INT8:
1033 return 8
Kevin Cheng3a478572021-01-22 17:21:02 -08001034 elif t == DType.UINT8:
1035 return 8
Eric Kunzee5e26762020-10-13 16:11:07 -07001036 elif t == DType.INT16:
1037 return 16
1038 elif t == DType.INT32:
1039 return 32
1040 elif t == DType.INT48:
1041 return 48
1042 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001043 raise Exception("Unknown dtype, cannot convert to string: {}".format(t))
Eric Kunzee5e26762020-10-13 16:11:07 -07001044
    # Argument generators
    # Returns a list of tuples (stringDescriptor, [build_fcn_arg_list]),
    # where the string descriptor is used to generate the test name and
    # the build_fcn_arg_list is expanded and passed to the operator test
    # build function
1050
Kevin Cheng550ccc52021-03-03 11:21:43 -08001051 def build_unary(self, op, a, qinfo=None):
Eric Kunzee5e26762020-10-13 16:11:07 -07001052 result_tens = OutputShaper.unaryOp(self.ser, a)
1053 self.ser.addOperator(op, [a.name], [result_tens.name], None, qinfo)
1054 return result_tens
1055
1056 def build_binary_broadcast(self, op, a, b):
1057 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1058 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1059 return result_tens
1060
1061 def build_binary_nonbroadcast(self, op, a, b):
1062 result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b)
1063 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1064 return result_tens
1065
Kevin Chengaee1fac2020-11-11 13:54:06 -08001066 def build_arithmetic_right_shift(self, op, a, b, round):
1067 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1068
1069 attr = ts.TosaSerializerAttribute()
1070 attr.ArithmeticRightShiftAttribute(round)
1071
1072 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
1073 return result_tens
1074
1075 def build_mul(self, op, a, b, shift):
Eric Kunzee5e26762020-10-13 16:11:07 -07001076 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1077
1078 # Special for multiply:
1079 # Force the result to INT32 for INT types
1080 if a.dtype != DType.FLOAT:
1081 result_tens.setDtype(DType.INT32)
1082
Kevin Chengaee1fac2020-11-11 13:54:06 -08001083 attr = ts.TosaSerializerAttribute()
1084 attr.MulAttribute(shift)
1085
1086 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001087 return result_tens
1088
1089 def build_table(self, op, a):
1090 # Constant size, random values
1091 table_arr = self.getRandTensor([513], DType.INT16)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001092 table_tens = self.ser.addConst(table_arr.shape, DType.INT16, table_arr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001093
1094 result_tens = OutputShaper.tableOp(self.ser, a, table_tens)
1095 self.ser.addOperator(op, [a.name, table_tens.name], [result_tens.name], None)
1096
1097 return result_tens
1098
1099 def build_select(self, op, cond, a, b):
1100
1101 # Replace the cond tensor with a boolean tensor since it probably
1102 # has the wrong dtype
Kevin Cheng989cb052021-04-28 16:29:44 -07001103 t = self.buildPlaceholderTensors([cond.shape], [DType.BOOL])
Eric Kunzee5e26762020-10-13 16:11:07 -07001104 cond = t[0]
1105
1106 result_tens = OutputShaper.selectOp(self.ser, cond, a, b)
1107 self.ser.addOperator(op, [cond.name, a.name, b.name], [result_tens.name])
1108
1109 return result_tens
1110
1111 def build_comparison(self, op, a, b):
1112 result_tens = OutputShaper.binaryComparisonOp(self.ser, a, b)
1113 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1114 return result_tens
1115
1116 def build_argmax(self, op, a, axis):
1117 result_tens = OutputShaper.argmaxOp(self.ser, a, axis)
1118
1119 attr = ts.TosaSerializerAttribute()
1120 attr.AxisAttribute(axis)
1121
1122 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1123 return result_tens
1124
Kevin Cheng550ccc52021-03-03 11:21:43 -08001125 def build_pool2d(self, op, input, kernel, stride, pad, qinfo=None):
Eric Kunzee5e26762020-10-13 16:11:07 -07001126 result_tens = OutputShaper.pool2dOp(self.ser, input, kernel, stride, pad)
1127
1128 attr = ts.TosaSerializerAttribute()
1129 attr.Pool2dAttribute(kernel, stride, pad)
Eric Kunzee5e26762020-10-13 16:11:07 -07001130
1131 self.ser.addOperator(op, [input.name], [result_tens.name], attr, qinfo)
1132 return result_tens
1133
1134 def build_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001135 assert len(padding) == 4
1136 result_tens = OutputShaper.conv2dOp(
1137 self.ser, ifm, filter, strides, padding, dilations
1138 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001139
1140 attr = ts.TosaSerializerAttribute()
1141 attr.Conv2dAttribute(padding, strides, dilations)
1142
Kevin Cheng550ccc52021-03-03 11:21:43 -08001143 self.ser.addOperator(
1144 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
1145 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001146 return result_tens
1147
Kevin Cheng550ccc52021-03-03 11:21:43 -08001148 def build_transpose_conv2d(
Kevin Cheng989cb052021-04-28 16:29:44 -07001149 self, op, ifm, filter, bias, stride, outpad, dilation, output_shape, qinfo
Kevin Cheng550ccc52021-03-03 11:21:43 -08001150 ):
1151 assert len(outpad) == 2
Eric Kunzee5e26762020-10-13 16:11:07 -07001152 result_tens = OutputShaper.transposeConv2DOp(self.ser, ifm, output_shape)
1153
1154 attr = ts.TosaSerializerAttribute()
1155 attr.TransposeConv2DAttribute(outpad, stride, dilation, output_shape)
1156
Kevin Cheng550ccc52021-03-03 11:21:43 -08001157 self.ser.addOperator(
Kevin Cheng989cb052021-04-28 16:29:44 -07001158 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
Kevin Cheng550ccc52021-03-03 11:21:43 -08001159 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001160 return result_tens
1161
Kevin Cheng550ccc52021-03-03 11:21:43 -08001162 def build_depthwise_conv2d(
1163 self, op, ifm, filter, bias, strides, padding, dilations, qinfo
1164 ):
1165 result_tens = OutputShaper.depthwiseConv2dOp(
1166 self.ser, ifm, filter, strides, padding, dilations
1167 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001168
1169 attr = ts.TosaSerializerAttribute()
1170 attr.Conv2dAttribute(padding, strides, dilations)
1171
Kevin Cheng550ccc52021-03-03 11:21:43 -08001172 self.ser.addOperator(
1173 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
1174 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001175 return result_tens
1176
1177 def build_fully_connected(self, op, ifm, filter, bias, qinfo):
1178 result_tens = OutputShaper.fullyConnectedOp(self.ser, ifm, filter)
1179
Kevin Cheng550ccc52021-03-03 11:21:43 -08001180 self.ser.addOperator(
1181 op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo
1182 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001183 return result_tens
1184
1185 def build_matmul(self, op, a, b, qinfo):
1186 result_tens = OutputShaper.matmulOp(self.ser, a, b)
1187 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], None, qinfo)
1188 return result_tens
1189
1190 def build_reduce(self, op, a, axis):
1191 result_tens = OutputShaper.reduceOp(self.ser, a, axis)
1192
1193 attr = ts.TosaSerializerAttribute()
1194 attr.AxisAttribute(axis)
1195
1196 self.ser.addOperator(op, [a.name], result_tens.name, attr)
1197 return result_tens
1198
1199 def build_clamp(self, op, a):
1200 result_tens = OutputShaper.unaryOp(self.ser, a)
1201
1202 attr = ts.TosaSerializerAttribute()
1203
1204 # Get two random ints
1205 v = [self.randInt(), self.randInt()]
1206
1207 if a.dtype == DType.FLOAT:
1208 attr.ClampAttribute(0, 0, min(v), max(v))
1209 else:
1210 attr.ClampAttribute(min(v), max(v), 0, 0)
1211
1212 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1213 return result_tens
1214
1215 def build_leaky_relu(self, op, a):
1216 result_tens = OutputShaper.unaryOp(self.ser, a)
1217 attr = ts.TosaSerializerAttribute()
1218
1219 attr.LeakyReluAttribute(self.getRandNumberDType(DType.FLOAT))
1220
1221 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1222 return result_tens
1223
1224 # Needs an additional type/input
1225 def build_prelu(self, op, a):
1226 result_tens = OutputShaper.unaryOp(self.ser, a)
1227
1228 self.ser.addOperator(op, [a.name], [result_tens.name])
1229 return result_tens
1230
1231 def build_relun(self, op, a):
1232 result_tens = OutputShaper.unaryOp(self.ser, a)
1233
1234 attr = ts.TosaSerializerAttribute()
1235
1236 if a.dtype == DType.FLOAT:
1237 attr.ReluNAttribute(0, self.getRandNumberDType(a.dtype))
1238 else:
1239 attr.ReluNAttribute(self.getRandNumberDType(a.dtype), 0)
1240
1241 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1242 return result_tens
1243
1244 def build_sigmoid(self, op, a):
1245 result_tens = OutputShaper.unaryOp(self.ser, a)
1246 self.ser.addOperator(op, [a.name], [result_tens.name])
1247 return result_tens
1248
1249 def build_tanh(self, op, a):
1250 result_tens = OutputShaper.unaryOp(self.ser, a)
1251 self.ser.addOperator(op, [a.name], [result_tens.name])
1252 return result_tens
1253
1254 def build_concat(self, op, a, b, axis):
1255 result_tens = OutputShaper.concatOp(self.ser, a, b, axis)
1256
1257 attr = ts.TosaSerializerAttribute()
1258 attr.AxisAttribute(axis)
1259
1260 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
1261
1262 def build_pad(self, op, a, padding, qinfo):
1263 result_tens = OutputShaper.padOp(self.ser, a, padding)
1264
1265 # Need to turn the padding array into a TOSA tensor here.
1266 # This is one of the few tensor operands that does not get
1267 # randomly generated
Kevin Cheng550ccc52021-03-03 11:21:43 -08001268 padding_tens = self.ser.addConst(padding.shape, DType.INT32, padding)
Eric Kunzee5e26762020-10-13 16:11:07 -07001269
Kevin Cheng550ccc52021-03-03 11:21:43 -08001270 self.ser.addOperator(
1271 op, [a.name, padding_tens.name], [result_tens.name], None, qinfo
1272 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001273
1274 def build_reshape(self, op, a, newShape):
1275 result_tens = OutputShaper.reshapeOp(self.ser, a, newShape)
1276
1277 attr = ts.TosaSerializerAttribute()
1278 attr.ReshapeAttribute(newShape)
1279
1280 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1281 return result_tens
1282
1283 def build_reverse(self, op, a, axis):
1284 result_tens = OutputShaper.unaryOp(self.ser, a)
1285
1286 attr = ts.TosaSerializerAttribute()
1287 attr.AxisAttribute(axis)
1288
1289 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1290 return result_tens
1291
1292 def build_transpose(self, op, a, perms):
1293 result_tens = OutputShaper.transposeOp(self.ser, a, perms)
1294
Kevin Cheng550ccc52021-03-03 11:21:43 -08001295 perms_tens = self.ser.addConst([len(perms)], DType.INT32, np.int32(perms))
Eric Kunzee5e26762020-10-13 16:11:07 -07001296
1297 self.ser.addOperator(op, [a.name, perms_tens.name], [result_tens.name])
1298 return result_tens
1299
1300 def build_slice(self, op, a, begin, size):
1301 result_tens = OutputShaper.sliceOp(self.ser, a, begin, size)
1302
1303 attr = ts.TosaSerializerAttribute()
1304 attr.SliceAttribute(begin, size)
1305
1306 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1307 return result_tens
1308
1309 def build_tile(self, op, a, multiples):
1310 result_tens = OutputShaper.tileOp(self.ser, a, multiples)
1311
1312 attr = ts.TosaSerializerAttribute()
1313 attr.TileAttribute(multiples)
1314
1315 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1316 return result_tens
1317
Kevin Cheng77d0f762020-11-24 10:26:32 -08001318 def build_gather(self, op, values):
Eric Kunzee5e26762020-10-13 16:11:07 -07001319
1320 # Create a new indicies tensor
1321 # here with data that doesn't exceed the dimensions of the values tensor
1322
Kevin Cheng550ccc52021-03-03 11:21:43 -08001323 K = values.shape[1] # K
1324 W = self.randInt(
1325 self.args.tensor_shape_range[0], self.args.tensor_shape_range[1]
1326 ) # W
1327 indicies_arr = np.int32(
1328 self.rng.integers(low=0, high=K, size=[values.shape[0], W])
1329 ) # (N, W)
1330 indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001331
Kevin Cheng77d0f762020-11-24 10:26:32 -08001332 result_tens = OutputShaper.gatherOp(self.ser, values, indicies)
Eric Kunzee5e26762020-10-13 16:11:07 -07001333
Kevin Cheng77d0f762020-11-24 10:26:32 -08001334 self.ser.addOperator(op, [values.name, indicies.name], [result_tens.name])
Eric Kunzee5e26762020-10-13 16:11:07 -07001335
1336 return result_tens
1337
Kevin Cheng77d0f762020-11-24 10:26:32 -08001338 def build_scatter(self, op, values_in, input):
1339
1340 # Create a new indicies tensor
1341 # here with data that doesn't exceed the dimensions of the values_in tensor
1342
Kevin Cheng550ccc52021-03-03 11:21:43 -08001343 K = values_in.shape[1] # K
1344 W = input.shape[1] # W
1345 indicies_arr = np.int32(
1346 self.rng.integers(low=0, high=K, size=[values_in.shape[0], W])
1347 ) # (N, W)
1348 indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
Kevin Cheng77d0f762020-11-24 10:26:32 -08001349
1350 result_tens = OutputShaper.scatterOp(self.ser, values_in, indicies, input)
1351
Kevin Cheng550ccc52021-03-03 11:21:43 -08001352 self.ser.addOperator(
1353 op, [values_in.name, indicies.name, input.name], [result_tens.name]
1354 )
Kevin Cheng77d0f762020-11-24 10:26:32 -08001355
1356 return result_tens
1357
Kevin Cheng550ccc52021-03-03 11:21:43 -08001358 def build_resize(
1359 self,
1360 op,
1361 input,
1362 mode,
1363 stride,
1364 offset,
1365 shift,
1366 stride_fp,
1367 offset_fp,
1368 output_dims,
1369 input_dtype,
1370 output_dtype,
1371 ):
1372 result_tens = OutputShaper.resizeOp(
1373 self.ser,
1374 input,
1375 mode,
1376 stride,
1377 offset,
1378 shift,
1379 stride_fp,
1380 offset_fp,
1381 output_dims,
1382 input_dtype,
1383 output_dtype,
1384 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001385
1386 attr = ts.TosaSerializerAttribute()
Kevin Cheng77d0f762020-11-24 10:26:32 -08001387
Kevin Cheng550ccc52021-03-03 11:21:43 -08001388 attr.ResizeAttribute(
1389 output_dims, stride, offset, shift, stride_fp, offset_fp, mode
1390 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001391
1392 self.ser.addOperator(op, [input.name], [result_tens.name], attr)
1393 return result_tens
1394
1395 def build_identityn(self, op, val, val2):
1396
Kevin Cheng550ccc52021-03-03 11:21:43 -08001397 result_tens = OutputShaper.unaryOp(self.ser, val)
Eric Kunzee5e26762020-10-13 16:11:07 -07001398 result_tens2 = OutputShaper.unaryOp(self.ser, val2)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001399 self.ser.addOperator(
1400 op, [val.name, val2.name], [result_tens.name, result_tens2.name]
1401 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001402 return result_tens
1403
    def build_placeholder(self, op, val):
        # Add an identity op to avoid warning in the reference model
        return self.build_unary(Op.IDENTITY, val)
1407
1408 # Type Conversion
1409 def build_cast(self, op, val, out_dtype):
1410 result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
1411 self.ser.addOperator(op, [val.name], [result_tens.name])
1412 return result_tens
1413
1414 def build_rescale(self, op, val, out_dtype, scale32, double_round, per_channel):
1415 result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
1416
1417 if per_channel:
1418 nc = val.shape[-1]
1419 else:
1420 nc = 1
1421
1422 in_type_width = self.typeWidth(val.dtype)
1423 out_type_width = self.typeWidth(out_dtype)
1424
Kevin Cheng3a478572021-01-22 17:21:02 -08001425 if val.dtype == DType.INT8:
Kevin Cheng989cb052021-04-28 16:29:44 -07001426 input_zp = self.randInt(-128, 127)
Eric Kunzee5e26762020-10-13 16:11:07 -07001427 in_type_width = in_type_width + 1
1428 else:
1429 input_zp = 0
1430
Kevin Cheng3a478572021-01-22 17:21:02 -08001431 if out_dtype == DType.INT8:
Kevin Cheng989cb052021-04-28 16:29:44 -07001432 output_zp = self.randInt(-128, 127)
Eric Kunzee5e26762020-10-13 16:11:07 -07001433 out_type_width = out_type_width + 1
1434 else:
1435 output_zp = 0
1436
1437 # Calculate scale based on:
1438 # scale = a *(2^output_width)/(2^input_width))
1439
1440 a = np.float32(self.rng.random(size=[nc]))
1441 scale_arr = a * np.float32((1 << out_type_width) / (1 << in_type_width))
1442
1443 if scale32:
1444 pass
1445 # Cap the scaling at 2^15 - 1 for scale16
1446 scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1)
1447 else:
1448 # Cap the scaling at 2^15 - 1 for scale16
1449 scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)
1450
Kevin Cheng550ccc52021-03-03 11:21:43 -08001451 # print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))
Eric Kunzee5e26762020-10-13 16:11:07 -07001452
1453 multiplier_arr = np.int32(np.zeros(shape=[nc]))
1454 shift_arr = np.int32(np.zeros(shape=[nc]))
1455
1456 for i in range(nc):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001457 multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(
1458 scale_arr[i], scale32
1459 )
Kevin Chengaee1fac2020-11-11 13:54:06 -08001460 if shift_arr[i] < 2 or shift_arr[i] > 62:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001461 self.ser.setExpectedFailure(True, "OpRescale: invalid shift value")
Eric Kunzee5e26762020-10-13 16:11:07 -07001462
Kevin Cheng550ccc52021-03-03 11:21:43 -08001463 # print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp))
Eric Kunzee5e26762020-10-13 16:11:07 -07001464
1465 attr = ts.TosaSerializerAttribute()
Kevin Cheng550ccc52021-03-03 11:21:43 -08001466 attr.RescaleAttribute(
1467 input_zp,
1468 output_zp,
1469 multiplier_arr,
1470 shift_arr,
1471 scale32,
1472 double_round,
1473 per_channel,
1474 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001475
1476 self.ser.addOperator(op, [val.name], [result_tens.name], attr)
1477 return result_tens
1478
    def build_cond_if_const(self, op, then_tens, else_tens, cond):
        """Serialize COND_IF whose then/else blocks each contain one const.

        The supplied then/else tensors are used only for their shape; fresh
        random INT32 constants are emitted inside the two basic blocks.
        Returns the result tensor of the COND_IF op.
        """
        # Condition tensor (scalar BOOL const)
        cond_tens = self.ser.addConst([], DType.BOOL, [cond])

        # Random contents for the then/else constants, matching the supplied shape
        out_shape = then_tens.shape
        then_arr = np.int32(self.rng.integers(0, 255, size=out_shape))
        else_arr = np.int32(self.rng.integers(0, 255, size=out_shape))

        # And the result tensor based on any of the outputs
        result_tens = self.ser.addOutput(out_shape, DType.INT32)

        # Create the attribute with the names of the then/else blocks
        then_block = "THEN_BLOCK"
        else_block = "ELSE_BLOCK"
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(op, [cond_tens.name], [result_tens.name], attr)

        self.ser.startBasicBlock(then_block)
        # Build the actual then/else tensors inside their blocks
        then_tens = self.ser.addConst(out_shape, DType.INT32, then_arr)
        self.ser.addOutputTensor(then_tens)

        self.ser.startBasicBlock(else_block)
        else_tens = self.ser.addConst(out_shape, DType.INT32, else_arr)
        self.ser.addOutputTensor(else_tens)

        return result_tens
1514
    def build_cond_if_binary(self, op, a, b, cond):
        """Serialize COND_IF whose then-block computes a + b and whose
        else-block computes a - b.

        Returns the result tensor of the COND_IF op.
        """
        # Condition tensor (scalar BOOL const)
        cond_tens = self.ser.addConst([], DType.BOOL, [cond])

        # Result declared in the current (outer) block
        result_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.currBasicBlock.addOutput(result_tens.name)

        # Create the attribute with the names of the then/else blocks
        then_block = "THEN_BLOCK"
        else_block = "ELSE_BLOCK"
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(
            op, [cond_tens.name, a.name, b.name], [result_tens.name], attr
        )

        # THEN block: a + b
        self.ser.startBasicBlock(then_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        then_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.addOperator(Op.ADD, [a.name, b.name], [then_tens.name])

        # ELSE block: a - b
        self.ser.startBasicBlock(else_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        else_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.addOperator(Op.SUB, [a.name, b.name], [else_tens.name])

        return result_tens
1549
    def build_while_loop(self, op, a, iter_val):
        """Serialize WHILE_LOOP that adds `a` into an accumulator `iter_val`
        times (the body decrements the iteration counter to zero).

        Returns the accumulator output tensor.
        NOTE: the local name `iter` shadows the builtin; kept as-is here.
        """
        # Iteration counter placeholder (scalar INT32)
        iter = self.ser.addPlaceholder([], DType.INT32, [np.int32(iter_val)])

        cond_block = "COND_BLOCK"
        body_block = "BODY_BLOCK"

        attr = ts.TosaSerializerAttribute()
        attr.WhileLoopAttribute(cond_block, body_block)

        # Accumulator tensor, zero-initialized to a's shape
        acc_init_val = np.int32(np.zeros(a.shape))
        acc = self.ser.addPlaceholder(a.shape, a.dtype, acc_init_val)

        # Intermediate/output tensors for everything going through the loop
        iter_out = self.ser.addIntermediate(iter.shape, iter.dtype)
        a_out = self.ser.addIntermediate(a.shape, a.dtype)
        acc_out = self.ser.addIntermediate(acc.shape, acc.dtype)

        # While_loop operator
        self.ser.addOperator(
            op,
            [iter.name, a.name, acc.name],
            [iter_out.name, a_out.name, acc_out.name],
            attr,
        )

        # COND block (input: iter, output: cond_tens) — loop while iter > 0
        self.ser.startBasicBlock(cond_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        zero_tens = self.ser.addConst([], DType.INT32, [np.int32(0)])
        cond_tens = self.ser.addOutput([], DType.BOOL)
        self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name], [cond_tens.name])

        # BODY block (input: a, acc, iter, output: a, acc, iter)
        # acc += a; iter -= 1. Local intermediate tensors must be declared
        # here for the block outputs.
        self.ser.startBasicBlock(body_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        one_tens = self.ser.addConst([], DType.INT32, [np.int32(1)])
        iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype)
        acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype)
        self.ser.addOperator(Op.ADD, [a.name, acc.name], [acc_body_out.name])
        self.ser.addOperator(Op.SUB, [iter.name, one_tens.name], [iter_body_out.name])
        self.ser.addOutputTensor(iter_body_out)
        self.ser.addOutputTensor(a)
        self.ser.addOutputTensor(acc_body_out)

        return acc_out
1602
Kevin Cheng550ccc52021-03-03 11:21:43 -08001603 def genOpTestList(
1604 self, opName, shapeFilter=[None], rankFilter=None, dtypeFilter=None
1605 ):
Eric Kunzee5e26762020-10-13 16:11:07 -07001606
1607 try:
1608 op = self.TOSA_OP_LIST[opName]
1609 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001610 raise Exception("Cannot find op with name {}".format(opName))
Eric Kunzee5e26762020-10-13 16:11:07 -07001611
1612 # Initialize a new random number generator
1613 self.rng = np.random.default_rng(self.random_seed)
1614
Kevin Cheng550ccc52021-03-03 11:21:43 -08001615 build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001616
1617 # Generate the lists of arguments
Kevin Cheng550ccc52021-03-03 11:21:43 -08001618 rmin, rmax = op["rank"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001619
1620 # Test list consists of a tuple of:
1621 # (opName, testNameStr, dtype, shapeList, argumentsList)
1622 testList = []
1623
1624 if not shapeFilter:
1625 shapeFilter = [None]
1626
1627 for r in range(rmin, rmax + 1):
1628
1629 # Filter out the rank?
1630 if rankFilter is not None and r not in rankFilter:
1631 continue
1632
Kevin Cheng550ccc52021-03-03 11:21:43 -08001633 for t in op["types"]:
Eric Kunzee5e26762020-10-13 16:11:07 -07001634
1635 # Filter tests based on dtype?
1636 if dtypeFilter is not None:
1637 if t not in dtypeFilter:
1638 continue
1639
1640 # Create the placeholder and const tensors
1641 for shape in shapeFilter:
1642 # A None shape chooses a random shape of a given rank
1643
1644 # Filter out by rank
1645 if shape is not None and len(shape) != r:
1646 continue
1647
1648 self.setTargetShape(shape)
1649 shapeList = tgen_fcn(self, op, r)
1650
1651 shapeStr = self.shapeStr(shapeList[0])
1652 typeStr = self.typeStr(t)
1653
1654 # Argument lists consists of tuples of the (str, []) string representation and the build function argument list
1655 argList = []
1656 if agen_fcn:
1657 argList = agen_fcn(self, opName, shapeList, t)
1658 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001659 argList = [("", [])]
Eric Kunzee5e26762020-10-13 16:11:07 -07001660
1661 for argStr, args in argList:
1662 if argStr:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001663 testStr = "{}_{}_{}_{}".format(
1664 opName, shapeStr, typeStr, argStr
1665 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001666 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001667 testStr = "{}_{}_{}".format(opName, shapeStr, typeStr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001668
1669 testList.append((opName, testStr, t, shapeList, args))
1670
1671 return testList
1672
Kevin Cheng989cb052021-04-28 16:29:44 -07001673 def serializeTest(self, opName, testStr, dtype_or_dtypeList, shapeList, testArgs):
Eric Kunzee5e26762020-10-13 16:11:07 -07001674 try:
1675 op = self.TOSA_OP_LIST[opName]
1676 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001677 raise Exception("Cannot find op with name {}".format(opName))
Eric Kunzee5e26762020-10-13 16:11:07 -07001678
1679 # Create a serializer
1680 self.createSerializer(opName, testStr)
1681
Kevin Cheng550ccc52021-03-03 11:21:43 -08001682 build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
1683 pCount, cCount = op["operands"]
Kevin Cheng989cb052021-04-28 16:29:44 -07001684 num_operands = pCount + cCount
1685
1686 if isinstance(dtype_or_dtypeList, list):
1687 dtypeList = dtype_or_dtypeList
1688 else:
1689 dtypeList = [dtype_or_dtypeList] * (num_operands)
1690
1691 assert (
1692 len(shapeList) == num_operands
1693 ), "shapeList length {} must match number of operands {}".format(
1694 len(shapeList), num_operands
1695 )
1696 assert (
1697 len(dtypeList) == num_operands
1698 ), "dtypeList length {} must match number of operands {}".format(
1699 len(dtypeList), num_operands
1700 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001701
1702 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001703 qgen = op["qgen"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001704 except KeyError:
1705 qgen = None
1706
1707 # Build the random tensor operands and the test
1708 tens = []
Kevin Chengaee1fac2020-11-11 13:54:06 -08001709
1710 # If test is ArithmeticRightShift, force value of operand[1] to be within [0, num_bits]
Kevin Cheng550ccc52021-03-03 11:21:43 -08001711 if op["op"] == Op.ARITHMETIC_RIGHT_SHIFT:
1712 assert (
1713 pCount == 2 and cCount == 0
1714 ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"
Kevin Chengaee1fac2020-11-11 13:54:06 -08001715
1716 placeholders = []
1717 for idx, shape in enumerate(shapeList[:]):
1718 if idx == 1:
Kevin Cheng989cb052021-04-28 16:29:44 -07001719 if dtypeList[idx] == DType.INT8:
Kevin Chengaee1fac2020-11-11 13:54:06 -08001720 arr = np.int32(self.rng.integers(low=0, high=8, size=shape))
Kevin Cheng989cb052021-04-28 16:29:44 -07001721 elif dtypeList[idx] == DType.INT16:
Kevin Chengaee1fac2020-11-11 13:54:06 -08001722 arr = np.int32(self.rng.integers(low=0, high=16, size=shape))
Kevin Cheng989cb052021-04-28 16:29:44 -07001723 elif dtypeList[idx] == DType.INT32:
Kevin Chengaee1fac2020-11-11 13:54:06 -08001724 arr = np.int32(self.rng.integers(low=0, high=32, size=shape))
1725 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001726 raise Exception("OpArithmeticRightShift: invalid input dtype")
Kevin Chengaee1fac2020-11-11 13:54:06 -08001727 else:
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07001728 arr = self.getRandTensor(shape, dtypeList[idx])
Kevin Cheng989cb052021-04-28 16:29:44 -07001729 placeholders.append(self.ser.addPlaceholder(shape, dtypeList[idx], arr))
Kevin Chengaee1fac2020-11-11 13:54:06 -08001730
1731 tens.extend(placeholders)
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07001732 elif op["op"] == Op.DIV:
1733 assert (
1734 pCount == 2 and cCount == 0
1735 ), "Op.Div must have 2 placeholders, 0 consts"
1736
1737 placeholders = []
1738
1739 # Two invalid cases for Op.DIV:
1740 # 1. divisor == 0
Kevin Cheng47315e12021-05-13 17:41:28 -07001741 # 2. dividend == -(1<<31) and divisor == -1
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07001742 while True:
1743 dividend_arr = self.getRandTensor(shapeList[0], dtypeList[0])
1744 divisor_arr = self.getRandTensor(shapeList[1], dtypeList[1])
1745
1746 if (divisor_arr == 0).any():
1747 continue
1748
Kevin Cheng47315e12021-05-13 17:41:28 -07001749 if (dividend_arr == -(2 ** 31)).any() and (divisor_arr == -1).any():
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07001750 continue
1751
1752 break
1753
1754 placeholders.append(
1755 self.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
1756 )
1757 placeholders.append(
1758 self.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
1759 )
1760
1761 tens.extend(placeholders)
1762 elif op["op"] == Op.MUL:
1763 assert (
1764 pCount == 2 and cCount == 0
1765 ), "Op.MUL must have 2 placeholders, 0 consts"
1766
1767 if dtypeList[0] == DType.FLOAT:
1768 tens.extend(self.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
1769 else:
1770 placeholders = []
1771
1772 # Make sure multiply result in int32 range
1773 shift = testArgs[0]
1774 if dtypeList[0] == DType.INT8:
1775 num_bits = 8
1776 elif dtypeList[0] == DType.INT16:
1777 num_bits = 16
1778 elif dtypeList[0] == DType.INT32:
1779 num_bits = 32
1780 else:
1781 raise Exception("OpMul: invalid input dtype")
1782
1783 for idx, shape in enumerate(shapeList[:]):
1784 low = -(2 ** (num_bits - 1))
1785 high = (2 ** (num_bits - 1)) - 1
1786
1787 a_arr = np.int32(
1788 self.rng.integers(low=low, high=high, size=shapeList[0])
1789 )
1790 b_arr = np.int32(
1791 self.rng.integers(low=low, high=high, size=shapeList[1])
1792 )
1793
1794 i = 0
1795 while True:
1796
1797 a_arr_64 = a_arr.astype(np.int64)
1798 b_arr_64 = b_arr.astype(np.int64)
1799
1800 if shift > 0:
1801 rounding = 1 << (shift - 1)
1802 result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
1803 else:
1804 result_arr = a_arr_64 * b_arr_64
1805
1806 if (result_arr > -(2 ** 31)).all() and (
1807 result_arr <= ((2 ** 31) - 1)
1808 ).all():
1809 break
1810
1811 i = i + 1
1812 a_arr = a_arr // 2
1813 b_arr = b_arr // 2
1814
1815 placeholders.append(
1816 self.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
1817 )
1818 placeholders.append(
1819 self.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
1820 )
1821
1822 tens.extend(placeholders)
Kevin Chengaee1fac2020-11-11 13:54:06 -08001823 else:
Kevin Cheng989cb052021-04-28 16:29:44 -07001824 tens.extend(
1825 self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
1826 )
1827 tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))
Eric Kunzee5e26762020-10-13 16:11:07 -07001828
1829 if qgen is not None:
Kevin Cheng989cb052021-04-28 16:29:44 -07001830 qinfo = qgen(self, op, dtypeList[0])
Eric Kunzee5e26762020-10-13 16:11:07 -07001831 else:
1832 qinfo = None
1833
1834 try:
1835 if qinfo is not None:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001836 resultName = build_fcn(self, op["op"], *tens, *testArgs, qinfo)
Eric Kunzee5e26762020-10-13 16:11:07 -07001837 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001838 resultName = build_fcn(self, op["op"], *tens, *testArgs)
Eric Kunzee5e26762020-10-13 16:11:07 -07001839 except TypeError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001840 print(
1841 "build_fcn: {}\nTensors: {}\nArgs: {}\n".format(
1842 build_fcn, tens, testArgs
1843 )
1844 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001845 raise e
1846
1847 # Save the serialized test
Kevin Cheng550ccc52021-03-03 11:21:43 -08001848 self.serialize("test")
Eric Kunzee5e26762020-10-13 16:11:07 -07001849
1850 def createDynamicOpLists(self):
1851
1852 # Dynamically create op lists for convolutions with a list of kernel sizes
Kevin Cheng550ccc52021-03-03 11:21:43 -08001853 KERNELS = [[1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3]]
Eric Kunzee5e26762020-10-13 16:11:07 -07001854
1855 for k in KERNELS:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001856 testName = "conv2d_{}x{}".format(k[0], k[1])
1857 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST["conv2d_TEMPLATE"].copy()
1858 self.TOSA_OP_LIST[testName]["filter"] = k
1859 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07001860
Kevin Cheng550ccc52021-03-03 11:21:43 -08001861 testName = "depthwise_conv2d_{}x{}".format(k[0], k[1])
1862 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
1863 "depthwise_conv2d_TEMPLATE"
1864 ].copy()
1865 self.TOSA_OP_LIST[testName]["filter"] = k
1866 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07001867
Kevin Cheng550ccc52021-03-03 11:21:43 -08001868 testName = "transpose_conv2d_{}x{}".format(k[0], k[1])
1869 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
1870 "transpose_conv2d_TEMPLATE"
1871 ].copy()
1872 self.TOSA_OP_LIST[testName]["filter"] = k
1873 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07001874
1875 # Delete any templates after having created any dynamic ops
1876 # This is a two-pass operation because it's bad practice to delete
1877 # keys from dictionaries while iterating
1878 keyList = []
1879 for k in self.TOSA_OP_LIST:
1880 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001881 if self.TOSA_OP_LIST[k]["template"] == True:
Eric Kunzee5e26762020-10-13 16:11:07 -07001882 keyList.append(k)
1883 continue
1884 except KeyError:
1885 pass
1886
1887 for k in keyList:
1888 del self.TOSA_OP_LIST[k]
1889
1890 def initOpListDefaults(self):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001891 """Fill in default fields for ops if they aren't already specified.
1892 Look for missing required fields (datastructure linting)."""
Eric Kunzee5e26762020-10-13 16:11:07 -07001893 for op in self.TOSA_OP_LIST:
1894
1895 # Required fields
1896 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001897 pl, c = self.TOSA_OP_LIST[op]["operands"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001898 except (KeyError, ValueError, TypeError):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001899 raise Exception(
1900 "Op {} is missing a valid operand tuple in TOSA_OP_LIST".format(op)
1901 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001902
1903 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001904 fcn, tgen, arggen = self.TOSA_OP_LIST[op]["build_fcn"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001905 except (KeyError, ValueError, TypeError):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001906 raise Exception(
1907 "Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST".format(
1908 op
1909 )
1910 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001911
1912 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001913 types = self.TOSA_OP_LIST[op]["types"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001914 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001915 raise Exception(
1916 "Op {} is missing a valid type list in TOSA_OP_LIST".format(op)
1917 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001918
1919 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001920 opcode = self.TOSA_OP_LIST[op]["op"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001921 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001922 raise Exception(
1923 "Op {} is missing the Op field in TOSA_OP_LIST".format(op)
1924 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001925
1926 # Put in default rank range, if missing
1927 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001928 rank = self.TOSA_OP_LIST[op]["rank"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001929 except KeyError:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001930 self.TOSA_OP_LIST[op]["rank"] = self.DEFAULT_RANK_RANGE
Eric Kunzee5e26762020-10-13 16:11:07 -07001931
1932 # Tensor operator list
1933 # 'op': op name
1934 # 'operands': tuple of (placeholder, const) operands
Kevin Cheng3a478572021-01-22 17:21:02 -08001935 # 'rank': optional, restricts rank to tuple inclusive of (min, max),
1936 # if not specified, defaults to (1, 4)
Eric Kunzee5e26762020-10-13 16:11:07 -07001937 # 'build_fcn': tuple of the function to (build_operator(), TensorGen function, ArgGen enum)
1938 # 'types': array of datatypes to be tested
Kevin Cheng550ccc52021-03-03 11:21:43 -08001939 TYPE_FP = [DType.FLOAT]
Eric Kunzee5e26762020-10-13 16:11:07 -07001940
Kevin Cheng550ccc52021-03-03 11:21:43 -08001941 TYPE_INT = [DType.INT8, DType.INT16, DType.INT32] # Excludes INT4
1942 TYPE_INT_FP = [DType.INT8, DType.INT16, DType.INT32, DType.FLOAT] # Excludes INT4
Eric Kunzee5e26762020-10-13 16:11:07 -07001943
Kevin Cheng550ccc52021-03-03 11:21:43 -08001944 TYPE_BOOL = [DType.BOOL]
1945 TYPE_FI32 = [DType.FLOAT, DType.INT32]
1946 TYPE_FIB = [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL]
1947 TYPE_FI16 = [DType.FLOAT, DType.INT16]
Eric Kunzee5e26762020-10-13 16:11:07 -07001948
Kevin Cheng550ccc52021-03-03 11:21:43 -08001949 TYPE_NARROW_INT_FP = [DType.INT8, DType.INT16, DType.FLOAT]
Eric Kunzee5e26762020-10-13 16:11:07 -07001950
Kevin Cheng989cb052021-04-28 16:29:44 -07001951 TYPE_CONV2D = [
1952 [DType.INT8, DType.INT8, DType.INT32],
1953 [DType.INT16, DType.INT8, DType.INT48],
1954 DType.FLOAT,
1955 ]
1956
Eric Kunzee5e26762020-10-13 16:11:07 -07001957 DEFAULT_RANK_RANGE = (1, 4)
1958
1959 TOSA_OP_LIST = {
Jared Smolens573ecd42021-03-04 15:24:10 -08001960 # Tensor operators
Kevin Cheng550ccc52021-03-03 11:21:43 -08001961 "argmax": {
1962 "op": Op.ARGMAX,
1963 "operands": (1, 0),
1964 "build_fcn": (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1965 "types": TYPE_NARROW_INT_FP,
1966 },
Jared Smolens573ecd42021-03-04 15:24:10 -08001967 "avg_pool2d": {
1968 "op": Op.AVG_POOL2D,
1969 "operands": (1, 0),
1970 "rank": (4, 4),
1971 "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
1972 "qgen": TosaQuantGen.qgUnary,
1973 "types": TYPE_NARROW_INT_FP,
1974 },
Eric Kunzee5e26762020-10-13 16:11:07 -07001975 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08001976 "conv2d_TEMPLATE": {
1977 "op": Op.CONV2D,
1978 "operands": (1, 2),
1979 "rank": (4, 4),
1980 "build_fcn": (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D),
1981 "qgen": TosaQuantGen.qgConv,
Kevin Cheng989cb052021-04-28 16:29:44 -07001982 "types": TYPE_CONV2D,
Kevin Cheng550ccc52021-03-03 11:21:43 -08001983 "template": True,
1984 },
Jared Smolens573ecd42021-03-04 15:24:10 -08001985 # Conv3d TBD
Eric Kunzee5e26762020-10-13 16:11:07 -07001986 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08001987 "depthwise_conv2d_TEMPLATE": {
1988 "op": Op.DEPTHWISE_CONV2D,
1989 "operands": (1, 2),
1990 "filter": [1, 1],
1991 "rank": (4, 4),
1992 "build_fcn": (
1993 build_depthwise_conv2d,
1994 TosaTensorGen.tgDepthwiseConv2D,
1995 TosaArgGen.agConv2D,
1996 ),
1997 "qgen": TosaQuantGen.qgConv,
Kevin Cheng989cb052021-04-28 16:29:44 -07001998 "types": TYPE_CONV2D,
Kevin Cheng550ccc52021-03-03 11:21:43 -08001999 "template": True,
2000 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002001 "fully_connected": {
2002 "op": Op.FULLY_CONNECTED,
2003 "operands": (1, 2),
2004 "rank": (2, 2),
2005 "build_fcn": (build_fully_connected, TosaTensorGen.tgFullyConnected, None),
2006 "qgen": TosaQuantGen.qgConv,
2007 "types": TYPE_CONV2D,
2008 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002009 "matmul": {
2010 "op": Op.MATMUL,
2011 "operands": (2, 0),
Kevin Cheng2d60f002021-06-09 14:18:32 -07002012 "rank": (3, 3),
Jared Smolens573ecd42021-03-04 15:24:10 -08002013 "build_fcn": (build_matmul, TosaTensorGen.tgMatmul, None),
2014 "qgen": TosaQuantGen.qgMatmul,
2015 "types": TYPE_NARROW_INT_FP,
2016 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002017 "max_pool2d": {
2018 "op": Op.MAX_POOL2D,
2019 "operands": (1, 0),
2020 "rank": (4, 4),
2021 "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
2022 "types": TYPE_NARROW_INT_FP,
2023 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002024 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08002025 "transpose_conv2d_TEMPLATE": {
2026 "op": Op.TRANSPOSE_CONV2D,
Kevin Cheng989cb052021-04-28 16:29:44 -07002027 "operands": (1, 2),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002028 "rank": (4, 4),
2029 "build_fcn": (
2030 build_transpose_conv2d,
2031 TosaTensorGen.tgTransposeConv2D,
2032 TosaArgGen.agTransposeConv2D,
2033 ),
2034 "qgen": TosaQuantGen.qgConv,
Kevin Cheng989cb052021-04-28 16:29:44 -07002035 "types": TYPE_CONV2D,
Kevin Cheng550ccc52021-03-03 11:21:43 -08002036 "template": True,
2037 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002038 # Activation functions
Kevin Cheng550ccc52021-03-03 11:21:43 -08002039 "clamp": {
2040 "op": Op.CLAMP,
2041 "operands": (1, 0),
2042 "build_fcn": (build_clamp, TosaTensorGen.tgBasic, None),
2043 "types": TYPE_NARROW_INT_FP,
2044 },
2045 "relun": {
2046 "op": Op.RELUN,
2047 "operands": (1, 0),
2048 "build_fcn": (build_relun, TosaTensorGen.tgBasic, None),
2049 "types": TYPE_FI32,
2050 },
2051 "sigmoid": {
2052 "op": Op.SIGMOID,
2053 "operands": (1, 0),
2054 "build_fcn": (build_sigmoid, TosaTensorGen.tgBasic, None),
2055 "types": TYPE_FP,
2056 },
2057 "tanh": {
2058 "op": Op.TANH,
2059 "operands": (1, 0),
2060 "build_fcn": (build_tanh, TosaTensorGen.tgBasic, None),
2061 "types": TYPE_FP,
2062 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002063 # Elementwise Binary Operators
2064 "add": {
2065 "op": Op.ADD,
2066 "operands": (2, 0),
2067 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2068 "types": TYPE_FI32,
2069 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002070 "arithmetic_right_shift": {
2071 "op": Op.ARITHMETIC_RIGHT_SHIFT,
2072 "operands": (2, 0),
2073 "build_fcn": (
2074 build_arithmetic_right_shift,
2075 TosaTensorGen.tgBroadcastFuzz,
2076 TosaArgGen.agArithmeticRightShift,
2077 ),
2078 "types": TYPE_INT,
2079 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002080 "bitwise_and": {
2081 "op": Op.BITWISE_AND,
2082 "operands": (2, 0),
2083 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2084 "types": TYPE_INT,
2085 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002086 "bitwise_or": {
2087 "op": Op.BITWISE_OR,
2088 "operands": (2, 0),
2089 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2090 "types": TYPE_INT,
2091 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002092 "bitwise_xor": {
2093 "op": Op.BITWISE_XOR,
2094 "operands": (2, 0),
2095 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2096 "types": TYPE_INT,
2097 },
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07002098 "div": {
2099 "op": Op.DIV,
2100 "operands": (2, 0),
2101 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2102 "types": [DType.INT32],
2103 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002104 "logical_and": {
2105 "op": Op.LOGICAL_AND,
2106 "operands": (2, 0),
2107 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2108 "types": TYPE_BOOL,
2109 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002110 "logical_left_shift": {
2111 "op": Op.LOGICAL_LEFT_SHIFT,
2112 "operands": (2, 0),
2113 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2114 "types": TYPE_INT,
2115 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002116 "logical_right_shift": {
2117 "op": Op.LOGICAL_RIGHT_SHIFT,
2118 "operands": (2, 0),
2119 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2120 "types": TYPE_INT,
2121 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002122 "logical_or": {
2123 "op": Op.LOGICAL_OR,
2124 "operands": (2, 0),
2125 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2126 "types": TYPE_BOOL,
2127 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002128 "logical_xor": {
2129 "op": Op.LOGICAL_XOR,
2130 "operands": (2, 0),
2131 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2132 "types": TYPE_BOOL,
2133 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002134 "maximum": {
2135 "op": Op.MAXIMUM,
2136 "operands": (2, 0),
2137 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2138 "types": TYPE_FI32,
2139 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002140 "minimum": {
2141 "op": Op.MINIMUM,
2142 "operands": (2, 0),
2143 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2144 "types": TYPE_FI32,
2145 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002146 "mul": {
2147 "op": Op.MUL,
2148 "operands": (2, 0),
2149 "build_fcn": (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul),
2150 "types": TYPE_INT_FP,
2151 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002152 "pow": {
2153 "op": Op.POW,
2154 "operands": (2, 0),
2155 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBasic, None),
2156 "types": TYPE_FP,
2157 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002158 "sub": {
2159 "op": Op.SUB,
2160 "operands": (2, 0),
2161 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2162 "types": TYPE_FI32,
2163 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002164 "table": {
2165 "op": Op.TABLE,
2166 # Use the automatic generation functions to create the input array
2167 # but create the table tensor in the build function, as it may be
2168 # a different type from the input
2169 "operands": (1, 0),
2170 "build_fcn": (build_table, TosaTensorGen.tgBasic, None),
2171 "types": [DType.INT16],
2172 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002173 # Elementwise Unary operators
2174 "abs": {
2175 "op": Op.ABS,
2176 "operands": (1, 0),
2177 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2178 "types": TYPE_FI32,
2179 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002180 "bitwise_not": {
2181 "op": Op.BITWISE_NOT,
2182 "operands": (1, 0),
2183 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2184 "types": TYPE_INT,
2185 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002186 "ceil": {
2187 "op": Op.CEIL,
2188 "operands": (1, 0),
2189 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2190 "types": TYPE_FP,
2191 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002192 "clz": {
2193 "op": Op.CLZ,
2194 "operands": (1, 0),
2195 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2196 "types": [DType.INT32],
2197 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002198 "exp": {
2199 "op": Op.EXP,
2200 "operands": (1, 0),
2201 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2202 "types": TYPE_FP,
2203 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002204 "floor": {
2205 "op": Op.FLOOR,
2206 "operands": (1, 0),
2207 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2208 "types": TYPE_FP,
2209 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002210 "log": {
2211 "op": Op.LOG,
2212 "operands": (1, 0),
2213 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2214 "types": TYPE_FP,
2215 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002216 "logical_not": {
2217 "op": Op.LOGICAL_NOT,
2218 "operands": (1, 0),
2219 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2220 "types": TYPE_BOOL,
2221 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002222 "negate": {
2223 "op": Op.NEGATE,
2224 "operands": (1, 0),
2225 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2226 "qgen": TosaQuantGen.qgUnary,
2227 "types": TYPE_INT_FP,
2228 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002229 "reciprocal": {
2230 "op": Op.RECIPROCAL,
2231 "operands": (1, 0),
2232 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2233 "types": TYPE_FP,
2234 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002235 "rsqrt": {
2236 "op": Op.RSQRT,
2237 "operands": (1, 0),
2238 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2239 "types": TYPE_FP,
2240 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002241 # Elementwise Ternary operators
2242 "select": {
2243 "op": Op.SELECT,
2244 "operands": (3, 0),
2245 "build_fcn": (build_select, TosaTensorGen.tgBroadcastFuzz, None),
2246 "types": TYPE_FIB,
2247 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002248 # Comparison operators
2249 "equal": {
2250 "op": Op.EQUAL,
2251 "operands": (2, 0),
2252 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2253 "types": TYPE_FI32,
2254 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002255 "greater_equal": {
2256 "op": Op.GREATER_EQUAL,
2257 "operands": (2, 0),
2258 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2259 "types": TYPE_FI32,
2260 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002261 "greater": {
2262 "op": Op.GREATER,
2263 "operands": (2, 0),
2264 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2265 "types": TYPE_FI32,
2266 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002267 # Reduction operators
2268 "reduce_all": {
2269 "op": Op.REDUCE_ALL,
2270 "operands": (1, 0),
2271 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2272 "types": TYPE_BOOL,
2273 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002274 "reduce_any": {
2275 "op": Op.REDUCE_ANY,
2276 "operands": (1, 0),
2277 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2278 "types": TYPE_BOOL,
2279 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002280 "reduce_max": {
2281 "op": Op.REDUCE_MAX,
2282 "operands": (1, 0),
2283 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2284 "types": TYPE_INT_FP,
2285 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002286 "reduce_min": {
2287 "op": Op.REDUCE_MAX,
2288 "operands": (1, 0),
2289 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2290 "types": TYPE_INT_FP,
2291 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002292 "reduce_product": {
2293 "op": Op.REDUCE_PRODUCT,
2294 "operands": (1, 0),
2295 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2296 "types": TYPE_FP,
2297 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002298 "reduce_sum": {
2299 "op": Op.REDUCE_SUM,
2300 "operands": (1, 0),
2301 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2302 "types": TYPE_FI32,
2303 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002304 # Data layout operators
Kevin Cheng550ccc52021-03-03 11:21:43 -08002305 "concat": {
2306 "op": Op.CONCAT,
2307 "operands": (2, 0),
2308 "build_fcn": (build_concat, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2309 "types": TYPE_FIB,
2310 },
2311 "pad": {
2312 "op": Op.PAD,
2313 "operands": (1, 0),
2314 "build_fcn": (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad),
2315 "qgen": TosaQuantGen.qgPad,
2316 "types": TYPE_FIB,
2317 },
2318 "reshape": {
2319 "op": Op.RESHAPE,
2320 "operands": (1, 0),
2321 "build_fcn": (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
2322 "types": TYPE_FIB,
2323 },
2324 "reverse": {
2325 "op": Op.REVERSE,
2326 "operands": (1, 0),
2327 "build_fcn": (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2328 "types": TYPE_FIB,
2329 },
2330 "slice": {
2331 "op": Op.SLICE,
2332 "operands": (1, 0),
2333 "build_fcn": (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
2334 "types": TYPE_FIB,
2335 },
2336 "tile": {
2337 "op": Op.TILE,
2338 "operands": (1, 0),
2339 "build_fcn": (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
2340 "types": TYPE_FIB,
2341 },
2342 "transpose": {
2343 "op": Op.TRANSPOSE,
2344 "operands": (1, 0),
Jeremy Johnsona6185572021-06-21 15:55:35 +01002345 "rank": (1, 4),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002346 "build_fcn": (
2347 build_transpose,
2348 TosaTensorGen.tgBasic,
2349 TosaArgGen.agTranspose,
2350 ),
2351 "types": TYPE_FIB,
2352 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002353 # Data nodes
2354 "const": {
2355 "op": Op.CONST,
2356 "operands": (1, 0),
2357 "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None),
2358 "types": TYPE_FIB,
2359 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002360 "identity": {
2361 "op": Op.IDENTITY,
2362 "operands": (1, 0),
2363 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2364 "types": TYPE_FIB,
2365 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002366 # Scatter/Gather
Kevin Cheng550ccc52021-03-03 11:21:43 -08002367 "gather": {
2368 "op": Op.GATHER,
2369 # Only specify 'values' tensor here. 'indices' is generated in op building stage
2370 "operands": (1, 0),
2371 "rank": (3, 3),
2372 "build_fcn": (build_gather, TosaTensorGen.tgBasic, None),
2373 "types": TYPE_INT_FP,
2374 },
2375 "scatter": {
2376 "op": Op.SCATTER,
2377 # Only specify 'values_in' tensor here.
2378 #'indices' and 'input' are generated in op building stage
2379 "operands": (2, 0),
2380 "rank": (3, 3),
2381 "build_fcn": (build_scatter, TosaTensorGen.tgScatter, None),
2382 "types": TYPE_INT_FP,
2383 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002384 # Image operations
Kevin Cheng550ccc52021-03-03 11:21:43 -08002385 "resize": {
2386 "op": Op.RESIZE,
2387 "operands": (1, 0),
2388 "rank": (4, 4),
2389 "build_fcn": (build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
2390 "types": [DType.INT8, DType.INT16, DType.FLOAT],
2391 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002392 # Type conversion
Kevin Cheng550ccc52021-03-03 11:21:43 -08002393 "cast": {
2394 "op": Op.CAST,
2395 "operands": (1, 0),
2396 "build_fcn": (build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast),
2397 "types": [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL],
2398 },
2399 "rescale": {
2400 "op": Op.RESCALE,
2401 "operands": (1, 0),
2402 "build_fcn": (build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale),
2403 "types": [DType.INT8, DType.INT16, DType.INT32, DType.INT48],
2404 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002405 # Custom
2406 # Not implemented.
Jared Smolens573ecd42021-03-04 15:24:10 -08002407 # Control flow operators
Eric Kunzee5e26762020-10-13 16:11:07 -07002408 # Two varients of cond_if, one that generates one of two constant tensors (no
2409 # inputs to the basic blocks, one output) and another that either adds or subtracts two tensors
2410 # (two inputs to the basic blocks, one output)
Kevin Cheng550ccc52021-03-03 11:21:43 -08002411 "cond_if_const": {
2412 "op": Op.COND_IF,
2413 "operands": (0, 2),
2414 "build_fcn": (
2415 build_cond_if_const,
2416 TosaTensorGen.tgBasic,
2417 TosaArgGen.agCondIf,
2418 ),
2419 "types": [DType.BOOL],
2420 },
2421 "cond_if_binary": {
2422 "op": Op.COND_IF,
2423 "operands": (2, 0),
2424 "build_fcn": (
2425 build_cond_if_binary,
2426 TosaTensorGen.tgBasic,
2427 TosaArgGen.agCondIf,
2428 ),
2429 "types": TYPE_FI32,
2430 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002431 # while_loop
Kevin Cheng550ccc52021-03-03 11:21:43 -08002432 "while_loop": {
2433 "op": Op.WHILE_LOOP,
2434 "operands": (0, 1),
2435 "build_fcn": (
2436 build_while_loop,
2437 TosaTensorGen.tgBasic,
2438 TosaArgGen.agWhileLoop,
2439 ),
2440 "types": [DType.INT32],
2441 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002442 }
2443
Kevin Cheng550ccc52021-03-03 11:21:43 -08002444
Eric Kunzee5e26762020-10-13 16:11:07 -07002445class OutputShaper:
2446 # Methods in this class compute the expected output shape and datatype
2447 # for common classes of operations
    def __init__(self):
        # Stateless: every shape-inference method on this class is static.
        pass
2450
2451 # These methods return arguments that can be used for
2452 # creating a new output tensor
2453 @staticmethod
2454 def binaryBroadcastOp(ser, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002455 assert len(a.shape) == len(b.shape)
2456 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002457
2458 shape = []
2459 for i in range(len(a.shape)):
2460 if a.shape[i] == 1:
2461 shape.append(b.shape[i])
2462 else:
2463 shape.append(a.shape[i])
2464
Kevin Cheng550ccc52021-03-03 11:21:43 -08002465 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002466
2467 @staticmethod
2468 def binaryNonBroadcastOp(ser, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002469 assert len(a.shape) == len(b.shape)
2470 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002471
2472 shape = []
2473 for i in range(len(a.shape)):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002474 assert a.shape[i] == b.shape[i]
Eric Kunzee5e26762020-10-13 16:11:07 -07002475 shape.append(a.shape[i])
2476
Kevin Cheng550ccc52021-03-03 11:21:43 -08002477 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002478
    @staticmethod
    def unaryOp(ser, a):
        # Elementwise unary ops preserve both the shape and the dtype of a.
        return ser.addOutput(a.shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002482
2483 @staticmethod
2484 def selectOp(ser, cond, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002485 assert len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape)
2486 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002487
2488 shape = []
2489 for i in range(len(a.shape)):
2490 shape.append(max(cond.shape[i], a.shape[i], b.shape[i]))
2491
Kevin Cheng550ccc52021-03-03 11:21:43 -08002492 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002493
2494 @staticmethod
2495 def binaryComparisonOp(ser, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002496 assert len(a.shape) == len(b.shape)
2497 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002498
2499 # Do broadcast
2500 shape = []
2501 for i in range(len(a.shape)):
2502 if a.shape[i] == 1:
2503 shape.append(b.shape[i])
2504 else:
2505 shape.append(a.shape[i])
2506
2507 # Force the output type to bool
Kevin Cheng550ccc52021-03-03 11:21:43 -08002508 return ser.addOutput(shape, DType.BOOL)
Eric Kunzee5e26762020-10-13 16:11:07 -07002509
2510 @staticmethod
2511 def reduceOp(ser, a, axis):
2512
2513 shape = a.shape.copy()
2514
2515 shape[axis] = 1
2516
Kevin Cheng550ccc52021-03-03 11:21:43 -08002517 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002518
2519 @staticmethod
2520 def argmaxOp(ser, a, axis):
2521 shape = a.shape.copy()
2522 del shape[axis]
Kevin Cheng550ccc52021-03-03 11:21:43 -08002523 return ser.addOutput(shape, DType.INT32)
Eric Kunzee5e26762020-10-13 16:11:07 -07002524
    @staticmethod
    def conv2dOp(ser, ifm, filter, strides, padding, dilations):
        """Compute the expected CONV2D output shape and accumulator dtype.

        ifm is laid out NHWC and filter OHWI; the returned OFM is NHWC.
        An invalid parameter combination yields a 0x0 spatial output and
        marks the test as an expected failure rather than raising.
        """

        # IFM: NHWC
        # Filter: OHWI
        # OFM: NHWC

        if len(padding) == 2:
            # Expand padding to 4 parameters in the case of transpose_conv2d
            # From H,W to T,B,L,R
            padding = [padding[0], padding[0], padding[1], padding[1]]

        # Dilated-convolution output extent: the effective filter size is
        # f + (f - 1) * (dilation - 1); floor-divide by stride and add 1.
        h = (
            ifm.shape[1]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        w = (
            ifm.shape[2]
            - filter.shape[2]
            - (filter.shape[2] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1

        if h <= 0 or w <= 0:
            # Invalid test parameters?
            h = 0
            w = 0
            ser.setExpectedFailure(True, "Invalid combination of conv2d parameters")

        ofm_shape = [ifm.shape[0], h, w, filter.shape[0]]

        # Accumulator dtype widens: INT8 -> INT32, INT16 -> INT48, FLOAT stays.
        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002571
    @staticmethod
    def depthwiseConv2dOp(ser, ifm, filter, strides, padding, dilations):
        """Compute the expected DEPTHWISE_CONV2D output shape and dtype.

        ifm is NHWC, filter is HWCM; the OFM is NHW(C*M) — the channel
        count is input channels times the depth multiplier.  Invalid
        parameters mark the test as an expected failure (0x0 spatial dims).
        """
        # IFM: NHWC
        # Filter: HWCM
        # OFM: NHW C*M

        # Same dilated-convolution arithmetic as conv2dOp, but the filter's
        # spatial dims are at indices 0/1 (HWCM layout).
        h = (
            ifm.shape[1]
            - filter.shape[0]
            - (filter.shape[0] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        w = (
            ifm.shape[2]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1

        if h <= 0 or w <= 0:
            # Invalid test parameters?
            h = 0
            w = 0
            ser.setExpectedFailure(True, "Invalid combination of conv2d parameters")

        ofm_shape = [ifm.shape[0], h, w, filter.shape[2] * filter.shape[3]]

        # Accumulator dtype widens: INT8 -> INT32, INT16 -> INT48, FLOAT stays.
        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002611
2612 @staticmethod
2613 def pool2dOp(ser, ifm, kernel, stride, pad):
2614 # input: NHWC
2615 h = (ifm.shape[1] + pad[0] + pad[1] + stride[0] - kernel[0]) // stride[0]
2616 w = (ifm.shape[2] + pad[2] + pad[3] + stride[1] - kernel[1]) // stride[1]
2617
2618 if h <= 0 or w <= 0:
2619 # Invalid test parameters?
2620 h = 0
2621 w = 0
Kevin Cheng550ccc52021-03-03 11:21:43 -08002622 ser.setExpectedFailure(True, "Invalid combination of pooling parameters")
Eric Kunzee5e26762020-10-13 16:11:07 -07002623
2624 ofm_shape = [ifm.shape[0], h, w, ifm.shape[3]]
Kevin Cheng550ccc52021-03-03 11:21:43 -08002625 return ser.addOutput(ofm_shape, ifm.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002626
2627 @staticmethod
2628 def fullyConnectedOp(ser, input, filter):
2629 # input: N, IC
2630 # filter: OC, IC
2631 # output: N, OC
2632
2633 output_shape = [input.shape[0], filter.shape[0]]
2634
Kevin Cheng3a478572021-01-22 17:21:02 -08002635 if input.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07002636 out_dtype = DType.INT32
2637 elif input.dtype == DType.INT16:
2638 out_dtype = DType.INT48
2639 elif input.dtype == DType.FLOAT:
2640 out_dtype = DType.FLOAT
2641 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002642 raise Exception("Unsupported input dtype: {}".format(input.dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07002643
Kevin Cheng550ccc52021-03-03 11:21:43 -08002644 return ser.addOutput(output_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002645
2646 @staticmethod
2647 def matmulOp(ser, a, b):
Kevin Cheng2d60f002021-06-09 14:18:32 -07002648 # a: N, H, C
2649 # b: N, C, W
2650 # out: N, H, W
Eric Kunzee5e26762020-10-13 16:11:07 -07002651
Kevin Cheng2d60f002021-06-09 14:18:32 -07002652 output_shape = [a.shape[0], a.shape[1], b.shape[2]]
Eric Kunzee5e26762020-10-13 16:11:07 -07002653
Kevin Cheng3a478572021-01-22 17:21:02 -08002654 if a.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07002655 out_dtype = DType.INT32
2656 elif a.dtype == DType.INT16:
2657 out_dtype = DType.INT48
2658 elif a.dtype == DType.FLOAT:
2659 out_dtype = DType.FLOAT
2660 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002661 raise Exception("UNsupported input dtype for matmul: {}".format(a.dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07002662
Kevin Cheng550ccc52021-03-03 11:21:43 -08002663 return ser.addOutput(output_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002664
2665 @staticmethod
2666 def concatOp(ser, a, b, axis):
2667
2668 output_shape = a.shape.copy()
2669 output_shape[axis] = a.shape[axis] + b.shape[axis]
2670
Kevin Cheng550ccc52021-03-03 11:21:43 -08002671 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002672
2673 @staticmethod
2674 def padOp(ser, a, padding):
2675
2676 output_shape = a.shape.copy()
2677
2678 for i in range(len(output_shape)):
2679 output_shape[i] = padding[i][0] + padding[i][1] + output_shape[i]
2680
Kevin Cheng550ccc52021-03-03 11:21:43 -08002681 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002682
2683 @staticmethod
2684 def reshapeOp(ser, a, shape):
2685 output_shape = shape.copy()
2686
2687 totalElements = 1
2688 for i in a.shape:
2689 totalElements *= i
2690
2691 # If there are any -1 elements, figure out what that dimension must be
2692 totalOutputElements = 1
2693 for i in output_shape:
2694 if i != -1:
2695 totalOutputElements *= i
2696
2697 # And fill it in
2698 for i in range(len(output_shape)):
2699 if output_shape[i] == -1:
2700 output_shape[i] = totalElements // totalOutputElements
2701
Kevin Cheng550ccc52021-03-03 11:21:43 -08002702 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002703
2704 @staticmethod
2705 def sliceOp(ser, a, begin, size):
2706
2707 output_shape = size.copy()
Kevin Cheng550ccc52021-03-03 11:21:43 -08002708 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002709
2710 @staticmethod
2711 def tileOp(ser, a, multiples):
2712
2713 output_shape = a.shape.copy()
Kevin Cheng550ccc52021-03-03 11:21:43 -08002714 assert len(multiples) == len(output_shape)
Eric Kunzee5e26762020-10-13 16:11:07 -07002715
2716 for i in range(len(output_shape)):
2717 output_shape[i] = a.shape[i] * multiples[i]
2718
Kevin Cheng550ccc52021-03-03 11:21:43 -08002719 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002720
2721 @staticmethod
2722 def transposeOp(ser, a, perms):
2723 output_shape = a.shape.copy()
Kevin Cheng550ccc52021-03-03 11:21:43 -08002724 assert len(perms) == len(output_shape)
Eric Kunzee5e26762020-10-13 16:11:07 -07002725
2726 for i in range(len(output_shape)):
2727 output_shape[i] = a.shape[perms[i]]
2728
Kevin Cheng550ccc52021-03-03 11:21:43 -08002729 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002730
2731 @staticmethod
Kevin Cheng77d0f762020-11-24 10:26:32 -08002732 def gatherOp(ser, values, indices):
2733 assert len(values.shape) == 3
2734 assert len(indices.shape) == 2
2735 assert values.shape[0] == indices.shape[0]
Eric Kunzee5e26762020-10-13 16:11:07 -07002736
Kevin Cheng77d0f762020-11-24 10:26:32 -08002737 output_shape = [values.shape[0], indices.shape[1], values.shape[2]]
2738
Kevin Cheng550ccc52021-03-03 11:21:43 -08002739 return ser.addOutput(output_shape, values.dtype)
Kevin Cheng77d0f762020-11-24 10:26:32 -08002740
2741 @staticmethod
2742 def scatterOp(ser, values_in, indices, input):
2743 assert len(values_in.shape) == 3
2744 assert len(indices.shape) == 2
2745 assert len(input.shape) == 3
Kevin Cheng550ccc52021-03-03 11:21:43 -08002746 assert values_in.shape[0] == indices.shape[0] # N
2747 assert input.shape[1] == indices.shape[1] # W
2748 assert values_in.shape[2] == input.shape[2] # C
Kevin Cheng77d0f762020-11-24 10:26:32 -08002749
2750 output_shape = values_in.shape
2751
Kevin Cheng550ccc52021-03-03 11:21:43 -08002752 return ser.addOutput(output_shape, values_in.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002753
    @staticmethod
    def tableOp(ser, input, table):
        # Output has the same shape as the input.
        # NOTE(review): the previous comment claimed the output takes the
        # table's dtype, but the code always emits INT32 and never reads
        # the table argument here — confirm which is intended.
        return ser.addOutput(input.shape, DType.INT32)
Eric Kunzee5e26762020-10-13 16:11:07 -07002758
2759 @staticmethod
Kevin Cheng550ccc52021-03-03 11:21:43 -08002760 def resizeOp(
2761 ser,
2762 input,
2763 mode,
2764 stride,
2765 offset,
2766 shift,
2767 stride_fp,
2768 offset_fp,
2769 output_dims,
2770 input_dtype,
2771 output_dtype,
2772 ):
Eric Kunzee5e26762020-10-13 16:11:07 -07002773
2774 output_dims = [input.shape[0], output_dims[0], output_dims[1], input.shape[3]]
2775
Kevin Cheng77d0f762020-11-24 10:26:32 -08002776 if input_dtype == DType.FLOAT:
2777 if stride_fp[0] <= 0 or stride_fp[1] <= 0:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002778 ser.setExpectedFailure(True, "Negative or zero stride")
Kevin Cheng77d0f762020-11-24 10:26:32 -08002779 else:
2780 if stride[0] <= 0 or stride[1] <= 0:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002781 ser.setExpectedFailure(True, "Negative or zero stride")
Eric Kunzee5e26762020-10-13 16:11:07 -07002782
Kevin Chengaee1fac2020-11-11 13:54:06 -08002783 if mode == ResizeMode.BILINEAR:
2784 if input_dtype == DType.INT8:
2785 if output_dtype != DType.INT32:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002786 ser.setExpectedFailure(True, "Invalid output data type")
Kevin Chengaee1fac2020-11-11 13:54:06 -08002787 elif input_dtype == DType.INT16:
2788 if output_dtype != DType.INT48:
Kevin Cheng989cb052021-04-28 16:29:44 -07002789 ser.setExpectedFailure(true, "Invalid output data type")
Kevin Cheng77d0f762020-11-24 10:26:32 -08002790 elif input_dtype == DType.FLOAT:
2791 if output_dtype != DType.FLOAT:
Kevin Cheng989cb052021-04-28 16:29:44 -07002792 ser.setExpectedFailure(true, "Invalid output data type")
Kevin Chengaee1fac2020-11-11 13:54:06 -08002793 else:
Kevin Cheng989cb052021-04-28 16:29:44 -07002794 ser.setExpectedFailure(true, "Invalid input data type")
Kevin Chengaee1fac2020-11-11 13:54:06 -08002795
2796 elif mode == ResizeMode.NEAREST:
2797 if input_dtype == DType.INT8:
2798 if output_dtype != DType.INT8:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002799 ser.setExpectedFailure(True, "Invalid output data type")
Kevin Chengaee1fac2020-11-11 13:54:06 -08002800 elif input_dtype == DType.INT16:
2801 if output_dtype != DType.INT16:
Kevin Cheng989cb052021-04-28 16:29:44 -07002802 ser.setExpectedFailure(true, "Invalid output data type")
Kevin Cheng77d0f762020-11-24 10:26:32 -08002803 elif input_dtype == DType.FLOAT:
2804 if output_dtype != DType.FLOAT:
Kevin Cheng989cb052021-04-28 16:29:44 -07002805 ser.setExpectedFailure(true, "Invalid output data type")
Kevin Chengaee1fac2020-11-11 13:54:06 -08002806 else:
Kevin Cheng989cb052021-04-28 16:29:44 -07002807 ser.setExpectedFailure(true, "Invalid input data type")
Kevin Chengaee1fac2020-11-11 13:54:06 -08002808
2809 else:
Kevin Cheng989cb052021-04-28 16:29:44 -07002810 ser.setExpectedFailure(true, "Invalid resize mode")
Kevin Chengaee1fac2020-11-11 13:54:06 -08002811
Kevin Cheng550ccc52021-03-03 11:21:43 -08002812 return ser.addOutput(output_dims, output_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002813
    @staticmethod
    def typeConversionOp(ser, val, out_dtype):
        # CAST/RESCALE: same shape as the input, converted to the requested dtype.
        return ser.addOutput(val.shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002817
2818 @staticmethod
2819 def transposeConv2DOp(ser, ifm, output_shape):
Kevin Cheng3a478572021-01-22 17:21:02 -08002820 if ifm.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07002821 out_dtype = DType.INT32
2822 elif ifm.dtype == DType.INT16:
2823 out_dtype = DType.INT48
2824 elif ifm.dtype == DType.FLOAT:
2825 out_dtype = DType.FLOAT
2826 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002827 raise Exception("Unsupported input dtype: {}".format(ifm.dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07002828
2829 if output_shape[1] <= 0 or output_shape[2] <= 0:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002830 ser.setExpectedFailure(True, "Negative output shape")
Eric Kunzee5e26762020-10-13 16:11:07 -07002831
Kevin Cheng550ccc52021-03-03 11:21:43 -08002832 return ser.addOutput(output_shape, out_dtype)