blob: b3731ce7ca1f460d87ed6217e95ab8af450e37b0 [file] [log] [blame]
Eric Kunzee5e26762020-10-13 16:11:07 -07001#!/usr/bin/env python3
2
Kevin Cheng3a478572021-01-22 17:21:02 -08003# Copyright (c) 2020-2021, ARM Limited.
Eric Kunzee5e26762020-10-13 16:11:07 -07004#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16
17
18import numpy as np
19import argparse
20import sys
21import re
22import os
23import subprocess
24import shlex
25import json
26import glob
27import math
28import queue
29import threading
30import traceback
31import math
Jeremy Johnsona6185572021-06-21 15:55:35 +010032import itertools
Eric Kunzee5e26762020-10-13 16:11:07 -070033
34from enum import IntEnum, Enum, unique
35
Kevin Cheng550ccc52021-03-03 11:21:43 -080036# Include the ../thirdparty/serialization_lib/python directory in PYTHONPATH
37parent_dir = os.path.dirname(os.path.realpath(__file__))
38sys.path.append(
39 os.path.join(parent_dir, "..", "thirdparty", "serialization_lib", "python")
40)
Eric Kunzee5e26762020-10-13 16:11:07 -070041import tosa_serializer as ts
42from tosa_serializer import *
43import tosa
44
# Convenience variables to the flatc-generated types that should be enums, but aren't.
# flatc emits plain classes whose attributes hold the constant values, so a single
# instance of each is enough to write e.g. DType.INT8 / Op.CONV2D / ResizeMode.NEAREST.
DType = tosa.DType.DType()
Op = tosa.Op.Op()
ResizeMode = tosa.ResizeMode.ResizeMode()
49
Kevin Cheng550ccc52021-03-03 11:21:43 -080050
class TosaQuantGen:
    """Random QuantizedInfo helpers, selected via 'qgen': in an operator definition.

    Each qg* static method fills in the serializer quant-info record for one
    operator family; random zero points are generated only for dtypes that
    carry quantization information.
    """

    def __init__(self):
        pass

    @staticmethod
    def needsQinfo(op, dtype):
        # Only the narrow integer types carry zero-point information.
        return dtype in (DType.INT8, DType.INT16)

    @staticmethod
    def qgUnary(testGen, op, dtype):
        """Quant info for unary ops: input and output zero points."""
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            zps = (testGen.randInt(), testGen.randInt())
        else:
            zps = (0, 0)
        qinfo.UnaryQuantInfo(*zps)
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype):
        """Quant info for conv-style ops: input and weight zero points."""
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            zps = (testGen.randInt(), testGen.randInt())
        else:
            zps = (0, 0)
        qinfo.ConvQuantInfo(*zps)
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype):
        """Quant info for MATMUL: A and B zero points."""
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            zps = (testGen.randInt(), testGen.randInt())
        else:
            zps = (0, 0)
        qinfo.MatMulQuantInfo(*zps)
        return qinfo

    @staticmethod
    def qgPad(testGen, op, dtype):
        """Quant info for PAD: a single input zero point."""
        qinfo = ts.TosaSerializerQuantInfo()
        zp = testGen.randInt() if TosaQuantGen.needsQinfo(op, dtype) else 0
        qinfo.PadQuantInfo(zp)
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        """Convert a float scale into a fixed-point (multiplier, shift) pair.

        Derived from computeMultiplierAndShiftTosaScale32: the multiplier has
        31 fractional bits when scale32 is set, 15 otherwise, and the result
        satisfies scaleFp ~= multiplier * 2**-shift (for positive scales).
        """
        scaleBits = 31 if scale32 else 15

        mantissa, exponent = math.frexp(scaleFp)
        if scaleFp < 0.0:
            # NOTE(review): this makes the mantissa positive for negative
            # scales -- presumably the sign is handled by the caller; confirm.
            mantissa = -mantissa

        multiplier = round(mantissa * (1 << scaleBits))
        assert multiplier <= (1 << scaleBits)

        # round() can land exactly on 2**scaleBits; renormalize back into range.
        if multiplier == (1 << scaleBits):
            multiplier //= 2
            exponent += 1

        shift = scaleBits - exponent
        # print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(scaleFp, scaleBits, mantissa, multiplier, shift))

        assert multiplier <= (1 << scaleBits)
        assert 0 <= shift <= 63

        return multiplier, shift
129
130
class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator. The actual random data is generated separately
    for each test; only the shapes are produced here."""

    def __init__(self):
        # Stateless; all generators are static methods.
        pass
137
138 @staticmethod
139 def tgBasic(testGen, opName, rank):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800140 pl, const = opName["operands"]
Eric Kunzee5e26762020-10-13 16:11:07 -0700141 shape = testGen.makeShape(rank)
142
143 shape_list = []
144 for i in range(pl + const):
145 shape_list.append(shape.copy())
146
147 return shape_list
148
149 @staticmethod
150 def tgNHWC(testGen, opName, rank):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800151 pl, const = opName["operands"]
Eric Kunzee5e26762020-10-13 16:11:07 -0700152
Kevin Cheng550ccc52021-03-03 11:21:43 -0800153 assert rank == 4
Eric Kunzee5e26762020-10-13 16:11:07 -0700154
155 shape = testGen.makeShape(rank)
156
157 # Constrict the batch size?
158 if testGen.args.max_batch_size:
159 shape[0] = (shape[0] % testGen.args.max_batch_size) + 1
160
161 shape_list = []
162 for i in range(pl + const):
163 shape_list.append(shape.copy())
164
165 return shape_list
166
    @staticmethod
    def tgScatter(testGen, opName, rank):
        """Shapes for gather/scatter-style ops: a rank-3 values tensor plus a
        second input whose middle dimension W is drawn independently."""
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        assert rank == 3

        values_in_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            values_in_shape[0] = (values_in_shape[0] % testGen.args.max_batch_size) + 1

        # W: independent middle dimension, drawn from the configured shape range.
        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        input_shape = [values_in_shape[0], W, values_in_shape[2]]

        shape_list = []
        shape_list.append(values_in_shape.copy())
        shape_list.append(input_shape.copy())

        return shape_list
191
    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank):
        """One random shape per operand, with a single randomly-chosen operand
        given a 1 in a random dimension to exercise broadcasting."""
        shape = testGen.makeShape(rank)

        pl, const = op["operands"]

        shape_list = []

        # Choose one of the inputs to broadcast
        bcast_idx = testGen.randInt(0, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list
213
214 @staticmethod
215 def tgConv2D(testGen, op, rank):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800216 pl, const = op["operands"]
Eric Kunzee5e26762020-10-13 16:11:07 -0700217
Kevin Cheng550ccc52021-03-03 11:21:43 -0800218 assert rank == 4
Eric Kunzee5e26762020-10-13 16:11:07 -0700219
220 # IFM dimensions are NHWC
221 ifm_shape = testGen.makeShape(rank)
222
223 # Constrict the batch size?
224 if testGen.args.max_batch_size:
225 ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1
226
227 # Get the filter height/width from the operator parameters
Kevin Cheng550ccc52021-03-03 11:21:43 -0800228 filter_hw = op["filter"]
Eric Kunzee5e26762020-10-13 16:11:07 -0700229
230 # Generate a random OFM depth
231 ofm_depth = testGen.makeShape(1)[0]
232
233 # The filter dimensions are OHWI
234 filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])
235
236 # The bias is OC
237 bias_shape = np.asarray([ofm_depth])
238
239 return [ifm_shape, filter_shape, bias_shape]
240
241 @staticmethod
242 def tgTransposeConv2D(testGen, op, rank):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800243 pl, const = op["operands"]
Eric Kunzee5e26762020-10-13 16:11:07 -0700244
Kevin Cheng550ccc52021-03-03 11:21:43 -0800245 assert rank == 4
Eric Kunzee5e26762020-10-13 16:11:07 -0700246
247 # IFM dimensions are NHWC
248 ifm_shape = testGen.makeShape(rank)
249
250 # Constrict the batch size?
251 if testGen.args.max_batch_size:
252 ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1
253
254 # Get the filter height/width from the operator parameters
Kevin Cheng550ccc52021-03-03 11:21:43 -0800255 filter_hw = op["filter"]
Eric Kunzee5e26762020-10-13 16:11:07 -0700256
257 # Generate a random OFM depth
258 ofm_depth = testGen.makeShape(1)[0]
259
260 # The filter dimensions are OHWI
261 filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])
262
Kevin Cheng989cb052021-04-28 16:29:44 -0700263 # The bias is OC
264 bias_shape = np.asarray([ofm_depth])
265
266 return [ifm_shape, filter_shape, bias_shape]
Eric Kunzee5e26762020-10-13 16:11:07 -0700267
    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank):
        """Shapes for DEPTHWISE_CONV2D: NHWC input, HWCM filter, bias of M * C."""
        pl, const = op["operands"]

        assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        # Filter is KH, KW, C, M
        filter_hw = op["filter"]

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (
            testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]
299
300 @staticmethod
301 def tgFullyConnected(testGen, op, rank):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800302 pl, const = op["operands"]
Eric Kunzee5e26762020-10-13 16:11:07 -0700303
Kevin Cheng550ccc52021-03-03 11:21:43 -0800304 assert rank == 2
Eric Kunzee5e26762020-10-13 16:11:07 -0700305
306 input_shape = testGen.makeShape(rank)
307 filter_oc = testGen.makeShape(1)[0]
308 filter_shape = np.asarray([filter_oc, input_shape[1]])
309
310 bias_shape = np.asarray([filter_oc])
311
312 return [input_shape, filter_shape, bias_shape]
313
314 @staticmethod
315 def tgMatmul(testGen, op, rank):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800316 pl, const = op["operands"]
Eric Kunzee5e26762020-10-13 16:11:07 -0700317
Kevin Cheng2d60f002021-06-09 14:18:32 -0700318 assert rank == 3
Kevin Cheng550ccc52021-03-03 11:21:43 -0800319 assert pl == 2 and const == 0
Eric Kunzee5e26762020-10-13 16:11:07 -0700320
321 a_shape = testGen.makeShape(rank)
322 b_oc = testGen.makeShape(1)[0]
Kevin Cheng2d60f002021-06-09 14:18:32 -0700323 b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])
Eric Kunzee5e26762020-10-13 16:11:07 -0700324
325 return [a_shape, b_shape]
326
Kevin Cheng550ccc52021-03-03 11:21:43 -0800327
class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for
    operators that take attributes or other parameters.

    The return value is a list of (descriptive_name, [arglist]) tuples where
    descriptive_name is appended to the test name and arglist is expanded as
    arguments to the operator build function."""

    def __init__(self):
        # Stateless; all generators are static methods.
        pass
336
337 @staticmethod
338 def agNone(testGen, opName, shapeList, dtype):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800339 """A trivial argument generator for operators that don't take any
340 non-tensor arguments"""
341 return [("", [])]
Eric Kunzee5e26762020-10-13 16:11:07 -0700342
343 @staticmethod
344 def agAxis(testGen, opName, shapeList, dtype):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800345 """Build the axis argument for operators that take a single axis"""
Eric Kunzee5e26762020-10-13 16:11:07 -0700346 axes = []
347
348 shape = shapeList[0]
349
350 for a in range(0, len(shape)):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800351 axes.append(("axis_{}".format(a), [a]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700352 return axes
353
354 @staticmethod
355 def agConv2D(testGen, opName, shapeList, dtype):
356 arg_list = []
357
358 ifm_shape = shapeList[0]
359 filter_shape = shapeList[1]
360
361 # Must be rank 4
Kevin Cheng550ccc52021-03-03 11:21:43 -0800362 assert len(ifm_shape) == 4
363 assert len(filter_shape) == 4
Eric Kunzee5e26762020-10-13 16:11:07 -0700364
365 maxStride = testGen.args.max_conv_stride
366 maxPadding = testGen.args.max_conv_padding + 1
367 maxDilation = testGen.args.max_conv_dilation
368
369 # Strides, padding, dilations
370 for stride in range(0, maxStride ** 2):
371 for padding in range(0, (maxPadding) ** 4):
372 for dilation in range(0, maxDilation ** 2):
373
Kevin Cheng550ccc52021-03-03 11:21:43 -0800374 s = [stride // maxStride + 1, stride % maxStride + 1]
375 p = [
376 (padding // (maxPadding * 4)) % maxPadding,
377 (padding // (maxPadding * 2)) % maxPadding,
378 (padding // (maxPadding * 1)) % maxPadding,
379 padding % maxPadding,
380 ]
381 d = [dilation // maxDilation + 1, dilation % maxDilation + 1]
Eric Kunzee5e26762020-10-13 16:11:07 -0700382
383 # 4 padding parameters for regular conv2d
Kevin Cheng550ccc52021-03-03 11:21:43 -0800384 arg_list.append(
385 (
386 "st{}{}_pad{}{}{}{}_dilat{}{}".format(
387 s[0], s[1], p[0], p[1], p[2], p[3], d[0], d[1]
388 ),
389 [s, p, d],
390 )
391 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700392 return arg_list
393
    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtype):
        """Enumerate stride / output-padding / dilation combinations for
        TRANSPOSE_CONV2D, computing the resulting output shape for each.

        Returns (name, [stride, out_padding, dilation, output_shape]) tuples.
        """
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert len(ifm_shape) == 4
        assert len(filter_shape) == 4

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for out_padding in range(0, (maxPadding) ** 2):
                for dilation in range(0, maxDilation ** 2):

                    # Decode flat indices; out_padding is 2 base-maxPadding digits.
                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    p = [
                        (out_padding // (maxPadding * 1)) % maxPadding,
                        out_padding % maxPadding,
                    ]
                    d = [dilation // maxDilation + 1, dilation % maxDilation + 1]

                    # NOTE(review): output height/width derived from the forward
                    # conv relation; confirm against the reference model if the
                    # dilation term changes.
                    oh = (
                        ifm_shape[1]
                        - filter_shape[1]
                        - (filter_shape[1] - 1) * (d[0] - 1)
                        + 2 * p[0]
                    ) // s[0] + 1

                    ow = (
                        ifm_shape[2]
                        - filter_shape[2]
                        - (filter_shape[2] - 1) * (d[1] - 1)
                        + 2 * p[1]
                    ) // s[1] + 1

                    # Output shape (local `os` shadows the os module here)
                    os = [ifm_shape[0], oh, ow, filter_shape[0]]

                    arg_list.append(
                        (
                            "st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}".format(
                                s[0],
                                s[1],
                                p[0],
                                p[1],
                                d[0],
                                d[1],
                                os[0],
                                os[1],
                                os[2],
                                os[3],
                            ),
                            [s, p, d, os],
                        )
                    )

        return arg_list
457
458 @staticmethod
459 def agPad(testGen, opName, shapeList, dtype):
460 arg_list = []
461 rank = len(shapeList[0])
462
463 # Exhaustively test combinations of 0/1 padding on each side of each dimension
464 # This process might need some revision for >1 padding, but use rank**2 as a bitmask
465 # for now
466 for v in range(rank ** 2):
467
468 # Create a flat arraypadding4D
469 paddings = np.zeros((rank * 2), dtype=np.int32)
470
471 # Fill in the 1's
Kevin Cheng550ccc52021-03-03 11:21:43 -0800472 for r in range(rank * 2):
Eric Kunzee5e26762020-10-13 16:11:07 -0700473 if (v >> r) & 1:
474 paddings[r] = 1
475
476 # Reshape back to a 2D array
477 paddings = paddings.reshape((rank, 2))
478
Kevin Cheng550ccc52021-03-03 11:21:43 -0800479 arg_list.append(("pad{0:b}".format(v), [paddings]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700480
481 return arg_list
482
483 @staticmethod
484 def agPooling(testGen, opName, shapeList, dtype):
485 arg_list = []
486
487 shape = shapeList[0]
Kevin Cheng550ccc52021-03-03 11:21:43 -0800488 assert len(shape) == 4
Eric Kunzee5e26762020-10-13 16:11:07 -0700489
490 maxStride = testGen.args.max_pooling_stride
491 maxKernel = testGen.args.max_pooling_kernel
492 maxPadding = testGen.args.max_pooling_padding + 1
493
494 for kernel in range(0, maxKernel ** 2):
495 for stride in range(0, maxStride ** 2):
496 for padding in range(0, maxPadding ** 4):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800497 s = [stride // maxStride + 1, stride % maxStride + 1]
498 k = [(kernel // maxKernel) + 2, (kernel % maxKernel) + 2]
499 p = [
500 (padding // (maxPadding * 4)) % maxPadding,
501 (padding // (maxPadding * 2)) % maxPadding,
502 (padding // (maxPadding * 1)) % maxPadding,
503 padding % maxPadding,
504 ]
Eric Kunzee5e26762020-10-13 16:11:07 -0700505
Kevin Cheng550ccc52021-03-03 11:21:43 -0800506 arg_list.append(
507 (
508 "st{}{}_kern{}{}_pad{}{}{}{}".format(
509 s[0], s[1], k[0], k[1], p[0], p[1], p[2], p[3]
510 ),
511 [k, s, p],
512 )
513 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700514 return arg_list
515
516 @staticmethod
517 def agCast(testGen, opName, shapeList, inDtype):
518 arg_list = []
519
520 # Enumerate the output types here
521 if inDtype == DType.INT8:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800522 dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
Eric Kunzee5e26762020-10-13 16:11:07 -0700523 elif inDtype == DType.INT16:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800524 dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
Eric Kunzee5e26762020-10-13 16:11:07 -0700525 elif inDtype == DType.INT32:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800526 dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
Eric Kunzee5e26762020-10-13 16:11:07 -0700527 elif inDtype == DType.BOOL:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800528 dtypeList = [DType.INT8, DType.INT16, DType.INT32]
Eric Kunzee5e26762020-10-13 16:11:07 -0700529 elif inDtype == DType.FLOAT:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800530 dtypeList = [DType.INT8, DType.INT16, DType.INT32]
Eric Kunzee5e26762020-10-13 16:11:07 -0700531 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800532 raise Exception("Unexpected input dtype: {}".format(inDtype))
Eric Kunzee5e26762020-10-13 16:11:07 -0700533
534 for dtype in dtypeList:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800535 arg_list.append(("out{}".format(DTypeNames[dtype]), [dtype]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700536
537 return arg_list
538
539 @staticmethod
540 def agRescale(testGen, opName, shapeList, inDtype):
541 arg_list = []
542
543 # Enumerate the output types here
Kevin Cheng550ccc52021-03-03 11:21:43 -0800544 for dtype in [DType.INT8, DType.INT16, DType.INT32]:
545 for scale32 in [False, True]:
546 for double_round in [False, True]:
547 for per_channel in [False, True]:
Eric Kunzee5e26762020-10-13 16:11:07 -0700548
549 if inDtype == DType.INT48 and scale32:
550 # Illegal condition. Must be scale32=False
551 continue
552
Kevin Cheng550ccc52021-03-03 11:21:43 -0800553 arg_list.append(
554 (
555 "out{}_sc{}_dr{}_pc{}".format(
556 DTypeNames[dtype],
557 int(scale32),
558 int(double_round),
559 int(per_channel),
560 ),
561 [dtype, scale32, double_round, per_channel],
562 )
563 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700564
565 return arg_list
566
Kevin Chengaee1fac2020-11-11 13:54:06 -0800567 @staticmethod
568 def agMul(testGen, opName, shapeList, dtype):
569 arg_list = []
570
571 if dtype is DType.INT32:
572 for p in range(testGen.args.num_rand_permutations):
573
574 shift = testGen.randInt(0, 32)
575
Kevin Cheng550ccc52021-03-03 11:21:43 -0800576 arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
Kevin Chengaee1fac2020-11-11 13:54:06 -0800577 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800578 arg_list.append(("shift0", [0]))
Kevin Chengaee1fac2020-11-11 13:54:06 -0800579
580 return arg_list
581
582 @staticmethod
583 def agArithmeticRightShift(testGen, opName, shapeList, dtype):
584 arg_list = []
585
Kevin Cheng550ccc52021-03-03 11:21:43 -0800586 arg_list.append(("roundTrue", [True]))
587 arg_list.append(("roundFalse", [False]))
Kevin Chengaee1fac2020-11-11 13:54:06 -0800588
589 return arg_list
590
Eric Kunzee5e26762020-10-13 16:11:07 -0700591 # Helper function for reshape. Gets some factors of a larger number.
592 @staticmethod
593 def getFactors(val, start=1):
594 factors = []
595
Matthew Haddon2ad047d2021-06-22 16:55:23 +0100596 for i in range(start, int(np.sqrt(val)) + 1):
Eric Kunzee5e26762020-10-13 16:11:07 -0700597 if (val % i) == 0:
598 factors.append(i)
599
600 return factors
601
    @staticmethod
    def agReshape(testGen, opName, shapeList, dtype):
        """Generate random, non-duplicate target shapes for RESHAPE.

        Each candidate preserves the total element count by picking factors of
        it; a -1 (inferred dimension) is substituted occasionally.
        """
        arg_list = []

        origShape = shapeList[0]

        totalElements = 1
        for s in origShape:
            totalElements *= s

        # This code is NOT fast. Fortunately, the numbers are fairly small.
        factors = TosaArgGen.getFactors(totalElements)

        for p in range(testGen.args.num_rand_permutations):
            # New rank in 1..6 (presumably randInt's upper bound is exclusive).
            newRank = testGen.randInt(1, 7)
            # Not enough distinct factors to fill this rank; skip the attempt.
            if len(factors) < newRank:
                continue

            found = True
            # escape_counter breaks while loop if it continues on for too long
            escape_counter = 0
            while found:
                newShape = []
                # Generate newShape ensuring it isn't a duplicate
                remainingElements = totalElements
                shuffledFactors = testGen.rng.permutation(factors)
                for i in range(1, newRank):
                    # pick rank-1 factors; the last dim absorbs the remainder
                    newShape.append(shuffledFactors[0])
                    remainingElements = remainingElements // shuffledFactors[0]
                    shuffledFactors = testGen.rng.permutation(
                        TosaArgGen.getFactors(remainingElements)
                    )
                newShape.append(remainingElements)

                # Toss in a -1 sometimes (probability newRank / (newRank * 4))
                minusOne = testGen.randInt(0, newRank * 4)
                if minusOne < newRank:
                    newShape[minusOne] = -1

                # Check for duplicates against shapes accepted so far
                found = False
                for name, other_shape in arg_list:
                    if other_shape[0] == newShape:
                        found = True
                        break

                escape_counter += 1
                if escape_counter >= 100:
                    break

            if not found:
                arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))

        return arg_list
657
Eric Kunzee5e26762020-10-13 16:11:07 -0700658 @staticmethod
659 def agTranspose(testGen, opName, shapeList, dtype):
660 arg_list = []
661
662 ifm_shape = shapeList[0]
663
Jeremy Johnsona6185572021-06-21 15:55:35 +0100664 # Get all permutations
665 permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]
Eric Kunzee5e26762020-10-13 16:11:07 -0700666
Jeremy Johnsona6185572021-06-21 15:55:35 +0100667 # Limit to possible permutations from shape dimension or argument setting
668 limit = min(len(permutations), testGen.args.num_rand_permutations)
Eric Kunzee5e26762020-10-13 16:11:07 -0700669
Jeremy Johnsona6185572021-06-21 15:55:35 +0100670 # Get random permutation generator that uses all permutations
671 random_permutations = testGen.rng.permutation(permutations)
Eric Kunzee5e26762020-10-13 16:11:07 -0700672
Jeremy Johnsona6185572021-06-21 15:55:35 +0100673 # Create list of required amount of permutations
674 arg_list = [("perm{}".format(p), [random_permutations[p].tolist()]) for p in range(limit)]
Eric Kunzee5e26762020-10-13 16:11:07 -0700675 return arg_list
676
    @staticmethod
    def agSlice(testGen, opName, shapeList, dtype):
        """Random begin/size pairs for SLICE; attempts with a zero size are dropped."""
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):
            begin = []
            size = []

            valid = True

            for i in range(rank):
                if ifm_shape[i] > 1:
                    # Random begin, then a random size that fits in the rest of
                    # the dimension (presumably randInt is [low, high)).
                    begin.append(testGen.randInt(0, ifm_shape[i]))
                    size.append(testGen.randInt(0, ifm_shape[i] - begin[i]))

                    # Invalid slice size?
                    if size[i] == 0:
                        valid = False
                else:
                    # Size-1 dimensions are taken whole.
                    begin.append(0)
                    size.append(1)

            if valid:
                arg_list.append(("perm{}".format(p), [begin, size]))
        return arg_list
705
706 @staticmethod
707 def agTile(testGen, opName, shapeList, dtype):
708 arg_list = []
709
710 ifm_shape = shapeList[0]
711 rank = len(ifm_shape)
712
713 for p in range(testGen.args.num_rand_permutations):
714
715 # Pick a few random, but small multiple values
716 # because otherwise this has a tendency to generate
717 # enormous tensors
718 multiples = []
719 for i in range(rank):
720 multiples.append(testGen.randInt(1, 4))
721
Kevin Cheng550ccc52021-03-03 11:21:43 -0800722 arg_list.append(("perm{}".format(p), [multiples]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700723
724 return arg_list
725
    @staticmethod
    def agResize(testGen, opName, shapeList, dtype):
        """Generate RESIZE arguments: mode, output dims, and stride/offset.

        For FLOAT output the stride/offset are carried as floats; otherwise
        they are converted to fixed point with `shift` fractional bits, and
        `shift` is reduced until every value fits in signed 16 bits.
        """
        arg_list = []

        ifm_shape = shapeList[0]

        for m in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:

            # Exclude illegal {mode, type} configurations. Pick legal output types
            if m == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif m == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            elif dtype == DType.FLOAT:
                outputDTypeList = [DType.FLOAT]
            else:
                continue

            for outputDType in outputDTypeList:
                for perm in range(testGen.args.num_rand_permutations):

                    # Randomly generate legal output dimensions and shift
                    # and then compute the stride and offset based on them
                    output_dims = [testGen.randInt(1), testGen.randInt(1)]
                    # Align input/output centers to derive offsets.
                    in_center_h = (ifm_shape[1] - 1) / 2.0
                    in_center_w = (ifm_shape[2] - 1) / 2.0
                    out_center_h = (output_dims[0] - 1) / 2.0
                    out_center_w = (output_dims[1] - 1) / 2.0

                    fp_stride_y = float(ifm_shape[1]) / float(output_dims[0])
                    fp_stride_x = float(ifm_shape[2]) / float(output_dims[1])
                    fp_offset_y = in_center_h - fp_stride_y * out_center_h
                    fp_offset_x = in_center_w - fp_stride_x * out_center_w

                    if outputDType == DType.FLOAT:
                        # Float path: fixed-point fields are zeroed.
                        shift = 0
                        stride = [0, 0]
                        offset = [0, 0]
                        stride_fp = [fp_stride_y, fp_stride_x]
                        offset_fp = [fp_offset_y, fp_offset_x]
                        arg_list.append(
                            (
                                "mode{}_odim{}x{}_out{}_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}".format(
                                    m,
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride_fp[0],
                                    stride_fp[1],
                                    offset_fp[0],
                                    offset_fp[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )
                    else:
                        # Integer path: quantize stride/offset with `shift`
                        # fractional bits, starting at 11.
                        shift = 11
                        unit = float(1 << shift)
                        stride_y = int(round(fp_stride_y * unit))
                        stride_x = int(round(fp_stride_x * unit))
                        offset_y = int(round(fp_offset_y * unit))
                        offset_x = int(round(fp_offset_x * unit))

                        # Reduce shift until all values fit in signed 16 bits.
                        while (
                            stride_y >= 32768
                            or stride_x >= 32768
                            or offset_y >= 32768
                            or offset_x >= 32768
                            or offset_y < -32768
                            or offset_x < -32768
                        ):
                            shift = shift - 1
                            unit = float(1 << shift)
                            stride_y = int(round(fp_stride_y * unit))
                            stride_x = int(round(fp_stride_x * unit))
                            offset_y = int(round(fp_offset_y * unit))
                            offset_x = int(round(fp_offset_x * unit))

                        stride = [stride_y, stride_x]
                        offset = [offset_y, offset_x]

                        # Float fields are zeroed on the integer path.
                        stride_fp = [0.0, 0.0]
                        offset_fp = [0.0, 0.0]

                        arg_list.append(
                            (
                                "mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}".format(
                                    m,
                                    shift,
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride[0],
                                    stride[1],
                                    offset[0],
                                    offset[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )

        return arg_list
852
853 def agCondIf(testGen, opName, shapeList, dtype):
854 # CondIf generates the condition values here.
855 # Convert to tensors in the build function, along with the
856 # then and else blocks
857 arg_list = []
858
859 for c in [False, True]:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800860 arg_list.append(("cond{}".format(int(c)), [c]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700861
862 return arg_list
863
864 def agWhileLoop(testGen, opName, shapeList, dtype):
865 # While loop: 0 iterations, 1, more than 1
866 arg_list = []
867
868 for iter in [0, 1, 4]:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800869 arg_list.append(("iter{}".format(iter), [iter]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700870
871 return arg_list
872
Kevin Cheng550ccc52021-03-03 11:21:43 -0800873
class TosaTestGen:
    """Top-level driver: builds operator test cases and serializes them to disk."""

    # Maximum rank of tensor supported by test generator.
    TOSA_TENSOR_MAX_RANK = 6

    def __init__(self, args):
        """Capture parsed command-line args; set up RNG, op lists and quant gen."""
        self.args = args
        self.basePath = args.output_dir
        self.random_seed = args.random_seed
        # Serializer is created per test by createSerializer().
        self.ser = None
        self.rng = np.random.default_rng(self.random_seed)
        # Build the operator tables (dynamic entries, then the defaults).
        self.createDynamicOpLists()
        self.initOpListDefaults()
        self.quantGen = TosaQuantGen()
        # Force makeShape to do a specific starting shape
        self.targetted_shape = None
889
890 def createSerializer(self, opName, testPath):
891 self.testPath = os.path.join(opName, testPath)
892
893 fullPath = os.path.join(self.basePath, self.testPath)
894 os.makedirs(fullPath, exist_ok=True)
895 self.ser = ts.TosaSerializer(fullPath)
896
    def getSerializer(self):
        # Accessor for the serializer created by createSerializer().
        return self.ser
899
900 def serialize(self, testName):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800901 with open(
902 os.path.join(self.basePath, self.testPath, "{}.tosa".format(testName)), "wb"
903 ) as fd:
Eric Kunzee5e26762020-10-13 16:11:07 -0700904 fd.write(self.ser.serialize())
905
Kevin Cheng550ccc52021-03-03 11:21:43 -0800906 with open(os.path.join(self.basePath, self.testPath, "desc.json"), "w") as fd:
907 fd.write(self.ser.writeJson("{}.tosa".format(testName)))
Eric Kunzee5e26762020-10-13 16:11:07 -0700908
909 def getRandTensor(self, shape, dtype):
910 RAND_SHIFT_FACTOR = 0.5
911 RAND_SCALE_FACTOR = 4.0
912
913 if dtype == DType.BOOL:
914 np_dt = np.bool
915 return np.bool_(self.rng.choice(a=[False, True], size=shape))
Eric Kunzee5e26762020-10-13 16:11:07 -0700916 elif dtype == DType.INT4:
917 return np.int32(self.rng.integers(low=-7, high=8, size=shape))
918 elif dtype == DType.INT8:
919 return np.int32(self.rng.integers(low=-127, high=128, size=shape))
920 elif dtype == DType.INT16:
921 return np.int32(self.rng.integers(low=-32768, high=32768, size=shape))
922 elif dtype == DType.INT32:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800923 return np.int32(
924 self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape)
925 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700926 elif dtype == DType.INT48:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800927 return np.int64(
928 self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape)
929 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700930 elif dtype == DType.FLOAT:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800931 return np.float32(
932 self.rng.random(size=shape) - RAND_SHIFT_FACTOR * RAND_SCALE_FACTOR
933 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700934 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800935 raise Exception("Unrecognized Dtype: {}".format(dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -0700936
Kevin Cheng989cb052021-04-28 16:29:44 -0700937 def buildPlaceholderTensors(self, shape_list, dtype_list):
Eric Kunzee5e26762020-10-13 16:11:07 -0700938 placeholders = []
939
Kevin Cheng989cb052021-04-28 16:29:44 -0700940 assert len(shape_list) == len(dtype_list)
941
942 for idx, shape in enumerate(shape_list):
943 arr = self.getRandTensor(shape, dtype_list[idx])
944 placeholders.append(self.ser.addPlaceholder(shape, dtype_list[idx], arr))
Eric Kunzee5e26762020-10-13 16:11:07 -0700945
946 return placeholders
947
Kevin Cheng989cb052021-04-28 16:29:44 -0700948 def buildConstTensors(self, shape_list, dtype_list):
Eric Kunzee5e26762020-10-13 16:11:07 -0700949 consts = []
950
Kevin Cheng989cb052021-04-28 16:29:44 -0700951 assert len(shape_list) == len(dtype_list)
952
953 for idx, shape in enumerate(shape_list):
954 arr = self.getRandTensor(shape, dtype_list[idx])
955 consts.append(self.ser.addConst(shape, dtype_list[idx], arr))
Eric Kunzee5e26762020-10-13 16:11:07 -0700956
957 return consts
958
959 def makeShape(self, rank):
960 if self.targetted_shape:
961 return np.int32(self.targetted_shape)
Kevin Cheng550ccc52021-03-03 11:21:43 -0800962 return np.int32(
963 self.rng.integers(
964 low=self.args.tensor_shape_range[0],
965 high=self.args.tensor_shape_range[1],
966 size=rank,
967 )
968 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700969
    def setTargetShape(self, shape):
        # Force makeShape() to return exactly this shape (None = random again).
        self.targetted_shape = shape
972
    def randInt(self, low=0, high=256):
        # Random scalar int32 in [low, high) drawn from this generator's RNG.
        return np.int32(self.rng.integers(low=low, high=high, size=1))[0]
975
976 def getRandNumberDType(self, dtype):
977 if dtype == DType.FLOAT:
978 return self.rng.random()
979 elif dtype == DType.BOOL:
980 return self.rng.choice([False, True])
981 elif dtype == DType.INT4:
982 low, high = (-7, 8)
Eric Kunzee5e26762020-10-13 16:11:07 -0700983 elif dtype == DType.INT8:
984 low, high = (-127, 128)
985 elif dtype == DType.INT16:
986 low, high = (-32768, 32768)
987 elif dtype == DType.INT32:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800988 low, high = (-(1 << 31), (1 << 31))
Eric Kunzee5e26762020-10-13 16:11:07 -0700989 elif dtype == DType.INT48:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800990 low, high = (-(1 << 47), (1 << 47))
Eric Kunzee5e26762020-10-13 16:11:07 -0700991 # Special size
992 return np.int64(self.rng.integers(low, high, size=1))[0]
993 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800994 raise Exception("Unknown dtype: {}".format(dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -0700995
996 return np.int32(self.rng.integers(low, high, size=1))[0]
997
998 def shapeStr(self, shape):
999
1000 sStr = []
1001 # Convert to strings
1002 for i in shape:
1003 sStr.append(str(i))
1004
Kevin Cheng550ccc52021-03-03 11:21:43 -08001005 return "x".join(sStr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001006
1007 def typeStr(self, t):
Kevin Cheng989cb052021-04-28 16:29:44 -07001008 if isinstance(t, list):
1009 assert len(t) >= 2
1010 return "{}x{}".format(self.typeStr(t[0]), self.typeStr(t[1]))
Eric Kunzee5e26762020-10-13 16:11:07 -07001011 else:
Kevin Cheng989cb052021-04-28 16:29:44 -07001012 if t == DType.BOOL:
1013 return "b"
1014 elif t == DType.INT4:
1015 return "i4"
1016 elif t == DType.INT8:
1017 return "i8"
1018 elif t == DType.UINT8:
1019 return "u8"
1020 elif t == DType.INT16:
1021 return "i16"
1022 elif t == DType.INT32:
1023 return "i32"
1024 elif t == DType.INT48:
1025 return "i48"
1026 elif t == DType.FLOAT:
1027 return "float"
1028 else:
1029 raise Exception("Unknown dtype, cannot convert to string: {}".format(t))
Eric Kunzee5e26762020-10-13 16:11:07 -07001030
1031 def typeWidth(self, t):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001032 """ Get the datatype width for integer types"""
Kevin Cheng3a478572021-01-22 17:21:02 -08001033 if t == DType.INT4:
Eric Kunzee5e26762020-10-13 16:11:07 -07001034 return 4
1035 elif t == DType.INT8:
1036 return 8
Kevin Cheng3a478572021-01-22 17:21:02 -08001037 elif t == DType.UINT8:
1038 return 8
Eric Kunzee5e26762020-10-13 16:11:07 -07001039 elif t == DType.INT16:
1040 return 16
1041 elif t == DType.INT32:
1042 return 32
1043 elif t == DType.INT48:
1044 return 48
1045 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001046 raise Exception("Unknown dtype, cannot convert to string: {}".format(t))
Eric Kunzee5e26762020-10-13 16:11:07 -07001047
1048 # Argument generators
1049 # Returns a list of tuples (stringDescriptor, [build_fcn_arg_list])
1050 # Where the string descriptor is used to generate the test name and
1051 # The build_fcn_arg_list is expanded and passed to the operator test
1052 # build function
1053
Kevin Cheng550ccc52021-03-03 11:21:43 -08001054 def build_unary(self, op, a, qinfo=None):
Eric Kunzee5e26762020-10-13 16:11:07 -07001055 result_tens = OutputShaper.unaryOp(self.ser, a)
1056 self.ser.addOperator(op, [a.name], [result_tens.name], None, qinfo)
1057 return result_tens
1058
1059 def build_binary_broadcast(self, op, a, b):
1060 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1061 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1062 return result_tens
1063
1064 def build_binary_nonbroadcast(self, op, a, b):
1065 result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b)
1066 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1067 return result_tens
1068
Kevin Chengaee1fac2020-11-11 13:54:06 -08001069 def build_arithmetic_right_shift(self, op, a, b, round):
1070 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1071
1072 attr = ts.TosaSerializerAttribute()
1073 attr.ArithmeticRightShiftAttribute(round)
1074
1075 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
1076 return result_tens
1077
1078 def build_mul(self, op, a, b, shift):
Eric Kunzee5e26762020-10-13 16:11:07 -07001079 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1080
1081 # Special for multiply:
1082 # Force the result to INT32 for INT types
1083 if a.dtype != DType.FLOAT:
1084 result_tens.setDtype(DType.INT32)
1085
Kevin Chengaee1fac2020-11-11 13:54:06 -08001086 attr = ts.TosaSerializerAttribute()
1087 attr.MulAttribute(shift)
1088
1089 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001090 return result_tens
1091
1092 def build_table(self, op, a):
1093 # Constant size, random values
1094 table_arr = self.getRandTensor([513], DType.INT16)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001095 table_tens = self.ser.addConst(table_arr.shape, DType.INT16, table_arr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001096
1097 result_tens = OutputShaper.tableOp(self.ser, a, table_tens)
1098 self.ser.addOperator(op, [a.name, table_tens.name], [result_tens.name], None)
1099
1100 return result_tens
1101
1102 def build_select(self, op, cond, a, b):
1103
1104 # Replace the cond tensor with a boolean tensor since it probably
1105 # has the wrong dtype
Kevin Cheng989cb052021-04-28 16:29:44 -07001106 t = self.buildPlaceholderTensors([cond.shape], [DType.BOOL])
Eric Kunzee5e26762020-10-13 16:11:07 -07001107 cond = t[0]
1108
1109 result_tens = OutputShaper.selectOp(self.ser, cond, a, b)
1110 self.ser.addOperator(op, [cond.name, a.name, b.name], [result_tens.name])
1111
1112 return result_tens
1113
1114 def build_comparison(self, op, a, b):
1115 result_tens = OutputShaper.binaryComparisonOp(self.ser, a, b)
1116 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1117 return result_tens
1118
1119 def build_argmax(self, op, a, axis):
1120 result_tens = OutputShaper.argmaxOp(self.ser, a, axis)
1121
1122 attr = ts.TosaSerializerAttribute()
1123 attr.AxisAttribute(axis)
1124
1125 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1126 return result_tens
1127
Kevin Cheng550ccc52021-03-03 11:21:43 -08001128 def build_pool2d(self, op, input, kernel, stride, pad, qinfo=None):
Eric Kunzee5e26762020-10-13 16:11:07 -07001129 result_tens = OutputShaper.pool2dOp(self.ser, input, kernel, stride, pad)
1130
1131 attr = ts.TosaSerializerAttribute()
1132 attr.Pool2dAttribute(kernel, stride, pad)
Eric Kunzee5e26762020-10-13 16:11:07 -07001133
1134 self.ser.addOperator(op, [input.name], [result_tens.name], attr, qinfo)
1135 return result_tens
1136
1137 def build_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001138 assert len(padding) == 4
1139 result_tens = OutputShaper.conv2dOp(
1140 self.ser, ifm, filter, strides, padding, dilations
1141 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001142
1143 attr = ts.TosaSerializerAttribute()
1144 attr.Conv2dAttribute(padding, strides, dilations)
1145
Kevin Cheng550ccc52021-03-03 11:21:43 -08001146 self.ser.addOperator(
1147 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
1148 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001149 return result_tens
1150
Kevin Cheng550ccc52021-03-03 11:21:43 -08001151 def build_transpose_conv2d(
Kevin Cheng989cb052021-04-28 16:29:44 -07001152 self, op, ifm, filter, bias, stride, outpad, dilation, output_shape, qinfo
Kevin Cheng550ccc52021-03-03 11:21:43 -08001153 ):
1154 assert len(outpad) == 2
Eric Kunzee5e26762020-10-13 16:11:07 -07001155 result_tens = OutputShaper.transposeConv2DOp(self.ser, ifm, output_shape)
1156
1157 attr = ts.TosaSerializerAttribute()
1158 attr.TransposeConv2DAttribute(outpad, stride, dilation, output_shape)
1159
Kevin Cheng550ccc52021-03-03 11:21:43 -08001160 self.ser.addOperator(
Kevin Cheng989cb052021-04-28 16:29:44 -07001161 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
Kevin Cheng550ccc52021-03-03 11:21:43 -08001162 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001163 return result_tens
1164
Kevin Cheng550ccc52021-03-03 11:21:43 -08001165 def build_depthwise_conv2d(
1166 self, op, ifm, filter, bias, strides, padding, dilations, qinfo
1167 ):
1168 result_tens = OutputShaper.depthwiseConv2dOp(
1169 self.ser, ifm, filter, strides, padding, dilations
1170 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001171
1172 attr = ts.TosaSerializerAttribute()
1173 attr.Conv2dAttribute(padding, strides, dilations)
1174
Kevin Cheng550ccc52021-03-03 11:21:43 -08001175 self.ser.addOperator(
1176 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
1177 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001178 return result_tens
1179
1180 def build_fully_connected(self, op, ifm, filter, bias, qinfo):
1181 result_tens = OutputShaper.fullyConnectedOp(self.ser, ifm, filter)
1182
Kevin Cheng550ccc52021-03-03 11:21:43 -08001183 self.ser.addOperator(
1184 op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo
1185 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001186 return result_tens
1187
1188 def build_matmul(self, op, a, b, qinfo):
1189 result_tens = OutputShaper.matmulOp(self.ser, a, b)
1190 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], None, qinfo)
1191 return result_tens
1192
1193 def build_reduce(self, op, a, axis):
1194 result_tens = OutputShaper.reduceOp(self.ser, a, axis)
1195
1196 attr = ts.TosaSerializerAttribute()
1197 attr.AxisAttribute(axis)
1198
1199 self.ser.addOperator(op, [a.name], result_tens.name, attr)
1200 return result_tens
1201
1202 def build_clamp(self, op, a):
1203 result_tens = OutputShaper.unaryOp(self.ser, a)
1204
1205 attr = ts.TosaSerializerAttribute()
1206
1207 # Get two random ints
1208 v = [self.randInt(), self.randInt()]
1209
1210 if a.dtype == DType.FLOAT:
1211 attr.ClampAttribute(0, 0, min(v), max(v))
1212 else:
1213 attr.ClampAttribute(min(v), max(v), 0, 0)
1214
1215 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1216 return result_tens
1217
1218 def build_leaky_relu(self, op, a):
1219 result_tens = OutputShaper.unaryOp(self.ser, a)
1220 attr = ts.TosaSerializerAttribute()
1221
1222 attr.LeakyReluAttribute(self.getRandNumberDType(DType.FLOAT))
1223
1224 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1225 return result_tens
1226
1227 # Needs an additional type/input
1228 def build_prelu(self, op, a):
1229 result_tens = OutputShaper.unaryOp(self.ser, a)
1230
1231 self.ser.addOperator(op, [a.name], [result_tens.name])
1232 return result_tens
1233
1234 def build_relun(self, op, a):
1235 result_tens = OutputShaper.unaryOp(self.ser, a)
1236
1237 attr = ts.TosaSerializerAttribute()
1238
1239 if a.dtype == DType.FLOAT:
1240 attr.ReluNAttribute(0, self.getRandNumberDType(a.dtype))
1241 else:
1242 attr.ReluNAttribute(self.getRandNumberDType(a.dtype), 0)
1243
1244 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1245 return result_tens
1246
1247 def build_sigmoid(self, op, a):
1248 result_tens = OutputShaper.unaryOp(self.ser, a)
1249 self.ser.addOperator(op, [a.name], [result_tens.name])
1250 return result_tens
1251
1252 def build_tanh(self, op, a):
1253 result_tens = OutputShaper.unaryOp(self.ser, a)
1254 self.ser.addOperator(op, [a.name], [result_tens.name])
1255 return result_tens
1256
1257 def build_concat(self, op, a, b, axis):
1258 result_tens = OutputShaper.concatOp(self.ser, a, b, axis)
1259
1260 attr = ts.TosaSerializerAttribute()
1261 attr.AxisAttribute(axis)
1262
1263 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
1264
1265 def build_pad(self, op, a, padding, qinfo):
1266 result_tens = OutputShaper.padOp(self.ser, a, padding)
1267
1268 # Need to turn the padding array into a TOSA tensor here.
1269 # This is one of the few tensor operands that does not get
1270 # randomly generated
Kevin Cheng550ccc52021-03-03 11:21:43 -08001271 padding_tens = self.ser.addConst(padding.shape, DType.INT32, padding)
Eric Kunzee5e26762020-10-13 16:11:07 -07001272
Kevin Cheng550ccc52021-03-03 11:21:43 -08001273 self.ser.addOperator(
1274 op, [a.name, padding_tens.name], [result_tens.name], None, qinfo
1275 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001276
1277 def build_reshape(self, op, a, newShape):
1278 result_tens = OutputShaper.reshapeOp(self.ser, a, newShape)
1279
1280 attr = ts.TosaSerializerAttribute()
1281 attr.ReshapeAttribute(newShape)
1282
1283 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1284 return result_tens
1285
1286 def build_reverse(self, op, a, axis):
1287 result_tens = OutputShaper.unaryOp(self.ser, a)
1288
1289 attr = ts.TosaSerializerAttribute()
1290 attr.AxisAttribute(axis)
1291
1292 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1293 return result_tens
1294
1295 def build_transpose(self, op, a, perms):
1296 result_tens = OutputShaper.transposeOp(self.ser, a, perms)
1297
Kevin Cheng550ccc52021-03-03 11:21:43 -08001298 perms_tens = self.ser.addConst([len(perms)], DType.INT32, np.int32(perms))
Eric Kunzee5e26762020-10-13 16:11:07 -07001299
1300 self.ser.addOperator(op, [a.name, perms_tens.name], [result_tens.name])
1301 return result_tens
1302
1303 def build_slice(self, op, a, begin, size):
1304 result_tens = OutputShaper.sliceOp(self.ser, a, begin, size)
1305
1306 attr = ts.TosaSerializerAttribute()
1307 attr.SliceAttribute(begin, size)
1308
1309 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1310 return result_tens
1311
1312 def build_tile(self, op, a, multiples):
1313 result_tens = OutputShaper.tileOp(self.ser, a, multiples)
1314
1315 attr = ts.TosaSerializerAttribute()
1316 attr.TileAttribute(multiples)
1317
1318 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1319 return result_tens
1320
Kevin Cheng77d0f762020-11-24 10:26:32 -08001321 def build_gather(self, op, values):
Eric Kunzee5e26762020-10-13 16:11:07 -07001322
1323 # Create a new indicies tensor
1324 # here with data that doesn't exceed the dimensions of the values tensor
1325
Kevin Cheng550ccc52021-03-03 11:21:43 -08001326 K = values.shape[1] # K
1327 W = self.randInt(
1328 self.args.tensor_shape_range[0], self.args.tensor_shape_range[1]
1329 ) # W
1330 indicies_arr = np.int32(
1331 self.rng.integers(low=0, high=K, size=[values.shape[0], W])
1332 ) # (N, W)
1333 indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001334
Kevin Cheng77d0f762020-11-24 10:26:32 -08001335 result_tens = OutputShaper.gatherOp(self.ser, values, indicies)
Eric Kunzee5e26762020-10-13 16:11:07 -07001336
Kevin Cheng77d0f762020-11-24 10:26:32 -08001337 self.ser.addOperator(op, [values.name, indicies.name], [result_tens.name])
Eric Kunzee5e26762020-10-13 16:11:07 -07001338
1339 return result_tens
1340
Kevin Cheng77d0f762020-11-24 10:26:32 -08001341 def build_scatter(self, op, values_in, input):
1342
1343 # Create a new indicies tensor
1344 # here with data that doesn't exceed the dimensions of the values_in tensor
1345
Kevin Cheng550ccc52021-03-03 11:21:43 -08001346 K = values_in.shape[1] # K
1347 W = input.shape[1] # W
1348 indicies_arr = np.int32(
1349 self.rng.integers(low=0, high=K, size=[values_in.shape[0], W])
1350 ) # (N, W)
1351 indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
Kevin Cheng77d0f762020-11-24 10:26:32 -08001352
1353 result_tens = OutputShaper.scatterOp(self.ser, values_in, indicies, input)
1354
Kevin Cheng550ccc52021-03-03 11:21:43 -08001355 self.ser.addOperator(
1356 op, [values_in.name, indicies.name, input.name], [result_tens.name]
1357 )
Kevin Cheng77d0f762020-11-24 10:26:32 -08001358
1359 return result_tens
1360
    def build_resize(
        self,
        op,
        input,
        mode,
        stride,
        offset,
        shift,
        stride_fp,
        offset_fp,
        output_dims,
        input_dtype,
        output_dtype,
    ):
        """Build a RESIZE op.

        Passes both the fixed-point parameters (stride/offset/shift) and the
        floating-point ones (stride_fp/offset_fp) through to the output
        shaper and the serialized attribute.
        NOTE(review): presumably the fixed-point set applies to integer
        dtypes and the _fp set to FLOAT — confirm against the reference
        model / argument generator.
        """
        result_tens = OutputShaper.resizeOp(
            self.ser,
            input,
            mode,
            stride,
            offset,
            shift,
            stride_fp,
            offset_fp,
            output_dims,
            input_dtype,
            output_dtype,
        )

        attr = ts.TosaSerializerAttribute()

        attr.ResizeAttribute(
            output_dims, stride, offset, shift, stride_fp, offset_fp, mode
        )

        self.ser.addOperator(op, [input.name], [result_tens.name], attr)
        return result_tens
1397
1398 def build_identityn(self, op, val, val2):
1399
Kevin Cheng550ccc52021-03-03 11:21:43 -08001400 result_tens = OutputShaper.unaryOp(self.ser, val)
Eric Kunzee5e26762020-10-13 16:11:07 -07001401 result_tens2 = OutputShaper.unaryOp(self.ser, val2)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001402 self.ser.addOperator(
1403 op, [val.name, val2.name], [result_tens.name, result_tens2.name]
1404 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001405 return result_tens
1406
    def build_placeholder(self, op, val):
        """Consume a placeholder through an IDENTITY op."""
        # Add an identity op to avoid warning in the reference model
        return self.build_unary(Op.IDENTITY, val)
1410
1411 # Type Conversion
1412 def build_cast(self, op, val, out_dtype):
1413 result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
1414 self.ser.addOperator(op, [val.name], [result_tens.name])
1415 return result_tens
1416
1417 def build_rescale(self, op, val, out_dtype, scale32, double_round, per_channel):
1418 result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
1419
1420 if per_channel:
1421 nc = val.shape[-1]
1422 else:
1423 nc = 1
1424
1425 in_type_width = self.typeWidth(val.dtype)
1426 out_type_width = self.typeWidth(out_dtype)
1427
Kevin Cheng3a478572021-01-22 17:21:02 -08001428 if val.dtype == DType.INT8:
Kevin Cheng989cb052021-04-28 16:29:44 -07001429 input_zp = self.randInt(-128, 127)
Eric Kunzee5e26762020-10-13 16:11:07 -07001430 in_type_width = in_type_width + 1
1431 else:
1432 input_zp = 0
1433
Kevin Cheng3a478572021-01-22 17:21:02 -08001434 if out_dtype == DType.INT8:
Kevin Cheng989cb052021-04-28 16:29:44 -07001435 output_zp = self.randInt(-128, 127)
Eric Kunzee5e26762020-10-13 16:11:07 -07001436 out_type_width = out_type_width + 1
1437 else:
1438 output_zp = 0
1439
1440 # Calculate scale based on:
1441 # scale = a *(2^output_width)/(2^input_width))
1442
1443 a = np.float32(self.rng.random(size=[nc]))
1444 scale_arr = a * np.float32((1 << out_type_width) / (1 << in_type_width))
1445
1446 if scale32:
1447 pass
1448 # Cap the scaling at 2^15 - 1 for scale16
1449 scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1)
1450 else:
1451 # Cap the scaling at 2^15 - 1 for scale16
1452 scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)
1453
Kevin Cheng550ccc52021-03-03 11:21:43 -08001454 # print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))
Eric Kunzee5e26762020-10-13 16:11:07 -07001455
1456 multiplier_arr = np.int32(np.zeros(shape=[nc]))
1457 shift_arr = np.int32(np.zeros(shape=[nc]))
1458
1459 for i in range(nc):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001460 multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(
1461 scale_arr[i], scale32
1462 )
Kevin Chengaee1fac2020-11-11 13:54:06 -08001463 if shift_arr[i] < 2 or shift_arr[i] > 62:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001464 self.ser.setExpectedFailure(True, "OpRescale: invalid shift value")
Eric Kunzee5e26762020-10-13 16:11:07 -07001465
Kevin Cheng550ccc52021-03-03 11:21:43 -08001466 # print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp))
Eric Kunzee5e26762020-10-13 16:11:07 -07001467
1468 attr = ts.TosaSerializerAttribute()
Kevin Cheng550ccc52021-03-03 11:21:43 -08001469 attr.RescaleAttribute(
1470 input_zp,
1471 output_zp,
1472 multiplier_arr,
1473 shift_arr,
1474 scale32,
1475 double_round,
1476 per_channel,
1477 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001478
1479 self.ser.addOperator(op, [val.name], [result_tens.name], attr)
1480 return result_tens
1481
    def build_cond_if_const(self, op, then_tens, else_tens, cond):
        """COND_IF whose then/else blocks each contain a single const node.

        ``then_tens``/``else_tens`` are only used for their shape; fresh
        random INT32 constants become the branch bodies.  Returns the
        result tensor.
        """
        # For cond_if with constants, we're supplied with then/else tensors that we ignore
        # (except for the generated shape) and the condition. Build Then/Else blocks
        # and fill them with const nodes for the body.

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, [cond])

        # Make then/else tensors
        out_shape = then_tens.shape
        then_arr = np.int32(self.rng.integers(0, 255, size=out_shape))
        else_arr = np.int32(self.rng.integers(0, 255, size=out_shape))

        # And the result tensor based on any of the outputs
        result_tens = self.ser.addOutput(out_shape, DType.INT32)

        # Create the attribute with the names of the then/else blocks
        then_block = "THEN_BLOCK"
        else_block = "ELSE_BLOCK"
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(op, [cond_tens.name], [result_tens.name], attr)

        # Each startBasicBlock() switches the serializer's current block, so
        # the const/output below land inside that block.
        self.ser.startBasicBlock(then_block)
        # Build the actual then/else tensors inside their blocks
        then_tens = self.ser.addConst(out_shape, DType.INT32, then_arr)
        self.ser.addOutputTensor(then_tens)

        self.ser.startBasicBlock(else_block)
        else_tens = self.ser.addConst(out_shape, DType.INT32, else_arr)
        self.ser.addOutputTensor(else_tens)

        return result_tens
1517
    def build_cond_if_binary(self, op, a, b, cond):
        """COND_IF with a binary op in each branch: ADD in THEN, SUB in ELSE.

        Returns the result tensor (same shape/dtype as ``a``).
        """
        # For cond_if with a binary op in the then/else blocks, take a and b and
        # alternately add or subtract them based on the condition

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, [cond])

        result_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.currBasicBlock.addOutput(result_tens.name)

        # Create the attribute with the names of the then/else blocks
        then_block = "THEN_BLOCK"
        else_block = "ELSE_BLOCK"
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(
            op, [cond_tens.name, a.name, b.name], [result_tens.name], attr
        )

        # THEN: out = a + b
        self.ser.startBasicBlock(then_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        then_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.addOperator(Op.ADD, [a.name, b.name], [then_tens.name])

        # ELSE: out = a - b
        self.ser.startBasicBlock(else_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        else_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.addOperator(Op.SUB, [a.name, b.name], [else_tens.name])

        return result_tens
1552
    def build_while_loop(self, op, a, iter_val):
        """WHILE_LOOP that runs ``iter_val`` iterations, adding ``a`` into an
        accumulator each time round.  Returns the accumulator output tensor.
        """
        # Loop counter placeholder, initialized to iter_val
        iter = self.ser.addPlaceholder([], DType.INT32, [np.int32(iter_val)])

        cond_block = "COND_BLOCK"
        body_block = "BODY_BLOCK"

        attr = ts.TosaSerializerAttribute()
        attr.WhileLoopAttribute(cond_block, body_block)

        # Accumulator tensor
        # acc = self.ser.addOutput(a.shape, a.dtype)
        acc_init_val = np.int32(np.zeros(a.shape))
        acc = self.ser.addPlaceholder(a.shape, a.dtype, acc_init_val)

        # Intermediate/output tensors for everything going through the loop
        iter_out = self.ser.addIntermediate(iter.shape, iter.dtype)
        a_out = self.ser.addIntermediate(a.shape, a.dtype)
        acc_out = self.ser.addIntermediate(acc.shape, acc.dtype)

        # While_loop operator: loop-carried values are (iter, a, acc)
        self.ser.addOperator(
            op,
            [iter.name, a.name, acc.name],
            [iter_out.name, a_out.name, acc_out.name],
            attr,
        )

        # COND block (input: iter, output: cond_tens )
        # Continue while iter > 0.
        self.ser.startBasicBlock(cond_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        zero_tens = self.ser.addConst([], DType.INT32, [np.int32(0)])
        cond_tens = self.ser.addOutput([], DType.BOOL)
        self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name], [cond_tens.name])

        # BODY block (input: a, acc, iter, output: a, acc, iter)
        # Note that local intermediate tensors need to be declared here for the outputs
        # Per iteration: acc += a; iter -= 1; a passes through unchanged.
        self.ser.startBasicBlock(body_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        one_tens = self.ser.addConst([], DType.INT32, [np.int32(1)])
        iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype)
        acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype)
        self.ser.addOperator(Op.ADD, [a.name, acc.name], [acc_body_out.name])
        self.ser.addOperator(Op.SUB, [iter.name, one_tens.name], [iter_body_out.name])
        self.ser.addOutputTensor(iter_body_out)
        self.ser.addOutputTensor(a)
        self.ser.addOutputTensor(acc_body_out)

        return acc_out
1605
Kevin Cheng550ccc52021-03-03 11:21:43 -08001606 def genOpTestList(
1607 self, opName, shapeFilter=[None], rankFilter=None, dtypeFilter=None
1608 ):
Eric Kunzee5e26762020-10-13 16:11:07 -07001609
1610 try:
1611 op = self.TOSA_OP_LIST[opName]
1612 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001613 raise Exception("Cannot find op with name {}".format(opName))
Eric Kunzee5e26762020-10-13 16:11:07 -07001614
1615 # Initialize a new random number generator
1616 self.rng = np.random.default_rng(self.random_seed)
1617
Kevin Cheng550ccc52021-03-03 11:21:43 -08001618 build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001619
1620 # Generate the lists of arguments
Kevin Cheng550ccc52021-03-03 11:21:43 -08001621 rmin, rmax = op["rank"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001622
Jeremy Johnson97eb75f2021-07-08 11:58:02 +01001623 # Create a default testing rank range, 1-4 inclusive to keep test sizes reasonably small.
1624 default_test_rank_range = range(1, 5)
1625
Eric Kunzee5e26762020-10-13 16:11:07 -07001626 # Test list consists of a tuple of:
1627 # (opName, testNameStr, dtype, shapeList, argumentsList)
1628 testList = []
1629
1630 if not shapeFilter:
1631 shapeFilter = [None]
1632
1633 for r in range(rmin, rmax + 1):
1634
1635 # Filter out the rank?
1636 if rankFilter is not None and r not in rankFilter:
1637 continue
Jeremy Johnson97eb75f2021-07-08 11:58:02 +01001638 if rankFilter is None and shapeFilter[0] is None and r not in default_test_rank_range:
1639 continue
Eric Kunzee5e26762020-10-13 16:11:07 -07001640
Kevin Cheng550ccc52021-03-03 11:21:43 -08001641 for t in op["types"]:
Eric Kunzee5e26762020-10-13 16:11:07 -07001642
1643 # Filter tests based on dtype?
1644 if dtypeFilter is not None:
1645 if t not in dtypeFilter:
1646 continue
1647
1648 # Create the placeholder and const tensors
1649 for shape in shapeFilter:
1650 # A None shape chooses a random shape of a given rank
1651
1652 # Filter out by rank
1653 if shape is not None and len(shape) != r:
1654 continue
1655
1656 self.setTargetShape(shape)
1657 shapeList = tgen_fcn(self, op, r)
1658
1659 shapeStr = self.shapeStr(shapeList[0])
1660 typeStr = self.typeStr(t)
1661
1662 # Argument lists consists of tuples of the (str, []) string representation and the build function argument list
1663 argList = []
1664 if agen_fcn:
1665 argList = agen_fcn(self, opName, shapeList, t)
1666 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001667 argList = [("", [])]
Eric Kunzee5e26762020-10-13 16:11:07 -07001668
1669 for argStr, args in argList:
1670 if argStr:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001671 testStr = "{}_{}_{}_{}".format(
1672 opName, shapeStr, typeStr, argStr
1673 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001674 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001675 testStr = "{}_{}_{}".format(opName, shapeStr, typeStr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001676
1677 testList.append((opName, testStr, t, shapeList, args))
1678
1679 return testList
1680
    def serializeTest(self, opName, testStr, dtype_or_dtypeList, shapeList, testArgs):
        """Build and serialize a single test case.

        Creates the input tensors for opName (applying op-specific value
        constraints for ARITHMETIC_RIGHT_SHIFT, DIV and MUL), invokes the
        op's build function, and writes the serialized test out under the
        name testStr.

        Args:
            opName: key into self.TOSA_OP_LIST
            testStr: unique test name used when saving the serialized test
            dtype_or_dtypeList: one DType shared by all operands, or a
                per-operand list of DTypes
            shapeList: one shape per operand (placeholders first, then consts)
            testArgs: extra arguments forwarded to the op's build function

        Raises:
            Exception: if opName is unknown, or an input dtype is not
                supported by the op-specific tensor generation below.
        """
        try:
            op = self.TOSA_OP_LIST[opName]
        except KeyError as e:
            raise Exception("Cannot find op with name {}".format(opName))

        # Create a serializer
        self.createSerializer(opName, testStr)

        build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
        pCount, cCount = op["operands"]
        num_operands = pCount + cCount

        # A single dtype is broadcast to every operand; a list supplies one
        # dtype per operand (e.g. conv ops whose input/weight/bias differ).
        if isinstance(dtype_or_dtypeList, list):
            dtypeList = dtype_or_dtypeList
        else:
            dtypeList = [dtype_or_dtypeList] * (num_operands)

        assert (
            len(shapeList) == num_operands
        ), "shapeList length {} must match number of operands {}".format(
            len(shapeList), num_operands
        )
        assert (
            len(dtypeList) == num_operands
        ), "dtypeList length {} must match number of operands {}".format(
            len(dtypeList), num_operands
        )

        # Optional quantization-info generator for quantized ops.
        try:
            qgen = op["qgen"]
        except KeyError:
            qgen = None

        # Build the random tensor operands and the test
        tens = []

        # If test is ArithmeticRightShift, force value of operand[1] to be within [0, num_bits]
        if op["op"] == Op.ARITHMETIC_RIGHT_SHIFT:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"

            placeholders = []
            for idx, shape in enumerate(shapeList[:]):
                if idx == 1:
                    # Shift-amount operand: clamp values to the bit width of
                    # the input type so the shift is always well defined.
                    if dtypeList[idx] == DType.INT8:
                        arr = np.int32(self.rng.integers(low=0, high=8, size=shape))
                    elif dtypeList[idx] == DType.INT16:
                        arr = np.int32(self.rng.integers(low=0, high=16, size=shape))
                    elif dtypeList[idx] == DType.INT32:
                        arr = np.int32(self.rng.integers(low=0, high=32, size=shape))
                    else:
                        raise Exception("OpArithmeticRightShift: invalid input dtype")
                else:
                    arr = self.getRandTensor(shape, dtypeList[idx])
                placeholders.append(self.ser.addPlaceholder(shape, dtypeList[idx], arr))

            tens.extend(placeholders)
        elif op["op"] == Op.DIV:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.Div must have 2 placeholders, 0 consts"

            placeholders = []

            # Two invalid cases for Op.DIV:
            # 1. divisor == 0
            # 2. dividend == -(1<<31) and divisor == -1
            # Re-draw both operands until neither case appears anywhere.
            while True:
                dividend_arr = self.getRandTensor(shapeList[0], dtypeList[0])
                divisor_arr = self.getRandTensor(shapeList[1], dtypeList[1])

                if (divisor_arr == 0).any():
                    continue

                if (dividend_arr == -(2 ** 31)).any() and (divisor_arr == -1).any():
                    continue

                break

            placeholders.append(
                self.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
            )
            placeholders.append(
                self.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
            )

            tens.extend(placeholders)
        elif op["op"] == Op.MUL:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.MUL must have 2 placeholders, 0 consts"

            if dtypeList[0] == DType.FLOAT:
                tens.extend(self.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
            else:
                placeholders = []

                # Make sure multiply result in int32 range
                shift = testArgs[0]
                if dtypeList[0] == DType.INT8:
                    num_bits = 8
                elif dtypeList[0] == DType.INT16:
                    num_bits = 16
                elif dtypeList[0] == DType.INT32:
                    num_bits = 32
                else:
                    raise Exception("OpMul: invalid input dtype")

                # NOTE(review): this loop overwrites a_arr/b_arr on every
                # iteration (the loop variables idx/shape are unused), so only
                # the values drawn on the final pass feed the scaling loop
                # below — looks vestigial; confirm before cleaning up.
                for idx, shape in enumerate(shapeList[:]):
                    low = -(2 ** (num_bits - 1))
                    high = (2 ** (num_bits - 1)) - 1

                    a_arr = np.int32(
                        self.rng.integers(low=low, high=high, size=shapeList[0])
                    )
                    b_arr = np.int32(
                        self.rng.integers(low=low, high=high, size=shapeList[1])
                    )

                # Repeatedly halve both operands until the (rounded, shifted)
                # 64-bit product is guaranteed to fit in int32.
                i = 0
                while True:

                    a_arr_64 = a_arr.astype(np.int64)
                    b_arr_64 = b_arr.astype(np.int64)

                    if shift > 0:
                        rounding = 1 << (shift - 1)
                        result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
                    else:
                        result_arr = a_arr_64 * b_arr_64

                    if (result_arr > -(2 ** 31)).all() and (
                        result_arr <= ((2 ** 31) - 1)
                    ).all():
                        break

                    i = i + 1
                    a_arr = a_arr // 2
                    b_arr = b_arr // 2

                placeholders.append(
                    self.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
                )
                placeholders.append(
                    self.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
                )

                tens.extend(placeholders)
        else:
            # Generic case: the first pCount operands become placeholders,
            # the remaining cCount become consts.
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
            )
            tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))

        # Generate quantization info keyed off the first operand's dtype.
        if qgen is not None:
            qinfo = qgen(self, op, dtypeList[0])
        else:
            qinfo = None

        try:
            if qinfo is not None:
                resultName = build_fcn(self, op["op"], *tens, *testArgs, qinfo)
            else:
                resultName = build_fcn(self, op["op"], *tens, *testArgs)
        except TypeError as e:
            # Dump the failing call for easier debugging of arity mismatches
            # between the op table and its build function.
            print(
                "build_fcn: {}\nTensors: {}\nArgs: {}\n".format(
                    build_fcn, tens, testArgs
                )
            )
            raise e

        # Save the serialized test
        self.serialize("test")
Eric Kunzee5e26762020-10-13 16:11:07 -07001857
1858 def createDynamicOpLists(self):
1859
1860 # Dynamically create op lists for convolutions with a list of kernel sizes
Kevin Cheng550ccc52021-03-03 11:21:43 -08001861 KERNELS = [[1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3]]
Eric Kunzee5e26762020-10-13 16:11:07 -07001862
1863 for k in KERNELS:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001864 testName = "conv2d_{}x{}".format(k[0], k[1])
1865 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST["conv2d_TEMPLATE"].copy()
1866 self.TOSA_OP_LIST[testName]["filter"] = k
1867 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07001868
Kevin Cheng550ccc52021-03-03 11:21:43 -08001869 testName = "depthwise_conv2d_{}x{}".format(k[0], k[1])
1870 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
1871 "depthwise_conv2d_TEMPLATE"
1872 ].copy()
1873 self.TOSA_OP_LIST[testName]["filter"] = k
1874 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07001875
Kevin Cheng550ccc52021-03-03 11:21:43 -08001876 testName = "transpose_conv2d_{}x{}".format(k[0], k[1])
1877 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
1878 "transpose_conv2d_TEMPLATE"
1879 ].copy()
1880 self.TOSA_OP_LIST[testName]["filter"] = k
1881 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07001882
1883 # Delete any templates after having created any dynamic ops
1884 # This is a two-pass operation because it's bad practice to delete
1885 # keys from dictionaries while iterating
1886 keyList = []
1887 for k in self.TOSA_OP_LIST:
1888 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001889 if self.TOSA_OP_LIST[k]["template"] == True:
Eric Kunzee5e26762020-10-13 16:11:07 -07001890 keyList.append(k)
1891 continue
1892 except KeyError:
1893 pass
1894
1895 for k in keyList:
1896 del self.TOSA_OP_LIST[k]
1897
1898 def initOpListDefaults(self):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001899 """Fill in default fields for ops if they aren't already specified.
1900 Look for missing required fields (datastructure linting)."""
Eric Kunzee5e26762020-10-13 16:11:07 -07001901 for op in self.TOSA_OP_LIST:
1902
1903 # Required fields
1904 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001905 pl, c = self.TOSA_OP_LIST[op]["operands"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001906 except (KeyError, ValueError, TypeError):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001907 raise Exception(
1908 "Op {} is missing a valid operand tuple in TOSA_OP_LIST".format(op)
1909 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001910
1911 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001912 fcn, tgen, arggen = self.TOSA_OP_LIST[op]["build_fcn"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001913 except (KeyError, ValueError, TypeError):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001914 raise Exception(
1915 "Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST".format(
1916 op
1917 )
1918 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001919
1920 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001921 types = self.TOSA_OP_LIST[op]["types"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001922 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001923 raise Exception(
1924 "Op {} is missing a valid type list in TOSA_OP_LIST".format(op)
1925 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001926
1927 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001928 opcode = self.TOSA_OP_LIST[op]["op"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001929 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001930 raise Exception(
1931 "Op {} is missing the Op field in TOSA_OP_LIST".format(op)
1932 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001933
1934 # Put in default rank range, if missing
1935 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001936 rank = self.TOSA_OP_LIST[op]["rank"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001937 except KeyError:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001938 self.TOSA_OP_LIST[op]["rank"] = self.DEFAULT_RANK_RANGE
Eric Kunzee5e26762020-10-13 16:11:07 -07001939
    # Tensor operator list
    # 'op': op name
    # 'operands': tuple of (placeholder, const) operands
    # 'rank': optional, restricts rank to tuple inclusive of (min, max),
    # if not specified, defaults to (1, 4)
    # 'build_fcn': tuple of the function to (build_operator(), TensorGen function, ArgGen enum)
    # 'types': array of datatypes to be tested
    TYPE_FP = [DType.FLOAT]  # floating-point only

    TYPE_INT = [DType.INT8, DType.INT16, DType.INT32]  # Excludes INT4
    TYPE_INT_FP = [DType.INT8, DType.INT16, DType.INT32, DType.FLOAT]  # Excludes INT4

    TYPE_BOOL = [DType.BOOL]
    TYPE_FI32 = [DType.FLOAT, DType.INT32]  # float plus int32
    TYPE_FIB = [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL]
    TYPE_FI16 = [DType.FLOAT, DType.INT16]

    TYPE_NARROW_INT_FP = [DType.INT8, DType.INT16, DType.FLOAT]  # no 32-bit int

    # Convolution-style ops: each list row is (input, weight, accumulator)
    # dtypes; a bare DType entry is broadcast to all three operands.
    TYPE_CONV2D = [
        [DType.INT8, DType.INT8, DType.INT32],
        [DType.INT16, DType.INT8, DType.INT48],
        DType.FLOAT,
    ]

    # TOSA_TENSOR_MAX_RANK comes from the serialization library's wildcard
    # import (tosa_serializer).
    DEFAULT_RANK_RANGE = (1, TOSA_TENSOR_MAX_RANK)
Eric Kunzee5e26762020-10-13 16:11:07 -07001966
1967 TOSA_OP_LIST = {
Jared Smolens573ecd42021-03-04 15:24:10 -08001968 # Tensor operators
Kevin Cheng550ccc52021-03-03 11:21:43 -08001969 "argmax": {
1970 "op": Op.ARGMAX,
1971 "operands": (1, 0),
1972 "build_fcn": (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
1973 "types": TYPE_NARROW_INT_FP,
1974 },
Jared Smolens573ecd42021-03-04 15:24:10 -08001975 "avg_pool2d": {
1976 "op": Op.AVG_POOL2D,
1977 "operands": (1, 0),
1978 "rank": (4, 4),
1979 "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
1980 "qgen": TosaQuantGen.qgUnary,
1981 "types": TYPE_NARROW_INT_FP,
1982 },
Eric Kunzee5e26762020-10-13 16:11:07 -07001983 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08001984 "conv2d_TEMPLATE": {
1985 "op": Op.CONV2D,
1986 "operands": (1, 2),
1987 "rank": (4, 4),
1988 "build_fcn": (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D),
1989 "qgen": TosaQuantGen.qgConv,
Kevin Cheng989cb052021-04-28 16:29:44 -07001990 "types": TYPE_CONV2D,
Kevin Cheng550ccc52021-03-03 11:21:43 -08001991 "template": True,
1992 },
Jared Smolens573ecd42021-03-04 15:24:10 -08001993 # Conv3d TBD
Eric Kunzee5e26762020-10-13 16:11:07 -07001994 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08001995 "depthwise_conv2d_TEMPLATE": {
1996 "op": Op.DEPTHWISE_CONV2D,
1997 "operands": (1, 2),
1998 "filter": [1, 1],
1999 "rank": (4, 4),
2000 "build_fcn": (
2001 build_depthwise_conv2d,
2002 TosaTensorGen.tgDepthwiseConv2D,
2003 TosaArgGen.agConv2D,
2004 ),
2005 "qgen": TosaQuantGen.qgConv,
Kevin Cheng989cb052021-04-28 16:29:44 -07002006 "types": TYPE_CONV2D,
Kevin Cheng550ccc52021-03-03 11:21:43 -08002007 "template": True,
2008 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002009 "fully_connected": {
2010 "op": Op.FULLY_CONNECTED,
2011 "operands": (1, 2),
2012 "rank": (2, 2),
2013 "build_fcn": (build_fully_connected, TosaTensorGen.tgFullyConnected, None),
2014 "qgen": TosaQuantGen.qgConv,
2015 "types": TYPE_CONV2D,
2016 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002017 "matmul": {
2018 "op": Op.MATMUL,
2019 "operands": (2, 0),
Kevin Cheng2d60f002021-06-09 14:18:32 -07002020 "rank": (3, 3),
Jared Smolens573ecd42021-03-04 15:24:10 -08002021 "build_fcn": (build_matmul, TosaTensorGen.tgMatmul, None),
2022 "qgen": TosaQuantGen.qgMatmul,
2023 "types": TYPE_NARROW_INT_FP,
2024 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002025 "max_pool2d": {
2026 "op": Op.MAX_POOL2D,
2027 "operands": (1, 0),
2028 "rank": (4, 4),
2029 "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
2030 "types": TYPE_NARROW_INT_FP,
2031 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002032 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08002033 "transpose_conv2d_TEMPLATE": {
2034 "op": Op.TRANSPOSE_CONV2D,
Kevin Cheng989cb052021-04-28 16:29:44 -07002035 "operands": (1, 2),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002036 "rank": (4, 4),
2037 "build_fcn": (
2038 build_transpose_conv2d,
2039 TosaTensorGen.tgTransposeConv2D,
2040 TosaArgGen.agTransposeConv2D,
2041 ),
2042 "qgen": TosaQuantGen.qgConv,
Kevin Cheng989cb052021-04-28 16:29:44 -07002043 "types": TYPE_CONV2D,
Kevin Cheng550ccc52021-03-03 11:21:43 -08002044 "template": True,
2045 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002046 # Activation functions
Kevin Cheng550ccc52021-03-03 11:21:43 -08002047 "clamp": {
2048 "op": Op.CLAMP,
2049 "operands": (1, 0),
2050 "build_fcn": (build_clamp, TosaTensorGen.tgBasic, None),
2051 "types": TYPE_NARROW_INT_FP,
2052 },
2053 "relun": {
2054 "op": Op.RELUN,
2055 "operands": (1, 0),
2056 "build_fcn": (build_relun, TosaTensorGen.tgBasic, None),
2057 "types": TYPE_FI32,
2058 },
2059 "sigmoid": {
2060 "op": Op.SIGMOID,
2061 "operands": (1, 0),
2062 "build_fcn": (build_sigmoid, TosaTensorGen.tgBasic, None),
2063 "types": TYPE_FP,
2064 },
2065 "tanh": {
2066 "op": Op.TANH,
2067 "operands": (1, 0),
2068 "build_fcn": (build_tanh, TosaTensorGen.tgBasic, None),
2069 "types": TYPE_FP,
2070 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002071 # Elementwise Binary Operators
2072 "add": {
2073 "op": Op.ADD,
2074 "operands": (2, 0),
2075 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2076 "types": TYPE_FI32,
2077 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002078 "arithmetic_right_shift": {
2079 "op": Op.ARITHMETIC_RIGHT_SHIFT,
2080 "operands": (2, 0),
2081 "build_fcn": (
2082 build_arithmetic_right_shift,
2083 TosaTensorGen.tgBroadcastFuzz,
2084 TosaArgGen.agArithmeticRightShift,
2085 ),
2086 "types": TYPE_INT,
2087 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002088 "bitwise_and": {
2089 "op": Op.BITWISE_AND,
2090 "operands": (2, 0),
2091 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2092 "types": TYPE_INT,
2093 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002094 "bitwise_or": {
2095 "op": Op.BITWISE_OR,
2096 "operands": (2, 0),
2097 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2098 "types": TYPE_INT,
2099 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002100 "bitwise_xor": {
2101 "op": Op.BITWISE_XOR,
2102 "operands": (2, 0),
2103 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2104 "types": TYPE_INT,
2105 },
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07002106 "div": {
2107 "op": Op.DIV,
2108 "operands": (2, 0),
2109 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2110 "types": [DType.INT32],
2111 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002112 "logical_and": {
2113 "op": Op.LOGICAL_AND,
2114 "operands": (2, 0),
2115 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2116 "types": TYPE_BOOL,
2117 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002118 "logical_left_shift": {
2119 "op": Op.LOGICAL_LEFT_SHIFT,
2120 "operands": (2, 0),
2121 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2122 "types": TYPE_INT,
2123 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002124 "logical_right_shift": {
2125 "op": Op.LOGICAL_RIGHT_SHIFT,
2126 "operands": (2, 0),
2127 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2128 "types": TYPE_INT,
2129 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002130 "logical_or": {
2131 "op": Op.LOGICAL_OR,
2132 "operands": (2, 0),
2133 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2134 "types": TYPE_BOOL,
2135 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002136 "logical_xor": {
2137 "op": Op.LOGICAL_XOR,
2138 "operands": (2, 0),
2139 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2140 "types": TYPE_BOOL,
2141 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002142 "maximum": {
2143 "op": Op.MAXIMUM,
2144 "operands": (2, 0),
2145 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2146 "types": TYPE_FI32,
2147 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002148 "minimum": {
2149 "op": Op.MINIMUM,
2150 "operands": (2, 0),
2151 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2152 "types": TYPE_FI32,
2153 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002154 "mul": {
2155 "op": Op.MUL,
2156 "operands": (2, 0),
2157 "build_fcn": (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul),
2158 "types": TYPE_INT_FP,
2159 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002160 "pow": {
2161 "op": Op.POW,
2162 "operands": (2, 0),
2163 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBasic, None),
2164 "types": TYPE_FP,
2165 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002166 "sub": {
2167 "op": Op.SUB,
2168 "operands": (2, 0),
2169 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2170 "types": TYPE_FI32,
2171 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002172 "table": {
2173 "op": Op.TABLE,
2174 # Use the automatic generation functions to create the input array
2175 # but create the table tensor in the build function, as it may be
2176 # a different type from the input
2177 "operands": (1, 0),
2178 "build_fcn": (build_table, TosaTensorGen.tgBasic, None),
2179 "types": [DType.INT16],
2180 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002181 # Elementwise Unary operators
2182 "abs": {
2183 "op": Op.ABS,
2184 "operands": (1, 0),
2185 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2186 "types": TYPE_FI32,
2187 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002188 "bitwise_not": {
2189 "op": Op.BITWISE_NOT,
2190 "operands": (1, 0),
2191 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2192 "types": TYPE_INT,
2193 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002194 "ceil": {
2195 "op": Op.CEIL,
2196 "operands": (1, 0),
2197 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2198 "types": TYPE_FP,
2199 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002200 "clz": {
2201 "op": Op.CLZ,
2202 "operands": (1, 0),
2203 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2204 "types": [DType.INT32],
2205 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002206 "exp": {
2207 "op": Op.EXP,
2208 "operands": (1, 0),
2209 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2210 "types": TYPE_FP,
2211 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002212 "floor": {
2213 "op": Op.FLOOR,
2214 "operands": (1, 0),
2215 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2216 "types": TYPE_FP,
2217 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002218 "log": {
2219 "op": Op.LOG,
2220 "operands": (1, 0),
2221 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2222 "types": TYPE_FP,
2223 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002224 "logical_not": {
2225 "op": Op.LOGICAL_NOT,
2226 "operands": (1, 0),
2227 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2228 "types": TYPE_BOOL,
2229 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002230 "negate": {
2231 "op": Op.NEGATE,
2232 "operands": (1, 0),
2233 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2234 "qgen": TosaQuantGen.qgUnary,
2235 "types": TYPE_INT_FP,
2236 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002237 "reciprocal": {
2238 "op": Op.RECIPROCAL,
2239 "operands": (1, 0),
2240 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2241 "types": TYPE_FP,
2242 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002243 "rsqrt": {
2244 "op": Op.RSQRT,
2245 "operands": (1, 0),
2246 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2247 "types": TYPE_FP,
2248 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002249 # Elementwise Ternary operators
2250 "select": {
2251 "op": Op.SELECT,
2252 "operands": (3, 0),
2253 "build_fcn": (build_select, TosaTensorGen.tgBroadcastFuzz, None),
2254 "types": TYPE_FIB,
2255 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002256 # Comparison operators
2257 "equal": {
2258 "op": Op.EQUAL,
2259 "operands": (2, 0),
2260 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2261 "types": TYPE_FI32,
2262 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002263 "greater_equal": {
2264 "op": Op.GREATER_EQUAL,
2265 "operands": (2, 0),
2266 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2267 "types": TYPE_FI32,
2268 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002269 "greater": {
2270 "op": Op.GREATER,
2271 "operands": (2, 0),
2272 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2273 "types": TYPE_FI32,
2274 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002275 # Reduction operators
2276 "reduce_all": {
2277 "op": Op.REDUCE_ALL,
2278 "operands": (1, 0),
2279 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2280 "types": TYPE_BOOL,
2281 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002282 "reduce_any": {
2283 "op": Op.REDUCE_ANY,
2284 "operands": (1, 0),
2285 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2286 "types": TYPE_BOOL,
2287 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002288 "reduce_max": {
2289 "op": Op.REDUCE_MAX,
2290 "operands": (1, 0),
2291 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2292 "types": TYPE_INT_FP,
2293 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002294 "reduce_min": {
2295 "op": Op.REDUCE_MAX,
2296 "operands": (1, 0),
2297 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2298 "types": TYPE_INT_FP,
2299 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002300 "reduce_product": {
2301 "op": Op.REDUCE_PRODUCT,
2302 "operands": (1, 0),
2303 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2304 "types": TYPE_FP,
2305 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002306 "reduce_sum": {
2307 "op": Op.REDUCE_SUM,
2308 "operands": (1, 0),
2309 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2310 "types": TYPE_FI32,
2311 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002312 # Data layout operators
Kevin Cheng550ccc52021-03-03 11:21:43 -08002313 "concat": {
2314 "op": Op.CONCAT,
2315 "operands": (2, 0),
2316 "build_fcn": (build_concat, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2317 "types": TYPE_FIB,
2318 },
2319 "pad": {
2320 "op": Op.PAD,
2321 "operands": (1, 0),
2322 "build_fcn": (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad),
2323 "qgen": TosaQuantGen.qgPad,
2324 "types": TYPE_FIB,
2325 },
2326 "reshape": {
2327 "op": Op.RESHAPE,
2328 "operands": (1, 0),
2329 "build_fcn": (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
2330 "types": TYPE_FIB,
2331 },
2332 "reverse": {
2333 "op": Op.REVERSE,
2334 "operands": (1, 0),
2335 "build_fcn": (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2336 "types": TYPE_FIB,
2337 },
2338 "slice": {
2339 "op": Op.SLICE,
2340 "operands": (1, 0),
2341 "build_fcn": (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
2342 "types": TYPE_FIB,
2343 },
2344 "tile": {
2345 "op": Op.TILE,
2346 "operands": (1, 0),
2347 "build_fcn": (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
2348 "types": TYPE_FIB,
2349 },
2350 "transpose": {
2351 "op": Op.TRANSPOSE,
2352 "operands": (1, 0),
Jeremy Johnsona6185572021-06-21 15:55:35 +01002353 "rank": (1, 4),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002354 "build_fcn": (
2355 build_transpose,
2356 TosaTensorGen.tgBasic,
2357 TosaArgGen.agTranspose,
2358 ),
2359 "types": TYPE_FIB,
2360 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002361 # Data nodes
2362 "const": {
2363 "op": Op.CONST,
2364 "operands": (1, 0),
2365 "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None),
2366 "types": TYPE_FIB,
2367 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002368 "identity": {
2369 "op": Op.IDENTITY,
2370 "operands": (1, 0),
2371 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2372 "types": TYPE_FIB,
2373 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002374 # Scatter/Gather
Kevin Cheng550ccc52021-03-03 11:21:43 -08002375 "gather": {
2376 "op": Op.GATHER,
2377 # Only specify 'values' tensor here. 'indices' is generated in op building stage
2378 "operands": (1, 0),
2379 "rank": (3, 3),
2380 "build_fcn": (build_gather, TosaTensorGen.tgBasic, None),
2381 "types": TYPE_INT_FP,
2382 },
2383 "scatter": {
2384 "op": Op.SCATTER,
2385 # Only specify 'values_in' tensor here.
2386 #'indices' and 'input' are generated in op building stage
2387 "operands": (2, 0),
2388 "rank": (3, 3),
2389 "build_fcn": (build_scatter, TosaTensorGen.tgScatter, None),
2390 "types": TYPE_INT_FP,
2391 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002392 # Image operations
Kevin Cheng550ccc52021-03-03 11:21:43 -08002393 "resize": {
2394 "op": Op.RESIZE,
2395 "operands": (1, 0),
2396 "rank": (4, 4),
2397 "build_fcn": (build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
2398 "types": [DType.INT8, DType.INT16, DType.FLOAT],
2399 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002400 # Type conversion
Kevin Cheng550ccc52021-03-03 11:21:43 -08002401 "cast": {
2402 "op": Op.CAST,
2403 "operands": (1, 0),
2404 "build_fcn": (build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast),
2405 "types": [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL],
2406 },
2407 "rescale": {
2408 "op": Op.RESCALE,
2409 "operands": (1, 0),
2410 "build_fcn": (build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale),
2411 "types": [DType.INT8, DType.INT16, DType.INT32, DType.INT48],
2412 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002413 # Custom
2414 # Not implemented.
Jared Smolens573ecd42021-03-04 15:24:10 -08002415 # Control flow operators
Eric Kunzee5e26762020-10-13 16:11:07 -07002416 # Two varients of cond_if, one that generates one of two constant tensors (no
2417 # inputs to the basic blocks, one output) and another that either adds or subtracts two tensors
2418 # (two inputs to the basic blocks, one output)
Kevin Cheng550ccc52021-03-03 11:21:43 -08002419 "cond_if_const": {
2420 "op": Op.COND_IF,
2421 "operands": (0, 2),
2422 "build_fcn": (
2423 build_cond_if_const,
2424 TosaTensorGen.tgBasic,
2425 TosaArgGen.agCondIf,
2426 ),
2427 "types": [DType.BOOL],
2428 },
2429 "cond_if_binary": {
2430 "op": Op.COND_IF,
2431 "operands": (2, 0),
2432 "build_fcn": (
2433 build_cond_if_binary,
2434 TosaTensorGen.tgBasic,
2435 TosaArgGen.agCondIf,
2436 ),
2437 "types": TYPE_FI32,
2438 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002439 # while_loop
Kevin Cheng550ccc52021-03-03 11:21:43 -08002440 "while_loop": {
2441 "op": Op.WHILE_LOOP,
2442 "operands": (0, 1),
2443 "build_fcn": (
2444 build_while_loop,
2445 TosaTensorGen.tgBasic,
2446 TosaArgGen.agWhileLoop,
2447 ),
2448 "types": [DType.INT32],
2449 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002450 }
2451
Kevin Cheng550ccc52021-03-03 11:21:43 -08002452
class OutputShaper:
    """Static helpers that compute the expected output shape and dtype for
    common classes of operations, registering the result tensor with the
    serializer via ser.addOutput()."""

    # Methods in this class compute the expected output shape and datatype
    # for common classes of operations
    def __init__(self):
        # Stateless; all functionality is provided via static methods.
        pass

    # These methods return arguments that can be used for
    # creating a new output tensor
2461 @staticmethod
2462 def binaryBroadcastOp(ser, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002463 assert len(a.shape) == len(b.shape)
2464 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002465
2466 shape = []
2467 for i in range(len(a.shape)):
2468 if a.shape[i] == 1:
2469 shape.append(b.shape[i])
2470 else:
2471 shape.append(a.shape[i])
2472
Kevin Cheng550ccc52021-03-03 11:21:43 -08002473 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002474
2475 @staticmethod
2476 def binaryNonBroadcastOp(ser, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002477 assert len(a.shape) == len(b.shape)
2478 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002479
2480 shape = []
2481 for i in range(len(a.shape)):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002482 assert a.shape[i] == b.shape[i]
Eric Kunzee5e26762020-10-13 16:11:07 -07002483 shape.append(a.shape[i])
2484
Kevin Cheng550ccc52021-03-03 11:21:43 -08002485 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002486
2487 @staticmethod
2488 def unaryOp(ser, a):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002489 return ser.addOutput(a.shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002490
2491 @staticmethod
2492 def selectOp(ser, cond, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002493 assert len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape)
2494 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002495
2496 shape = []
2497 for i in range(len(a.shape)):
2498 shape.append(max(cond.shape[i], a.shape[i], b.shape[i]))
2499
Kevin Cheng550ccc52021-03-03 11:21:43 -08002500 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002501
2502 @staticmethod
2503 def binaryComparisonOp(ser, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002504 assert len(a.shape) == len(b.shape)
2505 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002506
2507 # Do broadcast
2508 shape = []
2509 for i in range(len(a.shape)):
2510 if a.shape[i] == 1:
2511 shape.append(b.shape[i])
2512 else:
2513 shape.append(a.shape[i])
2514
2515 # Force the output type to bool
Kevin Cheng550ccc52021-03-03 11:21:43 -08002516 return ser.addOutput(shape, DType.BOOL)
Eric Kunzee5e26762020-10-13 16:11:07 -07002517
2518 @staticmethod
2519 def reduceOp(ser, a, axis):
2520
2521 shape = a.shape.copy()
2522
2523 shape[axis] = 1
2524
Kevin Cheng550ccc52021-03-03 11:21:43 -08002525 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002526
2527 @staticmethod
2528 def argmaxOp(ser, a, axis):
2529 shape = a.shape.copy()
2530 del shape[axis]
Kevin Cheng550ccc52021-03-03 11:21:43 -08002531 return ser.addOutput(shape, DType.INT32)
Eric Kunzee5e26762020-10-13 16:11:07 -07002532
    @staticmethod
    def conv2dOp(ser, ifm, filter, strides, padding, dilations):
        """Register the output of CONV2D (also reused by transpose_conv2d).

        ifm is NHWC, filter is OHWI; the output is NHWC with the standard
        strided/dilated convolution spatial formula.  The output dtype
        widens per the accumulator: INT8->INT32, INT16->INT48, FLOAT->FLOAT.

        Raises:
            Exception: for any other input dtype.
        """

        # IFM: NHWC
        # Filter: OHWI
        # OFM: NHWC

        if len(padding) == 2:
            # Expand padding to 4 parameters in the case of transpose_conv2d
            # From H,W to T,B,L,R
            padding = [padding[0], padding[0], padding[1], padding[1]]

        # (in - k - (k-1)*(d-1) + pad_before + pad_after) // stride + 1
        # i.e. the effective (dilated) kernel extent is k + (k-1)*(d-1).
        h = (
            ifm.shape[1]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        w = (
            ifm.shape[2]
            - filter.shape[2]
            - (filter.shape[2] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1

        if h <= 0 or w <= 0:
            # Invalid test parameters?  Clamp to an empty spatial extent and
            # mark the generated test as an expected failure.
            h = 0
            w = 0
            ser.setExpectedFailure(True, "Invalid combination of conv2d parameters")

        # Output channels come from the filter's O dimension.
        ofm_shape = [ifm.shape[0], h, w, filter.shape[0]]

        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002579
    @staticmethod
    def depthwiseConv2dOp(ser, ifm, filter, strides, padding, dilations):
        """Register the output of DEPTHWISE_CONV2D.

        ifm is NHWC, filter is HWCM; the output is NHW(C*M) — each input
        channel produces M output channels.  Output dtype widens per the
        accumulator: INT8->INT32, INT16->INT48, FLOAT->FLOAT.

        Raises:
            Exception: for any other input dtype.
        """
        # IFM: NHWC
        # Filter: HWCM
        # OFM: NHW C*M

        # Same spatial formula as conv2dOp, but the filter's H/W live at
        # indices 0/1 (HWCM layout) rather than 1/2 (OHWI layout).
        h = (
            ifm.shape[1]
            - filter.shape[0]
            - (filter.shape[0] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        w = (
            ifm.shape[2]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1

        if h <= 0 or w <= 0:
            # Invalid test parameters?  Clamp and flag as expected failure.
            h = 0
            w = 0
            ser.setExpectedFailure(True, "Invalid combination of conv2d parameters")

        # Channels-out = C * channel multiplier M.
        ofm_shape = [ifm.shape[0], h, w, filter.shape[2] * filter.shape[3]]

        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002619
2620 @staticmethod
2621 def pool2dOp(ser, ifm, kernel, stride, pad):
2622 # input: NHWC
2623 h = (ifm.shape[1] + pad[0] + pad[1] + stride[0] - kernel[0]) // stride[0]
2624 w = (ifm.shape[2] + pad[2] + pad[3] + stride[1] - kernel[1]) // stride[1]
2625
2626 if h <= 0 or w <= 0:
2627 # Invalid test parameters?
2628 h = 0
2629 w = 0
Kevin Cheng550ccc52021-03-03 11:21:43 -08002630 ser.setExpectedFailure(True, "Invalid combination of pooling parameters")
Eric Kunzee5e26762020-10-13 16:11:07 -07002631
2632 ofm_shape = [ifm.shape[0], h, w, ifm.shape[3]]
Kevin Cheng550ccc52021-03-03 11:21:43 -08002633 return ser.addOutput(ofm_shape, ifm.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002634
2635 @staticmethod
2636 def fullyConnectedOp(ser, input, filter):
2637 # input: N, IC
2638 # filter: OC, IC
2639 # output: N, OC
2640
2641 output_shape = [input.shape[0], filter.shape[0]]
2642
Kevin Cheng3a478572021-01-22 17:21:02 -08002643 if input.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07002644 out_dtype = DType.INT32
2645 elif input.dtype == DType.INT16:
2646 out_dtype = DType.INT48
2647 elif input.dtype == DType.FLOAT:
2648 out_dtype = DType.FLOAT
2649 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002650 raise Exception("Unsupported input dtype: {}".format(input.dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07002651
Kevin Cheng550ccc52021-03-03 11:21:43 -08002652 return ser.addOutput(output_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002653
2654 @staticmethod
2655 def matmulOp(ser, a, b):
Kevin Cheng2d60f002021-06-09 14:18:32 -07002656 # a: N, H, C
2657 # b: N, C, W
2658 # out: N, H, W
Eric Kunzee5e26762020-10-13 16:11:07 -07002659
Kevin Cheng2d60f002021-06-09 14:18:32 -07002660 output_shape = [a.shape[0], a.shape[1], b.shape[2]]
Eric Kunzee5e26762020-10-13 16:11:07 -07002661
Kevin Cheng3a478572021-01-22 17:21:02 -08002662 if a.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07002663 out_dtype = DType.INT32
2664 elif a.dtype == DType.INT16:
2665 out_dtype = DType.INT48
2666 elif a.dtype == DType.FLOAT:
2667 out_dtype = DType.FLOAT
2668 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002669 raise Exception("UNsupported input dtype for matmul: {}".format(a.dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07002670
Kevin Cheng550ccc52021-03-03 11:21:43 -08002671 return ser.addOutput(output_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002672
2673 @staticmethod
2674 def concatOp(ser, a, b, axis):
2675
2676 output_shape = a.shape.copy()
2677 output_shape[axis] = a.shape[axis] + b.shape[axis]
2678
Kevin Cheng550ccc52021-03-03 11:21:43 -08002679 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002680
2681 @staticmethod
2682 def padOp(ser, a, padding):
2683
2684 output_shape = a.shape.copy()
2685
2686 for i in range(len(output_shape)):
2687 output_shape[i] = padding[i][0] + padding[i][1] + output_shape[i]
2688
Kevin Cheng550ccc52021-03-03 11:21:43 -08002689 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002690
2691 @staticmethod
2692 def reshapeOp(ser, a, shape):
2693 output_shape = shape.copy()
2694
2695 totalElements = 1
2696 for i in a.shape:
2697 totalElements *= i
2698
2699 # If there are any -1 elements, figure out what that dimension must be
2700 totalOutputElements = 1
2701 for i in output_shape:
2702 if i != -1:
2703 totalOutputElements *= i
2704
2705 # And fill it in
2706 for i in range(len(output_shape)):
2707 if output_shape[i] == -1:
2708 output_shape[i] = totalElements // totalOutputElements
2709
Kevin Cheng550ccc52021-03-03 11:21:43 -08002710 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002711
2712 @staticmethod
2713 def sliceOp(ser, a, begin, size):
2714
2715 output_shape = size.copy()
Kevin Cheng550ccc52021-03-03 11:21:43 -08002716 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002717
2718 @staticmethod
2719 def tileOp(ser, a, multiples):
2720
2721 output_shape = a.shape.copy()
Kevin Cheng550ccc52021-03-03 11:21:43 -08002722 assert len(multiples) == len(output_shape)
Eric Kunzee5e26762020-10-13 16:11:07 -07002723
2724 for i in range(len(output_shape)):
2725 output_shape[i] = a.shape[i] * multiples[i]
2726
Kevin Cheng550ccc52021-03-03 11:21:43 -08002727 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002728
2729 @staticmethod
2730 def transposeOp(ser, a, perms):
2731 output_shape = a.shape.copy()
Kevin Cheng550ccc52021-03-03 11:21:43 -08002732 assert len(perms) == len(output_shape)
Eric Kunzee5e26762020-10-13 16:11:07 -07002733
2734 for i in range(len(output_shape)):
2735 output_shape[i] = a.shape[perms[i]]
2736
Kevin Cheng550ccc52021-03-03 11:21:43 -08002737 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002738
2739 @staticmethod
Kevin Cheng77d0f762020-11-24 10:26:32 -08002740 def gatherOp(ser, values, indices):
2741 assert len(values.shape) == 3
2742 assert len(indices.shape) == 2
2743 assert values.shape[0] == indices.shape[0]
Eric Kunzee5e26762020-10-13 16:11:07 -07002744
Kevin Cheng77d0f762020-11-24 10:26:32 -08002745 output_shape = [values.shape[0], indices.shape[1], values.shape[2]]
2746
Kevin Cheng550ccc52021-03-03 11:21:43 -08002747 return ser.addOutput(output_shape, values.dtype)
Kevin Cheng77d0f762020-11-24 10:26:32 -08002748
2749 @staticmethod
2750 def scatterOp(ser, values_in, indices, input):
2751 assert len(values_in.shape) == 3
2752 assert len(indices.shape) == 2
2753 assert len(input.shape) == 3
Kevin Cheng550ccc52021-03-03 11:21:43 -08002754 assert values_in.shape[0] == indices.shape[0] # N
2755 assert input.shape[1] == indices.shape[1] # W
2756 assert values_in.shape[2] == input.shape[2] # C
Kevin Cheng77d0f762020-11-24 10:26:32 -08002757
2758 output_shape = values_in.shape
2759
Kevin Cheng550ccc52021-03-03 11:21:43 -08002760 return ser.addOutput(output_shape, values_in.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002761
    @staticmethod
    def tableOp(ser, input, table):
        # Same shape as the input, but the registered dtype is always INT32.
        # NOTE(review): the previous comment said "with the type of the
        # table", but the code below passes DType.INT32 regardless of the
        # table's dtype -- confirm against the TOSA TABLE spec.
        return ser.addOutput(input.shape, DType.INT32)
Eric Kunzee5e26762020-10-13 16:11:07 -07002766
2767 @staticmethod
Kevin Cheng550ccc52021-03-03 11:21:43 -08002768 def resizeOp(
2769 ser,
2770 input,
2771 mode,
2772 stride,
2773 offset,
2774 shift,
2775 stride_fp,
2776 offset_fp,
2777 output_dims,
2778 input_dtype,
2779 output_dtype,
2780 ):
Eric Kunzee5e26762020-10-13 16:11:07 -07002781
2782 output_dims = [input.shape[0], output_dims[0], output_dims[1], input.shape[3]]
2783
Kevin Cheng77d0f762020-11-24 10:26:32 -08002784 if input_dtype == DType.FLOAT:
2785 if stride_fp[0] <= 0 or stride_fp[1] <= 0:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002786 ser.setExpectedFailure(True, "Negative or zero stride")
Kevin Cheng77d0f762020-11-24 10:26:32 -08002787 else:
2788 if stride[0] <= 0 or stride[1] <= 0:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002789 ser.setExpectedFailure(True, "Negative or zero stride")
Eric Kunzee5e26762020-10-13 16:11:07 -07002790
Kevin Chengaee1fac2020-11-11 13:54:06 -08002791 if mode == ResizeMode.BILINEAR:
2792 if input_dtype == DType.INT8:
2793 if output_dtype != DType.INT32:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002794 ser.setExpectedFailure(True, "Invalid output data type")
Kevin Chengaee1fac2020-11-11 13:54:06 -08002795 elif input_dtype == DType.INT16:
2796 if output_dtype != DType.INT48:
Kevin Cheng989cb052021-04-28 16:29:44 -07002797 ser.setExpectedFailure(true, "Invalid output data type")
Kevin Cheng77d0f762020-11-24 10:26:32 -08002798 elif input_dtype == DType.FLOAT:
2799 if output_dtype != DType.FLOAT:
Kevin Cheng989cb052021-04-28 16:29:44 -07002800 ser.setExpectedFailure(true, "Invalid output data type")
Kevin Chengaee1fac2020-11-11 13:54:06 -08002801 else:
Kevin Cheng989cb052021-04-28 16:29:44 -07002802 ser.setExpectedFailure(true, "Invalid input data type")
Kevin Chengaee1fac2020-11-11 13:54:06 -08002803
2804 elif mode == ResizeMode.NEAREST:
2805 if input_dtype == DType.INT8:
2806 if output_dtype != DType.INT8:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002807 ser.setExpectedFailure(True, "Invalid output data type")
Kevin Chengaee1fac2020-11-11 13:54:06 -08002808 elif input_dtype == DType.INT16:
2809 if output_dtype != DType.INT16:
Kevin Cheng989cb052021-04-28 16:29:44 -07002810 ser.setExpectedFailure(true, "Invalid output data type")
Kevin Cheng77d0f762020-11-24 10:26:32 -08002811 elif input_dtype == DType.FLOAT:
2812 if output_dtype != DType.FLOAT:
Kevin Cheng989cb052021-04-28 16:29:44 -07002813 ser.setExpectedFailure(true, "Invalid output data type")
Kevin Chengaee1fac2020-11-11 13:54:06 -08002814 else:
Kevin Cheng989cb052021-04-28 16:29:44 -07002815 ser.setExpectedFailure(true, "Invalid input data type")
Kevin Chengaee1fac2020-11-11 13:54:06 -08002816
2817 else:
Kevin Cheng989cb052021-04-28 16:29:44 -07002818 ser.setExpectedFailure(true, "Invalid resize mode")
Kevin Chengaee1fac2020-11-11 13:54:06 -08002819
Kevin Cheng550ccc52021-03-03 11:21:43 -08002820 return ser.addOutput(output_dims, output_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002821
2822 @staticmethod
2823 def typeConversionOp(ser, val, out_dtype):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002824 return ser.addOutput(val.shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002825
    @staticmethod
    def transposeConv2DOp(ser, ifm, output_shape):
        """Register the output of TRANSPOSE_CONV2D.

        The caller supplies the full output shape (NHWC); this helper only
        selects the accumulator dtype (INT8->INT32, INT16->INT48,
        FLOAT->FLOAT) and flags non-positive spatial dims as an expected
        failure.

        Raises:
            Exception: for any other input dtype.
        """
        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        # H or W <= 0 means the generated parameters cannot yield a valid
        # output; keep the shape but mark the test as an expected failure.
        if output_shape[1] <= 0 or output_shape[2] <= 0:
            ser.setExpectedFailure(True, "Negative output shape")

        return ser.addOutput(output_shape, out_dtype)