#!/usr/bin/env python3

# Copyright (c) 2020, ARM Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import numpy as np
import argparse
import sys
import re
import os
import subprocess
import shlex
import json
import glob
import math
import queue
import threading
import traceback

from enum import IntEnum, Enum, unique

import tosa_serializer as ts
from tosa_serializer import *
import tosa

# Convenience aliases for the flatc-generated types that should be enums, but aren't
DType = tosa.DType.DType()
Usage = tosa.Usage.Usage()
Format = tosa.Format.Format()
Op = tosa.Op.Op()
ResizeMode = tosa.ResizeMode.ResizeMode()

class TosaQuantGen:
    '''QuantizedInfo random generator helper functions.  Specify with 'qgen': in the operator definition'''
    def __init__(self):
        pass

    @staticmethod
    def needsQinfo(op, dtype):
        if dtype == DType.AINT8 or dtype == DType.INT8:
            return True
        return False

    @staticmethod
    def qgUnary(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.UnaryQuantInfo(testGen.randInt(), testGen.randInt())
        else:
            qinfo.UnaryQuantInfo(0, 0)
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.ConvQuantInfo(testGen.randInt(), testGen.randInt())
        else:
            qinfo.ConvQuantInfo(0, 0)
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.MatMulQuantInfo(testGen.randInt(), testGen.randInt())
        else:
            qinfo.MatMulQuantInfo(0, 0)
        return qinfo

    @staticmethod
    def qgPad(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        if TosaQuantGen.needsQinfo(op, dtype):
            qinfo.PadQuantInfo(testGen.randInt())
        else:
            qinfo.PadQuantInfo(0)
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        # Derived from computeMultiplierAndShiftTosaScale32
        # Provide a floating-point scaling factor and the scale32 parameter
        # to compute the multiplier and shift

        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert(multiplier <= (1 << scaleBits))

        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits
        #print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(scaleFp, scaleBits, m, multiplier, shift))

        assert(multiplier <= (1 << scaleBits))
        assert(shift >= 0 and shift <= 63)

        return multiplier, shift
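
    # Worked example (illustrative comment only, not executed): with
    # scaleFp = 0.5 and scale32 = True, math.frexp(0.5) returns
    # (m, shift) = (0.5, 0), so multiplier = round(0.5 * 2**31) = 1 << 30
    # and the final shift is -0 + 31 = 31.  Checking the fixed-point
    # reconstruction: multiplier * 2**-shift = 2**30 / 2**31 = 0.5 == scaleFp.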


class TosaTensorGen():
    '''Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator.  The actual random data is generated separately for each test.'''
    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank):
        pl, const = opName['operands']
        shape = testGen.makeShape(rank)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank):
        pl, const = opName['operands']

        assert(rank == 4)

        shape = testGen.makeShape(rank)

        # Constrain the batch size?
        if testGen.args.max_batch_size:
            shape[0] = (shape[0] % testGen.args.max_batch_size) + 1

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank):
        shape = testGen.makeShape(rank)

        pl, const = op['operands']

        shape_list = []

        # Choose one of the inputs to broadcast
        bcast_idx = testGen.randInt(0, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If this is the chosen input, pick a random dimension to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list
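
    # Example (illustrative only): for rank 3, two operands and a random
    # shape of [4, 5, 6], choosing bcast_idx = 1 and fuzz_idx = 1 yields
    # shape_list = [[4, 5, 6], [4, 1, 6]], i.e. the second operand is
    # broadcast along the middle dimension.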

    @staticmethod
    def tgConv2D(testGen, op, rank):
        pl, const = op['operands']

        assert(rank == 4)

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrain the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op['filter']

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]
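
    # Example (illustrative only): with ifm_shape = [1, 14, 14, 8],
    # filter_hw = [3, 3] and a random ofm_depth of 16, this returns
    # [[1, 14, 14, 8], [16, 3, 3, 8], [16]] -- NHWC input, OHWI filter,
    # and a per-output-channel bias.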

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank):
        pl, const = op['operands']

        assert(rank == 4)

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrain the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op['filter']

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        return [ifm_shape, filter_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank):
        pl, const = op['operands']

        assert(rank == 4)
        assert(pl == 1 and const == 2)

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrain the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        # Filter is H, W, C, M
        filter_hw = op['filter']

        # Generate a random depth multiplier M, but don't let it get too big
        # because the output depth is M * C
        filter_m = (testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgFullyConnected(testGen, op, rank):
        pl, const = op['operands']

        assert(rank == 2)
        assert(pl == 2 and const == 0)

        input_shape = testGen.makeShape(rank)
        filter_oc = testGen.makeShape(1)[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank):
        pl, const = op['operands']

        assert(rank == 2)
        assert(pl == 2 and const == 0)

        a_shape = testGen.makeShape(rank)
        b_oc = testGen.makeShape(1)[0]
        b_shape = np.asarray([a_shape[1], b_oc])

        return [a_shape, b_shape]

class TosaArgGen:
    '''Argument generators create exhaustive or random lists of attributes for operators that take
    attributes or other parameters.  The return value is a list of (descriptive_name, [arglist])
    tuples where the descriptive_name is appended to the test name and the arglist is expanded
    as arguments to the operator build function.'''
    def __init__(self):
        pass

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype):
        '''A trivial argument generator for operators that don't take any
        non-tensor arguments'''
        return [('', [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype):
        '''Build the axis argument for operators that take a single axis'''
        axes = []

        shape = shapeList[0]

        for a in range(0, len(shape)):
            axes.append(('axis_{}'.format(a), [a]))
        return axes

    @staticmethod
    def agConv2D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert(len(ifm_shape) == 4)
        assert(len(filter_shape) == 4)

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for padding in range(0, maxPadding ** 4):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1,
                         stride % maxStride + 1]
                    # Decode the counter as a base-maxPadding number with
                    # four digits, one per padded edge
                    p = [(padding // (maxPadding ** 3)) % maxPadding,
                         (padding // (maxPadding ** 2)) % maxPadding,
                         (padding // (maxPadding ** 1)) % maxPadding,
                         padding % maxPadding]
                    d = [dilation // maxDilation + 1,
                         dilation % maxDilation + 1]

                    # 4 padding parameters for regular conv2d
                    arg_list.append(('st{}{}_pad{}{}{}{}_dilat{}{}'.format(s[0], s[1],
                                                                           p[0], p[1], p[2], p[3],
                                                                           d[0], d[1]),
                                     [s, p, d]))
        return arg_list
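
    # Example (illustrative only): with maxStride = 2, the stride counter
    # 0..3 decodes to s = [1,1], [1,2], [2,1], [2,2].  Padding decodes the
    # same way in base maxPadding across four digits, so maxPadding = 2
    # enumerates all 16 combinations of 0/1 padding on the four edges.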

    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert(len(ifm_shape) == 4)
        assert(len(filter_shape) == 4)

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for out_padding in range(0, maxPadding ** 2):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1,
                         stride % maxStride + 1]
                    p = [(out_padding // maxPadding) % maxPadding,
                         out_padding % maxPadding]
                    d = [dilation // maxDilation + 1,
                         dilation % maxDilation + 1]

                    oh = (ifm_shape[1] - filter_shape[1] - (filter_shape[1] - 1) * (d[0] - 1) +
                          2 * p[0]) // s[0] + 1

                    ow = (ifm_shape[2] - filter_shape[2] - (filter_shape[2] - 1) * (d[1] - 1) +
                          2 * p[1]) // s[1] + 1

                    # Output shape
                    os = [ifm_shape[0], oh, ow, filter_shape[0]]

                    arg_list.append(('st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}'.format(s[0], s[1],
                                                                                        p[0], p[1],
                                                                                        d[0], d[1],
                                                                                        os[0], os[1], os[2], os[3]),
                                     [s, p, d, os]))

        return arg_list
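
    # Worked example of the output-size expression above (illustrative
    # only): ifm H = 8, filter KH = 3, d = [1, 1], p = [0, 0], s = [1, 1]
    # gives oh = (8 - 3 - (3 - 1) * 0 + 0) // 1 + 1 = 6.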

    @staticmethod
    def agPad(testGen, opName, shapeList, dtype):
        arg_list = []
        rank = len(shapeList[0])

        # Exhaustively test combinations of 0/1 padding on each side of each dimension
        # This process might need some revision for >1 padding, but use a
        # (rank * 2)-bit bitmask for now
        for v in range(1 << (rank * 2)):

            # Create a flat padding array
            paddings = np.zeros((rank * 2), dtype=np.int32)

            # Fill in the 1's
            for r in (range(rank * 2)):
                if (v >> r) & 1:
                    paddings[r] = 1

            # Reshape back to a 2D array
            paddings = paddings.reshape((rank, 2))

            arg_list.append(('pad{0:b}'.format(v), [paddings]))

        return arg_list
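
    # Example (illustrative only): for rank = 2 the mask v runs over 16
    # values; v = 0b0101 sets flat paddings [1, 0, 1, 0], which reshapes
    # to [[1, 0], [1, 0]] -- one element of 'before' padding on each
    # dimension and none after.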

    @staticmethod
    def agPooling(testGen, opName, shapeList, dtype):
        arg_list = []

        shape = shapeList[0]
        assert(len(shape) == 4)

        maxStride = testGen.args.max_pooling_stride
        maxKernel = testGen.args.max_pooling_kernel
        maxPadding = testGen.args.max_pooling_padding + 1

        for kernel in range(0, maxKernel ** 2):
            for stride in range(0, maxStride ** 2):
                for padding in range(0, maxPadding ** 4):
                    s = [stride // maxStride + 1,
                         stride % maxStride + 1]
                    k = [(kernel // maxKernel) + 2,
                         (kernel % maxKernel) + 2]
                    # Decode the counter as a base-maxPadding number with
                    # four digits, one per padded edge
                    p = [(padding // (maxPadding ** 3)) % maxPadding,
                         (padding // (maxPadding ** 2)) % maxPadding,
                         (padding // (maxPadding ** 1)) % maxPadding,
                         padding % maxPadding]

                    arg_list.append(('st{}{}_kern{}{}_pad{}{}{}{}'.format(s[0], s[1],
                                                                          k[0], k[1],
                                                                          p[0], p[1], p[2], p[3]),
                                     [k, s, p]))
        return arg_list
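
    # Example (illustrative only): with maxKernel = 2 the kernel counter
    # 0..3 decodes to k = [2, 2], [2, 3], [3, 2], [3, 3] -- kernel sizes
    # start at 2, unlike strides and dilations which start at 1.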

    @staticmethod
    def agCast(testGen, opName, shapeList, inDtype):
        arg_list = []

        # Enumerate the output types here
        if inDtype == DType.INT8:
            dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT16:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT32:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        elif inDtype == DType.BOOL:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif inDtype == DType.FLOAT:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        else:
            raise Exception('Unexpected input dtype: {}'.format(inDtype))

        for dtype in dtypeList:
            arg_list.append(('out{}'.format(DTypeNames[dtype]), [dtype]))

        return arg_list

    @staticmethod
    def agRescale(testGen, opName, shapeList, inDtype):
        arg_list = []

        # Enumerate the output types here
        for dtype in [DType.AINT8, DType.INT16, DType.INT32]:
            for scale32 in [False, True]:
                for double_round in [False, True]:
                    for per_channel in [False, True]:

                        if inDtype == DType.INT48 and scale32:
                            # Illegal condition.  Must be scale32=False
                            continue

                        arg_list.append(('out{}_sc{}_dr{}_pc{}'.format(DTypeNames[dtype], int(scale32), int(double_round), int(per_channel)),
                                         [dtype, scale32, double_round, per_channel]))

        return arg_list
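
    # Example (illustrative only): inDtype = DType.INT16 produces entries
    # such as ('outi32_sc1_dr0_pc1', [DType.INT32, True, False, True]),
    # assuming DTypeNames maps DType.INT32 to 'i32'.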

    @staticmethod
    def agMul(testGen, opName, shapeList, dtype):
        arg_list = []

        if dtype == DType.INT32:
            for p in range(testGen.args.num_rand_permutations):

                shift = testGen.randInt(0, 32)

                arg_list.append(('perm{}_shift{}'.format(p, shift), [shift]))
        else:
            arg_list.append(('shift0', [0]))

        return arg_list

    @staticmethod
    def agArithmeticRightShift(testGen, opName, shapeList, dtype):
        arg_list = []

        arg_list.append(('roundTrue', [True]))
        arg_list.append(('roundFalse', [False]))

        return arg_list

    # Helper function for reshape.  Gets some factors of a larger number.
    @staticmethod
    def getFactors(val, start=1):
        factors = []

        # Extend the range by one so the square root itself is included
        for i in range(start, int(np.sqrt(val)) + 1):
            if (val % i) == 0:
                factors.append(i)

        return factors
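
    # Example (illustrative only): getFactors(36) scans 1..6 and returns
    # [1, 2, 3, 4, 6] -- only factors up to the square root, which is all
    # agReshape needs since the residue is appended as the final dimension.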

    @staticmethod
    def agReshape(testGen, opName, shapeList, dtype):
        arg_list = []

        origShape = shapeList[0]

        totalElements = 1
        for s in origShape:
            totalElements *= s

        # This code is NOT fast.  Fortunately, the numbers are fairly small.
        factors = TosaArgGen.getFactors(totalElements)

        for p in range(testGen.args.num_rand_permutations):
            newRank = testGen.randInt(1, 6)
            newShape = []
            if (len(factors) < newRank):
                continue

            remainingElements = totalElements
            shuffledFactors = testGen.rng.permutation(factors)
            # Pick (rank - 1) factors, then append the residue as the final
            # dimension so the element count is preserved
            for i in range(newRank - 1):
                newShape.append(shuffledFactors[0])
                remainingElements = remainingElements // shuffledFactors[0]
                shuffledFactors = testGen.rng.permutation(TosaArgGen.getFactors(remainingElements))
            newShape.append(remainingElements)

            # Toss in a -1 sometimes
            minusOne = testGen.randInt(0, newRank * 4)
            if minusOne < newRank:
                newShape[minusOne] = -1

            arg_list.append(('perm{}_rank{}'.format(p, newRank), [newShape]))

        return arg_list
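
    # Example (illustrative only): origShape = [2, 3, 4] has 24 elements;
    # for newRank = 2 one pass might pick the factor 4 and then append the
    # residue 24 // 4 = 6, giving newShape = [4, 6].  A 1-in-4 chance
    # replaces one dimension with -1 to exercise shape inference.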


    @staticmethod
    def agTranspose(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]

        perms = range(len(ifm_shape))
        for p in range(testGen.args.num_rand_permutations):
            perms = np.int32(testGen.rng.permutation(perms)).tolist()

            # Avoid duplicates
            found = False
            for name, other_perm in arg_list:
                if other_perm[0] == perms:
                    found = True
                    break

            if not found:
                arg_list.append(('perm{}'.format(p), [perms]))

        return arg_list

    @staticmethod
    def agSlice(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):
            begin = []
            size = []

            valid = True

            for i in range(rank):
                if ifm_shape[i] > 1:
                    begin.append(testGen.randInt(0, ifm_shape[i]))
                    size.append(testGen.randInt(0, ifm_shape[i] - begin[i]))

                    # Invalid slice size?
                    if size[i] == 0:
                        valid = False
                else:
                    begin.append(0)
                    size.append(1)

            if valid:
                arg_list.append(('perm{}'.format(p), [begin, size]))
        return arg_list

    @staticmethod
    def agTile(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):

            # Pick a few random, but small multiple values
            # because otherwise this has a tendency to generate
            # enormous tensors
            multiples = []
            for i in range(rank):
                multiples.append(testGen.randInt(1, 4))

            arg_list.append(('perm{}'.format(p), [multiples]))

        return arg_list

    @staticmethod
    def agResize(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]

        for m in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:

            # Exclude illegal {mode, type} configurations.  Pick legal output types
            if m == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif m == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            else:
                continue

            for outputDType in outputDTypeList:
                for perm in range(testGen.args.num_rand_permutations):

                    # Randomly generate legal output dimensions and shift
                    # and then compute the stride and offset based on them.
                    # Start at 1 to avoid a zero-sized dimension and a
                    # divide-by-zero in the stride calculation below
                    output_dims = [testGen.randInt(1), testGen.randInt(1)]

                    shift = testGen.randInt(1, 11)

                    stride = [(ifm_shape[1] << shift) // output_dims[0],
                              (ifm_shape[2] << shift) // output_dims[1]]

                    offset = [testGen.randInt(-stride[0], (ifm_shape[1] << shift) - (output_dims[0] - 1) * stride[0]),
                              testGen.randInt(-stride[1], (ifm_shape[2] << shift) - (output_dims[1] - 1) * stride[1])]

                    arg_list.append(('mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}'.format(m, shift, output_dims[0], output_dims[1],
                                                                                              testGen.typeStr(outputDType), stride[0], stride[1],
                                                                                              offset[0], offset[1]),
                                     [m, stride, offset, shift, output_dims, dtype, outputDType]))

        return arg_list
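
    # Worked example (illustrative only): ifm height 10, shift = 8 and
    # output height 5 give stride[0] = (10 << 8) // 5 = 512, i.e. two
    # input pixels per output pixel in 8-bit fixed point (512 / 2**8 == 2.0).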

    @staticmethod
    def agCondIf(testGen, opName, shapeList, dtype):
        # CondIf generates the condition values here.
        # Convert to tensors in the build function, along with the
        # then and else blocks
        arg_list = []

        for c in [False, True]:
            arg_list.append(('cond{}'.format(int(c)), [c]))

        return arg_list

    @staticmethod
    def agWhileLoop(testGen, opName, shapeList, dtype):
        # While loop: 0 iterations, 1, more than 1
        arg_list = []

        for iter in [0, 1, 4]:
            arg_list.append(('iter{}'.format(iter), [iter]))

        return arg_list

class TosaTestGen:
    def __init__(self, args):
        self.args = args
        self.basePath = args.output_dir
        self.random_seed = args.random_seed
        self.ser = None
        self.rng = np.random.default_rng(self.random_seed)
        self.createDynamicOpLists()
        self.initOpListDefaults()
        self.quantGen = TosaQuantGen()
        # Force makeShape to do a specific starting shape
        self.targetted_shape = None

    def createSerializer(self, opName, testPath):
        self.testPath = os.path.join(opName, testPath)

        fullPath = os.path.join(self.basePath, self.testPath)
        os.makedirs(fullPath, exist_ok=True)
        self.ser = ts.TosaSerializer(fullPath)

    def getSerializer(self):
        return self.ser

    def serialize(self, testName):
        with open(os.path.join(self.basePath, self.testPath, '{}.tosa'.format(testName)), 'wb') as fd:
            fd.write(self.ser.serialize())

        with open(os.path.join(self.basePath, self.testPath, 'desc.json'), 'w') as fd:
            fd.write(self.ser.writeJson('{}.tosa'.format(testName)))

    def getRandTensor(self, shape, dtype):
        RAND_SHIFT_FACTOR = 0.5
        RAND_SCALE_FACTOR = 4.0

        if dtype == DType.BOOL:
            return np.bool_(self.rng.choice(a=[False, True], size=shape))
        elif dtype == DType.AINT8:
            return np.int32(self.rng.integers(low=0, high=256, size=shape))
        elif dtype == DType.INT4:
            return np.int32(self.rng.integers(low=-7, high=8, size=shape))
        elif dtype == DType.INT8:
            return np.int32(self.rng.integers(low=-127, high=128, size=shape))
        elif dtype == DType.INT16:
            return np.int32(self.rng.integers(low=-32768, high=32768, size=shape))
        elif dtype == DType.INT32:
            return np.int32(self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape))
        elif dtype == DType.INT48:
            return np.int64(self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape))
        elif dtype == DType.FLOAT:
            # Shift the uniform [0, 1) samples to be zero-centred, then
            # scale, giving values in [-2.0, 2.0)
            return np.float32((self.rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR)
        else:
            raise Exception('Unrecognized Dtype: {}'.format(dtype))

    def buildPlaceholderTensors(self, shape_list, dtype):
        placeholders = []

        for shape in shape_list:
            arr = self.getRandTensor(shape, dtype)
            placeholders.append(self.ser.addPlaceholder(shape, dtype, Usage.ACTIVATION, [], arr))

        return placeholders

    def buildConstTensors(self, shape_list, dtype):
        consts = []

        for shape in shape_list:
            arr = self.getRandTensor(shape, dtype)
            consts.append(self.ser.addConst(shape, dtype, Usage.ACTIVATION, [], arr))

        return consts

    def makeShape(self, rank):
        if self.targetted_shape:
            return np.int32(self.targetted_shape)
        return np.int32(self.rng.integers(low=self.args.tensor_shape_range[0],
                                          high=self.args.tensor_shape_range[1],
                                          size=rank))

    def setTargetShape(self, shape):
        self.targetted_shape = shape

    def randInt(self, low=0, high=256):
        return np.int32(self.rng.integers(low=low, high=high, size=1))[0]

    def getRandNumberDType(self, dtype):
        if dtype == DType.FLOAT:
            return self.rng.random()
        elif dtype == DType.BOOL:
            return self.rng.choice([False, True])
        elif dtype == DType.INT4:
            low, high = (-7, 8)
        elif dtype == DType.AINT8:
            low, high = (0, 256)
        elif dtype == DType.INT8:
            low, high = (-127, 128)
        elif dtype == DType.INT16:
            low, high = (-32768, 32768)
        elif dtype == DType.INT32:
            low, high = (-(1 << 31), (1 << 31))
        elif dtype == DType.INT48:
            low, high = (-(1 << 47), (1 << 47))
            # Special size: needs 64-bit storage
            return np.int64(self.rng.integers(low, high, size=1))[0]
        else:
            raise Exception('Unknown dtype: {}'.format(dtype))

        return np.int32(self.rng.integers(low, high, size=1))[0]

    def shapeStr(self, shape):

        sStr = []
        # Convert to strings
        for i in shape:
            sStr.append(str(i))

        return 'x'.join(sStr)

    def typeStr(self, t):
        if t == DType.BOOL:
            return 'b'
        elif t == DType.AINT8:
            return 'a8'
        elif t == DType.INT4:
            return 'i4'
        elif t == DType.INT8:
            return 'i8'
        elif t == DType.INT16:
            return 'i16'
        elif t == DType.INT32:
            return 'i32'
        elif t == DType.INT48:
            return 'i48'
        elif t == DType.FLOAT:
            return 'float'
        else:
            raise Exception('Unknown dtype, cannot convert to string: {}'.format(t))

    def typeWidth(self, t):
        '''Get the datatype width for integer types'''
        if t == DType.AINT8:
            return 8
        elif t == DType.UINT8:
            return 8
        elif t == DType.INT4:
            return 4
        elif t == DType.INT8:
            return 8
        elif t == DType.INT16:
            return 16
        elif t == DType.INT32:
            return 32
        elif t == DType.INT48:
            return 48
        else:
            raise Exception('Unknown dtype, cannot determine width: {}'.format(t))

    # Operator build functions
    # Each returns the result tensor.  The argument generators above supply
    # tuples of (stringDescriptor, [build_fcn_arg_list]), where the string
    # descriptor is used to generate the test name and the
    # build_fcn_arg_list is expanded and passed to the operator test
    # build function


    def build_unary(self, op, a, qinfo=None):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_binary_broadcast(self, op, a, b):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_binary_nonbroadcast(self, op, a, b):
        result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_arithmetic_right_shift(self, op, a, b, round):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)

        attr = ts.TosaSerializerAttribute()
        attr.ArithmeticRightShiftAttribute(round)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens

    def build_mul(self, op, a, b, shift):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)

        # Special for multiply:
        # Force the result to INT32 for INT types
        if a.dtype != DType.FLOAT:
            result_tens.setDtype(DType.INT32)

        attr = ts.TosaSerializerAttribute()
        attr.MulAttribute(shift)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens

    def build_table(self, op, a):
        # Constant size, random values
        table_arr = self.getRandTensor([513], DType.INT16)
        table_tens = self.ser.addConst(table_arr.shape, DType.INT16, Usage.INDEX, [], table_arr)

        result_tens = OutputShaper.tableOp(self.ser, a, table_tens)
        self.ser.addOperator(op, [a.name, table_tens.name], [result_tens.name], None)

        return result_tens

    def build_select(self, op, cond, a, b):

        # Replace the cond tensor with a boolean tensor since it probably
        # has the wrong dtype
        t = self.buildPlaceholderTensors([cond.shape], DType.BOOL)
        cond = t[0]

        result_tens = OutputShaper.selectOp(self.ser, cond, a, b)
        self.ser.addOperator(op, [cond.name, a.name, b.name], [result_tens.name])

        return result_tens

    def build_comparison(self, op, a, b):
        result_tens = OutputShaper.binaryComparisonOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_argmax(self, op, a, axis):
        result_tens = OutputShaper.argmaxOp(self.ser, a, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_pool2d(self, op, input, kernel, stride, pad, qinfo=None):
        result_tens = OutputShaper.pool2dOp(self.ser, input, kernel, stride, pad)

        attr = ts.TosaSerializerAttribute()
        attr.Pool2dAttribute(kernel, stride, pad)
        input.addFormat(Format.NHWC)

        self.ser.addOperator(op, [input.name], [result_tens.name], attr, qinfo)
        return result_tens

    def build_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
        assert(len(padding) == 4)
        result_tens = OutputShaper.conv2dOp(self.ser, ifm, filter, strides, padding, dilations)

        attr = ts.TosaSerializerAttribute()
        attr.Conv2dAttribute(padding, strides, dilations)

        ifm.addFormat(Format.NHWC)
        # Update the filter ordering
        filter.addUsage(Usage.WEIGHT)
        filter.addFormat(Format.OHWI)

        self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo)
        return result_tens

    def build_transpose_conv2d(self, op, ifm, filter, stride, outpad, dilation, output_shape, qinfo):
        assert(len(outpad) == 2)
        result_tens = OutputShaper.transposeConv2DOp(self.ser, ifm, output_shape)

        attr = ts.TosaSerializerAttribute()
        attr.TransposeConv2DAttribute(outpad, stride, dilation, output_shape)

        ifm.addFormat(Format.NHWC)
        # Update the filter ordering
        filter.addUsage(Usage.WEIGHT)
        filter.addFormat(Format.OHWI)

        # Create bias here since the acc_t depends on (but isn't the same as) the input dtype
        # The bias is OC
        if ifm.dtype == DType.AINT8 or ifm.dtype == DType.INT8:
            bias_type = DType.INT32
        elif ifm.dtype == DType.INT16:
            bias_type = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            bias_type = DType.FLOAT
        else:
            raise Exception('Unsupported dtype for transpose_conv2d: {}'.format(ifm.dtype))

        bias_arr = self.getRandTensor([filter.shape[0]], bias_type)
        bias_tens = self.ser.addConst([filter.shape[0]], bias_type, [], [], bias_arr)

        self.ser.addOperator(op, [ifm.name, filter.name, bias_tens.name], [result_tens.name], attr, qinfo)
        return result_tens

    def build_depthwise_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
        result_tens = OutputShaper.depthwiseConv2dOp(self.ser, ifm, filter, strides, padding, dilations)

        attr = ts.TosaSerializerAttribute()
        attr.Conv2dAttribute(padding, strides, dilations)

        ifm.addFormat(Format.NHWC)
        filter.addUsage(Usage.WEIGHT)
        filter.addFormat(Format.HWIM)

        self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo)
        return result_tens

    def build_fully_connected(self, op, ifm, filter, bias, qinfo):
        result_tens = OutputShaper.fullyConnectedOp(self.ser, ifm, filter)

        filter.addUsage(Usage.WEIGHT)
        self.ser.addOperator(op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_matmul(self, op, a, b, qinfo):
        result_tens = OutputShaper.matmulOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_reduce(self, op, a, axis):
        result_tens = OutputShaper.reduceOp(self.ser, a, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_clamp(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()

        # Get two random ints
        v = [self.randInt(), self.randInt()]

        if a.dtype == DType.FLOAT:
            attr.ClampAttribute(0, 0, min(v), max(v))
        else:
            attr.ClampAttribute(min(v), max(v), 0, 0)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_leaky_relu(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        attr = ts.TosaSerializerAttribute()

        attr.LeakyReluAttribute(self.getRandNumberDType(DType.FLOAT))

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    # Needs an additional type/input
    def build_prelu(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_relun(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()

        if a.dtype == DType.FLOAT:
            attr.ReluNAttribute(0, self.getRandNumberDType(a.dtype))
        else:
            attr.ReluNAttribute(self.getRandNumberDType(a.dtype), 0)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_sigmoid(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_tanh(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_concat(self, op, a, b, axis):
        result_tens = OutputShaper.concatOp(self.ser, a, b, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens

    def build_pad(self, op, a, padding, qinfo):
        result_tens = OutputShaper.padOp(self.ser, a, padding)

        # Need to turn the padding array into a TOSA tensor here.
        # This is one of the few tensor operands that does not get
        # randomly generated
        padding_tens = self.ser.addConst(padding.shape, DType.INT32, [], [], padding)

        self.ser.addOperator(op, [a.name, padding_tens.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_reshape(self, op, a, newShape):
        result_tens = OutputShaper.reshapeOp(self.ser, a, newShape)

        attr = ts.TosaSerializerAttribute()
        attr.ReshapeAttribute(newShape)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_reverse(self, op, a, axis):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_transpose(self, op, a, perms):
        result_tens = OutputShaper.transposeOp(self.ser, a, perms)

        perms_tens = self.ser.addConst([len(perms)], DType.INT32, Usage.ACTIVATION, [], np.int32(perms))

        self.ser.addOperator(op, [a.name, perms_tens.name], [result_tens.name])
        return result_tens

    def build_slice(self, op, a, begin, size):
        result_tens = OutputShaper.sliceOp(self.ser, a, begin, size)

        attr = ts.TosaSerializerAttribute()
        attr.SliceAttribute(begin, size)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_tile(self, op, a, multiples):
        result_tens = OutputShaper.tileOp(self.ser, a, multiples)

        attr = ts.TosaSerializerAttribute()
        attr.TileAttribute(multiples)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens


    def build_gather(self, op, values, axis):

        # Create a new indices tensor here with data that doesn't exceed
        # the dimensions of the values tensor
        max_val = values.shape[axis]
        indices_arr = np.int32(self.rng.integers(low=0, high=max_val, size=[self.randInt(1, max_val + 1)]))
        indices = self.ser.addConst(indices_arr.shape, DType.INT32, Usage.INDEX, [], indices_arr)

        result_tens = OutputShaper.gatherOp(self.ser, values, indices, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [values.name, indices.name], [result_tens.name], attr)

        return result_tens

    def build_resize(self, op, input, mode, stride, offset, shift, output_dims, input_dtype, output_dtype):
        result_tens = OutputShaper.resizeOp(self.ser, input, mode, stride, offset, shift, output_dims, input_dtype, output_dtype)

        attr = ts.TosaSerializerAttribute()
        attr.ResizeAttribute(output_dims, stride, offset, shift, mode)

        self.ser.addOperator(op, [input.name], [result_tens.name], attr)
        return result_tens

    def build_identityn(self, op, val, val2):

        result_tens = OutputShaper.unaryOp(self.ser, val)
        result_tens2 = OutputShaper.unaryOp(self.ser, val2)
        self.ser.addOperator(op, [val.name, val2.name], [result_tens.name, result_tens2.name])
        return result_tens

    def build_placeholder(self, op, val):
        # Add an identity op to avoid warning in the reference model
        return self.build_unary(Op.IDENTITY, val)

    # Type Conversion
    def build_cast(self, op, val, out_dtype):
        result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
        self.ser.addOperator(op, [val.name], [result_tens.name])
        return result_tens

    def build_rescale(self, op, val, out_dtype, scale32, double_round, per_channel):
        result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)

        if per_channel:
            nc = val.shape[-1]
        else:
            nc = 1

        in_type_width = self.typeWidth(val.dtype)
        out_type_width = self.typeWidth(out_dtype)

        if val.dtype == DType.AINT8:
            input_zp = self.randInt()
            in_type_width = in_type_width + 1
        else:
            input_zp = 0

        if out_dtype == DType.AINT8:
            output_zp = self.randInt()
            out_type_width = out_type_width + 1
        else:
            output_zp = 0

        # Calculate scale based on:
        # scale = a * (2^output_width) / (2^input_width)
        a = np.float32(self.rng.random(size=[nc]))
        scale_arr = a * np.float32((1 << out_type_width) / (1 << in_type_width))

        if scale32:
            # Cap the scaling at 2^31 - 1 for scale32
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1)
        else:
            # Cap the scaling at 2^15 - 1 for scale16
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)

        #print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))

        multiplier_arr = np.int32(np.zeros(shape=[nc]))
        shift_arr = np.int32(np.zeros(shape=[nc]))

        for i in range(nc):
            multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(scale_arr[i], scale32)
            if shift_arr[i] < 2 or shift_arr[i] > 62:
                self.ser.setExpectedFailure(True, 'OpRescale: invalid shift value')

        #print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp))

        attr = ts.TosaSerializerAttribute()
        attr.RescaleAttribute(input_zp,
                              output_zp,
                              multiplier_arr,
                              shift_arr,
                              scale32,
                              double_round,
                              per_channel)

        self.ser.addOperator(op, [val.name], [result_tens.name], attr)
        return result_tens
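
    # Worked example (illustrative only): for val.dtype == DType.AINT8 and
    # out_dtype == DType.INT16 the effective widths are 9 (8 plus the
    # zero-point bit) and 16, so scale_arr = a * 2**16 / 2**9 = a * 128
    # with a drawn from [0, 1).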

    def build_cond_if_const(self, op, then_tens, else_tens, cond):
        # For cond_if with constants, we're supplied with then/else tensors that we ignore
        # (except for the generated shape) and the condition.  Build Then/Else blocks
        # and fill them with const nodes for the body.

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, Usage.ACTIVATION, [], [cond])

        # Make then/else tensors
        out_shape = then_tens.shape
        then_arr = np.int32(self.rng.integers(0, 255, size=out_shape))
        else_arr = np.int32(self.rng.integers(0, 255, size=out_shape))

        # And the result tensor based on any of the outputs
        result_tens = self.ser.addOutput(out_shape, DType.INT32, Usage.ACTIVATION, [])

        # Create the attribute with the names of the then/else blocks
        then_block = 'THEN_BLOCK'
        else_block = 'ELSE_BLOCK'
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(op, [cond_tens.name], [result_tens.name], attr)

        self.ser.startBasicBlock(then_block)
        # Build the actual then/else tensors inside their blocks
        then_tens = self.ser.addConst(out_shape, DType.INT32, Usage.ACTIVATION, [], then_arr)
        self.ser.addOutputTensor(then_tens)

        self.ser.startBasicBlock(else_block)
        else_tens = self.ser.addConst(out_shape, DType.INT32, Usage.ACTIVATION, [], else_arr)
        self.ser.addOutputTensor(else_tens)

        return result_tens

    def build_cond_if_binary(self, op, a, b, cond):
        # For cond_if with a binary op in the then/else blocks, take a and b and
        # alternately add or subtract them based on the condition

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, Usage.ACTIVATION, [], [cond])

        result_tens = self.ser.addOutput(a.shape, a.dtype, Usage.ACTIVATION, [])
        self.ser.currBasicBlock.addOutput(result_tens.name)

        # Create the attribute with the names of the then/else blocks
        then_block = 'THEN_BLOCK'
        else_block = 'ELSE_BLOCK'
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(op, [cond_tens.name, a.name, b.name], [result_tens.name], attr)

        self.ser.startBasicBlock(then_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        then_tens = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
        self.ser.addOperator(Op.ADD, [a.name, b.name], [then_tens.name])

        self.ser.startBasicBlock(else_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        else_tens = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
        self.ser.addOperator(Op.SUB, [a.name, b.name], [else_tens.name])

        return result_tens

    def build_while_loop(self, op, a, iter_val):
        iter = self.ser.addPlaceholder([], DType.INT32, Usage.ACTIVATION, [], [np.int32(iter_val)])

        cond_block = 'COND_BLOCK'
        body_block = 'BODY_BLOCK'

        attr = ts.TosaSerializerAttribute()
        attr.WhileLoopAttribute(cond_block, body_block)

        # Accumulator tensor
        #acc = self.ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)
        acc_init_val = np.int32(np.zeros(a.shape))
        acc = self.ser.addPlaceholder(a.shape, a.dtype, a.usage, a.dformat, acc_init_val)

        # Intermediate/output tensors for everything going through the loop
        iter_out = self.ser.addIntermediate(iter.shape, iter.dtype, iter.usage, iter.dformat)
        a_out = self.ser.addIntermediate(a.shape, a.dtype, a.usage, a.dformat)
        acc_out = self.ser.addIntermediate(acc.shape, acc.dtype, acc.usage, acc.dformat)

        # While_loop operator
        self.ser.addOperator(op,
                             [iter.name, a.name, acc.name],
                             [iter_out.name, a_out.name, acc_out.name], attr)

        # COND block (input: iter, output: cond_tens)
        self.ser.startBasicBlock(cond_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        zero_tens = self.ser.addConst([], DType.INT32, [], [], [np.int32(0)])
        cond_tens = self.ser.addOutput([], DType.BOOL, [], [])
        self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name],
                             [cond_tens.name])

        # BODY block (input: a, acc, iter, output: a, acc, iter)
        # Note that local intermediate tensors need to be declared here for the outputs
        self.ser.startBasicBlock(body_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        one_tens = self.ser.addConst([], DType.INT32, [], [], [np.int32(1)])
        iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype, iter.usage, iter.dformat)
        acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype, acc.usage, acc.dformat)
        self.ser.addOperator(Op.ADD, [a.name, acc.name], [acc_body_out.name])
        self.ser.addOperator(Op.SUB, [iter.name, one_tens.name], [iter_body_out.name])
        self.ser.addOutputTensor(iter_body_out)
        self.ser.addOutputTensor(a)
        self.ser.addOutputTensor(acc_body_out)

        return acc_out
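
    # Semantics sketch (illustrative only): the body adds 'a' into the
    # accumulator and decrements the iteration counter, and the condition
    # tests iter > 0, so with iter_val = 4 the loop output acc_out should
    # equal 4 * a element-wise.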


    def genOpTestList(self, opName, shapeFilter=[None], rankFilter=None, dtypeFilter=None):

        try:
            op = self.TOSA_OP_LIST[opName]
        except KeyError as e:
            raise Exception('Cannot find op with name {}'.format(opName))

        # Initialize a new random number generator
        self.rng = np.random.default_rng(self.random_seed)

        build_fcn, tgen_fcn, agen_fcn = op['build_fcn']

        # Generate the lists of arguments
        rmin, rmax = op['rank']

        # Test list consists of tuples of:
        # (opName, testNameStr, dtype, shapeList, argumentsList)
        testList = []

        if not shapeFilter:
            shapeFilter = [None]

        for r in range(rmin, rmax + 1):

            # Filter out the rank?
            if rankFilter is not None and r not in rankFilter:
                continue

            for t in op['types']:

                # Filter tests based on dtype?
                if dtypeFilter is not None:
                    if t not in dtypeFilter:
                        continue

                # Create the placeholder and const tensors
                for shape in shapeFilter:
                    # A None shape chooses a random shape of a given rank

                    # Filter out by rank
                    if shape is not None and len(shape) != r:
                        continue

                    self.setTargetShape(shape)
                    shapeList = tgen_fcn(self, op, r)

                    shapeStr = self.shapeStr(shapeList[0])
                    typeStr = self.typeStr(t)

                    # Argument lists consist of tuples of the (str, []) string representation and the build function argument list
                    argList = []
                    if agen_fcn:
                        argList = agen_fcn(self, opName, shapeList, t)
                    else:
                        argList = [('', [])]

                    for argStr, args in argList:
                        if argStr:
                            testStr = '{}_{}_{}_{}'.format(opName, shapeStr, typeStr, argStr)
                        else:
                            testStr = '{}_{}_{}'.format(opName, shapeStr, typeStr)

                        testList.append((opName, testStr, t, shapeList, args))

        return testList
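
    # Example (illustrative only): a generated test name concatenates the
    # op, shape, type and argument descriptors, e.g. a conv2d test might
    # be named 'conv2d_3x3_1x16x16x8_i8_st11_pad0000_dilat11'.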

    def serializeTest(self, opName, testStr, dtype, shapeList, testArgs):
        try:
            op = self.TOSA_OP_LIST[opName]
        except KeyError as e:
            raise Exception('Cannot find op with name {}'.format(opName))

        # Create a serializer
        self.createSerializer(opName, testStr)

        build_fcn, tgen_fcn, agen_fcn = op['build_fcn']
        pCount, cCount = op['operands']

        try:
            qgen = op['qgen']
        except KeyError:
            qgen = None

        # Build the random tensor operands and the test
        tens = []

        # If test is ArithmeticRightShift, force the value of operand[1] to be within [0, num_bits]
        if op['op'] == Op.ARITHMETIC_RIGHT_SHIFT:
            assert pCount == 2 and cCount == 0, 'Op.ArithmeticRightShift must have 2 placeholders, 0 consts'

            placeholders = []
            for idx, shape in enumerate(shapeList[:]):
                if idx == 1:
                    if dtype == DType.INT8:
                        arr = np.int32(self.rng.integers(low=0, high=8, size=shape))
                    elif dtype == DType.INT16:
                        arr = np.int32(self.rng.integers(low=0, high=16, size=shape))
                    elif dtype == DType.INT32:
                        arr = np.int32(self.rng.integers(low=0, high=32, size=shape))
                    else:
                        raise Exception('OpArithmeticRightShift: invalid input dtype')
                else:
                    arr = self.getRandTensor(shape, dtype)
                placeholders.append(self.ser.addPlaceholder(shape, dtype, Usage.ACTIVATION, [], arr))

            tens.extend(placeholders)
        else:
            tens.extend(self.buildPlaceholderTensors(shapeList[0:pCount], dtype))
            tens.extend(self.buildConstTensors(shapeList[pCount:], dtype))

        if qgen is not None:
            qinfo = qgen(self, op, dtype)
        else:
            qinfo = None

        try:
            if qinfo is not None:
                resultName = build_fcn(self, op['op'], *tens, *testArgs, qinfo)
            else:
                resultName = build_fcn(self, op['op'], *tens, *testArgs)
        except TypeError as e:
            print('build_fcn: {}\nTensors: {}\nArgs: {}\n'.format(build_fcn, tens, testArgs))
            raise e

        # Save the serialized test
        self.serialize('test')
| 1495 | |
    def createDynamicOpLists(self):

        # Dynamically create op lists for the convolutions with a list of kernel sizes
        KERNELS = [ [1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3] ]

        for k in KERNELS:
            for opBase in [ 'conv2d', 'depthwise_conv2d', 'transpose_conv2d' ]:
                testName = '{}_{}x{}'.format(opBase, k[0], k[1])
                self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[opBase + '_TEMPLATE'].copy()
                self.TOSA_OP_LIST[testName]['filter'] = k
                self.TOSA_OP_LIST[testName]['template'] = False

        # Delete the templates after having created the dynamic ops.
        # This is a two-pass operation because deleting keys from a
        # dictionary while iterating over it raises a RuntimeError.
        keyList = []
        for k in self.TOSA_OP_LIST:
            if self.TOSA_OP_LIST[k].get('template', False):
                keyList.append(k)

        for k in keyList:
            del self.TOSA_OP_LIST[k]

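    # A sketch of the resulting dictionary keys (assuming the three *_TEMPLATE
    # entries defined in TOSA_OP_LIST below): after createDynamicOpLists() runs,
    # TOSA_OP_LIST contains entries such as 'conv2d_1x1', 'conv2d_3x3',
    # 'depthwise_conv2d_5x5' and 'transpose_conv2d_1x3', each with its 'filter'
    # field set, and the '*_TEMPLATE' entries themselves have been deleted.
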
    def initOpListDefaults(self):
        '''Fill in default fields for ops if they aren't already specified.
           Look for missing required fields (data structure linting).'''
        for op in self.TOSA_OP_LIST:

            # Required fields
            try:
                pl, c = self.TOSA_OP_LIST[op]['operands']
            except (KeyError, ValueError, TypeError):
                raise Exception('Op {} is missing a valid operand tuple in TOSA_OP_LIST'.format(op))

            try:
                fcn, tgen, arggen = self.TOSA_OP_LIST[op]['build_fcn']
            except (KeyError, ValueError, TypeError):
                raise Exception('Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST'.format(op))

            try:
                types = self.TOSA_OP_LIST[op]['types']
            except KeyError:
                raise Exception('Op {} is missing a valid type list in TOSA_OP_LIST'.format(op))

            try:
                opcode = self.TOSA_OP_LIST[op]['op']
            except KeyError:
                raise Exception('Op {} is missing the Op field in TOSA_OP_LIST'.format(op))

            # Put in the default rank range, if missing
            if 'rank' not in self.TOSA_OP_LIST[op]:
                self.TOSA_OP_LIST[op]['rank'] = self.DEFAULT_RANK_RANGE

    # Tensor operator list
    #  'op': op name
    #  'operands': tuple of (placeholder, const) operand counts
    #  'rank': optional; restricts rank to an inclusive (min, max) tuple.  Defaults to (1, 4) if not specified.
    #  'build_fcn': tuple of (build function, tensor generator function, argument generator function)
    #  'types': array of datatypes to be tested
    TYPE_FP = [ DType.FLOAT ]

    # Types with an aint8
    TYPE_INT = [ DType.AINT8, DType.INT16, DType.INT32 ]                  # Most operators support AINT8 instead of INT8; excludes INT4
    TYPE_INT_FP = [ DType.AINT8, DType.INT16, DType.INT32, DType.FLOAT ]  # Most operators support AINT8 instead of INT8; excludes INT4

    # Types with an int8
    TYPE_PURE_INT = [ DType.INT8, DType.INT16, DType.INT32 ]                  # Note: excludes INT4
    TYPE_PURE_INT_FP = [ DType.INT8, DType.INT16, DType.INT32, DType.FLOAT ]  # Note: excludes INT4
    TYPE_BOOL = [ DType.BOOL ]
    TYPE_FI32 = [ DType.FLOAT, DType.INT32 ]
    TYPE_FIB = [ DType.FLOAT, DType.AINT8, DType.INT8, DType.INT16, DType.INT32, DType.BOOL ]
    TYPE_FI16 = [ DType.FLOAT, DType.INT16 ]

    TYPE_NARROW_INT_FP = [ DType.AINT8, DType.INT16, DType.FLOAT ]

    DEFAULT_RANK_RANGE = (1, 4)

    TOSA_OP_LIST = {
        # Binary ops
        'add':
        { 'op': Op.ADD,
          'operands': (2, 0),
          'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_FI32 },

        'arithmetic_right_shift':
        { 'op': Op.ARITHMETIC_RIGHT_SHIFT,
          'operands': (2, 0),
          'build_fcn': (build_arithmetic_right_shift, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agArithmeticRightShift),
          'types': TYPE_PURE_INT },

        'bitwise_and':
        { 'op': Op.BITWISE_AND,
          'operands': (2, 0),
          'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_INT },

        'bitwise_or':
        { 'op': Op.BITWISE_OR,
          'operands': (2, 0),
          'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_INT },

        'bitwise_xor':
        { 'op': Op.BITWISE_XOR,
          'operands': (2, 0),
          'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_INT },

        'logical_and':
        { 'op': Op.LOGICAL_AND,
          'operands': (2, 0),
          'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_BOOL },

        'logical_left_shift':
        { 'op': Op.LOGICAL_LEFT_SHIFT,
          'operands': (2, 0),
          'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_PURE_INT },

        'logical_right_shift':
        { 'op': Op.LOGICAL_RIGHT_SHIFT,
          'operands': (2, 0),
          'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_PURE_INT },

        'logical_or':
        { 'op': Op.LOGICAL_OR,
          'operands': (2, 0),
          'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_BOOL },

        'logical_xor':
        { 'op': Op.LOGICAL_XOR,
          'operands': (2, 0),
          'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_BOOL },

        'max':
        { 'op': Op.MAXIMUM,
          'operands': (2, 0),
          'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_FI32 },

        'min':
        { 'op': Op.MINIMUM,
          'operands': (2, 0),
          'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_FI32 },

        'mul':
        { 'op': Op.MUL,
          'operands': (2, 0),
          'build_fcn': (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul),
          'types': TYPE_PURE_INT_FP },

        'pow':
        { 'op': Op.POW,
          'operands': (2, 0),
          'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBasic, None),
          'types': TYPE_FP },

        'sub':
        { 'op': Op.SUB,
          'operands': (2, 0),
          'build_fcn': (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_FI32 },

        'table':
        { 'op': Op.TABLE,
          # Use the automatic generation functions to create the input array,
          # but create the table tensor in the build function, as it may be
          # a different type from the input.
          'operands': (1, 0),
          'build_fcn': (build_table, TosaTensorGen.tgBasic, None),
          'types': [ DType.INT16 ] },

        'argmax':
        { 'op': Op.ARGMAX,
          'operands': (1, 0),
          'build_fcn': (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
          'types': TYPE_FP },

        # Templated operator.  Filled in by createDynamicOpLists
        'conv2d_TEMPLATE':
        { 'op': Op.CONV2D,
          'operands': (1, 2),
          'rank': (4, 4),
          'build_fcn': (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D),
          'qgen': TosaQuantGen.qgConv,
          'types': TYPE_FP,
          'template': True },

        # Templated operator.  Filled in by createDynamicOpLists
        'depthwise_conv2d_TEMPLATE':
        { 'op': Op.DEPTHWISE_CONV2D,
          'operands': (1, 2),
          'filter': [1, 1],
          'rank': (4, 4),
          'build_fcn': (build_depthwise_conv2d, TosaTensorGen.tgDepthwiseConv2D, TosaArgGen.agConv2D),
          'qgen': TosaQuantGen.qgConv,
          'types': TYPE_FP,
          'template': True },

        # Templated operator.  Filled in by createDynamicOpLists
        'transpose_conv2d_TEMPLATE':
        { 'op': Op.TRANSPOSE_CONV2D,
          'operands': (1, 1),
          'rank': (4, 4),
          'build_fcn': (build_transpose_conv2d, TosaTensorGen.tgTransposeConv2D, TosaArgGen.agTransposeConv2D),
          'qgen': TosaQuantGen.qgConv,
          'types': TYPE_FP,
          'template': True },

        'fully_connected':
        { 'op': Op.FULLY_CONNECTED,
          'operands': (2, 0),
          'rank': (2, 2),
          'build_fcn': (build_fully_connected, TosaTensorGen.tgFullyConnected, None),
          'qgen': TosaQuantGen.qgConv,
          'types': TYPE_FP },

        'matmul':
        { 'op': Op.MATMUL,
          'operands': (2, 0),
          'rank': (2, 2),
          'build_fcn': (build_matmul, TosaTensorGen.tgMatmul, None),
          'qgen': TosaQuantGen.qgMatmul,
          'types': TYPE_NARROW_INT_FP },

        # Unary operators
        'abs':
        { 'op': Op.ABS,
          'operands': (1, 0),
          'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
          'types': TYPE_FI32 },

        'bitwise_not':
        { 'op': Op.BITWISE_NOT,
          'operands': (1, 0),
          'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
          'types': TYPE_INT },

        'ceil':
        { 'op': Op.CEIL,
          'operands': (1, 0),
          'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
          'types': TYPE_FP },

        'clz':
        { 'op': Op.CLZ,
          'operands': (1, 0),
          'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
          'types': [ DType.INT32 ] },

        'exp':
        { 'op': Op.EXP,
          'operands': (1, 0),
          'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
          'types': TYPE_FP },

        'floor':
        { 'op': Op.FLOOR,
          'operands': (1, 0),
          'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
          'types': TYPE_FP },

        'log':
        { 'op': Op.LOG,
          'operands': (1, 0),
          'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
          'types': TYPE_FP },
        'logical_not':
        { 'op': Op.LOGICAL_NOT,
          'operands': (1, 0),
          'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
          'types': TYPE_BOOL },

        'negate':
        { 'op': Op.NEGATE,
          'operands': (1, 0),
          'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
          'qgen': TosaQuantGen.qgUnary,
          'types': TYPE_INT_FP },

        'reciprocal':
        { 'op': Op.RECIPROCAL,
          'operands': (1, 0),
          'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
          'types': TYPE_FP },

        'rsqrt':
        { 'op': Op.RSQRT,
          'operands': (1, 0),
          'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
          'types': TYPE_FP },

        # Ternary operators
        'select':
        { 'op': Op.SELECT,
          'operands': (3, 0),
          'build_fcn': (build_select, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_FIB },

        # Comparison operators
        'equal':
        { 'op': Op.EQUAL,
          'operands': (2, 0),
          'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_FI32 },

        'greater_equal':
        { 'op': Op.GREATER_EQUAL,
          'operands': (2, 0),
          'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_FI32 },

        'greater':
        { 'op': Op.GREATER,
          'operands': (2, 0),
          'build_fcn': (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
          'types': TYPE_FI32 },

        # Pooling operators
        'avg_pool2d':
        { 'op': Op.AVG_POOL2D,
          'operands': (1, 0),
          'rank': (4, 4),
          'build_fcn': (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
          'qgen': TosaQuantGen.qgUnary,
          'types': TYPE_NARROW_INT_FP },

        'max_pool2d':
        { 'op': Op.MAX_POOL2D,
          'operands': (1, 0),
          'rank': (4, 4),
          'build_fcn': (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
          'types': TYPE_NARROW_INT_FP },

        # Reduce operators
        'reduce_any':
        { 'op': Op.REDUCE_ANY,
          'operands': (1, 0),
          'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
          'types': TYPE_BOOL },

        'reduce_all':
        { 'op': Op.REDUCE_ALL,
          'operands': (1, 0),
          'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
          'types': TYPE_BOOL },

        'reduce_max':
        { 'op': Op.REDUCE_MAX,
          'operands': (1, 0),
          'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
          'types': TYPE_INT_FP },

        'reduce_min':
        { 'op': Op.REDUCE_MIN,
          'operands': (1, 0),
          'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
          'types': TYPE_INT_FP },

        'reduce_product':
        { 'op': Op.REDUCE_PRODUCT,
          'operands': (1, 0),
          'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
          'types': TYPE_FP },

        'reduce_sum':
        { 'op': Op.REDUCE_SUM,
          'operands': (1, 0),
          'build_fcn': (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
          'types': TYPE_FI32 },

        # Activation functions
        'clamp':
        { 'op': Op.CLAMP,
          'operands': (1, 0),
          'build_fcn': (build_clamp, TosaTensorGen.tgBasic, None),
          'types': TYPE_NARROW_INT_FP },

        'relun':
        { 'op': Op.RELUN,
          'operands': (1, 0),
          'build_fcn': (build_relun, TosaTensorGen.tgBasic, None),
          'types': TYPE_FI32 },

        'sigmoid':
        { 'op': Op.SIGMOID,
          'operands': (1, 0),
          'build_fcn': (build_sigmoid, TosaTensorGen.tgBasic, None),
          'types': TYPE_FP },

        'tanh':
        { 'op': Op.TANH,
          'operands': (1, 0),
          'build_fcn': (build_tanh, TosaTensorGen.tgBasic, None),
          'types': TYPE_FP },

        # Data layout operators
        'concat':
        { 'op': Op.CONCAT,
          'operands': (2, 0),
          'build_fcn': (build_concat, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
          'types': TYPE_FIB },

        'pad':
        { 'op': Op.PAD,
          'operands': (1, 0),
          'build_fcn': (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad),
          'qgen': TosaQuantGen.qgPad,
          'types': TYPE_FIB },

        'reshape':
        { 'op': Op.RESHAPE,
          'operands': (1, 0),
          'build_fcn': (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
          'types': TYPE_FIB },

        'reverse':
        { 'op': Op.REVERSE,
          'operands': (1, 0),
          'build_fcn': (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
          'types': TYPE_FIB },

        'slice':
        { 'op': Op.SLICE,
          'operands': (1, 0),
          'build_fcn': (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
          'types': TYPE_FIB },

        'tile':
        { 'op': Op.TILE,
          'operands': (1, 0),
          'build_fcn': (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
          'types': TYPE_FIB },

        'transpose':
        { 'op': Op.TRANSPOSE,
          'operands': (1, 0),
          'rank': (2, 4),   # Do not allow transpose on rank=1
          'build_fcn': (build_transpose, TosaTensorGen.tgBasic, TosaArgGen.agTranspose),
          'types': TYPE_FIB },

        # Scatter/Gather
        'gather':
        { 'op': Op.GATHER,
          'operands': (1, 0),
          'build_fcn': (build_gather, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
          'types': TYPE_INT },

        # Image operations
        'resize':
        { 'op': Op.RESIZE,
          'operands': (1, 0),
          'rank': (4, 4),
          'build_fcn': (build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
          'types': [ DType.INT8, DType.INT16 ] },

        # Data nodes
        'placeholder':
        { 'op': Op.PLACEHOLDER,
          'operands': (1, 0),
          'build_fcn': (build_placeholder, TosaTensorGen.tgBasic, None),
          'types': TYPE_FIB },

        'const':
        { 'op': Op.CONST,
          'operands': (1, 0),
          'build_fcn': (build_placeholder, TosaTensorGen.tgBasic, None),
          'types': TYPE_FIB },

        'identity':
        { 'op': Op.IDENTITY,
          'operands': (1, 0),
          'build_fcn': (build_unary, TosaTensorGen.tgBasic, None),
          'types': TYPE_FIB },

        'identityn':
        { 'op': Op.IDENTITYN,
          'operands': (2, 0),
          'build_fcn': (build_identityn, TosaTensorGen.tgBasic, None),
          'types': TYPE_FIB },

        # Type conversion
        'cast':
        { 'op': Op.CAST,
          'operands': (1, 0),
          'build_fcn': (build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast),
          'types': [ DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL ] },

        'rescale':
        { 'op': Op.RESCALE,
          'operands': (1, 0),
          'build_fcn': (build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale),
          'types': [ DType.AINT8, DType.INT16, DType.INT32, DType.INT48 ] },

        # Custom
        # Not implemented.

        # Control flow

        # Two variants of cond_if: one generates one of two constant tensors (no
        # inputs to the basic blocks, one output), and the other either adds or
        # subtracts two tensors (two inputs to the basic blocks, one output).
        'cond_if_const':
        { 'op': Op.COND_IF,
          'operands': (0, 2),
          'build_fcn': (build_cond_if_const, TosaTensorGen.tgBasic, TosaArgGen.agCondIf),
          'types': [ DType.BOOL ] },

        'cond_if_binary':
        { 'op': Op.COND_IF,
          'operands': (2, 0),
          'build_fcn': (build_cond_if_binary, TosaTensorGen.tgBasic, TosaArgGen.agCondIf),
          'types': TYPE_FI32 },

        # while_loop
        'while_loop':
        { 'op': Op.WHILE_LOOP,
          'operands': (0, 1),
          'build_fcn': (build_while_loop, TosaTensorGen.tgBasic, TosaArgGen.agWhileLoop),
          'types': [ DType.INT32 ] },
    }

class OutputShaper:
    # Methods in this class compute the expected output shape and datatype
    # for common classes of operations
    def __init__(self):
        pass

    # These methods return arguments that can be used for
    # creating a new output tensor
    @staticmethod
    def binaryBroadcastOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            if a.shape[i] == 1:
                shape.append(b.shape[i])
            else:
                shape.append(a.shape[i])

        return ser.addOutput(shape, a.dtype, a.usage, a.dformat)

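    # A worked example of the broadcast rule above (illustrative only): with
    # a.shape == [1, 3] and b.shape == [2, 3], dimension 0 of `a` is 1, so the
    # output shape becomes [2, 3].  Note that only dimensions where `a` is 1
    # are taken from `b`; the shapes are assumed to already be
    # broadcast-compatible by the tensor generator.
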
    @staticmethod
    def binaryNonBroadcastOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            assert a.shape[i] == b.shape[i]
            shape.append(a.shape[i])

        return ser.addOutput(shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def unaryOp(ser, a):
        return ser.addOutput(a.shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def selectOp(ser, cond, a, b):
        assert len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            shape.append(max(cond.shape[i], a.shape[i], b.shape[i]))

        return ser.addOutput(shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def binaryComparisonOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        # Do broadcast
        shape = []
        for i in range(len(a.shape)):
            if a.shape[i] == 1:
                shape.append(b.shape[i])
            else:
                shape.append(a.shape[i])

        # Force the output type to bool
        return ser.addOutput(shape, DType.BOOL, a.usage, a.dformat)

    @staticmethod
    def reduceOp(ser, a, axis):

        shape = a.shape.copy()

        shape[axis] = 1

        return ser.addOutput(shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def argmaxOp(ser, a, axis):
        shape = a.shape.copy()
        del shape[axis]
        return ser.addOutput(shape, DType.INT32, a.usage, a.dformat)

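    # Illustrative contrast between the two shape rules above: for an input of
    # shape [2, 3, 4] and axis == 1, reduceOp keeps the reduced dimension as 1
    # and yields [2, 1, 4], while argmaxOp deletes it and yields [2, 4]
    # (with the output dtype forced to INT32).
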
    @staticmethod
    def conv2dOp(ser, ifm, filter, strides, padding, dilations):

        # IFM:    NHWC
        # Filter: OHWI
        # OFM:    NHWC

        if len(padding) == 2:
            # Expand padding to 4 parameters in the case of transpose_conv2d
            # From H,W to T,B,L,R
            padding = [padding[0], padding[0], padding[1], padding[1]]

        h = (ifm.shape[1] - filter.shape[1] - (filter.shape[1] - 1) * (dilations[0] - 1) +
             padding[0] + padding[1]) // strides[0] + 1

        w = (ifm.shape[2] - filter.shape[2] - (filter.shape[2] - 1) * (dilations[1] - 1) +
             padding[2] + padding[3]) // strides[1] + 1

        if h <= 0 or w <= 0:
            # Invalid test parameters?
            h = 0
            w = 0
            ser.setExpectedFailure(True, 'Invalid combination of conv2d parameters')

        ofm_shape = [ifm.shape[0], h, w, filter.shape[0]]

        if ifm.dtype == DType.AINT8 or ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception('Unsupported input dtype: {}'.format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype, ifm.usage, ifm.dformat)

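    # A worked instance of the height formula above (illustrative numbers):
    # for ifm NHWC [1, 16, 16, 8], filter OHWI [8, 3, 3, 8], stride [1, 1],
    # padding [0, 0, 0, 0] and dilation [1, 1]:
    #   h = (16 - 3 - (3 - 1) * (1 - 1) + 0 + 0) // 1 + 1 = 14
    # giving an OFM shape of [1, 14, 14, 8].
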
    @staticmethod
    def depthwiseConv2dOp(ser, ifm, filter, strides, padding, dilations):
        # IFM:    NHWC
        # Filter: HWCM
        # OFM:    NHW C*M
        h = (ifm.shape[1] - filter.shape[0] - (filter.shape[0] - 1) * (dilations[0] - 1) +
             padding[0] + padding[1]) // strides[0] + 1

        w = (ifm.shape[2] - filter.shape[1] - (filter.shape[1] - 1) * (dilations[1] - 1) +
             padding[2] + padding[3]) // strides[1] + 1

        if h <= 0 or w <= 0:
            # Invalid test parameters?
            h = 0
            w = 0
            ser.setExpectedFailure(True, 'Invalid combination of depthwise_conv2d parameters')

        ofm_shape = [ifm.shape[0], h, w, filter.shape[2] * filter.shape[3]]

        if ifm.dtype == DType.AINT8 or ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception('Unsupported input dtype: {}'.format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype, ifm.usage, ifm.dformat)

    @staticmethod
    def pool2dOp(ser, ifm, kernel, stride, pad):
        # input: NHWC
        h = (ifm.shape[1] + pad[0] + pad[1] + stride[0] - kernel[0]) // stride[0]
        w = (ifm.shape[2] + pad[2] + pad[3] + stride[1] - kernel[1]) // stride[1]

        if h <= 0 or w <= 0:
            # Invalid test parameters?
            h = 0
            w = 0
            ser.setExpectedFailure(True, 'Invalid combination of pooling parameters')

        ofm_shape = [ifm.shape[0], h, w, ifm.shape[3]]
        return ser.addOutput(ofm_shape, ifm.dtype, ifm.usage, ifm.dformat)

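    # Worked instance of the pooling shape rule above (illustrative numbers):
    # ifm [1, 32, 32, 8], kernel [2, 2], stride [2, 2], pad [0, 0, 0, 0]:
    #   h = (32 + 0 + 0 + 2 - 2) // 2 = 16
    # so the output shape is [1, 16, 16, 8]; batch and channels pass through.
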
    @staticmethod
    def fullyConnectedOp(ser, input, filter):
        # input:  N, IC
        # filter: OC, IC
        # output: N, OC

        output_shape = [input.shape[0], filter.shape[0]]

        if input.dtype == DType.AINT8 or input.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif input.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif input.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception('Unsupported input dtype: {}'.format(input.dtype))

        return ser.addOutput(output_shape, out_dtype, input.usage, input.dformat)

    @staticmethod
    def matmulOp(ser, a, b):
        # a:   M, K
        # b:   K, N
        # out: M, N

        output_shape = [a.shape[0], b.shape[1]]

        if a.dtype == DType.AINT8:
            out_dtype = DType.INT32
        elif a.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif a.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception('Unsupported input dtype for matmul: {}'.format(a.dtype))

        return ser.addOutput(output_shape, out_dtype, a.usage, a.dformat)

    @staticmethod
    def concatOp(ser, a, b, axis):

        output_shape = a.shape.copy()
        output_shape[axis] = a.shape[axis] + b.shape[axis]

        return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def padOp(ser, a, padding):

        output_shape = a.shape.copy()

        for i in range(len(output_shape)):
            output_shape[i] = padding[i][0] + padding[i][1] + output_shape[i]

        return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def reshapeOp(ser, a, shape):
        output_shape = shape.copy()

        totalElements = 1
        for i in a.shape:
            totalElements *= i

        # If there are any -1 elements, figure out what that dimension must be
        totalOutputElements = 1
        for i in output_shape:
            if i != -1:
                totalOutputElements *= i

        # And fill it in
        for i in range(len(output_shape)):
            if output_shape[i] == -1:
                output_shape[i] = totalElements // totalOutputElements

        return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)

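    # Worked instance of the -1 inference above (illustrative numbers): for an
    # input of shape [2, 3, 4] (24 elements) and a requested shape of [4, -1],
    # totalOutputElements is 4, so the -1 dimension becomes 24 // 4 = 6 and the
    # output shape is [4, 6].  At most one -1 entry is assumed.
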
    @staticmethod
    def sliceOp(ser, a, begin, size):

        output_shape = size.copy()
        return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def tileOp(ser, a, multiples):

        output_shape = a.shape.copy()
        assert len(multiples) == len(output_shape)

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[i] * multiples[i]

        return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)

    @staticmethod
    def transposeOp(ser, a, perms):
        output_shape = a.shape.copy()
        assert len(perms) == len(output_shape)

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[perms[i]]

        return ser.addOutput(output_shape, a.dtype, a.usage, a.dformat)

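    # Worked instance of the permutation rule above (illustrative numbers):
    # for a.shape == [1, 2, 3] and perms == [2, 0, 1], each output dimension i
    # takes a.shape[perms[i]], giving an output shape of [3, 1, 2].
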
    @staticmethod
    def gatherOp(ser, values, indices, axis):
        # The dimension at axis is replaced by the number of indices used
        # to look up values along that axis.
        output_shape = [*values.shape[0:axis], indices.shape[0], *values.shape[axis+1:]]

        return ser.addOutput(output_shape, values.dtype, indices.usage, indices.dformat)

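    # Worked instance of the gather shape rule above (illustrative numbers):
    # values.shape == [4, 5, 6], indices.shape == [3], axis == 1 replaces the
    # middle dimension with the index count, giving an output shape of [4, 3, 6].
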
    @staticmethod
    def tableOp(ser, input, table):
        # Same shape as the input, but with the lookup result type: the
        # INT16-input table lookup here produces an INT32 output.
        return ser.addOutput(input.shape, DType.INT32, input.usage, input.dformat)

    @staticmethod
    def resizeOp(ser, input, mode, stride, offset, shift, output_dims, input_dtype, output_dtype):

        output_dims = [input.shape[0], output_dims[0], output_dims[1], input.shape[3]]

        if stride[0] <= 0 or stride[1] <= 0:
            ser.setExpectedFailure(True, 'Negative or zero stride')

        if mode == ResizeMode.BILINEAR:
            if input_dtype == DType.INT8:
                if output_dtype != DType.INT32:
                    ser.setExpectedFailure(True, 'Invalid output data type')
            elif input_dtype == DType.INT16:
                if output_dtype != DType.INT48:
                    ser.setExpectedFailure(True, 'Invalid output data type')
            else:
                ser.setExpectedFailure(True, 'Invalid input data type')

        elif mode == ResizeMode.NEAREST:
            if input_dtype == DType.INT8:
                if output_dtype != DType.INT8:
                    ser.setExpectedFailure(True, 'Invalid output data type')
            elif input_dtype == DType.INT16:
                if output_dtype != DType.INT16:
                    ser.setExpectedFailure(True, 'Invalid output data type')
            else:
                ser.setExpectedFailure(True, 'Invalid input data type')

        else:
            ser.setExpectedFailure(True, 'Invalid resize mode')

        return ser.addOutput(output_dims, output_dtype, input.usage, input.dformat)

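    # Summary of the dtype legality checks above (derived from the code, not
    # an exhaustive statement of the spec):
    #   BILINEAR: INT8 -> INT32, INT16 -> INT48
    #   NEAREST:  INT8 -> INT8,  INT16 -> INT16
    # Any other combination marks the test as an expected failure.
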
    @staticmethod
    def typeConversionOp(ser, val, out_dtype):
        return ser.addOutput(val.shape, out_dtype, val.usage, val.dformat)

    @staticmethod
    def transposeConv2DOp(ser, ifm, output_shape):
        if ifm.dtype == DType.AINT8 or ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception('Unsupported input dtype: {}'.format(ifm.dtype))

        if output_shape[1] <= 0 or output_shape[2] <= 0:
            ser.setExpectedFailure(True, 'Negative or zero output shape')

        return ser.addOutput(output_shape, out_dtype, ifm.usage, ifm.dformat)