#!/usr/bin/env python3

# Copyright (c) 2020-2021, ARM Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import numpy as np
import argparse
import sys
import re
import os
import subprocess
import shlex
import json
import glob
import math
import queue
import threading
import traceback
import itertools

from enum import IntEnum, Enum, unique
from tosa_ref_run import TosaReturnCode

# Include the ../thirdparty/serialization_lib/python directory in PYTHONPATH
parent_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(
    os.path.join(parent_dir, "..", "thirdparty", "serialization_lib", "python")
)
import tosa_serializer as ts
from tosa_serializer import *
import tosa

# Convenience variables to the flatc-generated types that should be enums, but aren't
DType = tosa.DType.DType()
Op = tosa.Op.Op()
ResizeMode = tosa.ResizeMode.ResizeMode()


class TosaQuantGen:
    """QuantizedInfo random generator helper functions.  Specify with 'qgen': in the operator definition."""

    def __init__(self):
        pass

    @staticmethod
    def getQinfo(testGen, dtype):
        if dtype == DType.INT8:
            return testGen.randInt(-128, 128)
        if dtype == DType.UINT8:
            return testGen.randInt(0, 256)
        return 0

    @staticmethod
    def qgUnary(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.UnaryQuantInfo(
            TosaQuantGen.getQinfo(testGen, dtype), TosaQuantGen.getQinfo(testGen, dtype)
        )
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype_or_dtypeList):
        qinfo = ts.TosaSerializerQuantInfo()
        if isinstance(dtype_or_dtypeList, list):
            # a list of [input, weights, accumulator] dtypes
            dtypeList = dtype_or_dtypeList
        else:
            # an int, [input, weights, accumulator] dtypes are the same
            dtypeList = [dtype_or_dtypeList] * 3
        input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0])
        weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1])
        qinfo.ConvQuantInfo(input_zp, weights_zp)
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.MatMulQuantInfo(
            TosaQuantGen.getQinfo(testGen, dtype), TosaQuantGen.getQinfo(testGen, dtype)
        )
        return qinfo

    @staticmethod
    def qgPad(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.PadQuantInfo(TosaQuantGen.getQinfo(testGen, dtype))
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        # Derived from computeMultiplierAndShiftTosaScale32
        # Provide a floating-point scaling factor and the scale32 parameter
        # to compute the multiplier and shift

        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert multiplier <= (1 << scaleBits)

        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits

        # Adjust multiplier such that shift is in allowed value range.
        if shift == 0:
            multiplier = multiplier // 4
            shift = shift + 2
        elif shift == 1:
            multiplier = multiplier // 2
            shift = shift + 1
        elif shift == 63:
            multiplier = multiplier * 2
            shift = shift - 1

        assert multiplier <= (1 << scaleBits)
        assert shift >= 2 and shift <= 62

        return multiplier, shift
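    # Worked example (illustrative only, not exercised by the generator):
    # computeMultiplierAndShift(0.25, scale32=True) -> math.frexp gives
    # (m, shift) = (0.5, -1), so multiplier = round(0.5 * 2**31) = 1 << 30 and
    # the returned shift is 1 + 31 = 32, i.e. (x * multiplier) >> 32 ~= x * 0.25.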


class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator.  The actual random data is generated separately for each test."""

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank):
        pl, const = opName["operands"]

        assert rank == 4

        shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            shape[0] = (shape[0] % testGen.args.max_batch_size) + 1

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgScatter(testGen, opName, rank):
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        assert rank == 3

        values_in_shape = testGen.makeShape(rank)

        # Ignore max batch size if target shape is set
        if testGen.args.max_batch_size and not testGen.args.target_shapes:
            values_in_shape[0] = (values_in_shape[0] % testGen.args.max_batch_size) + 1

        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        # Constrict W if one dimension is too large to keep tensor size reasonable
        if max(values_in_shape) > 5000:
            W = testGen.randInt(0, 16)

        input_shape = [values_in_shape[0], W, values_in_shape[2]]

        shape_list = []
        shape_list.append(values_in_shape.copy())
        shape_list.append(input_shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank):
        shape = testGen.makeShape(rank)

        pl, const = op["operands"]

        shape_list = []

        # Choose one of the inputs to broadcast
        bcast_idx = testGen.randInt(0, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If this is the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list
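    # e.g. with a rank-3 shape [4, 5, 6] and fuzz_idx == 1, the chosen input
    # becomes [4, 1, 6], which must broadcast against the unmodified [4, 5, 6].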

    @staticmethod
    def tgConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgConv3D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 5

        # IFM dimensions are NDHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter depth/height/width from the operator parameters
        filter_dhw = op["filter"]

        # Generate a random OFM channel
        ofm_channel = testGen.makeShape(1)[0]

        # The filter dimensions are ODHWI
        filter_shape = np.asarray(
            [ofm_channel, filter_dhw[0], filter_dhw[1], filter_dhw[2], ifm_shape[4]]
        )

        # The bias is OC
        bias_shape = np.asarray([ofm_channel])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        # Filter is KH, KW, C, M
        filter_hw = op["filter"]

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (
            testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]
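    # e.g. an IFM of [1, 8, 8, 3] with filter_m == 2 gives a HWCM filter of
    # [kh, kw, 3, 2] and a bias of [6], since the output depth is C * M.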

    @staticmethod
    def tgFullyConnected(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 2

        input_shape = testGen.makeShape(rank)
        filter_oc = testGen.rng.integers(
            low=testGen.args.tensor_shape_range[0],
            high=testGen.args.tensor_shape_range[1],
            size=1,
        )[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 3
        assert pl == 2 and const == 0

        a_shape = testGen.makeShape(rank)
        # Get a random number for b_oc even if target shape is defined
        b_oc = np.int32(
            testGen.rng.integers(
                low=testGen.args.tensor_shape_range[0],
                high=testGen.args.tensor_shape_range[1],
                size=1,
            )
        )[0]
        # If N or H is large let b_oc be 1 to reduce output tensor size
        if max(a_shape) > 1000:
            b_oc = 1

        b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])
        return [a_shape, b_shape]

    @staticmethod
    def tgConcat(testGen, opName, rank):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Create extra tensors to concat.
        # Take into account value of pl when getting maximum number of concats
        num_tensors = testGen.randInt(0, 4)
        shape_list = []
        for i in range(pl + const + num_tensors):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgConcatConstInput(testGen, shapeList, axis):
        # Split concat shape along axis to allow for multiple const inputs
        # without making too many large tensors
        shape = shapeList[0]
        if len(shapeList) == 2 or shape[axis] < len(shapeList):
            return shapeList

        new_shapeList = [shape.copy()]
        length_on_axis = shape[axis]
        remaining_length = length_on_axis
        for i in range(len(shapeList) - 2):
            # Calculate split on axis and remaining value
            split_shape_val = int(shape[axis] / 2)
            remaining_length = remaining_length - split_shape_val

            # Append new shape, and set remaining shape
            shape[axis] = split_shape_val
            new_shapeList.append(shape.copy())
            shape[axis] = remaining_length
            if i == len(shapeList) - 3:
                new_shapeList.append(shape.copy())

        return new_shapeList
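    # Illustrative trace (assumed inputs, not generator output): four identical
    # shapes with length 8 on axis come back with axis lengths [8, 4, 2, 2] -
    # the first shape stays whole and the repeated halving partitions it.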


class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for operators that take
    attributes or other parameters.  The return value is a list of (descriptive_name, [arglist])
    tuples where the descriptive_name is appended to the test name and the arglist is expanded
    as arguments to the operator build function."""

    def __init__(self):
        pass

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype):
        """A trivial argument generator for operators that don't take any
        non-tensor arguments"""
        return [("", [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype):
        """Build the axis argument for operators that take a single axis"""
        axes = []

        shape = shapeList[0]

        for a in range(0, len(shape)):
            axes.append(("axis{}".format(a), [a]))
        return axes

    @staticmethod
    def agConv2D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert len(ifm_shape) == 4
        assert len(filter_shape) == 4

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for padding in range(0, (maxPadding) ** 4):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    p = [
                        (padding // (maxPadding * 4)) % maxPadding,
                        (padding // (maxPadding * 2)) % maxPadding,
                        (padding // (maxPadding * 1)) % maxPadding,
                        padding % maxPadding,
                    ]
                    d = [dilation // maxDilation + 1, dilation % maxDilation + 1]

                    # 4 padding parameters for regular conv2d
                    arg_list.append(
                        (
                            "st{}{}_pad{}{}{}{}_dilat{}{}".format(
                                s[0], s[1], p[0], p[1], p[2], p[3], d[0], d[1]
                            ),
                            [s, p, d],
                        )
                    )
        return arg_list

    @staticmethod
    def agConv3D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 5
        assert len(ifm_shape) == 5
        assert len(filter_shape) == 5

        # Generate basic argument list now
        # TODO: increase coverage
        s = [1, 1, 1]
        p = [0, 0, 0, 0, 0, 0]
        d = [1, 1, 1]
        arg_list.append(
            (
                "st{}{}{}_pad{}{}{}{}{}{}_dilat{}{}{}".format(
                    s[0],
                    s[1],
                    s[2],
                    p[0],
                    p[1],
                    p[2],
                    p[3],
                    p[4],
                    p[5],
                    d[0],
                    d[1],
                    d[2],
                ),
                [s, p, d],
            )
        )
        return arg_list

    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert len(ifm_shape) == 4
        assert len(filter_shape) == 4

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for out_padding in range(0, (maxPadding) ** 2):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    p = [
                        (out_padding // (maxPadding * 1)) % maxPadding,
                        out_padding % maxPadding,
                    ]
                    d = [dilation // maxDilation + 1, dilation % maxDilation + 1]

                    oh = (
                        ifm_shape[1]
                        - filter_shape[1]
                        - (filter_shape[1] - 1) * (d[0] - 1)
                        + 2 * p[0]
                    ) // s[0] + 1

                    ow = (
                        ifm_shape[2]
                        - filter_shape[2]
                        - (filter_shape[2] - 1) * (d[1] - 1)
                        + 2 * p[1]
                    ) // s[1] + 1

                    # Output shape
                    os = [ifm_shape[0], oh, ow, filter_shape[0]]

                    arg_list.append(
                        (
                            "st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}".format(
                                s[0],
                                s[1],
                                p[0],
                                p[1],
                                d[0],
                                d[1],
                                os[0],
                                os[1],
                                os[2],
                                os[3],
                            ),
                            [s, p, d, os],
                        )
                    )

        return arg_list

    @staticmethod
    def agPad(testGen, opName, shapeList, dtype):
        arg_list = []
        rank = len(shapeList[0])

        # Exhaustively test combinations of padding on each side of each dimension
        # - the range of padding values is defined by pad_min and pad_max
        # - for padding >9, the name format needs to be more distinctive
        pad_min, pad_max = 0, 1
        pad_values = [x for x in range(pad_min, pad_max + 1)]
        axis_pad_values = [x for x in itertools.product(pad_values, pad_values)]
        shape_pad_values = itertools.product(*([axis_pad_values] * rank))

        for paddings in shape_pad_values:
            name = "pad"
            for r in range(rank):
                before, after = paddings[r]
                name = f"{name}{before}{after}"
            arg_list.append((name, [np.array(paddings)]))

        return arg_list
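    # e.g. with the pad_min/pad_max of 0/1 above, each axis contributes the
    # pairs (0,0), (0,1), (1,0), (1,1), so a rank-2 shape yields 4**2 == 16
    # argument tuples named "pad0000" through "pad1111".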

    @staticmethod
    def agPooling(testGen, opName, shapeList, dtype):
        arg_list = []

        shape = shapeList[0]
        assert len(shape) == 4

        maxStride = testGen.args.max_pooling_stride
        maxKernel = testGen.args.max_pooling_kernel
        maxPadding = testGen.args.max_pooling_padding + 1

        for kernel in range(0, maxKernel ** 2):
            for stride in range(0, maxStride ** 2):
                for padding in range(0, maxPadding ** 4):
                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    k = [(kernel // maxKernel) + 2, (kernel % maxKernel) + 2]
                    p = [
                        (padding // (maxPadding * 4)) % maxPadding,
                        (padding // (maxPadding * 2)) % maxPadding,
                        (padding // (maxPadding * 1)) % maxPadding,
                        padding % maxPadding,
                    ]

                    arg_list.append(
                        (
                            "st{}{}_kern{}{}_pad{}{}{}{}".format(
                                s[0], s[1], k[0], k[1], p[0], p[1], p[2], p[3]
                            ),
                            [s, p, k],
                        )
                    )
        return arg_list

    @staticmethod
    def agCast(testGen, opName, shapeList, inDtype):
        arg_list = []

        # Enumerate the output types here
        if inDtype == DType.INT8:
            dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT16:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT32:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        elif inDtype == DType.BOOL:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif inDtype == DType.FLOAT:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        else:
            raise Exception("Unexpected input dtype: {}".format(inDtype))

        for dtype in dtypeList:
            arg_list.append(("out{}".format(DTypeNames[dtype]), [dtype]))

        return arg_list

    @staticmethod
    def agRescale(testGen, opName, shapeList, inDtype):
        arg_list = []

        # Enumerate the output types here
        for dtype in [DType.UINT8, DType.INT8, DType.INT16, DType.INT32]:
            if inDtype == DType.UINT8 and dtype != DType.INT8:
                # The only output dtype for UINT8 is INT8, skip all other combinations
                continue
            if inDtype != DType.INT8 and dtype == DType.UINT8:
                # The only input dtype for UINT8 is INT8, skip all other combinations
                continue

            for scale32 in [False, True]:
                for double_round in [False, True]:
                    for per_channel in [False, True]:

                        if inDtype == DType.INT48 and scale32:
                            # Illegal condition.  Must be scale32=False
                            continue
                        if double_round and not scale32:
                            # Illegal condition.  ERROR_IF(!scale32 && double_round)
                            continue

                        arg_list.append(
                            (
                                "out{}_sc{}_dr{}_pc{}".format(
                                    DTypeNames[dtype],
                                    int(scale32),
                                    int(double_round),
                                    int(per_channel),
                                ),
                                [dtype, scale32, double_round, per_channel],
                            )
                        )

        return arg_list

    @staticmethod
    def agMul(testGen, opName, shapeList, dtype):
        arg_list = []

        if dtype is DType.INT32:
            for p in range(testGen.args.num_rand_permutations):

                shift = testGen.randInt(0, 32)

                arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
        else:
            arg_list.append(("perm0_shift0", [0]))

        return arg_list

    @staticmethod
    def agArithmeticRightShift(testGen, opName, shapeList, dtype):
        arg_list = []

        arg_list.append(("roundTrue", [True]))
        arg_list.append(("roundFalse", [False]))

        return arg_list

    # Helper function for reshape.  Gets some factors of a larger number.
    @staticmethod
    def getFactors(val, start=1):
        factors = []

        for i in range(start, int(np.sqrt(val)) + 1):
            if (val % i) == 0:
                factors.append(i)

        return factors
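    # e.g. getFactors(12) scans 1..3 (up to sqrt(12)) and returns [1, 2, 3];
    # the co-factors above the square root reappear via the remaining element
    # count in agReshape below.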

    @staticmethod
    def agReshape(testGen, opName, shapeList, dtype):
        arg_list = []

        origShape = shapeList[0]

        totalElements = 1
        for s in origShape:
            totalElements *= s

        # This code is NOT fast.  Fortunately, the numbers are fairly small.
        factors = TosaArgGen.getFactors(totalElements)

        for p in range(testGen.args.num_rand_permutations):
            newRank = testGen.randInt(1, 7)
            if len(factors) < newRank:
                continue

            found = True
            # escape_counter breaks while loop if it continues on for too long
            escape_counter = 0
            while found:
                newShape = []
                # Generate newShape ensuring it isn't a duplicate
                remainingElements = totalElements
                shuffledFactors = testGen.rng.permutation(factors)
                for i in range(1, newRank):
                    # pick rank-1 factors
                    newShape.append(shuffledFactors[0])
                    remainingElements = remainingElements // shuffledFactors[0]
                    shuffledFactors = testGen.rng.permutation(
                        TosaArgGen.getFactors(remainingElements)
                    )
                newShape.append(remainingElements)

                # Toss in a -1 sometimes
                minusOne = testGen.randInt(0, newRank * 4)
                if minusOne < newRank:
                    newShape[minusOne] = -1

                # Check for duplicates
                found = False
                for name, other_shape in arg_list:
                    if other_shape[0] == newShape:
                        found = True
                        break

                escape_counter += 1
                if escape_counter >= 100:
                    break

            if not found:
                arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))

        return arg_list
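    # e.g. an original shape of [2, 3, 4] has 24 elements, so a rank-2 draw
    # might emit [6, 4], or [6, -1] when the occasional -1 substitution fires.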

    @staticmethod
    def agTranspose(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]

        # Get all permutations
        permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]

        # Limit to possible permutations from shape dimension or argument setting
        limit = min(len(permutations), testGen.args.num_rand_permutations)

        # Get random permutation generator that uses all permutations
        random_permutations = testGen.rng.permutation(permutations)

        # Create list of required amount of permutations
        arg_list = [
            ("perm{}".format(p), [random_permutations[p].tolist()])
            for p in range(limit)
        ]
        return arg_list

    @staticmethod
    def agSlice(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):
            begin = []
            size = []

            valid = True

            for i in range(rank):
                if ifm_shape[i] > 1:
                    begin.append(testGen.randInt(0, ifm_shape[i]))
                    size.append(testGen.randInt(0, ifm_shape[i] - begin[i]))

                    # Invalid slice size?
                    if size[i] == 0:
                        valid = False
                else:
                    begin.append(0)
                    size.append(1)

            if valid:
                arg_list.append(("perm{}".format(p), [begin, size]))
        return arg_list

    @staticmethod
    def agTile(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):

            # Pick a few random, but small multiple values
            # because otherwise this has a tendency to generate
            # enormous tensors
            multiples = []
            for i in range(rank):
                if ifm_shape[i] > 1000:
                    # Multiple of 1 if ifm_shape dimension is large to reduce tensor size
                    multiples.append(1)
                elif max(ifm_shape) > 1000:
                    multiples.append(2)
                else:
                    multiples.append(testGen.randInt(1, 4))
            arg_list.append(("perm{}".format(p), [multiples]))

        return arg_list

    @staticmethod
    def agResize(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]

        for m in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:

            # Exclude illegal {mode, type} configurations.  Pick legal output types
            if m == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif m == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            elif dtype == DType.FLOAT:
                outputDTypeList = [DType.FLOAT]
            else:
                continue

            for outputDType in outputDTypeList:
                for perm in range(testGen.args.num_rand_permutations):

                    # Randomly generate legal output dimensions and shift
                    # and then compute the stride and offset based on them
                    output_dims = [testGen.randInt(1), testGen.randInt(1)]
                    in_center_h = (ifm_shape[1] - 1) / 2.0
                    in_center_w = (ifm_shape[2] - 1) / 2.0
                    out_center_h = (output_dims[0] - 1) / 2.0
                    out_center_w = (output_dims[1] - 1) / 2.0

                    fp_stride_y = float(ifm_shape[1]) / float(output_dims[0])
                    fp_stride_x = float(ifm_shape[2]) / float(output_dims[1])
                    fp_offset_y = in_center_h - fp_stride_y * out_center_h
                    fp_offset_x = in_center_w - fp_stride_x * out_center_w

                    if outputDType == DType.FLOAT:
                        shift = 0
                        stride = [0, 0]
                        offset = [0, 0]
                        stride_fp = [fp_stride_y, fp_stride_x]
                        offset_fp = [fp_offset_y, fp_offset_x]
                        arg_list.append(
                            (
                                "mode{}_odim{}x{}_out{}_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}".format(
                                    "N" if m == ResizeMode.NEAREST else "B",
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride_fp[0],
                                    stride_fp[1],
                                    offset_fp[0],
                                    offset_fp[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )
                    else:
                        shift = 11
                        unit = float(1 << shift)
                        stride_y = int(round(fp_stride_y * unit))
                        stride_x = int(round(fp_stride_x * unit))
                        offset_y = int(round(fp_offset_y * unit))
                        offset_x = int(round(fp_offset_x * unit))

                        while (
                            stride_y >= 32768
                            or stride_x >= 32768
                            or offset_y >= 32768
                            or offset_x >= 32768
                            or offset_y < -32768
                            or offset_x < -32768
                        ):
                            shift = shift - 1
                            unit = float(1 << shift)
                            stride_y = int(round(fp_stride_y * unit))
                            stride_x = int(round(fp_stride_x * unit))
                            offset_y = int(round(fp_offset_y * unit))
                            offset_x = int(round(fp_offset_x * unit))

                        stride = [stride_y, stride_x]
                        offset = [offset_y, offset_x]

                        stride_fp = [0.0, 0.0]
                        offset_fp = [0.0, 0.0]

                        arg_list.append(
                            (
                                "mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}".format(
                                    "N" if m == ResizeMode.NEAREST else "B",
                                    shift,
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride[0],
                                    stride[1],
                                    offset[0],
                                    offset[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )

        return arg_list
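    # Fixed-point sketch for agResize above (illustrative numbers): a 4x4
    # input resized to 8x8 has fp_stride 0.5, so with shift == 11 the integer
    # stride is round(0.5 * 2**11) = 1024; the while loop only lowers the
    # shift when a stride or offset would overflow the signed 16-bit range.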

    @staticmethod
    def agCondIf(testGen, opName, shapeList, dtype):
        # CondIf generates the condition values here.
        # Convert to tensors in the build function, along with the
        # then and else blocks
        arg_list = []

        for c in [False, True]:
            arg_list.append(("cond{}".format(int(c)), [c]))

        return arg_list

    @staticmethod
    def agWhileLoop(testGen, opName, shapeList, dtype):
        # While loop: 0 iterations, 1, more than 1
        arg_list = []

        for iter in [0, 1, 4]:
            arg_list.append(("iter{}".format(iter), [iter]))

        return arg_list


class TosaInvalidValidator:

    @staticmethod
    def ivWrongDataTypeOrModeResize(**kwargs):
        input_dtype = kwargs["input_dtype"]
        args = kwargs["args"]
        mode = args[0]
        stride = args[1]
        stride_fp = args[4]
        output_dtype = args[8]

        if mode == ResizeMode.BILINEAR:
            # Invalid unless the type combination is one of the legal ones:
            # INT8 -> INT32, INT16 -> INT48, FLOAT -> FLOAT
            return not (
                (input_dtype == DType.INT8 and output_dtype == DType.INT32)
                or (input_dtype == DType.INT16 and output_dtype == DType.INT48)
                or (input_dtype == DType.FLOAT and output_dtype == DType.FLOAT)
            )
        elif mode == ResizeMode.NEAREST:
            # Invalid output data type / Invalid input datatype
            return (input_dtype != output_dtype) or (
                input_dtype not in [DType.INT8, DType.INT16, DType.FLOAT]
            )
        else:
            # Invalid resize mode
            return True

    @staticmethod
    def ivBadStride(**kwargs):
        input_dtype = kwargs["input_dtype"]
        args = kwargs["args"]
        stride_y = args[1][0]
        stride_x = args[1][1]
        stride_fp_y = args[4][0]
        stride_fp_x = args[4][1]

        if input_dtype == DType.FLOAT:
            if stride_fp_x <= 0 or stride_fp_y <= 0:
                # Negative or zero stride
                return True
        else:
            if stride_x <= 0 or stride_y <= 0:
                # Negative or zero stride
                return True
        return False

    @staticmethod
    def ivHeightWidthSmallerZero(**kwargs):
        opName = kwargs["opName"]

        inputShapes = kwargs["shapeList"]
        input = inputShapes[0]
        if not opName.endswith("pool2d"):
            filter = inputShapes[1]

        args = kwargs["args"]
        strides = args[0]
        padding = args[1]
        dilations = args[2]
        if opName.endswith("pool2d"):
            kernel = args[2]

        if opName.startswith("conv2d"):
            h = (
                input[1]
                - filter[1]
                - (filter[1] - 1) * (dilations[0] - 1)
                + padding[0]
                + padding[1]
            ) // strides[0] + 1

            w = (
                input[2]
                - filter[2]
                - (filter[2] - 1) * (dilations[1] - 1)
                + padding[2]
                + padding[3]
            ) // strides[1] + 1
        elif opName.startswith("depthwise_conv2d"):
            h = (
                input[1]
                - filter[0]
                - (filter[0] - 1) * (dilations[0] - 1)
                + padding[0]
                + padding[1]
            ) // strides[0] + 1

            w = (
                input[2]
                - filter[1]
                - (filter[1] - 1) * (dilations[1] - 1)
                + padding[2]
                + padding[3]
            ) // strides[1] + 1
        elif opName.endswith("pool2d"):
            h = (input[1] + padding[0] + padding[1] + strides[0] - kernel[0]) // strides[0]
            w = (input[2] + padding[2] + padding[3] + strides[1] - kernel[1]) // strides[1]
        else:
            assert False, "Unrecognized Op"

        if h <= 0 or w <= 0:
            # Invalid parameter combination
            return True
        return False
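    # Arithmetic check (an assumed trace): conv2d with input H == 4, filter
    # H == 2, dilation 2, stride 1 and no padding spans (2 - 1) * 2 + 1 = 3
    # rows, so h = (4 - 2 - 1 + 0 + 0) // 1 + 1 = 2 > 0 - a valid combination.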

    @staticmethod
    def ivNonPositiveOutputShape(**kwargs):
        args = kwargs["args"]
        output_shape = args[3]
        if output_shape[1] <= 0 or output_shape[2] <= 0:
            # Negative output shape
            return True
        return False


class TosaTestGen:
    # Maximum rank of tensor supported by test generator.
    TOSA_TENSOR_MAX_RANK = 6

    def __init__(self, args):
        self.args = args
        self.basePath = args.output_dir
        self.random_seed = args.random_seed
        self.ser = None
        self.rng = np.random.default_rng(self.random_seed)
        self.createDynamicOpLists()
        self.initOpListDefaults()
        self.quantGen = TosaQuantGen()
        # Force makeShape to do a specific starting shape
        self.targetted_shape = None

    def createSerializer(self, opName, testPath):
        self.testPath = os.path.join(opName, testPath)

        fullPath = os.path.join(self.basePath, self.testPath)
        os.makedirs(fullPath, exist_ok=True)
        self.ser = ts.TosaSerializer(fullPath)

    def getSerializer(self):
        return self.ser

    def serialize(self, testName):
        with open(
            os.path.join(self.basePath, self.testPath, "{}.tosa".format(testName)), "wb"
        ) as fd:
            fd.write(self.ser.serialize())

        with open(os.path.join(self.basePath, self.testPath, "desc.json"), "w") as fd:
            fd.write(self.ser.writeJson("{}.tosa".format(testName)))

    def resetRNG(self, seed=None):
        if seed is None:
            seed = self.random_seed + 1
        self.rng = np.random.default_rng(seed)

    def getRandTensor(self, shape, dtype):
        if dtype == DType.BOOL:
            return np.bool_(self.rng.choice(a=[False, True], size=shape))
        # TOSA specific INT4 weight range from -7 to 7
        elif dtype == DType.INT4:
            return np.int32(self.rng.integers(low=-7, high=8, size=shape))
        elif dtype == DType.INT8:
            return np.int32(self.rng.integers(low=-128, high=128, size=shape))
        elif dtype == DType.UINT8:
            return np.int32(self.rng.integers(low=0, high=256, size=shape))
        elif dtype == DType.INT16:
            return np.int32(self.rng.integers(low=-32768, high=32768, size=shape))
        elif dtype == DType.INT32:
            return np.int32(
                self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape)
            )
        elif dtype == DType.INT48:
            return np.int64(
                self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape)
            )
        elif dtype == DType.FLOAT:
            return np.float32(self.rng.random(size=shape))
        else:
            raise Exception("Unrecognized Dtype: {}".format(dtype))

    def buildPlaceholderTensors(self, shape_list, dtype_list):
        placeholders = []

        assert len(shape_list) == len(dtype_list)

        for idx, shape in enumerate(shape_list):
            arr = self.getRandTensor(shape, dtype_list[idx])
            placeholders.append(self.ser.addPlaceholder(shape, dtype_list[idx], arr))

        return placeholders

    def buildConstTensors(self, shape_list, dtype_list):
        consts = []

        assert len(shape_list) == len(dtype_list)

        for idx, shape in enumerate(shape_list):
            arr = self.getRandTensor(shape, dtype_list[idx])
            consts.append(self.ser.addConst(shape, dtype_list[idx], arr))

        return consts

    def makeShape(self, rank):
        if self.targetted_shape:
            return np.int32(self.targetted_shape)
        return np.int32(
            self.rng.integers(
                low=self.args.tensor_shape_range[0],
                high=self.args.tensor_shape_range[1],
                size=rank,
            )
        )

    def setTargetShape(self, shape):
        self.targetted_shape = shape

    def randInt(self, low=0, high=256):
        return np.int32(self.rng.integers(low=low, high=high, size=1))[0]

    def getRandNumberDType(self, dtype):
        if dtype == DType.FLOAT:
            return self.rng.random()
        elif dtype == DType.BOOL:
            return self.rng.choice([False, True])
        # TOSA specific INT4 weight range from -7 to 7
        elif dtype == DType.INT4:
            low, high = (-7, 8)
        elif dtype == DType.INT8:
            low, high = (-128, 128)
        elif dtype == DType.INT16:
            low, high = (-32768, 32768)
        elif dtype == DType.INT32:
            low, high = (-(1 << 31), (1 << 31))
        elif dtype == DType.INT48:
            low, high = (-(1 << 47), (1 << 47))
            # Special size
            return np.int64(self.rng.integers(low, high, size=1))[0]
        else:
            raise Exception("Unknown dtype: {}".format(dtype))

        return np.int32(self.rng.integers(low, high, size=1))[0]

    def shapeStr(self, shape):

        sStr = []
        # Convert to strings
        for i in shape:
            sStr.append(str(i))

        return "x".join(sStr)

    def typeStr(self, t):
        if isinstance(t, list):
            assert len(t) >= 2
            return "{}x{}".format(self.typeStr(t[0]), self.typeStr(t[1]))
        else:
            if t == DType.BOOL:
                return "b"
            elif t == DType.INT4:
                return "i4"
            elif t == DType.INT8:
                return "i8"
            elif t == DType.UINT8:
                return "u8"
            elif t == DType.INT16:
                return "i16"
            elif t == DType.INT32:
                return "i32"
            elif t == DType.INT48:
                return "i48"
            elif t == DType.FLOAT:
                return "float"
            else:
                raise Exception("Unknown dtype, cannot convert to string: {}".format(t))

    def typeWidth(self, t):
        """Get the datatype width for integer types"""
        if t == DType.INT4:
            return 4
        elif t == DType.INT8:
            return 8
        elif t == DType.UINT8:
            return 8
        elif t == DType.INT16:
            return 16
        elif t == DType.INT32:
            return 32
        elif t == DType.INT48:
            return 48
        else:
            raise Exception("Unknown dtype, cannot get width: {}".format(t))

    # Operator builders
    # Each build function returns the result tensor.  The string descriptor
    # from the argument generator is used to generate the test name, and
    # the build_fcn_arg_list is expanded and passed to the operator test
    # build function.

    def build_unary(self, op, a, qinfo=None):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_binary_broadcast(self, op, a, b):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_binary_nonbroadcast(self, op, a, b):
        result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_arithmetic_right_shift(self, op, a, b, round):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)

        attr = ts.TosaSerializerAttribute()
        attr.ArithmeticRightShiftAttribute(round)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens

    def build_mul(self, op, a, b, shift):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)

        # Special for multiply:
        # Force the result to INT32 for INT types
        if a.dtype != DType.FLOAT:
            result_tens.setDtype(DType.INT32)

        attr = ts.TosaSerializerAttribute()
        attr.MulAttribute(shift)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens

    def build_table(self, op, a):
        # Constant size depending on type, random values
        if a.dtype == DType.INT16:
            table_dtype = DType.INT16
            table_arr = self.getRandTensor([513], table_dtype)
        else:
            assert a.dtype == DType.INT8
            table_dtype = DType.INT8
            table_arr = self.getRandTensor([256], table_dtype)

        table_tens = self.ser.addConst(table_arr.shape, table_dtype, table_arr)
        result_tens = OutputShaper.tableOp(self.ser, a, table_dtype)
        self.ser.addOperator(op, [a.name, table_tens.name], [result_tens.name], None)

        return result_tens
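    # Note: the 513-entry table matches the TOSA TABLE definition for INT16
    # inputs (512 intervals plus a final endpoint), while INT8 uses a direct
    # 256-entry lookup.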

    def build_select(self, op, cond, a, b):
        result_tens = OutputShaper.selectOp(self.ser, cond, a, b)
        self.ser.addOperator(op, [cond.name, a.name, b.name], [result_tens.name])
        return result_tens

    def build_comparison(self, op, a, b):
        result_tens = OutputShaper.binaryComparisonOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_argmax(self, op, a, axis):
        result_tens = OutputShaper.argmaxOp(self.ser, a, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_pool2d(self, op, input, stride, pad, kernel, qinfo=None):
        result_tens = OutputShaper.pool2dOp(self.ser, input, kernel, stride, pad)

        attr = ts.TosaSerializerAttribute()
        attr.PoolAttribute(kernel, stride, pad)

        self.ser.addOperator(op, [input.name], [result_tens.name], attr, qinfo)
        return result_tens

    def build_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
        assert len(padding) == 4
        result_tens = OutputShaper.conv2dOp(
            self.ser, ifm, filter, strides, padding, dilations
        )

        attr = ts.TosaSerializerAttribute()
        attr.ConvAttribute(padding, strides, dilations)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
        )
        return result_tens

    def build_conv3d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
        assert len(padding) == 6
        result_tens = OutputShaper.conv3dOp(
            self.ser, ifm, filter, strides, padding, dilations
        )

        attr = ts.TosaSerializerAttribute()
        attr.ConvAttribute(padding, strides, dilations)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
        )
        return result_tens

    def build_transpose_conv2d(
        self, op, ifm, filter, bias, stride, outpad, dilation, output_shape, qinfo
    ):
        assert len(outpad) == 2
        result_tens = OutputShaper.transposeConv2DOp(self.ser, ifm, output_shape)

        attr = ts.TosaSerializerAttribute()
        attr.TransposeConvAttribute(outpad, stride, dilation, output_shape)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
        )
        return result_tens

    def build_depthwise_conv2d(
        self, op, ifm, filter, bias, strides, padding, dilations, qinfo
    ):
        result_tens = OutputShaper.depthwiseConv2dOp(
            self.ser, ifm, filter, strides, padding, dilations
        )

        attr = ts.TosaSerializerAttribute()
        attr.ConvAttribute(padding, strides, dilations)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
        )
        return result_tens

    def build_fully_connected(self, op, ifm, filter, bias, qinfo):
        result_tens = OutputShaper.fullyConnectedOp(self.ser, ifm, filter)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo
        )
        return result_tens

    def build_matmul(self, op, a, b, qinfo):
        result_tens = OutputShaper.matmulOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], None, qinfo)
        return result_tens

1482 def build_reduce(self, op, a, axis):
1483 result_tens = OutputShaper.reduceOp(self.ser, a, axis)
1484
1485 attr = ts.TosaSerializerAttribute()
1486 attr.AxisAttribute(axis)
1487
        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_clamp(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()
        v = [self.getRandNumberDType(a.dtype), self.getRandNumberDType(a.dtype)]

        if a.dtype == DType.FLOAT:
            attr.ClampAttribute(0, 0, min(v), max(v))
        else:
            attr.ClampAttribute(min(v), max(v), 0, 0)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_leaky_relu(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        attr = ts.TosaSerializerAttribute()

        attr.LeakyReluAttribute(self.getRandNumberDType(DType.FLOAT))

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    # Needs an additional type/input
    def build_prelu(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_relun(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()

        if a.dtype == DType.FLOAT:
            attr.ReluNAttribute(0, self.getRandNumberDType(a.dtype))
        else:
            attr.ReluNAttribute(self.getRandNumberDType(a.dtype), 0)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_sigmoid(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_tanh(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_concat(self, op, *a):
        assert type(a[-1]) == int

        # To store a variable-length list of input tensors we need to store the
        # axis along with it
        axis = a[-1]
        a = a[:-1]

        result_tens = OutputShaper.concatOp(self.ser, axis, *a)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        input_tensor_names = []
        for tensor in a:
            input_tensor_names.append(tensor.name)

        self.ser.addOperator(op, input_tensor_names, [result_tens.name], attr)
        return result_tens

    def build_pad(self, op, a, padding, qinfo):
        result_tens = OutputShaper.padOp(self.ser, a, padding)

        # Need to turn the padding array into a TOSA tensor here.
        # This is one of the few tensor operands that does not get
        # randomly generated
        padding_tens = self.ser.addConst(padding.shape, DType.INT32, padding)

        self.ser.addOperator(
            op, [a.name, padding_tens.name], [result_tens.name], None, qinfo
        )
        return result_tens

    def build_reshape(self, op, a, newShape):
        result_tens = OutputShaper.reshapeOp(self.ser, a, newShape)

        attr = ts.TosaSerializerAttribute()
        attr.ReshapeAttribute(newShape)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_reverse(self, op, a, axis):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_transpose(self, op, a, perms):
        result_tens = OutputShaper.transposeOp(self.ser, a, perms)

        perms_tens = self.ser.addConst([len(perms)], DType.INT32, np.int32(perms))

        self.ser.addOperator(op, [a.name, perms_tens.name], [result_tens.name])
        return result_tens

    def build_slice(self, op, a, begin, size):
        result_tens = OutputShaper.sliceOp(self.ser, a, begin, size)

        attr = ts.TosaSerializerAttribute()
        attr.SliceAttribute(begin, size)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_tile(self, op, a, multiples):
        result_tens = OutputShaper.tileOp(self.ser, a, multiples)

        attr = ts.TosaSerializerAttribute()
        attr.TileAttribute(multiples)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

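    # GATHER semantics for reference: with values of shape (N, K, C) and
    # indices of shape (N, W), output[n, w, c] = values[n, indices[n, w], c],
    # giving an output of shape (N, W, C).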
    def build_gather(self, op, values):

        # Create a new indices tensor here with data that doesn't exceed the
        # dimensions of the values tensor

        K = values.shape[1]  # K
        W = self.randInt(
            self.args.tensor_shape_range[0], self.args.tensor_shape_range[1]
        )  # W
        indicies_arr = np.int32(
            self.rng.integers(low=0, high=K, size=[values.shape[0], W])
        )  # (N, W)
        indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)

        result_tens = OutputShaper.gatherOp(self.ser, values, indicies)

        self.ser.addOperator(op, [values.name, indicies.name], [result_tens.name])

        return result_tens

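    # SCATTER semantics for reference: with values_in of shape (N, K, C),
    # indices of shape (N, W) and input of shape (N, W, C),
    # values_out[n, indices[n, w], c] = input[n, w, c]; positions that are
    # never indexed keep their values_in contents.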
    def build_scatter(self, op, values_in, input):

        # Create a new indices tensor here with data that doesn't exceed the
        # dimensions of the values_in tensor

        K = values_in.shape[1]  # K
        W = input.shape[1]  # W
        indicies_arr = np.int32(
            self.rng.integers(low=0, high=K, size=[values_in.shape[0], W])
        )  # (N, W)
        indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)

        result_tens = OutputShaper.scatterOp(self.ser, values_in, indicies, input)

        self.ser.addOperator(
            op, [values_in.name, indicies.name, input.name], [result_tens.name]
        )

        return result_tens

    def build_resize(
        self,
        op,
        input,
        mode,
        stride,
        offset,
        shift,
        stride_fp,
        offset_fp,
        output_dims,
        input_dtype,
        output_dtype,
    ):
        result_tens = OutputShaper.resizeOp(
            self.ser,
            input,
            mode,
            stride,
            offset,
            shift,
            stride_fp,
            offset_fp,
            output_dims,
            input_dtype,
            output_dtype,
        )

        attr = ts.TosaSerializerAttribute()

        attr.ResizeAttribute(
            output_dims, stride, offset, shift, stride_fp, offset_fp, mode
        )

        self.ser.addOperator(op, [input.name], [result_tens.name], attr)
        return result_tens

    def build_identityn(self, op, val, val2):

        result_tens = OutputShaper.unaryOp(self.ser, val)
        result_tens2 = OutputShaper.unaryOp(self.ser, val2)
        self.ser.addOperator(
            op, [val.name, val2.name], [result_tens.name, result_tens2.name]
        )
        return result_tens

    def build_placeholder(self, op, val):
        # Add an identity op to avoid warning in the reference model
        return self.build_unary(Op.IDENTITY, val)

    # Type Conversion
    def build_cast(self, op, val, out_dtype):
        result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
        self.ser.addOperator(op, [val.name], [result_tens.name])
        return result_tens

    def build_rescale(self, op, val, out_dtype, scale32, double_round, per_channel):
        result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)

        if per_channel:
            nc = val.shape[-1]
        else:
            nc = 1

        in_type_width = self.typeWidth(val.dtype)
        out_type_width = self.typeWidth(out_dtype)

        if val.dtype == DType.INT8:
            input_zp = self.randInt(-128, 128)
            in_type_width = in_type_width + 1
        elif val.dtype == DType.UINT8:
            input_zp = self.randInt(0, 256)
            in_type_width = in_type_width + 1
        else:
            input_zp = 0

        if out_dtype == DType.INT8:
            output_zp = self.randInt(-128, 128)
            out_type_width = out_type_width + 1
        elif out_dtype == DType.UINT8:
            output_zp = self.randInt(0, 256)
            out_type_width = out_type_width + 1
        else:
            output_zp = 0

        # Calculate scale based on:
        #   scale = a * (2^output_width) / (2^input_width)

        a = np.float32(self.rng.random(size=[nc]))
        scale_arr = a * np.float32((1 << out_type_width) / (1 << in_type_width))

        if scale32:
            # Cap the scaling at 2^31 - 1 for scale32
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1)
        else:
            # Cap the scaling at 2^15 - 1 for scale16
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)

        # print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))

        multiplier_arr = np.int32(np.zeros(shape=[nc]))
        shift_arr = np.int32(np.zeros(shape=[nc]))

        for i in range(nc):
            multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(
                scale_arr[i], scale32
            )

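        # Worked example (a sketch, assuming computeMultiplierAndShift
        # normalizes the multiplier into [2^30, 2^31) for scale32): a scale of
        # 0.75 decomposes to multiplier = 1610612736 and shift = 31, since
        # 0.75 == 1610612736 * 2**-31.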
        # print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp))

        attr = ts.TosaSerializerAttribute()
        attr.RescaleAttribute(
            input_zp,
            output_zp,
            multiplier_arr,
            shift_arr,
            scale32,
            double_round,
            per_channel,
        )

        self.ser.addOperator(op, [val.name], [result_tens.name], attr)
        return result_tens

    def build_cond_if_const(self, op, then_tens, else_tens, cond):
        # For cond_if with constants, we're supplied with then/else tensors that
        # we ignore (except for the generated shape) and the condition. Build
        # Then/Else blocks and fill them with const nodes for the body.

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, [cond])

        # Make then/else tensors
        out_shape = then_tens.shape
        then_arr = np.int32(self.rng.integers(0, 256, size=out_shape))
        else_arr = np.int32(self.rng.integers(0, 256, size=out_shape))

        # And the result tensor based on any of the outputs
        result_tens = self.ser.addOutput(out_shape, DType.INT32)

        # Create the attribute with the names of the then/else blocks
        then_block = "THEN_BLOCK"
        else_block = "ELSE_BLOCK"
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(op, [cond_tens.name], [result_tens.name], attr)

        self.ser.startBasicBlock(then_block)
        # Build the actual then/else tensors inside their blocks
        then_tens = self.ser.addConst(out_shape, DType.INT32, then_arr)
        self.ser.addOutputTensor(then_tens)

        self.ser.startBasicBlock(else_block)
        else_tens = self.ser.addConst(out_shape, DType.INT32, else_arr)
        self.ser.addOutputTensor(else_tens)

        return result_tens

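    # The graph built below is equivalent to:
    #   result = (a + b) if cond else (a - b)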
    def build_cond_if_binary(self, op, a, b, cond):
        # For cond_if with a binary op in the then/else blocks, take a and b and
        # alternately add or subtract them based on the condition

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, [cond])

        result_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.currBasicBlock.addOutput(result_tens.name)

        # Create the attribute with the names of the then/else blocks
        then_block = "THEN_BLOCK"
        else_block = "ELSE_BLOCK"
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(
            op, [cond_tens.name, a.name, b.name], [result_tens.name], attr
        )

        self.ser.startBasicBlock(then_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        then_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.addOperator(Op.ADD, [a.name, b.name], [then_tens.name])

        self.ser.startBasicBlock(else_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        else_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.addOperator(Op.SUB, [a.name, b.name], [else_tens.name])

        return result_tens

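    # The loop built below computes, in Python terms:
    #   while iter > 0: acc += a; iter -= 1
    # so the final accumulator equals a * iter_val.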
    def build_while_loop(self, op, a, iter_val):
        iter = self.ser.addPlaceholder([], DType.INT32, [np.int32(iter_val)])

        cond_block = "COND_BLOCK"
        body_block = "BODY_BLOCK"

        attr = ts.TosaSerializerAttribute()
        attr.WhileLoopAttribute(cond_block, body_block)

        # Accumulator tensor
        # acc = self.ser.addOutput(a.shape, a.dtype)
        acc_init_val = np.int32(np.zeros(a.shape))
        acc = self.ser.addPlaceholder(a.shape, a.dtype, acc_init_val)

        # Intermediate/output tensors for everything going through the loop
        iter_out = self.ser.addIntermediate(iter.shape, iter.dtype)
        a_out = self.ser.addIntermediate(a.shape, a.dtype)
        acc_out = self.ser.addIntermediate(acc.shape, acc.dtype)

        # While_loop operator
        self.ser.addOperator(
            op,
            [iter.name, a.name, acc.name],
            [iter_out.name, a_out.name, acc_out.name],
            attr,
        )
        self.ser.addOutputTensor(acc_out)

        # COND block (input: iter, output: cond_tens)
        self.ser.startBasicBlock(cond_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        zero_tens = self.ser.addConst([], DType.INT32, [np.int32(0)])
        cond_tens = self.ser.addOutput([], DType.BOOL)
        self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name], [cond_tens.name])

        # BODY block (input: a, acc, iter, output: a, acc, iter)
        # Note that local intermediate tensors need to be declared here for the outputs
        self.ser.startBasicBlock(body_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        one_tens = self.ser.addConst([], DType.INT32, [np.int32(1)])
        iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype)
        acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype)
        self.ser.addOperator(Op.ADD, [a.name, acc.name], [acc_body_out.name])
        self.ser.addOperator(Op.SUB, [iter.name, one_tens.name], [iter_body_out.name])
        self.ser.addOutputTensor(iter_body_out)
        self.ser.addOutputTensor(a)
        self.ser.addOutputTensor(acc_body_out)

        return acc_out

    def genOpTestList(
        self,
        opName,
        shapeFilter=[None],
        rankFilter=None,
        dtypeFilter=None,
        testType="positive",
    ):

        try:
            op = self.TOSA_OP_LIST[opName]
        except KeyError as e:
            raise Exception("Cannot find op with name {}".format(opName))

        # Initialize a new random number generator
        self.rng = np.random.default_rng(self.random_seed)

        build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]

        # Generate the lists of arguments
        rmin, rmax = op["rank"]

        # Create a default testing rank range, 1-4 inclusive to keep test sizes reasonably small.
        default_test_rank_range = range(1, 5)

        # Test list consists of a tuple of:
        # (opName, testNameStr, dtype, shapeList, argumentsList)
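        # e.g. ("add", "add_4x4_i32", DType.INT32, [[4, 4], [4, 4]], [])
        # (the exact shape and dtype suffixes depend on shapeStr()/typeStr(),
        # defined elsewhere in this class)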
        testList = []

        if not shapeFilter:
            shapeFilter = [None]

        # Positive test loop
        if testType in ["positive", "both"]:
            for r in range(rmin, rmax + 1):

                # Filter out the rank?
                if rankFilter is not None and r not in rankFilter:
                    continue
                if opName.startswith("conv3d"):
                    assert r == 5, "conv3d test must have input rank == 5"
                elif (
                    rankFilter is None
                    and shapeFilter[0] is None
                    and r not in default_test_rank_range
                ):
                    continue

                for t in op["types"]:

                    # Filter tests based on dtype?
                    if dtypeFilter is not None:
                        if not (
                            t in dtypeFilter
                            or (isinstance(t, list) and t[0] in dtypeFilter)
                        ):
                            continue

                    # Create the placeholder and const tensors
                    for shape in shapeFilter:
                        # A None shape chooses a random shape of a given rank

                        # Filter out by rank
                        if shape is not None and len(shape) != r:
                            continue

                        self.setTargetShape(shape)
                        shapeList = tgen_fcn(self, op, r)

                        shapeStr = self.shapeStr(shapeList[0])
                        typeStr = self.typeStr(t)

                        # Argument lists consist of tuples of the (str, []) string
                        # representation and the build function argument list
                        argList = []
                        if agen_fcn:
                            argList = agen_fcn(self, opName, shapeList, t)
                        else:
                            argList = [("", [])]

                        for argStr, args in argList:
                            if argStr:
                                testStr = "{}_{}_{}_{}".format(
                                    opName, shapeStr, typeStr, argStr
                                )
                            else:
                                testStr = "{}_{}_{}".format(opName, shapeStr, typeStr)

                            testList.append((opName, testStr, t, shapeList, args))

        # Remove tests which are expected to fail but don't correlate to an ERROR_IF statement
        if "invalid_test_validators" in op:
            invalid_test_validators = op["invalid_test_validators"]
            clean_testList = []
            for test in testList:
                for validator_fcn in invalid_test_validators:
                    remove_test = False
                    if validator_fcn(
                        opName=test[0], input_dtype=test[2], shapeList=test[3], args=test[4]
                    ):
                        remove_test = True
                if not remove_test:
                    clean_testList.append(test)
            testList = clean_testList

        # Reset RNG so both positive and negative tests are reproducible
        self.resetRNG()
        # Negative test loop
        if testType in ["negative", "both"]:
            print("Negative tests unsupported")

        return testList

    def serializeTest(self, opName, testStr, dtype_or_dtypeList, shapeList, testArgs):
        try:
            op = self.TOSA_OP_LIST[opName]
        except KeyError as e:
            raise Exception("Cannot find op with name {}".format(opName))

        # Create a serializer
        self.createSerializer(opName, testStr)

        build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
        pCount, cCount = op["operands"]
        num_operands = pCount + cCount

        if isinstance(dtype_or_dtypeList, list):
            dtypeList = dtype_or_dtypeList
        elif op["op"] == Op.CONCAT:
            dtypeList = [dtype_or_dtypeList] * len(shapeList)
        else:
            dtypeList = [dtype_or_dtypeList] * (num_operands)

        if op["op"] != Op.CONCAT:
            assert (
                len(shapeList) == num_operands
            ), "shapeList length {} must match number of operands {}".format(
                len(shapeList), num_operands
            )
            assert (
                len(dtypeList) == num_operands
            ), "dtypeList length {} must match number of operands {}".format(
                len(dtypeList), num_operands
            )

        try:
            qgen = op["qgen"]
        except KeyError:
            qgen = None

        # Build the random tensor operands and the test
        tens = []

        if (op["op"] == Op.ADD or op["op"] == Op.SUB) and dtypeList[0] == DType.INT32:
            # Make sure the operation does not cause value saturation, where
            # the result wraps because it no longer fits in the 32 bits available
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ADD / Op.SUB must have 2 placeholders, 0 consts"

            placeholders = []
            add = op["op"] == Op.ADD
            a_arr = self.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = self.getRandTensor(shapeList[1], dtypeList[1])
            if add:
                res_arr = np.add(a_arr, b_arr, dtype=np.int64)
            else:
                res_arr = np.subtract(a_arr, b_arr, dtype=np.int64)

            # Work out the saturation limits
            max_i32 = (1 << 31) - 1
            min_i32 = -(1 << 31)
            max_arr = np.full(shapeList[1], max_i32)
            min_arr = np.full(shapeList[1], min_i32)

            # Find how much values exceed the maximum/minimum
            sat_max_arr = np.maximum(res_arr - max_arr, 0)
            sat_min_arr = np.minimum(res_arr - min_arr, 0)

            if not add:
                # Swap and negate the saturation values, as we need to perform
                # the opposite operations
                sat_max_arr, sat_min_arr = -sat_min_arr, -sat_max_arr

            # Create a new array of unsaturated values by clipping values as needed
            b_unsat_arr = b_arr
            if (sat_max_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_max_arr, dtype=np.int32)
                # Reduce axes in unsaturated tensor to match original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amin(b_unsat_arr, axis=axis, keepdims=True)

            if (sat_min_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_min_arr, dtype=np.int32)
                # Reduce axes in unsaturated tensor to match original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amax(b_unsat_arr, axis=axis, keepdims=True)

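            # Worked example: with a = 2**30 + 5 and b = 2**30 + 7 the int64
            # sum is 2**31 + 12, so sat_max is 13 and b becomes b - 13, making
            # the final int32 sum exactly max_i32.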
            placeholders.append(
                self.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                self.ser.addPlaceholder(shapeList[1], dtypeList[1], b_unsat_arr)
            )

            tens.extend(placeholders)
        elif op["op"] == Op.ARITHMETIC_RIGHT_SHIFT:
            # Force value of operand[1] to be within [0, num_bits]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"

            placeholders = []
            for idx, shape in enumerate(shapeList[:]):
                if idx == 1:
                    if dtypeList[idx] == DType.INT8:
                        arr = np.int32(self.rng.integers(low=0, high=8, size=shape))
                    elif dtypeList[idx] == DType.INT16:
                        arr = np.int32(self.rng.integers(low=0, high=16, size=shape))
                    elif dtypeList[idx] == DType.INT32:
                        arr = np.int32(self.rng.integers(low=0, high=32, size=shape))
                    else:
                        raise Exception("OpArithmeticRightShift: invalid input dtype")
                else:
                    arr = self.getRandTensor(shape, dtypeList[idx])
                placeholders.append(self.ser.addPlaceholder(shape, dtypeList[idx], arr))

            tens.extend(placeholders)
        elif op["op"] == Op.SELECT:
            # Set datatype of condition tensor to boolean
            dtypeList[0] = DType.BOOL
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
            )
            tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))
        elif op["op"] == Op.INTDIV:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.INTDIV must have 2 placeholders, 0 consts"

            placeholders = []

            # Two invalid cases for Op.INTDIV:
            # 1. divisor == 0
            # 2. dividend == -(1<<31) and divisor == -1
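            #    (case 2 overflows: -(1 << 31) / -1 == 1 << 31, which is one
            #    more than INT32_MAX)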
            while True:
                dividend_arr = self.getRandTensor(shapeList[0], dtypeList[0])
                divisor_arr = self.getRandTensor(shapeList[1], dtypeList[1])

                if (divisor_arr == 0).any():
                    continue

                if (dividend_arr == -(2 ** 31)).any() and (divisor_arr == -1).any():
                    continue

                break

            placeholders.append(
                self.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
            )
            placeholders.append(
                self.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
            )

            tens.extend(placeholders)
        elif op["op"] == Op.MUL:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.MUL must have 2 placeholders, 0 consts"

            if dtypeList[0] == DType.FLOAT:
                tens.extend(self.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
            else:
                placeholders = []

                # Make sure the multiply result stays within int32 range
                shift = testArgs[0]
                if dtypeList[0] == DType.INT8:
                    num_bits = 8
                elif dtypeList[0] == DType.INT16:
                    num_bits = 16
                elif dtypeList[0] == DType.INT32:
                    num_bits = 32
                else:
                    raise Exception("OpMul: invalid input dtype")

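                # The loop below halves both operands until the (rounded,
                # shifted) product fits in int32; e.g. with shift == 0,
                # a = b = 2**16 gives a product of 2**32 (too big), and one
                # halving gives 2**30, which fits.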
                for idx, shape in enumerate(shapeList[:]):
                    low = -(2 ** (num_bits - 1))
                    high = (2 ** (num_bits - 1)) - 1

                    a_arr = np.int32(
                        self.rng.integers(low=low, high=high, size=shapeList[0])
                    )
                    b_arr = np.int32(
                        self.rng.integers(low=low, high=high, size=shapeList[1])
                    )

                i = 0
                while True:

                    a_arr_64 = a_arr.astype(np.int64)
                    b_arr_64 = b_arr.astype(np.int64)

                    if shift > 0:
                        rounding = 1 << (shift - 1)
                        result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
                    else:
                        result_arr = a_arr_64 * b_arr_64

                    if (result_arr > -(2 ** 31)).all() and (
                        result_arr <= ((2 ** 31) - 1)
                    ).all():
                        break

                    i = i + 1
                    a_arr = a_arr // 2
                    b_arr = b_arr // 2

                placeholders.append(
                    self.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
                )
                placeholders.append(
                    self.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
                )

                tens.extend(placeholders)
        elif op["op"] == Op.CONCAT:
            count = len(shapeList) - self.args.num_const_inputs_concat
            if count < 1:
                count = 1
            if self.args.num_const_inputs_concat == 0:
                count = len(shapeList)

            shapeList = TosaTensorGen.tgConcatConstInput(self, shapeList, testArgs[0])
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:count], dtypeList[0:count])
            )
            tens.extend(self.buildConstTensors(shapeList[count:], dtypeList[count:]))
        else:
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
            )
            tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))

        if qgen is not None:
            qinfo = qgen(self, op, dtype_or_dtypeList)
        else:
            qinfo = None

        try:
            if qinfo is not None:
                resultName = build_fcn(self, op["op"], *tens, *testArgs, qinfo)
            else:
                resultName = build_fcn(self, op["op"], *tens, *testArgs)
        except TypeError as e:
            print(
                "build_fcn: {}\nTensors: {}\nArgs: {}\n".format(
                    build_fcn, tens, testArgs
                )
            )
            raise e

        # Save the serialized test
        self.serialize("test")

    def createDynamicOpLists(self):

        # Dynamically create op lists for convolutions with a list of kernel sizes
        KERNELS_2D = [[1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3]]
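        # e.g. this expands "conv2d_TEMPLATE" into entries such as "conv2d_3x3",
        # each a copy of the template with its "filter" field filled in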

        for k in KERNELS_2D:
            testName = "conv2d_{}x{}".format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST["conv2d_TEMPLATE"].copy()
            self.TOSA_OP_LIST[testName]["filter"] = k
            self.TOSA_OP_LIST[testName]["template"] = False

            testName = "depthwise_conv2d_{}x{}".format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
                "depthwise_conv2d_TEMPLATE"
            ].copy()
            self.TOSA_OP_LIST[testName]["filter"] = k
            self.TOSA_OP_LIST[testName]["template"] = False

            testName = "transpose_conv2d_{}x{}".format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
                "transpose_conv2d_TEMPLATE"
            ].copy()
            self.TOSA_OP_LIST[testName]["filter"] = k
            self.TOSA_OP_LIST[testName]["template"] = False

        KERNELS_3D = [[1, 1, 1], [2, 1, 1], [1, 2, 1], [1, 1, 2]]
        for k in KERNELS_3D:
            testName = "conv3d_{}x{}x{}".format(k[0], k[1], k[2])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST["conv3d_TEMPLATE"].copy()
            self.TOSA_OP_LIST[testName]["filter"] = k
            self.TOSA_OP_LIST[testName]["template"] = False

        # Delete any templates after having created any dynamic ops
        # This is a two-pass operation because it's bad practice to delete
        # keys from dictionaries while iterating
        keyList = []
        for k in self.TOSA_OP_LIST:
            try:
                if self.TOSA_OP_LIST[k]["template"] == True:
                    keyList.append(k)
                    continue
            except KeyError:
                pass

        for k in keyList:
            del self.TOSA_OP_LIST[k]

    def initOpListDefaults(self):
        """Fill in default fields for ops if they aren't already specified.
        Look for missing required fields (datastructure linting)."""
        for op in self.TOSA_OP_LIST:

            # Required fields
            try:
                pl, c = self.TOSA_OP_LIST[op]["operands"]
            except (KeyError, ValueError, TypeError):
                raise Exception(
                    "Op {} is missing a valid operand tuple in TOSA_OP_LIST".format(op)
                )

            try:
                fcn, tgen, arggen = self.TOSA_OP_LIST[op]["build_fcn"]
            except (KeyError, ValueError, TypeError):
                raise Exception(
                    "Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST".format(
                        op
                    )
                )

            try:
                types = self.TOSA_OP_LIST[op]["types"]
            except KeyError as e:
                raise Exception(
                    "Op {} is missing a valid type list in TOSA_OP_LIST".format(op)
                )

            try:
                opcode = self.TOSA_OP_LIST[op]["op"]
            except KeyError as e:
                raise Exception(
                    "Op {} is missing the Op field in TOSA_OP_LIST".format(op)
                )

            # Put in default rank range, if missing
            try:
                rank = self.TOSA_OP_LIST[op]["rank"]
            except KeyError:
                self.TOSA_OP_LIST[op]["rank"] = self.DEFAULT_RANK_RANGE

    # Tensor operator list
    # 'op': op name
    # 'operands': tuple of (placeholder, const) operands
    # 'rank': optional, restricts rank to tuple inclusive of (min, max),
    #         if not specified, defaults to (1, 4)
    # 'build_fcn': tuple of the function to (build_operator(), TensorGen function, ArgGen enum)
    # 'types': array of datatypes to be tested
    TYPE_FP = [DType.FLOAT]

    TYPE_INT = [DType.INT8, DType.INT16, DType.INT32]  # Excludes INT4
    TYPE_INT_FP = [DType.INT8, DType.INT16, DType.INT32, DType.FLOAT]  # Excludes INT4

    TYPE_BOOL = [DType.BOOL]
    TYPE_FI32 = [DType.FLOAT, DType.INT32]
    TYPE_FIB = [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL]
    TYPE_FI16 = [DType.FLOAT, DType.INT16]

    TYPE_NARROW_INT_FP = [DType.INT8, DType.INT16, DType.FLOAT]

    TYPE_CONV = [
        [DType.INT8, DType.INT4, DType.INT32],
        [DType.INT8, DType.INT8, DType.INT32],
        [DType.INT16, DType.INT8, DType.INT48],
        DType.FLOAT,
    ]

    DEFAULT_RANK_RANGE = (1, TOSA_TENSOR_MAX_RANK)

    TOSA_OP_LIST = {
        # Tensor operators
        "argmax": {
            "op": Op.ARGMAX,
            "operands": (1, 0),
            "build_fcn": (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_NARROW_INT_FP,
        },
        "avg_pool2d": {
            "op": Op.AVG_POOL2D,
            "operands": (1, 0),
            "rank": (4, 4),
            "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
            "qgen": TosaQuantGen.qgUnary,
            "types": TYPE_NARROW_INT_FP,
            "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,),
        },
        # Templated operator. Filled in by createDynamicOpLists
        "conv2d_TEMPLATE": {
            "op": Op.CONV2D,
            "operands": (1, 2),
            "rank": (4, 4),
            "build_fcn": (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV,
            "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,),
            "template": True,
        },
        # Templated operator. Filled in by createDynamicOpLists
        "conv3d_TEMPLATE": {
            "op": Op.CONV3D,
            "operands": (1, 2),
            "rank": (5, 5),
            "build_fcn": (build_conv3d, TosaTensorGen.tgConv3D, TosaArgGen.agConv3D),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV,
            "template": True,
        },
        # Templated operator. Filled in by createDynamicOpLists
        "depthwise_conv2d_TEMPLATE": {
            "op": Op.DEPTHWISE_CONV2D,
            "operands": (1, 2),
            "filter": [1, 1],
            "rank": (4, 4),
            "build_fcn": (
                build_depthwise_conv2d,
                TosaTensorGen.tgDepthwiseConv2D,
                TosaArgGen.agConv2D,
            ),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV,
            "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,),
            "template": True,
        },
        "fully_connected": {
            "op": Op.FULLY_CONNECTED,
            "operands": (1, 2),
            "rank": (2, 2),
            "build_fcn": (build_fully_connected, TosaTensorGen.tgFullyConnected, None),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV,
        },
        "matmul": {
            "op": Op.MATMUL,
            "operands": (2, 0),
            "rank": (3, 3),
            "build_fcn": (build_matmul, TosaTensorGen.tgMatmul, None),
            "qgen": TosaQuantGen.qgMatmul,
            "types": TYPE_NARROW_INT_FP,
        },
        "max_pool2d": {
            "op": Op.MAX_POOL2D,
            "operands": (1, 0),
            "rank": (4, 4),
            "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
            "types": TYPE_NARROW_INT_FP,
            "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,),
        },
        # Templated operator. Filled in by createDynamicOpLists
        "transpose_conv2d_TEMPLATE": {
            "op": Op.TRANSPOSE_CONV2D,
            "operands": (1, 2),
            "rank": (4, 4),
            "build_fcn": (
                build_transpose_conv2d,
                TosaTensorGen.tgTransposeConv2D,
                TosaArgGen.agTransposeConv2D,
            ),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV,
            "invalid_test_validators": (TosaInvalidValidator.ivNonPositiveOutputShape,),
            "template": True,
        },
        # Activation functions
        "clamp": {
            "op": Op.CLAMP,
            "operands": (1, 0),
            "build_fcn": (build_clamp, TosaTensorGen.tgBasic, None),
            "types": TYPE_NARROW_INT_FP,
        },
        "relun": {
            "op": Op.RELUN,
            "operands": (1, 0),
            "build_fcn": (build_relun, TosaTensorGen.tgBasic, None),
            "types": TYPE_FI32,
        },
        "sigmoid": {
            "op": Op.SIGMOID,
            "operands": (1, 0),
            "build_fcn": (build_sigmoid, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "tanh": {
            "op": Op.TANH,
            "operands": (1, 0),
            "build_fcn": (build_tanh, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        # Elementwise Binary Operators
        "add": {
            "op": Op.ADD,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "arithmetic_right_shift": {
            "op": Op.ARITHMETIC_RIGHT_SHIFT,
            "operands": (2, 0),
            "build_fcn": (
                build_arithmetic_right_shift,
                TosaTensorGen.tgBroadcastFuzz,
                TosaArgGen.agArithmeticRightShift,
            ),
            "types": TYPE_INT,
        },
        "bitwise_and": {
            "op": Op.BITWISE_AND,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "bitwise_or": {
            "op": Op.BITWISE_OR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "bitwise_xor": {
            "op": Op.BITWISE_XOR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "intdiv": {
            "op": Op.INTDIV,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": [DType.INT32],
        },
        "logical_and": {
            "op": Op.LOGICAL_AND,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_BOOL,
        },
        "logical_left_shift": {
            "op": Op.LOGICAL_LEFT_SHIFT,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "logical_right_shift": {
            "op": Op.LOGICAL_RIGHT_SHIFT,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "logical_or": {
            "op": Op.LOGICAL_OR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_BOOL,
        },
        "logical_xor": {
            "op": Op.LOGICAL_XOR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_BOOL,
        },
        "maximum": {
            "op": Op.MAXIMUM,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "minimum": {
            "op": Op.MINIMUM,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "mul": {
            "op": Op.MUL,
            "operands": (2, 0),
            "build_fcn": (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul),
            "types": TYPE_INT_FP,
        },
        "pow": {
            "op": Op.POW,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "sub": {
            "op": Op.SUB,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "table": {
            "op": Op.TABLE,
            # Use the automatic generation functions to create the input array
            # but create the table tensor in the build function, as it may be
            # a different type from the input
            "operands": (1, 0),
            "build_fcn": (build_table, TosaTensorGen.tgBasic, None),
            "types": [DType.INT8, DType.INT16],
        },
        # Elementwise Unary operators
        "abs": {
            "op": Op.ABS,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FI32,
        },
        "bitwise_not": {
            "op": Op.BITWISE_NOT,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_INT,
        },
        "ceil": {
            "op": Op.CEIL,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "clz": {
            "op": Op.CLZ,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": [DType.INT32],
        },
        "exp": {
            "op": Op.EXP,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "floor": {
            "op": Op.FLOOR,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "log": {
            "op": Op.LOG,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "logical_not": {
            "op": Op.LOGICAL_NOT,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_BOOL,
        },
        "negate": {
            "op": Op.NEGATE,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "qgen": TosaQuantGen.qgUnary,
            "types": TYPE_INT_FP,
        },
        "reciprocal": {
            "op": Op.RECIPROCAL,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "rsqrt": {
            "op": Op.RSQRT,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        # Elementwise Ternary operators
        "select": {
            "op": Op.SELECT,
            "operands": (3, 0),
            "build_fcn": (build_select, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FIB,
        },
        # Comparison operators
        "equal": {
            "op": Op.EQUAL,
            "operands": (2, 0),
            "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "greater_equal": {
            "op": Op.GREATER_EQUAL,
            "operands": (2, 0),
            "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "greater": {
            "op": Op.GREATER,
            "operands": (2, 0),
            "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        # Reduction operators
        "reduce_all": {
            "op": Op.REDUCE_ALL,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_BOOL,
        },
        "reduce_any": {
            "op": Op.REDUCE_ANY,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_BOOL,
        },
        "reduce_max": {
            "op": Op.REDUCE_MAX,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_INT_FP,
        },
        "reduce_min": {
2729 "op": Op.REDUCE_MAX,
2730 "operands": (1, 0),
2731 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2732 "types": TYPE_INT_FP,
2733 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002734 "reduce_product": {
2735 "op": Op.REDUCE_PRODUCT,
2736 "operands": (1, 0),
2737 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2738 "types": TYPE_FP,
2739 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002740 "reduce_sum": {
2741 "op": Op.REDUCE_SUM,
2742 "operands": (1, 0),
2743 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2744 "types": TYPE_FI32,
2745 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002746 # Data layout operators
Kevin Cheng550ccc52021-03-03 11:21:43 -08002747 "concat": {
2748 "op": Op.CONCAT,
2749 "operands": (2, 0),
Matthew Haddon818ab902021-07-27 09:12:49 +01002750 "build_fcn": (build_concat, TosaTensorGen.tgConcat, TosaArgGen.agAxis),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002751 "types": TYPE_FIB,
2752 },
2753 "pad": {
2754 "op": Op.PAD,
2755 "operands": (1, 0),
2756 "build_fcn": (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad),
2757 "qgen": TosaQuantGen.qgPad,
2758 "types": TYPE_FIB,
2759 },
2760 "reshape": {
2761 "op": Op.RESHAPE,
2762 "operands": (1, 0),
2763 "build_fcn": (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
2764 "types": TYPE_FIB,
2765 },
2766 "reverse": {
2767 "op": Op.REVERSE,
2768 "operands": (1, 0),
2769 "build_fcn": (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2770 "types": TYPE_FIB,
2771 },
2772 "slice": {
2773 "op": Op.SLICE,
2774 "operands": (1, 0),
2775 "build_fcn": (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
2776 "types": TYPE_FIB,
2777 },
2778 "tile": {
2779 "op": Op.TILE,
2780 "operands": (1, 0),
2781 "build_fcn": (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
2782 "types": TYPE_FIB,
2783 },
2784 "transpose": {
2785 "op": Op.TRANSPOSE,
2786 "operands": (1, 0),
Jeremy Johnsona6185572021-06-21 15:55:35 +01002787 "rank": (1, 4),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002788 "build_fcn": (
2789 build_transpose,
2790 TosaTensorGen.tgBasic,
2791 TosaArgGen.agTranspose,
2792 ),
2793 "types": TYPE_FIB,
2794 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002795 # Data nodes
2796 "const": {
2797 "op": Op.CONST,
2798 "operands": (1, 0),
2799 "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None),
2800 "types": TYPE_FIB,
2801 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002802 "identity": {
2803 "op": Op.IDENTITY,
2804 "operands": (1, 0),
2805 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2806 "types": TYPE_FIB,
2807 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002808 # Scatter/Gather
Kevin Cheng550ccc52021-03-03 11:21:43 -08002809 "gather": {
2810 "op": Op.GATHER,
2811 # Only specify 'values' tensor here. 'indices' is generated in op building stage
2812 "operands": (1, 0),
2813 "rank": (3, 3),
2814 "build_fcn": (build_gather, TosaTensorGen.tgBasic, None),
2815 "types": TYPE_INT_FP,
2816 },
2817 "scatter": {
2818 "op": Op.SCATTER,
2819 # Only specify 'values_in' tensor here.
2820 #'indices' and 'input' are generated in op building stage
2821 "operands": (2, 0),
2822 "rank": (3, 3),
2823 "build_fcn": (build_scatter, TosaTensorGen.tgScatter, None),
2824 "types": TYPE_INT_FP,
2825 },
        # Image operations
        "resize": {
            "op": Op.RESIZE,
            "operands": (1, 0),
            "rank": (4, 4),
            "build_fcn": (build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
            "types": [DType.INT8, DType.INT16, DType.FLOAT],
            "invalid_test_validators": (
                TosaInvalidValidator.ivWrongDataTypeOrModeResize,
                TosaInvalidValidator.ivBadStride,
            ),
        },
        # Type conversion
        "cast": {
            "op": Op.CAST,
            "operands": (1, 0),
            "build_fcn": (build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast),
            "types": [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL],
        },
        "rescale": {
            "op": Op.RESCALE,
            "operands": (1, 0),
            "build_fcn": (build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale),
            "types": [DType.UINT8, DType.INT8, DType.INT16, DType.INT32, DType.INT48],
        },
        # Custom
        # Not implemented.
        # Control flow operators
        # Two variants of cond_if: one that generates one of two constant tensors
        # (no inputs to the basic blocks, one output) and another that either adds
        # or subtracts two tensors (two inputs to the basic blocks, one output)
        "cond_if_const": {
            "op": Op.COND_IF,
            "operands": (0, 2),
            "build_fcn": (
                build_cond_if_const,
                TosaTensorGen.tgBasic,
                TosaArgGen.agCondIf,
            ),
            "types": [DType.BOOL],
        },
        "cond_if_binary": {
            "op": Op.COND_IF,
            "operands": (2, 0),
            "build_fcn": (
                build_cond_if_binary,
                TosaTensorGen.tgBasic,
                TosaArgGen.agCondIf,
            ),
            "types": TYPE_FI32,
        },
        # while_loop
        "while_loop": {
            "op": Op.WHILE_LOOP,
            "operands": (0, 1),
            "build_fcn": (
                build_while_loop,
                TosaTensorGen.tgBasic,
                TosaArgGen.agWhileLoop,
            ),
            "types": [DType.INT32],
        },
    }


class OutputShaper:
    # Methods in this class compute the expected output shape and datatype
    # for common classes of operations
    def __init__(self):
        pass

    # These methods return arguments that can be used for
    # creating a new output tensor
    @staticmethod
    def binaryBroadcastOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype
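        # Per-dimension broadcast: a size-1 dimension in `a` takes `b`'s size,
        # otherwise `a`'s size is kept. Illustrative example:
        #   a.shape = [1, 16, 3], b.shape = [4, 1, 3]  ->  output shape [4, 16, 3]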

        shape = []
        for i in range(len(a.shape)):
            if a.shape[i] == 1:
                shape.append(b.shape[i])
            else:
                shape.append(a.shape[i])

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def binaryNonBroadcastOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            assert a.shape[i] == b.shape[i]
            shape.append(a.shape[i])

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def unaryOp(ser, a):
        return ser.addOutput(a.shape, a.dtype)

    @staticmethod
    def selectOp(ser, cond, a, b):
        assert len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            shape.append(max(cond.shape[i], a.shape[i], b.shape[i]))

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def binaryComparisonOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        # Do broadcast
        shape = []
        for i in range(len(a.shape)):
            if a.shape[i] == 1:
                shape.append(b.shape[i])
            else:
                shape.append(a.shape[i])

        # Force the output type to bool
        return ser.addOutput(shape, DType.BOOL)

    @staticmethod
    def reduceOp(ser, a, axis):
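        # The reduced axis is kept with size 1 so rank is preserved, e.g.
        # (illustrative) a.shape = [2, 3, 4], axis=1  ->  output shape [2, 1, 4]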

        shape = a.shape.copy()

        shape[axis] = 1

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def argmaxOp(ser, a, axis):
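        # Unlike reduceOp, the chosen axis is removed entirely and the result
        # holds INT32 indices, e.g. (illustrative) [2, 3, 4], axis=1 -> [2, 4]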
        shape = a.shape.copy()
        del shape[axis]
        return ser.addOutput(shape, DType.INT32)

    @staticmethod
    def conv2dOp(ser, ifm, filter, strides, padding, dilations):

        # IFM: NHWC
        # Filter: OHWI
        # OFM: NHWC

        if len(padding) == 2:
            # Expand padding to 4 parameters in the case of transpose_conv2d
            # From H,W to T,B,L,R
            padding = [padding[0], padding[0], padding[1], padding[1]]

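        # Spatial dimensions follow the standard dilated-convolution relation:
        #   out = (in - k - (k - 1) * (d - 1) + pad_before + pad_after) // stride + 1
        # e.g. (illustrative) in=16, k=3, d=1, pad=1+1, stride=2:
        #   (16 - 3 - 0 + 1 + 1) // 2 + 1 = 8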
        h = (
            ifm.shape[1]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        w = (
            ifm.shape[2]
            - filter.shape[2]
            - (filter.shape[2] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1

        ofm_shape = [ifm.shape[0], h, w, filter.shape[0]]

        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)

    @staticmethod
    def conv3dOp(ser, ifm, filter, strides, padding, dilations):

        # IFM: NDHWC
        # Filter: ODHWI
        # OFM: NDHWC

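        # Same output-size relation as conv2dOp, applied independently to the
        # depth, height and width axes; padding holds [before, after] pairs
        # in D, H, W order (padding[0..5])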
        d = (
            ifm.shape[1]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        h = (
            ifm.shape[2]
            - filter.shape[2]
            - (filter.shape[2] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1

        w = (
            ifm.shape[3]
            - filter.shape[3]
            - (filter.shape[3] - 1) * (dilations[2] - 1)
            + padding[4]
            + padding[5]
        ) // strides[2] + 1

        ofm_shape = [ifm.shape[0], d, h, w, filter.shape[0]]

        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)

    @staticmethod
    def depthwiseConv2dOp(ser, ifm, filter, strides, padding, dilations):
        # IFM: NHWC
        # Filter: HWCM
        # OFM: NHW C*M
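        # Output channels are C * M (input channels times channel multiplier),
        # e.g. (illustrative) ifm [1, 8, 8, 4] with filter [3, 3, 4, 2] gives
        # 4 * 2 = 8 output channels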
        h = (
            ifm.shape[1]
            - filter.shape[0]
            - (filter.shape[0] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        w = (
            ifm.shape[2]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1

        ofm_shape = [ifm.shape[0], h, w, filter.shape[2] * filter.shape[3]]

        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)

    @staticmethod
    def pool2dOp(ser, ifm, kernel, stride, pad):
        # input: NHWC
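        # out = (in + pad_before + pad_after + stride - kernel) // stride,
        # e.g. (illustrative) in=8, kernel=2, stride=2, no padding:
        #   (8 + 0 + 0 + 2 - 2) // 2 = 4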
        h = (ifm.shape[1] + pad[0] + pad[1] + stride[0] - kernel[0]) // stride[0]
        w = (ifm.shape[2] + pad[2] + pad[3] + stride[1] - kernel[1]) // stride[1]

        ofm_shape = [ifm.shape[0], h, w, ifm.shape[3]]
        return ser.addOutput(ofm_shape, ifm.dtype)

    @staticmethod
    def fullyConnectedOp(ser, input, filter):
        # input: N, IC
        # filter: OC, IC
        # output: N, OC

        output_shape = [input.shape[0], filter.shape[0]]

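        # The accumulator type widens the input type, matching the conv
        # operators above: INT8 -> INT32, INT16 -> INT48, FLOAT -> FLOAT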
        if input.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif input.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif input.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(input.dtype))

        return ser.addOutput(output_shape, out_dtype)

    @staticmethod
    def matmulOp(ser, a, b):
        # a: N, H, C
        # b: N, C, W
        # out: N, H, W
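        # e.g. (illustrative) a [2, 3, 5] matmul b [2, 5, 7] -> output [2, 3, 7]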

        output_shape = [a.shape[0], a.shape[1], b.shape[2]]

        if a.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif a.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif a.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype for matmul: {}".format(a.dtype))

        return ser.addOutput(output_shape, out_dtype)

    @staticmethod
    def concatOp(ser, axis, *a):
        input1 = a[0]
        remaining_inputs = a[1:]
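        # The output matches input1 except along `axis`, which accumulates the
        # sizes of all inputs there, e.g. (illustrative) concatenating
        # [2, 3, 4] and [2, 5, 4] on axis=1 -> output shape [2, 8, 4]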

        output_shape = input1.shape.copy()

        output_shape[axis] = input1.shape[axis]

        for tensor in remaining_inputs:
            output_shape[axis] += tensor.shape[axis]

        return ser.addOutput(output_shape, input1.dtype)

    @staticmethod
    def padOp(ser, a, padding):
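        # padding holds a [before, after] pair per dimension, e.g.
        # (illustrative) shape [4, 4] with padding [[1, 1], [2, 2]] -> [6, 8]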

        output_shape = a.shape.copy()

        for i in range(len(output_shape)):
            output_shape[i] = padding[i][0] + padding[i][1] + output_shape[i]

        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def reshapeOp(ser, a, shape):
        output_shape = shape.copy()

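        # A single -1 in the requested shape is inferred from the element
        # count, e.g. (illustrative) [2, 3, 4] reshaped to [4, -1] gives
        # -1 -> 24 // 4 = 6, so the output shape is [4, 6]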
        totalElements = 1
        for i in a.shape:
            totalElements *= i

        # If there are any -1 elements, figure out what that dimension must be
        totalOutputElements = 1
        for i in output_shape:
            if i != -1:
                totalOutputElements *= i

        # And fill it in
        for i in range(len(output_shape)):
            if output_shape[i] == -1:
                output_shape[i] = totalElements // totalOutputElements

        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def sliceOp(ser, a, begin, size):

        output_shape = size.copy()
        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def tileOp(ser, a, multiples):

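        # Each dimension is scaled by its multiple, e.g. (illustrative)
        # shape [2, 3] with multiples [2, 2] -> output shape [4, 6]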
        output_shape = a.shape.copy()
        assert len(multiples) == len(output_shape)

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[i] * multiples[i]

        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def transposeOp(ser, a, perms):
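        # Output dimension i takes input dimension perms[i], e.g.
        # (illustrative) shape [1, 2, 3] with perms [2, 0, 1] -> [3, 1, 2]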
        output_shape = a.shape.copy()
        assert len(perms) == len(output_shape)

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[perms[i]]

        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def gatherOp(ser, values, indices):
        assert len(values.shape) == 3
        assert len(indices.shape) == 2
        assert values.shape[0] == indices.shape[0]
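        # values [N, K, C] indexed by indices [N, W] -> output [N, W, C], e.g.
        # (illustrative) values [2, 10, 4] with indices [2, 5] -> [2, 5, 4]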

        output_shape = [values.shape[0], indices.shape[1], values.shape[2]]

        return ser.addOutput(output_shape, values.dtype)

    @staticmethod
    def scatterOp(ser, values_in, indices, input):
        assert len(values_in.shape) == 3
        assert len(indices.shape) == 2
        assert len(input.shape) == 3
        assert values_in.shape[0] == indices.shape[0]  # N
        assert input.shape[1] == indices.shape[1]  # W
        assert values_in.shape[2] == input.shape[2]  # C

        output_shape = values_in.shape

        return ser.addOutput(output_shape, values_in.dtype)

    @staticmethod
    def tableOp(ser, input, table_dtype):
        # Same shape as the input, but dtype dependent on table dtype
        assert table_dtype == DType.INT16 or table_dtype == DType.INT8
        output_dtype = DType.INT32 if table_dtype == DType.INT16 else DType.INT8
        return ser.addOutput(input.shape, output_dtype)

    @staticmethod
    def resizeOp(
        ser,
        input,
        mode,
        stride,
        offset,
        shift,
        stride_fp,
        offset_fp,
        output_dims,
        input_dtype,
        output_dtype,
    ):

        output_dims = [input.shape[0], output_dims[0], output_dims[1], input.shape[3]]

        return ser.addOutput(output_dims, output_dtype)

    @staticmethod
    def typeConversionOp(ser, val, out_dtype):
        return ser.addOutput(val.shape, out_dtype)

    @staticmethod
    def transposeConv2DOp(ser, ifm, output_shape):
        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(output_shape, out_dtype)