blob: a19c5f4eb966f0a2c7e33ec9ce3fe3939059b704 [file] [log] [blame]
Eric Kunzee5e26762020-10-13 16:11:07 -07001#!/usr/bin/env python3
2
Kevin Cheng3a478572021-01-22 17:21:02 -08003# Copyright (c) 2020-2021, ARM Limited.
Eric Kunzee5e26762020-10-13 16:11:07 -07004#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16
17
18import numpy as np
19import argparse
20import sys
21import re
22import os
23import subprocess
24import shlex
25import json
26import glob
27import math
28import queue
29import threading
30import traceback
31import math
Jeremy Johnsona6185572021-06-21 15:55:35 +010032import itertools
Eric Kunzee5e26762020-10-13 16:11:07 -070033
34from enum import IntEnum, Enum, unique
Kevin Chengacb550f2021-06-29 15:32:19 -070035from tosa_ref_run import TosaReturnCode
Eric Kunzee5e26762020-10-13 16:11:07 -070036
# Include the ../thirdparty/serialization_lib/python directory in PYTHONPATH
# so the flatbuffers-based serializer can be imported from a source checkout.
parent_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(
    os.path.join(parent_dir, "..", "thirdparty", "serialization_lib", "python")
)
# NOTE: these imports must stay below the sys.path manipulation above.
import tosa_serializer as ts
from tosa_serializer import *
import tosa

# Convenience variables to the flatc-generated types that should be enums, but aren't
DType = tosa.DType.DType()
Op = tosa.Op.Op()
ResizeMode = tosa.ResizeMode.ResizeMode()
50
Kevin Cheng550ccc52021-03-03 11:21:43 -080051
class TosaQuantGen:
    """QuantizedInfo random generator helper functions. Specify with 'qgen': in the operator definition"""

    def __init__(self):
        pass

    @staticmethod
    def getQinfo(testGen, dtype):
        """Return a random zero point appropriate for dtype.

        INT8 zero points span [-128, 127], UINT8 span [0, 255]; every other
        dtype is treated as non-quantized and gets a zero point of 0.
        """
        if dtype == DType.INT8:
            return testGen.randInt(-128, 128)
        if dtype == DType.UINT8:
            return testGen.randInt(0, 256)
        return 0

    @staticmethod
    def qgUnary(testGen, op, dtype):
        """Build a UnaryQuantInfo with random input/output zero points."""
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.UnaryQuantInfo(
            TosaQuantGen.getQinfo(testGen, dtype), TosaQuantGen.getQinfo(testGen, dtype)
        )
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype_or_dtypeList):
        """Build a ConvQuantInfo with random input/weight zero points.

        dtype_or_dtypeList is either a single dtype used for all operands or a
        list of [input, weights, accumulator] dtypes.
        """
        qinfo = ts.TosaSerializerQuantInfo()
        if isinstance(dtype_or_dtypeList, list):
            # a list of [input, weights, accumulator] dtypes
            dtypeList = dtype_or_dtypeList
        else:
            # an int, [input, weights, accumulator] dtypes are the same
            dtypeList = [dtype_or_dtypeList] * 3
        input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0])
        weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1])
        qinfo.ConvQuantInfo(input_zp, weights_zp)
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype):
        """Build a MatMulQuantInfo with random A/B zero points."""
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.MatMulQuantInfo(
            TosaQuantGen.getQinfo(testGen, dtype), TosaQuantGen.getQinfo(testGen, dtype)
        )
        return qinfo

    @staticmethod
    def qgPad(testGen, op, dtype):
        """Build a PadQuantInfo with a random input zero point."""
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.PadQuantInfo(TosaQuantGen.getQinfo(testGen, dtype))
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        """Convert a floating-point scale into an integer (multiplier, shift) pair.

        Derived from computeMultiplierAndShiftTosaScale32.  With scale32 the
        multiplier uses 31 fractional bits, otherwise 15.  The returned shift
        is always kept inside the allowed [2, 62] range.
        """
        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        # frexp returns a negative mantissa for negative inputs; the
        # multiplier is kept positive here.
        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert multiplier <= (1 << scaleBits)

        # Rounding may push the mantissa to exactly 1.0; renormalize.
        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits

        # Adjust multiplier such that shift is in allowed value range.
        if shift == 0:
            multiplier = multiplier // 4
            shift = shift + 2
        elif shift == 1:
            multiplier = multiplier // 2
            shift = shift + 1
        elif shift == 63:
            multiplier = multiplier * 2
            shift = shift - 1

        assert multiplier <= (1 << scaleBits)
        assert shift >= 2 and shift <= 62

        return multiplier, shift
143
144
class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator. The actual random data is generated separately for each test."""

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank):
        """Give every operand (placeholder and const) the same random shape."""
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank):
        """Same-shape operands for rank-4 NHWC operators, with batch capping."""
        pl, const = opName["operands"]

        assert rank == 4

        shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            shape[0] = (shape[0] % testGen.args.max_batch_size) + 1

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgScatter(testGen, opName, rank):
        """Shapes for gather/scatter-style ops: values_in plus an index-like input.

        Returns [values_in_shape (N, K, C), input_shape (N, W, C)] with a
        random W.
        """
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        assert rank == 3

        values_in_shape = testGen.makeShape(rank)

        # ignore max batch size if target shape is set
        if testGen.args.max_batch_size and not testGen.args.target_shapes:
            values_in_shape[0] = (values_in_shape[0] % testGen.args.max_batch_size) + 1

        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        # Constrict W if one dimension is too large to keep tensor size reasonable
        if max(values_in_shape) > 5000:
            W = testGen.randInt(0, 16)

        input_shape = [values_in_shape[0], W, values_in_shape[2]]

        shape_list = []
        shape_list.append(values_in_shape.copy())
        shape_list.append(input_shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank):
        """Same shape for all operands, except one randomly chosen operand gets
        one randomly chosen dimension set to 1 so broadcasting is exercised."""
        shape = testGen.makeShape(rank)

        pl, const = op["operands"]

        shape_list = []

        # Choose one of the inputs to broadcast
        bcast_idx = testGen.randInt(0, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list

    @staticmethod
    def tgConv2D(testGen, op, rank):
        """Shapes for CONV2D: [ifm (NHWC), filter (OHWI), bias (OC)]."""
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgConv3D(testGen, op, rank):
        """Shapes for CONV3D: [ifm (NDHWC), filter (ODHWI), bias (OC)]."""
        pl, const = op["operands"]

        assert rank == 5

        # IFM dimensions are NDHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter depth/height/width from the operator parameters
        filter_dhw = op["filter"]

        # Generate a random OFM channel
        ofm_channel = testGen.makeShape(1)[0]

        # The filter dimensions are ODHWI
        filter_shape = np.asarray(
            [ofm_channel, filter_dhw[0], filter_dhw[1], filter_dhw[2], ifm_shape[4]]
        )

        # The bias is OC
        bias_shape = np.asarray([ofm_channel])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank):
        """Shapes for TRANSPOSE_CONV2D: [ifm (NHWC), filter (OHWI), bias (OC)]."""
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank):
        """Shapes for DEPTHWISE_CONV2D: [ifm (NHWC), filter (HWCM), bias (C*M)]."""
        pl, const = op["operands"]

        assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        # Filter is KH, HW, C, M
        filter_hw = op["filter"]

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (
            testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgFullyConnected(testGen, op, rank):
        """Shapes for FULLY_CONNECTED: [input (N, IC), filter (OC, IC), bias (OC)]."""
        pl, const = op["operands"]

        assert rank == 2

        input_shape = testGen.makeShape(rank)
        filter_oc = testGen.rng.integers(
            low=testGen.args.tensor_shape_range[0],
            high=testGen.args.tensor_shape_range[1],
            size=1,
        )[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank):
        """Shapes for MATMUL: a (N, H, C) and a compatible b (N, C, W)."""
        pl, const = op["operands"]

        assert rank == 3
        assert pl == 2 and const == 0

        a_shape = testGen.makeShape(rank)
        # Get a random number for b_oc even if target shape is defined
        b_oc = np.int32(
            testGen.rng.integers(
                low=testGen.args.tensor_shape_range[0],
                high=testGen.args.tensor_shape_range[1],
                size=1,
            )
        )[0]
        # If N or H is large let b_oc be 1 to reduce output tensor size
        if max(a_shape) > 1000:
            b_oc = 1

        b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])
        return [a_shape, b_shape]

    @staticmethod
    def tgConcat(testGen, opName, rank):
        """Shapes for CONCAT: the base operands plus 0-3 extra same-shape tensors."""
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Create extra tensors to concat.
        # Take into account value of pl when getting maximum number of concats
        # NOTE(review): num_tensors does not actually depend on pl despite the
        # comment above — confirm intent.
        num_tensors = testGen.randInt(0, 4)
        shape_list = []
        for i in range(pl + const + num_tensors):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgConcatConstInput(testGen, shapeList, axis):
        """Re-split a CONCAT shape list along axis so the const inputs get
        successively halved extents instead of many full-size tensors.

        NOTE(review): this mutates shapeList[0] in place while iterating —
        callers see the altered shape; confirm that is intended.
        """
        # Split concat shape along axis to allow for multiple const inputs
        # without making too many large tensors
        shape = shapeList[0]
        # Nothing to do for a single pair or when the axis is too short to split
        if len(shapeList) == 2 or shape[axis] < len(shapeList):
            return shapeList

        new_shapeList = [shape.copy()]
        length_on_axis = shape[axis]
        remaining_length = length_on_axis
        for i in range(len(shapeList) - 2):
            # Calculate split on axis and remaining value
            split_shape_val = int(shape[axis] / 2)
            remaining_length = remaining_length - split_shape_val

            # Append new shape, and set remaining shape
            shape[axis] = split_shape_val
            new_shapeList.append(shape.copy())
            shape[axis] = remaining_length
            # Last iteration also emits the leftover remainder shape
            if i == len(shapeList) - 3:
                new_shapeList.append(shape.copy())

        return new_shapeList
426
427
class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for operators that take
    attributes or other parameters. The return value is a list of (descriptive_name, [arglist])
    tuples where the descriptive_name is appended to the test name and the arglist is expanded
    as arguments to the operator build function."""

    def __init__(self):
        # Stateless: all generators are static methods.
        pass
436
437 @staticmethod
438 def agNone(testGen, opName, shapeList, dtype):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800439 """A trivial argument generator for operators that don't take any
440 non-tensor arguments"""
441 return [("", [])]
Eric Kunzee5e26762020-10-13 16:11:07 -0700442
443 @staticmethod
444 def agAxis(testGen, opName, shapeList, dtype):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800445 """Build the axis argument for operators that take a single axis"""
Eric Kunzee5e26762020-10-13 16:11:07 -0700446 axes = []
447
448 shape = shapeList[0]
449
450 for a in range(0, len(shape)):
Matthew Haddon43e37192021-07-09 14:13:02 +0100451 axes.append(("axis{}".format(a), [a]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700452 return axes
453
454 @staticmethod
455 def agConv2D(testGen, opName, shapeList, dtype):
456 arg_list = []
457
458 ifm_shape = shapeList[0]
459 filter_shape = shapeList[1]
460
461 # Must be rank 4
Kevin Cheng550ccc52021-03-03 11:21:43 -0800462 assert len(ifm_shape) == 4
463 assert len(filter_shape) == 4
Eric Kunzee5e26762020-10-13 16:11:07 -0700464
465 maxStride = testGen.args.max_conv_stride
466 maxPadding = testGen.args.max_conv_padding + 1
467 maxDilation = testGen.args.max_conv_dilation
468
469 # Strides, padding, dilations
470 for stride in range(0, maxStride ** 2):
471 for padding in range(0, (maxPadding) ** 4):
472 for dilation in range(0, maxDilation ** 2):
473
Kevin Cheng550ccc52021-03-03 11:21:43 -0800474 s = [stride // maxStride + 1, stride % maxStride + 1]
475 p = [
476 (padding // (maxPadding * 4)) % maxPadding,
477 (padding // (maxPadding * 2)) % maxPadding,
478 (padding // (maxPadding * 1)) % maxPadding,
479 padding % maxPadding,
480 ]
481 d = [dilation // maxDilation + 1, dilation % maxDilation + 1]
Eric Kunzee5e26762020-10-13 16:11:07 -0700482
483 # 4 padding parameters for regular conv2d
Kevin Cheng550ccc52021-03-03 11:21:43 -0800484 arg_list.append(
485 (
486 "st{}{}_pad{}{}{}{}_dilat{}{}".format(
487 s[0], s[1], p[0], p[1], p[2], p[3], d[0], d[1]
488 ),
489 [s, p, d],
490 )
491 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700492 return arg_list
493
494 @staticmethod
Kevin Cheng1533b852021-09-01 12:51:58 -0700495 def agConv3D(testGen, opName, shapeList, dtype):
496 arg_list = []
497
498 ifm_shape = shapeList[0]
499 filter_shape = shapeList[1]
500
501 # Must be rank 5
502 assert len(ifm_shape) == 5
503 assert len(filter_shape) == 5
504
Les Bellf414b3c2021-09-06 11:29:46 +0100505 # Generate comprehensive argument list
506 p_range = [x for x in range(0, testGen.args.max_conv_padding + 1)]
507 paddings = [x for x in itertools.product(*([p_range] * 6))]
508 s_range = [x for x in range(1, testGen.args.max_conv_stride + 1)]
509 strides = [x for x in itertools.product(*([s_range] * 3))]
510 d_range = [x for x in range(1, testGen.args.max_conv_dilation + 1)]
511 dilations = [x for x in itertools.product(*([d_range] * 3))]
512
513 # There are too many parameter combinations, so generate them sparsely
514 # To get a variety of parameter combinations sparsity should not be a multiple of 2, or 3
515 # TODO: make sparsity a CLI option
516 sparsity = 37
517 n = 0
518
519 for s in strides:
520 for p in paddings:
521 for d in dilations:
522 if n % sparsity == 0:
523 arg_list.append(
524 (
525 "st{}_pad{}_dilat{}".format(
526 "".join([str(x) for x in s]),
527 "".join([str(x) for x in p]),
528 "".join([str(x) for x in d]),
529 ),
530 [s, p, d],
531 )
532 )
533 n += 1
534
Kevin Cheng1533b852021-09-01 12:51:58 -0700535 return arg_list
536
537 @staticmethod
Eric Kunzee5e26762020-10-13 16:11:07 -0700538 def agTransposeConv2D(testGen, opName, shapeList, dtype):
539 arg_list = []
540
541 ifm_shape = shapeList[0]
542 filter_shape = shapeList[1]
543
544 # Must be rank 4
Kevin Cheng550ccc52021-03-03 11:21:43 -0800545 assert len(ifm_shape) == 4
546 assert len(filter_shape) == 4
Eric Kunzee5e26762020-10-13 16:11:07 -0700547
548 maxStride = testGen.args.max_conv_stride
549 maxPadding = testGen.args.max_conv_padding + 1
550 maxDilation = testGen.args.max_conv_dilation
551
552 # Strides, padding, dilations
553 for stride in range(0, maxStride ** 2):
554 for out_padding in range(0, (maxPadding) ** 2):
555 for dilation in range(0, maxDilation ** 2):
556
Kevin Cheng550ccc52021-03-03 11:21:43 -0800557 s = [stride // maxStride + 1, stride % maxStride + 1]
558 p = [
559 (out_padding // (maxPadding * 1)) % maxPadding,
560 out_padding % maxPadding,
561 ]
562 d = [dilation // maxDilation + 1, dilation % maxDilation + 1]
Eric Kunzee5e26762020-10-13 16:11:07 -0700563
Kevin Cheng550ccc52021-03-03 11:21:43 -0800564 oh = (
565 ifm_shape[1]
566 - filter_shape[1]
567 - (filter_shape[1] - 1) * (d[0] - 1)
568 + 2 * p[0]
569 ) // s[0] + 1
Eric Kunzee5e26762020-10-13 16:11:07 -0700570
Kevin Cheng550ccc52021-03-03 11:21:43 -0800571 ow = (
572 ifm_shape[2]
573 - filter_shape[2]
574 - (filter_shape[2] - 1) * (d[1] - 1)
575 + 2 * p[1]
576 ) // s[1] + 1
Eric Kunzee5e26762020-10-13 16:11:07 -0700577
578 # Output shape
Kevin Cheng550ccc52021-03-03 11:21:43 -0800579 os = [ifm_shape[0], oh, ow, filter_shape[0]]
Eric Kunzee5e26762020-10-13 16:11:07 -0700580
Kevin Cheng550ccc52021-03-03 11:21:43 -0800581 arg_list.append(
582 (
583 "st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}".format(
584 s[0],
585 s[1],
586 p[0],
587 p[1],
588 d[0],
589 d[1],
590 os[0],
591 os[1],
592 os[2],
593 os[3],
594 ),
595 [s, p, d, os],
596 )
597 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700598
599 return arg_list
600
601 @staticmethod
602 def agPad(testGen, opName, shapeList, dtype):
603 arg_list = []
604 rank = len(shapeList[0])
605
Les Bell7ffccce2021-07-28 15:37:02 +0100606 # Exhaustively test combinations of padding on each side of each dimension
607 # - the range of padding values is defined by pad_min and pad_max
608 # - for padding >9, the name format needs to be more distinctive
609 pad_min, pad_max = 0, 1
610 pad_values = [x for x in range(pad_min, pad_max + 1)]
611 axis_pad_values = [x for x in itertools.product(pad_values, pad_values)]
612 shape_pad_values = itertools.product(*([axis_pad_values] * rank))
Eric Kunzee5e26762020-10-13 16:11:07 -0700613
Les Bell7ffccce2021-07-28 15:37:02 +0100614 for paddings in shape_pad_values:
615 name = "pad"
616 for r in range(rank):
617 before, after = paddings[r]
618 name = f"{name}{before}{after}"
619 arg_list.append((name, [np.array(paddings)]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700620
621 return arg_list
622
623 @staticmethod
624 def agPooling(testGen, opName, shapeList, dtype):
625 arg_list = []
626
627 shape = shapeList[0]
Kevin Cheng550ccc52021-03-03 11:21:43 -0800628 assert len(shape) == 4
Eric Kunzee5e26762020-10-13 16:11:07 -0700629
630 maxStride = testGen.args.max_pooling_stride
631 maxKernel = testGen.args.max_pooling_kernel
632 maxPadding = testGen.args.max_pooling_padding + 1
633
634 for kernel in range(0, maxKernel ** 2):
635 for stride in range(0, maxStride ** 2):
636 for padding in range(0, maxPadding ** 4):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800637 s = [stride // maxStride + 1, stride % maxStride + 1]
638 k = [(kernel // maxKernel) + 2, (kernel % maxKernel) + 2]
639 p = [
640 (padding // (maxPadding * 4)) % maxPadding,
641 (padding // (maxPadding * 2)) % maxPadding,
642 (padding // (maxPadding * 1)) % maxPadding,
643 padding % maxPadding,
644 ]
Eric Kunzee5e26762020-10-13 16:11:07 -0700645
Kevin Cheng550ccc52021-03-03 11:21:43 -0800646 arg_list.append(
647 (
648 "st{}{}_kern{}{}_pad{}{}{}{}".format(
649 s[0], s[1], k[0], k[1], p[0], p[1], p[2], p[3]
650 ),
Matthew Haddonb724efc2021-08-25 16:40:29 +0100651 [s, p, k],
Kevin Cheng550ccc52021-03-03 11:21:43 -0800652 )
653 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700654 return arg_list
655
656 @staticmethod
657 def agCast(testGen, opName, shapeList, inDtype):
658 arg_list = []
659
660 # Enumerate the output types here
661 if inDtype == DType.INT8:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800662 dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
Eric Kunzee5e26762020-10-13 16:11:07 -0700663 elif inDtype == DType.INT16:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800664 dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
Eric Kunzee5e26762020-10-13 16:11:07 -0700665 elif inDtype == DType.INT32:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800666 dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
Eric Kunzee5e26762020-10-13 16:11:07 -0700667 elif inDtype == DType.BOOL:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800668 dtypeList = [DType.INT8, DType.INT16, DType.INT32]
Eric Kunzee5e26762020-10-13 16:11:07 -0700669 elif inDtype == DType.FLOAT:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800670 dtypeList = [DType.INT8, DType.INT16, DType.INT32]
Eric Kunzee5e26762020-10-13 16:11:07 -0700671 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800672 raise Exception("Unexpected input dtype: {}".format(inDtype))
Eric Kunzee5e26762020-10-13 16:11:07 -0700673
674 for dtype in dtypeList:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800675 arg_list.append(("out{}".format(DTypeNames[dtype]), [dtype]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700676
677 return arg_list
678
679 @staticmethod
680 def agRescale(testGen, opName, shapeList, inDtype):
681 arg_list = []
682
683 # Enumerate the output types here
Matthew Haddoncac4ee92021-07-22 14:30:53 +0100684 for dtype in [DType.UINT8, DType.INT8, DType.INT16, DType.INT32]:
685 if inDtype == DType.UINT8 and dtype != DType.INT8:
686 # The only output dtype for UINT8 is INT8, skip all other combinations
687 continue
688 if inDtype != DType.INT8 and dtype == DType.UINT8:
689 # The only input dtype for UINT8 is INT8, skip all other combinations
690 continue
691
Kevin Cheng550ccc52021-03-03 11:21:43 -0800692 for scale32 in [False, True]:
693 for double_round in [False, True]:
694 for per_channel in [False, True]:
Eric Kunzee5e26762020-10-13 16:11:07 -0700695
696 if inDtype == DType.INT48 and scale32:
697 # Illegal condition. Must be scale32=False
698 continue
Matthew Haddoncac4ee92021-07-22 14:30:53 +0100699 if double_round and not scale32:
700 # Illegal condition. ERROR_IF(!scale32 && double_round)
701 continue
Eric Kunzee5e26762020-10-13 16:11:07 -0700702
Kevin Cheng550ccc52021-03-03 11:21:43 -0800703 arg_list.append(
704 (
705 "out{}_sc{}_dr{}_pc{}".format(
706 DTypeNames[dtype],
707 int(scale32),
708 int(double_round),
709 int(per_channel),
710 ),
711 [dtype, scale32, double_round, per_channel],
712 )
713 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700714
715 return arg_list
716
Kevin Chengaee1fac2020-11-11 13:54:06 -0800717 @staticmethod
718 def agMul(testGen, opName, shapeList, dtype):
719 arg_list = []
720
721 if dtype is DType.INT32:
722 for p in range(testGen.args.num_rand_permutations):
723
724 shift = testGen.randInt(0, 32)
725
Kevin Cheng550ccc52021-03-03 11:21:43 -0800726 arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
Kevin Chengaee1fac2020-11-11 13:54:06 -0800727 else:
Matthew Haddon43e37192021-07-09 14:13:02 +0100728 arg_list.append(("perm0_shift0", [0]))
Kevin Chengaee1fac2020-11-11 13:54:06 -0800729
730 return arg_list
731
732 @staticmethod
733 def agArithmeticRightShift(testGen, opName, shapeList, dtype):
734 arg_list = []
735
Kevin Cheng550ccc52021-03-03 11:21:43 -0800736 arg_list.append(("roundTrue", [True]))
737 arg_list.append(("roundFalse", [False]))
Kevin Chengaee1fac2020-11-11 13:54:06 -0800738
739 return arg_list
740
Eric Kunzee5e26762020-10-13 16:11:07 -0700741 # Helper function for reshape. Gets some factors of a larger number.
742 @staticmethod
743 def getFactors(val, start=1):
744 factors = []
745
Matthew Haddon2ad047d2021-06-22 16:55:23 +0100746 for i in range(start, int(np.sqrt(val)) + 1):
Eric Kunzee5e26762020-10-13 16:11:07 -0700747 if (val % i) == 0:
748 factors.append(i)
749
750 return factors
751
    @staticmethod
    def agReshape(testGen, opName, shapeList, dtype):
        """Generate random element-count-preserving target shapes for RESHAPE.

        For each requested permutation a random rank is chosen, factors of the
        total element count are combined into a new shape, and sometimes one
        dimension is replaced by -1 (inferred dimension).  Duplicate shapes are
        retried, with an escape counter to avoid looping forever.

        NOTE(review): behavior depends on the exact interleaving of
        testGen.randInt and testGen.rng.permutation calls — keep call order
        intact when modifying.
        """
        arg_list = []

        origShape = shapeList[0]

        # Total number of elements must be preserved by any new shape
        totalElements = 1
        for s in origShape:
            totalElements *= s

        # This code is NOT fast. Fortunately, the numbers are fairly small.
        factors = TosaArgGen.getFactors(totalElements)

        for p in range(testGen.args.num_rand_permutations):
            newRank = testGen.randInt(1, 7)
            # Not enough factors to build a shape of this rank; skip this try
            if len(factors) < newRank:
                continue

            found = True
            # escape_counter breaks while loop if it continues on for too long
            escape_counter = 0
            while found:
                newShape = []
                # Generate newShape ensuring it isn't a duplicate
                remainingElements = totalElements
                shuffledFactors = testGen.rng.permutation(factors)
                for i in range(1, newRank):
                    # pick rank-1 factors
                    newShape.append(shuffledFactors[0])
                    remainingElements = remainingElements // shuffledFactors[0]
                    shuffledFactors = testGen.rng.permutation(
                        TosaArgGen.getFactors(remainingElements)
                    )
                # Last dimension absorbs whatever is left
                newShape.append(remainingElements)

                # Toss in a -1 sometimes
                minusOne = testGen.randInt(0, newRank * 4)
                if minusOne < newRank:
                    newShape[minusOne] = -1

                # Check for duplicates
                found = False
                for name, other_shape in arg_list:
                    if other_shape[0] == newShape:
                        found = True
                        break

                escape_counter += 1
                if escape_counter >= 100:
                    break

            if not found:
                arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))

        return arg_list
807
Eric Kunzee5e26762020-10-13 16:11:07 -0700808 @staticmethod
809 def agTranspose(testGen, opName, shapeList, dtype):
810 arg_list = []
811
812 ifm_shape = shapeList[0]
813
Jeremy Johnsona6185572021-06-21 15:55:35 +0100814 # Get all permutations
815 permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]
Eric Kunzee5e26762020-10-13 16:11:07 -0700816
Jeremy Johnsona6185572021-06-21 15:55:35 +0100817 # Limit to possible permutations from shape dimension or argument setting
818 limit = min(len(permutations), testGen.args.num_rand_permutations)
Eric Kunzee5e26762020-10-13 16:11:07 -0700819
Jeremy Johnsona6185572021-06-21 15:55:35 +0100820 # Get random permutation generator that uses all permutations
821 random_permutations = testGen.rng.permutation(permutations)
Eric Kunzee5e26762020-10-13 16:11:07 -0700822
Jeremy Johnsona6185572021-06-21 15:55:35 +0100823 # Create list of required amount of permutations
Kevin Chengacb550f2021-06-29 15:32:19 -0700824 arg_list = [
825 ("perm{}".format(p), [random_permutations[p].tolist()])
826 for p in range(limit)
827 ]
Eric Kunzee5e26762020-10-13 16:11:07 -0700828 return arg_list
829
830 @staticmethod
831 def agSlice(testGen, opName, shapeList, dtype):
832 arg_list = []
833
834 ifm_shape = shapeList[0]
835 rank = len(ifm_shape)
836
837 for p in range(testGen.args.num_rand_permutations):
838 begin = []
839 size = []
840
Kevin Cheng550ccc52021-03-03 11:21:43 -0800841 valid = True
Eric Kunzee5e26762020-10-13 16:11:07 -0700842
843 for i in range(rank):
844 if ifm_shape[i] > 1:
845 begin.append(testGen.randInt(0, ifm_shape[i]))
846 size.append(testGen.randInt(0, ifm_shape[i] - begin[i]))
847
848 # Invalid slice size?
849 if size[i] == 0:
850 valid = False
851 else:
852 begin.append(0)
853 size.append(1)
854
855 if valid:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800856 arg_list.append(("perm{}".format(p), [begin, size]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700857 return arg_list
858
859 @staticmethod
860 def agTile(testGen, opName, shapeList, dtype):
861 arg_list = []
862
863 ifm_shape = shapeList[0]
864 rank = len(ifm_shape)
865
866 for p in range(testGen.args.num_rand_permutations):
867
868 # Pick a few random, but small multiple values
869 # because otherwise this has a tendency to generate
870 # enormous tensors
871 multiples = []
872 for i in range(rank):
Matthew Haddon82ad4d62021-08-20 15:02:39 +0100873 if ifm_shape[i] > 1000:
874 # Multiple of 1 if ifm_shape dimension is large to reduce tensor size
875 multiples.append(1)
876 elif max(ifm_shape) > 1000:
877 multiples.append(2)
878 else:
879 multiples.append(testGen.randInt(1, 4))
Kevin Cheng550ccc52021-03-03 11:21:43 -0800880 arg_list.append(("perm{}".format(p), [multiples]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700881
882 return arg_list
883
    @staticmethod
    def agResize(testGen, opName, shapeList, dtype):
        """Generate argument sets for RESIZE tests.

        For each mode (NEAREST, BILINEAR) and each legal output dtype for
        the input dtype, random output dimensions are chosen and the
        stride/offset are derived so input and output centers align.
        FLOAT tests carry floating-point stride/offset; integer tests carry
        fixed-point stride/offset scaled by 2**shift.
        """
        arg_list = []

        ifm_shape = shapeList[0]

        for m in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:

            # Exclude illegal {mode, type} configurations. Pick legal output types
            if m == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif m == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            elif dtype == DType.FLOAT:
                outputDTypeList = [DType.FLOAT]
            else:
                continue

            for outputDType in outputDTypeList:
                for perm in range(testGen.args.num_rand_permutations):

                    # Randomly generate legal output dimensions and shift
                    # and then compute the stride and offset based on them
                    output_dims = [testGen.randInt(1), testGen.randInt(1)]
                    # Centers of input/output (H, W) used to align the sampling grid
                    in_center_h = (ifm_shape[1] - 1) / 2.0
                    in_center_w = (ifm_shape[2] - 1) / 2.0
                    out_center_h = (output_dims[0] - 1) / 2.0
                    out_center_w = (output_dims[1] - 1) / 2.0

                    fp_stride_y = float(ifm_shape[1]) / float(output_dims[0])
                    fp_stride_x = float(ifm_shape[2]) / float(output_dims[1])
                    fp_offset_y = in_center_h - fp_stride_y * out_center_h
                    fp_offset_x = in_center_w - fp_stride_x * out_center_w

                    if outputDType == DType.FLOAT:
                        # FLOAT uses the *_fp fields; fixed-point fields are zeroed
                        shift = 0
                        stride = [0, 0]
                        offset = [0, 0]
                        stride_fp = [fp_stride_y, fp_stride_x]
                        offset_fp = [fp_offset_y, fp_offset_x]
                        arg_list.append(
                            (
                                "mode{}_odim{}x{}_out{}_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}".format(
                                    "N" if m == ResizeMode.NEAREST else "B",
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride_fp[0],
                                    stride_fp[1],
                                    offset_fp[0],
                                    offset_fp[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )
                    else:
                        # Integer path: quantize stride/offset as fixed point
                        # scaled by 2**shift
                        shift = 11
                        unit = float(1 << shift)
                        stride_y = int(round(fp_stride_y * unit))
                        stride_x = int(round(fp_stride_x * unit))
                        offset_y = int(round(fp_offset_y * unit))
                        offset_x = int(round(fp_offset_x * unit))

                        # Reduce shift until stride and offset fit in a
                        # signed 16-bit range
                        while (
                            stride_y >= 32768
                            or stride_x >= 32768
                            or offset_y >= 32768
                            or offset_x >= 32768
                            or offset_y < -32768
                            or offset_x < -32768
                        ):
                            shift = shift - 1
                            unit = float(1 << shift)
                            stride_y = int(round(fp_stride_y * unit))
                            stride_x = int(round(fp_stride_x * unit))
                            offset_y = int(round(fp_offset_y * unit))
                            offset_x = int(round(fp_offset_x * unit))

                        stride = [stride_y, stride_x]
                        offset = [offset_y, offset_x]

                        # Floating-point fields are unused on the integer path
                        stride_fp = [0.0, 0.0]
                        offset_fp = [0.0, 0.0]

                        arg_list.append(
                            (
                                "mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}".format(
                                    "N" if m == ResizeMode.NEAREST else "B",
                                    shift,
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride[0],
                                    stride[1],
                                    offset[0],
                                    offset[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )

        return arg_list
1010
1011 def agCondIf(testGen, opName, shapeList, dtype):
1012 # CondIf generates the condition values here.
1013 # Convert to tensors in the build function, along with the
1014 # then and else blocks
1015 arg_list = []
1016
1017 for c in [False, True]:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001018 arg_list.append(("cond{}".format(int(c)), [c]))
Eric Kunzee5e26762020-10-13 16:11:07 -07001019
1020 return arg_list
1021
1022 def agWhileLoop(testGen, opName, shapeList, dtype):
1023 # While loop: 0 iterations, 1, more than 1
1024 arg_list = []
1025
1026 for iter in [0, 1, 4]:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001027 arg_list.append(("iter{}".format(iter), [iter]))
Eric Kunzee5e26762020-10-13 16:11:07 -07001028
1029 return arg_list
1030
Matthew Haddonb724efc2021-08-25 16:40:29 +01001031class TosaInvalidValidator:
1032
1033 @staticmethod
1034 def ivWrongDataTypeOrModeResize(**kwargs):
1035 input_dtype = kwargs["input_dtype"]
1036 args = kwargs["args"]
1037 mode = args[0]
1038 stride = args[1]
1039 stride_fp = args[4]
1040 output_dtype = args[8]
1041
1042 if mode == ResizeMode.BILINEAR:
1043 # Invalid output data type / Invalid input datatype
1044 return (
1045 not (input_dtype == DType.INT8 and output_dtype == DType.INT32) or
1046 not (input_dtype == DType.INT16 and output_dtype == DType.INT48) or
1047 not (input_dtype == DType.FLOAT and output_dtype == DType.FLOAT) or
1048 (input_dtype not in [DType.INT8, DType.INT32, DType.FLOAT])
1049 )
1050 elif mode == ResizeMode.NEAREST:
1051 # Invalid output data type / Invalid input datatype
1052 return (
1053 (input_dtype != output_dtype) or
1054 (input_dtype not in [DType.INT8, DType.INT32, DType.FLOAT])
1055 )
1056 else:
1057 # Invalid resize mode
1058 return True
1059
1060 @staticmethod
1061 def ivBadStride(**kwargs):
1062 input_dtype = kwargs["input_dtype"]
1063 args = kwargs["args"]
1064 stride_x = args[1][0]
1065 stride_y = args[1][1]
1066 stride_fp_x = args[4][0]
1067 stride_fp_y = args[4][1]
1068
1069 if input_dtype == DType.FLOAT:
1070 if stride_fp_x <= 0 or stride_fp_y <= 0:
1071 # Negative or zero stride
1072 return True
1073 else:
1074 if stride_x <= 0 or stride_y <= 0:
1075 # Negative or zero stride
1076 return True
1077 return False
1078
1079
1080
1081
1082 @staticmethod
1083 def ivHeightWidthSmallerZero(**kwargs):
1084 opName = kwargs['opName']
1085
1086 inputShapes = kwargs['shapeList']
1087 input = inputShapes[0]
1088 if not opName.endswith("pool2d"):
1089 filter = inputShapes[1]
1090
1091 args = kwargs['args']
1092 strides = args[0]
1093 padding = args[1]
1094 dilations = args[2]
1095 if opName.endswith("pool2d"):
1096 kernel = args[2]
1097
1098 if opName.startswith('conv2d'):
1099 h = (
1100 input[1]
1101 - filter[1]
1102 - (filter[1] - 1) * (dilations[0] - 1)
1103 + padding[0]
1104 + padding[1]
1105 ) // strides[0] + 1
1106
1107 w = (
1108 input[2]
1109 - filter[2]
1110 - (filter[2] - 1) * (dilations[1] - 1)
1111 + padding[2]
1112 + padding[3]
1113 ) // strides[1] + 1
1114 elif opName.startswith("depthwise_conv2d"):
1115 h = (
1116 input[1]
1117 - filter[0]
1118 - (filter[0] - 1) * (dilations[0] - 1)
1119 + padding[0]
1120 + padding[1]
1121 ) // strides[0] + 1
1122
1123 w = (
1124 input[2]
1125 - filter[1]
1126 - (filter[1] - 1) * (dilations[1] - 1)
1127 + padding[2]
1128 + padding[3]
1129 ) // strides[1] + 1
1130 elif opName.endswith("pool2d"):
1131 h = (input[1] + padding[0] + padding[1] + strides[0] - kernel[0]) // strides[0]
1132 w = (input[2] + padding[2] + padding[3] + strides[1] - kernel[1]) // strides[1]
1133 else:
1134 assert False, "Unrecognized Op"
1135
1136 if h <= 0 or w <= 0:
1137 # Invalid parameter combination
1138 return True
1139 return False
1140
1141 @staticmethod
1142 def ivNonPositiveOutputShape(**kwargs):
1143 args = kwargs['args']
1144 output_shape = args[3]
1145 if output_shape[1] <= 0 or output_shape[2] <= 0:
1146 # Negative output shape
1147 return True
1148 return False
1149
1150
Kevin Cheng550ccc52021-03-03 11:21:43 -08001151
Eric Kunzee5e26762020-10-13 16:11:07 -07001152class TosaTestGen:
Jeremy Johnson97eb75f2021-07-08 11:58:02 +01001153 # Maximum rank of tensor supported by test generator.
1154 TOSA_TENSOR_MAX_RANK = 6
1155
Eric Kunzee5e26762020-10-13 16:11:07 -07001156 def __init__(self, args):
1157 self.args = args
1158 self.basePath = args.output_dir
1159 self.random_seed = args.random_seed
1160 self.ser = None
1161 self.rng = np.random.default_rng(self.random_seed)
1162 self.createDynamicOpLists()
1163 self.initOpListDefaults()
1164 self.quantGen = TosaQuantGen()
1165 # Force makeShape to do a specific starting shape
1166 self.targetted_shape = None
1167
1168 def createSerializer(self, opName, testPath):
1169 self.testPath = os.path.join(opName, testPath)
1170
1171 fullPath = os.path.join(self.basePath, self.testPath)
1172 os.makedirs(fullPath, exist_ok=True)
1173 self.ser = ts.TosaSerializer(fullPath)
1174
    def getSerializer(self):
        """Return the current serializer (None until createSerializer runs)."""
        return self.ser
1177
1178 def serialize(self, testName):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001179 with open(
1180 os.path.join(self.basePath, self.testPath, "{}.tosa".format(testName)), "wb"
1181 ) as fd:
Eric Kunzee5e26762020-10-13 16:11:07 -07001182 fd.write(self.ser.serialize())
1183
Kevin Cheng550ccc52021-03-03 11:21:43 -08001184 with open(os.path.join(self.basePath, self.testPath, "desc.json"), "w") as fd:
1185 fd.write(self.ser.writeJson("{}.tosa".format(testName)))
Eric Kunzee5e26762020-10-13 16:11:07 -07001186
Matthew Haddon74567092021-07-16 15:38:20 +01001187 def resetRNG(self, seed=None):
1188 if seed == None:
1189 seed = self.random_seed + 1
1190 self.rng = np.random.default_rng(seed)
1191
Eric Kunzee5e26762020-10-13 16:11:07 -07001192 def getRandTensor(self, shape, dtype):
Eric Kunzee5e26762020-10-13 16:11:07 -07001193 if dtype == DType.BOOL:
1194 np_dt = np.bool
1195 return np.bool_(self.rng.choice(a=[False, True], size=shape))
Kevin Chenga9017402021-07-28 17:19:23 -07001196 # TOSA specific INT4 weight range from -7 to 7
Eric Kunzee5e26762020-10-13 16:11:07 -07001197 elif dtype == DType.INT4:
Kevin Chenga9017402021-07-28 17:19:23 -07001198 return np.int32(self.rng.integers(low=-7, high=8, size=shape))
Eric Kunzee5e26762020-10-13 16:11:07 -07001199 elif dtype == DType.INT8:
Jeremy Johnson18e26662021-07-22 16:15:29 +01001200 return np.int32(self.rng.integers(low=-128, high=128, size=shape))
1201 elif dtype == DType.UINT8:
1202 return np.int32(self.rng.integers(low=0, high=256, size=shape))
Eric Kunzee5e26762020-10-13 16:11:07 -07001203 elif dtype == DType.INT16:
1204 return np.int32(self.rng.integers(low=-32768, high=32768, size=shape))
1205 elif dtype == DType.INT32:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001206 return np.int32(
1207 self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape)
1208 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001209 elif dtype == DType.INT48:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001210 return np.int64(
1211 self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape)
1212 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001213 elif dtype == DType.FLOAT:
Jeremy Johnson18e26662021-07-22 16:15:29 +01001214 return np.float32(self.rng.random(size=shape))
Eric Kunzee5e26762020-10-13 16:11:07 -07001215 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001216 raise Exception("Unrecognized Dtype: {}".format(dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07001217
Kevin Cheng989cb052021-04-28 16:29:44 -07001218 def buildPlaceholderTensors(self, shape_list, dtype_list):
Eric Kunzee5e26762020-10-13 16:11:07 -07001219 placeholders = []
1220
Kevin Cheng989cb052021-04-28 16:29:44 -07001221 assert len(shape_list) == len(dtype_list)
1222
1223 for idx, shape in enumerate(shape_list):
1224 arr = self.getRandTensor(shape, dtype_list[idx])
1225 placeholders.append(self.ser.addPlaceholder(shape, dtype_list[idx], arr))
Eric Kunzee5e26762020-10-13 16:11:07 -07001226
1227 return placeholders
1228
Kevin Cheng989cb052021-04-28 16:29:44 -07001229 def buildConstTensors(self, shape_list, dtype_list):
Eric Kunzee5e26762020-10-13 16:11:07 -07001230 consts = []
1231
Kevin Cheng989cb052021-04-28 16:29:44 -07001232 assert len(shape_list) == len(dtype_list)
1233
1234 for idx, shape in enumerate(shape_list):
1235 arr = self.getRandTensor(shape, dtype_list[idx])
1236 consts.append(self.ser.addConst(shape, dtype_list[idx], arr))
Eric Kunzee5e26762020-10-13 16:11:07 -07001237
1238 return consts
1239
1240 def makeShape(self, rank):
1241 if self.targetted_shape:
1242 return np.int32(self.targetted_shape)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001243 return np.int32(
1244 self.rng.integers(
1245 low=self.args.tensor_shape_range[0],
1246 high=self.args.tensor_shape_range[1],
1247 size=rank,
1248 )
1249 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001250
    def setTargetShape(self, shape):
        # Force makeShape to return this exact shape (a falsy value
        # re-enables random shape generation)
        self.targetted_shape = shape
1253
1254 def randInt(self, low=0, high=256):
1255 return np.int32(self.rng.integers(low=low, high=high, size=1))[0]
1256
1257 def getRandNumberDType(self, dtype):
1258 if dtype == DType.FLOAT:
1259 return self.rng.random()
1260 elif dtype == DType.BOOL:
1261 return self.rng.choice([False, True])
Kevin Chenga9017402021-07-28 17:19:23 -07001262 # TOSA specific INT4 weight range from -7 to 7
Eric Kunzee5e26762020-10-13 16:11:07 -07001263 elif dtype == DType.INT4:
Kevin Chenga9017402021-07-28 17:19:23 -07001264 low, high = (-7, 8)
Eric Kunzee5e26762020-10-13 16:11:07 -07001265 elif dtype == DType.INT8:
Jeremy Johnson18e26662021-07-22 16:15:29 +01001266 low, high = (-128, 128)
Eric Kunzee5e26762020-10-13 16:11:07 -07001267 elif dtype == DType.INT16:
1268 low, high = (-32768, 32768)
1269 elif dtype == DType.INT32:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001270 low, high = (-(1 << 31), (1 << 31))
Eric Kunzee5e26762020-10-13 16:11:07 -07001271 elif dtype == DType.INT48:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001272 low, high = (-(1 << 47), (1 << 47))
Eric Kunzee5e26762020-10-13 16:11:07 -07001273 # Special size
1274 return np.int64(self.rng.integers(low, high, size=1))[0]
1275 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001276 raise Exception("Unknown dtype: {}".format(dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07001277
1278 return np.int32(self.rng.integers(low, high, size=1))[0]
1279
1280 def shapeStr(self, shape):
1281
1282 sStr = []
1283 # Convert to strings
1284 for i in shape:
1285 sStr.append(str(i))
1286
Kevin Cheng550ccc52021-03-03 11:21:43 -08001287 return "x".join(sStr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001288
1289 def typeStr(self, t):
Kevin Cheng989cb052021-04-28 16:29:44 -07001290 if isinstance(t, list):
1291 assert len(t) >= 2
1292 return "{}x{}".format(self.typeStr(t[0]), self.typeStr(t[1]))
Eric Kunzee5e26762020-10-13 16:11:07 -07001293 else:
Kevin Cheng989cb052021-04-28 16:29:44 -07001294 if t == DType.BOOL:
1295 return "b"
1296 elif t == DType.INT4:
1297 return "i4"
1298 elif t == DType.INT8:
1299 return "i8"
1300 elif t == DType.UINT8:
1301 return "u8"
1302 elif t == DType.INT16:
1303 return "i16"
1304 elif t == DType.INT32:
1305 return "i32"
1306 elif t == DType.INT48:
1307 return "i48"
1308 elif t == DType.FLOAT:
1309 return "float"
1310 else:
1311 raise Exception("Unknown dtype, cannot convert to string: {}".format(t))
Eric Kunzee5e26762020-10-13 16:11:07 -07001312
1313 def typeWidth(self, t):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001314 """ Get the datatype width for integer types"""
Kevin Cheng3a478572021-01-22 17:21:02 -08001315 if t == DType.INT4:
Eric Kunzee5e26762020-10-13 16:11:07 -07001316 return 4
1317 elif t == DType.INT8:
1318 return 8
Kevin Cheng3a478572021-01-22 17:21:02 -08001319 elif t == DType.UINT8:
1320 return 8
Eric Kunzee5e26762020-10-13 16:11:07 -07001321 elif t == DType.INT16:
1322 return 16
1323 elif t == DType.INT32:
1324 return 32
1325 elif t == DType.INT48:
1326 return 48
1327 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001328 raise Exception("Unknown dtype, cannot convert to string: {}".format(t))
Eric Kunzee5e26762020-10-13 16:11:07 -07001329
1330 # Argument generators
1331 # Returns a list of tuples (stringDescriptor, [build_fcn_arg_list])
1332 # Where the string descriptor is used to generate the test name and
1333 # The build_fcn_arg_list is expanded and passed to the operator test
1334 # build function
1335
Kevin Cheng550ccc52021-03-03 11:21:43 -08001336 def build_unary(self, op, a, qinfo=None):
Eric Kunzee5e26762020-10-13 16:11:07 -07001337 result_tens = OutputShaper.unaryOp(self.ser, a)
1338 self.ser.addOperator(op, [a.name], [result_tens.name], None, qinfo)
1339 return result_tens
1340
1341 def build_binary_broadcast(self, op, a, b):
1342 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1343 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1344 return result_tens
1345
1346 def build_binary_nonbroadcast(self, op, a, b):
1347 result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b)
1348 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1349 return result_tens
1350
Kevin Chengaee1fac2020-11-11 13:54:06 -08001351 def build_arithmetic_right_shift(self, op, a, b, round):
1352 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1353
1354 attr = ts.TosaSerializerAttribute()
1355 attr.ArithmeticRightShiftAttribute(round)
1356
1357 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
1358 return result_tens
1359
1360 def build_mul(self, op, a, b, shift):
Eric Kunzee5e26762020-10-13 16:11:07 -07001361 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1362
1363 # Special for multiply:
1364 # Force the result to INT32 for INT types
1365 if a.dtype != DType.FLOAT:
1366 result_tens.setDtype(DType.INT32)
1367
Kevin Chengaee1fac2020-11-11 13:54:06 -08001368 attr = ts.TosaSerializerAttribute()
1369 attr.MulAttribute(shift)
1370
1371 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001372 return result_tens
1373
1374 def build_table(self, op, a):
Jeremy Johnsonf54d8a22021-07-20 16:01:06 +01001375 # Constant size depending on type, random values
1376 if a.dtype == DType.INT16:
Kevin Chengacb550f2021-06-29 15:32:19 -07001377 table_dtype = DType.INT16
Jeremy Johnsonf54d8a22021-07-20 16:01:06 +01001378 table_arr = self.getRandTensor([513], table_dtype)
1379 else:
1380 assert a.dtype == DType.INT8
1381 table_dtype = DType.INT8
1382 table_arr = self.getRandTensor([256], table_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001383
Jeremy Johnsonf54d8a22021-07-20 16:01:06 +01001384 table_tens = self.ser.addConst(table_arr.shape, table_dtype, table_arr)
1385 result_tens = OutputShaper.tableOp(self.ser, a, table_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001386 self.ser.addOperator(op, [a.name, table_tens.name], [result_tens.name], None)
1387
1388 return result_tens
1389
1390 def build_select(self, op, cond, a, b):
Eric Kunzee5e26762020-10-13 16:11:07 -07001391 result_tens = OutputShaper.selectOp(self.ser, cond, a, b)
1392 self.ser.addOperator(op, [cond.name, a.name, b.name], [result_tens.name])
Eric Kunzee5e26762020-10-13 16:11:07 -07001393 return result_tens
1394
1395 def build_comparison(self, op, a, b):
1396 result_tens = OutputShaper.binaryComparisonOp(self.ser, a, b)
1397 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1398 return result_tens
1399
1400 def build_argmax(self, op, a, axis):
1401 result_tens = OutputShaper.argmaxOp(self.ser, a, axis)
1402
1403 attr = ts.TosaSerializerAttribute()
1404 attr.AxisAttribute(axis)
1405
1406 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1407 return result_tens
1408
Matthew Haddonb724efc2021-08-25 16:40:29 +01001409 def build_pool2d(self, op, input, stride, pad, kernel, qinfo=None):
Eric Kunzee5e26762020-10-13 16:11:07 -07001410 result_tens = OutputShaper.pool2dOp(self.ser, input, kernel, stride, pad)
1411
1412 attr = ts.TosaSerializerAttribute()
Kevin Cheng93a16282021-08-31 16:14:03 -07001413 attr.PoolAttribute(kernel, stride, pad)
Eric Kunzee5e26762020-10-13 16:11:07 -07001414
1415 self.ser.addOperator(op, [input.name], [result_tens.name], attr, qinfo)
1416 return result_tens
1417
1418 def build_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001419 assert len(padding) == 4
1420 result_tens = OutputShaper.conv2dOp(
1421 self.ser, ifm, filter, strides, padding, dilations
1422 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001423
1424 attr = ts.TosaSerializerAttribute()
Kevin Cheng93a16282021-08-31 16:14:03 -07001425 attr.ConvAttribute(padding, strides, dilations)
Eric Kunzee5e26762020-10-13 16:11:07 -07001426
Kevin Cheng550ccc52021-03-03 11:21:43 -08001427 self.ser.addOperator(
1428 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
1429 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001430 return result_tens
1431
Kevin Cheng1533b852021-09-01 12:51:58 -07001432 def build_conv3d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
1433 assert len(padding) == 6
1434 result_tens = OutputShaper.conv3dOp(
1435 self.ser, ifm, filter, strides, padding, dilations
1436 )
1437
1438 attr = ts.TosaSerializerAttribute()
1439 attr.ConvAttribute(padding, strides, dilations)
1440
1441 self.ser.addOperator(
1442 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
1443 )
1444 return result_tens
1445
Kevin Cheng550ccc52021-03-03 11:21:43 -08001446 def build_transpose_conv2d(
Kevin Cheng989cb052021-04-28 16:29:44 -07001447 self, op, ifm, filter, bias, stride, outpad, dilation, output_shape, qinfo
Kevin Cheng550ccc52021-03-03 11:21:43 -08001448 ):
1449 assert len(outpad) == 2
Eric Kunzee5e26762020-10-13 16:11:07 -07001450 result_tens = OutputShaper.transposeConv2DOp(self.ser, ifm, output_shape)
1451
1452 attr = ts.TosaSerializerAttribute()
Kevin Cheng93a16282021-08-31 16:14:03 -07001453 attr.TransposeConvAttribute(outpad, stride, dilation, output_shape)
Eric Kunzee5e26762020-10-13 16:11:07 -07001454
Kevin Cheng550ccc52021-03-03 11:21:43 -08001455 self.ser.addOperator(
Kevin Cheng989cb052021-04-28 16:29:44 -07001456 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
Kevin Cheng550ccc52021-03-03 11:21:43 -08001457 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001458 return result_tens
1459
Kevin Cheng550ccc52021-03-03 11:21:43 -08001460 def build_depthwise_conv2d(
1461 self, op, ifm, filter, bias, strides, padding, dilations, qinfo
1462 ):
1463 result_tens = OutputShaper.depthwiseConv2dOp(
1464 self.ser, ifm, filter, strides, padding, dilations
1465 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001466
1467 attr = ts.TosaSerializerAttribute()
Kevin Cheng93a16282021-08-31 16:14:03 -07001468 attr.ConvAttribute(padding, strides, dilations)
Eric Kunzee5e26762020-10-13 16:11:07 -07001469
Kevin Cheng550ccc52021-03-03 11:21:43 -08001470 self.ser.addOperator(
1471 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
1472 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001473 return result_tens
1474
1475 def build_fully_connected(self, op, ifm, filter, bias, qinfo):
1476 result_tens = OutputShaper.fullyConnectedOp(self.ser, ifm, filter)
1477
Kevin Cheng550ccc52021-03-03 11:21:43 -08001478 self.ser.addOperator(
1479 op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo
1480 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001481 return result_tens
1482
1483 def build_matmul(self, op, a, b, qinfo):
1484 result_tens = OutputShaper.matmulOp(self.ser, a, b)
1485 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], None, qinfo)
1486 return result_tens
1487
1488 def build_reduce(self, op, a, axis):
1489 result_tens = OutputShaper.reduceOp(self.ser, a, axis)
1490
1491 attr = ts.TosaSerializerAttribute()
1492 attr.AxisAttribute(axis)
1493
1494 self.ser.addOperator(op, [a.name], result_tens.name, attr)
1495 return result_tens
1496
1497 def build_clamp(self, op, a):
1498 result_tens = OutputShaper.unaryOp(self.ser, a)
1499
1500 attr = ts.TosaSerializerAttribute()
Jeremy Johnson18e26662021-07-22 16:15:29 +01001501 v = [self.getRandNumberDType(a.dtype), self.getRandNumberDType(a.dtype)]
Eric Kunzee5e26762020-10-13 16:11:07 -07001502
1503 if a.dtype == DType.FLOAT:
1504 attr.ClampAttribute(0, 0, min(v), max(v))
1505 else:
1506 attr.ClampAttribute(min(v), max(v), 0, 0)
1507
1508 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1509 return result_tens
1510
1511 def build_leaky_relu(self, op, a):
1512 result_tens = OutputShaper.unaryOp(self.ser, a)
1513 attr = ts.TosaSerializerAttribute()
1514
1515 attr.LeakyReluAttribute(self.getRandNumberDType(DType.FLOAT))
1516
1517 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1518 return result_tens
1519
1520 # Needs an additional type/input
1521 def build_prelu(self, op, a):
1522 result_tens = OutputShaper.unaryOp(self.ser, a)
1523
1524 self.ser.addOperator(op, [a.name], [result_tens.name])
1525 return result_tens
1526
1527 def build_relun(self, op, a):
1528 result_tens = OutputShaper.unaryOp(self.ser, a)
1529
1530 attr = ts.TosaSerializerAttribute()
1531
1532 if a.dtype == DType.FLOAT:
1533 attr.ReluNAttribute(0, self.getRandNumberDType(a.dtype))
1534 else:
1535 attr.ReluNAttribute(self.getRandNumberDType(a.dtype), 0)
1536
1537 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1538 return result_tens
1539
1540 def build_sigmoid(self, op, a):
1541 result_tens = OutputShaper.unaryOp(self.ser, a)
1542 self.ser.addOperator(op, [a.name], [result_tens.name])
1543 return result_tens
1544
1545 def build_tanh(self, op, a):
1546 result_tens = OutputShaper.unaryOp(self.ser, a)
1547 self.ser.addOperator(op, [a.name], [result_tens.name])
1548 return result_tens
1549
Matthew Haddon818ab902021-07-27 09:12:49 +01001550 def build_concat(self, op, *a):
Kevin Cheng93a16282021-08-31 16:14:03 -07001551 assert type(a[-1]) == int
Matthew Haddon818ab902021-07-27 09:12:49 +01001552
1553 # To store variable length list of input tensors we need to store axis along with it
1554 axis = a[-1]
1555 a = a[:-1]
1556
1557 result_tens = OutputShaper.concatOp(self.ser, axis, *a)
Eric Kunzee5e26762020-10-13 16:11:07 -07001558
1559 attr = ts.TosaSerializerAttribute()
1560 attr.AxisAttribute(axis)
1561
Matthew Haddon818ab902021-07-27 09:12:49 +01001562 input_tensor_names = []
1563 for tensor in a:
1564 input_tensor_names.append(tensor.name)
1565
1566 self.ser.addOperator(op, input_tensor_names, [result_tens.name], attr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001567
1568 def build_pad(self, op, a, padding, qinfo):
1569 result_tens = OutputShaper.padOp(self.ser, a, padding)
1570
1571 # Need to turn the padding array into a TOSA tensor here.
1572 # This is one of the few tensor operands that does not get
1573 # randomly generated
Kevin Cheng550ccc52021-03-03 11:21:43 -08001574 padding_tens = self.ser.addConst(padding.shape, DType.INT32, padding)
Eric Kunzee5e26762020-10-13 16:11:07 -07001575
Kevin Cheng550ccc52021-03-03 11:21:43 -08001576 self.ser.addOperator(
1577 op, [a.name, padding_tens.name], [result_tens.name], None, qinfo
1578 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001579
1580 def build_reshape(self, op, a, newShape):
1581 result_tens = OutputShaper.reshapeOp(self.ser, a, newShape)
1582
1583 attr = ts.TosaSerializerAttribute()
1584 attr.ReshapeAttribute(newShape)
1585
1586 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1587 return result_tens
1588
1589 def build_reverse(self, op, a, axis):
1590 result_tens = OutputShaper.unaryOp(self.ser, a)
1591
1592 attr = ts.TosaSerializerAttribute()
1593 attr.AxisAttribute(axis)
1594
1595 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1596 return result_tens
1597
1598 def build_transpose(self, op, a, perms):
1599 result_tens = OutputShaper.transposeOp(self.ser, a, perms)
1600
Kevin Cheng550ccc52021-03-03 11:21:43 -08001601 perms_tens = self.ser.addConst([len(perms)], DType.INT32, np.int32(perms))
Eric Kunzee5e26762020-10-13 16:11:07 -07001602
1603 self.ser.addOperator(op, [a.name, perms_tens.name], [result_tens.name])
1604 return result_tens
1605
1606 def build_slice(self, op, a, begin, size):
1607 result_tens = OutputShaper.sliceOp(self.ser, a, begin, size)
1608
1609 attr = ts.TosaSerializerAttribute()
1610 attr.SliceAttribute(begin, size)
1611
1612 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1613 return result_tens
1614
1615 def build_tile(self, op, a, multiples):
1616 result_tens = OutputShaper.tileOp(self.ser, a, multiples)
1617
1618 attr = ts.TosaSerializerAttribute()
1619 attr.TileAttribute(multiples)
1620
1621 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1622 return result_tens
1623
Kevin Cheng77d0f762020-11-24 10:26:32 -08001624 def build_gather(self, op, values):
Eric Kunzee5e26762020-10-13 16:11:07 -07001625
1626 # Create a new indicies tensor
1627 # here with data that doesn't exceed the dimensions of the values tensor
1628
Kevin Cheng550ccc52021-03-03 11:21:43 -08001629 K = values.shape[1] # K
1630 W = self.randInt(
1631 self.args.tensor_shape_range[0], self.args.tensor_shape_range[1]
1632 ) # W
1633 indicies_arr = np.int32(
1634 self.rng.integers(low=0, high=K, size=[values.shape[0], W])
1635 ) # (N, W)
1636 indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001637
Kevin Cheng77d0f762020-11-24 10:26:32 -08001638 result_tens = OutputShaper.gatherOp(self.ser, values, indicies)
Eric Kunzee5e26762020-10-13 16:11:07 -07001639
Kevin Cheng77d0f762020-11-24 10:26:32 -08001640 self.ser.addOperator(op, [values.name, indicies.name], [result_tens.name])
Eric Kunzee5e26762020-10-13 16:11:07 -07001641
1642 return result_tens
1643
Kevin Cheng77d0f762020-11-24 10:26:32 -08001644 def build_scatter(self, op, values_in, input):
1645
1646 # Create a new indicies tensor
1647 # here with data that doesn't exceed the dimensions of the values_in tensor
1648
Kevin Cheng550ccc52021-03-03 11:21:43 -08001649 K = values_in.shape[1] # K
1650 W = input.shape[1] # W
1651 indicies_arr = np.int32(
1652 self.rng.integers(low=0, high=K, size=[values_in.shape[0], W])
1653 ) # (N, W)
1654 indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
Kevin Cheng77d0f762020-11-24 10:26:32 -08001655
1656 result_tens = OutputShaper.scatterOp(self.ser, values_in, indicies, input)
1657
Kevin Cheng550ccc52021-03-03 11:21:43 -08001658 self.ser.addOperator(
1659 op, [values_in.name, indicies.name, input.name], [result_tens.name]
1660 )
Kevin Cheng77d0f762020-11-24 10:26:32 -08001661
1662 return result_tens
1663
Kevin Cheng550ccc52021-03-03 11:21:43 -08001664 def build_resize(
1665 self,
1666 op,
1667 input,
1668 mode,
1669 stride,
1670 offset,
1671 shift,
1672 stride_fp,
1673 offset_fp,
1674 output_dims,
1675 input_dtype,
1676 output_dtype,
1677 ):
1678 result_tens = OutputShaper.resizeOp(
1679 self.ser,
1680 input,
1681 mode,
1682 stride,
1683 offset,
1684 shift,
1685 stride_fp,
1686 offset_fp,
1687 output_dims,
1688 input_dtype,
1689 output_dtype,
1690 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001691
1692 attr = ts.TosaSerializerAttribute()
Kevin Cheng77d0f762020-11-24 10:26:32 -08001693
Kevin Cheng550ccc52021-03-03 11:21:43 -08001694 attr.ResizeAttribute(
1695 output_dims, stride, offset, shift, stride_fp, offset_fp, mode
1696 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001697
1698 self.ser.addOperator(op, [input.name], [result_tens.name], attr)
1699 return result_tens
1700
1701 def build_identityn(self, op, val, val2):
1702
Kevin Cheng550ccc52021-03-03 11:21:43 -08001703 result_tens = OutputShaper.unaryOp(self.ser, val)
Eric Kunzee5e26762020-10-13 16:11:07 -07001704 result_tens2 = OutputShaper.unaryOp(self.ser, val2)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001705 self.ser.addOperator(
1706 op, [val.name, val2.name], [result_tens.name, result_tens2.name]
1707 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001708 return result_tens
1709
    def build_placeholder(self, op, val):
        """Route a placeholder tensor through an IDENTITY op.

        The `op` argument is unused; the method always emits Op.IDENTITY.
        """
        # Add an identity op to avoid warning in the reference model
        return self.build_unary(Op.IDENTITY, val)
1713
1714 # Type Conversion
1715 def build_cast(self, op, val, out_dtype):
1716 result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
1717 self.ser.addOperator(op, [val.name], [result_tens.name])
1718 return result_tens
1719
1720 def build_rescale(self, op, val, out_dtype, scale32, double_round, per_channel):
1721 result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
1722
1723 if per_channel:
1724 nc = val.shape[-1]
1725 else:
1726 nc = 1
1727
1728 in_type_width = self.typeWidth(val.dtype)
1729 out_type_width = self.typeWidth(out_dtype)
1730
Kevin Cheng3a478572021-01-22 17:21:02 -08001731 if val.dtype == DType.INT8:
Matthew Haddoncac4ee92021-07-22 14:30:53 +01001732 input_zp = self.randInt(-128, 128)
1733 in_type_width = in_type_width + 1
Kevin Chengacb550f2021-06-29 15:32:19 -07001734 elif val.dtype == DType.UINT8:
Matthew Haddoncac4ee92021-07-22 14:30:53 +01001735 input_zp = self.randInt(0, 256)
Eric Kunzee5e26762020-10-13 16:11:07 -07001736 in_type_width = in_type_width + 1
1737 else:
1738 input_zp = 0
1739
Kevin Cheng3a478572021-01-22 17:21:02 -08001740 if out_dtype == DType.INT8:
Matthew Haddoncac4ee92021-07-22 14:30:53 +01001741 output_zp = self.randInt(-128, 128)
1742 out_type_width = out_type_width + 1
1743 elif out_dtype == DType.UINT8:
1744 output_zp = self.randInt(0, 256)
Eric Kunzee5e26762020-10-13 16:11:07 -07001745 out_type_width = out_type_width + 1
1746 else:
1747 output_zp = 0
1748
1749 # Calculate scale based on:
1750 # scale = a *(2^output_width)/(2^input_width))
1751
1752 a = np.float32(self.rng.random(size=[nc]))
1753 scale_arr = a * np.float32((1 << out_type_width) / (1 << in_type_width))
1754
1755 if scale32:
1756 pass
Matthew Haddonb724efc2021-08-25 16:40:29 +01001757 # Cap the scaling at 2^31 - 1 for scale32
Eric Kunzee5e26762020-10-13 16:11:07 -07001758 scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1)
1759 else:
1760 # Cap the scaling at 2^15 - 1 for scale16
1761 scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)
1762
Kevin Cheng550ccc52021-03-03 11:21:43 -08001763 # print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))
Eric Kunzee5e26762020-10-13 16:11:07 -07001764
1765 multiplier_arr = np.int32(np.zeros(shape=[nc]))
1766 shift_arr = np.int32(np.zeros(shape=[nc]))
1767
1768 for i in range(nc):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001769 multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(
1770 scale_arr[i], scale32
1771 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001772
Kevin Cheng550ccc52021-03-03 11:21:43 -08001773 # print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp))
Eric Kunzee5e26762020-10-13 16:11:07 -07001774
1775 attr = ts.TosaSerializerAttribute()
Kevin Cheng550ccc52021-03-03 11:21:43 -08001776 attr.RescaleAttribute(
1777 input_zp,
1778 output_zp,
1779 multiplier_arr,
1780 shift_arr,
1781 scale32,
1782 double_round,
1783 per_channel,
1784 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001785
1786 self.ser.addOperator(op, [val.name], [result_tens.name], attr)
1787 return result_tens
1788
1789 def build_cond_if_const(self, op, then_tens, else_tens, cond):
1790 # For cond_if with constants, we're supplied with then/else tensors that we ignore
1791 # (except for the generated shap) and the condition. Build Then/Else blocks
1792 # and fill them with const nodes for the body.
1793
1794 # Condition tensor
Kevin Cheng550ccc52021-03-03 11:21:43 -08001795 cond_tens = self.ser.addConst([], DType.BOOL, [cond])
Eric Kunzee5e26762020-10-13 16:11:07 -07001796
1797 # Make then/else tensors
1798 out_shape = then_tens.shape
Jeremy Johnson18e26662021-07-22 16:15:29 +01001799 then_arr = np.int32(self.rng.integers(0, 256, size=out_shape))
1800 else_arr = np.int32(self.rng.integers(0, 256, size=out_shape))
Eric Kunzee5e26762020-10-13 16:11:07 -07001801
1802 # And the result tensor based on any of the outputs
Kevin Cheng550ccc52021-03-03 11:21:43 -08001803 result_tens = self.ser.addOutput(out_shape, DType.INT32)
Eric Kunzee5e26762020-10-13 16:11:07 -07001804
1805 # Create the attribute with the names of the then/else blocks
Kevin Cheng550ccc52021-03-03 11:21:43 -08001806 then_block = "THEN_BLOCK"
1807 else_block = "ELSE_BLOCK"
Eric Kunzee5e26762020-10-13 16:11:07 -07001808 attr = ts.TosaSerializerAttribute()
1809 attr.CondIfAttribute(then_block, else_block)
1810
1811 # Finally, build the op and the two blocks
1812 self.ser.addOperator(op, [cond_tens.name], [result_tens.name], attr)
1813
1814 self.ser.startBasicBlock(then_block)
1815 # Build the actual then/else tensors inside their blocks
Kevin Cheng550ccc52021-03-03 11:21:43 -08001816 then_tens = self.ser.addConst(out_shape, DType.INT32, then_arr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001817 self.ser.addOutputTensor(then_tens)
1818
1819 self.ser.startBasicBlock(else_block)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001820 else_tens = self.ser.addConst(out_shape, DType.INT32, else_arr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001821 self.ser.addOutputTensor(else_tens)
1822
1823 return result_tens
1824
1825 def build_cond_if_binary(self, op, a, b, cond):
1826 # For cond_if with a binary op in the then/else blocks, take a and b and
1827 # alternately add or subtract them based on the condition
1828
1829 # Condition tensor
Kevin Cheng550ccc52021-03-03 11:21:43 -08001830 cond_tens = self.ser.addConst([], DType.BOOL, [cond])
Eric Kunzee5e26762020-10-13 16:11:07 -07001831
Kevin Cheng550ccc52021-03-03 11:21:43 -08001832 result_tens = self.ser.addOutput(a.shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001833 self.ser.currBasicBlock.addOutput(result_tens.name)
1834
1835 # Create the attribute with the names of the then/else blocks
Kevin Cheng550ccc52021-03-03 11:21:43 -08001836 then_block = "THEN_BLOCK"
1837 else_block = "ELSE_BLOCK"
Eric Kunzee5e26762020-10-13 16:11:07 -07001838 attr = ts.TosaSerializerAttribute()
1839 attr.CondIfAttribute(then_block, else_block)
1840
1841 # Finally, build the op and the two blocks
Kevin Cheng550ccc52021-03-03 11:21:43 -08001842 self.ser.addOperator(
1843 op, [cond_tens.name, a.name, b.name], [result_tens.name], attr
1844 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001845
1846 self.ser.startBasicBlock(then_block)
1847 self.ser.addInputTensor(a)
1848 self.ser.addInputTensor(b)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001849 then_tens = self.ser.addOutput(a.shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001850 self.ser.addOperator(Op.ADD, [a.name, b.name], [then_tens.name])
1851
1852 self.ser.startBasicBlock(else_block)
1853 self.ser.addInputTensor(a)
1854 self.ser.addInputTensor(b)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001855 else_tens = self.ser.addOutput(a.shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001856 self.ser.addOperator(Op.SUB, [a.name, b.name], [else_tens.name])
1857
1858 return result_tens
1859
    def build_while_loop(self, op, a, iter_val):
        """Build a WHILE_LOOP that adds `a` into an accumulator `iter_val`
        times (counting iter down to zero) and returns the accumulator.

        Emits the WHILE_LOOP op in the current block, then the COND and BODY
        basic blocks; the call order matters because the serializer appends
        to whichever block was most recently started.
        """
        iter = self.ser.addPlaceholder([], DType.INT32, [np.int32(iter_val)])

        cond_block = "COND_BLOCK"
        body_block = "BODY_BLOCK"

        attr = ts.TosaSerializerAttribute()
        attr.WhileLoopAttribute(cond_block, body_block)

        # Accumulator tensor, zero-initialized to the shape/dtype of `a`
        acc_init_val = np.int32(np.zeros(a.shape))
        acc = self.ser.addPlaceholder(a.shape, a.dtype, acc_init_val)

        # Intermediate/output tensors for everything going through the loop
        iter_out = self.ser.addIntermediate(iter.shape, iter.dtype)
        a_out = self.ser.addIntermediate(a.shape, a.dtype)
        acc_out = self.ser.addIntermediate(acc.shape, acc.dtype)

        # While_loop operator: (iter, a, acc) -> (iter_out, a_out, acc_out)
        self.ser.addOperator(
            op,
            [iter.name, a.name, acc.name],
            [iter_out.name, a_out.name, acc_out.name],
            attr,
        )
        self.ser.addOutputTensor(acc_out)

        # COND block (input: iter, output: cond_tens )
        # Loop continues while iter > 0.
        self.ser.startBasicBlock(cond_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        zero_tens = self.ser.addConst([], DType.INT32, [np.int32(0)])
        cond_tens = self.ser.addOutput([], DType.BOOL)
        self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name], [cond_tens.name])

        # BODY block (input: a, acc, iter, output: a, acc, iter)
        # Note that local intermediate tensors need to be declared here for the outputs
        # Each iteration: acc += a, iter -= 1, a passes through unchanged.
        self.ser.startBasicBlock(body_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        one_tens = self.ser.addConst([], DType.INT32, [np.int32(1)])
        iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype)
        acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype)
        self.ser.addOperator(Op.ADD, [a.name, acc.name], [acc_body_out.name])
        self.ser.addOperator(Op.SUB, [iter.name, one_tens.name], [iter_body_out.name])
        self.ser.addOutputTensor(iter_body_out)
        self.ser.addOutputTensor(a)
        self.ser.addOutputTensor(acc_body_out)

        return acc_out
1913
Kevin Cheng550ccc52021-03-03 11:21:43 -08001914 def genOpTestList(
Matthew Haddon74567092021-07-16 15:38:20 +01001915 self, opName, shapeFilter=[None], rankFilter=None, dtypeFilter=None, testType='positive'
Kevin Cheng550ccc52021-03-03 11:21:43 -08001916 ):
Eric Kunzee5e26762020-10-13 16:11:07 -07001917
1918 try:
1919 op = self.TOSA_OP_LIST[opName]
1920 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001921 raise Exception("Cannot find op with name {}".format(opName))
Eric Kunzee5e26762020-10-13 16:11:07 -07001922
1923 # Initialize a new random number generator
1924 self.rng = np.random.default_rng(self.random_seed)
1925
Kevin Cheng550ccc52021-03-03 11:21:43 -08001926 build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001927
1928 # Generate the lists of arguments
Kevin Cheng550ccc52021-03-03 11:21:43 -08001929 rmin, rmax = op["rank"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001930
Jeremy Johnson97eb75f2021-07-08 11:58:02 +01001931 # Create a default testing rank range, 1-4 inclusive to keep test sizes reasonably small.
1932 default_test_rank_range = range(1, 5)
1933
Eric Kunzee5e26762020-10-13 16:11:07 -07001934 # Test list consists of a tuple of:
1935 # (opName, testNameStr, dtype, shapeList, argumentsList)
1936 testList = []
1937
1938 if not shapeFilter:
1939 shapeFilter = [None]
1940
Matthew Haddon74567092021-07-16 15:38:20 +01001941 # Positive test loop
1942 if testType in ['positive', 'both']:
1943 for r in range(rmin, rmax + 1):
Eric Kunzee5e26762020-10-13 16:11:07 -07001944
Matthew Haddon74567092021-07-16 15:38:20 +01001945 # Filter out the rank?
1946 if rankFilter is not None and r not in rankFilter:
1947 continue
Kevin Cheng1533b852021-09-01 12:51:58 -07001948 if opName.startswith("conv3d"):
1949 assert r == 5, "conv3d test must have input rank == 5"
1950 elif (
Matthew Haddon74567092021-07-16 15:38:20 +01001951 rankFilter is None
1952 and shapeFilter[0] is None
1953 and r not in default_test_rank_range
1954 ):
1955 continue
Eric Kunzee5e26762020-10-13 16:11:07 -07001956
Matthew Haddon74567092021-07-16 15:38:20 +01001957 for t in op["types"]:
Eric Kunzee5e26762020-10-13 16:11:07 -07001958
Matthew Haddon74567092021-07-16 15:38:20 +01001959 # Filter tests based on dtype?
1960 if dtypeFilter is not None:
1961 if not (
1962 t in dtypeFilter
1963 or (isinstance(t, list) and t[0] in dtypeFilter)
1964 ):
1965 continue
Eric Kunzee5e26762020-10-13 16:11:07 -07001966
Matthew Haddon74567092021-07-16 15:38:20 +01001967 # Create the placeholder and const tensors
1968 for shape in shapeFilter:
1969 # A None shape chooses a random shape of a given rank
Eric Kunzee5e26762020-10-13 16:11:07 -07001970
Matthew Haddon74567092021-07-16 15:38:20 +01001971 # Filter out by rank
1972 if shape is not None and len(shape) != r:
1973 continue
Eric Kunzee5e26762020-10-13 16:11:07 -07001974
Matthew Haddon74567092021-07-16 15:38:20 +01001975 self.setTargetShape(shape)
1976 shapeList = tgen_fcn(self, op, r)
Eric Kunzee5e26762020-10-13 16:11:07 -07001977
Matthew Haddon74567092021-07-16 15:38:20 +01001978 shapeStr = self.shapeStr(shapeList[0])
1979 typeStr = self.typeStr(t)
Eric Kunzee5e26762020-10-13 16:11:07 -07001980
Matthew Haddon74567092021-07-16 15:38:20 +01001981 # Argument lists consists of tuples of the (str, []) string representation and the build function argument list
1982 argList = []
1983 if agen_fcn:
1984 argList = agen_fcn(self, opName, shapeList, t)
Eric Kunzee5e26762020-10-13 16:11:07 -07001985 else:
Matthew Haddon74567092021-07-16 15:38:20 +01001986 argList = [("", [])]
Eric Kunzee5e26762020-10-13 16:11:07 -07001987
Matthew Haddon74567092021-07-16 15:38:20 +01001988 for argStr, args in argList:
1989 if argStr:
1990 testStr = "{}_{}_{}_{}".format(
1991 opName, shapeStr, typeStr, argStr
1992 )
1993 else:
1994 testStr = "{}_{}_{}".format(opName, shapeStr, typeStr)
1995
1996 testList.append((opName, testStr, t, shapeList, args))
1997
Matthew Haddonb724efc2021-08-25 16:40:29 +01001998 # Remove tests which are expected to fail but don't correlate to a ERROR_IF statement
1999 if "invalid_test_validators" in op:
2000 invalid_test_validators = op["invalid_test_validators"]
2001 clean_testList = []
2002 for test in testList:
2003 for validator_fcn in invalid_test_validators:
2004 remove_test = False
2005 if validator_fcn(opName=test[0], input_dtype=test[2], shapeList=test[3], args=test[4]):
2006 remove_test = True
2007 if not remove_test:
2008 clean_testList.append(test)
2009 testList = clean_testList
2010
Matthew Haddon74567092021-07-16 15:38:20 +01002011 # Reset RNG so both positive and negative tests are reproducible
2012 self.resetRNG()
2013 # Negative test loop
2014 if testType in ['negative', 'both']:
2015 print("Negative tests unsupported")
Eric Kunzee5e26762020-10-13 16:11:07 -07002016
2017 return testList
2018
    def serializeTest(self, opName, testStr, dtype_or_dtypeList, shapeList, testArgs):
        """Build and serialize one concrete test case.

        Looks up `opName` in TOSA_OP_LIST, generates the operand tensors
        (with op-specific value constraints for ADD/SUB, ARITHMETIC_RIGHT_SHIFT,
        SELECT, INTDIV, MUL and CONCAT), invokes the op's build function and
        serializes the result.
        """
        try:
            op = self.TOSA_OP_LIST[opName]
        except KeyError as e:
            raise Exception("Cannot find op with name {}".format(opName))

        # Create a serializer
        self.createSerializer(opName, testStr)

        build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
        pCount, cCount = op["operands"]
        num_operands = pCount + cCount

        # Expand a scalar dtype to one dtype per operand; CONCAT takes a
        # variable operand count so it expands per shape instead.
        if isinstance(dtype_or_dtypeList, list):
            dtypeList = dtype_or_dtypeList
        elif op["op"] == Op.CONCAT:
            dtypeList = [dtype_or_dtypeList] * len(shapeList)
        else:
            dtypeList = [dtype_or_dtypeList] * (num_operands)

        if op["op"] != Op.CONCAT:
            assert (
                len(shapeList) == num_operands
            ), "shapeList length {} must match number of operands {}".format(
                len(shapeList), num_operands
            )
            assert (
                len(dtypeList) == num_operands
            ), "dtypeList length {} must match number of operands {}".format(
                len(dtypeList), num_operands
            )

        try:
            qgen = op["qgen"]
        except KeyError:
            qgen = None

        # Build the random tensor operands and the test
        tens = []

        if (op["op"] == Op.ADD or op["op"] == Op.SUB) and dtypeList[0] == DType.INT32:
            # Make sure the operation does not cause value saturation - where
            # the number wraps due to limited number of bits to store the answer
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ADD / Op.SUB must have 2 placeholders, 0 consts"

            placeholders = []
            add = (op["op"] == Op.ADD)
            a_arr = self.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = self.getRandTensor(shapeList[1], dtypeList[1])
            # Compute the would-be result in int64 to detect int32 overflow
            if add:
                res_arr = np.add(a_arr, b_arr, dtype=np.int64)
            else:
                res_arr = np.subtract(a_arr, b_arr, dtype=np.int64)

            # Work out the saturation limits
            max_i32 = (1 << 31)-1
            min_i32 = -(1 << 31)
            max_arr = np.full(shapeList[1], max_i32)
            min_arr = np.full(shapeList[1], min_i32)

            # Find how much values exceed the maximum/minimums
            sat_max_arr = np.maximum(res_arr - max_arr, 0)
            sat_min_arr = np.minimum(res_arr - min_arr, 0)

            if not add:
                # Swap saturation values and negate values as we need to perform opposite operations
                sat_max_arr, sat_min_arr = -sat_min_arr, -sat_max_arr

            # Create new array of unsaturated values by clipping values as needed
            b_unsat_arr = b_arr
            if (sat_max_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_max_arr, dtype=np.int32)
                # Reduce axes in unsaturated tensor to match original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert ( dim == 1 ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amin(b_unsat_arr, axis=axis, keepdims=True)

            if (sat_min_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_min_arr, dtype=np.int32)
                # Reduce axes in unsaturated tensor to match original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert ( dim == 1 ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amax(b_unsat_arr, axis=axis, keepdims=True)

            placeholders.append(
                self.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                self.ser.addPlaceholder(shapeList[1], dtypeList[1], b_unsat_arr)
            )

            tens.extend(placeholders)
        elif op["op"] == Op.ARITHMETIC_RIGHT_SHIFT:
            # Force value of operand[1] to be within [0, num_bits]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"

            placeholders = []
            for idx, shape in enumerate(shapeList[:]):
                if idx == 1:
                    # Shift amount operand: bounded by the input bit width
                    if dtypeList[idx] == DType.INT8:
                        arr = np.int32(self.rng.integers(low=0, high=8, size=shape))
                    elif dtypeList[idx] == DType.INT16:
                        arr = np.int32(self.rng.integers(low=0, high=16, size=shape))
                    elif dtypeList[idx] == DType.INT32:
                        arr = np.int32(self.rng.integers(low=0, high=32, size=shape))
                    else:
                        raise Exception("OpArithmeticRightShift: invalid input dtype")
                else:
                    arr = self.getRandTensor(shape, dtypeList[idx])
                placeholders.append(self.ser.addPlaceholder(shape, dtypeList[idx], arr))

            tens.extend(placeholders)
        elif op["op"] == Op.SELECT:
            # Set datatype of condition tensor to boolean
            # NOTE(review): this mutates dtypeList in place, which may be the
            # caller-supplied list when a list was passed in - confirm callers
            # do not reuse it.
            dtypeList[0] = DType.BOOL
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
            )
            tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))
        elif op["op"] == Op.INTDIV:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.INTDIV must have 2 placeholders, 0 consts"

            placeholders = []

            # Two invalid cases for Op.INTDIV:
            # 1. divisor == 0
            # 2. dividend == -(1<<31) and divisor == -1
            # Keep re-rolling random operands until neither case is present.
            while True:
                dividend_arr = self.getRandTensor(shapeList[0], dtypeList[0])
                divisor_arr = self.getRandTensor(shapeList[1], dtypeList[1])

                if (divisor_arr == 0).any():
                    continue

                if (dividend_arr == -(2 ** 31)).any() and (divisor_arr == -1).any():
                    continue

                break

            placeholders.append(
                self.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
            )
            placeholders.append(
                self.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
            )

            tens.extend(placeholders)
        elif op["op"] == Op.MUL:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.MUL must have 2 placeholders, 0 consts"

            if dtypeList[0] == DType.FLOAT:
                tens.extend(self.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
            else:
                placeholders = []

                # Make sure multiply result in int32 range
                shift = testArgs[0]
                if dtypeList[0] == DType.INT8:
                    num_bits = 8
                elif dtypeList[0] == DType.INT16:
                    num_bits = 16
                elif dtypeList[0] == DType.INT32:
                    num_bits = 32
                else:
                    raise Exception("OpMul: invalid input dtype")

                # NOTE(review): this loop regenerates both operands on every
                # iteration using shapeList[0]/shapeList[1], so only the last
                # iteration's arrays survive - looks redundant; confirm intent.
                for idx, shape in enumerate(shapeList[:]):
                    low = -(2 ** (num_bits - 1))
                    high = (2 ** (num_bits - 1)) - 1

                    a_arr = np.int32(
                        self.rng.integers(low=low, high=high, size=shapeList[0])
                    )
                    b_arr = np.int32(
                        self.rng.integers(low=low, high=high, size=shapeList[1])
                    )

                    # Halve both operands until the (rounded, shifted) product
                    # fits in int32.
                    i = 0
                    while True:

                        a_arr_64 = a_arr.astype(np.int64)
                        b_arr_64 = b_arr.astype(np.int64)

                        if shift > 0:
                            rounding = 1 << (shift - 1)
                            result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
                        else:
                            result_arr = a_arr_64 * b_arr_64

                        if (result_arr > -(2 ** 31)).all() and (
                            result_arr <= ((2 ** 31) - 1)
                        ).all():
                            break

                        i = i + 1
                        a_arr = a_arr // 2
                        b_arr = b_arr // 2

                placeholders.append(
                    self.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
                )
                placeholders.append(
                    self.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
                )

                tens.extend(placeholders)
        elif op["op"] == Op.CONCAT:
            # Split the inputs into placeholder and const tensors per the
            # configured number of const concat inputs.
            count = len(shapeList) - self.args.num_const_inputs_concat
            if count < 1:
                count = 1
            if self.args.num_const_inputs_concat == 0:
                count = len(shapeList)

            shapeList = TosaTensorGen.tgConcatConstInput(self, shapeList, testArgs[0])
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:count], dtypeList[0:count])
            )
            tens.extend(self.buildConstTensors(shapeList[count:], dtypeList[count:]))
        else:
            # Default path: first pCount operands are placeholders, the rest
            # are consts.
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
            )
            tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))

        # Quantization info, when the op declares a generator
        if qgen is not None:
            qinfo = qgen(self, op, dtype_or_dtypeList)
        else:
            qinfo = None

        try:
            if qinfo is not None:
                resultName = build_fcn(self, op["op"], *tens, *testArgs, qinfo)
            else:
                resultName = build_fcn(self, op["op"], *tens, *testArgs)
        except TypeError as e:
            # Dump the call details before re-raising to ease debugging of
            # mismatched build-function signatures.
            print(
                "build_fcn: {}\nTensors: {}\nArgs: {}\n".format(
                    build_fcn, tens, testArgs
                )
            )
            raise e

        # Save the serialized test
        self.serialize("test")
Eric Kunzee5e26762020-10-13 16:11:07 -07002275
2276 def createDynamicOpLists(self):
2277
2278 # Dynamically create op lists for convolutions with a list of kernel sizes
Kevin Cheng1533b852021-09-01 12:51:58 -07002279 KERNELS_2D = [[1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3]]
Eric Kunzee5e26762020-10-13 16:11:07 -07002280
Kevin Cheng1533b852021-09-01 12:51:58 -07002281 for k in KERNELS_2D:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002282 testName = "conv2d_{}x{}".format(k[0], k[1])
2283 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST["conv2d_TEMPLATE"].copy()
2284 self.TOSA_OP_LIST[testName]["filter"] = k
2285 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07002286
Kevin Cheng550ccc52021-03-03 11:21:43 -08002287 testName = "depthwise_conv2d_{}x{}".format(k[0], k[1])
2288 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
2289 "depthwise_conv2d_TEMPLATE"
2290 ].copy()
2291 self.TOSA_OP_LIST[testName]["filter"] = k
2292 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07002293
Kevin Cheng550ccc52021-03-03 11:21:43 -08002294 testName = "transpose_conv2d_{}x{}".format(k[0], k[1])
2295 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
2296 "transpose_conv2d_TEMPLATE"
2297 ].copy()
2298 self.TOSA_OP_LIST[testName]["filter"] = k
2299 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07002300
Kevin Cheng1533b852021-09-01 12:51:58 -07002301 KERNELS_3D = [[1, 1, 1], [2, 1, 1], [1, 2, 1], [1, 1, 2]]
2302 for k in KERNELS_3D:
2303 testName = "conv3d_{}x{}x{}".format(k[0], k[1], k[2])
2304 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST["conv3d_TEMPLATE"].copy()
2305 self.TOSA_OP_LIST[testName]["filter"] = k
2306 self.TOSA_OP_LIST[testName]["template"] = False
2307
Eric Kunzee5e26762020-10-13 16:11:07 -07002308 # Delete any templates after having created any dynamic ops
2309 # This is a two-pass operation because it's bad practice to delete
2310 # keys from dictionaries while iterating
2311 keyList = []
2312 for k in self.TOSA_OP_LIST:
2313 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002314 if self.TOSA_OP_LIST[k]["template"] == True:
Eric Kunzee5e26762020-10-13 16:11:07 -07002315 keyList.append(k)
2316 continue
2317 except KeyError:
2318 pass
2319
2320 for k in keyList:
2321 del self.TOSA_OP_LIST[k]
2322
2323 def initOpListDefaults(self):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002324 """Fill in default fields for ops if they aren't already specified.
2325 Look for missing required fields (datastructure linting)."""
Eric Kunzee5e26762020-10-13 16:11:07 -07002326 for op in self.TOSA_OP_LIST:
2327
2328 # Required fields
2329 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002330 pl, c = self.TOSA_OP_LIST[op]["operands"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002331 except (KeyError, ValueError, TypeError):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002332 raise Exception(
2333 "Op {} is missing a valid operand tuple in TOSA_OP_LIST".format(op)
2334 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002335
2336 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002337 fcn, tgen, arggen = self.TOSA_OP_LIST[op]["build_fcn"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002338 except (KeyError, ValueError, TypeError):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002339 raise Exception(
2340 "Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST".format(
2341 op
2342 )
2343 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002344
2345 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002346 types = self.TOSA_OP_LIST[op]["types"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002347 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002348 raise Exception(
2349 "Op {} is missing a valid type list in TOSA_OP_LIST".format(op)
2350 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002351
2352 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002353 opcode = self.TOSA_OP_LIST[op]["op"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002354 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002355 raise Exception(
2356 "Op {} is missing the Op field in TOSA_OP_LIST".format(op)
2357 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002358
2359 # Put in default rank range, if missing
2360 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002361 rank = self.TOSA_OP_LIST[op]["rank"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002362 except KeyError:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002363 self.TOSA_OP_LIST[op]["rank"] = self.DEFAULT_RANK_RANGE
Eric Kunzee5e26762020-10-13 16:11:07 -07002364
    # Tensor operator list
    # 'op': op name
    # 'operands': tuple of (placeholder, const) operands
    # 'rank': optional, restricts rank to tuple inclusive of (min, max),
    # if not specified, defaults to DEFAULT_RANK_RANGE
    # 'build_fcn': tuple of the function to (build_operator(), TensorGen function, ArgGen enum)
    # 'types': array of datatypes to be tested
    TYPE_FP = [DType.FLOAT]

    TYPE_INT = [DType.INT8, DType.INT16, DType.INT32]  # Excludes INT4
    TYPE_INT_FP = [DType.INT8, DType.INT16, DType.INT32, DType.FLOAT]  # Excludes INT4

    TYPE_BOOL = [DType.BOOL]
    TYPE_FI32 = [DType.FLOAT, DType.INT32]
    TYPE_FIB = [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL]
    TYPE_FI16 = [DType.FLOAT, DType.INT16]

    TYPE_NARROW_INT_FP = [DType.INT8, DType.INT16, DType.FLOAT]

    # Convolution type combinations. Each list entry appears to be an
    # (input, weight, accumulator) dtype triplet, except the bare FLOAT
    # entry which covers the all-float case — confirm against the conv
    # build functions.
    TYPE_CONV = [
        [DType.INT8, DType.INT4, DType.INT32],
        [DType.INT8, DType.INT8, DType.INT32],
        [DType.INT16, DType.INT8, DType.INT48],
        DType.FLOAT,
    ]

    # Rank range applied to any TOSA_OP_LIST entry without an explicit 'rank'.
    DEFAULT_RANK_RANGE = (1, TOSA_TENSOR_MAX_RANK)
Eric Kunzee5e26762020-10-13 16:11:07 -07002392
2393 TOSA_OP_LIST = {
Jared Smolens573ecd42021-03-04 15:24:10 -08002394 # Tensor operators
Kevin Cheng550ccc52021-03-03 11:21:43 -08002395 "argmax": {
2396 "op": Op.ARGMAX,
2397 "operands": (1, 0),
2398 "build_fcn": (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2399 "types": TYPE_NARROW_INT_FP,
2400 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002401 "avg_pool2d": {
2402 "op": Op.AVG_POOL2D,
2403 "operands": (1, 0),
2404 "rank": (4, 4),
2405 "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
2406 "qgen": TosaQuantGen.qgUnary,
2407 "types": TYPE_NARROW_INT_FP,
Matthew Haddonb724efc2021-08-25 16:40:29 +01002408 "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,)
Jared Smolens573ecd42021-03-04 15:24:10 -08002409 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002410 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08002411 "conv2d_TEMPLATE": {
2412 "op": Op.CONV2D,
2413 "operands": (1, 2),
2414 "rank": (4, 4),
2415 "build_fcn": (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D),
2416 "qgen": TosaQuantGen.qgConv,
Kevin Cheng1533b852021-09-01 12:51:58 -07002417 "types": TYPE_CONV,
Matthew Haddonb724efc2021-08-25 16:40:29 +01002418 "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002419 "template": True,
2420 },
Kevin Cheng1533b852021-09-01 12:51:58 -07002421 # Templated operator. Filled in by createDynamicOpLists
2422 "conv3d_TEMPLATE": {
2423 "op": Op.CONV3D,
2424 "operands": (1, 2),
2425 "rank": (5, 5),
2426 "build_fcn": (build_conv3d, TosaTensorGen.tgConv3D, TosaArgGen.agConv3D),
2427 "qgen": TosaQuantGen.qgConv,
2428 "types": TYPE_CONV,
2429 "template": True,
2430 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002431 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08002432 "depthwise_conv2d_TEMPLATE": {
2433 "op": Op.DEPTHWISE_CONV2D,
2434 "operands": (1, 2),
2435 "filter": [1, 1],
2436 "rank": (4, 4),
2437 "build_fcn": (
2438 build_depthwise_conv2d,
2439 TosaTensorGen.tgDepthwiseConv2D,
2440 TosaArgGen.agConv2D,
2441 ),
2442 "qgen": TosaQuantGen.qgConv,
Kevin Cheng1533b852021-09-01 12:51:58 -07002443 "types": TYPE_CONV,
Matthew Haddonb724efc2021-08-25 16:40:29 +01002444 "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002445 "template": True,
2446 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002447 "fully_connected": {
2448 "op": Op.FULLY_CONNECTED,
2449 "operands": (1, 2),
2450 "rank": (2, 2),
2451 "build_fcn": (build_fully_connected, TosaTensorGen.tgFullyConnected, None),
2452 "qgen": TosaQuantGen.qgConv,
Kevin Cheng1533b852021-09-01 12:51:58 -07002453 "types": TYPE_CONV,
Jared Smolens573ecd42021-03-04 15:24:10 -08002454 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002455 "matmul": {
2456 "op": Op.MATMUL,
2457 "operands": (2, 0),
Kevin Cheng2d60f002021-06-09 14:18:32 -07002458 "rank": (3, 3),
Jared Smolens573ecd42021-03-04 15:24:10 -08002459 "build_fcn": (build_matmul, TosaTensorGen.tgMatmul, None),
2460 "qgen": TosaQuantGen.qgMatmul,
2461 "types": TYPE_NARROW_INT_FP,
2462 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002463 "max_pool2d": {
2464 "op": Op.MAX_POOL2D,
2465 "operands": (1, 0),
2466 "rank": (4, 4),
2467 "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
2468 "types": TYPE_NARROW_INT_FP,
Matthew Haddonb724efc2021-08-25 16:40:29 +01002469 "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,)
Jared Smolens573ecd42021-03-04 15:24:10 -08002470 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002471 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08002472 "transpose_conv2d_TEMPLATE": {
2473 "op": Op.TRANSPOSE_CONV2D,
Kevin Cheng989cb052021-04-28 16:29:44 -07002474 "operands": (1, 2),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002475 "rank": (4, 4),
2476 "build_fcn": (
2477 build_transpose_conv2d,
2478 TosaTensorGen.tgTransposeConv2D,
2479 TosaArgGen.agTransposeConv2D,
2480 ),
2481 "qgen": TosaQuantGen.qgConv,
Kevin Cheng1533b852021-09-01 12:51:58 -07002482 "types": TYPE_CONV,
Matthew Haddonb724efc2021-08-25 16:40:29 +01002483 "invalid_test_validators": (TosaInvalidValidator.ivNonPositiveOutputShape,),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002484 "template": True,
2485 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002486 # Activation functions
Kevin Cheng550ccc52021-03-03 11:21:43 -08002487 "clamp": {
2488 "op": Op.CLAMP,
2489 "operands": (1, 0),
2490 "build_fcn": (build_clamp, TosaTensorGen.tgBasic, None),
2491 "types": TYPE_NARROW_INT_FP,
2492 },
2493 "relun": {
2494 "op": Op.RELUN,
2495 "operands": (1, 0),
2496 "build_fcn": (build_relun, TosaTensorGen.tgBasic, None),
2497 "types": TYPE_FI32,
2498 },
2499 "sigmoid": {
2500 "op": Op.SIGMOID,
2501 "operands": (1, 0),
2502 "build_fcn": (build_sigmoid, TosaTensorGen.tgBasic, None),
2503 "types": TYPE_FP,
2504 },
2505 "tanh": {
2506 "op": Op.TANH,
2507 "operands": (1, 0),
2508 "build_fcn": (build_tanh, TosaTensorGen.tgBasic, None),
2509 "types": TYPE_FP,
2510 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002511 # Elementwise Binary Operators
2512 "add": {
2513 "op": Op.ADD,
2514 "operands": (2, 0),
2515 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2516 "types": TYPE_FI32,
2517 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002518 "arithmetic_right_shift": {
2519 "op": Op.ARITHMETIC_RIGHT_SHIFT,
2520 "operands": (2, 0),
2521 "build_fcn": (
2522 build_arithmetic_right_shift,
2523 TosaTensorGen.tgBroadcastFuzz,
2524 TosaArgGen.agArithmeticRightShift,
2525 ),
2526 "types": TYPE_INT,
2527 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002528 "bitwise_and": {
2529 "op": Op.BITWISE_AND,
2530 "operands": (2, 0),
2531 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2532 "types": TYPE_INT,
2533 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002534 "bitwise_or": {
2535 "op": Op.BITWISE_OR,
2536 "operands": (2, 0),
2537 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2538 "types": TYPE_INT,
2539 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002540 "bitwise_xor": {
2541 "op": Op.BITWISE_XOR,
2542 "operands": (2, 0),
2543 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2544 "types": TYPE_INT,
2545 },
Matthew Haddon459443c2021-08-23 16:43:13 +01002546 "intdiv": {
2547 "op": Op.INTDIV,
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07002548 "operands": (2, 0),
2549 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2550 "types": [DType.INT32],
2551 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002552 "logical_and": {
2553 "op": Op.LOGICAL_AND,
2554 "operands": (2, 0),
2555 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2556 "types": TYPE_BOOL,
2557 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002558 "logical_left_shift": {
2559 "op": Op.LOGICAL_LEFT_SHIFT,
2560 "operands": (2, 0),
2561 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2562 "types": TYPE_INT,
2563 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002564 "logical_right_shift": {
2565 "op": Op.LOGICAL_RIGHT_SHIFT,
2566 "operands": (2, 0),
2567 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2568 "types": TYPE_INT,
2569 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002570 "logical_or": {
2571 "op": Op.LOGICAL_OR,
2572 "operands": (2, 0),
2573 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2574 "types": TYPE_BOOL,
2575 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002576 "logical_xor": {
2577 "op": Op.LOGICAL_XOR,
2578 "operands": (2, 0),
2579 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2580 "types": TYPE_BOOL,
2581 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002582 "maximum": {
2583 "op": Op.MAXIMUM,
2584 "operands": (2, 0),
2585 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2586 "types": TYPE_FI32,
2587 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002588 "minimum": {
2589 "op": Op.MINIMUM,
2590 "operands": (2, 0),
2591 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2592 "types": TYPE_FI32,
2593 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002594 "mul": {
2595 "op": Op.MUL,
2596 "operands": (2, 0),
2597 "build_fcn": (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul),
2598 "types": TYPE_INT_FP,
2599 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002600 "pow": {
2601 "op": Op.POW,
2602 "operands": (2, 0),
2603 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBasic, None),
2604 "types": TYPE_FP,
2605 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002606 "sub": {
2607 "op": Op.SUB,
2608 "operands": (2, 0),
2609 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2610 "types": TYPE_FI32,
2611 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002612 "table": {
2613 "op": Op.TABLE,
2614 # Use the automatic generation functions to create the input array
2615 # but create the table tensor in the build function, as it may be
2616 # a different type from the input
2617 "operands": (1, 0),
2618 "build_fcn": (build_table, TosaTensorGen.tgBasic, None),
Jeremy Johnsonf54d8a22021-07-20 16:01:06 +01002619 "types": [DType.INT8, DType.INT16],
Jared Smolens573ecd42021-03-04 15:24:10 -08002620 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002621 # Elementwise Unary operators
2622 "abs": {
2623 "op": Op.ABS,
2624 "operands": (1, 0),
2625 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2626 "types": TYPE_FI32,
2627 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002628 "bitwise_not": {
2629 "op": Op.BITWISE_NOT,
2630 "operands": (1, 0),
2631 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2632 "types": TYPE_INT,
2633 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002634 "ceil": {
2635 "op": Op.CEIL,
2636 "operands": (1, 0),
2637 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2638 "types": TYPE_FP,
2639 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002640 "clz": {
2641 "op": Op.CLZ,
2642 "operands": (1, 0),
2643 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2644 "types": [DType.INT32],
2645 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002646 "exp": {
2647 "op": Op.EXP,
2648 "operands": (1, 0),
2649 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2650 "types": TYPE_FP,
2651 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002652 "floor": {
2653 "op": Op.FLOOR,
2654 "operands": (1, 0),
2655 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2656 "types": TYPE_FP,
2657 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002658 "log": {
2659 "op": Op.LOG,
2660 "operands": (1, 0),
2661 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2662 "types": TYPE_FP,
2663 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002664 "logical_not": {
2665 "op": Op.LOGICAL_NOT,
2666 "operands": (1, 0),
2667 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2668 "types": TYPE_BOOL,
2669 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002670 "negate": {
2671 "op": Op.NEGATE,
2672 "operands": (1, 0),
2673 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2674 "qgen": TosaQuantGen.qgUnary,
2675 "types": TYPE_INT_FP,
2676 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002677 "reciprocal": {
2678 "op": Op.RECIPROCAL,
2679 "operands": (1, 0),
2680 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2681 "types": TYPE_FP,
2682 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002683 "rsqrt": {
2684 "op": Op.RSQRT,
2685 "operands": (1, 0),
2686 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2687 "types": TYPE_FP,
2688 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002689 # Elementwise Ternary operators
2690 "select": {
2691 "op": Op.SELECT,
2692 "operands": (3, 0),
2693 "build_fcn": (build_select, TosaTensorGen.tgBroadcastFuzz, None),
2694 "types": TYPE_FIB,
2695 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002696 # Comparison operators
2697 "equal": {
2698 "op": Op.EQUAL,
2699 "operands": (2, 0),
2700 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2701 "types": TYPE_FI32,
2702 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002703 "greater_equal": {
2704 "op": Op.GREATER_EQUAL,
2705 "operands": (2, 0),
2706 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2707 "types": TYPE_FI32,
2708 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002709 "greater": {
2710 "op": Op.GREATER,
2711 "operands": (2, 0),
2712 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2713 "types": TYPE_FI32,
2714 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002715 # Reduction operators
2716 "reduce_all": {
2717 "op": Op.REDUCE_ALL,
2718 "operands": (1, 0),
2719 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2720 "types": TYPE_BOOL,
2721 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002722 "reduce_any": {
2723 "op": Op.REDUCE_ANY,
2724 "operands": (1, 0),
2725 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2726 "types": TYPE_BOOL,
2727 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002728 "reduce_max": {
2729 "op": Op.REDUCE_MAX,
2730 "operands": (1, 0),
2731 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2732 "types": TYPE_INT_FP,
2733 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002734 "reduce_min": {
2735 "op": Op.REDUCE_MAX,
2736 "operands": (1, 0),
2737 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2738 "types": TYPE_INT_FP,
2739 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002740 "reduce_product": {
2741 "op": Op.REDUCE_PRODUCT,
2742 "operands": (1, 0),
2743 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2744 "types": TYPE_FP,
2745 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002746 "reduce_sum": {
2747 "op": Op.REDUCE_SUM,
2748 "operands": (1, 0),
2749 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2750 "types": TYPE_FI32,
2751 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002752 # Data layout operators
Kevin Cheng550ccc52021-03-03 11:21:43 -08002753 "concat": {
2754 "op": Op.CONCAT,
2755 "operands": (2, 0),
Matthew Haddon818ab902021-07-27 09:12:49 +01002756 "build_fcn": (build_concat, TosaTensorGen.tgConcat, TosaArgGen.agAxis),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002757 "types": TYPE_FIB,
2758 },
2759 "pad": {
2760 "op": Op.PAD,
2761 "operands": (1, 0),
2762 "build_fcn": (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad),
2763 "qgen": TosaQuantGen.qgPad,
2764 "types": TYPE_FIB,
2765 },
2766 "reshape": {
2767 "op": Op.RESHAPE,
2768 "operands": (1, 0),
2769 "build_fcn": (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
2770 "types": TYPE_FIB,
2771 },
2772 "reverse": {
2773 "op": Op.REVERSE,
2774 "operands": (1, 0),
2775 "build_fcn": (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2776 "types": TYPE_FIB,
2777 },
2778 "slice": {
2779 "op": Op.SLICE,
2780 "operands": (1, 0),
2781 "build_fcn": (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
2782 "types": TYPE_FIB,
2783 },
2784 "tile": {
2785 "op": Op.TILE,
2786 "operands": (1, 0),
2787 "build_fcn": (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
2788 "types": TYPE_FIB,
2789 },
2790 "transpose": {
2791 "op": Op.TRANSPOSE,
2792 "operands": (1, 0),
Jeremy Johnsona6185572021-06-21 15:55:35 +01002793 "rank": (1, 4),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002794 "build_fcn": (
2795 build_transpose,
2796 TosaTensorGen.tgBasic,
2797 TosaArgGen.agTranspose,
2798 ),
2799 "types": TYPE_FIB,
2800 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002801 # Data nodes
2802 "const": {
2803 "op": Op.CONST,
2804 "operands": (1, 0),
2805 "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None),
2806 "types": TYPE_FIB,
2807 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002808 "identity": {
2809 "op": Op.IDENTITY,
2810 "operands": (1, 0),
2811 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2812 "types": TYPE_FIB,
2813 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002814 # Scatter/Gather
Kevin Cheng550ccc52021-03-03 11:21:43 -08002815 "gather": {
2816 "op": Op.GATHER,
2817 # Only specify 'values' tensor here. 'indices' is generated in op building stage
2818 "operands": (1, 0),
2819 "rank": (3, 3),
2820 "build_fcn": (build_gather, TosaTensorGen.tgBasic, None),
2821 "types": TYPE_INT_FP,
2822 },
2823 "scatter": {
2824 "op": Op.SCATTER,
2825 # Only specify 'values_in' tensor here.
2826 #'indices' and 'input' are generated in op building stage
2827 "operands": (2, 0),
2828 "rank": (3, 3),
2829 "build_fcn": (build_scatter, TosaTensorGen.tgScatter, None),
2830 "types": TYPE_INT_FP,
2831 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002832 # Image operations
Kevin Cheng550ccc52021-03-03 11:21:43 -08002833 "resize": {
2834 "op": Op.RESIZE,
2835 "operands": (1, 0),
2836 "rank": (4, 4),
2837 "build_fcn": (build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
2838 "types": [DType.INT8, DType.INT16, DType.FLOAT],
Matthew Haddonb724efc2021-08-25 16:40:29 +01002839 "invalid_test_validators": (TosaInvalidValidator.ivWrongDataTypeOrModeResize, TosaInvalidValidator.ivBadStride)
Kevin Cheng550ccc52021-03-03 11:21:43 -08002840 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002841 # Type conversion
Kevin Cheng550ccc52021-03-03 11:21:43 -08002842 "cast": {
2843 "op": Op.CAST,
2844 "operands": (1, 0),
2845 "build_fcn": (build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast),
2846 "types": [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL],
2847 },
2848 "rescale": {
2849 "op": Op.RESCALE,
2850 "operands": (1, 0),
2851 "build_fcn": (build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale),
Matthew Haddoncac4ee92021-07-22 14:30:53 +01002852 "types": [DType.UINT8, DType.INT8, DType.INT16, DType.INT32, DType.INT48],
Kevin Cheng550ccc52021-03-03 11:21:43 -08002853 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002854 # Custom
2855 # Not implemented.
Jared Smolens573ecd42021-03-04 15:24:10 -08002856 # Control flow operators
Eric Kunzee5e26762020-10-13 16:11:07 -07002857 # Two varients of cond_if, one that generates one of two constant tensors (no
2858 # inputs to the basic blocks, one output) and another that either adds or subtracts two tensors
2859 # (two inputs to the basic blocks, one output)
Kevin Cheng550ccc52021-03-03 11:21:43 -08002860 "cond_if_const": {
2861 "op": Op.COND_IF,
2862 "operands": (0, 2),
2863 "build_fcn": (
2864 build_cond_if_const,
2865 TosaTensorGen.tgBasic,
2866 TosaArgGen.agCondIf,
2867 ),
2868 "types": [DType.BOOL],
2869 },
2870 "cond_if_binary": {
2871 "op": Op.COND_IF,
2872 "operands": (2, 0),
2873 "build_fcn": (
2874 build_cond_if_binary,
2875 TosaTensorGen.tgBasic,
2876 TosaArgGen.agCondIf,
2877 ),
2878 "types": TYPE_FI32,
2879 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002880 # while_loop
Kevin Cheng550ccc52021-03-03 11:21:43 -08002881 "while_loop": {
2882 "op": Op.WHILE_LOOP,
2883 "operands": (0, 1),
2884 "build_fcn": (
2885 build_while_loop,
2886 TosaTensorGen.tgBasic,
2887 TosaArgGen.agWhileLoop,
2888 ),
2889 "types": [DType.INT32],
2890 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002891 }
2892
Kevin Cheng550ccc52021-03-03 11:21:43 -08002893
Eric Kunzee5e26762020-10-13 16:11:07 -07002894class OutputShaper:
2895 # Methods in this class compute the expected output shape and datatype
2896 # for common classes of operations
2897 def __init__(self):
2898 pass
2899
2900 # These methods return arguments that can be used for
2901 # creating a new output tensor
2902 @staticmethod
2903 def binaryBroadcastOp(ser, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002904 assert len(a.shape) == len(b.shape)
2905 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002906
2907 shape = []
2908 for i in range(len(a.shape)):
2909 if a.shape[i] == 1:
2910 shape.append(b.shape[i])
2911 else:
2912 shape.append(a.shape[i])
2913
Kevin Cheng550ccc52021-03-03 11:21:43 -08002914 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002915
2916 @staticmethod
2917 def binaryNonBroadcastOp(ser, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002918 assert len(a.shape) == len(b.shape)
2919 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002920
2921 shape = []
2922 for i in range(len(a.shape)):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002923 assert a.shape[i] == b.shape[i]
Eric Kunzee5e26762020-10-13 16:11:07 -07002924 shape.append(a.shape[i])
2925
Kevin Cheng550ccc52021-03-03 11:21:43 -08002926 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002927
2928 @staticmethod
2929 def unaryOp(ser, a):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002930 return ser.addOutput(a.shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002931
2932 @staticmethod
2933 def selectOp(ser, cond, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002934 assert len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape)
2935 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002936
2937 shape = []
2938 for i in range(len(a.shape)):
2939 shape.append(max(cond.shape[i], a.shape[i], b.shape[i]))
2940
Kevin Cheng550ccc52021-03-03 11:21:43 -08002941 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002942
2943 @staticmethod
2944 def binaryComparisonOp(ser, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002945 assert len(a.shape) == len(b.shape)
2946 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002947
2948 # Do broadcast
2949 shape = []
2950 for i in range(len(a.shape)):
2951 if a.shape[i] == 1:
2952 shape.append(b.shape[i])
2953 else:
2954 shape.append(a.shape[i])
2955
2956 # Force the output type to bool
Kevin Cheng550ccc52021-03-03 11:21:43 -08002957 return ser.addOutput(shape, DType.BOOL)
Eric Kunzee5e26762020-10-13 16:11:07 -07002958
2959 @staticmethod
2960 def reduceOp(ser, a, axis):
2961
2962 shape = a.shape.copy()
2963
2964 shape[axis] = 1
2965
Kevin Cheng550ccc52021-03-03 11:21:43 -08002966 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002967
2968 @staticmethod
2969 def argmaxOp(ser, a, axis):
2970 shape = a.shape.copy()
2971 del shape[axis]
Kevin Cheng550ccc52021-03-03 11:21:43 -08002972 return ser.addOutput(shape, DType.INT32)
Eric Kunzee5e26762020-10-13 16:11:07 -07002973
2974 @staticmethod
2975 def conv2dOp(ser, ifm, filter, strides, padding, dilations):
2976
2977 # IFM: NHWC
2978 # Filter: OHWI
2979 # OFM: NHWC
2980
2981 if len(padding) == 2:
2982 # Expand padding to 4 parameters in the case of transpose_conv2d
2983 # From H,W to T,B,L,R
2984 padding = [padding[0], padding[0], padding[1], padding[1]]
2985
Kevin Cheng550ccc52021-03-03 11:21:43 -08002986 h = (
2987 ifm.shape[1]
2988 - filter.shape[1]
2989 - (filter.shape[1] - 1) * (dilations[0] - 1)
2990 + padding[0]
2991 + padding[1]
2992 ) // strides[0] + 1
Eric Kunzee5e26762020-10-13 16:11:07 -07002993
Kevin Cheng550ccc52021-03-03 11:21:43 -08002994 w = (
2995 ifm.shape[2]
2996 - filter.shape[2]
2997 - (filter.shape[2] - 1) * (dilations[1] - 1)
2998 + padding[2]
2999 + padding[3]
3000 ) // strides[1] + 1
Eric Kunzee5e26762020-10-13 16:11:07 -07003001
Eric Kunzee5e26762020-10-13 16:11:07 -07003002 ofm_shape = [ifm.shape[0], h, w, filter.shape[0]]
3003
Kevin Cheng3a478572021-01-22 17:21:02 -08003004 if ifm.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07003005 out_dtype = DType.INT32
3006 elif ifm.dtype == DType.INT16:
3007 out_dtype = DType.INT48
3008 elif ifm.dtype == DType.FLOAT:
3009 out_dtype = DType.FLOAT
3010 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08003011 raise Exception("Unsupported input dtype: {}".format(ifm.dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07003012
Kevin Cheng550ccc52021-03-03 11:21:43 -08003013 return ser.addOutput(ofm_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003014
3015 @staticmethod
Kevin Cheng1533b852021-09-01 12:51:58 -07003016 def conv3dOp(ser, ifm, filter, strides, padding, dilations):
3017
3018 # IFM: NDHWC
3019 # Filter: ODHWI
3020 # OFM: NDHWC
3021
3022 d = (
3023 ifm.shape[1]
3024 - filter.shape[1]
3025 - (filter.shape[1] - 1) * (dilations[0] - 1)
3026 + padding[0]
3027 + padding[1]
3028 ) // strides[0] + 1
3029
3030 h = (
3031 ifm.shape[2]
3032 - filter.shape[2]
3033 - (filter.shape[2] - 1) * (dilations[1] - 1)
3034 + padding[2]
3035 + padding[3]
3036 ) // strides[1] + 1
3037
3038 w = (
3039 ifm.shape[3]
3040 - filter.shape[3]
3041 - (filter.shape[3] - 1) * (dilations[2] - 1)
3042 + padding[4]
3043 + padding[5]
3044 ) // strides[2] + 1
3045
3046 ofm_shape = [ifm.shape[0], d, h, w, filter.shape[0]]
3047
3048 if ifm.dtype == DType.INT8:
3049 out_dtype = DType.INT32
3050 elif ifm.dtype == DType.INT16:
3051 out_dtype = DType.INT48
3052 elif ifm.dtype == DType.FLOAT:
3053 out_dtype = DType.FLOAT
3054 else:
3055 raise Exception("Unsupported input dtype: {}".format(ifm.dtype))
3056
3057 return ser.addOutput(ofm_shape, out_dtype)
3058
3059 @staticmethod
Eric Kunzee5e26762020-10-13 16:11:07 -07003060 def depthwiseConv2dOp(ser, ifm, filter, strides, padding, dilations):
3061 # IFM: NHWC
3062 # Filter: HWCM
3063 # OFM: NHW C*M
Kevin Cheng550ccc52021-03-03 11:21:43 -08003064 h = (
3065 ifm.shape[1]
3066 - filter.shape[0]
3067 - (filter.shape[0] - 1) * (dilations[0] - 1)
3068 + padding[0]
3069 + padding[1]
3070 ) // strides[0] + 1
Eric Kunzee5e26762020-10-13 16:11:07 -07003071
Kevin Cheng550ccc52021-03-03 11:21:43 -08003072 w = (
3073 ifm.shape[2]
3074 - filter.shape[1]
3075 - (filter.shape[1] - 1) * (dilations[1] - 1)
3076 + padding[2]
3077 + padding[3]
3078 ) // strides[1] + 1
Eric Kunzee5e26762020-10-13 16:11:07 -07003079
Eric Kunzee5e26762020-10-13 16:11:07 -07003080 ofm_shape = [ifm.shape[0], h, w, filter.shape[2] * filter.shape[3]]
3081
Kevin Cheng3a478572021-01-22 17:21:02 -08003082 if ifm.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07003083 out_dtype = DType.INT32
3084 elif ifm.dtype == DType.INT16:
3085 out_dtype = DType.INT48
3086 elif ifm.dtype == DType.FLOAT:
3087 out_dtype = DType.FLOAT
3088 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08003089 raise Exception("Unsupported input dtype: {}".format(ifm.dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07003090
Kevin Cheng550ccc52021-03-03 11:21:43 -08003091 return ser.addOutput(ofm_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003092
3093 @staticmethod
3094 def pool2dOp(ser, ifm, kernel, stride, pad):
3095 # input: NHWC
3096 h = (ifm.shape[1] + pad[0] + pad[1] + stride[0] - kernel[0]) // stride[0]
3097 w = (ifm.shape[2] + pad[2] + pad[3] + stride[1] - kernel[1]) // stride[1]
3098
Eric Kunzee5e26762020-10-13 16:11:07 -07003099 ofm_shape = [ifm.shape[0], h, w, ifm.shape[3]]
Kevin Cheng550ccc52021-03-03 11:21:43 -08003100 return ser.addOutput(ofm_shape, ifm.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003101
3102 @staticmethod
3103 def fullyConnectedOp(ser, input, filter):
3104 # input: N, IC
3105 # filter: OC, IC
3106 # output: N, OC
3107
3108 output_shape = [input.shape[0], filter.shape[0]]
3109
Kevin Cheng3a478572021-01-22 17:21:02 -08003110 if input.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07003111 out_dtype = DType.INT32
3112 elif input.dtype == DType.INT16:
3113 out_dtype = DType.INT48
3114 elif input.dtype == DType.FLOAT:
3115 out_dtype = DType.FLOAT
3116 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08003117 raise Exception("Unsupported input dtype: {}".format(input.dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07003118
Kevin Cheng550ccc52021-03-03 11:21:43 -08003119 return ser.addOutput(output_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003120
3121 @staticmethod
3122 def matmulOp(ser, a, b):
Kevin Cheng2d60f002021-06-09 14:18:32 -07003123 # a: N, H, C
3124 # b: N, C, W
3125 # out: N, H, W
Eric Kunzee5e26762020-10-13 16:11:07 -07003126
Kevin Cheng2d60f002021-06-09 14:18:32 -07003127 output_shape = [a.shape[0], a.shape[1], b.shape[2]]
Eric Kunzee5e26762020-10-13 16:11:07 -07003128
Kevin Cheng3a478572021-01-22 17:21:02 -08003129 if a.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07003130 out_dtype = DType.INT32
3131 elif a.dtype == DType.INT16:
3132 out_dtype = DType.INT48
3133 elif a.dtype == DType.FLOAT:
3134 out_dtype = DType.FLOAT
3135 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08003136 raise Exception("UNsupported input dtype for matmul: {}".format(a.dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07003137
Kevin Cheng550ccc52021-03-03 11:21:43 -08003138 return ser.addOutput(output_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003139
3140 @staticmethod
Matthew Haddon818ab902021-07-27 09:12:49 +01003141 def concatOp(ser, axis, *a):
3142 input1 = a[0]
3143 remaining_inputs = a[1:]
Eric Kunzee5e26762020-10-13 16:11:07 -07003144
Matthew Haddon818ab902021-07-27 09:12:49 +01003145 output_shape = input1.shape.copy()
Eric Kunzee5e26762020-10-13 16:11:07 -07003146
Matthew Haddon818ab902021-07-27 09:12:49 +01003147 output_shape[axis] = input1.shape[axis]
3148
3149 for tensor in remaining_inputs:
3150 output_shape[axis] += tensor.shape[axis]
3151
3152 return ser.addOutput(output_shape, input1.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003153
3154 @staticmethod
3155 def padOp(ser, a, padding):
3156
3157 output_shape = a.shape.copy()
3158
3159 for i in range(len(output_shape)):
3160 output_shape[i] = padding[i][0] + padding[i][1] + output_shape[i]
3161
Kevin Cheng550ccc52021-03-03 11:21:43 -08003162 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003163
3164 @staticmethod
3165 def reshapeOp(ser, a, shape):
3166 output_shape = shape.copy()
3167
3168 totalElements = 1
3169 for i in a.shape:
3170 totalElements *= i
3171
3172 # If there are any -1 elements, figure out what that dimension must be
3173 totalOutputElements = 1
3174 for i in output_shape:
3175 if i != -1:
3176 totalOutputElements *= i
3177
3178 # And fill it in
3179 for i in range(len(output_shape)):
3180 if output_shape[i] == -1:
3181 output_shape[i] = totalElements // totalOutputElements
3182
Kevin Cheng550ccc52021-03-03 11:21:43 -08003183 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003184
3185 @staticmethod
3186 def sliceOp(ser, a, begin, size):
3187
3188 output_shape = size.copy()
Kevin Cheng550ccc52021-03-03 11:21:43 -08003189 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003190
3191 @staticmethod
3192 def tileOp(ser, a, multiples):
3193
3194 output_shape = a.shape.copy()
Kevin Cheng550ccc52021-03-03 11:21:43 -08003195 assert len(multiples) == len(output_shape)
Eric Kunzee5e26762020-10-13 16:11:07 -07003196
3197 for i in range(len(output_shape)):
3198 output_shape[i] = a.shape[i] * multiples[i]
3199
Kevin Cheng550ccc52021-03-03 11:21:43 -08003200 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003201
3202 @staticmethod
3203 def transposeOp(ser, a, perms):
3204 output_shape = a.shape.copy()
Kevin Cheng550ccc52021-03-03 11:21:43 -08003205 assert len(perms) == len(output_shape)
Eric Kunzee5e26762020-10-13 16:11:07 -07003206
3207 for i in range(len(output_shape)):
3208 output_shape[i] = a.shape[perms[i]]
3209
Kevin Cheng550ccc52021-03-03 11:21:43 -08003210 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003211
3212 @staticmethod
Kevin Cheng77d0f762020-11-24 10:26:32 -08003213 def gatherOp(ser, values, indices):
3214 assert len(values.shape) == 3
3215 assert len(indices.shape) == 2
3216 assert values.shape[0] == indices.shape[0]
Eric Kunzee5e26762020-10-13 16:11:07 -07003217
Kevin Cheng77d0f762020-11-24 10:26:32 -08003218 output_shape = [values.shape[0], indices.shape[1], values.shape[2]]
3219
Kevin Cheng550ccc52021-03-03 11:21:43 -08003220 return ser.addOutput(output_shape, values.dtype)
Kevin Cheng77d0f762020-11-24 10:26:32 -08003221
3222 @staticmethod
3223 def scatterOp(ser, values_in, indices, input):
3224 assert len(values_in.shape) == 3
3225 assert len(indices.shape) == 2
3226 assert len(input.shape) == 3
Kevin Cheng550ccc52021-03-03 11:21:43 -08003227 assert values_in.shape[0] == indices.shape[0] # N
3228 assert input.shape[1] == indices.shape[1] # W
3229 assert values_in.shape[2] == input.shape[2] # C
Kevin Cheng77d0f762020-11-24 10:26:32 -08003230
3231 output_shape = values_in.shape
3232
Kevin Cheng550ccc52021-03-03 11:21:43 -08003233 return ser.addOutput(output_shape, values_in.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003234
3235 @staticmethod
Jeremy Johnsonf54d8a22021-07-20 16:01:06 +01003236 def tableOp(ser, input, table_dtype):
3237 # Same shape as the input, but dtype dependent on table dtype
3238 assert table_dtype == DType.INT16 or table_dtype == DType.INT8
3239 output_dtype = DType.INT32 if table_dtype == DType.INT16 else DType.INT8
3240 return ser.addOutput(input.shape, output_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003241
3242 @staticmethod
Kevin Cheng550ccc52021-03-03 11:21:43 -08003243 def resizeOp(
3244 ser,
3245 input,
3246 mode,
3247 stride,
3248 offset,
3249 shift,
3250 stride_fp,
3251 offset_fp,
3252 output_dims,
3253 input_dtype,
3254 output_dtype,
3255 ):
Eric Kunzee5e26762020-10-13 16:11:07 -07003256
3257 output_dims = [input.shape[0], output_dims[0], output_dims[1], input.shape[3]]
3258
Kevin Cheng550ccc52021-03-03 11:21:43 -08003259 return ser.addOutput(output_dims, output_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003260
3261 @staticmethod
3262 def typeConversionOp(ser, val, out_dtype):
Kevin Cheng550ccc52021-03-03 11:21:43 -08003263 return ser.addOutput(val.shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07003264
3265 @staticmethod
3266 def transposeConv2DOp(ser, ifm, output_shape):
Kevin Cheng3a478572021-01-22 17:21:02 -08003267 if ifm.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07003268 out_dtype = DType.INT32
3269 elif ifm.dtype == DType.INT16:
3270 out_dtype = DType.INT48
3271 elif ifm.dtype == DType.FLOAT:
3272 out_dtype = DType.FLOAT
3273 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08003274 raise Exception("Unsupported input dtype: {}".format(ifm.dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07003275
Kevin Cheng550ccc52021-03-03 11:21:43 -08003276 return ser.addOutput(output_shape, out_dtype)