blob: 777c05979c6b6657786cbdf465f23fb09618cbdb [file] [log] [blame]
Eric Kunzee5e26762020-10-13 16:11:07 -07001#!/usr/bin/env python3
2
Kevin Cheng3a478572021-01-22 17:21:02 -08003# Copyright (c) 2020-2021, ARM Limited.
Eric Kunzee5e26762020-10-13 16:11:07 -07004#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16
17
18import numpy as np
19import argparse
20import sys
21import re
22import os
23import subprocess
24import shlex
25import json
26import glob
27import math
28import queue
29import threading
30import traceback
31import math
Jeremy Johnsona6185572021-06-21 15:55:35 +010032import itertools
Eric Kunzee5e26762020-10-13 16:11:07 -070033
34from enum import IntEnum, Enum, unique
Kevin Chengacb550f2021-06-29 15:32:19 -070035from tosa_ref_run import TosaReturnCode
Eric Kunzee5e26762020-10-13 16:11:07 -070036
Kevin Cheng550ccc52021-03-03 11:21:43 -080037# Include the ../thirdparty/serialization_lib/python directory in PYTHONPATH
38parent_dir = os.path.dirname(os.path.realpath(__file__))
39sys.path.append(
40 os.path.join(parent_dir, "..", "thirdparty", "serialization_lib", "python")
41)
Eric Kunzee5e26762020-10-13 16:11:07 -070042import tosa_serializer as ts
43from tosa_serializer import *
44import tosa
45
# Convenience variables to the flatc-generated types that should be enums, but aren't.
# These are module-level singletons used throughout the generators below
# (e.g. DType.INT8, Op.*, ResizeMode.NEAREST/BILINEAR).
DType = tosa.DType.DType()
Op = tosa.Op.Op()
ResizeMode = tosa.ResizeMode.ResizeMode()
50
Kevin Cheng550ccc52021-03-03 11:21:43 -080051
Eric Kunzee5e26762020-10-13 16:11:07 -070052class TosaQuantGen:
Kevin Cheng550ccc52021-03-03 11:21:43 -080053 """QuantizedInfo random generator helper functions. Specify with 'qgen': in the operator defintion"""
54
Eric Kunzee5e26762020-10-13 16:11:07 -070055 def __init__(self):
56 pass
57
58 @staticmethod
Les Bell30e46802021-07-23 09:43:31 +010059 def getQinfo(testGen, dtype):
60 if dtype == DType.INT8:
61 return testGen.randInt(-128, 128)
62 if dtype == DType.UINT8:
63 return testGen.randInt(0, 256)
64 return 0
Eric Kunzee5e26762020-10-13 16:11:07 -070065
66 @staticmethod
67 def qgUnary(testGen, op, dtype):
68 qinfo = ts.TosaSerializerQuantInfo()
Kevin Chengacb550f2021-06-29 15:32:19 -070069 qinfo.UnaryQuantInfo(
70 TosaQuantGen.getQinfo(testGen, dtype), TosaQuantGen.getQinfo(testGen, dtype)
71 )
Eric Kunzee5e26762020-10-13 16:11:07 -070072 return qinfo
73
74 @staticmethod
Les Bell30e46802021-07-23 09:43:31 +010075 def qgConv(testGen, op, dtype_or_dtypeList):
Eric Kunzee5e26762020-10-13 16:11:07 -070076 qinfo = ts.TosaSerializerQuantInfo()
Les Bell30e46802021-07-23 09:43:31 +010077 if isinstance(dtype_or_dtypeList, list):
78 # a list of [input, weights, accumulator] dtypes
79 dtypeList = dtype_or_dtypeList
Eric Kunzee5e26762020-10-13 16:11:07 -070080 else:
Les Bell30e46802021-07-23 09:43:31 +010081 # an int, [input, weights, accumulator] dtypes are the same
82 dtypeList = [dtype_or_dtypeList] * 3
83 input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0])
84 weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1])
85 qinfo.ConvQuantInfo(input_zp, weights_zp)
Eric Kunzee5e26762020-10-13 16:11:07 -070086 return qinfo
87
88 @staticmethod
89 def qgMatmul(testGen, op, dtype):
90 qinfo = ts.TosaSerializerQuantInfo()
Kevin Chengacb550f2021-06-29 15:32:19 -070091 qinfo.MatMulQuantInfo(
92 TosaQuantGen.getQinfo(testGen, dtype), TosaQuantGen.getQinfo(testGen, dtype)
93 )
Eric Kunzee5e26762020-10-13 16:11:07 -070094 return qinfo
95
96 @staticmethod
97 def qgPad(testGen, op, dtype):
98 qinfo = ts.TosaSerializerQuantInfo()
Les Bell30e46802021-07-23 09:43:31 +010099 qinfo.PadQuantInfo(TosaQuantGen.getQinfo(testGen, dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -0700100 return qinfo
101
102 @staticmethod
103 def computeMultiplierAndShift(scaleFp, scale32):
104 # Derived from computeMultiplierAndShiftTosaScale32
105 # Provide a floating-point scaling factor and the scale32 parameter
106 # to compute the multiplier and shift
107
108 if scale32:
109 scaleBits = 31
110 else:
111 scaleBits = 15
112
113 m, shift = math.frexp(scaleFp)
114
115 if scaleFp < 0.0:
116 m = -m
117
118 multiplier = round(m * (1 << scaleBits))
Kevin Cheng550ccc52021-03-03 11:21:43 -0800119 assert multiplier <= (1 << scaleBits)
Eric Kunzee5e26762020-10-13 16:11:07 -0700120
121 if multiplier == (1 << scaleBits):
122 multiplier = multiplier // 2
123 shift = shift + 1
124
125 shift = (-shift) + scaleBits
Kevin Cheng550ccc52021-03-03 11:21:43 -0800126 # print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(scaleFp, scaleBits, m, multiplier, shift))
Eric Kunzee5e26762020-10-13 16:11:07 -0700127
Kevin Cheng550ccc52021-03-03 11:21:43 -0800128 assert multiplier <= (1 << scaleBits)
129 assert shift >= 0 and shift <= 63
Eric Kunzee5e26762020-10-13 16:11:07 -0700130
131 return multiplier, shift
132
133
class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const
    tensor data operands for the operator.  The actual random data is
    generated separately for each test."""

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank):
        """All operands share one randomly generated shape."""
        pl, const = opName["operands"]
        base = testGen.makeShape(rank)
        return [base.copy() for _ in range(pl + const)]

    @staticmethod
    def tgNHWC(testGen, opName, rank):
        """Rank-4 NHWC shapes, identical for every operand."""
        pl, const = opName["operands"]

        assert rank == 4

        base = testGen.makeShape(rank)

        # Optionally constrain the batch dimension
        if testGen.args.max_batch_size:
            base[0] = (base[0] % testGen.args.max_batch_size) + 1

        return [base.copy() for _ in range(pl + const)]

    @staticmethod
    def tgScatter(testGen, opName, rank):
        """Shapes for gather/scatter: a values tensor plus a W-sized input."""
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        assert rank == 3

        values_in = testGen.makeShape(rank)

        # Ignore max batch size when an explicit target shape was requested
        if testGen.args.max_batch_size and not testGen.args.target_shapes:
            values_in[0] = (values_in[0] % testGen.args.max_batch_size) + 1

        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        # Constrain W when a dimension is already large, to keep tensor sizes reasonable
        if max(values_in) > 5000:
            W = testGen.randInt(0, 16)

        second_shape = [values_in[0], W, values_in[2]]

        return [values_in.copy(), second_shape.copy()]

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank):
        """Identical shapes except one operand gets a random dim set to 1."""
        base = testGen.makeShape(rank)

        pl, const = op["operands"]

        # Choose which input will be broadcast
        chosen = testGen.randInt(0, pl + const)

        shapes = []
        for idx in range(pl + const):
            candidate = base.copy()

            # For the chosen input, squash a random dimension to 1
            if idx == chosen:
                candidate[testGen.randInt(0, rank)] = 1

            shapes.append(candidate)

        return shapes

    @staticmethod
    def tgConv2D(testGen, op, rank):
        """[IFM (NHWC), filter (OHWI), bias (OC)] shapes for CONV2D."""
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm = testGen.makeShape(rank)

        # Optionally constrain the batch dimension
        if testGen.args.max_batch_size:
            ifm[0] = (ifm[0] % testGen.args.max_batch_size) + 1

        # Filter height/width come from the operator parameters
        kernel_hw = op["filter"]

        # Random output depth
        out_depth = testGen.makeShape(1)[0]

        # Filter is OHWI, bias is per output channel
        weight_shape = np.asarray(
            [out_depth, kernel_hw[0], kernel_hw[1], ifm[3]]
        )
        bias_shape = np.asarray([out_depth])

        return [ifm, weight_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank):
        """[IFM (NHWC), filter (OHWI), bias (OC)] shapes for TRANSPOSE_CONV2D."""
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm = testGen.makeShape(rank)

        # Optionally constrain the batch dimension
        if testGen.args.max_batch_size:
            ifm[0] = (ifm[0] % testGen.args.max_batch_size) + 1

        # Filter height/width come from the operator parameters
        kernel_hw = op["filter"]

        # Random output depth
        out_depth = testGen.makeShape(1)[0]

        # Filter is OHWI, bias is per output channel
        weight_shape = np.asarray(
            [out_depth, kernel_hw[0], kernel_hw[1], ifm[3]]
        )
        bias_shape = np.asarray([out_depth])

        return [ifm, weight_shape, bias_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank):
        """[IFM (NHWC), filter (HWCM), bias (C*M)] shapes for DEPTHWISE_CONV2D."""
        pl, const = op["operands"]

        assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm = testGen.makeShape(rank)

        # Optionally constrain the batch dimension
        if testGen.args.max_batch_size:
            ifm[0] = (ifm[0] % testGen.args.max_batch_size) + 1

        # Filter height/width come from the operator parameters
        kernel_hw = op["filter"]

        # Keep the channel multiplier small: the output depth is M * C
        filter_m = (
            testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # Filter is HWCM
        weight_shape = np.asarray(
            [kernel_hw[0], kernel_hw[1], ifm[3], filter_m]
        )

        # Bias has M * C elements
        bias_shape = np.asarray([ifm[3] * filter_m])

        return [ifm, weight_shape, bias_shape]

    @staticmethod
    def tgFullyConnected(testGen, op, rank):
        """[input, weights, bias] shapes for FULLY_CONNECTED (rank 2)."""
        pl, const = op["operands"]

        assert rank == 2

        in_shape = testGen.makeShape(rank)
        out_channels = testGen.rng.integers(
            low=testGen.args.tensor_shape_range[0],
            high=testGen.args.tensor_shape_range[1],
            size=1,
        )[0]

        weight_shape = np.asarray([out_channels, in_shape[1]])
        bias_shape = np.asarray([out_channels])

        return [in_shape, weight_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank):
        """[A, B] shapes for MATMUL: B shares N and C-dim with A."""
        pl, const = op["operands"]

        assert rank == 3
        assert pl == 2 and const == 0

        a_shape = testGen.makeShape(rank)

        # Always draw b_oc, even when a target shape is defined
        b_oc = np.int32(
            testGen.rng.integers(
                low=testGen.args.tensor_shape_range[0],
                high=testGen.args.tensor_shape_range[1],
                size=1,
            )
        )[0]

        # Clamp to 1 when N or H is large, to bound the output tensor size
        if max(a_shape) > 1000:
            b_oc = 1

        b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])
        return [a_shape, b_shape]

    @staticmethod
    def tgConcat(testGen, opName, rank):
        """One shared shape replicated for a random number of concat inputs."""
        pl, const = opName["operands"]
        base = testGen.makeShape(rank)

        # Extra tensors to concatenate, on top of the declared operand count
        extra = testGen.randInt(0, 4)

        return [base.copy() for _ in range(pl + const + extra)]

    @staticmethod
    def tgConcatConstInput(testGen, shapeList, axis):
        """Split the concat shape along `axis` so multiple const inputs do
        not all carry a full-size tensor.

        NOTE: mutates shapeList[0] in place (matches original behaviour).
        """
        shape = shapeList[0]
        if len(shapeList) == 2 or shape[axis] < len(shapeList):
            return shapeList

        result = [shape.copy()]
        remaining = shape[axis]
        for i in range(len(shapeList) - 2):
            # Halve what is left along the axis and track the remainder
            half = int(shape[axis] / 2)
            remaining = remaining - half

            # Record the half-sized shape, then leave the remainder on `shape`
            shape[axis] = half
            result.append(shape.copy())
            shape[axis] = remaining
            if i == len(shapeList) - 3:
                result.append(shape.copy())

        return result
386
387
Kevin Cheng550ccc52021-03-03 11:21:43 -0800388
class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for
    operators that take attributes or other parameters.

    The return value of each agXXX staticmethod is a list of
    (descriptive_name, [arglist]) tuples where the descriptive_name is appended
    to the test name and the arglist is expanded as arguments to the operator
    build function."""

    def __init__(self):
        # Stateless: every generator below is a @staticmethod.
        pass
397
398 @staticmethod
399 def agNone(testGen, opName, shapeList, dtype):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800400 """A trivial argument generator for operators that don't take any
401 non-tensor arguments"""
402 return [("", [])]
Eric Kunzee5e26762020-10-13 16:11:07 -0700403
404 @staticmethod
405 def agAxis(testGen, opName, shapeList, dtype):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800406 """Build the axis argument for operators that take a single axis"""
Eric Kunzee5e26762020-10-13 16:11:07 -0700407 axes = []
408
409 shape = shapeList[0]
410
411 for a in range(0, len(shape)):
Matthew Haddon43e37192021-07-09 14:13:02 +0100412 axes.append(("axis{}".format(a), [a]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700413 return axes
414
415 @staticmethod
416 def agConv2D(testGen, opName, shapeList, dtype):
417 arg_list = []
418
419 ifm_shape = shapeList[0]
420 filter_shape = shapeList[1]
421
422 # Must be rank 4
Kevin Cheng550ccc52021-03-03 11:21:43 -0800423 assert len(ifm_shape) == 4
424 assert len(filter_shape) == 4
Eric Kunzee5e26762020-10-13 16:11:07 -0700425
426 maxStride = testGen.args.max_conv_stride
427 maxPadding = testGen.args.max_conv_padding + 1
428 maxDilation = testGen.args.max_conv_dilation
429
430 # Strides, padding, dilations
431 for stride in range(0, maxStride ** 2):
432 for padding in range(0, (maxPadding) ** 4):
433 for dilation in range(0, maxDilation ** 2):
434
Kevin Cheng550ccc52021-03-03 11:21:43 -0800435 s = [stride // maxStride + 1, stride % maxStride + 1]
436 p = [
437 (padding // (maxPadding * 4)) % maxPadding,
438 (padding // (maxPadding * 2)) % maxPadding,
439 (padding // (maxPadding * 1)) % maxPadding,
440 padding % maxPadding,
441 ]
442 d = [dilation // maxDilation + 1, dilation % maxDilation + 1]
Eric Kunzee5e26762020-10-13 16:11:07 -0700443
444 # 4 padding parameters for regular conv2d
Kevin Cheng550ccc52021-03-03 11:21:43 -0800445 arg_list.append(
446 (
447 "st{}{}_pad{}{}{}{}_dilat{}{}".format(
448 s[0], s[1], p[0], p[1], p[2], p[3], d[0], d[1]
449 ),
450 [s, p, d],
451 )
452 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700453 return arg_list
454
455 @staticmethod
456 def agTransposeConv2D(testGen, opName, shapeList, dtype):
457 arg_list = []
458
459 ifm_shape = shapeList[0]
460 filter_shape = shapeList[1]
461
462 # Must be rank 4
Kevin Cheng550ccc52021-03-03 11:21:43 -0800463 assert len(ifm_shape) == 4
464 assert len(filter_shape) == 4
Eric Kunzee5e26762020-10-13 16:11:07 -0700465
466 maxStride = testGen.args.max_conv_stride
467 maxPadding = testGen.args.max_conv_padding + 1
468 maxDilation = testGen.args.max_conv_dilation
469
470 # Strides, padding, dilations
471 for stride in range(0, maxStride ** 2):
472 for out_padding in range(0, (maxPadding) ** 2):
473 for dilation in range(0, maxDilation ** 2):
474
Kevin Cheng550ccc52021-03-03 11:21:43 -0800475 s = [stride // maxStride + 1, stride % maxStride + 1]
476 p = [
477 (out_padding // (maxPadding * 1)) % maxPadding,
478 out_padding % maxPadding,
479 ]
480 d = [dilation // maxDilation + 1, dilation % maxDilation + 1]
Eric Kunzee5e26762020-10-13 16:11:07 -0700481
Kevin Cheng550ccc52021-03-03 11:21:43 -0800482 oh = (
483 ifm_shape[1]
484 - filter_shape[1]
485 - (filter_shape[1] - 1) * (d[0] - 1)
486 + 2 * p[0]
487 ) // s[0] + 1
Eric Kunzee5e26762020-10-13 16:11:07 -0700488
Kevin Cheng550ccc52021-03-03 11:21:43 -0800489 ow = (
490 ifm_shape[2]
491 - filter_shape[2]
492 - (filter_shape[2] - 1) * (d[1] - 1)
493 + 2 * p[1]
494 ) // s[1] + 1
Eric Kunzee5e26762020-10-13 16:11:07 -0700495
496 # Output shape
Kevin Cheng550ccc52021-03-03 11:21:43 -0800497 os = [ifm_shape[0], oh, ow, filter_shape[0]]
Eric Kunzee5e26762020-10-13 16:11:07 -0700498
Kevin Cheng550ccc52021-03-03 11:21:43 -0800499 arg_list.append(
500 (
501 "st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}".format(
502 s[0],
503 s[1],
504 p[0],
505 p[1],
506 d[0],
507 d[1],
508 os[0],
509 os[1],
510 os[2],
511 os[3],
512 ),
513 [s, p, d, os],
514 )
515 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700516
517 return arg_list
518
519 @staticmethod
520 def agPad(testGen, opName, shapeList, dtype):
521 arg_list = []
522 rank = len(shapeList[0])
523
Les Bell7ffccce2021-07-28 15:37:02 +0100524 # Exhaustively test combinations of padding on each side of each dimension
525 # - the range of padding values is defined by pad_min and pad_max
526 # - for padding >9, the name format needs to be more distinctive
527 pad_min, pad_max = 0, 1
528 pad_values = [x for x in range(pad_min, pad_max + 1)]
529 axis_pad_values = [x for x in itertools.product(pad_values, pad_values)]
530 shape_pad_values = itertools.product(*([axis_pad_values] * rank))
Eric Kunzee5e26762020-10-13 16:11:07 -0700531
Les Bell7ffccce2021-07-28 15:37:02 +0100532 for paddings in shape_pad_values:
533 name = "pad"
534 for r in range(rank):
535 before, after = paddings[r]
536 name = f"{name}{before}{after}"
537 arg_list.append((name, [np.array(paddings)]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700538
539 return arg_list
540
541 @staticmethod
542 def agPooling(testGen, opName, shapeList, dtype):
543 arg_list = []
544
545 shape = shapeList[0]
Kevin Cheng550ccc52021-03-03 11:21:43 -0800546 assert len(shape) == 4
Eric Kunzee5e26762020-10-13 16:11:07 -0700547
548 maxStride = testGen.args.max_pooling_stride
549 maxKernel = testGen.args.max_pooling_kernel
550 maxPadding = testGen.args.max_pooling_padding + 1
551
552 for kernel in range(0, maxKernel ** 2):
553 for stride in range(0, maxStride ** 2):
554 for padding in range(0, maxPadding ** 4):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800555 s = [stride // maxStride + 1, stride % maxStride + 1]
556 k = [(kernel // maxKernel) + 2, (kernel % maxKernel) + 2]
557 p = [
558 (padding // (maxPadding * 4)) % maxPadding,
559 (padding // (maxPadding * 2)) % maxPadding,
560 (padding // (maxPadding * 1)) % maxPadding,
561 padding % maxPadding,
562 ]
Eric Kunzee5e26762020-10-13 16:11:07 -0700563
Kevin Cheng550ccc52021-03-03 11:21:43 -0800564 arg_list.append(
565 (
566 "st{}{}_kern{}{}_pad{}{}{}{}".format(
567 s[0], s[1], k[0], k[1], p[0], p[1], p[2], p[3]
568 ),
569 [k, s, p],
570 )
571 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700572 return arg_list
573
574 @staticmethod
575 def agCast(testGen, opName, shapeList, inDtype):
576 arg_list = []
577
578 # Enumerate the output types here
579 if inDtype == DType.INT8:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800580 dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
Eric Kunzee5e26762020-10-13 16:11:07 -0700581 elif inDtype == DType.INT16:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800582 dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
Eric Kunzee5e26762020-10-13 16:11:07 -0700583 elif inDtype == DType.INT32:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800584 dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
Eric Kunzee5e26762020-10-13 16:11:07 -0700585 elif inDtype == DType.BOOL:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800586 dtypeList = [DType.INT8, DType.INT16, DType.INT32]
Eric Kunzee5e26762020-10-13 16:11:07 -0700587 elif inDtype == DType.FLOAT:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800588 dtypeList = [DType.INT8, DType.INT16, DType.INT32]
Eric Kunzee5e26762020-10-13 16:11:07 -0700589 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800590 raise Exception("Unexpected input dtype: {}".format(inDtype))
Eric Kunzee5e26762020-10-13 16:11:07 -0700591
592 for dtype in dtypeList:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800593 arg_list.append(("out{}".format(DTypeNames[dtype]), [dtype]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700594
595 return arg_list
596
597 @staticmethod
598 def agRescale(testGen, opName, shapeList, inDtype):
599 arg_list = []
600
601 # Enumerate the output types here
Matthew Haddoncac4ee92021-07-22 14:30:53 +0100602 for dtype in [DType.UINT8, DType.INT8, DType.INT16, DType.INT32]:
603 if inDtype == DType.UINT8 and dtype != DType.INT8:
604 # The only output dtype for UINT8 is INT8, skip all other combinations
605 continue
606 if inDtype != DType.INT8 and dtype == DType.UINT8:
607 # The only input dtype for UINT8 is INT8, skip all other combinations
608 continue
609
Kevin Cheng550ccc52021-03-03 11:21:43 -0800610 for scale32 in [False, True]:
611 for double_round in [False, True]:
612 for per_channel in [False, True]:
Eric Kunzee5e26762020-10-13 16:11:07 -0700613
614 if inDtype == DType.INT48 and scale32:
615 # Illegal condition. Must be scale32=False
616 continue
Matthew Haddoncac4ee92021-07-22 14:30:53 +0100617 if double_round and not scale32:
618 # Illegal condition. ERROR_IF(!scale32 && double_round)
619 continue
Eric Kunzee5e26762020-10-13 16:11:07 -0700620
Kevin Cheng550ccc52021-03-03 11:21:43 -0800621 arg_list.append(
622 (
623 "out{}_sc{}_dr{}_pc{}".format(
624 DTypeNames[dtype],
625 int(scale32),
626 int(double_round),
627 int(per_channel),
628 ),
629 [dtype, scale32, double_round, per_channel],
630 )
631 )
Eric Kunzee5e26762020-10-13 16:11:07 -0700632
633 return arg_list
634
Kevin Chengaee1fac2020-11-11 13:54:06 -0800635 @staticmethod
636 def agMul(testGen, opName, shapeList, dtype):
637 arg_list = []
638
639 if dtype is DType.INT32:
640 for p in range(testGen.args.num_rand_permutations):
641
642 shift = testGen.randInt(0, 32)
643
Kevin Cheng550ccc52021-03-03 11:21:43 -0800644 arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
Kevin Chengaee1fac2020-11-11 13:54:06 -0800645 else:
Matthew Haddon43e37192021-07-09 14:13:02 +0100646 arg_list.append(("perm0_shift0", [0]))
Kevin Chengaee1fac2020-11-11 13:54:06 -0800647
648 return arg_list
649
650 @staticmethod
651 def agArithmeticRightShift(testGen, opName, shapeList, dtype):
652 arg_list = []
653
Kevin Cheng550ccc52021-03-03 11:21:43 -0800654 arg_list.append(("roundTrue", [True]))
655 arg_list.append(("roundFalse", [False]))
Kevin Chengaee1fac2020-11-11 13:54:06 -0800656
657 return arg_list
658
Eric Kunzee5e26762020-10-13 16:11:07 -0700659 # Helper function for reshape. Gets some factors of a larger number.
660 @staticmethod
661 def getFactors(val, start=1):
662 factors = []
663
Matthew Haddon2ad047d2021-06-22 16:55:23 +0100664 for i in range(start, int(np.sqrt(val)) + 1):
Eric Kunzee5e26762020-10-13 16:11:07 -0700665 if (val % i) == 0:
666 factors.append(i)
667
668 return factors
669
670 @staticmethod
671 def agReshape(testGen, opName, shapeList, dtype):
672 arg_list = []
673
674 origShape = shapeList[0]
675
676 totalElements = 1
677 for s in origShape:
678 totalElements *= s
679
680 # This code is NOT fast. Fortunately, the numbers are fairly small.
681 factors = TosaArgGen.getFactors(totalElements)
682
683 for p in range(testGen.args.num_rand_permutations):
Matthew Haddon5fc4e682021-07-07 11:28:29 +0100684 newRank = testGen.randInt(1, 7)
Kevin Cheng550ccc52021-03-03 11:21:43 -0800685 if len(factors) < newRank:
Eric Kunzee5e26762020-10-13 16:11:07 -0700686 continue
687
Matthew Haddon2ad047d2021-06-22 16:55:23 +0100688 found = True
689 # escape_counter breaks while loop if it continues on for too long
690 escape_counter = 0
691 while found:
692 newShape = []
693 # Generate newShape ensuring it isn't a duplicate
694 remainingElements = totalElements
695 shuffledFactors = testGen.rng.permutation(factors)
Matthew Haddon5fc4e682021-07-07 11:28:29 +0100696 for i in range(1, newRank):
Matthew Haddon2ad047d2021-06-22 16:55:23 +0100697 # pick rank-1 factors
698 newShape.append(shuffledFactors[0])
699 remainingElements = remainingElements // shuffledFactors[0]
700 shuffledFactors = testGen.rng.permutation(
701 TosaArgGen.getFactors(remainingElements)
702 )
703 newShape.append(remainingElements)
Eric Kunzee5e26762020-10-13 16:11:07 -0700704
Matthew Haddon2ad047d2021-06-22 16:55:23 +0100705 # Toss in a -1 sometimes
706 minusOne = testGen.randInt(0, newRank * 4)
707 if minusOne < newRank:
708 newShape[minusOne] = -1
Eric Kunzee5e26762020-10-13 16:11:07 -0700709
Matthew Haddon2ad047d2021-06-22 16:55:23 +0100710 # Check for duplicates
711 found = False
712 for name, other_shape in arg_list:
713 if other_shape[0] == newShape:
714 found = True
715 break
716
717 escape_counter += 1
718 if escape_counter >= 100:
719 break
720
721 if not found:
722 arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700723
724 return arg_list
725
Eric Kunzee5e26762020-10-13 16:11:07 -0700726 @staticmethod
727 def agTranspose(testGen, opName, shapeList, dtype):
728 arg_list = []
729
730 ifm_shape = shapeList[0]
731
Jeremy Johnsona6185572021-06-21 15:55:35 +0100732 # Get all permutations
733 permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]
Eric Kunzee5e26762020-10-13 16:11:07 -0700734
Jeremy Johnsona6185572021-06-21 15:55:35 +0100735 # Limit to possible permutations from shape dimension or argument setting
736 limit = min(len(permutations), testGen.args.num_rand_permutations)
Eric Kunzee5e26762020-10-13 16:11:07 -0700737
Jeremy Johnsona6185572021-06-21 15:55:35 +0100738 # Get random permutation generator that uses all permutations
739 random_permutations = testGen.rng.permutation(permutations)
Eric Kunzee5e26762020-10-13 16:11:07 -0700740
Jeremy Johnsona6185572021-06-21 15:55:35 +0100741 # Create list of required amount of permutations
Kevin Chengacb550f2021-06-29 15:32:19 -0700742 arg_list = [
743 ("perm{}".format(p), [random_permutations[p].tolist()])
744 for p in range(limit)
745 ]
Eric Kunzee5e26762020-10-13 16:11:07 -0700746 return arg_list
747
748 @staticmethod
749 def agSlice(testGen, opName, shapeList, dtype):
750 arg_list = []
751
752 ifm_shape = shapeList[0]
753 rank = len(ifm_shape)
754
755 for p in range(testGen.args.num_rand_permutations):
756 begin = []
757 size = []
758
Kevin Cheng550ccc52021-03-03 11:21:43 -0800759 valid = True
Eric Kunzee5e26762020-10-13 16:11:07 -0700760
761 for i in range(rank):
762 if ifm_shape[i] > 1:
763 begin.append(testGen.randInt(0, ifm_shape[i]))
764 size.append(testGen.randInt(0, ifm_shape[i] - begin[i]))
765
766 # Invalid slice size?
767 if size[i] == 0:
768 valid = False
769 else:
770 begin.append(0)
771 size.append(1)
772
773 if valid:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800774 arg_list.append(("perm{}".format(p), [begin, size]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700775 return arg_list
776
777 @staticmethod
778 def agTile(testGen, opName, shapeList, dtype):
779 arg_list = []
780
781 ifm_shape = shapeList[0]
782 rank = len(ifm_shape)
783
784 for p in range(testGen.args.num_rand_permutations):
785
786 # Pick a few random, but small multiple values
787 # because otherwise this has a tendency to generate
788 # enormous tensors
789 multiples = []
790 for i in range(rank):
Matthew Haddon82ad4d62021-08-20 15:02:39 +0100791 if ifm_shape[i] > 1000:
792 # Multiple of 1 if ifm_shape dimension is large to reduce tensor size
793 multiples.append(1)
794 elif max(ifm_shape) > 1000:
795 multiples.append(2)
796 else:
797 multiples.append(testGen.randInt(1, 4))
Kevin Cheng550ccc52021-03-03 11:21:43 -0800798 arg_list.append(("perm{}".format(p), [multiples]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700799
800 return arg_list
801
    @staticmethod
    def agResize(testGen, opName, shapeList, dtype):
        """Argument generator for RESIZE.

        For each legal {mode, input dtype} combination, generate
        num_rand_permutations random output sizes and derive the matching
        stride/offset values: floating-point values for FLOAT tests,
        fixed-point (scaled by 2^shift) values for integer tests.
        Returns a list of (test-name-fragment, build-arg-list) tuples.
        """
        arg_list = []

        # Only the first input shape drives the resize parameters; layout is
        # indexed as [N, H, W, ...] below (dims 1 and 2) — assumed NHWC.
        ifm_shape = shapeList[0]

        for m in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:

            # Exclude illegal {mode, type} configurations.  Pick legal output types
            if m == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif m == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            elif dtype == DType.FLOAT:
                outputDTypeList = [DType.FLOAT]
            else:
                # Unsupported combination: skip this mode entirely
                continue

            for outputDType in outputDTypeList:
                for perm in range(testGen.args.num_rand_permutations):

                    # Randomly generate legal output dimensions and shift
                    # and then compute the stride and offset based on them
                    output_dims = [testGen.randInt(1), testGen.randInt(1)]
                    # Align input and output sample centers to derive offsets
                    in_center_h = (ifm_shape[1] - 1) / 2.0
                    in_center_w = (ifm_shape[2] - 1) / 2.0
                    out_center_h = (output_dims[0] - 1) / 2.0
                    out_center_w = (output_dims[1] - 1) / 2.0

                    fp_stride_y = float(ifm_shape[1]) / float(output_dims[0])
                    fp_stride_x = float(ifm_shape[2]) / float(output_dims[1])
                    fp_offset_y = in_center_h - fp_stride_y * out_center_h
                    fp_offset_x = in_center_w - fp_stride_x * out_center_w

                    if outputDType == DType.FLOAT:
                        # FLOAT path: fixed-point fields are zeroed, the
                        # *_fp fields carry the real stride/offset
                        shift = 0
                        stride = [0, 0]
                        offset = [0, 0]
                        stride_fp = [fp_stride_y, fp_stride_x]
                        offset_fp = [fp_offset_y, fp_offset_x]
                        arg_list.append(
                            (
                                "mode{}_odim{}x{}_out{}_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}".format(
                                    "N" if m == ResizeMode.NEAREST else "B",
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride_fp[0],
                                    stride_fp[1],
                                    offset_fp[0],
                                    offset_fp[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )
                    else:
                        # Integer path: quantize the stride/offset to
                        # fixed-point with 2^shift scaling (start at 11)
                        shift = 11
                        unit = float(1 << shift)
                        stride_y = int(round(fp_stride_y * unit))
                        stride_x = int(round(fp_stride_x * unit))
                        offset_y = int(round(fp_offset_y * unit))
                        offset_x = int(round(fp_offset_x * unit))

                        # Reduce the shift until every fixed-point value
                        # fits in a signed 16-bit field
                        while (
                            stride_y >= 32768
                            or stride_x >= 32768
                            or offset_y >= 32768
                            or offset_x >= 32768
                            or offset_y < -32768
                            or offset_x < -32768
                        ):
                            shift = shift - 1
                            unit = float(1 << shift)
                            stride_y = int(round(fp_stride_y * unit))
                            stride_x = int(round(fp_stride_x * unit))
                            offset_y = int(round(fp_offset_y * unit))
                            offset_x = int(round(fp_offset_x * unit))

                        stride = [stride_y, stride_x]
                        offset = [offset_y, offset_x]

                        # Floating-point fields are unused for integer tests
                        stride_fp = [0.0, 0.0]
                        offset_fp = [0.0, 0.0]

                        arg_list.append(
                            (
                                "mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}".format(
                                    "N" if m == ResizeMode.NEAREST else "B",
                                    shift,
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride[0],
                                    stride[1],
                                    offset[0],
                                    offset[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )

        return arg_list
928
929 def agCondIf(testGen, opName, shapeList, dtype):
930 # CondIf generates the condition values here.
931 # Convert to tensors in the build function, along with the
932 # then and else blocks
933 arg_list = []
934
935 for c in [False, True]:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800936 arg_list.append(("cond{}".format(int(c)), [c]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700937
938 return arg_list
939
940 def agWhileLoop(testGen, opName, shapeList, dtype):
941 # While loop: 0 iterations, 1, more than 1
942 arg_list = []
943
944 for iter in [0, 1, 4]:
Kevin Cheng550ccc52021-03-03 11:21:43 -0800945 arg_list.append(("iter{}".format(iter), [iter]))
Eric Kunzee5e26762020-10-13 16:11:07 -0700946
947 return arg_list
948
Kevin Cheng550ccc52021-03-03 11:21:43 -0800949
Eric Kunzee5e26762020-10-13 16:11:07 -0700950class TosaTestGen:
Jeremy Johnson97eb75f2021-07-08 11:58:02 +0100951 # Maximum rank of tensor supported by test generator.
952 TOSA_TENSOR_MAX_RANK = 6
953
Eric Kunzee5e26762020-10-13 16:11:07 -0700954 def __init__(self, args):
955 self.args = args
956 self.basePath = args.output_dir
957 self.random_seed = args.random_seed
958 self.ser = None
959 self.rng = np.random.default_rng(self.random_seed)
960 self.createDynamicOpLists()
961 self.initOpListDefaults()
962 self.quantGen = TosaQuantGen()
963 # Force makeShape to do a specific starting shape
964 self.targetted_shape = None
965
    def createSerializer(self, opName, testPath):
        """Create the output directory for one test and open a serializer on it.

        Sets self.testPath (relative to basePath) and replaces self.ser.
        """
        self.testPath = os.path.join(opName, testPath)

        fullPath = os.path.join(self.basePath, self.testPath)
        os.makedirs(fullPath, exist_ok=True)
        self.ser = ts.TosaSerializer(fullPath)
972
    def getSerializer(self):
        """Return the current serializer (None until createSerializer is called)."""
        return self.ser
975
976 def serialize(self, testName):
Kevin Cheng550ccc52021-03-03 11:21:43 -0800977 with open(
978 os.path.join(self.basePath, self.testPath, "{}.tosa".format(testName)), "wb"
979 ) as fd:
Eric Kunzee5e26762020-10-13 16:11:07 -0700980 fd.write(self.ser.serialize())
981
Kevin Cheng550ccc52021-03-03 11:21:43 -0800982 with open(os.path.join(self.basePath, self.testPath, "desc.json"), "w") as fd:
983 fd.write(self.ser.writeJson("{}.tosa".format(testName)))
Eric Kunzee5e26762020-10-13 16:11:07 -0700984
Matthew Haddon74567092021-07-16 15:38:20 +0100985 def resetRNG(self, seed=None):
986 if seed == None:
987 seed = self.random_seed + 1
988 self.rng = np.random.default_rng(seed)
989
Eric Kunzee5e26762020-10-13 16:11:07 -0700990 def getRandTensor(self, shape, dtype):
Eric Kunzee5e26762020-10-13 16:11:07 -0700991 if dtype == DType.BOOL:
992 np_dt = np.bool
993 return np.bool_(self.rng.choice(a=[False, True], size=shape))
Kevin Chenga9017402021-07-28 17:19:23 -0700994 # TOSA specific INT4 weight range from -7 to 7
Eric Kunzee5e26762020-10-13 16:11:07 -0700995 elif dtype == DType.INT4:
Kevin Chenga9017402021-07-28 17:19:23 -0700996 return np.int32(self.rng.integers(low=-7, high=8, size=shape))
Eric Kunzee5e26762020-10-13 16:11:07 -0700997 elif dtype == DType.INT8:
Jeremy Johnson18e26662021-07-22 16:15:29 +0100998 return np.int32(self.rng.integers(low=-128, high=128, size=shape))
999 elif dtype == DType.UINT8:
1000 return np.int32(self.rng.integers(low=0, high=256, size=shape))
Eric Kunzee5e26762020-10-13 16:11:07 -07001001 elif dtype == DType.INT16:
1002 return np.int32(self.rng.integers(low=-32768, high=32768, size=shape))
1003 elif dtype == DType.INT32:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001004 return np.int32(
1005 self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape)
1006 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001007 elif dtype == DType.INT48:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001008 return np.int64(
1009 self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape)
1010 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001011 elif dtype == DType.FLOAT:
Jeremy Johnson18e26662021-07-22 16:15:29 +01001012 return np.float32(self.rng.random(size=shape))
Eric Kunzee5e26762020-10-13 16:11:07 -07001013 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001014 raise Exception("Unrecognized Dtype: {}".format(dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07001015
Kevin Cheng989cb052021-04-28 16:29:44 -07001016 def buildPlaceholderTensors(self, shape_list, dtype_list):
Eric Kunzee5e26762020-10-13 16:11:07 -07001017 placeholders = []
1018
Kevin Cheng989cb052021-04-28 16:29:44 -07001019 assert len(shape_list) == len(dtype_list)
1020
1021 for idx, shape in enumerate(shape_list):
1022 arr = self.getRandTensor(shape, dtype_list[idx])
1023 placeholders.append(self.ser.addPlaceholder(shape, dtype_list[idx], arr))
Eric Kunzee5e26762020-10-13 16:11:07 -07001024
1025 return placeholders
1026
Kevin Cheng989cb052021-04-28 16:29:44 -07001027 def buildConstTensors(self, shape_list, dtype_list):
Eric Kunzee5e26762020-10-13 16:11:07 -07001028 consts = []
1029
Kevin Cheng989cb052021-04-28 16:29:44 -07001030 assert len(shape_list) == len(dtype_list)
1031
1032 for idx, shape in enumerate(shape_list):
1033 arr = self.getRandTensor(shape, dtype_list[idx])
1034 consts.append(self.ser.addConst(shape, dtype_list[idx], arr))
Eric Kunzee5e26762020-10-13 16:11:07 -07001035
1036 return consts
1037
1038 def makeShape(self, rank):
1039 if self.targetted_shape:
1040 return np.int32(self.targetted_shape)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001041 return np.int32(
1042 self.rng.integers(
1043 low=self.args.tensor_shape_range[0],
1044 high=self.args.tensor_shape_range[1],
1045 size=rank,
1046 )
1047 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001048
    def setTargetShape(self, shape):
        # Force subsequent makeShape() calls to return exactly this shape;
        # pass None (or any falsy value) to restore random shape generation.
        self.targetted_shape = shape
1051
1052 def randInt(self, low=0, high=256):
1053 return np.int32(self.rng.integers(low=low, high=high, size=1))[0]
1054
1055 def getRandNumberDType(self, dtype):
1056 if dtype == DType.FLOAT:
1057 return self.rng.random()
1058 elif dtype == DType.BOOL:
1059 return self.rng.choice([False, True])
Kevin Chenga9017402021-07-28 17:19:23 -07001060 # TOSA specific INT4 weight range from -7 to 7
Eric Kunzee5e26762020-10-13 16:11:07 -07001061 elif dtype == DType.INT4:
Kevin Chenga9017402021-07-28 17:19:23 -07001062 low, high = (-7, 8)
Eric Kunzee5e26762020-10-13 16:11:07 -07001063 elif dtype == DType.INT8:
Jeremy Johnson18e26662021-07-22 16:15:29 +01001064 low, high = (-128, 128)
Eric Kunzee5e26762020-10-13 16:11:07 -07001065 elif dtype == DType.INT16:
1066 low, high = (-32768, 32768)
1067 elif dtype == DType.INT32:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001068 low, high = (-(1 << 31), (1 << 31))
Eric Kunzee5e26762020-10-13 16:11:07 -07001069 elif dtype == DType.INT48:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001070 low, high = (-(1 << 47), (1 << 47))
Eric Kunzee5e26762020-10-13 16:11:07 -07001071 # Special size
1072 return np.int64(self.rng.integers(low, high, size=1))[0]
1073 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001074 raise Exception("Unknown dtype: {}".format(dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07001075
1076 return np.int32(self.rng.integers(low, high, size=1))[0]
1077
1078 def shapeStr(self, shape):
1079
1080 sStr = []
1081 # Convert to strings
1082 for i in shape:
1083 sStr.append(str(i))
1084
Kevin Cheng550ccc52021-03-03 11:21:43 -08001085 return "x".join(sStr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001086
1087 def typeStr(self, t):
Kevin Cheng989cb052021-04-28 16:29:44 -07001088 if isinstance(t, list):
1089 assert len(t) >= 2
1090 return "{}x{}".format(self.typeStr(t[0]), self.typeStr(t[1]))
Eric Kunzee5e26762020-10-13 16:11:07 -07001091 else:
Kevin Cheng989cb052021-04-28 16:29:44 -07001092 if t == DType.BOOL:
1093 return "b"
1094 elif t == DType.INT4:
1095 return "i4"
1096 elif t == DType.INT8:
1097 return "i8"
1098 elif t == DType.UINT8:
1099 return "u8"
1100 elif t == DType.INT16:
1101 return "i16"
1102 elif t == DType.INT32:
1103 return "i32"
1104 elif t == DType.INT48:
1105 return "i48"
1106 elif t == DType.FLOAT:
1107 return "float"
1108 else:
1109 raise Exception("Unknown dtype, cannot convert to string: {}".format(t))
Eric Kunzee5e26762020-10-13 16:11:07 -07001110
1111 def typeWidth(self, t):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001112 """ Get the datatype width for integer types"""
Kevin Cheng3a478572021-01-22 17:21:02 -08001113 if t == DType.INT4:
Eric Kunzee5e26762020-10-13 16:11:07 -07001114 return 4
1115 elif t == DType.INT8:
1116 return 8
Kevin Cheng3a478572021-01-22 17:21:02 -08001117 elif t == DType.UINT8:
1118 return 8
Eric Kunzee5e26762020-10-13 16:11:07 -07001119 elif t == DType.INT16:
1120 return 16
1121 elif t == DType.INT32:
1122 return 32
1123 elif t == DType.INT48:
1124 return 48
1125 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001126 raise Exception("Unknown dtype, cannot convert to string: {}".format(t))
Eric Kunzee5e26762020-10-13 16:11:07 -07001127
1128 # Argument generators
1129 # Returns a list of tuples (stringDescriptor, [build_fcn_arg_list])
1130 # Where the string descriptor is used to generate the test name and
1131 # The build_fcn_arg_list is expanded and passed to the operator test
1132 # build function
1133
Kevin Cheng550ccc52021-03-03 11:21:43 -08001134 def build_unary(self, op, a, qinfo=None):
Eric Kunzee5e26762020-10-13 16:11:07 -07001135 result_tens = OutputShaper.unaryOp(self.ser, a)
1136 self.ser.addOperator(op, [a.name], [result_tens.name], None, qinfo)
1137 return result_tens
1138
1139 def build_binary_broadcast(self, op, a, b):
1140 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1141 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1142 return result_tens
1143
1144 def build_binary_nonbroadcast(self, op, a, b):
1145 result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b)
1146 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1147 return result_tens
1148
Kevin Chengaee1fac2020-11-11 13:54:06 -08001149 def build_arithmetic_right_shift(self, op, a, b, round):
1150 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1151
1152 attr = ts.TosaSerializerAttribute()
1153 attr.ArithmeticRightShiftAttribute(round)
1154
1155 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
1156 return result_tens
1157
1158 def build_mul(self, op, a, b, shift):
Eric Kunzee5e26762020-10-13 16:11:07 -07001159 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1160
1161 # Special for multiply:
1162 # Force the result to INT32 for INT types
1163 if a.dtype != DType.FLOAT:
1164 result_tens.setDtype(DType.INT32)
1165
Kevin Chengaee1fac2020-11-11 13:54:06 -08001166 attr = ts.TosaSerializerAttribute()
1167 attr.MulAttribute(shift)
1168
1169 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001170 return result_tens
1171
1172 def build_table(self, op, a):
Jeremy Johnsonf54d8a22021-07-20 16:01:06 +01001173 # Constant size depending on type, random values
1174 if a.dtype == DType.INT16:
Kevin Chengacb550f2021-06-29 15:32:19 -07001175 table_dtype = DType.INT16
Jeremy Johnsonf54d8a22021-07-20 16:01:06 +01001176 table_arr = self.getRandTensor([513], table_dtype)
1177 else:
1178 assert a.dtype == DType.INT8
1179 table_dtype = DType.INT8
1180 table_arr = self.getRandTensor([256], table_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001181
Jeremy Johnsonf54d8a22021-07-20 16:01:06 +01001182 table_tens = self.ser.addConst(table_arr.shape, table_dtype, table_arr)
1183 result_tens = OutputShaper.tableOp(self.ser, a, table_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001184 self.ser.addOperator(op, [a.name, table_tens.name], [result_tens.name], None)
1185
1186 return result_tens
1187
1188 def build_select(self, op, cond, a, b):
Eric Kunzee5e26762020-10-13 16:11:07 -07001189 result_tens = OutputShaper.selectOp(self.ser, cond, a, b)
1190 self.ser.addOperator(op, [cond.name, a.name, b.name], [result_tens.name])
Eric Kunzee5e26762020-10-13 16:11:07 -07001191 return result_tens
1192
1193 def build_comparison(self, op, a, b):
1194 result_tens = OutputShaper.binaryComparisonOp(self.ser, a, b)
1195 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1196 return result_tens
1197
1198 def build_argmax(self, op, a, axis):
1199 result_tens = OutputShaper.argmaxOp(self.ser, a, axis)
1200
1201 attr = ts.TosaSerializerAttribute()
1202 attr.AxisAttribute(axis)
1203
1204 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1205 return result_tens
1206
Kevin Cheng550ccc52021-03-03 11:21:43 -08001207 def build_pool2d(self, op, input, kernel, stride, pad, qinfo=None):
Eric Kunzee5e26762020-10-13 16:11:07 -07001208 result_tens = OutputShaper.pool2dOp(self.ser, input, kernel, stride, pad)
1209
1210 attr = ts.TosaSerializerAttribute()
1211 attr.Pool2dAttribute(kernel, stride, pad)
Eric Kunzee5e26762020-10-13 16:11:07 -07001212
1213 self.ser.addOperator(op, [input.name], [result_tens.name], attr, qinfo)
1214 return result_tens
1215
1216 def build_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001217 assert len(padding) == 4
1218 result_tens = OutputShaper.conv2dOp(
1219 self.ser, ifm, filter, strides, padding, dilations
1220 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001221
1222 attr = ts.TosaSerializerAttribute()
1223 attr.Conv2dAttribute(padding, strides, dilations)
1224
Kevin Cheng550ccc52021-03-03 11:21:43 -08001225 self.ser.addOperator(
1226 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
1227 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001228 return result_tens
1229
Kevin Cheng550ccc52021-03-03 11:21:43 -08001230 def build_transpose_conv2d(
Kevin Cheng989cb052021-04-28 16:29:44 -07001231 self, op, ifm, filter, bias, stride, outpad, dilation, output_shape, qinfo
Kevin Cheng550ccc52021-03-03 11:21:43 -08001232 ):
1233 assert len(outpad) == 2
Eric Kunzee5e26762020-10-13 16:11:07 -07001234 result_tens = OutputShaper.transposeConv2DOp(self.ser, ifm, output_shape)
1235
1236 attr = ts.TosaSerializerAttribute()
1237 attr.TransposeConv2DAttribute(outpad, stride, dilation, output_shape)
1238
Kevin Cheng550ccc52021-03-03 11:21:43 -08001239 self.ser.addOperator(
Kevin Cheng989cb052021-04-28 16:29:44 -07001240 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
Kevin Cheng550ccc52021-03-03 11:21:43 -08001241 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001242 return result_tens
1243
Kevin Cheng550ccc52021-03-03 11:21:43 -08001244 def build_depthwise_conv2d(
1245 self, op, ifm, filter, bias, strides, padding, dilations, qinfo
1246 ):
1247 result_tens = OutputShaper.depthwiseConv2dOp(
1248 self.ser, ifm, filter, strides, padding, dilations
1249 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001250
1251 attr = ts.TosaSerializerAttribute()
1252 attr.Conv2dAttribute(padding, strides, dilations)
1253
Kevin Cheng550ccc52021-03-03 11:21:43 -08001254 self.ser.addOperator(
1255 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
1256 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001257 return result_tens
1258
1259 def build_fully_connected(self, op, ifm, filter, bias, qinfo):
1260 result_tens = OutputShaper.fullyConnectedOp(self.ser, ifm, filter)
1261
Kevin Cheng550ccc52021-03-03 11:21:43 -08001262 self.ser.addOperator(
1263 op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo
1264 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001265 return result_tens
1266
1267 def build_matmul(self, op, a, b, qinfo):
1268 result_tens = OutputShaper.matmulOp(self.ser, a, b)
1269 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], None, qinfo)
1270 return result_tens
1271
1272 def build_reduce(self, op, a, axis):
1273 result_tens = OutputShaper.reduceOp(self.ser, a, axis)
1274
1275 attr = ts.TosaSerializerAttribute()
1276 attr.AxisAttribute(axis)
1277
1278 self.ser.addOperator(op, [a.name], result_tens.name, attr)
1279 return result_tens
1280
1281 def build_clamp(self, op, a):
1282 result_tens = OutputShaper.unaryOp(self.ser, a)
1283
1284 attr = ts.TosaSerializerAttribute()
Jeremy Johnson18e26662021-07-22 16:15:29 +01001285 v = [self.getRandNumberDType(a.dtype), self.getRandNumberDType(a.dtype)]
Eric Kunzee5e26762020-10-13 16:11:07 -07001286
1287 if a.dtype == DType.FLOAT:
1288 attr.ClampAttribute(0, 0, min(v), max(v))
1289 else:
1290 attr.ClampAttribute(min(v), max(v), 0, 0)
1291
1292 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1293 return result_tens
1294
1295 def build_leaky_relu(self, op, a):
1296 result_tens = OutputShaper.unaryOp(self.ser, a)
1297 attr = ts.TosaSerializerAttribute()
1298
1299 attr.LeakyReluAttribute(self.getRandNumberDType(DType.FLOAT))
1300
1301 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1302 return result_tens
1303
1304 # Needs an additional type/input
1305 def build_prelu(self, op, a):
1306 result_tens = OutputShaper.unaryOp(self.ser, a)
1307
1308 self.ser.addOperator(op, [a.name], [result_tens.name])
1309 return result_tens
1310
1311 def build_relun(self, op, a):
1312 result_tens = OutputShaper.unaryOp(self.ser, a)
1313
1314 attr = ts.TosaSerializerAttribute()
1315
1316 if a.dtype == DType.FLOAT:
1317 attr.ReluNAttribute(0, self.getRandNumberDType(a.dtype))
1318 else:
1319 attr.ReluNAttribute(self.getRandNumberDType(a.dtype), 0)
1320
1321 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1322 return result_tens
1323
1324 def build_sigmoid(self, op, a):
1325 result_tens = OutputShaper.unaryOp(self.ser, a)
1326 self.ser.addOperator(op, [a.name], [result_tens.name])
1327 return result_tens
1328
1329 def build_tanh(self, op, a):
1330 result_tens = OutputShaper.unaryOp(self.ser, a)
1331 self.ser.addOperator(op, [a.name], [result_tens.name])
1332 return result_tens
1333
Matthew Haddon818ab902021-07-27 09:12:49 +01001334 def build_concat(self, op, *a):
1335 assert (type(a[-1]) == int)
1336
1337 # To store variable length list of input tensors we need to store axis along with it
1338 axis = a[-1]
1339 a = a[:-1]
1340
1341 result_tens = OutputShaper.concatOp(self.ser, axis, *a)
Eric Kunzee5e26762020-10-13 16:11:07 -07001342
1343 attr = ts.TosaSerializerAttribute()
1344 attr.AxisAttribute(axis)
1345
Matthew Haddon818ab902021-07-27 09:12:49 +01001346 input_tensor_names = []
1347 for tensor in a:
1348 input_tensor_names.append(tensor.name)
1349
1350 self.ser.addOperator(op, input_tensor_names, [result_tens.name], attr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001351
1352 def build_pad(self, op, a, padding, qinfo):
1353 result_tens = OutputShaper.padOp(self.ser, a, padding)
1354
1355 # Need to turn the padding array into a TOSA tensor here.
1356 # This is one of the few tensor operands that does not get
1357 # randomly generated
Kevin Cheng550ccc52021-03-03 11:21:43 -08001358 padding_tens = self.ser.addConst(padding.shape, DType.INT32, padding)
Eric Kunzee5e26762020-10-13 16:11:07 -07001359
Kevin Cheng550ccc52021-03-03 11:21:43 -08001360 self.ser.addOperator(
1361 op, [a.name, padding_tens.name], [result_tens.name], None, qinfo
1362 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001363
1364 def build_reshape(self, op, a, newShape):
1365 result_tens = OutputShaper.reshapeOp(self.ser, a, newShape)
1366
1367 attr = ts.TosaSerializerAttribute()
1368 attr.ReshapeAttribute(newShape)
1369
1370 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1371 return result_tens
1372
1373 def build_reverse(self, op, a, axis):
1374 result_tens = OutputShaper.unaryOp(self.ser, a)
1375
1376 attr = ts.TosaSerializerAttribute()
1377 attr.AxisAttribute(axis)
1378
1379 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1380 return result_tens
1381
1382 def build_transpose(self, op, a, perms):
1383 result_tens = OutputShaper.transposeOp(self.ser, a, perms)
1384
Kevin Cheng550ccc52021-03-03 11:21:43 -08001385 perms_tens = self.ser.addConst([len(perms)], DType.INT32, np.int32(perms))
Eric Kunzee5e26762020-10-13 16:11:07 -07001386
1387 self.ser.addOperator(op, [a.name, perms_tens.name], [result_tens.name])
1388 return result_tens
1389
1390 def build_slice(self, op, a, begin, size):
1391 result_tens = OutputShaper.sliceOp(self.ser, a, begin, size)
1392
1393 attr = ts.TosaSerializerAttribute()
1394 attr.SliceAttribute(begin, size)
1395
1396 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1397 return result_tens
1398
1399 def build_tile(self, op, a, multiples):
1400 result_tens = OutputShaper.tileOp(self.ser, a, multiples)
1401
1402 attr = ts.TosaSerializerAttribute()
1403 attr.TileAttribute(multiples)
1404
1405 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1406 return result_tens
1407
Kevin Cheng77d0f762020-11-24 10:26:32 -08001408 def build_gather(self, op, values):
Eric Kunzee5e26762020-10-13 16:11:07 -07001409
1410 # Create a new indicies tensor
1411 # here with data that doesn't exceed the dimensions of the values tensor
1412
Kevin Cheng550ccc52021-03-03 11:21:43 -08001413 K = values.shape[1] # K
1414 W = self.randInt(
1415 self.args.tensor_shape_range[0], self.args.tensor_shape_range[1]
1416 ) # W
1417 indicies_arr = np.int32(
1418 self.rng.integers(low=0, high=K, size=[values.shape[0], W])
1419 ) # (N, W)
1420 indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001421
Kevin Cheng77d0f762020-11-24 10:26:32 -08001422 result_tens = OutputShaper.gatherOp(self.ser, values, indicies)
Eric Kunzee5e26762020-10-13 16:11:07 -07001423
Kevin Cheng77d0f762020-11-24 10:26:32 -08001424 self.ser.addOperator(op, [values.name, indicies.name], [result_tens.name])
Eric Kunzee5e26762020-10-13 16:11:07 -07001425
1426 return result_tens
1427
Kevin Cheng77d0f762020-11-24 10:26:32 -08001428 def build_scatter(self, op, values_in, input):
1429
1430 # Create a new indicies tensor
1431 # here with data that doesn't exceed the dimensions of the values_in tensor
1432
Kevin Cheng550ccc52021-03-03 11:21:43 -08001433 K = values_in.shape[1] # K
1434 W = input.shape[1] # W
1435 indicies_arr = np.int32(
1436 self.rng.integers(low=0, high=K, size=[values_in.shape[0], W])
1437 ) # (N, W)
1438 indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
Kevin Cheng77d0f762020-11-24 10:26:32 -08001439
1440 result_tens = OutputShaper.scatterOp(self.ser, values_in, indicies, input)
1441
Kevin Cheng550ccc52021-03-03 11:21:43 -08001442 self.ser.addOperator(
1443 op, [values_in.name, indicies.name, input.name], [result_tens.name]
1444 )
Kevin Cheng77d0f762020-11-24 10:26:32 -08001445
1446 return result_tens
1447
Kevin Cheng550ccc52021-03-03 11:21:43 -08001448 def build_resize(
1449 self,
1450 op,
1451 input,
1452 mode,
1453 stride,
1454 offset,
1455 shift,
1456 stride_fp,
1457 offset_fp,
1458 output_dims,
1459 input_dtype,
1460 output_dtype,
1461 ):
1462 result_tens = OutputShaper.resizeOp(
1463 self.ser,
1464 input,
1465 mode,
1466 stride,
1467 offset,
1468 shift,
1469 stride_fp,
1470 offset_fp,
1471 output_dims,
1472 input_dtype,
1473 output_dtype,
1474 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001475
1476 attr = ts.TosaSerializerAttribute()
Kevin Cheng77d0f762020-11-24 10:26:32 -08001477
Kevin Cheng550ccc52021-03-03 11:21:43 -08001478 attr.ResizeAttribute(
1479 output_dims, stride, offset, shift, stride_fp, offset_fp, mode
1480 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001481
1482 self.ser.addOperator(op, [input.name], [result_tens.name], attr)
1483 return result_tens
1484
1485 def build_identityn(self, op, val, val2):
1486
Kevin Cheng550ccc52021-03-03 11:21:43 -08001487 result_tens = OutputShaper.unaryOp(self.ser, val)
Eric Kunzee5e26762020-10-13 16:11:07 -07001488 result_tens2 = OutputShaper.unaryOp(self.ser, val2)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001489 self.ser.addOperator(
1490 op, [val.name, val2.name], [result_tens.name, result_tens2.name]
1491 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001492 return result_tens
1493
    def build_placeholder(self, op, val):
        """Wrap a placeholder in an IDENTITY op."""
        # Add an identity op to avoid warning in the reference model
        return self.build_unary(Op.IDENTITY, val)
1497
1498 # Type Conversion
1499 def build_cast(self, op, val, out_dtype):
1500 result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
1501 self.ser.addOperator(op, [val.name], [result_tens.name])
1502 return result_tens
1503
    def build_rescale(self, op, val, out_dtype, scale32, double_round, per_channel):
        """RESCALE with randomized zero points and per-channel multipliers.

        Random scales are drawn, converted to (multiplier, shift) pairs,
        and out-of-range shifts mark the test as UNPREDICTABLE.
        """
        result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)

        # One scale per channel when per_channel, otherwise a single scale
        if per_channel:
            nc = val.shape[-1]
        else:
            nc = 1

        in_type_width = self.typeWidth(val.dtype)
        out_type_width = self.typeWidth(out_dtype)

        # 8-bit types get a random zero point; the extra bit accounts for
        # the zero-point-adjusted value range
        if val.dtype == DType.INT8:
            input_zp = self.randInt(-128, 128)
            in_type_width = in_type_width + 1
        elif val.dtype == DType.UINT8:
            input_zp = self.randInt(0, 256)
            in_type_width = in_type_width + 1
        else:
            input_zp = 0

        if out_dtype == DType.INT8:
            output_zp = self.randInt(-128, 128)
            out_type_width = out_type_width + 1
        elif out_dtype == DType.UINT8:
            output_zp = self.randInt(0, 256)
            out_type_width = out_type_width + 1
        else:
            output_zp = 0

        # Calculate scale based on:
        # scale = a *(2^output_width)/(2^input_width))

        a = np.float32(self.rng.random(size=[nc]))
        scale_arr = a * np.float32((1 << out_type_width) / (1 << in_type_width))

        if scale32:
            pass
            # Cap the scaling at 2^31 - 1 for scale32
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1)
        else:
            # Cap the scaling at 2^15 - 1 for scale16
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)

        # print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))

        multiplier_arr = np.int32(np.zeros(shape=[nc]))
        shift_arr = np.int32(np.zeros(shape=[nc]))

        for i in range(nc):
            multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(
                scale_arr[i], scale32
            )
            # Shifts outside [2, 62] produce implementation-defined results
            if shift_arr[i] < 2 or shift_arr[i] > 62:
                self.ser.setExpectedReturnCode(
                    TosaReturnCode.UNPREDICTABLE, "OpRescale: invalid shift value"
                )

        # print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp))

        attr = ts.TosaSerializerAttribute()
        attr.RescaleAttribute(
            input_zp,
            output_zp,
            multiplier_arr,
            shift_arr,
            scale32,
            double_round,
            per_channel,
        )

        self.ser.addOperator(op, [val.name], [result_tens.name], attr)
        return result_tens
1576
    def build_cond_if_const(self, op, then_tens, else_tens, cond):
        """COND_IF whose then/else blocks each emit a random constant."""
        # For cond_if with constants, we're supplied with then/else tensors that we ignore
        # (except for the generated shape) and the condition.  Build Then/Else blocks
        # and fill them with const nodes for the body.

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, [cond])

        # Make then/else tensors
        out_shape = then_tens.shape
        then_arr = np.int32(self.rng.integers(0, 256, size=out_shape))
        else_arr = np.int32(self.rng.integers(0, 256, size=out_shape))

        # And the result tensor based on any of the outputs
        result_tens = self.ser.addOutput(out_shape, DType.INT32)

        # Create the attribute with the names of the then/else blocks
        then_block = "THEN_BLOCK"
        else_block = "ELSE_BLOCK"
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(op, [cond_tens.name], [result_tens.name], attr)

        self.ser.startBasicBlock(then_block)
        # Build the actual then/else tensors inside their blocks
        then_tens = self.ser.addConst(out_shape, DType.INT32, then_arr)
        self.ser.addOutputTensor(then_tens)

        self.ser.startBasicBlock(else_block)
        else_tens = self.ser.addConst(out_shape, DType.INT32, else_arr)
        self.ser.addOutputTensor(else_tens)

        return result_tens
1612
    def build_cond_if_binary(self, op, a, b, cond):
        """Build a COND_IF operator whose THEN block computes a + b and whose
        ELSE block computes a - b.

        Args:
            op: opcode handed straight to the serializer (the cond_if op)
            a, b: input tensors; the result takes a's shape and dtype
            cond: value serialized into the boolean condition const tensor

        Returns:
            the output tensor registered for the cond_if op
        """
        # For cond_if with a binary op in the then/else blocks, take a and b and
        # alternately add or subtract them based on the condition

        # Condition tensor (rank-0 boolean const)
        cond_tens = self.ser.addConst([], DType.BOOL, [cond])

        result_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.currBasicBlock.addOutput(result_tens.name)

        # Create the attribute with the names of the then/else blocks
        then_block = "THEN_BLOCK"
        else_block = "ELSE_BLOCK"
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(
            op, [cond_tens.name, a.name, b.name], [result_tens.name], attr
        )

        # THEN block: result = a + b
        self.ser.startBasicBlock(then_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        then_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.addOperator(Op.ADD, [a.name, b.name], [then_tens.name])

        # ELSE block: result = a - b
        self.ser.startBasicBlock(else_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        else_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.addOperator(Op.SUB, [a.name, b.name], [else_tens.name])

        return result_tens
1647
1648 def build_while_loop(self, op, a, iter_val):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001649 iter = self.ser.addPlaceholder([], DType.INT32, [np.int32(iter_val)])
Eric Kunzee5e26762020-10-13 16:11:07 -07001650
Kevin Cheng550ccc52021-03-03 11:21:43 -08001651 cond_block = "COND_BLOCK"
1652 body_block = "BODY_BLOCK"
Eric Kunzee5e26762020-10-13 16:11:07 -07001653
1654 attr = ts.TosaSerializerAttribute()
1655 attr.WhileLoopAttribute(cond_block, body_block)
1656
1657 # Accumulator tensor
Kevin Cheng550ccc52021-03-03 11:21:43 -08001658 # acc = self.ser.addOutput(a.shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001659 acc_init_val = np.int32(np.zeros(a.shape))
Kevin Cheng550ccc52021-03-03 11:21:43 -08001660 acc = self.ser.addPlaceholder(a.shape, a.dtype, acc_init_val)
Eric Kunzee5e26762020-10-13 16:11:07 -07001661
1662 # Intermediate/output tensors for everything going through the loop
Kevin Cheng550ccc52021-03-03 11:21:43 -08001663 iter_out = self.ser.addIntermediate(iter.shape, iter.dtype)
1664 a_out = self.ser.addIntermediate(a.shape, a.dtype)
1665 acc_out = self.ser.addIntermediate(acc.shape, acc.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001666
1667 # While_loop operator
Kevin Cheng550ccc52021-03-03 11:21:43 -08001668 self.ser.addOperator(
1669 op,
1670 [iter.name, a.name, acc.name],
1671 [iter_out.name, a_out.name, acc_out.name],
1672 attr,
1673 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001674
1675 # COND block (input: iter, output: cond_tens )
1676 self.ser.startBasicBlock(cond_block)
1677 self.ser.addInputTensor(iter)
1678 self.ser.addInputTensor(a)
1679 self.ser.addInputTensor(acc)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001680 zero_tens = self.ser.addConst([], DType.INT32, [np.int32(0)])
1681 cond_tens = self.ser.addOutput([], DType.BOOL)
1682 self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name], [cond_tens.name])
Eric Kunzee5e26762020-10-13 16:11:07 -07001683
1684 # BODY block (input: a, acc, iter, output: a, acc, iter)
1685 # Note that local intermediate tensors need to be declared here for the outputs
1686 self.ser.startBasicBlock(body_block)
1687 self.ser.addInputTensor(iter)
1688 self.ser.addInputTensor(a)
1689 self.ser.addInputTensor(acc)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001690 one_tens = self.ser.addConst([], DType.INT32, [np.int32(1)])
1691 iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype)
1692 acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001693 self.ser.addOperator(Op.ADD, [a.name, acc.name], [acc_body_out.name])
1694 self.ser.addOperator(Op.SUB, [iter.name, one_tens.name], [iter_body_out.name])
1695 self.ser.addOutputTensor(iter_body_out)
1696 self.ser.addOutputTensor(a)
1697 self.ser.addOutputTensor(acc_body_out)
1698
1699 return acc_out
1700
Kevin Cheng550ccc52021-03-03 11:21:43 -08001701 def genOpTestList(
Matthew Haddon74567092021-07-16 15:38:20 +01001702 self, opName, shapeFilter=[None], rankFilter=None, dtypeFilter=None, testType='positive'
Kevin Cheng550ccc52021-03-03 11:21:43 -08001703 ):
Eric Kunzee5e26762020-10-13 16:11:07 -07001704
1705 try:
1706 op = self.TOSA_OP_LIST[opName]
1707 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001708 raise Exception("Cannot find op with name {}".format(opName))
Eric Kunzee5e26762020-10-13 16:11:07 -07001709
1710 # Initialize a new random number generator
1711 self.rng = np.random.default_rng(self.random_seed)
1712
Kevin Cheng550ccc52021-03-03 11:21:43 -08001713 build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001714
1715 # Generate the lists of arguments
Kevin Cheng550ccc52021-03-03 11:21:43 -08001716 rmin, rmax = op["rank"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001717
Jeremy Johnson97eb75f2021-07-08 11:58:02 +01001718 # Create a default testing rank range, 1-4 inclusive to keep test sizes reasonably small.
1719 default_test_rank_range = range(1, 5)
1720
Eric Kunzee5e26762020-10-13 16:11:07 -07001721 # Test list consists of a tuple of:
1722 # (opName, testNameStr, dtype, shapeList, argumentsList)
1723 testList = []
1724
1725 if not shapeFilter:
1726 shapeFilter = [None]
1727
Matthew Haddon74567092021-07-16 15:38:20 +01001728 # Positive test loop
1729 if testType in ['positive', 'both']:
1730 for r in range(rmin, rmax + 1):
Eric Kunzee5e26762020-10-13 16:11:07 -07001731
Matthew Haddon74567092021-07-16 15:38:20 +01001732 # Filter out the rank?
1733 if rankFilter is not None and r not in rankFilter:
1734 continue
1735 if (
1736 rankFilter is None
1737 and shapeFilter[0] is None
1738 and r not in default_test_rank_range
1739 ):
1740 continue
Eric Kunzee5e26762020-10-13 16:11:07 -07001741
Matthew Haddon74567092021-07-16 15:38:20 +01001742 for t in op["types"]:
Eric Kunzee5e26762020-10-13 16:11:07 -07001743
Matthew Haddon74567092021-07-16 15:38:20 +01001744 # Filter tests based on dtype?
1745 if dtypeFilter is not None:
1746 if not (
1747 t in dtypeFilter
1748 or (isinstance(t, list) and t[0] in dtypeFilter)
1749 ):
1750 continue
Eric Kunzee5e26762020-10-13 16:11:07 -07001751
Matthew Haddon74567092021-07-16 15:38:20 +01001752 # Create the placeholder and const tensors
1753 for shape in shapeFilter:
1754 # A None shape chooses a random shape of a given rank
Eric Kunzee5e26762020-10-13 16:11:07 -07001755
Matthew Haddon74567092021-07-16 15:38:20 +01001756 # Filter out by rank
1757 if shape is not None and len(shape) != r:
1758 continue
Eric Kunzee5e26762020-10-13 16:11:07 -07001759
Matthew Haddon74567092021-07-16 15:38:20 +01001760 self.setTargetShape(shape)
1761 shapeList = tgen_fcn(self, op, r)
Eric Kunzee5e26762020-10-13 16:11:07 -07001762
Matthew Haddon74567092021-07-16 15:38:20 +01001763 shapeStr = self.shapeStr(shapeList[0])
1764 typeStr = self.typeStr(t)
Eric Kunzee5e26762020-10-13 16:11:07 -07001765
Matthew Haddon74567092021-07-16 15:38:20 +01001766 # Argument lists consists of tuples of the (str, []) string representation and the build function argument list
1767 argList = []
1768 if agen_fcn:
1769 argList = agen_fcn(self, opName, shapeList, t)
Eric Kunzee5e26762020-10-13 16:11:07 -07001770 else:
Matthew Haddon74567092021-07-16 15:38:20 +01001771 argList = [("", [])]
Eric Kunzee5e26762020-10-13 16:11:07 -07001772
Matthew Haddon74567092021-07-16 15:38:20 +01001773 for argStr, args in argList:
1774 if argStr:
1775 testStr = "{}_{}_{}_{}".format(
1776 opName, shapeStr, typeStr, argStr
1777 )
1778 else:
1779 testStr = "{}_{}_{}".format(opName, shapeStr, typeStr)
1780
1781 testList.append((opName, testStr, t, shapeList, args))
1782
1783 # Reset RNG so both positive and negative tests are reproducible
1784 self.resetRNG()
1785 # Negative test loop
1786 if testType in ['negative', 'both']:
1787 print("Negative tests unsupported")
Eric Kunzee5e26762020-10-13 16:11:07 -07001788
1789 return testList
1790
Kevin Cheng989cb052021-04-28 16:29:44 -07001791 def serializeTest(self, opName, testStr, dtype_or_dtypeList, shapeList, testArgs):
Eric Kunzee5e26762020-10-13 16:11:07 -07001792 try:
1793 op = self.TOSA_OP_LIST[opName]
1794 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001795 raise Exception("Cannot find op with name {}".format(opName))
Eric Kunzee5e26762020-10-13 16:11:07 -07001796
1797 # Create a serializer
1798 self.createSerializer(opName, testStr)
1799
Kevin Cheng550ccc52021-03-03 11:21:43 -08001800 build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
1801 pCount, cCount = op["operands"]
Kevin Cheng989cb052021-04-28 16:29:44 -07001802 num_operands = pCount + cCount
1803
1804 if isinstance(dtype_or_dtypeList, list):
1805 dtypeList = dtype_or_dtypeList
Matthew Haddon818ab902021-07-27 09:12:49 +01001806 elif op['op'] == Op.CONCAT:
1807 dtypeList = [dtype_or_dtypeList] * len(shapeList)
Kevin Cheng989cb052021-04-28 16:29:44 -07001808 else:
1809 dtypeList = [dtype_or_dtypeList] * (num_operands)
1810
Matthew Haddon818ab902021-07-27 09:12:49 +01001811 if op['op'] != Op.CONCAT:
1812 assert (
1813 len(shapeList) == num_operands
1814 ), "shapeList length {} must match number of operands {}".format(
1815 len(shapeList), num_operands
1816 )
1817 assert (
1818 len(dtypeList) == num_operands
1819 ), "dtypeList length {} must match number of operands {}".format(
1820 len(dtypeList), num_operands
1821 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001822
1823 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001824 qgen = op["qgen"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001825 except KeyError:
1826 qgen = None
1827
1828 # Build the random tensor operands and the test
1829 tens = []
Kevin Chengaee1fac2020-11-11 13:54:06 -08001830
1831 # If test is ArithmeticRightShift, force value of operand[1] to be within [0, num_bits]
Kevin Cheng550ccc52021-03-03 11:21:43 -08001832 if op["op"] == Op.ARITHMETIC_RIGHT_SHIFT:
1833 assert (
1834 pCount == 2 and cCount == 0
1835 ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"
Kevin Chengaee1fac2020-11-11 13:54:06 -08001836
1837 placeholders = []
1838 for idx, shape in enumerate(shapeList[:]):
1839 if idx == 1:
Kevin Cheng989cb052021-04-28 16:29:44 -07001840 if dtypeList[idx] == DType.INT8:
Kevin Chengaee1fac2020-11-11 13:54:06 -08001841 arr = np.int32(self.rng.integers(low=0, high=8, size=shape))
Kevin Cheng989cb052021-04-28 16:29:44 -07001842 elif dtypeList[idx] == DType.INT16:
Kevin Chengaee1fac2020-11-11 13:54:06 -08001843 arr = np.int32(self.rng.integers(low=0, high=16, size=shape))
Kevin Cheng989cb052021-04-28 16:29:44 -07001844 elif dtypeList[idx] == DType.INT32:
Kevin Chengaee1fac2020-11-11 13:54:06 -08001845 arr = np.int32(self.rng.integers(low=0, high=32, size=shape))
1846 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001847 raise Exception("OpArithmeticRightShift: invalid input dtype")
Kevin Chengaee1fac2020-11-11 13:54:06 -08001848 else:
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07001849 arr = self.getRandTensor(shape, dtypeList[idx])
Kevin Cheng989cb052021-04-28 16:29:44 -07001850 placeholders.append(self.ser.addPlaceholder(shape, dtypeList[idx], arr))
Kevin Chengaee1fac2020-11-11 13:54:06 -08001851
1852 tens.extend(placeholders)
Matthew Haddona44ac5e2021-07-27 16:31:16 +01001853 elif op["op"] == Op.SELECT:
1854 # Set datatype of condition tensor to boolean
1855 dtypeList[0] = DType.BOOL
1856 tens.extend(
1857 self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
1858 )
1859 tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))
Matthew Haddon459443c2021-08-23 16:43:13 +01001860 elif op["op"] == Op.INTDIV:
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07001861 assert (
1862 pCount == 2 and cCount == 0
Matthew Haddon459443c2021-08-23 16:43:13 +01001863 ), "Op.INTDIV must have 2 placeholders, 0 consts"
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07001864
1865 placeholders = []
1866
Matthew Haddon459443c2021-08-23 16:43:13 +01001867 # Two invalid cases for Op.INTDIV:
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07001868 # 1. divisor == 0
Kevin Cheng47315e12021-05-13 17:41:28 -07001869 # 2. dividend == -(1<<31) and divisor == -1
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07001870 while True:
1871 dividend_arr = self.getRandTensor(shapeList[0], dtypeList[0])
1872 divisor_arr = self.getRandTensor(shapeList[1], dtypeList[1])
1873
1874 if (divisor_arr == 0).any():
1875 continue
1876
Kevin Cheng47315e12021-05-13 17:41:28 -07001877 if (dividend_arr == -(2 ** 31)).any() and (divisor_arr == -1).any():
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07001878 continue
1879
1880 break
1881
1882 placeholders.append(
1883 self.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
1884 )
1885 placeholders.append(
1886 self.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
1887 )
1888
1889 tens.extend(placeholders)
1890 elif op["op"] == Op.MUL:
1891 assert (
1892 pCount == 2 and cCount == 0
1893 ), "Op.MUL must have 2 placeholders, 0 consts"
1894
1895 if dtypeList[0] == DType.FLOAT:
1896 tens.extend(self.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
1897 else:
1898 placeholders = []
1899
1900 # Make sure multiply result in int32 range
1901 shift = testArgs[0]
1902 if dtypeList[0] == DType.INT8:
1903 num_bits = 8
1904 elif dtypeList[0] == DType.INT16:
1905 num_bits = 16
1906 elif dtypeList[0] == DType.INT32:
1907 num_bits = 32
1908 else:
1909 raise Exception("OpMul: invalid input dtype")
1910
1911 for idx, shape in enumerate(shapeList[:]):
1912 low = -(2 ** (num_bits - 1))
1913 high = (2 ** (num_bits - 1)) - 1
1914
1915 a_arr = np.int32(
1916 self.rng.integers(low=low, high=high, size=shapeList[0])
1917 )
1918 b_arr = np.int32(
1919 self.rng.integers(low=low, high=high, size=shapeList[1])
1920 )
1921
1922 i = 0
1923 while True:
1924
1925 a_arr_64 = a_arr.astype(np.int64)
1926 b_arr_64 = b_arr.astype(np.int64)
1927
1928 if shift > 0:
1929 rounding = 1 << (shift - 1)
1930 result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
1931 else:
1932 result_arr = a_arr_64 * b_arr_64
1933
1934 if (result_arr > -(2 ** 31)).all() and (
1935 result_arr <= ((2 ** 31) - 1)
1936 ).all():
1937 break
1938
1939 i = i + 1
1940 a_arr = a_arr // 2
1941 b_arr = b_arr // 2
1942
1943 placeholders.append(
1944 self.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
1945 )
1946 placeholders.append(
1947 self.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
1948 )
1949
1950 tens.extend(placeholders)
Matthew Haddon818ab902021-07-27 09:12:49 +01001951 elif op["op"] == Op.CONCAT:
1952 count = len(shapeList) - self.args.num_const_inputs_concat
1953 if count < 1:
1954 count = 1
1955 if self.args.num_const_inputs_concat == 0:
1956 count = len(shapeList)
1957
1958 shapeList = TosaTensorGen.tgConcatConstInput(self, shapeList, testArgs[0])
1959 tens.extend(
1960 self.buildPlaceholderTensors(shapeList[0:count], dtypeList[0:count])
1961 )
1962 tens.extend(self.buildConstTensors(shapeList[count:], dtypeList[count:]))
Kevin Chengaee1fac2020-11-11 13:54:06 -08001963 else:
Kevin Cheng989cb052021-04-28 16:29:44 -07001964 tens.extend(
1965 self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
1966 )
1967 tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))
Eric Kunzee5e26762020-10-13 16:11:07 -07001968
1969 if qgen is not None:
Les Bell30e46802021-07-23 09:43:31 +01001970 qinfo = qgen(self, op, dtype_or_dtypeList)
Eric Kunzee5e26762020-10-13 16:11:07 -07001971 else:
1972 qinfo = None
1973
1974 try:
1975 if qinfo is not None:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001976 resultName = build_fcn(self, op["op"], *tens, *testArgs, qinfo)
Eric Kunzee5e26762020-10-13 16:11:07 -07001977 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001978 resultName = build_fcn(self, op["op"], *tens, *testArgs)
Eric Kunzee5e26762020-10-13 16:11:07 -07001979 except TypeError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001980 print(
1981 "build_fcn: {}\nTensors: {}\nArgs: {}\n".format(
1982 build_fcn, tens, testArgs
1983 )
1984 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001985 raise e
1986
1987 # Save the serialized test
Kevin Cheng550ccc52021-03-03 11:21:43 -08001988 self.serialize("test")
Eric Kunzee5e26762020-10-13 16:11:07 -07001989
1990 def createDynamicOpLists(self):
1991
1992 # Dynamically create op lists for convolutions with a list of kernel sizes
Kevin Cheng550ccc52021-03-03 11:21:43 -08001993 KERNELS = [[1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3]]
Eric Kunzee5e26762020-10-13 16:11:07 -07001994
1995 for k in KERNELS:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001996 testName = "conv2d_{}x{}".format(k[0], k[1])
1997 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST["conv2d_TEMPLATE"].copy()
1998 self.TOSA_OP_LIST[testName]["filter"] = k
1999 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07002000
Kevin Cheng550ccc52021-03-03 11:21:43 -08002001 testName = "depthwise_conv2d_{}x{}".format(k[0], k[1])
2002 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
2003 "depthwise_conv2d_TEMPLATE"
2004 ].copy()
2005 self.TOSA_OP_LIST[testName]["filter"] = k
2006 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07002007
Kevin Cheng550ccc52021-03-03 11:21:43 -08002008 testName = "transpose_conv2d_{}x{}".format(k[0], k[1])
2009 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
2010 "transpose_conv2d_TEMPLATE"
2011 ].copy()
2012 self.TOSA_OP_LIST[testName]["filter"] = k
2013 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07002014
2015 # Delete any templates after having created any dynamic ops
2016 # This is a two-pass operation because it's bad practice to delete
2017 # keys from dictionaries while iterating
2018 keyList = []
2019 for k in self.TOSA_OP_LIST:
2020 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002021 if self.TOSA_OP_LIST[k]["template"] == True:
Eric Kunzee5e26762020-10-13 16:11:07 -07002022 keyList.append(k)
2023 continue
2024 except KeyError:
2025 pass
2026
2027 for k in keyList:
2028 del self.TOSA_OP_LIST[k]
2029
2030 def initOpListDefaults(self):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002031 """Fill in default fields for ops if they aren't already specified.
2032 Look for missing required fields (datastructure linting)."""
Eric Kunzee5e26762020-10-13 16:11:07 -07002033 for op in self.TOSA_OP_LIST:
2034
2035 # Required fields
2036 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002037 pl, c = self.TOSA_OP_LIST[op]["operands"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002038 except (KeyError, ValueError, TypeError):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002039 raise Exception(
2040 "Op {} is missing a valid operand tuple in TOSA_OP_LIST".format(op)
2041 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002042
2043 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002044 fcn, tgen, arggen = self.TOSA_OP_LIST[op]["build_fcn"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002045 except (KeyError, ValueError, TypeError):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002046 raise Exception(
2047 "Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST".format(
2048 op
2049 )
2050 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002051
2052 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002053 types = self.TOSA_OP_LIST[op]["types"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002054 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002055 raise Exception(
2056 "Op {} is missing a valid type list in TOSA_OP_LIST".format(op)
2057 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002058
2059 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002060 opcode = self.TOSA_OP_LIST[op]["op"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002061 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002062 raise Exception(
2063 "Op {} is missing the Op field in TOSA_OP_LIST".format(op)
2064 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002065
2066 # Put in default rank range, if missing
2067 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002068 rank = self.TOSA_OP_LIST[op]["rank"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002069 except KeyError:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002070 self.TOSA_OP_LIST[op]["rank"] = self.DEFAULT_RANK_RANGE
Eric Kunzee5e26762020-10-13 16:11:07 -07002071
    # Tensor operator list
    # 'op': op name
    # 'operands': tuple of (placeholder, const) operands
    # 'rank': optional, restricts rank to tuple inclusive of (min, max),
    #   if not specified, defaults to DEFAULT_RANK_RANGE below (test
    #   generation further restricts unfiltered runs to ranks 1-4)
    # 'build_fcn': tuple of the function to (build_operator(), TensorGen function, ArgGen enum)
    # 'types': array of datatypes to be tested
    TYPE_FP = [DType.FLOAT]

    # Common integer sets; INT4 is deliberately excluded (it only appears in
    # the conv weight position, see TYPE_CONV2D)
    TYPE_INT = [DType.INT8, DType.INT16, DType.INT32]  # Excludes INT4
    TYPE_INT_FP = [DType.INT8, DType.INT16, DType.INT32, DType.FLOAT]  # Excludes INT4

    TYPE_BOOL = [DType.BOOL]
    TYPE_FI32 = [DType.FLOAT, DType.INT32]
    TYPE_FIB = [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL]
    TYPE_FI16 = [DType.FLOAT, DType.INT16]

    TYPE_NARROW_INT_FP = [DType.INT8, DType.INT16, DType.FLOAT]

    # Convolution dtype combinations.  Each list entry is a per-operand
    # triplet — presumably [input, weight, accumulator/output]; verify
    # against the conv tensor/quant generators — plus plain FLOAT.
    TYPE_CONV2D = [
        [DType.INT8, DType.INT4, DType.INT32],
        [DType.INT8, DType.INT8, DType.INT32],
        [DType.INT16, DType.INT8, DType.INT48],
        DType.FLOAT,
    ]

    DEFAULT_RANK_RANGE = (1, TOSA_TENSOR_MAX_RANK)
Eric Kunzee5e26762020-10-13 16:11:07 -07002099
2100 TOSA_OP_LIST = {
Jared Smolens573ecd42021-03-04 15:24:10 -08002101 # Tensor operators
Kevin Cheng550ccc52021-03-03 11:21:43 -08002102 "argmax": {
2103 "op": Op.ARGMAX,
2104 "operands": (1, 0),
2105 "build_fcn": (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2106 "types": TYPE_NARROW_INT_FP,
2107 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002108 "avg_pool2d": {
2109 "op": Op.AVG_POOL2D,
2110 "operands": (1, 0),
2111 "rank": (4, 4),
2112 "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
2113 "qgen": TosaQuantGen.qgUnary,
2114 "types": TYPE_NARROW_INT_FP,
2115 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002116 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08002117 "conv2d_TEMPLATE": {
2118 "op": Op.CONV2D,
2119 "operands": (1, 2),
2120 "rank": (4, 4),
2121 "build_fcn": (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D),
2122 "qgen": TosaQuantGen.qgConv,
Kevin Cheng989cb052021-04-28 16:29:44 -07002123 "types": TYPE_CONV2D,
Kevin Cheng550ccc52021-03-03 11:21:43 -08002124 "template": True,
2125 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002126 # Conv3d TBD
Eric Kunzee5e26762020-10-13 16:11:07 -07002127 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08002128 "depthwise_conv2d_TEMPLATE": {
2129 "op": Op.DEPTHWISE_CONV2D,
2130 "operands": (1, 2),
2131 "filter": [1, 1],
2132 "rank": (4, 4),
2133 "build_fcn": (
2134 build_depthwise_conv2d,
2135 TosaTensorGen.tgDepthwiseConv2D,
2136 TosaArgGen.agConv2D,
2137 ),
2138 "qgen": TosaQuantGen.qgConv,
Kevin Cheng989cb052021-04-28 16:29:44 -07002139 "types": TYPE_CONV2D,
Kevin Cheng550ccc52021-03-03 11:21:43 -08002140 "template": True,
2141 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002142 "fully_connected": {
2143 "op": Op.FULLY_CONNECTED,
2144 "operands": (1, 2),
2145 "rank": (2, 2),
2146 "build_fcn": (build_fully_connected, TosaTensorGen.tgFullyConnected, None),
2147 "qgen": TosaQuantGen.qgConv,
2148 "types": TYPE_CONV2D,
2149 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002150 "matmul": {
2151 "op": Op.MATMUL,
2152 "operands": (2, 0),
Kevin Cheng2d60f002021-06-09 14:18:32 -07002153 "rank": (3, 3),
Jared Smolens573ecd42021-03-04 15:24:10 -08002154 "build_fcn": (build_matmul, TosaTensorGen.tgMatmul, None),
2155 "qgen": TosaQuantGen.qgMatmul,
2156 "types": TYPE_NARROW_INT_FP,
2157 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002158 "max_pool2d": {
2159 "op": Op.MAX_POOL2D,
2160 "operands": (1, 0),
2161 "rank": (4, 4),
2162 "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
2163 "types": TYPE_NARROW_INT_FP,
2164 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002165 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08002166 "transpose_conv2d_TEMPLATE": {
2167 "op": Op.TRANSPOSE_CONV2D,
Kevin Cheng989cb052021-04-28 16:29:44 -07002168 "operands": (1, 2),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002169 "rank": (4, 4),
2170 "build_fcn": (
2171 build_transpose_conv2d,
2172 TosaTensorGen.tgTransposeConv2D,
2173 TosaArgGen.agTransposeConv2D,
2174 ),
2175 "qgen": TosaQuantGen.qgConv,
Kevin Cheng989cb052021-04-28 16:29:44 -07002176 "types": TYPE_CONV2D,
Kevin Cheng550ccc52021-03-03 11:21:43 -08002177 "template": True,
2178 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002179 # Activation functions
Kevin Cheng550ccc52021-03-03 11:21:43 -08002180 "clamp": {
2181 "op": Op.CLAMP,
2182 "operands": (1, 0),
2183 "build_fcn": (build_clamp, TosaTensorGen.tgBasic, None),
2184 "types": TYPE_NARROW_INT_FP,
2185 },
2186 "relun": {
2187 "op": Op.RELUN,
2188 "operands": (1, 0),
2189 "build_fcn": (build_relun, TosaTensorGen.tgBasic, None),
2190 "types": TYPE_FI32,
2191 },
2192 "sigmoid": {
2193 "op": Op.SIGMOID,
2194 "operands": (1, 0),
2195 "build_fcn": (build_sigmoid, TosaTensorGen.tgBasic, None),
2196 "types": TYPE_FP,
2197 },
2198 "tanh": {
2199 "op": Op.TANH,
2200 "operands": (1, 0),
2201 "build_fcn": (build_tanh, TosaTensorGen.tgBasic, None),
2202 "types": TYPE_FP,
2203 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002204 # Elementwise Binary Operators
2205 "add": {
2206 "op": Op.ADD,
2207 "operands": (2, 0),
2208 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2209 "types": TYPE_FI32,
2210 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002211 "arithmetic_right_shift": {
2212 "op": Op.ARITHMETIC_RIGHT_SHIFT,
2213 "operands": (2, 0),
2214 "build_fcn": (
2215 build_arithmetic_right_shift,
2216 TosaTensorGen.tgBroadcastFuzz,
2217 TosaArgGen.agArithmeticRightShift,
2218 ),
2219 "types": TYPE_INT,
2220 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002221 "bitwise_and": {
2222 "op": Op.BITWISE_AND,
2223 "operands": (2, 0),
2224 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2225 "types": TYPE_INT,
2226 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002227 "bitwise_or": {
2228 "op": Op.BITWISE_OR,
2229 "operands": (2, 0),
2230 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2231 "types": TYPE_INT,
2232 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002233 "bitwise_xor": {
2234 "op": Op.BITWISE_XOR,
2235 "operands": (2, 0),
2236 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2237 "types": TYPE_INT,
2238 },
Matthew Haddon459443c2021-08-23 16:43:13 +01002239 "intdiv": {
2240 "op": Op.INTDIV,
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07002241 "operands": (2, 0),
2242 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2243 "types": [DType.INT32],
2244 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002245 "logical_and": {
2246 "op": Op.LOGICAL_AND,
2247 "operands": (2, 0),
2248 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2249 "types": TYPE_BOOL,
2250 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002251 "logical_left_shift": {
2252 "op": Op.LOGICAL_LEFT_SHIFT,
2253 "operands": (2, 0),
2254 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2255 "types": TYPE_INT,
2256 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002257 "logical_right_shift": {
2258 "op": Op.LOGICAL_RIGHT_SHIFT,
2259 "operands": (2, 0),
2260 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2261 "types": TYPE_INT,
2262 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002263 "logical_or": {
2264 "op": Op.LOGICAL_OR,
2265 "operands": (2, 0),
2266 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2267 "types": TYPE_BOOL,
2268 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002269 "logical_xor": {
2270 "op": Op.LOGICAL_XOR,
2271 "operands": (2, 0),
2272 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2273 "types": TYPE_BOOL,
2274 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002275 "maximum": {
2276 "op": Op.MAXIMUM,
2277 "operands": (2, 0),
2278 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2279 "types": TYPE_FI32,
2280 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002281 "minimum": {
2282 "op": Op.MINIMUM,
2283 "operands": (2, 0),
2284 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2285 "types": TYPE_FI32,
2286 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002287 "mul": {
2288 "op": Op.MUL,
2289 "operands": (2, 0),
2290 "build_fcn": (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul),
2291 "types": TYPE_INT_FP,
2292 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002293 "pow": {
2294 "op": Op.POW,
2295 "operands": (2, 0),
2296 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBasic, None),
2297 "types": TYPE_FP,
2298 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002299 "sub": {
2300 "op": Op.SUB,
2301 "operands": (2, 0),
2302 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2303 "types": TYPE_FI32,
2304 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002305 "table": {
2306 "op": Op.TABLE,
2307 # Use the automatic generation functions to create the input array
2308 # but create the table tensor in the build function, as it may be
2309 # a different type from the input
2310 "operands": (1, 0),
2311 "build_fcn": (build_table, TosaTensorGen.tgBasic, None),
Jeremy Johnsonf54d8a22021-07-20 16:01:06 +01002312 "types": [DType.INT8, DType.INT16],
Jared Smolens573ecd42021-03-04 15:24:10 -08002313 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002314 # Elementwise Unary operators
2315 "abs": {
2316 "op": Op.ABS,
2317 "operands": (1, 0),
2318 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2319 "types": TYPE_FI32,
2320 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002321 "bitwise_not": {
2322 "op": Op.BITWISE_NOT,
2323 "operands": (1, 0),
2324 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2325 "types": TYPE_INT,
2326 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002327 "ceil": {
2328 "op": Op.CEIL,
2329 "operands": (1, 0),
2330 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2331 "types": TYPE_FP,
2332 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002333 "clz": {
2334 "op": Op.CLZ,
2335 "operands": (1, 0),
2336 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2337 "types": [DType.INT32],
2338 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002339 "exp": {
2340 "op": Op.EXP,
2341 "operands": (1, 0),
2342 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2343 "types": TYPE_FP,
2344 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002345 "floor": {
2346 "op": Op.FLOOR,
2347 "operands": (1, 0),
2348 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2349 "types": TYPE_FP,
2350 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002351 "log": {
2352 "op": Op.LOG,
2353 "operands": (1, 0),
2354 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2355 "types": TYPE_FP,
2356 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002357 "logical_not": {
2358 "op": Op.LOGICAL_NOT,
2359 "operands": (1, 0),
2360 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2361 "types": TYPE_BOOL,
2362 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002363 "negate": {
2364 "op": Op.NEGATE,
2365 "operands": (1, 0),
2366 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2367 "qgen": TosaQuantGen.qgUnary,
2368 "types": TYPE_INT_FP,
2369 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002370 "reciprocal": {
2371 "op": Op.RECIPROCAL,
2372 "operands": (1, 0),
2373 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2374 "types": TYPE_FP,
2375 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002376 "rsqrt": {
2377 "op": Op.RSQRT,
2378 "operands": (1, 0),
2379 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2380 "types": TYPE_FP,
2381 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002382 # Elementwise Ternary operators
2383 "select": {
2384 "op": Op.SELECT,
2385 "operands": (3, 0),
2386 "build_fcn": (build_select, TosaTensorGen.tgBroadcastFuzz, None),
2387 "types": TYPE_FIB,
2388 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002389 # Comparison operators
2390 "equal": {
2391 "op": Op.EQUAL,
2392 "operands": (2, 0),
2393 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2394 "types": TYPE_FI32,
2395 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002396 "greater_equal": {
2397 "op": Op.GREATER_EQUAL,
2398 "operands": (2, 0),
2399 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2400 "types": TYPE_FI32,
2401 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002402 "greater": {
2403 "op": Op.GREATER,
2404 "operands": (2, 0),
2405 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2406 "types": TYPE_FI32,
2407 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002408 # Reduction operators
2409 "reduce_all": {
2410 "op": Op.REDUCE_ALL,
2411 "operands": (1, 0),
2412 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2413 "types": TYPE_BOOL,
2414 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002415 "reduce_any": {
2416 "op": Op.REDUCE_ANY,
2417 "operands": (1, 0),
2418 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2419 "types": TYPE_BOOL,
2420 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002421 "reduce_max": {
2422 "op": Op.REDUCE_MAX,
2423 "operands": (1, 0),
2424 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2425 "types": TYPE_INT_FP,
2426 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002427 "reduce_min": {
2428 "op": Op.REDUCE_MAX,
2429 "operands": (1, 0),
2430 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2431 "types": TYPE_INT_FP,
2432 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002433 "reduce_product": {
2434 "op": Op.REDUCE_PRODUCT,
2435 "operands": (1, 0),
2436 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2437 "types": TYPE_FP,
2438 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002439 "reduce_sum": {
2440 "op": Op.REDUCE_SUM,
2441 "operands": (1, 0),
2442 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2443 "types": TYPE_FI32,
2444 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002445 # Data layout operators
Kevin Cheng550ccc52021-03-03 11:21:43 -08002446 "concat": {
2447 "op": Op.CONCAT,
2448 "operands": (2, 0),
Matthew Haddon818ab902021-07-27 09:12:49 +01002449 "build_fcn": (build_concat, TosaTensorGen.tgConcat, TosaArgGen.agAxis),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002450 "types": TYPE_FIB,
2451 },
2452 "pad": {
2453 "op": Op.PAD,
2454 "operands": (1, 0),
2455 "build_fcn": (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad),
2456 "qgen": TosaQuantGen.qgPad,
2457 "types": TYPE_FIB,
2458 },
2459 "reshape": {
2460 "op": Op.RESHAPE,
2461 "operands": (1, 0),
2462 "build_fcn": (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
2463 "types": TYPE_FIB,
2464 },
2465 "reverse": {
2466 "op": Op.REVERSE,
2467 "operands": (1, 0),
2468 "build_fcn": (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2469 "types": TYPE_FIB,
2470 },
2471 "slice": {
2472 "op": Op.SLICE,
2473 "operands": (1, 0),
2474 "build_fcn": (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
2475 "types": TYPE_FIB,
2476 },
2477 "tile": {
2478 "op": Op.TILE,
2479 "operands": (1, 0),
2480 "build_fcn": (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
2481 "types": TYPE_FIB,
2482 },
2483 "transpose": {
2484 "op": Op.TRANSPOSE,
2485 "operands": (1, 0),
Jeremy Johnsona6185572021-06-21 15:55:35 +01002486 "rank": (1, 4),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002487 "build_fcn": (
2488 build_transpose,
2489 TosaTensorGen.tgBasic,
2490 TosaArgGen.agTranspose,
2491 ),
2492 "types": TYPE_FIB,
2493 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002494 # Data nodes
2495 "const": {
2496 "op": Op.CONST,
2497 "operands": (1, 0),
2498 "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None),
2499 "types": TYPE_FIB,
2500 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002501 "identity": {
2502 "op": Op.IDENTITY,
2503 "operands": (1, 0),
2504 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2505 "types": TYPE_FIB,
2506 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002507 # Scatter/Gather
Kevin Cheng550ccc52021-03-03 11:21:43 -08002508 "gather": {
2509 "op": Op.GATHER,
2510 # Only specify 'values' tensor here. 'indices' is generated in op building stage
2511 "operands": (1, 0),
2512 "rank": (3, 3),
2513 "build_fcn": (build_gather, TosaTensorGen.tgBasic, None),
2514 "types": TYPE_INT_FP,
2515 },
2516 "scatter": {
2517 "op": Op.SCATTER,
2518 # Only specify 'values_in' tensor here.
2519 #'indices' and 'input' are generated in op building stage
2520 "operands": (2, 0),
2521 "rank": (3, 3),
2522 "build_fcn": (build_scatter, TosaTensorGen.tgScatter, None),
2523 "types": TYPE_INT_FP,
2524 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002525 # Image operations
Kevin Cheng550ccc52021-03-03 11:21:43 -08002526 "resize": {
2527 "op": Op.RESIZE,
2528 "operands": (1, 0),
2529 "rank": (4, 4),
2530 "build_fcn": (build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
2531 "types": [DType.INT8, DType.INT16, DType.FLOAT],
2532 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002533 # Type conversion
Kevin Cheng550ccc52021-03-03 11:21:43 -08002534 "cast": {
2535 "op": Op.CAST,
2536 "operands": (1, 0),
2537 "build_fcn": (build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast),
2538 "types": [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL],
2539 },
2540 "rescale": {
2541 "op": Op.RESCALE,
2542 "operands": (1, 0),
2543 "build_fcn": (build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale),
Matthew Haddoncac4ee92021-07-22 14:30:53 +01002544 "types": [DType.UINT8, DType.INT8, DType.INT16, DType.INT32, DType.INT48],
Kevin Cheng550ccc52021-03-03 11:21:43 -08002545 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002546 # Custom
2547 # Not implemented.
Jared Smolens573ecd42021-03-04 15:24:10 -08002548 # Control flow operators
Eric Kunzee5e26762020-10-13 16:11:07 -07002549 # Two varients of cond_if, one that generates one of two constant tensors (no
2550 # inputs to the basic blocks, one output) and another that either adds or subtracts two tensors
2551 # (two inputs to the basic blocks, one output)
Kevin Cheng550ccc52021-03-03 11:21:43 -08002552 "cond_if_const": {
2553 "op": Op.COND_IF,
2554 "operands": (0, 2),
2555 "build_fcn": (
2556 build_cond_if_const,
2557 TosaTensorGen.tgBasic,
2558 TosaArgGen.agCondIf,
2559 ),
2560 "types": [DType.BOOL],
2561 },
2562 "cond_if_binary": {
2563 "op": Op.COND_IF,
2564 "operands": (2, 0),
2565 "build_fcn": (
2566 build_cond_if_binary,
2567 TosaTensorGen.tgBasic,
2568 TosaArgGen.agCondIf,
2569 ),
2570 "types": TYPE_FI32,
2571 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002572 # while_loop
Kevin Cheng550ccc52021-03-03 11:21:43 -08002573 "while_loop": {
2574 "op": Op.WHILE_LOOP,
2575 "operands": (0, 1),
2576 "build_fcn": (
2577 build_while_loop,
2578 TosaTensorGen.tgBasic,
2579 TosaArgGen.agWhileLoop,
2580 ),
2581 "types": [DType.INT32],
2582 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002583 }
2584
Kevin Cheng550ccc52021-03-03 11:21:43 -08002585
class OutputShaper:
    """Compute the expected output shape and datatype for common classes
    of TOSA operations.

    Every method is a ``@staticmethod`` that takes the serializer plus the
    operator's input tensors/attributes, derives the output tensor's shape
    and dtype, and registers it via ``ser.addOutput()``.  When a parameter
    combination is known to be invalid/unpredictable, the expected return
    code of the reference model is recorded with
    ``ser.setExpectedReturnCode()`` instead of raising.
    """

    def __init__(self):
        pass

    # These methods return arguments that can be used for
    # creating a new output tensor
    @staticmethod
    def binaryBroadcastOp(ser, a, b):
        """Elementwise binary op: dims of size 1 in a broadcast up to b's."""
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            if a.shape[i] == 1:
                shape.append(b.shape[i])
            else:
                shape.append(a.shape[i])

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def binaryNonBroadcastOp(ser, a, b):
        """Elementwise binary op without broadcasting: shapes must match."""
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            assert a.shape[i] == b.shape[i]
            shape.append(a.shape[i])

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def unaryOp(ser, a):
        """Elementwise unary op: output mirrors the input shape and dtype."""
        return ser.addOutput(a.shape, a.dtype)

    @staticmethod
    def selectOp(ser, cond, a, b):
        """SELECT: broadcast cond/a/b, taking the max extent per dimension."""
        assert len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            shape.append(max(cond.shape[i], a.shape[i], b.shape[i]))

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def binaryComparisonOp(ser, a, b):
        """Comparison op: broadcast like a binary op, but output is BOOL."""
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        # Do broadcast
        shape = []
        for i in range(len(a.shape)):
            if a.shape[i] == 1:
                shape.append(b.shape[i])
            else:
                shape.append(a.shape[i])

        # Force the output type to bool
        return ser.addOutput(shape, DType.BOOL)

    @staticmethod
    def reduceOp(ser, a, axis):
        """Reduction: the reduced axis collapses to extent 1."""
        shape = a.shape.copy()

        shape[axis] = 1

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def argmaxOp(ser, a, axis):
        """ARGMAX: drop the reduced axis entirely; indices are INT32."""
        shape = a.shape.copy()
        del shape[axis]
        return ser.addOutput(shape, DType.INT32)

    @staticmethod
    def conv2dOp(ser, ifm, filter, strides, padding, dilations):
        """CONV2D output shape/dtype.

        Layouts: IFM NHWC, Filter OHWI, OFM NHWC.  Non-positive computed
        H/W marks the test as UNPREDICTABLE rather than raising.
        """
        if len(padding) == 2:
            # Expand padding to 4 parameters in the case of transpose_conv2d
            # From H,W to T,B,L,R
            padding = [padding[0], padding[0], padding[1], padding[1]]

        h = (
            ifm.shape[1]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        w = (
            ifm.shape[2]
            - filter.shape[2]
            - (filter.shape[2] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1

        if h <= 0 or w <= 0:
            # Invalid test parameters?
            h = 0
            w = 0
            ser.setExpectedReturnCode(
                TosaReturnCode.UNPREDICTABLE, "Invalid combination of conv2d parameters"
            )

        ofm_shape = [ifm.shape[0], h, w, filter.shape[0]]

        # Accumulator/output dtype widens with the input dtype
        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)

    @staticmethod
    def depthwiseConv2dOp(ser, ifm, filter, strides, padding, dilations):
        """DEPTHWISE_CONV2D output shape/dtype.

        Layouts: IFM NHWC, Filter HWCM, OFM NHW(C*M).
        """
        h = (
            ifm.shape[1]
            - filter.shape[0]
            - (filter.shape[0] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        w = (
            ifm.shape[2]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1

        if h <= 0 or w <= 0:
            # Invalid test parameters?
            h = 0
            w = 0
            ser.setExpectedReturnCode(
                TosaReturnCode.UNPREDICTABLE, "Invalid combination of conv2d parameters"
            )

        # Output channels = input channels (C) * channel multiplier (M)
        ofm_shape = [ifm.shape[0], h, w, filter.shape[2] * filter.shape[3]]

        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)

    @staticmethod
    def pool2dOp(ser, ifm, kernel, stride, pad):
        """Pooling (MAX/AVG) output shape; dtype is unchanged.

        Input layout: NHWC.  pad is [top, bottom, left, right].
        """
        h = (ifm.shape[1] + pad[0] + pad[1] + stride[0] - kernel[0]) // stride[0]
        w = (ifm.shape[2] + pad[2] + pad[3] + stride[1] - kernel[1]) // stride[1]

        if h <= 0 or w <= 0:
            # Invalid test parameters?
            h = 0
            w = 0
            ser.setExpectedReturnCode(
                TosaReturnCode.UNPREDICTABLE, "Invalid combination of pool2d parameters"
            )

        ofm_shape = [ifm.shape[0], h, w, ifm.shape[3]]
        return ser.addOutput(ofm_shape, ifm.dtype)

    @staticmethod
    def fullyConnectedOp(ser, input, filter):
        """FULLY_CONNECTED: input (N, IC) x filter (OC, IC) -> (N, OC)."""
        output_shape = [input.shape[0], filter.shape[0]]

        if input.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif input.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif input.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(input.dtype))

        return ser.addOutput(output_shape, out_dtype)

    @staticmethod
    def matmulOp(ser, a, b):
        """MATMUL: a (N, H, C) x b (N, C, W) -> (N, H, W)."""
        output_shape = [a.shape[0], a.shape[1], b.shape[2]]

        if a.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif a.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif a.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            # Fix: error message previously read "UNsupported"
            raise Exception("Unsupported input dtype for matmul: {}".format(a.dtype))

        return ser.addOutput(output_shape, out_dtype)

    @staticmethod
    def concatOp(ser, axis, *a):
        """CONCAT: sum the extents of all inputs along axis."""
        input1 = a[0]
        remaining_inputs = a[1:]

        # Start from the first input's shape; the copy already carries
        # input1's extent on the concat axis.
        output_shape = input1.shape.copy()

        for tensor in remaining_inputs:
            output_shape[axis] += tensor.shape[axis]

        return ser.addOutput(output_shape, input1.dtype)

    @staticmethod
    def padOp(ser, a, padding):
        """PAD: grow each dimension by its (before, after) padding pair."""
        output_shape = a.shape.copy()

        for i in range(len(output_shape)):
            output_shape[i] = padding[i][0] + padding[i][1] + output_shape[i]

        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def reshapeOp(ser, a, shape):
        """RESHAPE: apply the new shape, inferring a single -1 dimension."""
        output_shape = shape.copy()

        totalElements = 1
        for i in a.shape:
            totalElements *= i

        # If there are any -1 elements, figure out what that dimension must be
        totalOutputElements = 1
        for i in output_shape:
            if i != -1:
                totalOutputElements *= i

        # And fill it in
        for i in range(len(output_shape)):
            if output_shape[i] == -1:
                output_shape[i] = totalElements // totalOutputElements

        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def sliceOp(ser, a, begin, size):
        """SLICE: output shape is exactly the requested size."""
        output_shape = size.copy()
        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def tileOp(ser, a, multiples):
        """TILE: each dimension is scaled by its multiple."""
        output_shape = a.shape.copy()
        assert len(multiples) == len(output_shape)

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[i] * multiples[i]

        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def transposeOp(ser, a, perms):
        """TRANSPOSE: permute the input dimensions by perms."""
        output_shape = a.shape.copy()
        assert len(perms) == len(output_shape)

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[perms[i]]

        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def gatherOp(ser, values, indices):
        """GATHER: values (N, K, C) indexed by indices (N, W) -> (N, W, C)."""
        assert len(values.shape) == 3
        assert len(indices.shape) == 2
        assert values.shape[0] == indices.shape[0]

        output_shape = [values.shape[0], indices.shape[1], values.shape[2]]

        return ser.addOutput(output_shape, values.dtype)

    @staticmethod
    def scatterOp(ser, values_in, indices, input):
        """SCATTER: output has the same shape/dtype as values_in."""
        assert len(values_in.shape) == 3
        assert len(indices.shape) == 2
        assert len(input.shape) == 3
        assert values_in.shape[0] == indices.shape[0]  # N
        assert input.shape[1] == indices.shape[1]  # W
        assert values_in.shape[2] == input.shape[2]  # C

        output_shape = values_in.shape

        return ser.addOutput(output_shape, values_in.dtype)

    @staticmethod
    def tableOp(ser, input, table_dtype):
        """TABLE: same shape as the input; dtype depends on the table dtype
        (INT16 table -> INT32 output, INT8 table -> INT8 output)."""
        assert table_dtype == DType.INT16 or table_dtype == DType.INT8
        output_dtype = DType.INT32 if table_dtype == DType.INT16 else DType.INT8
        return ser.addOutput(input.shape, output_dtype)

    @staticmethod
    def resizeOp(
        ser,
        input,
        mode,
        stride,
        offset,
        shift,
        stride_fp,
        offset_fp,
        output_dims,
        input_dtype,
        output_dtype,
    ):
        """RESIZE: validate stride sign and mode/dtype combinations,
        recording the expected error code for invalid ones, then emit an
        NHWC output with the requested H/W."""
        output_dims = [input.shape[0], output_dims[0], output_dims[1], input.shape[3]]

        # FLOAT inputs use the floating-point stride; others the fixed-point one
        if input_dtype == DType.FLOAT:
            if stride_fp[0] <= 0 or stride_fp[1] <= 0:
                ser.setExpectedReturnCode(
                    TosaReturnCode.ERROR, "Negative or zero stride"
                )
        else:
            if stride[0] <= 0 or stride[1] <= 0:
                ser.setExpectedReturnCode(
                    TosaReturnCode.ERROR, "Negative or zero stride"
                )

        if mode == ResizeMode.BILINEAR:
            # BILINEAR accumulates, so integer outputs are widened
            if input_dtype == DType.INT8:
                if output_dtype != DType.INT32:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            elif input_dtype == DType.INT16:
                if output_dtype != DType.INT48:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            elif input_dtype == DType.FLOAT:
                if output_dtype != DType.FLOAT:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            else:
                ser.setExpectedReturnCode(
                    TosaReturnCode.ERROR, "Invalid input data type"
                )

        elif mode == ResizeMode.NEAREST:
            # NEAREST selects, so output dtype must equal input dtype
            if input_dtype == DType.INT8:
                if output_dtype != DType.INT8:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            elif input_dtype == DType.INT16:
                if output_dtype != DType.INT16:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            elif input_dtype == DType.FLOAT:
                if output_dtype != DType.FLOAT:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            else:
                ser.setExpectedReturnCode(
                    TosaReturnCode.ERROR, "Invalid input data type"
                )

        else:
            ser.setExpectedReturnCode(TosaReturnCode.ERROR, "Invalid resize mode")

        return ser.addOutput(output_dims, output_dtype)

    @staticmethod
    def typeConversionOp(ser, val, out_dtype):
        """CAST/RESCALE: same shape, caller-specified output dtype."""
        return ser.addOutput(val.shape, out_dtype)

    @staticmethod
    def transposeConv2DOp(ser, ifm, output_shape):
        """TRANSPOSE_CONV2D: output shape is supplied by the caller; only
        the dtype is derived here.  Non-positive H/W is UNPREDICTABLE."""
        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        if output_shape[1] <= 0 or output_shape[2] <= 0:
            ser.setExpectedReturnCode(
                TosaReturnCode.UNPREDICTABLE, "Negative output shape"
            )

        return ser.addOutput(output_shape, out_dtype)