#!/usr/bin/env python3

# Copyright (c) 2020-2021, ARM Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import numpy as np
import argparse
import sys
import re
import os
import subprocess
import shlex
import json
import glob
import math
import queue
import threading
import traceback
import itertools

from enum import IntEnum, Enum, unique
from tosa_ref_run import TosaReturnCode

# Include the ../thirdparty/serialization_lib/python directory in PYTHONPATH
parent_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(
    os.path.join(parent_dir, "..", "thirdparty", "serialization_lib", "python")
)
import tosa_serializer as ts
from tosa_serializer import *
import tosa

# Convenience variables to the flatc-generated types that should be enums, but aren't
DType = tosa.DType.DType()
Op = tosa.Op.Op()
ResizeMode = tosa.ResizeMode.ResizeMode()

class TosaQuantGen:
    """QuantizedInfo random generator helper functions.  Specify with 'qgen': in the operator definition."""

    def __init__(self):
        pass

    @staticmethod
    def getQinfo(testGen, dtype):
        if dtype == DType.INT8:
            return testGen.randInt(-128, 128)
        if dtype == DType.UINT8:
            return testGen.randInt(0, 256)
        return 0

    @staticmethod
    def qgUnary(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.UnaryQuantInfo(
            TosaQuantGen.getQinfo(testGen, dtype), TosaQuantGen.getQinfo(testGen, dtype)
        )
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype_or_dtypeList):
        qinfo = ts.TosaSerializerQuantInfo()
        if isinstance(dtype_or_dtypeList, list):
            # a list of [input, weights, accumulator] dtypes
            dtypeList = dtype_or_dtypeList
        else:
            # an int; the [input, weights, accumulator] dtypes are all the same
            dtypeList = [dtype_or_dtypeList] * 3
        input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0])
        weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1])
        qinfo.ConvQuantInfo(input_zp, weights_zp)
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.MatMulQuantInfo(
            TosaQuantGen.getQinfo(testGen, dtype), TosaQuantGen.getQinfo(testGen, dtype)
        )
        return qinfo

    @staticmethod
    def qgPad(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.PadQuantInfo(TosaQuantGen.getQinfo(testGen, dtype))
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        # Derived from computeMultiplierAndShiftTosaScale32
        # Provide a floating-point scaling factor and the scale32 parameter
        # to compute the multiplier and shift

        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert multiplier <= (1 << scaleBits)

        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits
        # print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(scaleFp, scaleBits, m, multiplier, shift))

        # Adjust multiplier such that shift is in allowed value range.
        if shift == 0:
            multiplier = multiplier // 4
            shift = shift + 2
        elif shift == 1:
            multiplier = multiplier // 2
            shift = shift + 1
        elif shift == 63:
            multiplier = multiplier * 2
            shift = shift - 1

        assert multiplier <= (1 << scaleBits)
        assert shift >= 2 and shift <= 62

        return multiplier, shift
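
    # Worked example (illustrative only, not executed): with scaleFp = 0.25 and
    # scale32 = True, math.frexp(0.25) returns (0.5, -1), so multiplier =
    # round(0.5 * 2**31) = 2**30 and shift = -(-1) + 31 = 32.  The encoded
    # scale multiplier * 2**-shift = 2**30 / 2**32 = 0.25 reproduces scaleFp,
    # and shift = 32 sits inside the required [2, 62] range.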


class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator.  The actual random data is generated separately for each test."""

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank):
        pl, const = opName["operands"]

        assert rank == 4

        shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            shape[0] = (shape[0] % testGen.args.max_batch_size) + 1

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgScatter(testGen, opName, rank):
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        assert rank == 3

        values_in_shape = testGen.makeShape(rank)

        # Ignore max batch size if target shape is set
        if testGen.args.max_batch_size and not testGen.args.target_shapes:
            values_in_shape[0] = (values_in_shape[0] % testGen.args.max_batch_size) + 1

        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        # Constrict W if one dimension is too large to keep tensor size reasonable
        if max(values_in_shape) > 5000:
            W = testGen.randInt(0, 16)

        input_shape = [values_in_shape[0], W, values_in_shape[2]]

        shape_list = []
        shape_list.append(values_in_shape.copy())
        shape_list.append(input_shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank):
        shape = testGen.makeShape(rank)

        pl, const = op["operands"]

        shape_list = []

        # Choose one of the inputs to broadcast
        bcast_idx = testGen.randInt(0, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If this is the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list
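
    # Illustrative example (assumed values): with rank = 3, base shape
    # [3, 4, 5], two operands (pl + const == 2) and bcast_idx == 1, the second
    # shape has one randomly chosen dimension set to 1, e.g. giving
    # [[3, 4, 5], [3, 1, 5]], which forces the operator to broadcast that axis.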

    @staticmethod
    def tgConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        # Filter is KH, KW, C, M
        filter_hw = op["filter"]

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (
            testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgFullyConnected(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 2

        input_shape = testGen.makeShape(rank)
        filter_oc = testGen.rng.integers(
            low=testGen.args.tensor_shape_range[0],
            high=testGen.args.tensor_shape_range[1],
            size=1,
        )[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 3
        assert pl == 2 and const == 0

        a_shape = testGen.makeShape(rank)
        # Get a random number for b_oc even if target shape is defined
        b_oc = np.int32(
            testGen.rng.integers(
                low=testGen.args.tensor_shape_range[0],
                high=testGen.args.tensor_shape_range[1],
                size=1,
            )
        )[0]
        # If N or H is large let b_oc be 1 to reduce output tensor size
        if max(a_shape) > 1000:
            b_oc = 1

        b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])
        return [a_shape, b_shape]
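
    # Illustrative example (assumed values): for a_shape = [2, 3, 4] (N, H, C)
    # and a drawn b_oc = 5, the returned shapes are [[2, 3, 4], [2, 4, 5]],
    # i.e. a per-batch matmul of 3x4 by 4x5 matrices.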

    @staticmethod
    def tgConcat(testGen, opName, rank):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Create extra tensors to concat.
        # Take into account value of pl when getting maximum number of concats
        num_tensors = testGen.randInt(0, 4)
        shape_list = []
        for i in range(pl + const + num_tensors):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgConcatConstInput(testGen, shapeList, axis):
        # Split concat shape along axis to allow for multiple const inputs
        # without making too many large tensors
        shape = shapeList[0]
        if len(shapeList) == 2 or shape[axis] < len(shapeList):
            return shapeList

        new_shapeList = [shape.copy()]
        length_on_axis = shape[axis]
        remaining_length = length_on_axis
        for i in range(len(shapeList) - 2):
            # Calculate split on axis and remaining value
            split_shape_val = int(shape[axis] / 2)
            remaining_length = remaining_length - split_shape_val

            # Append new shape, and set remaining shape
            shape[axis] = split_shape_val
            new_shapeList.append(shape.copy())
            shape[axis] = remaining_length
            if i == len(shapeList) - 3:
                new_shapeList.append(shape.copy())

        return new_shapeList
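
    # Illustrative example (assumed values): with shapeList = [[8, 4]] * 4 and
    # axis = 0, the first entry stays the full concat shape [8, 4] and each
    # pass halves what remains, giving [[8, 4], [4, 4], [2, 4], [2, 4]]; the
    # input lengths 4 + 2 + 2 sum back to 8 along the concat axis.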


class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for operators that take
    attributes or other parameters.  The return value is a list of (descriptive_name, [arglist])
    tuples where the descriptive_name is appended to the test name and the arglist is expanded
    as arguments to the operator build function."""

    def __init__(self):
        pass

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype):
        """A trivial argument generator for operators that don't take any
        non-tensor arguments"""
        return [("", [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype):
        """Build the axis argument for operators that take a single axis"""
        axes = []

        shape = shapeList[0]

        for a in range(0, len(shape)):
            axes.append(("axis{}".format(a), [a]))
        return axes

    @staticmethod
    def agConv2D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert len(ifm_shape) == 4
        assert len(filter_shape) == 4

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for padding in range(0, (maxPadding) ** 4):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    p = [
                        (padding // (maxPadding * 4)) % maxPadding,
                        (padding // (maxPadding * 2)) % maxPadding,
                        (padding // (maxPadding * 1)) % maxPadding,
                        padding % maxPadding,
                    ]
                    d = [dilation // maxDilation + 1, dilation % maxDilation + 1]

                    # 4 padding parameters for regular conv2d
                    arg_list.append(
                        (
                            "st{}{}_pad{}{}{}{}_dilat{}{}".format(
                                s[0], s[1], p[0], p[1], p[2], p[3], d[0], d[1]
                            ),
                            [s, p, d],
                        )
                    )
        return arg_list
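
    # Worked example of the mixed-radix decode above (illustrative, assuming
    # testGen.args.max_conv_stride == 2): the stride counter runs over
    # range(0, 4) and [stride // 2 + 1, stride % 2 + 1] maps 0 -> [1, 1],
    # 1 -> [1, 2], 2 -> [2, 1], 3 -> [2, 2]; padding and dilation are unpacked
    # from their own flattened counters the same way.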

    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert len(ifm_shape) == 4
        assert len(filter_shape) == 4

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for out_padding in range(0, (maxPadding) ** 2):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    p = [
                        (out_padding // (maxPadding * 1)) % maxPadding,
                        out_padding % maxPadding,
                    ]
                    d = [dilation // maxDilation + 1, dilation % maxDilation + 1]

                    oh = (
                        ifm_shape[1]
                        - filter_shape[1]
                        - (filter_shape[1] - 1) * (d[0] - 1)
                        + 2 * p[0]
                    ) // s[0] + 1

                    ow = (
                        ifm_shape[2]
                        - filter_shape[2]
                        - (filter_shape[2] - 1) * (d[1] - 1)
                        + 2 * p[1]
                    ) // s[1] + 1

                    # Output shape
                    os = [ifm_shape[0], oh, ow, filter_shape[0]]

                    arg_list.append(
                        (
                            "st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}".format(
                                s[0],
                                s[1],
                                p[0],
                                p[1],
                                d[0],
                                d[1],
                                os[0],
                                os[1],
                                os[2],
                                os[3],
                            ),
                            [s, p, d, os],
                        )
                    )

        return arg_list

    @staticmethod
    def agPad(testGen, opName, shapeList, dtype):
        arg_list = []
        rank = len(shapeList[0])

        # Exhaustively test combinations of padding on each side of each dimension
        # - the range of padding values is defined by pad_min and pad_max
        # - for padding >9, the name format needs to be more distinctive
        pad_min, pad_max = 0, 1
        pad_values = [x for x in range(pad_min, pad_max + 1)]
        axis_pad_values = [x for x in itertools.product(pad_values, pad_values)]
        shape_pad_values = itertools.product(*([axis_pad_values] * rank))

        for paddings in shape_pad_values:
            name = "pad"
            for r in range(rank):
                before, after = paddings[r]
                name = f"{name}{before}{after}"
            arg_list.append((name, [np.array(paddings)]))

        return arg_list
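
    # Illustrative count (not executed): with rank = 2 and the pad_min/pad_max
    # range of 0..1 above, each axis contributes a (before, after) pair drawn
    # from {0, 1} x {0, 1}, so the product yields 4 ** 2 = 16 argument sets
    # named pad0000 through pad1111.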

    @staticmethod
    def agPooling(testGen, opName, shapeList, dtype):
        arg_list = []

        shape = shapeList[0]
        assert len(shape) == 4

        maxStride = testGen.args.max_pooling_stride
        maxKernel = testGen.args.max_pooling_kernel
        maxPadding = testGen.args.max_pooling_padding + 1

        for kernel in range(0, maxKernel ** 2):
            for stride in range(0, maxStride ** 2):
                for padding in range(0, maxPadding ** 4):
                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    k = [(kernel // maxKernel) + 2, (kernel % maxKernel) + 2]
                    p = [
                        (padding // (maxPadding * 4)) % maxPadding,
                        (padding // (maxPadding * 2)) % maxPadding,
                        (padding // (maxPadding * 1)) % maxPadding,
                        padding % maxPadding,
                    ]

                    arg_list.append(
                        (
                            "st{}{}_kern{}{}_pad{}{}{}{}".format(
                                s[0], s[1], k[0], k[1], p[0], p[1], p[2], p[3]
                            ),
                            [s, p, k],
                        )
                    )
        return arg_list

    @staticmethod
    def agCast(testGen, opName, shapeList, inDtype):
        arg_list = []

        # Enumerate the output types here
        if inDtype == DType.INT8:
            dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT16:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT32:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        elif inDtype == DType.BOOL:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif inDtype == DType.FLOAT:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        else:
            raise Exception("Unexpected input dtype: {}".format(inDtype))

        for dtype in dtypeList:
            arg_list.append(("out{}".format(DTypeNames[dtype]), [dtype]))

        return arg_list

    @staticmethod
    def agRescale(testGen, opName, shapeList, inDtype):
        arg_list = []

        # Enumerate the output types here
        for dtype in [DType.UINT8, DType.INT8, DType.INT16, DType.INT32]:
            if inDtype == DType.UINT8 and dtype != DType.INT8:
                # The only output dtype for UINT8 is INT8, skip all other combinations
                continue
            if inDtype != DType.INT8 and dtype == DType.UINT8:
                # The only input dtype for UINT8 is INT8, skip all other combinations
                continue

            for scale32 in [False, True]:
                for double_round in [False, True]:
                    for per_channel in [False, True]:

                        if inDtype == DType.INT48 and scale32:
                            # Illegal condition.  Must be scale32=False
                            continue
                        if double_round and not scale32:
                            # Illegal condition.  ERROR_IF(!scale32 && double_round)
                            continue

                        arg_list.append(
                            (
                                "out{}_sc{}_dr{}_pc{}".format(
                                    DTypeNames[dtype],
                                    int(scale32),
                                    int(double_round),
                                    int(per_channel),
                                ),
                                [dtype, scale32, double_round, per_channel],
                            )
                        )

        return arg_list

    @staticmethod
    def agMul(testGen, opName, shapeList, dtype):
        arg_list = []

        if dtype is DType.INT32:
            for p in range(testGen.args.num_rand_permutations):

                shift = testGen.randInt(0, 32)

                arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
        else:
            arg_list.append(("perm0_shift0", [0]))

        return arg_list

    @staticmethod
    def agArithmeticRightShift(testGen, opName, shapeList, dtype):
        arg_list = []

        arg_list.append(("roundTrue", [True]))
        arg_list.append(("roundFalse", [False]))

        return arg_list

    # Helper function for reshape.  Gets some factors of a larger number.
    @staticmethod
    def getFactors(val, start=1):
        factors = []

        for i in range(start, int(np.sqrt(val)) + 1):
            if (val % i) == 0:
                factors.append(i)

        return factors
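
    # Illustrative example (not executed): getFactors(36) scans 1..6 (up to
    # sqrt(36)) and returns [1, 2, 3, 4, 6]; the larger cofactors (9, 12, 18,
    # 36) are reached in agReshape by re-factoring the remaining element count.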

    @staticmethod
    def agReshape(testGen, opName, shapeList, dtype):
        arg_list = []

        origShape = shapeList[0]

        totalElements = 1
        for s in origShape:
            totalElements *= s

        # This code is NOT fast.  Fortunately, the numbers are fairly small.
        factors = TosaArgGen.getFactors(totalElements)

        for p in range(testGen.args.num_rand_permutations):
            newRank = testGen.randInt(1, 7)
            if len(factors) < newRank:
                continue

            found = True
            # escape_counter breaks the while loop if it runs for too long
            escape_counter = 0
            while found:
                newShape = []
                # Generate newShape ensuring it isn't a duplicate
                remainingElements = totalElements
                shuffledFactors = testGen.rng.permutation(factors)
                for i in range(1, newRank):
                    # pick rank - 1 factors
                    newShape.append(shuffledFactors[0])
                    remainingElements = remainingElements // shuffledFactors[0]
                    shuffledFactors = testGen.rng.permutation(
                        TosaArgGen.getFactors(remainingElements)
                    )
                newShape.append(remainingElements)

                # Toss in a -1 sometimes
                minusOne = testGen.randInt(0, newRank * 4)
                if minusOne < newRank:
                    newShape[minusOne] = -1

                # Check for duplicates
                found = False
                for name, other_shape in arg_list:
                    if other_shape[0] == newShape:
                        found = True
                        break

                escape_counter += 1
                if escape_counter >= 100:
                    break

            if not found:
                arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))

        return arg_list

    @staticmethod
    def agTranspose(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]

        # Get all permutations
        permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]

        # Limit to possible permutations from shape dimension or argument setting
        limit = min(len(permutations), testGen.args.num_rand_permutations)

        # Get random permutation generator that uses all permutations
        random_permutations = testGen.rng.permutation(permutations)

        # Create list of required amount of permutations
        arg_list = [
            ("perm{}".format(p), [random_permutations[p].tolist()])
            for p in range(limit)
        ]
        return arg_list

    @staticmethod
    def agSlice(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):
            begin = []
            size = []

            valid = True

            for i in range(rank):
                if ifm_shape[i] > 1:
                    begin.append(testGen.randInt(0, ifm_shape[i]))
                    size.append(testGen.randInt(0, ifm_shape[i] - begin[i]))

                    # Invalid slice size?
                    if size[i] == 0:
                        valid = False
                else:
                    begin.append(0)
                    size.append(1)

            if valid:
                arg_list.append(("perm{}".format(p), [begin, size]))
        return arg_list

    @staticmethod
    def agTile(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):

            # Pick a few random, but small multiple values
            # because otherwise this has a tendency to generate
            # enormous tensors
            multiples = []
            for i in range(rank):
                if ifm_shape[i] > 1000:
                    # Multiple of 1 if ifm_shape dimension is large to reduce tensor size
                    multiples.append(1)
                elif max(ifm_shape) > 1000:
                    multiples.append(2)
                else:
                    multiples.append(testGen.randInt(1, 4))
            arg_list.append(("perm{}".format(p), [multiples]))

        return arg_list

    @staticmethod
    def agResize(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]

        for m in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:

            # Exclude illegal {mode, type} configurations.  Pick legal output types
            if m == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif m == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            elif dtype == DType.FLOAT:
                outputDTypeList = [DType.FLOAT]
            else:
                continue

            for outputDType in outputDTypeList:
                for perm in range(testGen.args.num_rand_permutations):

                    # Randomly generate legal output dimensions and shift
                    # and then compute the stride and offset based on them
                    output_dims = [testGen.randInt(1), testGen.randInt(1)]
                    in_center_h = (ifm_shape[1] - 1) / 2.0
                    in_center_w = (ifm_shape[2] - 1) / 2.0
                    out_center_h = (output_dims[0] - 1) / 2.0
                    out_center_w = (output_dims[1] - 1) / 2.0

                    fp_stride_y = float(ifm_shape[1]) / float(output_dims[0])
                    fp_stride_x = float(ifm_shape[2]) / float(output_dims[1])
                    fp_offset_y = in_center_h - fp_stride_y * out_center_h
                    fp_offset_x = in_center_w - fp_stride_x * out_center_w

                    if outputDType == DType.FLOAT:
                        shift = 0
                        stride = [0, 0]
                        offset = [0, 0]
                        stride_fp = [fp_stride_y, fp_stride_x]
                        offset_fp = [fp_offset_y, fp_offset_x]
                        arg_list.append(
                            (
                                "mode{}_odim{}x{}_out{}_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}".format(
                                    "N" if m == ResizeMode.NEAREST else "B",
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride_fp[0],
                                    stride_fp[1],
                                    offset_fp[0],
                                    offset_fp[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )
                    else:
                        shift = 11
                        unit = float(1 << shift)
                        stride_y = int(round(fp_stride_y * unit))
                        stride_x = int(round(fp_stride_x * unit))
                        offset_y = int(round(fp_offset_y * unit))
                        offset_x = int(round(fp_offset_x * unit))

                        while (
                            stride_y >= 32768
                            or stride_x >= 32768
                            or offset_y >= 32768
                            or offset_x >= 32768
                            or offset_y < -32768
                            or offset_x < -32768
                        ):
                            shift = shift - 1
                            unit = float(1 << shift)
                            stride_y = int(round(fp_stride_y * unit))
                            stride_x = int(round(fp_stride_x * unit))
                            offset_y = int(round(fp_offset_y * unit))
                            offset_x = int(round(fp_offset_x * unit))

                        stride = [stride_y, stride_x]
                        offset = [offset_y, offset_x]

                        stride_fp = [0.0, 0.0]
                        offset_fp = [0.0, 0.0]

                        arg_list.append(
                            (
                                "mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}".format(
                                    "N" if m == ResizeMode.NEAREST else "B",
                                    shift,
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride[0],
                                    stride[1],
                                    offset[0],
                                    offset[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )

        return arg_list
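
    # Worked example of the fixed-point encoding above (illustrative, assumed
    # sizes): scaling H from 4 input rows to 8 output rows gives
    # fp_stride_y = 4 / 8 = 0.5; with shift = 11, unit = 2048 and
    # stride_y = round(0.5 * 2048) = 1024, well under the 32768 bound, so the
    # while loop never needs to reduce the shift.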

    @staticmethod
    def agCondIf(testGen, opName, shapeList, dtype):
        # CondIf generates the condition values here.
        # Convert to tensors in the build function, along with the
        # then and else blocks
        arg_list = []

        for c in [False, True]:
            arg_list.append(("cond{}".format(int(c)), [c]))

        return arg_list

    @staticmethod
    def agWhileLoop(testGen, opName, shapeList, dtype):
        # While loop: 0 iterations, 1, more than 1
        arg_list = []

        for iter in [0, 1, 4]:
            arg_list.append(("iter{}".format(iter), [iter]))

        return arg_list


class TosaInvalidValidator:

    @staticmethod
    def ivWrongDataTypeOrModeResize(**kwargs):
        input_dtype = kwargs["input_dtype"]
        args = kwargs["args"]
        mode = args[0]
        stride = args[1]
        stride_fp = args[4]
        output_dtype = args[8]

        if mode == ResizeMode.BILINEAR:
            # Invalid unless the {input, output} dtype pair is a legal combination
            return not (
                (input_dtype == DType.INT8 and output_dtype == DType.INT32)
                or (input_dtype == DType.INT16 and output_dtype == DType.INT48)
                or (input_dtype == DType.FLOAT and output_dtype == DType.FLOAT)
            )
        elif mode == ResizeMode.NEAREST:
            # Invalid output data type / Invalid input datatype
            return (input_dtype != output_dtype) or (
                input_dtype not in [DType.INT8, DType.INT16, DType.FLOAT]
            )
        else:
            # Invalid resize mode
            return True

    @staticmethod
    def ivBadStride(**kwargs):
        input_dtype = kwargs["input_dtype"]
        args = kwargs["args"]
        stride_x = args[1][0]
        stride_y = args[1][1]
        stride_fp_x = args[4][0]
        stride_fp_y = args[4][1]

        if input_dtype == DType.FLOAT:
            if stride_fp_x <= 0 or stride_fp_y <= 0:
                # Negative or zero stride
                return True
        else:
            if stride_x <= 0 or stride_y <= 0:
                # Negative or zero stride
                return True
        return False

    @staticmethod
    def ivHeightWidthSmallerZero(**kwargs):
        opName = kwargs["opName"]

        inputShapes = kwargs["shapeList"]
        input = inputShapes[0]
        if not opName.endswith("pool2d"):
            filter = inputShapes[1]

        args = kwargs["args"]
        strides = args[0]
        padding = args[1]
        dilations = args[2]
        if opName.endswith("pool2d"):
            kernel = args[2]

        if opName.startswith("conv2d"):
            h = (
                input[1]
                - filter[1]
                - (filter[1] - 1) * (dilations[0] - 1)
                + padding[0]
                + padding[1]
            ) // strides[0] + 1

            w = (
                input[2]
                - filter[2]
                - (filter[2] - 1) * (dilations[1] - 1)
                + padding[2]
                + padding[3]
            ) // strides[1] + 1
        elif opName.startswith("depthwise_conv2d"):
            h = (
                input[1]
                - filter[0]
                - (filter[0] - 1) * (dilations[0] - 1)
                + padding[0]
                + padding[1]
            ) // strides[0] + 1

            w = (
                input[2]
                - filter[1]
                - (filter[1] - 1) * (dilations[1] - 1)
                + padding[2]
                + padding[3]
            ) // strides[1] + 1
        elif opName.endswith("pool2d"):
            h = (input[1] + padding[0] + padding[1] + strides[0] - kernel[0]) // strides[0]
            w = (input[2] + padding[2] + padding[3] + strides[1] - kernel[1]) // strides[1]
        else:
            assert False, "Unrecognized Op"

        if h <= 0 or w <= 0:
            # Invalid parameter combination
            return True
        return False
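
    # Illustrative check (assumed values): pooling a 1x8x8x4 input with
    # kernel = [2, 2], strides = [2, 2] and zero padding gives
    # h = (8 + 0 + 0 + 2 - 2) // 2 = 4, a valid combination; a 1x1x1x4 input
    # with the same arguments gives h = 0, so the argument set is flagged
    # invalid.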

    @staticmethod
    def ivNonPositiveOutputShape(**kwargs):
        args = kwargs["args"]
        output_shape = args[3]
        if output_shape[1] <= 0 or output_shape[2] <= 0:
            # Non-positive output shape
            return True
        return False


class TosaTestGen:
    # Maximum rank of tensor supported by test generator.
    TOSA_TENSOR_MAX_RANK = 6

    def __init__(self, args):
        self.args = args
        self.basePath = args.output_dir
        self.random_seed = args.random_seed
        self.ser = None
        self.rng = np.random.default_rng(self.random_seed)
        self.createDynamicOpLists()
        self.initOpListDefaults()
        self.quantGen = TosaQuantGen()
        # Force makeShape to do a specific starting shape
        self.targetted_shape = None

    def createSerializer(self, opName, testPath):
        self.testPath = os.path.join(opName, testPath)

        fullPath = os.path.join(self.basePath, self.testPath)
        os.makedirs(fullPath, exist_ok=True)
        self.ser = ts.TosaSerializer(fullPath)

    def getSerializer(self):
        return self.ser

    def serialize(self, testName):
        with open(
            os.path.join(self.basePath, self.testPath, "{}.tosa".format(testName)), "wb"
        ) as fd:
            fd.write(self.ser.serialize())

        with open(os.path.join(self.basePath, self.testPath, "desc.json"), "w") as fd:
            fd.write(self.ser.writeJson("{}.tosa".format(testName)))

    def resetRNG(self, seed=None):
        if seed is None:
            seed = self.random_seed + 1
        self.rng = np.random.default_rng(seed)

    def getRandTensor(self, shape, dtype):
        if dtype == DType.BOOL:
            return np.bool_(self.rng.choice(a=[False, True], size=shape))
        # TOSA specific INT4 weight range from -7 to 7
        elif dtype == DType.INT4:
            return np.int32(self.rng.integers(low=-7, high=8, size=shape))
        elif dtype == DType.INT8:
            return np.int32(self.rng.integers(low=-128, high=128, size=shape))
        elif dtype == DType.UINT8:
            return np.int32(self.rng.integers(low=0, high=256, size=shape))
        elif dtype == DType.INT16:
            return np.int32(self.rng.integers(low=-32768, high=32768, size=shape))
        elif dtype == DType.INT32:
            return np.int32(
                self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape)
            )
        elif dtype == DType.INT48:
            return np.int64(
                self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape)
            )
        elif dtype == DType.FLOAT:
            return np.float32(self.rng.random(size=shape))
        else:
            raise Exception("Unrecognized Dtype: {}".format(dtype))

    def buildPlaceholderTensors(self, shape_list, dtype_list):
        placeholders = []

        assert len(shape_list) == len(dtype_list)

        for idx, shape in enumerate(shape_list):
            arr = self.getRandTensor(shape, dtype_list[idx])
            placeholders.append(self.ser.addPlaceholder(shape, dtype_list[idx], arr))

        return placeholders

    def buildConstTensors(self, shape_list, dtype_list):
        consts = []

        assert len(shape_list) == len(dtype_list)

        for idx, shape in enumerate(shape_list):
            arr = self.getRandTensor(shape, dtype_list[idx])
            consts.append(self.ser.addConst(shape, dtype_list[idx], arr))

        return consts

    def makeShape(self, rank):
        if self.targetted_shape:
            return np.int32(self.targetted_shape)
        return np.int32(
            self.rng.integers(
                low=self.args.tensor_shape_range[0],
                high=self.args.tensor_shape_range[1],
                size=rank,
            )
        )

    def setTargetShape(self, shape):
        self.targetted_shape = shape

    def randInt(self, low=0, high=256):
        return np.int32(self.rng.integers(low=low, high=high, size=1))[0]

    def getRandNumberDType(self, dtype):
        if dtype == DType.FLOAT:
            return self.rng.random()
        elif dtype == DType.BOOL:
            return self.rng.choice([False, True])
        # TOSA specific INT4 weight range from -7 to 7
        elif dtype == DType.INT4:
            low, high = (-7, 8)
        elif dtype == DType.INT8:
            low, high = (-128, 128)
        elif dtype == DType.INT16:
            low, high = (-32768, 32768)
        elif dtype == DType.INT32:
            low, high = (-(1 << 31), (1 << 31))
        elif dtype == DType.INT48:
            low, high = (-(1 << 47), (1 << 47))
            # Special size
            return np.int64(self.rng.integers(low, high, size=1))[0]
        else:
            raise Exception("Unknown dtype: {}".format(dtype))

        return np.int32(self.rng.integers(low, high, size=1))[0]

    def shapeStr(self, shape):

        sStr = []
        # Convert to strings
        for i in shape:
            sStr.append(str(i))

        return "x".join(sStr)

    def typeStr(self, t):
        if isinstance(t, list):
            assert len(t) >= 2
            return "{}x{}".format(self.typeStr(t[0]), self.typeStr(t[1]))
        else:
            if t == DType.BOOL:
                return "b"
            elif t == DType.INT4:
                return "i4"
            elif t == DType.INT8:
                return "i8"
            elif t == DType.UINT8:
                return "u8"
            elif t == DType.INT16:
                return "i16"
            elif t == DType.INT32:
                return "i32"
            elif t == DType.INT48:
                return "i48"
            elif t == DType.FLOAT:
                return "float"
            else:
                raise Exception("Unknown dtype, cannot convert to string: {}".format(t))

    def typeWidth(self, t):
        """Get the datatype width for integer types"""
        if t == DType.INT4:
            return 4
        elif t == DType.INT8:
            return 8
        elif t == DType.UINT8:
            return 8
        elif t == DType.INT16:
            return 16
        elif t == DType.INT32:
            return 32
        elif t == DType.INT48:
            return 48
        else:
            raise Exception("Unknown dtype, cannot determine width: {}".format(t))

    # Argument generators
    # Returns a list of tuples (stringDescriptor, [build_fcn_arg_list])
    # Where the string descriptor is used to generate the test name and
    # The build_fcn_arg_list is expanded and passed to the operator test
    # build function

    def build_unary(self, op, a, qinfo=None):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_binary_broadcast(self, op, a, b):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_binary_nonbroadcast(self, op, a, b):
        result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_arithmetic_right_shift(self, op, a, b, round):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)

        attr = ts.TosaSerializerAttribute()
        attr.ArithmeticRightShiftAttribute(round)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens

    def build_mul(self, op, a, b, shift):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)

        # Special for multiply:
        # Force the result to INT32 for INT types
        if a.dtype != DType.FLOAT:
            result_tens.setDtype(DType.INT32)

        attr = ts.TosaSerializerAttribute()
        attr.MulAttribute(shift)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens

    def build_table(self, op, a):
        # Constant size depending on type, random values
        if a.dtype == DType.INT16:
            table_dtype = DType.INT16
            table_arr = self.getRandTensor([513], table_dtype)
        else:
            assert a.dtype == DType.INT8
            table_dtype = DType.INT8
            table_arr = self.getRandTensor([256], table_dtype)

        table_tens = self.ser.addConst(table_arr.shape, table_dtype, table_arr)
        result_tens = OutputShaper.tableOp(self.ser, a, table_dtype)
        self.ser.addOperator(op, [a.name, table_tens.name], [result_tens.name], None)

        return result_tens

    def build_select(self, op, cond, a, b):
        result_tens = OutputShaper.selectOp(self.ser, cond, a, b)
        self.ser.addOperator(op, [cond.name, a.name, b.name], [result_tens.name])
        return result_tens

    def build_comparison(self, op, a, b):
        result_tens = OutputShaper.binaryComparisonOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_argmax(self, op, a, axis):
        result_tens = OutputShaper.argmaxOp(self.ser, a, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_pool2d(self, op, input, stride, pad, kernel, qinfo=None):
        result_tens = OutputShaper.pool2dOp(self.ser, input, kernel, stride, pad)

        attr = ts.TosaSerializerAttribute()
        attr.Pool2dAttribute(kernel, stride, pad)

        self.ser.addOperator(op, [input.name], [result_tens.name], attr, qinfo)
        return result_tens

    def build_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
        assert len(padding) == 4
        result_tens = OutputShaper.conv2dOp(
            self.ser, ifm, filter, strides, padding, dilations
        )

        attr = ts.TosaSerializerAttribute()
        attr.Conv2dAttribute(padding, strides, dilations)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
        )
        return result_tens

    def build_transpose_conv2d(
        self, op, ifm, filter, bias, stride, outpad, dilation, output_shape, qinfo
    ):
        assert len(outpad) == 2
        result_tens = OutputShaper.transposeConv2DOp(self.ser, ifm, output_shape)

        attr = ts.TosaSerializerAttribute()
        attr.TransposeConv2DAttribute(outpad, stride, dilation, output_shape)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
        )
        return result_tens

    def build_depthwise_conv2d(
        self, op, ifm, filter, bias, strides, padding, dilations, qinfo
    ):
        result_tens = OutputShaper.depthwiseConv2dOp(
            self.ser, ifm, filter, strides, padding, dilations
        )

        attr = ts.TosaSerializerAttribute()
        attr.Conv2dAttribute(padding, strides, dilations)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
        )
        return result_tens

    def build_fully_connected(self, op, ifm, filter, bias, qinfo):
        result_tens = OutputShaper.fullyConnectedOp(self.ser, ifm, filter)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo
        )
        return result_tens

    def build_matmul(self, op, a, b, qinfo):
        result_tens = OutputShaper.matmulOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_reduce(self, op, a, axis):
        result_tens = OutputShaper.reduceOp(self.ser, a, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_clamp(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()
        v = [self.getRandNumberDType(a.dtype), self.getRandNumberDType(a.dtype)]

        if a.dtype == DType.FLOAT:
            attr.ClampAttribute(0, 0, min(v), max(v))
        else:
            attr.ClampAttribute(min(v), max(v), 0, 0)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_leaky_relu(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        attr = ts.TosaSerializerAttribute()

        attr.LeakyReluAttribute(self.getRandNumberDType(DType.FLOAT))

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    # Needs an additional type/input
    def build_prelu(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_relun(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()

        if a.dtype == DType.FLOAT:
            attr.ReluNAttribute(0, self.getRandNumberDType(a.dtype))
        else:
            attr.ReluNAttribute(self.getRandNumberDType(a.dtype), 0)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_sigmoid(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_tanh(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens
1464
Matthew Haddon818ab902021-07-27 09:12:49 +01001465 def build_concat(self, op, *a):
1466 assert (type(a[-1]) == int)
1467
1468 # To store variable length list of input tensors we need to store axis along with it
1469 axis = a[-1]
1470 a = a[:-1]
1471
1472 result_tens = OutputShaper.concatOp(self.ser, axis, *a)
Eric Kunzee5e26762020-10-13 16:11:07 -07001473
1474 attr = ts.TosaSerializerAttribute()
1475 attr.AxisAttribute(axis)
1476
Matthew Haddon818ab902021-07-27 09:12:49 +01001477 input_tensor_names = []
1478 for tensor in a:
1479 input_tensor_names.append(tensor.name)
1480
1481 self.ser.addOperator(op, input_tensor_names, [result_tens.name], attr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001482
1483 def build_pad(self, op, a, padding, qinfo):
1484 result_tens = OutputShaper.padOp(self.ser, a, padding)
1485
1486 # Need to turn the padding array into a TOSA tensor here.
1487 # This is one of the few tensor operands that does not get
1488 # randomly generated
Kevin Cheng550ccc52021-03-03 11:21:43 -08001489 padding_tens = self.ser.addConst(padding.shape, DType.INT32, padding)
Eric Kunzee5e26762020-10-13 16:11:07 -07001490
Kevin Cheng550ccc52021-03-03 11:21:43 -08001491 self.ser.addOperator(
1492 op, [a.name, padding_tens.name], [result_tens.name], None, qinfo
1493 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001494
1495 def build_reshape(self, op, a, newShape):
1496 result_tens = OutputShaper.reshapeOp(self.ser, a, newShape)
1497
1498 attr = ts.TosaSerializerAttribute()
1499 attr.ReshapeAttribute(newShape)
1500
1501 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1502 return result_tens
1503
1504 def build_reverse(self, op, a, axis):
1505 result_tens = OutputShaper.unaryOp(self.ser, a)
1506
1507 attr = ts.TosaSerializerAttribute()
1508 attr.AxisAttribute(axis)
1509
1510 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1511 return result_tens
1512
1513 def build_transpose(self, op, a, perms):
1514 result_tens = OutputShaper.transposeOp(self.ser, a, perms)
1515
Kevin Cheng550ccc52021-03-03 11:21:43 -08001516 perms_tens = self.ser.addConst([len(perms)], DType.INT32, np.int32(perms))
Eric Kunzee5e26762020-10-13 16:11:07 -07001517
1518 self.ser.addOperator(op, [a.name, perms_tens.name], [result_tens.name])
1519 return result_tens
1520
1521 def build_slice(self, op, a, begin, size):
1522 result_tens = OutputShaper.sliceOp(self.ser, a, begin, size)
1523
1524 attr = ts.TosaSerializerAttribute()
1525 attr.SliceAttribute(begin, size)
1526
1527 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1528 return result_tens
1529
1530 def build_tile(self, op, a, multiples):
1531 result_tens = OutputShaper.tileOp(self.ser, a, multiples)
1532
1533 attr = ts.TosaSerializerAttribute()
1534 attr.TileAttribute(multiples)
1535
1536 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1537 return result_tens
1538
Kevin Cheng77d0f762020-11-24 10:26:32 -08001539 def build_gather(self, op, values):
Eric Kunzee5e26762020-10-13 16:11:07 -07001540
1541 # Create a new indicies tensor
1542 # here with data that doesn't exceed the dimensions of the values tensor
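        # (TOSA GATHER shapes: values is (N, K, C), the generated indices are
        # (N, W), and the output is (N, W, C); each index picks one of the K
        # rows per batch.)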

        K = values.shape[1]  # K
        W = self.randInt(
            self.args.tensor_shape_range[0], self.args.tensor_shape_range[1]
        )  # W
        indicies_arr = np.int32(
            self.rng.integers(low=0, high=K, size=[values.shape[0], W])
        )  # (N, W)
        indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)

        result_tens = OutputShaper.gatherOp(self.ser, values, indicies)

        self.ser.addOperator(op, [values.name, indicies.name], [result_tens.name])

        return result_tens

    def build_scatter(self, op, values_in, input):

        # Create a new indices tensor here with data that
        # doesn't exceed the dimensions of the values_in tensor
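        # (TOSA SCATTER shapes: the W rows of input (N, W, C) are written into
        # values_in (N, K, C) at the row positions given by indices (N, W),
        # producing values_out (N, K, C).)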

        K = values_in.shape[1]  # K
        W = input.shape[1]  # W
        indicies_arr = np.int32(
            self.rng.integers(low=0, high=K, size=[values_in.shape[0], W])
        )  # (N, W)
        indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)

        result_tens = OutputShaper.scatterOp(self.ser, values_in, indicies, input)

        self.ser.addOperator(
            op, [values_in.name, indicies.name, input.name], [result_tens.name]
        )

        return result_tens

    def build_resize(
        self,
        op,
        input,
        mode,
        stride,
        offset,
        shift,
        stride_fp,
        offset_fp,
        output_dims,
        input_dtype,
        output_dtype,
    ):
        result_tens = OutputShaper.resizeOp(
            self.ser,
            input,
            mode,
            stride,
            offset,
            shift,
            stride_fp,
            offset_fp,
            output_dims,
            input_dtype,
            output_dtype,
        )

        attr = ts.TosaSerializerAttribute()

        attr.ResizeAttribute(
            output_dims, stride, offset, shift, stride_fp, offset_fp, mode
        )

        self.ser.addOperator(op, [input.name], [result_tens.name], attr)
        return result_tens

    def build_identityn(self, op, val, val2):

        result_tens = OutputShaper.unaryOp(self.ser, val)
        result_tens2 = OutputShaper.unaryOp(self.ser, val2)
        self.ser.addOperator(
            op, [val.name, val2.name], [result_tens.name, result_tens2.name]
        )
        return result_tens

    def build_placeholder(self, op, val):
        # Add an identity op to avoid a warning in the reference model
        return self.build_unary(Op.IDENTITY, val)

    # Type conversion
    def build_cast(self, op, val, out_dtype):
        result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
        self.ser.addOperator(op, [val.name], [result_tens.name])
        return result_tens

    def build_rescale(self, op, val, out_dtype, scale32, double_round, per_channel):
        result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)

        if per_channel:
            nc = val.shape[-1]
        else:
            nc = 1

        in_type_width = self.typeWidth(val.dtype)
        out_type_width = self.typeWidth(out_dtype)

        if val.dtype == DType.INT8:
            input_zp = self.randInt(-128, 128)
            in_type_width = in_type_width + 1
        elif val.dtype == DType.UINT8:
            input_zp = self.randInt(0, 256)
            in_type_width = in_type_width + 1
        else:
            input_zp = 0

        if out_dtype == DType.INT8:
            output_zp = self.randInt(-128, 128)
            out_type_width = out_type_width + 1
        elif out_dtype == DType.UINT8:
            output_zp = self.randInt(0, 256)
            out_type_width = out_type_width + 1
        else:
            output_zp = 0

        # Calculate scale based on:
        # scale = a * (2^output_width) / (2^input_width)
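        # e.g. an INT8 -> INT16 rescale has an effective input width of 9
        # (8 bits plus the zero-point extension above) and an output width of
        # 16, so the random factor 'a' in [0, 1) is scaled by 2^16 / 2^9 = 128
        # before being decomposed into multiplier/shift pairs below.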

        a = np.float32(self.rng.random(size=[nc]))
        scale_arr = a * np.float32((1 << out_type_width) / (1 << in_type_width))

        if scale32:
            # Cap the scaling at 2^31 - 1 for scale32
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1)
        else:
            # Cap the scaling at 2^15 - 1 for scale16
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)

        # print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))

        multiplier_arr = np.int32(np.zeros(shape=[nc]))
        shift_arr = np.int32(np.zeros(shape=[nc]))

        for i in range(nc):
            multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(
                scale_arr[i], scale32
            )

        # print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp))

        attr = ts.TosaSerializerAttribute()
        attr.RescaleAttribute(
            input_zp,
            output_zp,
            multiplier_arr,
            shift_arr,
            scale32,
            double_round,
            per_channel,
        )

        self.ser.addOperator(op, [val.name], [result_tens.name], attr)
        return result_tens

    def build_cond_if_const(self, op, then_tens, else_tens, cond):
        # For cond_if with constants, we're supplied with then/else tensors that
        # we ignore (except for the generated shape) and the condition. Build
        # Then/Else blocks and fill them with const nodes for the body.

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, [cond])

        # Make then/else tensors
        out_shape = then_tens.shape
        then_arr = np.int32(self.rng.integers(0, 256, size=out_shape))
        else_arr = np.int32(self.rng.integers(0, 256, size=out_shape))

        # And the result tensor based on any of the outputs
        result_tens = self.ser.addOutput(out_shape, DType.INT32)

        # Create the attribute with the names of the then/else blocks
        then_block = "THEN_BLOCK"
        else_block = "ELSE_BLOCK"
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(op, [cond_tens.name], [result_tens.name], attr)

        self.ser.startBasicBlock(then_block)
        # Build the actual then/else tensors inside their blocks
        then_tens = self.ser.addConst(out_shape, DType.INT32, then_arr)
        self.ser.addOutputTensor(then_tens)

        self.ser.startBasicBlock(else_block)
        else_tens = self.ser.addConst(out_shape, DType.INT32, else_arr)
        self.ser.addOutputTensor(else_tens)

        return result_tens

    def build_cond_if_binary(self, op, a, b, cond):
        # For cond_if with a binary op in the then/else blocks, take a and b and
        # alternately add or subtract them based on the condition

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, [cond])

        result_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.currBasicBlock.addOutput(result_tens.name)

        # Create the attribute with the names of the then/else blocks
        then_block = "THEN_BLOCK"
        else_block = "ELSE_BLOCK"
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(
            op, [cond_tens.name, a.name, b.name], [result_tens.name], attr
        )

        self.ser.startBasicBlock(then_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        then_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.addOperator(Op.ADD, [a.name, b.name], [then_tens.name])

        self.ser.startBasicBlock(else_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        else_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.addOperator(Op.SUB, [a.name, b.name], [else_tens.name])

        return result_tens

    def build_while_loop(self, op, a, iter_val):
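        # The generated graph repeatedly adds 'a' into an accumulator while
        # decrementing 'iter' until it reaches zero, so the loop output is
        # effectively a * iter_val.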
        iter = self.ser.addPlaceholder([], DType.INT32, [np.int32(iter_val)])

        cond_block = "COND_BLOCK"
        body_block = "BODY_BLOCK"

        attr = ts.TosaSerializerAttribute()
        attr.WhileLoopAttribute(cond_block, body_block)

        # Accumulator tensor
        # acc = self.ser.addOutput(a.shape, a.dtype)
        acc_init_val = np.int32(np.zeros(a.shape))
        acc = self.ser.addPlaceholder(a.shape, a.dtype, acc_init_val)

        # Intermediate/output tensors for everything going through the loop
        iter_out = self.ser.addIntermediate(iter.shape, iter.dtype)
        a_out = self.ser.addIntermediate(a.shape, a.dtype)
        acc_out = self.ser.addIntermediate(acc.shape, acc.dtype)

        # While_loop operator
        self.ser.addOperator(
            op,
            [iter.name, a.name, acc.name],
            [iter_out.name, a_out.name, acc_out.name],
            attr,
        )
        self.ser.addOutputTensor(acc_out)

        # COND block (input: iter, output: cond_tens)
        self.ser.startBasicBlock(cond_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        zero_tens = self.ser.addConst([], DType.INT32, [np.int32(0)])
        cond_tens = self.ser.addOutput([], DType.BOOL)
        self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name], [cond_tens.name])

        # BODY block (input: a, acc, iter, output: a, acc, iter)
        # Note that local intermediate tensors need to be declared here for the outputs
        self.ser.startBasicBlock(body_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        one_tens = self.ser.addConst([], DType.INT32, [np.int32(1)])
        iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype)
        acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype)
        self.ser.addOperator(Op.ADD, [a.name, acc.name], [acc_body_out.name])
        self.ser.addOperator(Op.SUB, [iter.name, one_tens.name], [iter_body_out.name])
        self.ser.addOutputTensor(iter_body_out)
        self.ser.addOutputTensor(a)
        self.ser.addOutputTensor(acc_body_out)

        return acc_out

    def genOpTestList(
        self, opName, shapeFilter=[None], rankFilter=None, dtypeFilter=None, testType="positive"
    ):

        try:
            op = self.TOSA_OP_LIST[opName]
        except KeyError:
            raise Exception("Cannot find op with name {}".format(opName))

        # Initialize a new random number generator
        self.rng = np.random.default_rng(self.random_seed)

        build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]

        # Generate the lists of arguments
        rmin, rmax = op["rank"]

        # Create a default testing rank range, 1-4 inclusive, to keep test sizes reasonably small.
        default_test_rank_range = range(1, 5)

        # Test list consists of a tuple of:
        # (opName, testNameStr, dtype, shapeList, argumentsList)
        testList = []

        if not shapeFilter:
            shapeFilter = [None]

        # Positive test loop
        if testType in ["positive", "both"]:
            for r in range(rmin, rmax + 1):

                # Filter out the rank?
                if rankFilter is not None and r not in rankFilter:
                    continue
                if (
                    rankFilter is None
                    and shapeFilter[0] is None
                    and r not in default_test_rank_range
                ):
                    continue

                for t in op["types"]:

                    # Filter tests based on dtype?
                    if dtypeFilter is not None:
                        if not (
                            t in dtypeFilter
                            or (isinstance(t, list) and t[0] in dtypeFilter)
                        ):
                            continue

                    # Create the placeholder and const tensors
                    for shape in shapeFilter:
                        # A None shape chooses a random shape of a given rank

                        # Filter out by rank
                        if shape is not None and len(shape) != r:
                            continue

                        self.setTargetShape(shape)
                        shapeList = tgen_fcn(self, op, r)

                        shapeStr = self.shapeStr(shapeList[0])
                        typeStr = self.typeStr(t)

                        # Argument lists consist of tuples of the (str, []) string representation and the build function argument list
                        argList = []
                        if agen_fcn:
                            argList = agen_fcn(self, opName, shapeList, t)
                        else:
                            argList = [("", [])]

                        for argStr, args in argList:
                            if argStr:
                                testStr = "{}_{}_{}_{}".format(
                                    opName, shapeStr, typeStr, argStr
                                )
                            else:
                                testStr = "{}_{}_{}".format(opName, shapeStr, typeStr)

                            testList.append((opName, testStr, t, shapeList, args))

        # Remove tests which are expected to fail but don't correlate to an ERROR_IF statement
        if "invalid_test_validators" in op:
            invalid_test_validators = op["invalid_test_validators"]
            clean_testList = []
            for test in testList:
                # Reset the flag once per test (not per validator) so that any
                # matching validator removes the test
                remove_test = False
                for validator_fcn in invalid_test_validators:
                    if validator_fcn(
                        opName=test[0],
                        input_dtype=test[2],
                        shapeList=test[3],
                        args=test[4],
                    ):
                        remove_test = True
                if not remove_test:
                    clean_testList.append(test)
            testList = clean_testList

        # Reset RNG so both positive and negative tests are reproducible
        self.resetRNG()
        # Negative test loop
        if testType in ["negative", "both"]:
            print("Negative tests unsupported")

        return testList

    def serializeTest(self, opName, testStr, dtype_or_dtypeList, shapeList, testArgs):
        try:
            op = self.TOSA_OP_LIST[opName]
        except KeyError:
            raise Exception("Cannot find op with name {}".format(opName))

        # Create a serializer
        self.createSerializer(opName, testStr)

        build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
        pCount, cCount = op["operands"]
        num_operands = pCount + cCount

        if isinstance(dtype_or_dtypeList, list):
            dtypeList = dtype_or_dtypeList
        elif op["op"] == Op.CONCAT:
            dtypeList = [dtype_or_dtypeList] * len(shapeList)
        else:
            dtypeList = [dtype_or_dtypeList] * (num_operands)

        if op["op"] != Op.CONCAT:
            assert (
                len(shapeList) == num_operands
            ), "shapeList length {} must match number of operands {}".format(
                len(shapeList), num_operands
            )
            assert (
                len(dtypeList) == num_operands
            ), "dtypeList length {} must match number of operands {}".format(
                len(dtypeList), num_operands
            )

        try:
            qgen = op["qgen"]
        except KeyError:
            qgen = None

        # Build the random tensor operands and the test
        tens = []

        if (op["op"] == Op.ADD or op["op"] == Op.SUB) and dtypeList[0] == DType.INT32:
            # Make sure the operation does not cause value saturation - where
            # the number wraps due to the limited number of bits to store the answer
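            # e.g. for ADD with a = 2**31 - 1 and b = 5, the true sum 2**31 + 4
            # exceeds the int32 maximum; sat_max_arr below would be 5 and b is
            # clipped down by that amount so the test never saturates.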
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ADD / Op.SUB must have 2 placeholders, 0 consts"

            placeholders = []
            add = op["op"] == Op.ADD
            a_arr = self.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = self.getRandTensor(shapeList[1], dtypeList[1])
            if add:
                res_arr = np.add(a_arr, b_arr, dtype=np.int64)
            else:
                res_arr = np.subtract(a_arr, b_arr, dtype=np.int64)

            # Work out the saturation limits
            max_i32 = (1 << 31) - 1
            min_i32 = -(1 << 31)
            max_arr = np.full(shapeList[1], max_i32)
            min_arr = np.full(shapeList[1], min_i32)

            # Find how much the values exceed the maximum/minimum
            sat_max_arr = np.maximum(res_arr - max_arr, 0)
            sat_min_arr = np.minimum(res_arr - min_arr, 0)

            if not add:
                # Swap the saturation values and negate them, as we need to
                # perform the opposite operations
                sat_max_arr, sat_min_arr = -sat_min_arr, -sat_max_arr

            # Create a new array of unsaturated values by clipping values as needed
            b_unsat_arr = b_arr
            if (sat_max_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_max_arr, dtype=np.int32)
                # Reduce axes in the unsaturated tensor to match the original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amin(b_unsat_arr, axis=axis, keepdims=True)

            if (sat_min_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_min_arr, dtype=np.int32)
                # Reduce axes in the unsaturated tensor to match the original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amax(b_unsat_arr, axis=axis, keepdims=True)

            placeholders.append(
                self.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                self.ser.addPlaceholder(shapeList[1], dtypeList[1], b_unsat_arr)
            )

            tens.extend(placeholders)
        elif op["op"] == Op.ARITHMETIC_RIGHT_SHIFT:
            # Force the value of operand[1] to be within [0, num_bits]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"

            placeholders = []
            for idx, shape in enumerate(shapeList[:]):
                if idx == 1:
                    if dtypeList[idx] == DType.INT8:
                        arr = np.int32(self.rng.integers(low=0, high=8, size=shape))
                    elif dtypeList[idx] == DType.INT16:
                        arr = np.int32(self.rng.integers(low=0, high=16, size=shape))
                    elif dtypeList[idx] == DType.INT32:
                        arr = np.int32(self.rng.integers(low=0, high=32, size=shape))
                    else:
                        raise Exception("OpArithmeticRightShift: invalid input dtype")
                else:
                    arr = self.getRandTensor(shape, dtypeList[idx])
                placeholders.append(self.ser.addPlaceholder(shape, dtypeList[idx], arr))

            tens.extend(placeholders)
        elif op["op"] == Op.SELECT:
            # Set the datatype of the condition tensor to boolean
            dtypeList[0] = DType.BOOL
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
            )
            tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))
        elif op["op"] == Op.INTDIV:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.INTDIV must have 2 placeholders, 0 consts"

            placeholders = []

            # Two invalid cases for Op.INTDIV:
            # 1. divisor == 0
            # 2. dividend == -(1 << 31) and divisor == -1
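            # (case 2 is invalid because -(1 << 31) / -1 == 1 << 31, which is
            # one past the largest representable int32 value)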
            while True:
                dividend_arr = self.getRandTensor(shapeList[0], dtypeList[0])
                divisor_arr = self.getRandTensor(shapeList[1], dtypeList[1])

                if (divisor_arr == 0).any():
                    continue

                if (dividend_arr == -(2 ** 31)).any() and (divisor_arr == -1).any():
                    continue

                break

            placeholders.append(
                self.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
            )
            placeholders.append(
                self.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
            )

            tens.extend(placeholders)
        elif op["op"] == Op.MUL:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.MUL must have 2 placeholders, 0 consts"

            if dtypeList[0] == DType.FLOAT:
                tens.extend(self.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
            else:
                placeholders = []

                # Make sure multiply result in int32 range
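                # e.g. with shift == 0, two INT32 operands of 2**16 each give a
                # product of 2**32, which overflows int32, so the loop below
                # halves both operands until the (rounded, shifted) product fits.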
                shift = testArgs[0]
                if dtypeList[0] == DType.INT8:
                    num_bits = 8
                elif dtypeList[0] == DType.INT16:
                    num_bits = 16
                elif dtypeList[0] == DType.INT32:
                    num_bits = 32
                else:
                    raise Exception("OpMul: invalid input dtype")

                for idx, shape in enumerate(shapeList[:]):
                    low = -(2 ** (num_bits - 1))
                    high = (2 ** (num_bits - 1)) - 1

                    a_arr = np.int32(
                        self.rng.integers(low=low, high=high, size=shapeList[0])
                    )
                    b_arr = np.int32(
                        self.rng.integers(low=low, high=high, size=shapeList[1])
                    )

                    i = 0
                    while True:

                        a_arr_64 = a_arr.astype(np.int64)
                        b_arr_64 = b_arr.astype(np.int64)

                        if shift > 0:
                            rounding = 1 << (shift - 1)
                            result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
                        else:
                            result_arr = a_arr_64 * b_arr_64

                        if (result_arr > -(2 ** 31)).all() and (
                            result_arr <= ((2 ** 31) - 1)
                        ).all():
                            break

                        i = i + 1
                        a_arr = a_arr // 2
                        b_arr = b_arr // 2

                placeholders.append(
                    self.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
                )
                placeholders.append(
                    self.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
                )

                tens.extend(placeholders)
        elif op["op"] == Op.CONCAT:
            count = len(shapeList) - self.args.num_const_inputs_concat
            if count < 1:
                count = 1
            if self.args.num_const_inputs_concat == 0:
                count = len(shapeList)

            shapeList = TosaTensorGen.tgConcatConstInput(self, shapeList, testArgs[0])
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:count], dtypeList[0:count])
            )
            tens.extend(self.buildConstTensors(shapeList[count:], dtypeList[count:]))
        else:
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
            )
            tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))

        if qgen is not None:
            qinfo = qgen(self, op, dtype_or_dtypeList)
        else:
            qinfo = None

        try:
            if qinfo is not None:
                resultName = build_fcn(self, op["op"], *tens, *testArgs, qinfo)
            else:
                resultName = build_fcn(self, op["op"], *tens, *testArgs)
        except TypeError as e:
            print(
                "build_fcn: {}\nTensors: {}\nArgs: {}\n".format(
                    build_fcn, tens, testArgs
                )
            )
            raise e

        # Save the serialized test
        self.serialize("test")

    def createDynamicOpLists(self):

        # Dynamically create op lists for convolutions with a list of kernel sizes
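        # e.g. "conv2d_TEMPLATE" expands to conv2d_1x1, conv2d_2x2, conv2d_3x3,
        # conv2d_5x5, conv2d_3x1 and conv2d_1x3, one entry per kernel below.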
        KERNELS = [[1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3]]

        for k in KERNELS:
            testName = "conv2d_{}x{}".format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST["conv2d_TEMPLATE"].copy()
            self.TOSA_OP_LIST[testName]["filter"] = k
            self.TOSA_OP_LIST[testName]["template"] = False

            testName = "depthwise_conv2d_{}x{}".format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
                "depthwise_conv2d_TEMPLATE"
            ].copy()
            self.TOSA_OP_LIST[testName]["filter"] = k
            self.TOSA_OP_LIST[testName]["template"] = False

            testName = "transpose_conv2d_{}x{}".format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
                "transpose_conv2d_TEMPLATE"
            ].copy()
            self.TOSA_OP_LIST[testName]["filter"] = k
            self.TOSA_OP_LIST[testName]["template"] = False

        # Delete any templates after having created any dynamic ops
        # This is a two-pass operation because it's bad practice to delete
        # keys from dictionaries while iterating
        keyList = []
        for k in self.TOSA_OP_LIST:
            try:
                if self.TOSA_OP_LIST[k]["template"]:
                    keyList.append(k)
                    continue
            except KeyError:
                pass

        for k in keyList:
            del self.TOSA_OP_LIST[k]

    def initOpListDefaults(self):
        """Fill in default fields for ops if they aren't already specified.
        Look for missing required fields (datastructure linting)."""
        for op in self.TOSA_OP_LIST:

            # Required fields
            try:
                pl, c = self.TOSA_OP_LIST[op]["operands"]
            except (KeyError, ValueError, TypeError):
                raise Exception(
                    "Op {} is missing a valid operand tuple in TOSA_OP_LIST".format(op)
                )

            try:
                fcn, tgen, arggen = self.TOSA_OP_LIST[op]["build_fcn"]
            except (KeyError, ValueError, TypeError):
                raise Exception(
                    "Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST".format(
                        op
                    )
                )

            try:
                types = self.TOSA_OP_LIST[op]["types"]
            except KeyError:
                raise Exception(
                    "Op {} is missing a valid type list in TOSA_OP_LIST".format(op)
                )

            try:
                opcode = self.TOSA_OP_LIST[op]["op"]
            except KeyError:
                raise Exception(
                    "Op {} is missing the Op field in TOSA_OP_LIST".format(op)
                )

            # Put in the default rank range, if missing
            try:
                rank = self.TOSA_OP_LIST[op]["rank"]
            except KeyError:
                self.TOSA_OP_LIST[op]["rank"] = self.DEFAULT_RANK_RANGE

    # Tensor operator list
    # 'op': op name
    # 'operands': tuple of (placeholder, const) operands
    # 'rank': optional, restricts rank to a tuple inclusive of (min, max);
    #         if not specified, defaults to (1, 4)
    # 'build_fcn': tuple of (build_operator() function, TensorGen function, ArgGen enum)
    # 'types': array of datatypes to be tested
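    # A minimal example is the "abs" entry below: one placeholder operand, the
    # default rank range, a unary build function and no argument generator.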
    TYPE_FP = [DType.FLOAT]

    TYPE_INT = [DType.INT8, DType.INT16, DType.INT32]  # Excludes INT4
    TYPE_INT_FP = [DType.INT8, DType.INT16, DType.INT32, DType.FLOAT]  # Excludes INT4

    TYPE_BOOL = [DType.BOOL]
    TYPE_FI32 = [DType.FLOAT, DType.INT32]
    TYPE_FIB = [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL]
    TYPE_FI16 = [DType.FLOAT, DType.INT16]

    TYPE_NARROW_INT_FP = [DType.INT8, DType.INT16, DType.FLOAT]

    TYPE_CONV2D = [
        [DType.INT8, DType.INT4, DType.INT32],
        [DType.INT8, DType.INT8, DType.INT32],
        [DType.INT16, DType.INT8, DType.INT48],
        DType.FLOAT,
    ]

    DEFAULT_RANK_RANGE = (1, TOSA_TENSOR_MAX_RANK)

    TOSA_OP_LIST = {
        # Tensor operators
        "argmax": {
            "op": Op.ARGMAX,
            "operands": (1, 0),
            "build_fcn": (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_NARROW_INT_FP,
        },
        "avg_pool2d": {
            "op": Op.AVG_POOL2D,
            "operands": (1, 0),
            "rank": (4, 4),
            "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
            "qgen": TosaQuantGen.qgUnary,
            "types": TYPE_NARROW_INT_FP,
            "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,),
        },
        # Templated operator. Filled in by createDynamicOpLists
        "conv2d_TEMPLATE": {
            "op": Op.CONV2D,
            "operands": (1, 2),
            "rank": (4, 4),
            "build_fcn": (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV2D,
            "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,),
            "template": True,
        },
        # Conv3d TBD
        # Templated operator. Filled in by createDynamicOpLists
        "depthwise_conv2d_TEMPLATE": {
            "op": Op.DEPTHWISE_CONV2D,
            "operands": (1, 2),
            "filter": [1, 1],
            "rank": (4, 4),
            "build_fcn": (
                build_depthwise_conv2d,
                TosaTensorGen.tgDepthwiseConv2D,
                TosaArgGen.agConv2D,
            ),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV2D,
            "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,),
            "template": True,
        },
        "fully_connected": {
            "op": Op.FULLY_CONNECTED,
            "operands": (1, 2),
            "rank": (2, 2),
            "build_fcn": (build_fully_connected, TosaTensorGen.tgFullyConnected, None),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV2D,
        },
        "matmul": {
            "op": Op.MATMUL,
            "operands": (2, 0),
            "rank": (3, 3),
            "build_fcn": (build_matmul, TosaTensorGen.tgMatmul, None),
            "qgen": TosaQuantGen.qgMatmul,
            "types": TYPE_NARROW_INT_FP,
        },
        "max_pool2d": {
            "op": Op.MAX_POOL2D,
            "operands": (1, 0),
            "rank": (4, 4),
            "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
            "types": TYPE_NARROW_INT_FP,
            "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,),
        },
        # Templated operator. Filled in by createDynamicOpLists
        "transpose_conv2d_TEMPLATE": {
            "op": Op.TRANSPOSE_CONV2D,
            "operands": (1, 2),
            "rank": (4, 4),
            "build_fcn": (
                build_transpose_conv2d,
                TosaTensorGen.tgTransposeConv2D,
                TosaArgGen.agTransposeConv2D,
            ),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV2D,
            "invalid_test_validators": (TosaInvalidValidator.ivNonPositiveOutputShape,),
            "template": True,
        },
        # Activation functions
        "clamp": {
            "op": Op.CLAMP,
            "operands": (1, 0),
            "build_fcn": (build_clamp, TosaTensorGen.tgBasic, None),
            "types": TYPE_NARROW_INT_FP,
        },
        "relun": {
            "op": Op.RELUN,
            "operands": (1, 0),
            "build_fcn": (build_relun, TosaTensorGen.tgBasic, None),
            "types": TYPE_FI32,
        },
        "sigmoid": {
            "op": Op.SIGMOID,
            "operands": (1, 0),
            "build_fcn": (build_sigmoid, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "tanh": {
            "op": Op.TANH,
            "operands": (1, 0),
            "build_fcn": (build_tanh, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        # Elementwise Binary Operators
        "add": {
            "op": Op.ADD,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "arithmetic_right_shift": {
            "op": Op.ARITHMETIC_RIGHT_SHIFT,
            "operands": (2, 0),
            "build_fcn": (
                build_arithmetic_right_shift,
                TosaTensorGen.tgBroadcastFuzz,
                TosaArgGen.agArithmeticRightShift,
            ),
            "types": TYPE_INT,
        },
        "bitwise_and": {
            "op": Op.BITWISE_AND,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "bitwise_or": {
            "op": Op.BITWISE_OR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "bitwise_xor": {
            "op": Op.BITWISE_XOR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "intdiv": {
            "op": Op.INTDIV,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": [DType.INT32],
        },
        "logical_and": {
            "op": Op.LOGICAL_AND,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_BOOL,
        },
        "logical_left_shift": {
            "op": Op.LOGICAL_LEFT_SHIFT,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "logical_right_shift": {
            "op": Op.LOGICAL_RIGHT_SHIFT,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "logical_or": {
            "op": Op.LOGICAL_OR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_BOOL,
        },
        "logical_xor": {
            "op": Op.LOGICAL_XOR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_BOOL,
        },
        "maximum": {
            "op": Op.MAXIMUM,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "minimum": {
            "op": Op.MINIMUM,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "mul": {
            "op": Op.MUL,
            "operands": (2, 0),
            "build_fcn": (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul),
            "types": TYPE_INT_FP,
        },
        "pow": {
            "op": Op.POW,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "sub": {
            "op": Op.SUB,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "table": {
            "op": Op.TABLE,
            # Use the automatic generation functions to create the input array
            # but create the table tensor in the build function, as it may be
            # a different type from the input
            "operands": (1, 0),
            "build_fcn": (build_table, TosaTensorGen.tgBasic, None),
            "types": [DType.INT8, DType.INT16],
        },
        # Elementwise Unary operators
        "abs": {
            "op": Op.ABS,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FI32,
        },
        "bitwise_not": {
            "op": Op.BITWISE_NOT,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_INT,
        },
        "ceil": {
            "op": Op.CEIL,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "clz": {
            "op": Op.CLZ,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": [DType.INT32],
        },
        "exp": {
            "op": Op.EXP,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "floor": {
            "op": Op.FLOOR,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "log": {
            "op": Op.LOG,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "logical_not": {
            "op": Op.LOGICAL_NOT,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_BOOL,
        },
        "negate": {
            "op": Op.NEGATE,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "qgen": TosaQuantGen.qgUnary,
            "types": TYPE_INT_FP,
        },
        "reciprocal": {
            "op": Op.RECIPROCAL,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "rsqrt": {
            "op": Op.RSQRT,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        # Elementwise Ternary operators
        "select": {
            "op": Op.SELECT,
            "operands": (3, 0),
            "build_fcn": (build_select, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FIB,
        },
        # Comparison operators
        "equal": {
            "op": Op.EQUAL,
            "operands": (2, 0),
            "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "greater_equal": {
            "op": Op.GREATER_EQUAL,
            "operands": (2, 0),
            "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "greater": {
            "op": Op.GREATER,
            "operands": (2, 0),
            "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        # Reduction operators
        "reduce_all": {
            "op": Op.REDUCE_ALL,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_BOOL,
        },
        "reduce_any": {
            "op": Op.REDUCE_ANY,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_BOOL,
        },
        "reduce_max": {
            "op": Op.REDUCE_MAX,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_INT_FP,
        },
        "reduce_min": {
            "op": Op.REDUCE_MIN,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_INT_FP,
        },
        "reduce_product": {
            "op": Op.REDUCE_PRODUCT,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_FP,
        },
        "reduce_sum": {
            "op": Op.REDUCE_SUM,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_FI32,
        },
        # Data layout operators
        "concat": {
            "op": Op.CONCAT,
            "operands": (2, 0),
            "build_fcn": (build_concat, TosaTensorGen.tgConcat, TosaArgGen.agAxis),
            "types": TYPE_FIB,
        },
        "pad": {
            "op": Op.PAD,
            "operands": (1, 0),
            "build_fcn": (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad),
            "qgen": TosaQuantGen.qgPad,
            "types": TYPE_FIB,
        },
        "reshape": {
            "op": Op.RESHAPE,
            "operands": (1, 0),
            "build_fcn": (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
            "types": TYPE_FIB,
        },
        "reverse": {
            "op": Op.REVERSE,
            "operands": (1, 0),
            "build_fcn": (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_FIB,
        },
        "slice": {
            "op": Op.SLICE,
            "operands": (1, 0),
            "build_fcn": (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
            "types": TYPE_FIB,
        },
        "tile": {
            "op": Op.TILE,
            "operands": (1, 0),
            "build_fcn": (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
            "types": TYPE_FIB,
        },
        "transpose": {
            "op": Op.TRANSPOSE,
            "operands": (1, 0),
            "rank": (1, 4),
            "build_fcn": (
                build_transpose,
                TosaTensorGen.tgBasic,
                TosaArgGen.agTranspose,
            ),
            "types": TYPE_FIB,
        },
        # Data nodes
        "const": {
            "op": Op.CONST,
            "operands": (1, 0),
            "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None),
            "types": TYPE_FIB,
        },
        "identity": {
            "op": Op.IDENTITY,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FIB,
        },
        # Scatter/Gather
        "gather": {
            "op": Op.GATHER,
            # Only specify the 'values' tensor here. 'indices' is generated in the op building stage
            "operands": (1, 0),
            "rank": (3, 3),
            "build_fcn": (build_gather, TosaTensorGen.tgBasic, None),
            "types": TYPE_INT_FP,
        },
        "scatter": {
            "op": Op.SCATTER,
            # Only specify the 'values_in' tensor here.
            # 'indices' and 'input' are generated in the op building stage
            "operands": (2, 0),
            "rank": (3, 3),
            "build_fcn": (build_scatter, TosaTensorGen.tgScatter, None),
            "types": TYPE_INT_FP,
        },
        # Image operations
        "resize": {
            "op": Op.RESIZE,
            "operands": (1, 0),
            "rank": (4, 4),
            "build_fcn": (build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
            "types": [DType.INT8, DType.INT16, DType.FLOAT],
            "invalid_test_validators": (
                TosaInvalidValidator.ivWrongDataTypeOrModeResize,
                TosaInvalidValidator.ivBadStride,
            ),
        },
        # Type conversion
        "cast": {
            "op": Op.CAST,
            "operands": (1, 0),
            "build_fcn": (build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast),
            "types": [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL],
        },
        "rescale": {
            "op": Op.RESCALE,
            "operands": (1, 0),
            "build_fcn": (build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale),
            "types": [DType.UINT8, DType.INT8, DType.INT16, DType.INT32, DType.INT48],
        },
        # Custom
        # Not implemented.
        # Control flow operators
        # Two variants of cond_if: one that generates one of two constant tensors (no
        # inputs to the basic blocks, one output) and another that either adds or
        # subtracts two tensors (two inputs to the basic blocks, one output)
        "cond_if_const": {
            "op": Op.COND_IF,
            "operands": (0, 2),
            "build_fcn": (
                build_cond_if_const,
                TosaTensorGen.tgBasic,
                TosaArgGen.agCondIf,
            ),
            "types": [DType.BOOL],
        },
        "cond_if_binary": {
            "op": Op.COND_IF,
            "operands": (2, 0),
            "build_fcn": (
                build_cond_if_binary,
                TosaTensorGen.tgBasic,
                TosaArgGen.agCondIf,
            ),
            "types": TYPE_FI32,
        },
        # while_loop
        "while_loop": {
            "op": Op.WHILE_LOOP,
            "operands": (0, 1),
            "build_fcn": (
                build_while_loop,
                TosaTensorGen.tgBasic,
                TosaArgGen.agWhileLoop,
            ),
            "types": [DType.INT32],
        },
    }


class OutputShaper:
    # Methods in this class compute the expected output shape and datatype
    # for common classes of operations
    def __init__(self):
        pass

    # These methods return arguments that can be used for
    # creating a new output tensor
    @staticmethod
    def binaryBroadcastOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

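        # Broadcast along any dimension where a is 1,
        # e.g. a = (2, 1, 3) and b = (2, 5, 3) give an output shape of (2, 5, 3)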
2804 shape = []
2805 for i in range(len(a.shape)):
2806 if a.shape[i] == 1:
2807 shape.append(b.shape[i])
2808 else:
2809 shape.append(a.shape[i])
2810
Kevin Cheng550ccc52021-03-03 11:21:43 -08002811 return ser.addOutput(shape, a.dtype)

    @staticmethod
    def binaryNonBroadcastOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            assert a.shape[i] == b.shape[i]
            shape.append(a.shape[i])

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def unaryOp(ser, a):
        return ser.addOutput(a.shape, a.dtype)

    @staticmethod
    def selectOp(ser, cond, a, b):
        assert len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            shape.append(max(cond.shape[i], a.shape[i], b.shape[i]))

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def binaryComparisonOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        # Do broadcast
        shape = []
        for i in range(len(a.shape)):
            if a.shape[i] == 1:
                shape.append(b.shape[i])
            else:
                shape.append(a.shape[i])

        # Force the output type to bool
        return ser.addOutput(shape, DType.BOOL)

    @staticmethod
    def reduceOp(ser, a, axis):

        shape = a.shape.copy()

        shape[axis] = 1

        return ser.addOutput(shape, a.dtype)
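    # e.g. (illustrative): reducing [2, 3, 4] along axis 1 keeps the rank and
    # gives [2, 1, 4].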

    @staticmethod
    def argmaxOp(ser, a, axis):
        shape = a.shape.copy()
        del shape[axis]
        return ser.addOutput(shape, DType.INT32)
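    # e.g. (illustrative): argmax of [2, 3, 4] along axis 1 removes the axis
    # entirely, giving [2, 4] with dtype INT32.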

    @staticmethod
    def conv2dOp(ser, ifm, filter, strides, padding, dilations):

        # IFM: NHWC
        # Filter: OHWI
        # OFM: NHWC

        if len(padding) == 2:
            # Expand padding to 4 parameters in the case of transpose_conv2d
            # From H,W to T,B,L,R
            padding = [padding[0], padding[0], padding[1], padding[1]]

        h = (
            ifm.shape[1]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        w = (
            ifm.shape[2]
            - filter.shape[2]
            - (filter.shape[2] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1
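
        # Illustrative arithmetic (assumed values, not from the test suite):
        # ifm H = 16, filter H = 3, dilation 1, padding (1, 1), stride 2 gives
        # h = (16 - 3 - 0 + 1 + 1) // 2 + 1 = 8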

        ofm_shape = [ifm.shape[0], h, w, filter.shape[0]]

        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)

    @staticmethod
    def depthwiseConv2dOp(ser, ifm, filter, strides, padding, dilations):
        # IFM: NHWC
        # Filter: HWCM
        # OFM: NHW C*M
        h = (
            ifm.shape[1]
            - filter.shape[0]
            - (filter.shape[0] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        w = (
            ifm.shape[2]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1

        ofm_shape = [ifm.shape[0], h, w, filter.shape[2] * filter.shape[3]]

        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)
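    # e.g. (illustrative): an HWCM filter of shape [3, 3, 8, 2] over an NHWC
    # input with C = 8 yields C * M = 16 output channels.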

    @staticmethod
    def pool2dOp(ser, ifm, kernel, stride, pad):
        # input: NHWC
        h = (ifm.shape[1] + pad[0] + pad[1] + stride[0] - kernel[0]) // stride[0]
        w = (ifm.shape[2] + pad[2] + pad[3] + stride[1] - kernel[1]) // stride[1]
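
        # Illustrative arithmetic (assumed values): H = 32, pad (0, 0),
        # stride 2, kernel 2 gives h = (32 + 0 + 0 + 2 - 2) // 2 = 16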

        ofm_shape = [ifm.shape[0], h, w, ifm.shape[3]]
        return ser.addOutput(ofm_shape, ifm.dtype)

    @staticmethod
    def fullyConnectedOp(ser, input, filter):
        # input: N, IC
        # filter: OC, IC
        # output: N, OC

        output_shape = [input.shape[0], filter.shape[0]]

        if input.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif input.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif input.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(input.dtype))

        return ser.addOutput(output_shape, out_dtype)

    @staticmethod
    def matmulOp(ser, a, b):
        # a: N, H, C
        # b: N, C, W
        # out: N, H, W

        output_shape = [a.shape[0], a.shape[1], b.shape[2]]

        if a.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif a.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif a.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype for matmul: {}".format(a.dtype))

        return ser.addOutput(output_shape, out_dtype)
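    # e.g. (illustrative): a.shape == [2, 3, 4] and b.shape == [2, 4, 5]
    # produce an output shape of [2, 3, 5].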

    @staticmethod
    def concatOp(ser, axis, *a):
        input1 = a[0]
        remaining_inputs = a[1:]

        output_shape = input1.shape.copy()

        output_shape[axis] = input1.shape[axis]

        for tensor in remaining_inputs:
            output_shape[axis] += tensor.shape[axis]

        return ser.addOutput(output_shape, input1.dtype)
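    # e.g. (illustrative): concatenating [2, 3, 4] and [2, 5, 4] along axis 1
    # gives [2, 8, 4]; every non-axis dimension must already match.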

    @staticmethod
    def padOp(ser, a, padding):

        output_shape = a.shape.copy()

        for i in range(len(output_shape)):
            output_shape[i] = padding[i][0] + padding[i][1] + output_shape[i]

        return ser.addOutput(output_shape, a.dtype)
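    # e.g. (illustrative): padding [[1, 1], [2, 2]] applied to shape [3, 4]
    # gives [1 + 1 + 3, 2 + 2 + 4] == [5, 8].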

    @staticmethod
    def reshapeOp(ser, a, shape):
        output_shape = shape.copy()

        totalElements = 1
        for i in a.shape:
            totalElements *= i

        # If there are any -1 elements, figure out what that dimension must be
        totalOutputElements = 1
        for i in output_shape:
            if i != -1:
                totalOutputElements *= i

        # And fill it in
        for i in range(len(output_shape)):
            if output_shape[i] == -1:
                output_shape[i] = totalElements // totalOutputElements

        return ser.addOutput(output_shape, a.dtype)
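    # Worked example (illustrative): a.shape == [2, 3, 4] holds 24 elements,
    # so a requested shape of [4, -1] resolves the -1 to 24 // 4 == 6,
    # i.e. [4, 6].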

    @staticmethod
    def sliceOp(ser, a, begin, size):

        output_shape = size.copy()
        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def tileOp(ser, a, multiples):

        output_shape = a.shape.copy()
        assert len(multiples) == len(output_shape)

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[i] * multiples[i]

        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def transposeOp(ser, a, perms):
        output_shape = a.shape.copy()
        assert len(perms) == len(output_shape)

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[perms[i]]

        return ser.addOutput(output_shape, a.dtype)
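    # e.g. (illustrative): perms == [0, 3, 1, 2] maps shape [1, 16, 16, 8]
    # to [1, 8, 16, 16], since output dim i takes a.shape[perms[i]].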

    @staticmethod
    def gatherOp(ser, values, indices):
        assert len(values.shape) == 3
        assert len(indices.shape) == 2
        assert values.shape[0] == indices.shape[0]

        output_shape = [values.shape[0], indices.shape[1], values.shape[2]]

        return ser.addOutput(output_shape, values.dtype)
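    # e.g. (illustrative): values of shape [N, K, C] == [2, 10, 4] gathered
    # with indices of shape [N, W] == [2, 6] yield [2, 6, 4].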

    @staticmethod
    def scatterOp(ser, values_in, indices, input):
        assert len(values_in.shape) == 3
        assert len(indices.shape) == 2
        assert len(input.shape) == 3
        assert values_in.shape[0] == indices.shape[0]  # N
        assert input.shape[1] == indices.shape[1]  # W
        assert values_in.shape[2] == input.shape[2]  # C

        output_shape = values_in.shape

        return ser.addOutput(output_shape, values_in.dtype)

    @staticmethod
    def tableOp(ser, input, table_dtype):
        # Same shape as the input, but dtype dependent on table dtype
        assert table_dtype == DType.INT16 or table_dtype == DType.INT8
        output_dtype = DType.INT32 if table_dtype == DType.INT16 else DType.INT8
        return ser.addOutput(input.shape, output_dtype)

    @staticmethod
    def resizeOp(
        ser,
        input,
        mode,
        stride,
        offset,
        shift,
        stride_fp,
        offset_fp,
        output_dims,
        input_dtype,
        output_dtype,
    ):

        output_dims = [input.shape[0], output_dims[0], output_dims[1], input.shape[3]]

        return ser.addOutput(output_dims, output_dtype)

    @staticmethod
    def typeConversionOp(ser, val, out_dtype):
        return ser.addOutput(val.shape, out_dtype)

    @staticmethod
    def transposeConv2DOp(ser, ifm, output_shape):
        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(output_shape, out_dtype)
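    # Note: unlike conv2dOp above, the output shape here is supplied by the
    # caller (transpose_conv2d carries an explicit output shape argument), so
    # only the accumulator dtype is derived in this method.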