#!/usr/bin/env python3

# Copyright (c) 2020-2021, ARM Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import numpy as np
import argparse
import sys
import re
import os
import subprocess
import shlex
import json
import glob
import math
import queue
import threading
import traceback
import itertools

from enum import IntEnum, Enum, unique
from tosa_ref_run import TosaReturnCode

# Include the ../thirdparty/serialization_lib/python directory in PYTHONPATH
parent_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(
    os.path.join(parent_dir, "..", "thirdparty", "serialization_lib", "python")
)
import tosa_serializer as ts
from tosa_serializer import *
import tosa

# Convenience variables for the flatc-generated types that should be enums, but aren't
DType = tosa.DType.DType()
Op = tosa.Op.Op()
ResizeMode = tosa.ResizeMode.ResizeMode()


class TosaQuantGen:
    """QuantizedInfo random generator helper functions. Specify with 'qgen': in the operator definition"""

    def __init__(self):
        pass

    @staticmethod
    def getQinfo(testGen, dtype):
        if dtype == DType.INT8:
            return testGen.randInt(-128, 128)
        if dtype == DType.UINT8:
            return testGen.randInt(0, 256)
        return 0

    @staticmethod
    def qgUnary(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.UnaryQuantInfo(
            TosaQuantGen.getQinfo(testGen, dtype), TosaQuantGen.getQinfo(testGen, dtype)
        )
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype_or_dtypeList):
        qinfo = ts.TosaSerializerQuantInfo()
        if isinstance(dtype_or_dtypeList, list):
            # a list of [input, weights, accumulator] dtypes
            dtypeList = dtype_or_dtypeList
        else:
            # an int, [input, weights, accumulator] dtypes are the same
            dtypeList = [dtype_or_dtypeList] * 3
        input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0])
        weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1])
        qinfo.ConvQuantInfo(input_zp, weights_zp)
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.MatMulQuantInfo(
            TosaQuantGen.getQinfo(testGen, dtype), TosaQuantGen.getQinfo(testGen, dtype)
        )
        return qinfo

    @staticmethod
    def qgPad(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.PadQuantInfo(TosaQuantGen.getQinfo(testGen, dtype))
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        # Derived from computeMultiplierAndShiftTosaScale32
        # Provide a floating-point scaling factor and the scale32 parameter
        # to compute the multiplier and shift
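        # Worked example (illustrative values, not from the reference model):
        #   computeMultiplierAndShift(0.8, True) -> (1717986918, 31)
        # since math.frexp(0.8) == (0.8, 0), multiplier = round(0.8 * 2**31),
        # and 0.8 is then approximated by multiplier * 2**-31.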

        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert multiplier <= (1 << scaleBits)

        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits
        # print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(scaleFp, scaleBits, m, multiplier, shift))

        assert multiplier <= (1 << scaleBits)
        assert shift >= 0 and shift <= 63

        return multiplier, shift


class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator. The actual random data is generated separately for each test."""

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank):
        pl, const = opName["operands"]

        assert rank == 4

        shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            shape[0] = (shape[0] % testGen.args.max_batch_size) + 1

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgScatter(testGen, opName, rank):
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        assert rank == 3

        values_in_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            values_in_shape[0] = (values_in_shape[0] % testGen.args.max_batch_size) + 1

        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        input_shape = [values_in_shape[0], W, values_in_shape[2]]

        shape_list = []
        shape_list.append(values_in_shape.copy())
        shape_list.append(input_shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank):
        shape = testGen.makeShape(rank)

        pl, const = op["operands"]

        shape_list = []

        # Choose one of the inputs to broadcast
        bcast_idx = testGen.randInt(0, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list

    @staticmethod
    def tgConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        # Filter is KH, KW, C, M
        filter_hw = op["filter"]

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
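        # (Illustrative: with C = ifm_shape[3] = 8 and filter_m = 2, the
        # depthwise output depth, and hence the bias length, is 16.)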
        filter_m = (
            testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgFullyConnected(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 2

        input_shape = testGen.makeShape(rank)
        filter_oc = testGen.rng.integers(
            low=testGen.args.tensor_shape_range[0],
            high=testGen.args.tensor_shape_range[1],
            size=1,
        )[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 3
        assert pl == 2 and const == 0

        a_shape = testGen.makeShape(rank)
        # Get a random number for b_oc even if target shape is defined
        b_oc = np.int32(
            testGen.rng.integers(
                low=testGen.args.tensor_shape_range[0],
                high=testGen.args.tensor_shape_range[1],
                size=1,
            )
        )[0]
        # If N or H is large let b_oc be 1 to reduce output tensor size
        if max(a_shape) > 1000:
            b_oc = 1

        b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])
        return [a_shape, b_shape]

    @staticmethod
    def tgConcat(testGen, opName, rank):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Create extra tensors to concat.
        # Take into account value of pl when getting maximum number of concats
        num_tensors = testGen.randInt(0, 4)
        shape_list = []
        for i in range(pl + const + num_tensors):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgConcatConstInput(testGen, shapeList, axis):
        # Split concat shape along axis to allow for multiple const inputs
        # without making too many large tensors
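        # For example (illustrative): a shapeList of three [2, 8] shapes with
        # axis=1 is rewritten to [[2, 8], [2, 4], [2, 4]].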
        shape = shapeList[0]
        if len(shapeList) == 2 or shape[axis] < len(shapeList):
            return shapeList

        new_shapeList = [shape.copy()]
        length_on_axis = shape[axis]
        remaining_length = length_on_axis
        for i in range(len(shapeList) - 2):
            # Calculate split on axis and remaining value
            split_shape_val = int(shape[axis] / 2)
            remaining_length = remaining_length - split_shape_val

            # Append new shape, and set remaining shape
            shape[axis] = split_shape_val
            new_shapeList.append(shape.copy())
            shape[axis] = remaining_length
            if i == len(shapeList) - 3:
                new_shapeList.append(shape.copy())

        return new_shapeList


class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for operators that take
    attributes or other parameters. The return value is a list of (descriptive_name, [arglist])
    tuples where the descriptive_name is appended to the test name and the arglist is expanded
    as arguments to the operator build function."""
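    # For example (illustrative): agAxis on a rank-2 input shape returns
    # [("axis0", [0]), ("axis1", [1])]; the string is appended to the test
    # name and the inner list is expanded into the build function's arguments.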

    def __init__(self):
        pass

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype):
        """A trivial argument generator for operators that don't take any
        non-tensor arguments"""
        return [("", [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype):
        """Build the axis argument for operators that take a single axis"""
        axes = []

        shape = shapeList[0]

        for a in range(0, len(shape)):
            axes.append(("axis{}".format(a), [a]))
        return axes

    @staticmethod
    def agConv2D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert len(ifm_shape) == 4
        assert len(filter_shape) == 4

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
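        # Each flat loop index below is decoded digit-by-digit into per-axis
        # values (illustrative: with maxStride = 2, stride index 3 decodes to
        # s = [3 // 2 + 1, 3 % 2 + 1] = [2, 2]).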
        for stride in range(0, maxStride ** 2):
            for padding in range(0, (maxPadding) ** 4):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    p = [
                        (padding // (maxPadding * 4)) % maxPadding,
                        (padding // (maxPadding * 2)) % maxPadding,
                        (padding // (maxPadding * 1)) % maxPadding,
                        padding % maxPadding,
                    ]
                    d = [dilation // maxDilation + 1, dilation % maxDilation + 1]

                    # 4 padding parameters for regular conv2d
                    arg_list.append(
                        (
                            "st{}{}_pad{}{}{}{}_dilat{}{}".format(
                                s[0], s[1], p[0], p[1], p[2], p[3], d[0], d[1]
                            ),
                            [s, p, d],
                        )
                    )
        return arg_list

    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert len(ifm_shape) == 4
        assert len(filter_shape) == 4

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for out_padding in range(0, (maxPadding) ** 2):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    p = [
                        (out_padding // (maxPadding * 1)) % maxPadding,
                        out_padding % maxPadding,
                    ]
                    d = [dilation // maxDilation + 1, dilation % maxDilation + 1]

                    oh = (
                        ifm_shape[1]
                        - filter_shape[1]
                        - (filter_shape[1] - 1) * (d[0] - 1)
                        + 2 * p[0]
                    ) // s[0] + 1

                    ow = (
                        ifm_shape[2]
                        - filter_shape[2]
                        - (filter_shape[2] - 1) * (d[1] - 1)
                        + 2 * p[1]
                    ) // s[1] + 1

                    # Output shape
                    os = [ifm_shape[0], oh, ow, filter_shape[0]]

                    arg_list.append(
                        (
                            "st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}".format(
                                s[0],
                                s[1],
                                p[0],
                                p[1],
                                d[0],
                                d[1],
                                os[0],
                                os[1],
                                os[2],
                                os[3],
                            ),
                            [s, p, d, os],
                        )
                    )

        return arg_list

    @staticmethod
    def agPad(testGen, opName, shapeList, dtype):
        arg_list = []
        rank = len(shapeList[0])

        # Exhaustively test combinations of padding on each side of each dimension
        # - the range of padding values is defined by pad_min and pad_max
        # - for padding >9, the name format needs to be more distinctive
        pad_min, pad_max = 0, 1
        pad_values = [x for x in range(pad_min, pad_max + 1)]
        axis_pad_values = [x for x in itertools.product(pad_values, pad_values)]
        shape_pad_values = itertools.product(*([axis_pad_values] * rank))
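        # For example (illustrative): with rank 2 and pad values {0, 1} this
        # yields 4**2 = 16 combinations, from ((0, 0), (0, 0)) - test name
        # "pad0000" - up to ((1, 1), (1, 1)).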

        for paddings in shape_pad_values:
            name = "pad"
            for r in range(rank):
                before, after = paddings[r]
                name = f"{name}{before}{after}"
            arg_list.append((name, [np.array(paddings)]))

        return arg_list

    @staticmethod
    def agPooling(testGen, opName, shapeList, dtype):
        arg_list = []

        shape = shapeList[0]
        assert len(shape) == 4

        maxStride = testGen.args.max_pooling_stride
        maxKernel = testGen.args.max_pooling_kernel
        maxPadding = testGen.args.max_pooling_padding + 1

        for kernel in range(0, maxKernel ** 2):
            for stride in range(0, maxStride ** 2):
                for padding in range(0, maxPadding ** 4):
                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    k = [(kernel // maxKernel) + 2, (kernel % maxKernel) + 2]
                    p = [
                        (padding // (maxPadding * 4)) % maxPadding,
                        (padding // (maxPadding * 2)) % maxPadding,
                        (padding // (maxPadding * 1)) % maxPadding,
                        padding % maxPadding,
                    ]

                    arg_list.append(
                        (
                            "st{}{}_kern{}{}_pad{}{}{}{}".format(
                                s[0], s[1], k[0], k[1], p[0], p[1], p[2], p[3]
                            ),
                            [k, s, p],
                        )
                    )
        return arg_list

    @staticmethod
    def agCast(testGen, opName, shapeList, inDtype):
        arg_list = []

        # Enumerate the output types here
        if inDtype == DType.INT8:
            dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT16:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT32:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        elif inDtype == DType.BOOL:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif inDtype == DType.FLOAT:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        else:
            raise Exception("Unexpected input dtype: {}".format(inDtype))

        for dtype in dtypeList:
            arg_list.append(("out{}".format(DTypeNames[dtype]), [dtype]))

        return arg_list

    @staticmethod
    def agRescale(testGen, opName, shapeList, inDtype):
        arg_list = []

        # Enumerate the output types here
        for dtype in [DType.UINT8, DType.INT8, DType.INT16, DType.INT32]:
            if inDtype == DType.UINT8 and dtype != DType.INT8:
                # The only output dtype for UINT8 is INT8, skip all other combinations
                continue
            if inDtype != DType.INT8 and dtype == DType.UINT8:
                # The only input dtype for UINT8 is INT8, skip all other combinations
                continue

            for scale32 in [False, True]:
                for double_round in [False, True]:
                    for per_channel in [False, True]:

                        if inDtype == DType.INT48 and scale32:
                            # Illegal condition. Must be scale32=False
                            continue
                        if double_round and not scale32:
                            # Illegal condition. ERROR_IF(!scale32 && double_round)
                            continue

                        arg_list.append(
                            (
                                "out{}_sc{}_dr{}_pc{}".format(
                                    DTypeNames[dtype],
                                    int(scale32),
                                    int(double_round),
                                    int(per_channel),
                                ),
                                [dtype, scale32, double_round, per_channel],
                            )
                        )

        return arg_list

    @staticmethod
    def agMul(testGen, opName, shapeList, dtype):
        arg_list = []

        if dtype is DType.INT32:
            for p in range(testGen.args.num_rand_permutations):

                shift = testGen.randInt(0, 32)

                arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
        else:
            arg_list.append(("perm0_shift0", [0]))

        return arg_list

    @staticmethod
    def agArithmeticRightShift(testGen, opName, shapeList, dtype):
        arg_list = []

        arg_list.append(("roundTrue", [True]))
        arg_list.append(("roundFalse", [False]))

        return arg_list

    # Helper function for reshape. Gets some factors of a larger number.
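    # For example (illustrative):
    #   >>> TosaArgGen.getFactors(12)
    #   [1, 2, 3]
    # Only factors up to sqrt(val) are collected; the matching cofactors fall
    # out of the repeated division in agReshape below.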
    @staticmethod
    def getFactors(val, start=1):
        factors = []

        for i in range(start, int(np.sqrt(val)) + 1):
            if (val % i) == 0:
                factors.append(i)

        return factors

    @staticmethod
    def agReshape(testGen, opName, shapeList, dtype):
        arg_list = []

        origShape = shapeList[0]

        totalElements = 1
        for s in origShape:
            totalElements *= s

        # This code is NOT fast. Fortunately, the numbers are fairly small.
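        # Sketch of one draw (illustrative): with totalElements = 24 and
        # newRank = 3, picking factor 2 (leaving 12) and then 3 (leaving 4)
        # gives newShape = [2, 3, 4].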
        factors = TosaArgGen.getFactors(totalElements)

        for p in range(testGen.args.num_rand_permutations):
            newRank = testGen.randInt(1, 7)
            if len(factors) < newRank:
                continue

            found = True
            # escape_counter breaks while loop if it continues on for too long
            escape_counter = 0
            while found:
                newShape = []
                # Generate newShape ensuring it isn't a duplicate
                remainingElements = totalElements
                shuffledFactors = testGen.rng.permutation(factors)
                for i in range(1, newRank):
                    # pick rank-1 factors
                    newShape.append(shuffledFactors[0])
                    remainingElements = remainingElements // shuffledFactors[0]
                    shuffledFactors = testGen.rng.permutation(
                        TosaArgGen.getFactors(remainingElements)
                    )
                newShape.append(remainingElements)

                # Toss in a -1 sometimes
                minusOne = testGen.randInt(0, newRank * 4)
                if minusOne < newRank:
                    newShape[minusOne] = -1

                # Check for duplicates
                found = False
                for name, other_shape in arg_list:
                    if other_shape[0] == newShape:
                        found = True
                        break

                escape_counter += 1
                if escape_counter >= 100:
                    break

            if not found:
                arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))

        return arg_list

    @staticmethod
    def agTranspose(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]

        # Get all permutations
        permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]

        # Limit to possible permutations from shape dimension or argument setting
        limit = min(len(permutations), testGen.args.num_rand_permutations)

        # Get random permutation generator that uses all permutations
        random_permutations = testGen.rng.permutation(permutations)

        # Create list of required amount of permutations
        arg_list = [
            ("perm{}".format(p), [random_permutations[p].tolist()])
            for p in range(limit)
        ]
        return arg_list

    @staticmethod
    def agSlice(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):
            begin = []
            size = []

            valid = True

            for i in range(rank):
                if ifm_shape[i] > 1:
                    begin.append(testGen.randInt(0, ifm_shape[i]))
                    size.append(testGen.randInt(0, ifm_shape[i] - begin[i]))

                    # Invalid slice size?
                    if size[i] == 0:
                        valid = False
                else:
                    begin.append(0)
                    size.append(1)

            if valid:
                arg_list.append(("perm{}".format(p), [begin, size]))
        return arg_list

    @staticmethod
    def agTile(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):

            # Pick a few random, but small multiple values
            # because otherwise this has a tendency to generate
            # enormous tensors
            multiples = []
            for i in range(rank):
                multiples.append(testGen.randInt(1, 4))

            arg_list.append(("perm{}".format(p), [multiples]))

        return arg_list

    @staticmethod
    def agResize(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]

        for m in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:

            # Exclude illegal {mode, type} configurations. Pick legal output types
            if m == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif m == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            elif dtype == DType.FLOAT:
                outputDTypeList = [DType.FLOAT]
            else:
                continue

            for outputDType in outputDTypeList:
                for perm in range(testGen.args.num_rand_permutations):

                    # Randomly generate legal output dimensions and shift
                    # and then compute the stride and offset based on them
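                    # For example (illustrative): a 16x16 input resized to
                    # 8x8 gives fp_stride_y = 16 / 8 = 2.0; with shift = 11
                    # the integer stride below becomes round(2.0 * 2**11) = 4096.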
                    output_dims = [testGen.randInt(1), testGen.randInt(1)]
                    in_center_h = (ifm_shape[1] - 1) / 2.0
                    in_center_w = (ifm_shape[2] - 1) / 2.0
                    out_center_h = (output_dims[0] - 1) / 2.0
                    out_center_w = (output_dims[1] - 1) / 2.0

                    fp_stride_y = float(ifm_shape[1]) / float(output_dims[0])
                    fp_stride_x = float(ifm_shape[2]) / float(output_dims[1])
                    fp_offset_y = in_center_h - fp_stride_y * out_center_h
                    fp_offset_x = in_center_w - fp_stride_x * out_center_w

                    if outputDType == DType.FLOAT:
                        shift = 0
                        stride = [0, 0]
                        offset = [0, 0]
                        stride_fp = [fp_stride_y, fp_stride_x]
                        offset_fp = [fp_offset_y, fp_offset_x]
                        arg_list.append(
                            (
                                "mode{}_odim{}x{}_out{}_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}".format(
                                    "N" if m == ResizeMode.NEAREST else "B",
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride_fp[0],
                                    stride_fp[1],
                                    offset_fp[0],
                                    offset_fp[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )
                    else:
                        shift = 11
                        unit = float(1 << shift)
                        stride_y = int(round(fp_stride_y * unit))
                        stride_x = int(round(fp_stride_x * unit))
                        offset_y = int(round(fp_offset_y * unit))
                        offset_x = int(round(fp_offset_x * unit))

                        while (
                            stride_y >= 32768
                            or stride_x >= 32768
                            or offset_y >= 32768
                            or offset_x >= 32768
                            or offset_y < -32768
                            or offset_x < -32768
                        ):
                            shift = shift - 1
                            unit = float(1 << shift)
                            stride_y = int(round(fp_stride_y * unit))
                            stride_x = int(round(fp_stride_x * unit))
                            offset_y = int(round(fp_offset_y * unit))
                            offset_x = int(round(fp_offset_x * unit))

                        stride = [stride_y, stride_x]
                        offset = [offset_y, offset_x]

                        stride_fp = [0.0, 0.0]
                        offset_fp = [0.0, 0.0]

                        arg_list.append(
                            (
                                "mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}".format(
                                    "N" if m == ResizeMode.NEAREST else "B",
                                    shift,
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride[0],
                                    stride[1],
                                    offset[0],
                                    offset[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )

        return arg_list

    @staticmethod
    def agCondIf(testGen, opName, shapeList, dtype):
        # CondIf generates the condition values here.
        # Convert to tensors in the build function, along with the
        # then and else blocks
        arg_list = []

        for c in [False, True]:
            arg_list.append(("cond{}".format(int(c)), [c]))

        return arg_list

    @staticmethod
    def agWhileLoop(testGen, opName, shapeList, dtype):
        # While loop: 0 iterations, 1, more than 1
        arg_list = []

        for iter in [0, 1, 4]:
            arg_list.append(("iter{}".format(iter), [iter]))

        return arg_list


class TosaTestGen:
    # Maximum rank of tensor supported by test generator.
    TOSA_TENSOR_MAX_RANK = 6

    def __init__(self, args):
        self.args = args
        self.basePath = args.output_dir
        self.random_seed = args.random_seed
        self.ser = None
        self.rng = np.random.default_rng(self.random_seed)
        self.createDynamicOpLists()
        self.initOpListDefaults()
        self.quantGen = TosaQuantGen()
        # Force makeShape to do a specific starting shape
        self.targetted_shape = None

    def createSerializer(self, opName, testPath):
        self.testPath = os.path.join(opName, testPath)

        fullPath = os.path.join(self.basePath, self.testPath)
        os.makedirs(fullPath, exist_ok=True)
        self.ser = ts.TosaSerializer(fullPath)

    def getSerializer(self):
        return self.ser

    def serialize(self, testName):
        with open(
            os.path.join(self.basePath, self.testPath, "{}.tosa".format(testName)), "wb"
        ) as fd:
            fd.write(self.ser.serialize())

        with open(os.path.join(self.basePath, self.testPath, "desc.json"), "w") as fd:
            fd.write(self.ser.writeJson("{}.tosa".format(testName)))

    def getRandTensor(self, shape, dtype):
        if dtype == DType.BOOL:
            return np.bool_(self.rng.choice(a=[False, True], size=shape))
        # TOSA specific INT4 weight range from -7 to 7
        elif dtype == DType.INT4:
            return np.int32(self.rng.integers(low=-7, high=8, size=shape))
        elif dtype == DType.INT8:
            return np.int32(self.rng.integers(low=-128, high=128, size=shape))
        elif dtype == DType.UINT8:
            return np.int32(self.rng.integers(low=0, high=256, size=shape))
        elif dtype == DType.INT16:
            return np.int32(self.rng.integers(low=-32768, high=32768, size=shape))
        elif dtype == DType.INT32:
            return np.int32(
                self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape)
            )
        elif dtype == DType.INT48:
            return np.int64(
                self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape)
            )
        elif dtype == DType.FLOAT:
            return np.float32(self.rng.random(size=shape))
        else:
            raise Exception("Unrecognized Dtype: {}".format(dtype))

    def buildPlaceholderTensors(self, shape_list, dtype_list):
        placeholders = []

        assert len(shape_list) == len(dtype_list)

        for idx, shape in enumerate(shape_list):
            arr = self.getRandTensor(shape, dtype_list[idx])
            placeholders.append(self.ser.addPlaceholder(shape, dtype_list[idx], arr))

        return placeholders

    def buildConstTensors(self, shape_list, dtype_list):
        consts = []

        assert len(shape_list) == len(dtype_list)

        for idx, shape in enumerate(shape_list):
            arr = self.getRandTensor(shape, dtype_list[idx])
            consts.append(self.ser.addConst(shape, dtype_list[idx], arr))

        return consts

    def makeShape(self, rank):
        if self.targetted_shape:
            return np.int32(self.targetted_shape)
        return np.int32(
            self.rng.integers(
                low=self.args.tensor_shape_range[0],
                high=self.args.tensor_shape_range[1],
                size=rank,
            )
        )

    def setTargetShape(self, shape):
        self.targetted_shape = shape

    def randInt(self, low=0, high=256):
        return np.int32(self.rng.integers(low=low, high=high, size=1))[0]

    def getRandNumberDType(self, dtype):
        if dtype == DType.FLOAT:
            return self.rng.random()
        elif dtype == DType.BOOL:
            return self.rng.choice([False, True])
        # TOSA specific INT4 weight range from -7 to 7
        elif dtype == DType.INT4:
            low, high = (-7, 8)
        elif dtype == DType.INT8:
            low, high = (-128, 128)
        elif dtype == DType.INT16:
            low, high = (-32768, 32768)
        elif dtype == DType.INT32:
            low, high = (-(1 << 31), (1 << 31))
        elif dtype == DType.INT48:
            low, high = (-(1 << 47), (1 << 47))
            # Special size
            return np.int64(self.rng.integers(low, high, size=1))[0]
        else:
            raise Exception("Unknown dtype: {}".format(dtype))

        return np.int32(self.rng.integers(low, high, size=1))[0]

    def shapeStr(self, shape):

        sStr = []
        # Convert to strings
        for i in shape:
            sStr.append(str(i))

        return "x".join(sStr)

    def typeStr(self, t):
        if isinstance(t, list):
            assert len(t) >= 2
            return "{}x{}".format(self.typeStr(t[0]), self.typeStr(t[1]))
        else:
            if t == DType.BOOL:
                return "b"
            elif t == DType.INT4:
                return "i4"
            elif t == DType.INT8:
                return "i8"
            elif t == DType.UINT8:
                return "u8"
            elif t == DType.INT16:
                return "i16"
            elif t == DType.INT32:
                return "i32"
            elif t == DType.INT48:
                return "i48"
            elif t == DType.FLOAT:
                return "float"
            else:
                raise Exception("Unknown dtype, cannot convert to string: {}".format(t))

    def typeWidth(self, t):
        """Get the datatype width for integer types"""
        if t == DType.INT4:
            return 4
        elif t == DType.INT8:
            return 8
        elif t == DType.UINT8:
            return 8
        elif t == DType.INT16:
            return 16
        elif t == DType.INT32:
            return 32
        elif t == DType.INT48:
            return 48
        else:
            raise Exception("Unknown dtype, cannot determine width: {}".format(t))

    # Argument generators
    # Returns a list of tuples (stringDescriptor, [build_fcn_arg_list])
    # Where the string descriptor is used to generate the test name and
    # The build_fcn_arg_list is expanded and passed to the operator test
    # build function

    def build_unary(self, op, a, qinfo=None):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_binary_broadcast(self, op, a, b):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_binary_nonbroadcast(self, op, a, b):
        result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_arithmetic_right_shift(self, op, a, b, round):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)

        attr = ts.TosaSerializerAttribute()
        attr.ArithmeticRightShiftAttribute(round)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens

    def build_mul(self, op, a, b, shift):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)

        # Special for multiply:
        # Force the result to INT32 for INT types
        if a.dtype != DType.FLOAT:
            result_tens.setDtype(DType.INT32)

        attr = ts.TosaSerializerAttribute()
        attr.MulAttribute(shift)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens

    def build_table(self, op, a):
        # Constant size depending on type, random values
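        # (Assumption from the TOSA spec: INT16 TABLE uses a 513-entry table,
        # indexed by the top 9 bits with the low 7 bits interpolating between
        # adjacent entries; INT8 only needs a direct 256-entry lookup.)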
        if a.dtype == DType.INT16:
            table_dtype = DType.INT16
            table_arr = self.getRandTensor([513], table_dtype)
        else:
            assert a.dtype == DType.INT8
            table_dtype = DType.INT8
            table_arr = self.getRandTensor([256], table_dtype)

        table_tens = self.ser.addConst(table_arr.shape, table_dtype, table_arr)
        result_tens = OutputShaper.tableOp(self.ser, a, table_dtype)
        self.ser.addOperator(op, [a.name, table_tens.name], [result_tens.name], None)

        return result_tens

    def build_select(self, op, cond, a, b):
        result_tens = OutputShaper.selectOp(self.ser, cond, a, b)
        self.ser.addOperator(op, [cond.name, a.name, b.name], [result_tens.name])
        return result_tens

    def build_comparison(self, op, a, b):
        result_tens = OutputShaper.binaryComparisonOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_argmax(self, op, a, axis):
        result_tens = OutputShaper.argmaxOp(self.ser, a, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_pool2d(self, op, input, kernel, stride, pad, qinfo=None):
        result_tens = OutputShaper.pool2dOp(self.ser, input, kernel, stride, pad)

        attr = ts.TosaSerializerAttribute()
        attr.Pool2dAttribute(kernel, stride, pad)

        self.ser.addOperator(op, [input.name], [result_tens.name], attr, qinfo)
        return result_tens

    def build_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
        assert len(padding) == 4
        result_tens = OutputShaper.conv2dOp(
            self.ser, ifm, filter, strides, padding, dilations
        )

        attr = ts.TosaSerializerAttribute()
        attr.Conv2dAttribute(padding, strides, dilations)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
        )
        return result_tens

    def build_transpose_conv2d(
        self, op, ifm, filter, bias, stride, outpad, dilation, output_shape, qinfo
    ):
        assert len(outpad) == 2
        result_tens = OutputShaper.transposeConv2DOp(self.ser, ifm, output_shape)

        attr = ts.TosaSerializerAttribute()
        attr.TransposeConv2DAttribute(outpad, stride, dilation, output_shape)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
        )
        return result_tens

    def build_depthwise_conv2d(
        self, op, ifm, filter, bias, strides, padding, dilations, qinfo
    ):
        result_tens = OutputShaper.depthwiseConv2dOp(
            self.ser, ifm, filter, strides, padding, dilations
        )

        attr = ts.TosaSerializerAttribute()
        attr.Conv2dAttribute(padding, strides, dilations)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
        )
        return result_tens

    def build_fully_connected(self, op, ifm, filter, bias, qinfo):
        result_tens = OutputShaper.fullyConnectedOp(self.ser, ifm, filter)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo
        )
        return result_tens

    def build_matmul(self, op, a, b, qinfo):
        result_tens = OutputShaper.matmulOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_reduce(self, op, a, axis):
        result_tens = OutputShaper.reduceOp(self.ser, a, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_clamp(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()
        v = [self.getRandNumberDType(a.dtype), self.getRandNumberDType(a.dtype)]

        if a.dtype == DType.FLOAT:
            attr.ClampAttribute(0, 0, min(v), max(v))
        else:
            attr.ClampAttribute(min(v), max(v), 0, 0)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_leaky_relu(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        attr = ts.TosaSerializerAttribute()

        attr.LeakyReluAttribute(self.getRandNumberDType(DType.FLOAT))

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    # Needs an additional type/input
    def build_prelu(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_relun(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()

        if a.dtype == DType.FLOAT:
            attr.ReluNAttribute(0, self.getRandNumberDType(a.dtype))
        else:
            attr.ReluNAttribute(self.getRandNumberDType(a.dtype), 0)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_sigmoid(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_tanh(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_concat(self, op, *a):
        assert type(a[-1]) == int

        # To store variable length list of input tensors we need to store axis along with it
        axis = a[-1]
        a = a[:-1]

        result_tens = OutputShaper.concatOp(self.ser, axis, *a)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        input_tensor_names = []
        for tensor in a:
            input_tensor_names.append(tensor.name)

        self.ser.addOperator(op, input_tensor_names, [result_tens.name], attr)
        return result_tens

1338 def build_pad(self, op, a, padding, qinfo):
1339 result_tens = OutputShaper.padOp(self.ser, a, padding)
1340
1341 # Need to turn the padding array into a TOSA tensor here.
1342 # This is one of the few tensor operands that does not get
1343 # randomly generated
Kevin Cheng550ccc52021-03-03 11:21:43 -08001344 padding_tens = self.ser.addConst(padding.shape, DType.INT32, padding)
Eric Kunzee5e26762020-10-13 16:11:07 -07001345
Kevin Cheng550ccc52021-03-03 11:21:43 -08001346 self.ser.addOperator(
1347 op, [a.name, padding_tens.name], [result_tens.name], None, qinfo
1348 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001349
1350 def build_reshape(self, op, a, newShape):
1351 result_tens = OutputShaper.reshapeOp(self.ser, a, newShape)
1352
1353 attr = ts.TosaSerializerAttribute()
1354 attr.ReshapeAttribute(newShape)
1355
1356 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1357 return result_tens
1358
1359 def build_reverse(self, op, a, axis):
1360 result_tens = OutputShaper.unaryOp(self.ser, a)
1361
1362 attr = ts.TosaSerializerAttribute()
1363 attr.AxisAttribute(axis)
1364
1365 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1366 return result_tens
1367
1368 def build_transpose(self, op, a, perms):
1369 result_tens = OutputShaper.transposeOp(self.ser, a, perms)
1370
Kevin Cheng550ccc52021-03-03 11:21:43 -08001371 perms_tens = self.ser.addConst([len(perms)], DType.INT32, np.int32(perms))
Eric Kunzee5e26762020-10-13 16:11:07 -07001372
1373 self.ser.addOperator(op, [a.name, perms_tens.name], [result_tens.name])
1374 return result_tens
1375
1376 def build_slice(self, op, a, begin, size):
1377 result_tens = OutputShaper.sliceOp(self.ser, a, begin, size)
1378
1379 attr = ts.TosaSerializerAttribute()
1380 attr.SliceAttribute(begin, size)
1381
1382 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1383 return result_tens
1384
1385 def build_tile(self, op, a, multiples):
1386 result_tens = OutputShaper.tileOp(self.ser, a, multiples)
1387
1388 attr = ts.TosaSerializerAttribute()
1389 attr.TileAttribute(multiples)
1390
1391 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1392 return result_tens
1393
Kevin Cheng77d0f762020-11-24 10:26:32 -08001394 def build_gather(self, op, values):
Eric Kunzee5e26762020-10-13 16:11:07 -07001395
1396 # Create a new indicies tensor
1397 # here with data that doesn't exceed the dimensions of the values tensor
1398
Kevin Cheng550ccc52021-03-03 11:21:43 -08001399 K = values.shape[1] # K
1400 W = self.randInt(
1401 self.args.tensor_shape_range[0], self.args.tensor_shape_range[1]
1402 ) # W
1403 indicies_arr = np.int32(
1404 self.rng.integers(low=0, high=K, size=[values.shape[0], W])
1405 ) # (N, W)
1406 indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001407
Kevin Cheng77d0f762020-11-24 10:26:32 -08001408 result_tens = OutputShaper.gatherOp(self.ser, values, indicies)
Eric Kunzee5e26762020-10-13 16:11:07 -07001409
Kevin Cheng77d0f762020-11-24 10:26:32 -08001410 self.ser.addOperator(op, [values.name, indicies.name], [result_tens.name])
Eric Kunzee5e26762020-10-13 16:11:07 -07001411
1412 return result_tens
1413
Kevin Cheng77d0f762020-11-24 10:26:32 -08001414 def build_scatter(self, op, values_in, input):
1415
1416 # Create a new indicies tensor
1417 # here with data that doesn't exceed the dimensions of the values_in tensor
1418
Kevin Cheng550ccc52021-03-03 11:21:43 -08001419 K = values_in.shape[1] # K
1420 W = input.shape[1] # W
1421 indicies_arr = np.int32(
1422 self.rng.integers(low=0, high=K, size=[values_in.shape[0], W])
1423 ) # (N, W)
1424 indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
Kevin Cheng77d0f762020-11-24 10:26:32 -08001425
1426 result_tens = OutputShaper.scatterOp(self.ser, values_in, indicies, input)
1427
Kevin Cheng550ccc52021-03-03 11:21:43 -08001428 self.ser.addOperator(
1429 op, [values_in.name, indicies.name, input.name], [result_tens.name]
1430 )
Kevin Cheng77d0f762020-11-24 10:26:32 -08001431
1432 return result_tens
1433
Kevin Cheng550ccc52021-03-03 11:21:43 -08001434 def build_resize(
1435 self,
1436 op,
1437 input,
1438 mode,
1439 stride,
1440 offset,
1441 shift,
1442 stride_fp,
1443 offset_fp,
1444 output_dims,
1445 input_dtype,
1446 output_dtype,
1447 ):
1448 result_tens = OutputShaper.resizeOp(
1449 self.ser,
1450 input,
1451 mode,
1452 stride,
1453 offset,
1454 shift,
1455 stride_fp,
1456 offset_fp,
1457 output_dims,
1458 input_dtype,
1459 output_dtype,
1460 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001461
1462 attr = ts.TosaSerializerAttribute()
Kevin Cheng77d0f762020-11-24 10:26:32 -08001463
Kevin Cheng550ccc52021-03-03 11:21:43 -08001464 attr.ResizeAttribute(
1465 output_dims, stride, offset, shift, stride_fp, offset_fp, mode
1466 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001467
1468 self.ser.addOperator(op, [input.name], [result_tens.name], attr)
1469 return result_tens
1470
1471 def build_identityn(self, op, val, val2):
1472
Kevin Cheng550ccc52021-03-03 11:21:43 -08001473 result_tens = OutputShaper.unaryOp(self.ser, val)
Eric Kunzee5e26762020-10-13 16:11:07 -07001474 result_tens2 = OutputShaper.unaryOp(self.ser, val2)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001475 self.ser.addOperator(
1476 op, [val.name, val2.name], [result_tens.name, result_tens2.name]
1477 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001478 return result_tens
1479
1480 def build_placeholder(self, op, val):
1481 # Add an identity op to avoid warning in the reference model
1482 return self.build_unary(Op.IDENTITY, val)
1483
1484 # Type Conversion
1485 def build_cast(self, op, val, out_dtype):
1486 result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
1487 self.ser.addOperator(op, [val.name], [result_tens.name])
1488 return result_tens
1489
1490 def build_rescale(self, op, val, out_dtype, scale32, double_round, per_channel):
1491 result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
1492
1493 if per_channel:
1494 nc = val.shape[-1]
1495 else:
1496 nc = 1
1497
1498 in_type_width = self.typeWidth(val.dtype)
1499 out_type_width = self.typeWidth(out_dtype)
1500
Kevin Cheng3a478572021-01-22 17:21:02 -08001501 if val.dtype == DType.INT8:
Matthew Haddoncac4ee92021-07-22 14:30:53 +01001502 input_zp = self.randInt(-128, 128)
1503 in_type_width = in_type_width + 1
Kevin Chengacb550f2021-06-29 15:32:19 -07001504 elif val.dtype == DType.UINT8:
Matthew Haddoncac4ee92021-07-22 14:30:53 +01001505 input_zp = self.randInt(0, 256)
Eric Kunzee5e26762020-10-13 16:11:07 -07001506 in_type_width = in_type_width + 1
1507 else:
1508 input_zp = 0
1509
Kevin Cheng3a478572021-01-22 17:21:02 -08001510 if out_dtype == DType.INT8:
Matthew Haddoncac4ee92021-07-22 14:30:53 +01001511 output_zp = self.randInt(-128, 128)
1512 out_type_width = out_type_width + 1
1513 elif out_dtype == DType.UINT8:
1514 output_zp = self.randInt(0, 256)
Eric Kunzee5e26762020-10-13 16:11:07 -07001515 out_type_width = out_type_width + 1
1516 else:
1517 output_zp = 0
1518
1519 # Calculate scale based on:
1520 # scale = a * (2^output_width) / (2^input_width)
1521
1522 a = np.float32(self.rng.random(size=[nc]))
1523 scale_arr = a * np.float32((1 << out_type_width) / (1 << in_type_width))
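# Worked example (illustrative values): rescaling INT8 -> INT16 gives
# in_type_width = 9 (8 bits plus 1 for the zero point) and
# out_type_width = 16, so a random a = 0.5 yields a scale of
# 0.5 * (1 << 16) / (1 << 9) = 64.0 before clipping.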
1524
1525 if scale32:
1527 # Cap the scaling at 2^31 - 1 for scale32
1528 scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1)
1529 else:
1530 # Cap the scaling at 2^15 - 1 for scale16
1531 scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)
1532
Kevin Cheng550ccc52021-03-03 11:21:43 -08001533 # print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))
Eric Kunzee5e26762020-10-13 16:11:07 -07001534
1535 multiplier_arr = np.int32(np.zeros(shape=[nc]))
1536 shift_arr = np.int32(np.zeros(shape=[nc]))
1537
1538 for i in range(nc):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001539 multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(
1540 scale_arr[i], scale32
1541 )
Kevin Chengaee1fac2020-11-11 13:54:06 -08001542 if shift_arr[i] < 2 or shift_arr[i] > 62:
Kevin Chengacb550f2021-06-29 15:32:19 -07001543 self.ser.setExpectedReturnCode(
1544 TosaReturnCode.UNPREDICTABLE, "OpRescale: invalid shift value"
1545 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001546
Kevin Cheng550ccc52021-03-03 11:21:43 -08001547 # print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp))
Eric Kunzee5e26762020-10-13 16:11:07 -07001548
1549 attr = ts.TosaSerializerAttribute()
Kevin Cheng550ccc52021-03-03 11:21:43 -08001550 attr.RescaleAttribute(
1551 input_zp,
1552 output_zp,
1553 multiplier_arr,
1554 shift_arr,
1555 scale32,
1556 double_round,
1557 per_channel,
1558 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001559
1560 self.ser.addOperator(op, [val.name], [result_tens.name], attr)
1561 return result_tens
1562
1563 def build_cond_if_const(self, op, then_tens, else_tens, cond):
1564 # For cond_if with constants, we're supplied with then/else tensors that we ignore
1565 # (except for the generated shape) and the condition. Build Then/Else blocks
1566 # and fill them with const nodes for the body.
1567
1568 # Condition tensor
Kevin Cheng550ccc52021-03-03 11:21:43 -08001569 cond_tens = self.ser.addConst([], DType.BOOL, [cond])
Eric Kunzee5e26762020-10-13 16:11:07 -07001570
1571 # Make then/else tensors
1572 out_shape = then_tens.shape
Jeremy Johnson18e26662021-07-22 16:15:29 +01001573 then_arr = np.int32(self.rng.integers(0, 256, size=out_shape))
1574 else_arr = np.int32(self.rng.integers(0, 256, size=out_shape))
Eric Kunzee5e26762020-10-13 16:11:07 -07001575
1576 # And the result tensor based on any of the outputs
Kevin Cheng550ccc52021-03-03 11:21:43 -08001577 result_tens = self.ser.addOutput(out_shape, DType.INT32)
Eric Kunzee5e26762020-10-13 16:11:07 -07001578
1579 # Create the attribute with the names of the then/else blocks
Kevin Cheng550ccc52021-03-03 11:21:43 -08001580 then_block = "THEN_BLOCK"
1581 else_block = "ELSE_BLOCK"
Eric Kunzee5e26762020-10-13 16:11:07 -07001582 attr = ts.TosaSerializerAttribute()
1583 attr.CondIfAttribute(then_block, else_block)
1584
1585 # Finally, build the op and the two blocks
1586 self.ser.addOperator(op, [cond_tens.name], [result_tens.name], attr)
1587
1588 self.ser.startBasicBlock(then_block)
1589 # Build the actual then/else tensors inside their blocks
Kevin Cheng550ccc52021-03-03 11:21:43 -08001590 then_tens = self.ser.addConst(out_shape, DType.INT32, then_arr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001591 self.ser.addOutputTensor(then_tens)
1592
1593 self.ser.startBasicBlock(else_block)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001594 else_tens = self.ser.addConst(out_shape, DType.INT32, else_arr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001595 self.ser.addOutputTensor(else_tens)
1596
1597 return result_tens
1598
1599 def build_cond_if_binary(self, op, a, b, cond):
1600 # For cond_if with a binary op in the then/else blocks, take a and b and
1601 # alternately add or subtract them based on the condition
1602
1603 # Condition tensor
Kevin Cheng550ccc52021-03-03 11:21:43 -08001604 cond_tens = self.ser.addConst([], DType.BOOL, [cond])
Eric Kunzee5e26762020-10-13 16:11:07 -07001605
Kevin Cheng550ccc52021-03-03 11:21:43 -08001606 result_tens = self.ser.addOutput(a.shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001607 self.ser.currBasicBlock.addOutput(result_tens.name)
1608
1609 # Create the attribute with the names of the then/else blocks
Kevin Cheng550ccc52021-03-03 11:21:43 -08001610 then_block = "THEN_BLOCK"
1611 else_block = "ELSE_BLOCK"
Eric Kunzee5e26762020-10-13 16:11:07 -07001612 attr = ts.TosaSerializerAttribute()
1613 attr.CondIfAttribute(then_block, else_block)
1614
1615 # Finally, build the op and the two blocks
Kevin Cheng550ccc52021-03-03 11:21:43 -08001616 self.ser.addOperator(
1617 op, [cond_tens.name, a.name, b.name], [result_tens.name], attr
1618 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001619
1620 self.ser.startBasicBlock(then_block)
1621 self.ser.addInputTensor(a)
1622 self.ser.addInputTensor(b)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001623 then_tens = self.ser.addOutput(a.shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001624 self.ser.addOperator(Op.ADD, [a.name, b.name], [then_tens.name])
1625
1626 self.ser.startBasicBlock(else_block)
1627 self.ser.addInputTensor(a)
1628 self.ser.addInputTensor(b)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001629 else_tens = self.ser.addOutput(a.shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001630 self.ser.addOperator(Op.SUB, [a.name, b.name], [else_tens.name])
1631
1632 return result_tens
1633
1634 def build_while_loop(self, op, a, iter_val):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001635 iter = self.ser.addPlaceholder([], DType.INT32, [np.int32(iter_val)])
Eric Kunzee5e26762020-10-13 16:11:07 -07001636
Kevin Cheng550ccc52021-03-03 11:21:43 -08001637 cond_block = "COND_BLOCK"
1638 body_block = "BODY_BLOCK"
Eric Kunzee5e26762020-10-13 16:11:07 -07001639
1640 attr = ts.TosaSerializerAttribute()
1641 attr.WhileLoopAttribute(cond_block, body_block)
1642
1643 # Accumulator tensor
Kevin Cheng550ccc52021-03-03 11:21:43 -08001644 # acc = self.ser.addOutput(a.shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001645 acc_init_val = np.int32(np.zeros(a.shape))
Kevin Cheng550ccc52021-03-03 11:21:43 -08001646 acc = self.ser.addPlaceholder(a.shape, a.dtype, acc_init_val)
Eric Kunzee5e26762020-10-13 16:11:07 -07001647
1648 # Intermediate/output tensors for everything going through the loop
Kevin Cheng550ccc52021-03-03 11:21:43 -08001649 iter_out = self.ser.addIntermediate(iter.shape, iter.dtype)
1650 a_out = self.ser.addIntermediate(a.shape, a.dtype)
1651 acc_out = self.ser.addIntermediate(acc.shape, acc.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001652
1653 # While_loop operator
Kevin Cheng550ccc52021-03-03 11:21:43 -08001654 self.ser.addOperator(
1655 op,
1656 [iter.name, a.name, acc.name],
1657 [iter_out.name, a_out.name, acc_out.name],
1658 attr,
1659 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001660
1661 # COND block (input: iter, output: cond_tens )
1662 self.ser.startBasicBlock(cond_block)
1663 self.ser.addInputTensor(iter)
1664 self.ser.addInputTensor(a)
1665 self.ser.addInputTensor(acc)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001666 zero_tens = self.ser.addConst([], DType.INT32, [np.int32(0)])
1667 cond_tens = self.ser.addOutput([], DType.BOOL)
1668 self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name], [cond_tens.name])
Eric Kunzee5e26762020-10-13 16:11:07 -07001669
1670 # BODY block (input: a, acc, iter, output: a, acc, iter)
1671 # Note that local intermediate tensors need to be declared here for the outputs
1672 self.ser.startBasicBlock(body_block)
1673 self.ser.addInputTensor(iter)
1674 self.ser.addInputTensor(a)
1675 self.ser.addInputTensor(acc)
Kevin Cheng550ccc52021-03-03 11:21:43 -08001676 one_tens = self.ser.addConst([], DType.INT32, [np.int32(1)])
1677 iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype)
1678 acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001679 self.ser.addOperator(Op.ADD, [a.name, acc.name], [acc_body_out.name])
1680 self.ser.addOperator(Op.SUB, [iter.name, one_tens.name], [iter_body_out.name])
1681 self.ser.addOutputTensor(iter_body_out)
1682 self.ser.addOutputTensor(a)
1683 self.ser.addOutputTensor(acc_body_out)
1684
1685 return acc_out
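# Illustrative semantics of the blocks above: the loop decrements iter
# from iter_val down to 0, adding 'a' into 'acc' on each pass, so the
# returned acc_out should equal a * iter_val elementwise.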
1686
Kevin Cheng550ccc52021-03-03 11:21:43 -08001687 def genOpTestList(
1688 self, opName, shapeFilter=[None], rankFilter=None, dtypeFilter=None
1689 ):
Eric Kunzee5e26762020-10-13 16:11:07 -07001690
1691 try:
1692 op = self.TOSA_OP_LIST[opName]
1693 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001694 raise Exception("Cannot find op with name {}".format(opName))
Eric Kunzee5e26762020-10-13 16:11:07 -07001695
1696 # Initialize a new random number generator
1697 self.rng = np.random.default_rng(self.random_seed)
1698
Kevin Cheng550ccc52021-03-03 11:21:43 -08001699 build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001700
1701 # Generate the lists of arguments
Kevin Cheng550ccc52021-03-03 11:21:43 -08001702 rmin, rmax = op["rank"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001703
Jeremy Johnson97eb75f2021-07-08 11:58:02 +01001704 # Create a default testing rank range, 1-4 inclusive to keep test sizes reasonably small.
1705 default_test_rank_range = range(1, 5)
1706
Eric Kunzee5e26762020-10-13 16:11:07 -07001707 # The test list consists of tuples of:
1708 # (opName, testNameStr, dtype, shapeList, argumentsList)
1709 testList = []
1710
1711 if not shapeFilter:
1712 shapeFilter = [None]
1713
1714 for r in range(rmin, rmax + 1):
1715
1716 # Filter out the rank?
1717 if rankFilter is not None and r not in rankFilter:
1718 continue
Kevin Chengacb550f2021-06-29 15:32:19 -07001719 if (
1720 rankFilter is None
1721 and shapeFilter[0] is None
1722 and r not in default_test_rank_range
1723 ):
Jeremy Johnson97eb75f2021-07-08 11:58:02 +01001724 continue
Eric Kunzee5e26762020-10-13 16:11:07 -07001725
Kevin Cheng550ccc52021-03-03 11:21:43 -08001726 for t in op["types"]:
Eric Kunzee5e26762020-10-13 16:11:07 -07001727
1728 # Filter tests based on dtype?
1729 if dtypeFilter is not None:
Kevin Chengacb550f2021-06-29 15:32:19 -07001730 if not (
1731 t in dtypeFilter
1732 or (isinstance(t, list) and t[0] in dtypeFilter)
1733 ):
Eric Kunzee5e26762020-10-13 16:11:07 -07001734 continue
1735
1736 # Create the placeholder and const tensors
1737 for shape in shapeFilter:
1738 # A None shape chooses a random shape of a given rank
1739
1740 # Filter out by rank
1741 if shape is not None and len(shape) != r:
1742 continue
1743
1744 self.setTargetShape(shape)
1745 shapeList = tgen_fcn(self, op, r)
1746
1747 shapeStr = self.shapeStr(shapeList[0])
1748 typeStr = self.typeStr(t)
1749
1750 # Argument lists consist of tuples of the (str, []) string representation and the build function argument list
1751 argList = []
1752 if agen_fcn:
1753 argList = agen_fcn(self, opName, shapeList, t)
1754 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001755 argList = [("", [])]
Eric Kunzee5e26762020-10-13 16:11:07 -07001756
1757 for argStr, args in argList:
1758 if argStr:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001759 testStr = "{}_{}_{}_{}".format(
1760 opName, shapeStr, typeStr, argStr
1761 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001762 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001763 testStr = "{}_{}_{}".format(opName, shapeStr, typeStr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001764
1765 testList.append((opName, testStr, t, shapeList, args))
1766
1767 return testList
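# Illustrative naming (hypothetical values): an "add" test on a rank-3
# shape [13, 21, 3] would be named "add_13x21x3_<type>", where <type> is
# whatever typeStr() returns for the dtype, with an "_<argStr>" suffix
# appended for ops whose argument generator supplies extra arguments.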
1768
Kevin Cheng989cb052021-04-28 16:29:44 -07001769 def serializeTest(self, opName, testStr, dtype_or_dtypeList, shapeList, testArgs):
Eric Kunzee5e26762020-10-13 16:11:07 -07001770 try:
1771 op = self.TOSA_OP_LIST[opName]
1772 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001773 raise Exception("Cannot find op with name {}".format(opName))
Eric Kunzee5e26762020-10-13 16:11:07 -07001774
1775 # Create a serializer
1776 self.createSerializer(opName, testStr)
1777
Kevin Cheng550ccc52021-03-03 11:21:43 -08001778 build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
1779 pCount, cCount = op["operands"]
Kevin Cheng989cb052021-04-28 16:29:44 -07001780 num_operands = pCount + cCount
1781
1782 if isinstance(dtype_or_dtypeList, list):
1783 dtypeList = dtype_or_dtypeList
Matthew Haddon818ab902021-07-27 09:12:49 +01001784 elif op['op'] == Op.CONCAT:
1785 dtypeList = [dtype_or_dtypeList] * len(shapeList)
Kevin Cheng989cb052021-04-28 16:29:44 -07001786 else:
1787 dtypeList = [dtype_or_dtypeList] * (num_operands)
1788
Matthew Haddon818ab902021-07-27 09:12:49 +01001789 if op['op'] != Op.CONCAT:
1790 assert (
1791 len(shapeList) == num_operands
1792 ), "shapeList length {} must match number of operands {}".format(
1793 len(shapeList), num_operands
1794 )
1795 assert (
1796 len(dtypeList) == num_operands
1797 ), "dtypeList length {} must match number of operands {}".format(
1798 len(dtypeList), num_operands
1799 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001800
1801 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001802 qgen = op["qgen"]
Eric Kunzee5e26762020-10-13 16:11:07 -07001803 except KeyError:
1804 qgen = None
1805
1806 # Build the random tensor operands and the test
1807 tens = []
Kevin Chengaee1fac2020-11-11 13:54:06 -08001808
1809 # If test is ArithmeticRightShift, force value of operand[1] to be within [0, num_bits]
Kevin Cheng550ccc52021-03-03 11:21:43 -08001810 if op["op"] == Op.ARITHMETIC_RIGHT_SHIFT:
1811 assert (
1812 pCount == 2 and cCount == 0
1813 ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"
Kevin Chengaee1fac2020-11-11 13:54:06 -08001814
1815 placeholders = []
1816 for idx, shape in enumerate(shapeList[:]):
1817 if idx == 1:
Kevin Cheng989cb052021-04-28 16:29:44 -07001818 if dtypeList[idx] == DType.INT8:
Kevin Chengaee1fac2020-11-11 13:54:06 -08001819 arr = np.int32(self.rng.integers(low=0, high=8, size=shape))
Kevin Cheng989cb052021-04-28 16:29:44 -07001820 elif dtypeList[idx] == DType.INT16:
Kevin Chengaee1fac2020-11-11 13:54:06 -08001821 arr = np.int32(self.rng.integers(low=0, high=16, size=shape))
Kevin Cheng989cb052021-04-28 16:29:44 -07001822 elif dtypeList[idx] == DType.INT32:
Kevin Chengaee1fac2020-11-11 13:54:06 -08001823 arr = np.int32(self.rng.integers(low=0, high=32, size=shape))
1824 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001825 raise Exception("OpArithmeticRightShift: invalid input dtype")
Kevin Chengaee1fac2020-11-11 13:54:06 -08001826 else:
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07001827 arr = self.getRandTensor(shape, dtypeList[idx])
Kevin Cheng989cb052021-04-28 16:29:44 -07001828 placeholders.append(self.ser.addPlaceholder(shape, dtypeList[idx], arr))
Kevin Chengaee1fac2020-11-11 13:54:06 -08001829
1830 tens.extend(placeholders)
Matthew Haddona44ac5e2021-07-27 16:31:16 +01001831 elif op["op"] == Op.SELECT:
1832 # Set datatype of condition tensor to boolean
1833 dtypeList[0] = DType.BOOL
1834 tens.extend(
1835 self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
1836 )
1837 tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07001838 elif op["op"] == Op.DIV:
1839 assert (
1840 pCount == 2 and cCount == 0
1841 ), "Op.Div must have 2 placeholders, 0 consts"
1842
1843 placeholders = []
1844
1845 # Two invalid cases for Op.DIV:
1846 # 1. divisor == 0
Kevin Cheng47315e12021-05-13 17:41:28 -07001847 # 2. dividend == -(1<<31) and divisor == -1
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07001848 while True:
1849 dividend_arr = self.getRandTensor(shapeList[0], dtypeList[0])
1850 divisor_arr = self.getRandTensor(shapeList[1], dtypeList[1])
1851
1852 if (divisor_arr == 0).any():
1853 continue
1854
Kevin Cheng47315e12021-05-13 17:41:28 -07001855 if (dividend_arr == -(2 ** 31)).any() and (divisor_arr == -1).any():
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07001856 continue
1857
1858 break
1859
1860 placeholders.append(
1861 self.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
1862 )
1863 placeholders.append(
1864 self.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
1865 )
1866
1867 tens.extend(placeholders)
1868 elif op["op"] == Op.MUL:
1869 assert (
1870 pCount == 2 and cCount == 0
1871 ), "Op.MUL must have 2 placeholders, 0 consts"
1872
1873 if dtypeList[0] == DType.FLOAT:
1874 tens.extend(self.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
1875 else:
1876 placeholders = []
1877
1878 # Make sure the multiply result fits in int32 range
1879 shift = testArgs[0]
1880 if dtypeList[0] == DType.INT8:
1881 num_bits = 8
1882 elif dtypeList[0] == DType.INT16:
1883 num_bits = 16
1884 elif dtypeList[0] == DType.INT32:
1885 num_bits = 32
1886 else:
1887 raise Exception("OpMul: invalid input dtype")
1888
1889 for idx, shape in enumerate(shapeList[:]):
1890 low = -(2 ** (num_bits - 1))
1891 high = (2 ** (num_bits - 1)) - 1
1892
1893 a_arr = np.int32(
1894 self.rng.integers(low=low, high=high, size=shapeList[0])
1895 )
1896 b_arr = np.int32(
1897 self.rng.integers(low=low, high=high, size=shapeList[1])
1898 )
1899
1900 i = 0
1901 while True:
1902
1903 a_arr_64 = a_arr.astype(np.int64)
1904 b_arr_64 = b_arr.astype(np.int64)
1905
1906 if shift > 0:
1907 rounding = 1 << (shift - 1)
1908 result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
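# Illustrative rounding (hypothetical values): with shift=2,
# rounding = 1 << 1 = 2, so (7 * 5 + 2) >> 2 == 37 >> 2 == 9,
# i.e. a round-half-up version of (7 * 5) / 4 = 8.75.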
1909 else:
1910 result_arr = a_arr_64 * b_arr_64
1911
1912 if (result_arr > -(2 ** 31)).all() and (
1913 result_arr <= ((2 ** 31) - 1)
1914 ).all():
1915 break
1916
1917 i = i + 1
1918 a_arr = a_arr // 2
1919 b_arr = b_arr // 2
1920
1921 placeholders.append(
1922 self.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
1923 )
1924 placeholders.append(
1925 self.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
1926 )
1927
1928 tens.extend(placeholders)
Matthew Haddon818ab902021-07-27 09:12:49 +01001929 elif op["op"] == Op.CONCAT:
1930 count = len(shapeList) - self.args.num_const_inputs_concat
1931 if count < 1:
1932 count = 1
1933 if self.args.num_const_inputs_concat == 0:
1934 count = len(shapeList)
1935
1936 shapeList = TosaTensorGen.tgConcatConstInput(self, shapeList, testArgs[0])
1937 tens.extend(
1938 self.buildPlaceholderTensors(shapeList[0:count], dtypeList[0:count])
1939 )
1940 tens.extend(self.buildConstTensors(shapeList[count:], dtypeList[count:]))
Kevin Chengaee1fac2020-11-11 13:54:06 -08001941 else:
Kevin Cheng989cb052021-04-28 16:29:44 -07001942 tens.extend(
1943 self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
1944 )
1945 tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))
Eric Kunzee5e26762020-10-13 16:11:07 -07001946
1947 if qgen is not None:
Les Bell30e46802021-07-23 09:43:31 +01001948 qinfo = qgen(self, op, dtype_or_dtypeList)
Eric Kunzee5e26762020-10-13 16:11:07 -07001949 else:
1950 qinfo = None
1951
1952 try:
1953 if qinfo is not None:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001954 resultName = build_fcn(self, op["op"], *tens, *testArgs, qinfo)
Eric Kunzee5e26762020-10-13 16:11:07 -07001955 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001956 resultName = build_fcn(self, op["op"], *tens, *testArgs)
Eric Kunzee5e26762020-10-13 16:11:07 -07001957 except TypeError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001958 print(
1959 "build_fcn: {}\nTensors: {}\nArgs: {}\n".format(
1960 build_fcn, tens, testArgs
1961 )
1962 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001963 raise e
1964
1965 # Save the serialized test
Kevin Cheng550ccc52021-03-03 11:21:43 -08001966 self.serialize("test")
Eric Kunzee5e26762020-10-13 16:11:07 -07001967
1968 def createDynamicOpLists(self):
1969
1970 # Dynamically create op lists for convolutions with a list of kernel sizes
Kevin Cheng550ccc52021-03-03 11:21:43 -08001971 KERNELS = [[1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3]]
Eric Kunzee5e26762020-10-13 16:11:07 -07001972
1973 for k in KERNELS:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001974 testName = "conv2d_{}x{}".format(k[0], k[1])
1975 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST["conv2d_TEMPLATE"].copy()
1976 self.TOSA_OP_LIST[testName]["filter"] = k
1977 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07001978
Kevin Cheng550ccc52021-03-03 11:21:43 -08001979 testName = "depthwise_conv2d_{}x{}".format(k[0], k[1])
1980 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
1981 "depthwise_conv2d_TEMPLATE"
1982 ].copy()
1983 self.TOSA_OP_LIST[testName]["filter"] = k
1984 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07001985
Kevin Cheng550ccc52021-03-03 11:21:43 -08001986 testName = "transpose_conv2d_{}x{}".format(k[0], k[1])
1987 self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
1988 "transpose_conv2d_TEMPLATE"
1989 ].copy()
1990 self.TOSA_OP_LIST[testName]["filter"] = k
1991 self.TOSA_OP_LIST[testName]["template"] = False
Eric Kunzee5e26762020-10-13 16:11:07 -07001992
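# Illustrative result: each kernel size yields concrete entries such as
# "conv2d_3x3" or "depthwise_conv2d_1x3", each one a copy of the matching
# *_TEMPLATE entry with its 'filter' field filled in.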
1993 # Delete any templates after having created any dynamic ops
1994 # This is a two-pass operation because it's bad practice to delete
1995 # keys from dictionaries while iterating
1996 keyList = []
1997 for k in self.TOSA_OP_LIST:
1998 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08001999 if self.TOSA_OP_LIST[k]["template"] == True:
Eric Kunzee5e26762020-10-13 16:11:07 -07002000 keyList.append(k)
2001 continue
2002 except KeyError:
2003 pass
2004
2005 for k in keyList:
2006 del self.TOSA_OP_LIST[k]
2007
2008 def initOpListDefaults(self):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002009 """Fill in default fields for ops if they aren't already specified.
2010 Look for missing required fields (datastructure linting)."""
Eric Kunzee5e26762020-10-13 16:11:07 -07002011 for op in self.TOSA_OP_LIST:
2012
2013 # Required fields
2014 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002015 pl, c = self.TOSA_OP_LIST[op]["operands"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002016 except (KeyError, ValueError, TypeError):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002017 raise Exception(
2018 "Op {} is missing a valid operand tuple in TOSA_OP_LIST".format(op)
2019 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002020
2021 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002022 fcn, tgen, arggen = self.TOSA_OP_LIST[op]["build_fcn"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002023 except (KeyError, ValueError, TypeError):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002024 raise Exception(
2025 "Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST".format(
2026 op
2027 )
2028 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002029
2030 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002031 types = self.TOSA_OP_LIST[op]["types"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002032 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002033 raise Exception(
2034 "Op {} is missing a valid type list in TOSA_OP_LIST".format(op)
2035 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002036
2037 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002038 opcode = self.TOSA_OP_LIST[op]["op"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002039 except KeyError as e:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002040 raise Exception(
2041 "Op {} is missing the Op field in TOSA_OP_LIST".format(op)
2042 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002043
2044 # Put in default rank range, if missing
2045 try:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002046 rank = self.TOSA_OP_LIST[op]["rank"]
Eric Kunzee5e26762020-10-13 16:11:07 -07002047 except KeyError:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002048 self.TOSA_OP_LIST[op]["rank"] = self.DEFAULT_RANK_RANGE
Eric Kunzee5e26762020-10-13 16:11:07 -07002049
2050 # Tensor operator list
2051 # 'op': op name
2052 # 'operands': tuple of (placeholder, const) operands
Kevin Cheng3a478572021-01-22 17:21:02 -08002053 # 'rank': optional, restricts rank to tuple inclusive of (min, max),
2054 # if not specified, defaults to (1, 4)
Eric Kunzee5e26762020-10-13 16:11:07 -07002055 # 'build_fcn': tuple of the function to (build_operator(), TensorGen function, ArgGen enum)
2056 # 'types': array of datatypes to be tested
Kevin Cheng550ccc52021-03-03 11:21:43 -08002057 TYPE_FP = [DType.FLOAT]
Eric Kunzee5e26762020-10-13 16:11:07 -07002058
Kevin Cheng550ccc52021-03-03 11:21:43 -08002059 TYPE_INT = [DType.INT8, DType.INT16, DType.INT32] # Excludes INT4
2060 TYPE_INT_FP = [DType.INT8, DType.INT16, DType.INT32, DType.FLOAT] # Excludes INT4
Eric Kunzee5e26762020-10-13 16:11:07 -07002061
Kevin Cheng550ccc52021-03-03 11:21:43 -08002062 TYPE_BOOL = [DType.BOOL]
2063 TYPE_FI32 = [DType.FLOAT, DType.INT32]
2064 TYPE_FIB = [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL]
2065 TYPE_FI16 = [DType.FLOAT, DType.INT16]
Eric Kunzee5e26762020-10-13 16:11:07 -07002066
Kevin Cheng550ccc52021-03-03 11:21:43 -08002067 TYPE_NARROW_INT_FP = [DType.INT8, DType.INT16, DType.FLOAT]
Eric Kunzee5e26762020-10-13 16:11:07 -07002068
Kevin Cheng989cb052021-04-28 16:29:44 -07002069 TYPE_CONV2D = [
Kevin Chenga9017402021-07-28 17:19:23 -07002070 [DType.INT8, DType.INT4, DType.INT32],
Kevin Cheng989cb052021-04-28 16:29:44 -07002071 [DType.INT8, DType.INT8, DType.INT32],
2072 [DType.INT16, DType.INT8, DType.INT48],
2073 DType.FLOAT,
2074 ]
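# Each sub-list in TYPE_CONV2D is an [input, weights, accumulator] dtype
# triple consumed by the conv quantization generator; the bare DType.FLOAT
# entry means all three types are float.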
2075
Jeremy Johnson97eb75f2021-07-08 11:58:02 +01002076 DEFAULT_RANK_RANGE = (1, TOSA_TENSOR_MAX_RANK)
Eric Kunzee5e26762020-10-13 16:11:07 -07002077
2078 TOSA_OP_LIST = {
Jared Smolens573ecd42021-03-04 15:24:10 -08002079 # Tensor operators
Kevin Cheng550ccc52021-03-03 11:21:43 -08002080 "argmax": {
2081 "op": Op.ARGMAX,
2082 "operands": (1, 0),
2083 "build_fcn": (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2084 "types": TYPE_NARROW_INT_FP,
2085 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002086 "avg_pool2d": {
2087 "op": Op.AVG_POOL2D,
2088 "operands": (1, 0),
2089 "rank": (4, 4),
2090 "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
2091 "qgen": TosaQuantGen.qgUnary,
2092 "types": TYPE_NARROW_INT_FP,
2093 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002094 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08002095 "conv2d_TEMPLATE": {
2096 "op": Op.CONV2D,
2097 "operands": (1, 2),
2098 "rank": (4, 4),
2099 "build_fcn": (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D),
2100 "qgen": TosaQuantGen.qgConv,
Kevin Cheng989cb052021-04-28 16:29:44 -07002101 "types": TYPE_CONV2D,
Kevin Cheng550ccc52021-03-03 11:21:43 -08002102 "template": True,
2103 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002104 # Conv3d TBD
Eric Kunzee5e26762020-10-13 16:11:07 -07002105 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08002106 "depthwise_conv2d_TEMPLATE": {
2107 "op": Op.DEPTHWISE_CONV2D,
2108 "operands": (1, 2),
2109 "filter": [1, 1],
2110 "rank": (4, 4),
2111 "build_fcn": (
2112 build_depthwise_conv2d,
2113 TosaTensorGen.tgDepthwiseConv2D,
2114 TosaArgGen.agConv2D,
2115 ),
2116 "qgen": TosaQuantGen.qgConv,
Kevin Cheng989cb052021-04-28 16:29:44 -07002117 "types": TYPE_CONV2D,
Kevin Cheng550ccc52021-03-03 11:21:43 -08002118 "template": True,
2119 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002120 "fully_connected": {
2121 "op": Op.FULLY_CONNECTED,
2122 "operands": (1, 2),
2123 "rank": (2, 2),
2124 "build_fcn": (build_fully_connected, TosaTensorGen.tgFullyConnected, None),
2125 "qgen": TosaQuantGen.qgConv,
2126 "types": TYPE_CONV2D,
2127 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002128 "matmul": {
2129 "op": Op.MATMUL,
2130 "operands": (2, 0),
Kevin Cheng2d60f002021-06-09 14:18:32 -07002131 "rank": (3, 3),
Jared Smolens573ecd42021-03-04 15:24:10 -08002132 "build_fcn": (build_matmul, TosaTensorGen.tgMatmul, None),
2133 "qgen": TosaQuantGen.qgMatmul,
2134 "types": TYPE_NARROW_INT_FP,
2135 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002136 "max_pool2d": {
2137 "op": Op.MAX_POOL2D,
2138 "operands": (1, 0),
2139 "rank": (4, 4),
2140 "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
2141 "types": TYPE_NARROW_INT_FP,
2142 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002143 # Templated operator. Filled in by createDynamicOpLists
Kevin Cheng550ccc52021-03-03 11:21:43 -08002144 "transpose_conv2d_TEMPLATE": {
2145 "op": Op.TRANSPOSE_CONV2D,
Kevin Cheng989cb052021-04-28 16:29:44 -07002146 "operands": (1, 2),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002147 "rank": (4, 4),
2148 "build_fcn": (
2149 build_transpose_conv2d,
2150 TosaTensorGen.tgTransposeConv2D,
2151 TosaArgGen.agTransposeConv2D,
2152 ),
2153 "qgen": TosaQuantGen.qgConv,
Kevin Cheng989cb052021-04-28 16:29:44 -07002154 "types": TYPE_CONV2D,
Kevin Cheng550ccc52021-03-03 11:21:43 -08002155 "template": True,
2156 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002157 # Activation functions
Kevin Cheng550ccc52021-03-03 11:21:43 -08002158 "clamp": {
2159 "op": Op.CLAMP,
2160 "operands": (1, 0),
2161 "build_fcn": (build_clamp, TosaTensorGen.tgBasic, None),
2162 "types": TYPE_NARROW_INT_FP,
2163 },
2164 "relun": {
2165 "op": Op.RELUN,
2166 "operands": (1, 0),
2167 "build_fcn": (build_relun, TosaTensorGen.tgBasic, None),
2168 "types": TYPE_FI32,
2169 },
2170 "sigmoid": {
2171 "op": Op.SIGMOID,
2172 "operands": (1, 0),
2173 "build_fcn": (build_sigmoid, TosaTensorGen.tgBasic, None),
2174 "types": TYPE_FP,
2175 },
2176 "tanh": {
2177 "op": Op.TANH,
2178 "operands": (1, 0),
2179 "build_fcn": (build_tanh, TosaTensorGen.tgBasic, None),
2180 "types": TYPE_FP,
2181 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002182 # Elementwise Binary Operators
2183 "add": {
2184 "op": Op.ADD,
2185 "operands": (2, 0),
2186 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2187 "types": TYPE_FI32,
2188 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002189 "arithmetic_right_shift": {
2190 "op": Op.ARITHMETIC_RIGHT_SHIFT,
2191 "operands": (2, 0),
2192 "build_fcn": (
2193 build_arithmetic_right_shift,
2194 TosaTensorGen.tgBroadcastFuzz,
2195 TosaArgGen.agArithmeticRightShift,
2196 ),
2197 "types": TYPE_INT,
2198 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002199 "bitwise_and": {
2200 "op": Op.BITWISE_AND,
2201 "operands": (2, 0),
2202 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2203 "types": TYPE_INT,
2204 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002205 "bitwise_or": {
2206 "op": Op.BITWISE_OR,
2207 "operands": (2, 0),
2208 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2209 "types": TYPE_INT,
2210 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002211 "bitwise_xor": {
2212 "op": Op.BITWISE_XOR,
2213 "operands": (2, 0),
2214 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2215 "types": TYPE_INT,
2216 },
Kevin Cheng14d7f7a2021-05-12 10:44:49 -07002217 "div": {
2218 "op": Op.DIV,
2219 "operands": (2, 0),
2220 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2221 "types": [DType.INT32],
2222 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002223 "logical_and": {
2224 "op": Op.LOGICAL_AND,
2225 "operands": (2, 0),
2226 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2227 "types": TYPE_BOOL,
2228 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002229 "logical_left_shift": {
2230 "op": Op.LOGICAL_LEFT_SHIFT,
2231 "operands": (2, 0),
2232 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2233 "types": TYPE_INT,
2234 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002235 "logical_right_shift": {
2236 "op": Op.LOGICAL_RIGHT_SHIFT,
2237 "operands": (2, 0),
2238 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2239 "types": TYPE_INT,
2240 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002241 "logical_or": {
2242 "op": Op.LOGICAL_OR,
2243 "operands": (2, 0),
2244 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2245 "types": TYPE_BOOL,
2246 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002247 "logical_xor": {
2248 "op": Op.LOGICAL_XOR,
2249 "operands": (2, 0),
2250 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2251 "types": TYPE_BOOL,
2252 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002253 "maximum": {
2254 "op": Op.MAXIMUM,
2255 "operands": (2, 0),
2256 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2257 "types": TYPE_FI32,
2258 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002259 "minimum": {
2260 "op": Op.MINIMUM,
2261 "operands": (2, 0),
2262 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2263 "types": TYPE_FI32,
2264 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002265 "mul": {
2266 "op": Op.MUL,
2267 "operands": (2, 0),
2268 "build_fcn": (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul),
2269 "types": TYPE_INT_FP,
2270 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002271 "pow": {
2272 "op": Op.POW,
2273 "operands": (2, 0),
2274 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBasic, None),
2275 "types": TYPE_FP,
2276 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002277 "sub": {
2278 "op": Op.SUB,
2279 "operands": (2, 0),
2280 "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
2281 "types": TYPE_FI32,
2282 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002283 "table": {
2284 "op": Op.TABLE,
2285 # Use the automatic generation functions to create the input array
2286 # but create the table tensor in the build function, as it may be
2287 # a different type from the input
2288 "operands": (1, 0),
2289 "build_fcn": (build_table, TosaTensorGen.tgBasic, None),
Jeremy Johnsonf54d8a22021-07-20 16:01:06 +01002290 "types": [DType.INT8, DType.INT16],
Jared Smolens573ecd42021-03-04 15:24:10 -08002291 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002292 # Elementwise Unary operators
2293 "abs": {
2294 "op": Op.ABS,
2295 "operands": (1, 0),
2296 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2297 "types": TYPE_FI32,
2298 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002299 "bitwise_not": {
2300 "op": Op.BITWISE_NOT,
2301 "operands": (1, 0),
2302 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2303 "types": TYPE_INT,
2304 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002305 "ceil": {
2306 "op": Op.CEIL,
2307 "operands": (1, 0),
2308 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2309 "types": TYPE_FP,
2310 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002311 "clz": {
2312 "op": Op.CLZ,
2313 "operands": (1, 0),
2314 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2315 "types": [DType.INT32],
2316 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002317 "exp": {
2318 "op": Op.EXP,
2319 "operands": (1, 0),
2320 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2321 "types": TYPE_FP,
2322 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002323 "floor": {
2324 "op": Op.FLOOR,
2325 "operands": (1, 0),
2326 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2327 "types": TYPE_FP,
2328 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002329 "log": {
2330 "op": Op.LOG,
2331 "operands": (1, 0),
2332 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2333 "types": TYPE_FP,
2334 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002335 "logical_not": {
2336 "op": Op.LOGICAL_NOT,
2337 "operands": (1, 0),
2338 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2339 "types": TYPE_BOOL,
2340 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002341 "negate": {
2342 "op": Op.NEGATE,
2343 "operands": (1, 0),
2344 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2345 "qgen": TosaQuantGen.qgUnary,
2346 "types": TYPE_INT_FP,
2347 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002348 "reciprocal": {
2349 "op": Op.RECIPROCAL,
2350 "operands": (1, 0),
2351 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2352 "types": TYPE_FP,
2353 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002354 "rsqrt": {
2355 "op": Op.RSQRT,
2356 "operands": (1, 0),
2357 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2358 "types": TYPE_FP,
2359 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002360 # Elementwise Ternary operators
2361 "select": {
2362 "op": Op.SELECT,
2363 "operands": (3, 0),
2364 "build_fcn": (build_select, TosaTensorGen.tgBroadcastFuzz, None),
2365 "types": TYPE_FIB,
2366 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002367 # Comparison operators
2368 "equal": {
2369 "op": Op.EQUAL,
2370 "operands": (2, 0),
2371 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2372 "types": TYPE_FI32,
2373 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002374 "greater_equal": {
2375 "op": Op.GREATER_EQUAL,
2376 "operands": (2, 0),
2377 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2378 "types": TYPE_FI32,
2379 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002380 "greater": {
2381 "op": Op.GREATER,
2382 "operands": (2, 0),
2383 "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
2384 "types": TYPE_FI32,
2385 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002386 # Reduction operators
2387 "reduce_all": {
2388 "op": Op.REDUCE_ALL,
2389 "operands": (1, 0),
2390 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2391 "types": TYPE_BOOL,
2392 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002393 "reduce_any": {
2394 "op": Op.REDUCE_ANY,
2395 "operands": (1, 0),
2396 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2397 "types": TYPE_BOOL,
2398 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002399 "reduce_max": {
2400 "op": Op.REDUCE_MAX,
2401 "operands": (1, 0),
2402 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2403 "types": TYPE_INT_FP,
2404 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002405 "reduce_min": {
2406 "op": Op.REDUCE_MAX,
2407 "operands": (1, 0),
2408 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2409 "types": TYPE_INT_FP,
2410 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002411 "reduce_product": {
2412 "op": Op.REDUCE_PRODUCT,
2413 "operands": (1, 0),
2414 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2415 "types": TYPE_FP,
2416 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002417 "reduce_sum": {
2418 "op": Op.REDUCE_SUM,
2419 "operands": (1, 0),
2420 "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2421 "types": TYPE_FI32,
2422 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002423 # Data layout operators
Kevin Cheng550ccc52021-03-03 11:21:43 -08002424 "concat": {
2425 "op": Op.CONCAT,
2426 "operands": (2, 0),
Matthew Haddon818ab902021-07-27 09:12:49 +01002427 "build_fcn": (build_concat, TosaTensorGen.tgConcat, TosaArgGen.agAxis),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002428 "types": TYPE_FIB,
2429 },
2430 "pad": {
2431 "op": Op.PAD,
2432 "operands": (1, 0),
2433 "build_fcn": (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad),
2434 "qgen": TosaQuantGen.qgPad,
2435 "types": TYPE_FIB,
2436 },
2437 "reshape": {
2438 "op": Op.RESHAPE,
2439 "operands": (1, 0),
2440 "build_fcn": (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
2441 "types": TYPE_FIB,
2442 },
2443 "reverse": {
2444 "op": Op.REVERSE,
2445 "operands": (1, 0),
2446 "build_fcn": (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
2447 "types": TYPE_FIB,
2448 },
2449 "slice": {
2450 "op": Op.SLICE,
2451 "operands": (1, 0),
2452 "build_fcn": (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
2453 "types": TYPE_FIB,
2454 },
2455 "tile": {
2456 "op": Op.TILE,
2457 "operands": (1, 0),
2458 "build_fcn": (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
2459 "types": TYPE_FIB,
2460 },
2461 "transpose": {
2462 "op": Op.TRANSPOSE,
2463 "operands": (1, 0),
Jeremy Johnsona6185572021-06-21 15:55:35 +01002464 "rank": (1, 4),
Kevin Cheng550ccc52021-03-03 11:21:43 -08002465 "build_fcn": (
2466 build_transpose,
2467 TosaTensorGen.tgBasic,
2468 TosaArgGen.agTranspose,
2469 ),
2470 "types": TYPE_FIB,
2471 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002472 # Data nodes
2473 "const": {
2474 "op": Op.CONST,
2475 "operands": (1, 0),
2476 "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None),
2477 "types": TYPE_FIB,
2478 },
Jared Smolens573ecd42021-03-04 15:24:10 -08002479 "identity": {
2480 "op": Op.IDENTITY,
2481 "operands": (1, 0),
2482 "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
2483 "types": TYPE_FIB,
2484 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002485 # Scatter/Gather
Kevin Cheng550ccc52021-03-03 11:21:43 -08002486 "gather": {
2487 "op": Op.GATHER,
2488 # Only specify 'values' tensor here. 'indices' is generated in op building stage
2489 "operands": (1, 0),
2490 "rank": (3, 3),
2491 "build_fcn": (build_gather, TosaTensorGen.tgBasic, None),
2492 "types": TYPE_INT_FP,
2493 },
2494 "scatter": {
2495 "op": Op.SCATTER,
2496 # Only specify 'values_in' tensor here.
2497 #'indices' and 'input' are generated in op building stage
2498 "operands": (2, 0),
2499 "rank": (3, 3),
2500 "build_fcn": (build_scatter, TosaTensorGen.tgScatter, None),
2501 "types": TYPE_INT_FP,
2502 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002503 # Image operations
Kevin Cheng550ccc52021-03-03 11:21:43 -08002504 "resize": {
2505 "op": Op.RESIZE,
2506 "operands": (1, 0),
2507 "rank": (4, 4),
2508 "build_fcn": (build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
2509 "types": [DType.INT8, DType.INT16, DType.FLOAT],
2510 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002511 # Type conversion
Kevin Cheng550ccc52021-03-03 11:21:43 -08002512 "cast": {
2513 "op": Op.CAST,
2514 "operands": (1, 0),
2515 "build_fcn": (build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast),
2516 "types": [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL],
2517 },
2518 "rescale": {
2519 "op": Op.RESCALE,
2520 "operands": (1, 0),
2521 "build_fcn": (build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale),
Matthew Haddoncac4ee92021-07-22 14:30:53 +01002522 "types": [DType.UINT8, DType.INT8, DType.INT16, DType.INT32, DType.INT48],
Kevin Cheng550ccc52021-03-03 11:21:43 -08002523 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002524 # Custom
2525 # Not implemented.
Jared Smolens573ecd42021-03-04 15:24:10 -08002526 # Control flow operators
Eric Kunzee5e26762020-10-13 16:11:07 -07002527 # Two variants of cond_if, one that generates one of two constant tensors (no
2528 # inputs to the basic blocks, one output) and another that either adds or subtracts two tensors
2529 # (two inputs to the basic blocks, one output)
Kevin Cheng550ccc52021-03-03 11:21:43 -08002530 "cond_if_const": {
2531 "op": Op.COND_IF,
2532 "operands": (0, 2),
2533 "build_fcn": (
2534 build_cond_if_const,
2535 TosaTensorGen.tgBasic,
2536 TosaArgGen.agCondIf,
2537 ),
2538 "types": [DType.BOOL],
2539 },
2540 "cond_if_binary": {
2541 "op": Op.COND_IF,
2542 "operands": (2, 0),
2543 "build_fcn": (
2544 build_cond_if_binary,
2545 TosaTensorGen.tgBasic,
2546 TosaArgGen.agCondIf,
2547 ),
2548 "types": TYPE_FI32,
2549 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002550 # while_loop
Kevin Cheng550ccc52021-03-03 11:21:43 -08002551 "while_loop": {
2552 "op": Op.WHILE_LOOP,
2553 "operands": (0, 1),
2554 "build_fcn": (
2555 build_while_loop,
2556 TosaTensorGen.tgBasic,
2557 TosaArgGen.agWhileLoop,
2558 ),
2559 "types": [DType.INT32],
2560 },
Eric Kunzee5e26762020-10-13 16:11:07 -07002561 }
2562
Kevin Cheng550ccc52021-03-03 11:21:43 -08002563
Eric Kunzee5e26762020-10-13 16:11:07 -07002564class OutputShaper:
2565 # Methods in this class compute the expected output shape and datatype
2566 # for common classes of operations
2567 def __init__(self):
2568 pass
2569
2570 # These methods return arguments that can be used for
2571 # creating a new output tensor
2572 @staticmethod
2573 def binaryBroadcastOp(ser, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002574 assert len(a.shape) == len(b.shape)
2575 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002576
2577 shape = []
2578 for i in range(len(a.shape)):
2579 if a.shape[i] == 1:
2580 shape.append(b.shape[i])
2581 else:
2582 shape.append(a.shape[i])
2583
Kevin Cheng550ccc52021-03-03 11:21:43 -08002584 return ser.addOutput(shape, a.dtype)
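# Worked example (illustrative shapes): a.shape = [1, 4, 1] broadcast
# against b.shape = [3, 4, 7] yields [3, 4, 7]; only size-1 dimensions
# of 'a' are widened, matching TOSA's rank-preserving broadcast rules.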
Eric Kunzee5e26762020-10-13 16:11:07 -07002585
2586 @staticmethod
2587 def binaryNonBroadcastOp(ser, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002588 assert len(a.shape) == len(b.shape)
2589 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002590
2591 shape = []
2592 for i in range(len(a.shape)):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002593 assert a.shape[i] == b.shape[i]
Eric Kunzee5e26762020-10-13 16:11:07 -07002594 shape.append(a.shape[i])
2595
Kevin Cheng550ccc52021-03-03 11:21:43 -08002596 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002597
2598 @staticmethod
2599 def unaryOp(ser, a):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002600 return ser.addOutput(a.shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002601
2602 @staticmethod
2603 def selectOp(ser, cond, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002604 assert len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape)
2605 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002606
2607 shape = []
2608 for i in range(len(a.shape)):
2609 shape.append(max(cond.shape[i], a.shape[i], b.shape[i]))
2610
Kevin Cheng550ccc52021-03-03 11:21:43 -08002611 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002612
2613 @staticmethod
2614 def binaryComparisonOp(ser, a, b):
Kevin Cheng550ccc52021-03-03 11:21:43 -08002615 assert len(a.shape) == len(b.shape)
2616 assert a.dtype == b.dtype
Eric Kunzee5e26762020-10-13 16:11:07 -07002617
2618 # Do broadcast
2619 shape = []
2620 for i in range(len(a.shape)):
2621 if a.shape[i] == 1:
2622 shape.append(b.shape[i])
2623 else:
2624 shape.append(a.shape[i])
2625
2626 # Force the output type to bool
Kevin Cheng550ccc52021-03-03 11:21:43 -08002627 return ser.addOutput(shape, DType.BOOL)
Eric Kunzee5e26762020-10-13 16:11:07 -07002628
2629 @staticmethod
2630 def reduceOp(ser, a, axis):
2631
2632 shape = a.shape.copy()
2633
2634 shape[axis] = 1
2635
Kevin Cheng550ccc52021-03-03 11:21:43 -08002636 return ser.addOutput(shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002637
2638 @staticmethod
2639 def argmaxOp(ser, a, axis):
2640 shape = a.shape.copy()
2641 del shape[axis]
Kevin Cheng550ccc52021-03-03 11:21:43 -08002642 return ser.addOutput(shape, DType.INT32)
Eric Kunzee5e26762020-10-13 16:11:07 -07002643
2644 @staticmethod
2645 def conv2dOp(ser, ifm, filter, strides, padding, dilations):
2646
2647 # IFM: NHWC
2648 # Filter: OHWI
2649 # OFM: NHWC
2650
2651 if len(padding) == 2:
2652 # Expand padding to 4 parameters in the case of transpose_conv2d
2653 # From H,W to T,B,L,R
2654 padding = [padding[0], padding[0], padding[1], padding[1]]
2655
Kevin Cheng550ccc52021-03-03 11:21:43 -08002656 h = (
2657 ifm.shape[1]
2658 - filter.shape[1]
2659 - (filter.shape[1] - 1) * (dilations[0] - 1)
2660 + padding[0]
2661 + padding[1]
2662 ) // strides[0] + 1
Eric Kunzee5e26762020-10-13 16:11:07 -07002663
Kevin Cheng550ccc52021-03-03 11:21:43 -08002664 w = (
2665 ifm.shape[2]
2666 - filter.shape[2]
2667 - (filter.shape[2] - 1) * (dilations[1] - 1)
2668 + padding[2]
2669 + padding[3]
2670 ) // strides[1] + 1
Eric Kunzee5e26762020-10-13 16:11:07 -07002671
2672 if h <= 0 or w <= 0:
2673 # Invalid test parameters?
2674 h = 0
2675 w = 0
Kevin Chengacb550f2021-06-29 15:32:19 -07002676 ser.setExpectedReturnCode(
2677 TosaReturnCode.UNPREDICTABLE, "Invalid combination of conv2d parameters"
2678 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002679
2680 ofm_shape = [ifm.shape[0], h, w, filter.shape[0]]
2681
Kevin Cheng3a478572021-01-22 17:21:02 -08002682 if ifm.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07002683 out_dtype = DType.INT32
2684 elif ifm.dtype == DType.INT16:
2685 out_dtype = DType.INT48
2686 elif ifm.dtype == DType.FLOAT:
2687 out_dtype = DType.FLOAT
2688 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002689 raise Exception("Unsupported input dtype: {}".format(ifm.dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07002690
Kevin Cheng550ccc52021-03-03 11:21:43 -08002691 return ser.addOutput(ofm_shape, out_dtype)
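# Worked example (illustrative values): ifm NHWC = [1, 8, 8, 4], filter
# OHWI = [16, 3, 3, 4], strides [1, 1], padding [0, 0, 0, 0], dilations
# [1, 1]: h = (8 - 3 - 0 + 0 + 0) // 1 + 1 = 6, and likewise w = 6, so
# the OFM shape is [1, 6, 6, 16].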
Eric Kunzee5e26762020-10-13 16:11:07 -07002692
2693 @staticmethod
2694 def depthwiseConv2dOp(ser, ifm, filter, strides, padding, dilations):
2695 # IFM: NHWC
2696 # Filter: HWCM
2697 # OFM: NHW C*M
Kevin Cheng550ccc52021-03-03 11:21:43 -08002698 h = (
2699 ifm.shape[1]
2700 - filter.shape[0]
2701 - (filter.shape[0] - 1) * (dilations[0] - 1)
2702 + padding[0]
2703 + padding[1]
2704 ) // strides[0] + 1
Eric Kunzee5e26762020-10-13 16:11:07 -07002705
Kevin Cheng550ccc52021-03-03 11:21:43 -08002706 w = (
2707 ifm.shape[2]
2708 - filter.shape[1]
2709 - (filter.shape[1] - 1) * (dilations[1] - 1)
2710 + padding[2]
2711 + padding[3]
2712 ) // strides[1] + 1
Eric Kunzee5e26762020-10-13 16:11:07 -07002713
2714 if h <= 0 or w <= 0:
2715 # Invalid test parameters?
2716 h = 0
2717 w = 0
Kevin Chengacb550f2021-06-29 15:32:19 -07002718 ser.setExpectedReturnCode(
2719 TosaReturnCode.UNPREDICTABLE, "Invalid combination of conv2d parameters"
2720 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002721
2722 ofm_shape = [ifm.shape[0], h, w, filter.shape[2] * filter.shape[3]]
2723
Kevin Cheng3a478572021-01-22 17:21:02 -08002724 if ifm.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07002725 out_dtype = DType.INT32
2726 elif ifm.dtype == DType.INT16:
2727 out_dtype = DType.INT48
2728 elif ifm.dtype == DType.FLOAT:
2729 out_dtype = DType.FLOAT
2730 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002731 raise Exception("Unsupported input dtype: {}".format(ifm.dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07002732
Kevin Cheng550ccc52021-03-03 11:21:43 -08002733 return ser.addOutput(ofm_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002734
2735 @staticmethod
2736 def pool2dOp(ser, ifm, kernel, stride, pad):
2737 # input: NHWC
2738 h = (ifm.shape[1] + pad[0] + pad[1] + stride[0] - kernel[0]) // stride[0]
2739 w = (ifm.shape[2] + pad[2] + pad[3] + stride[1] - kernel[1]) // stride[1]
2740
2741 if h <= 0 or w <= 0:
2742 # Invalid test parameters?
2743 h = 0
2744 w = 0
Kevin Chengacb550f2021-06-29 15:32:19 -07002745 ser.setExpectedReturnCode(
2746 TosaReturnCode.UNPREDICTABLE, "Invalid combination of pool2d parameters"
2747 )
Eric Kunzee5e26762020-10-13 16:11:07 -07002748
2749 ofm_shape = [ifm.shape[0], h, w, ifm.shape[3]]
Kevin Cheng550ccc52021-03-03 11:21:43 -08002750 return ser.addOutput(ofm_shape, ifm.dtype)
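# Worked example (illustrative values): ifm = [1, 8, 8, 4], kernel [2, 2],
# stride [2, 2], pad [0, 0, 0, 0]:
# h = (8 + 0 + 0 + 2 - 2) // 2 = 4, likewise w = 4, giving [1, 4, 4, 4].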
Eric Kunzee5e26762020-10-13 16:11:07 -07002751
2752 @staticmethod
2753 def fullyConnectedOp(ser, input, filter):
2754 # input: N, IC
2755 # filter: OC, IC
2756 # output: N, OC
2757
2758 output_shape = [input.shape[0], filter.shape[0]]
2759
Kevin Cheng3a478572021-01-22 17:21:02 -08002760 if input.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07002761 out_dtype = DType.INT32
2762 elif input.dtype == DType.INT16:
2763 out_dtype = DType.INT48
2764 elif input.dtype == DType.FLOAT:
2765 out_dtype = DType.FLOAT
2766 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002767 raise Exception("Unsupported input dtype: {}".format(input.dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07002768
Kevin Cheng550ccc52021-03-03 11:21:43 -08002769 return ser.addOutput(output_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002770
2771 @staticmethod
2772 def matmulOp(ser, a, b):
Kevin Cheng2d60f002021-06-09 14:18:32 -07002773 # a: N, H, C
2774 # b: N, C, W
2775 # out: N, H, W
Eric Kunzee5e26762020-10-13 16:11:07 -07002776
Kevin Cheng2d60f002021-06-09 14:18:32 -07002777 output_shape = [a.shape[0], a.shape[1], b.shape[2]]
Eric Kunzee5e26762020-10-13 16:11:07 -07002778
Kevin Cheng3a478572021-01-22 17:21:02 -08002779 if a.dtype == DType.INT8:
Eric Kunzee5e26762020-10-13 16:11:07 -07002780 out_dtype = DType.INT32
2781 elif a.dtype == DType.INT16:
2782 out_dtype = DType.INT48
2783 elif a.dtype == DType.FLOAT:
2784 out_dtype = DType.FLOAT
2785 else:
Kevin Cheng550ccc52021-03-03 11:21:43 -08002786 raise Exception("Unsupported input dtype for matmul: {}".format(a.dtype))
Eric Kunzee5e26762020-10-13 16:11:07 -07002787
Kevin Cheng550ccc52021-03-03 11:21:43 -08002788 return ser.addOutput(output_shape, out_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002789
2790 @staticmethod
Matthew Haddon818ab902021-07-27 09:12:49 +01002791 def concatOp(ser, axis, *a):
2792 input1 = a[0]
2793 remaining_inputs = a[1:]
Eric Kunzee5e26762020-10-13 16:11:07 -07002794
Matthew Haddon818ab902021-07-27 09:12:49 +01002795 output_shape = input1.shape.copy()
Eric Kunzee5e26762020-10-13 16:11:07 -07002796
Matthew Haddon818ab902021-07-27 09:12:49 +01002797 output_shape[axis] = input1.shape[axis]
2798
2799 for tensor in remaining_inputs:
2800 output_shape[axis] += tensor.shape[axis]
2801
2802 return ser.addOutput(output_shape, input1.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002803
2804 @staticmethod
2805 def padOp(ser, a, padding):
2806
2807 output_shape = a.shape.copy()
2808
2809 for i in range(len(output_shape)):
2810 output_shape[i] = padding[i][0] + padding[i][1] + output_shape[i]
2811
Kevin Cheng550ccc52021-03-03 11:21:43 -08002812 return ser.addOutput(output_shape, a.dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07002813
2814 @staticmethod
2815 def reshapeOp(ser, a, shape):
2816 output_shape = shape.copy()
2817
2818 totalElements = 1
2819 for i in a.shape:
2820 totalElements *= i
2821
2822 # If there are any -1 elements, figure out what that dimension must be
2823 totalOutputElements = 1
2824 for i in output_shape:
2825 if i != -1:
2826 totalOutputElements *= i
2827
2828 # And fill it in
2829 for i in range(len(output_shape)):
2830 if output_shape[i] == -1:
2831 output_shape[i] = totalElements // totalOutputElements
2832
Kevin Cheng550ccc52021-03-03 11:21:43 -08002833 return ser.addOutput(output_shape, a.dtype)
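# Worked example (illustrative values): reshaping a [2, 3, 4] tensor
# (24 elements) to shape [4, -1] fills in the -1 as 24 // 4 = 6,
# giving an output shape of [4, 6].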
Eric Kunzee5e26762020-10-13 16:11:07 -07002834
2835 @staticmethod
2836 def sliceOp(ser, a, begin, size):
2837
2838 output_shape = size.copy()
Kevin Cheng550ccc52021-03-03 11:21:43 -08002839 return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def tileOp(ser, a, multiples):

        output_shape = a.shape.copy()
        assert len(multiples) == len(output_shape)

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[i] * multiples[i]

        return ser.addOutput(output_shape, a.dtype)
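
    # Example for tileOp (illustrative): an input shape of [2, 3] with
    # multiples=[2, 2] gives an output shape of [4, 6].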

    @staticmethod
    def transposeOp(ser, a, perms):
        output_shape = a.shape.copy()
        assert len(perms) == len(output_shape)

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[perms[i]]

        return ser.addOutput(output_shape, a.dtype)
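
    # Example for transposeOp (illustrative): an input shape of [1, 2, 3]
    # with perms=[2, 0, 1] gives an output shape of [3, 1, 2], since output
    # dimension i takes its size from input dimension perms[i].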

    @staticmethod
    def gatherOp(ser, values, indices):
        assert len(values.shape) == 3
        assert len(indices.shape) == 2
        assert values.shape[0] == indices.shape[0]

        output_shape = [values.shape[0], indices.shape[1], values.shape[2]]

        return ser.addOutput(output_shape, values.dtype)
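
    # Example for gatherOp (illustrative): values of shape [N, K, C] =
    # [2, 5, 3] and indices of shape [N, W] = [2, 4] give an output shape
    # of [N, W, C] = [2, 4, 3].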

    @staticmethod
    def scatterOp(ser, values_in, indices, input):
        assert len(values_in.shape) == 3
        assert len(indices.shape) == 2
        assert len(input.shape) == 3
        assert values_in.shape[0] == indices.shape[0]  # N
        assert input.shape[1] == indices.shape[1]  # W
        assert values_in.shape[2] == input.shape[2]  # C

        output_shape = values_in.shape

        return ser.addOutput(output_shape, values_in.dtype)
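
    # Example for scatterOp (illustrative): values_in of shape [N, K, C] =
    # [2, 5, 3], indices of shape [N, W] = [2, 4], and input of shape
    # [N, W, C] = [2, 4, 3] give an output shape of [2, 5, 3], matching
    # values_in.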

    @staticmethod
    def tableOp(ser, input, table_dtype):
        # Same shape as the input, but dtype dependent on table dtype
        assert table_dtype == DType.INT16 or table_dtype == DType.INT8
        output_dtype = DType.INT32 if table_dtype == DType.INT16 else DType.INT8
        return ser.addOutput(input.shape, output_dtype)
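
    # Example for tableOp (illustrative): a [1, 5] input with an INT16 table
    # gives a [1, 5] INT32 output, while an INT8 table gives an INT8 output.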

    @staticmethod
    def resizeOp(
        ser,
        input,
        mode,
        stride,
        offset,
        shift,
        stride_fp,
        offset_fp,
        output_dims,
        input_dtype,
        output_dtype,
    ):

        output_dims = [input.shape[0], output_dims[0], output_dims[1], input.shape[3]]

        if input_dtype == DType.FLOAT:
            if stride_fp[0] <= 0 or stride_fp[1] <= 0:
                ser.setExpectedReturnCode(
                    TosaReturnCode.ERROR, "Negative or zero stride"
                )
        else:
            if stride[0] <= 0 or stride[1] <= 0:
                ser.setExpectedReturnCode(
                    TosaReturnCode.ERROR, "Negative or zero stride"
                )

        if mode == ResizeMode.BILINEAR:
            if input_dtype == DType.INT8:
                if output_dtype != DType.INT32:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            elif input_dtype == DType.INT16:
                if output_dtype != DType.INT48:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            elif input_dtype == DType.FLOAT:
                if output_dtype != DType.FLOAT:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            else:
                ser.setExpectedReturnCode(
                    TosaReturnCode.ERROR, "Invalid input data type"
                )

        elif mode == ResizeMode.NEAREST:
            if input_dtype == DType.INT8:
                if output_dtype != DType.INT8:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            elif input_dtype == DType.INT16:
                if output_dtype != DType.INT16:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            elif input_dtype == DType.FLOAT:
                if output_dtype != DType.FLOAT:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            else:
                ser.setExpectedReturnCode(
                    TosaReturnCode.ERROR, "Invalid input data type"
                )

        else:
            ser.setExpectedReturnCode(TosaReturnCode.ERROR, "Invalid resize mode")

        return ser.addOutput(output_dims, output_dtype)
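
    # Summary of the dtype pairings checked above: BILINEAR requires
    # INT8 -> INT32, INT16 -> INT48, or FLOAT -> FLOAT; NEAREST requires
    # INT8 -> INT8, INT16 -> INT16, or FLOAT -> FLOAT. Any other combination
    # flags an expected ERROR return code.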

    @staticmethod
    def typeConversionOp(ser, val, out_dtype):
        return ser.addOutput(val.shape, out_dtype)

    @staticmethod
    def transposeConv2DOp(ser, ifm, output_shape):
        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        if output_shape[1] <= 0 or output_shape[2] <= 0:
            ser.setExpectedReturnCode(
                TosaReturnCode.UNPREDICTABLE, "Negative or zero output shape"
            )

        return ser.addOutput(output_shape, out_dtype)
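
    # Example for transposeConv2DOp (illustrative, hypothetical shape): an
    # INT8 ifm with output_shape=[1, 32, 32, 16] gives an INT32 output of
    # that shape; a non-positive H or W in output_shape flags an expected
    # UNPREDICTABLE return code.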