#!/usr/bin/env python3

# Copyright (c) 2020-2021, ARM Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import argparse
import sys
import re
import os
import subprocess
import shlex
import json
import glob
import math
import queue
import threading
import traceback
import itertools

from enum import IntEnum, Enum, unique
from tosa_ref_run import TosaReturnCode

# Include the ../thirdparty/serialization_lib/python directory in PYTHONPATH
parent_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(
    os.path.join(parent_dir, "..", "thirdparty", "serialization_lib", "python")
)
import tosa_serializer as ts
from tosa_serializer import *
import tosa

# Convenience variables to the flatc-generated types that should be enums, but aren't
DType = tosa.DType.DType()
Op = tosa.Op.Op()
ResizeMode = tosa.ResizeMode.ResizeMode()


class TosaQuantGen:
    """QuantizedInfo random generator helper functions. Specify with 'qgen': in the operator definition"""

    def __init__(self):
        pass

    @staticmethod
    def getQinfo(testGen, dtype):
        if dtype == DType.INT8:
            return testGen.randInt(-128, 128)
        if dtype == DType.UINT8:
            return testGen.randInt(0, 256)
        return 0

    @staticmethod
    def qgUnary(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.UnaryQuantInfo(
            TosaQuantGen.getQinfo(testGen, dtype), TosaQuantGen.getQinfo(testGen, dtype)
        )
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype_or_dtypeList):
        qinfo = ts.TosaSerializerQuantInfo()
        if isinstance(dtype_or_dtypeList, list):
            # a list of [input, weights, accumulator] dtypes
            dtypeList = dtype_or_dtypeList
        else:
            # a single dtype: input, weights and accumulator dtypes are the same
            dtypeList = [dtype_or_dtypeList] * 3
        input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0])
        weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1])
        qinfo.ConvQuantInfo(input_zp, weights_zp)
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.MatMulQuantInfo(
            TosaQuantGen.getQinfo(testGen, dtype), TosaQuantGen.getQinfo(testGen, dtype)
        )
        return qinfo

    @staticmethod
    def qgPad(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.PadQuantInfo(TosaQuantGen.getQinfo(testGen, dtype))
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        # Derived from computeMultiplierAndShiftTosaScale32
        # Provide a floating-point scaling factor and the scale32 parameter
        # to compute the multiplier and shift

        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert multiplier <= (1 << scaleBits)

        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits
        # print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(scaleFp, scaleBits, m, multiplier, shift))

        assert multiplier <= (1 << scaleBits)
        assert shift >= 0 and shift <= 63

        return multiplier, shift


class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator.  The actual random data is generated separately for each test."""

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank):
        pl, const = opName["operands"]

        assert rank == 4

        shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            shape[0] = (shape[0] % testGen.args.max_batch_size) + 1

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgScatter(testGen, opName, rank):
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        assert rank == 3

        values_in_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            values_in_shape[0] = (values_in_shape[0] % testGen.args.max_batch_size) + 1

        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        input_shape = [values_in_shape[0], W, values_in_shape[2]]

        shape_list = []
        shape_list.append(values_in_shape.copy())
        shape_list.append(input_shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank):
        shape = testGen.makeShape(rank)

        pl, const = op["operands"]

        shape_list = []

        # Choose one of the inputs to broadcast
        bcast_idx = testGen.randInt(0, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # For the chosen input, pick a random dimension to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list
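
    # Example (illustrative): for rank 3 and a base shape of [3, 4, 5], picking
    # bcast_idx = 1 and fuzz_idx = 1 yields shapes [[3, 4, 5], [3, 1, 5]], so
    # exactly one operand broadcasts along one randomly chosen dimension.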

    @staticmethod
    def tgConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        # Filter is KH, KW, C, M
        filter_hw = op["filter"]

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (
            testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]
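
    # Shape sketch (illustrative): an ifm of [N, H, W, C] with a filter of
    # [KH, KW, C, M] produces an output depth of C * M, which is why the bias
    # shape above is [C * M].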

    @staticmethod
    def tgFullyConnected(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 2

        input_shape = testGen.makeShape(rank)
        filter_oc = testGen.rng.integers(
            low=testGen.args.tensor_shape_range[0],
            high=testGen.args.tensor_shape_range[1],
            size=1,
        )[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 3
        assert pl == 2 and const == 0

        a_shape = testGen.makeShape(rank)
        # Get a random number for b_oc even if target shape is defined
        b_oc = np.int32(
            testGen.rng.integers(
                low=testGen.args.tensor_shape_range[0],
                high=testGen.args.tensor_shape_range[1],
                size=1,
            )
        )[0]
        # If N or H is large let b_oc be 1 to reduce output tensor size
        if max(a_shape) > 1000:
            b_oc = 1

        b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])
        return [a_shape, b_shape]
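
    # Example (illustrative): a_shape [2, 5, 7] with b_oc = 3 gives
    # b_shape [2, 7, 3]; the MATMUL output would then be [2, 5, 3].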

    @staticmethod
    def tgConcat(testGen, opName, rank):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Create extra tensors to concat.
        # Take into account value of pl when getting maximum number of concats
        num_tensors = testGen.randInt(0, 4)
        shape_list = []
        for i in range(pl + const + num_tensors):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgConcatConstInput(testGen, shapeList, axis):
        # Split concat shape along axis to allow for multiple const inputs
        # without making too many large tensors
        shape = shapeList[0]
        if len(shapeList) == 2 or shape[axis] < len(shapeList):
            return shapeList

        new_shapeList = [shape.copy()]
        length_on_axis = shape[axis]
        remaining_length = length_on_axis
        for i in range(len(shapeList) - 2):
            # Calculate split on axis and remaining value
            split_shape_val = int(shape[axis] / 2)
            remaining_length = remaining_length - split_shape_val

            # Append new shape, and set remaining shape
            shape[axis] = split_shape_val
            new_shapeList.append(shape.copy())
            shape[axis] = remaining_length
            if i == len(shapeList) - 3:
                new_shapeList.append(shape.copy())

        return new_shapeList
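
    # Example (illustrative): four shapes of [2, 8, 3] with axis = 1 become
    # [[2, 8, 3], [2, 4, 3], [2, 2, 3], [2, 2, 3]] -- the first entry is the
    # unsplit shape and the remaining axis lengths (4 + 2 + 2) sum to 8.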


class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for operators that take
    attributes or other parameters.  The return value is a list of (descriptive_name, [arglist])
    tuples where the descriptive_name is appended to the test name and the arglist is expanded
    as arguments to the operator build function."""

    def __init__(self):
        pass

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype):
        """A trivial argument generator for operators that don't take any
        non-tensor arguments"""
        return [("", [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype):
        """Build the axis argument for operators that take a single axis"""
        axes = []

        shape = shapeList[0]

        for a in range(0, len(shape)):
            axes.append(("axis{}".format(a), [a]))
        return axes

    @staticmethod
    def agConv2D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert len(ifm_shape) == 4
        assert len(filter_shape) == 4

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for padding in range(0, (maxPadding) ** 4):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    p = [
                        (padding // (maxPadding * 4)) % maxPadding,
                        (padding // (maxPadding * 2)) % maxPadding,
                        (padding // (maxPadding * 1)) % maxPadding,
                        padding % maxPadding,
                    ]
                    d = [dilation // maxDilation + 1, dilation % maxDilation + 1]

                    # 4 padding parameters for regular conv2d
                    arg_list.append(
                        (
                            "st{}{}_pad{}{}{}{}_dilat{}{}".format(
                                s[0], s[1], p[0], p[1], p[2], p[3], d[0], d[1]
                            ),
                            [s, p, d],
                        )
                    )
        return arg_list
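
    # Decoding sketch (illustrative, assuming max_conv_stride = 2): stride runs
    # 0..3 and s = [stride // 2 + 1, stride % 2 + 1] enumerates (1, 1), (1, 2),
    # (2, 1), (2, 2); padding and dilation unpack from their single loop
    # counters the same way.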

    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert len(ifm_shape) == 4
        assert len(filter_shape) == 4

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for out_padding in range(0, (maxPadding) ** 2):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    p = [
                        (out_padding // (maxPadding * 1)) % maxPadding,
                        out_padding % maxPadding,
                    ]
                    d = [dilation // maxDilation + 1, dilation % maxDilation + 1]

                    oh = (
                        ifm_shape[1]
                        - filter_shape[1]
                        - (filter_shape[1] - 1) * (d[0] - 1)
                        + 2 * p[0]
                    ) // s[0] + 1

                    ow = (
                        ifm_shape[2]
                        - filter_shape[2]
                        - (filter_shape[2] - 1) * (d[1] - 1)
                        + 2 * p[1]
                    ) // s[1] + 1

                    # Output shape
                    os = [ifm_shape[0], oh, ow, filter_shape[0]]

                    arg_list.append(
                        (
                            "st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}".format(
                                s[0],
                                s[1],
                                p[0],
                                p[1],
                                d[0],
                                d[1],
                                os[0],
                                os[1],
                                os[2],
                                os[3],
                            ),
                            [s, p, d, os],
                        )
                    )

        return arg_list

    @staticmethod
    def agPad(testGen, opName, shapeList, dtype):
        arg_list = []
        rank = len(shapeList[0])

        # Exhaustively test combinations of padding on each side of each dimension
        # - the range of padding values is defined by pad_min and pad_max
        # - for padding >9, the name format needs to be more distinctive
        pad_min, pad_max = 0, 1
        pad_values = [x for x in range(pad_min, pad_max + 1)]
        axis_pad_values = [x for x in itertools.product(pad_values, pad_values)]
        shape_pad_values = itertools.product(*([axis_pad_values] * rank))

        for paddings in shape_pad_values:
            name = "pad"
            for r in range(rank):
                before, after = paddings[r]
                name = f"{name}{before}{after}"
            arg_list.append((name, [np.array(paddings)]))

        return arg_list
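
    # Example (illustrative): for rank 2 with pad_min, pad_max = 0, 1 this
    # emits 4**2 = 16 cases named pad0000 .. pad1111, each carrying a
    # (rank, 2) array of before/after padding values.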

    @staticmethod
    def agPooling(testGen, opName, shapeList, dtype):
        arg_list = []

        shape = shapeList[0]
        assert len(shape) == 4

        maxStride = testGen.args.max_pooling_stride
        maxKernel = testGen.args.max_pooling_kernel
        maxPadding = testGen.args.max_pooling_padding + 1

        for kernel in range(0, maxKernel ** 2):
            for stride in range(0, maxStride ** 2):
                for padding in range(0, maxPadding ** 4):
                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    k = [(kernel // maxKernel) + 2, (kernel % maxKernel) + 2]
                    p = [
                        (padding // (maxPadding * 4)) % maxPadding,
                        (padding // (maxPadding * 2)) % maxPadding,
                        (padding // (maxPadding * 1)) % maxPadding,
                        padding % maxPadding,
                    ]

                    arg_list.append(
                        (
                            "st{}{}_kern{}{}_pad{}{}{}{}".format(
                                s[0], s[1], k[0], k[1], p[0], p[1], p[2], p[3]
                            ),
                            [k, s, p],
                        )
                    )
        return arg_list

    @staticmethod
    def agCast(testGen, opName, shapeList, inDtype):
        arg_list = []

        # Enumerate the output types here
        if inDtype == DType.INT8:
            dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT16:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT32:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        elif inDtype == DType.BOOL:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif inDtype == DType.FLOAT:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        else:
            raise Exception("Unexpected input dtype: {}".format(inDtype))

        for dtype in dtypeList:
            arg_list.append(("out{}".format(DTypeNames[dtype]), [dtype]))

        return arg_list

    @staticmethod
    def agRescale(testGen, opName, shapeList, inDtype):
        arg_list = []

        # Enumerate the output types here
        for dtype in [DType.UINT8, DType.INT8, DType.INT16, DType.INT32]:
            if inDtype == DType.UINT8 and dtype != DType.INT8:
                # The only output dtype for UINT8 input is INT8, skip all other combinations
                continue
            if inDtype != DType.INT8 and dtype == DType.UINT8:
                # UINT8 output is only allowed with INT8 input, skip all other combinations
                continue

            for scale32 in [False, True]:
                for double_round in [False, True]:
                    for per_channel in [False, True]:

                        if inDtype == DType.INT48 and scale32:
                            # Illegal condition.  Must be scale32=False
                            continue
                        if double_round and not scale32:
                            # Illegal condition.  ERROR_IF(!scale32 && double_round)
                            continue

                        arg_list.append(
                            (
                                "out{}_sc{}_dr{}_pc{}".format(
                                    DTypeNames[dtype],
                                    int(scale32),
                                    int(double_round),
                                    int(per_channel),
                                ),
                                [dtype, scale32, double_round, per_channel],
                            )
                        )

        return arg_list

    @staticmethod
    def agMul(testGen, opName, shapeList, dtype):
        arg_list = []

        if dtype is DType.INT32:
            for p in range(testGen.args.num_rand_permutations):

                shift = testGen.randInt(0, 32)

                arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
        else:
            arg_list.append(("perm0_shift0", [0]))

        return arg_list

    @staticmethod
    def agArithmeticRightShift(testGen, opName, shapeList, dtype):
        arg_list = []

        arg_list.append(("roundTrue", [True]))
        arg_list.append(("roundFalse", [False]))

        return arg_list

    # Helper function for reshape.  Gets some factors of a larger number.
    @staticmethod
    def getFactors(val, start=1):
        factors = []

        for i in range(start, int(np.sqrt(val)) + 1):
            if (val % i) == 0:
                factors.append(i)

        return factors
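
    # Example (illustrative): getFactors(12) returns [1, 2, 3] -- only factors
    # up to sqrt(val) are produced; agReshape divides the element count by the
    # chosen factors and uses the remainder as the final dimension, so larger
    # factors still appear in generated shapes.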

    @staticmethod
    def agReshape(testGen, opName, shapeList, dtype):
        arg_list = []

        origShape = shapeList[0]

        totalElements = 1
        for s in origShape:
            totalElements *= s

        # This code is NOT fast.  Fortunately, the numbers are fairly small.
        factors = TosaArgGen.getFactors(totalElements)

        for p in range(testGen.args.num_rand_permutations):
            newRank = testGen.randInt(1, 7)
            if len(factors) < newRank:
                continue

            found = True
            # escape_counter breaks the while loop if it runs for too long
            escape_counter = 0
            while found:
                newShape = []
                # Generate newShape ensuring it isn't a duplicate
                remainingElements = totalElements
                shuffledFactors = testGen.rng.permutation(factors)
                for i in range(1, newRank):
                    # pick rank - 1 factors
                    newShape.append(shuffledFactors[0])
                    remainingElements = remainingElements // shuffledFactors[0]
                    shuffledFactors = testGen.rng.permutation(
                        TosaArgGen.getFactors(remainingElements)
                    )
                newShape.append(remainingElements)

                # Toss in a -1 sometimes
                minusOne = testGen.randInt(0, newRank * 4)
                if minusOne < newRank:
                    newShape[minusOne] = -1

                # Check for duplicates
                found = False
                for name, other_shape in arg_list:
                    if other_shape[0] == newShape:
                        found = True
                        break

                escape_counter += 1
                if escape_counter >= 100:
                    break

            if not found:
                arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))

        return arg_list
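
    # Note (illustrative): a generated newShape such as [2, 3, -1] relies on
    # the -1 being treated as an inferred dimension by RESHAPE, matching the
    # "toss in a -1 sometimes" step above.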

    @staticmethod
    def agTranspose(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]

        # Get all permutations
        permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]

        # Limit to possible permutations from shape dimension or argument setting
        limit = min(len(permutations), testGen.args.num_rand_permutations)

        # Get random permutation generator that uses all permutations
        random_permutations = testGen.rng.permutation(permutations)

        # Create the list of the required number of permutations
        arg_list = [
            ("perm{}".format(p), [random_permutations[p].tolist()])
            for p in range(limit)
        ]
        return arg_list

    @staticmethod
    def agSlice(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):
            begin = []
            size = []

            valid = True

            for i in range(rank):
                if ifm_shape[i] > 1:
                    begin.append(testGen.randInt(0, ifm_shape[i]))
                    size.append(testGen.randInt(0, ifm_shape[i] - begin[i]))

                    # Invalid slice size?
                    if size[i] == 0:
                        valid = False
                else:
                    begin.append(0)
                    size.append(1)

            if valid:
                arg_list.append(("perm{}".format(p), [begin, size]))
        return arg_list

    @staticmethod
    def agTile(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):

            # Pick a few random, but small multiple values
            # because otherwise this has a tendency to generate
            # enormous tensors
            multiples = []
            for i in range(rank):
                if ifm_shape[i] > 1000:
                    # Multiple of 1 if ifm_shape dimension is large to reduce tensor size
                    multiples.append(1)
                elif max(ifm_shape) > 1000:
                    multiples.append(2)
                else:
                    multiples.append(testGen.randInt(1, 4))
            arg_list.append(("perm{}".format(p), [multiples]))

        return arg_list

    @staticmethod
    def agResize(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]

        for m in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:

            # Exclude illegal {mode, type} configurations.  Pick legal output types
            if m == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif m == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            elif dtype == DType.FLOAT:
                outputDTypeList = [DType.FLOAT]
            else:
                continue

            for outputDType in outputDTypeList:
                for perm in range(testGen.args.num_rand_permutations):

                    # Randomly generate legal output dimensions and shift
                    # and then compute the stride and offset based on them
                    output_dims = [testGen.randInt(1), testGen.randInt(1)]
                    in_center_h = (ifm_shape[1] - 1) / 2.0
                    in_center_w = (ifm_shape[2] - 1) / 2.0
                    out_center_h = (output_dims[0] - 1) / 2.0
                    out_center_w = (output_dims[1] - 1) / 2.0

                    fp_stride_y = float(ifm_shape[1]) / float(output_dims[0])
                    fp_stride_x = float(ifm_shape[2]) / float(output_dims[1])
                    fp_offset_y = in_center_h - fp_stride_y * out_center_h
                    fp_offset_x = in_center_w - fp_stride_x * out_center_w

                    if outputDType == DType.FLOAT:
                        shift = 0
                        stride = [0, 0]
                        offset = [0, 0]
                        stride_fp = [fp_stride_y, fp_stride_x]
                        offset_fp = [fp_offset_y, fp_offset_x]
                        arg_list.append(
                            (
                                "mode{}_odim{}x{}_out{}_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}".format(
                                    "N" if m == ResizeMode.NEAREST else "B",
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride_fp[0],
                                    stride_fp[1],
                                    offset_fp[0],
                                    offset_fp[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )
                    else:
                        shift = 11
                        unit = float(1 << shift)
                        stride_y = int(round(fp_stride_y * unit))
                        stride_x = int(round(fp_stride_x * unit))
                        offset_y = int(round(fp_offset_y * unit))
                        offset_x = int(round(fp_offset_x * unit))

                        while (
                            stride_y >= 32768
                            or stride_x >= 32768
                            or offset_y >= 32768
                            or offset_x >= 32768
                            or offset_y < -32768
                            or offset_x < -32768
                        ):
                            shift = shift - 1
                            unit = float(1 << shift)
                            stride_y = int(round(fp_stride_y * unit))
                            stride_x = int(round(fp_stride_x * unit))
                            offset_y = int(round(fp_offset_y * unit))
                            offset_x = int(round(fp_offset_x * unit))

                        stride = [stride_y, stride_x]
                        offset = [offset_y, offset_x]

                        stride_fp = [0.0, 0.0]
                        offset_fp = [0.0, 0.0]

                        arg_list.append(
                            (
                                "mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}".format(
                                    "N" if m == ResizeMode.NEAREST else "B",
                                    shift,
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride[0],
                                    stride[1],
                                    offset[0],
                                    offset[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )

        return arg_list
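
    # Fixed-point sketch (illustrative): with shift = 11, unit = 2048, so a
    # floating-point stride of 1.5 encodes as round(1.5 * 2048) = 3072; the
    # while loop above lowers shift until stride and offset fit within the
    # signed 16-bit range of +/-32768.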

    @staticmethod
    def agCondIf(testGen, opName, shapeList, dtype):
        # CondIf generates the condition values here.
        # Convert to tensors in the build function, along with the
        # then and else blocks
        arg_list = []

        for c in [False, True]:
            arg_list.append(("cond{}".format(int(c)), [c]))

        return arg_list

    @staticmethod
    def agWhileLoop(testGen, opName, shapeList, dtype):
        # While loop: 0 iterations, 1, more than 1
        arg_list = []

        for iter in [0, 1, 4]:
            arg_list.append(("iter{}".format(iter), [iter]))

        return arg_list


class TosaTestGen:
    # Maximum rank of tensor supported by test generator.
    TOSA_TENSOR_MAX_RANK = 6

    def __init__(self, args):
        self.args = args
        self.basePath = args.output_dir
        self.random_seed = args.random_seed
        self.ser = None
        self.rng = np.random.default_rng(self.random_seed)
        self.createDynamicOpLists()
        self.initOpListDefaults()
        self.quantGen = TosaQuantGen()
        # Force makeShape to do a specific starting shape
        self.targetted_shape = None

    def createSerializer(self, opName, testPath):
        self.testPath = os.path.join(opName, testPath)

        fullPath = os.path.join(self.basePath, self.testPath)
        os.makedirs(fullPath, exist_ok=True)
        self.ser = ts.TosaSerializer(fullPath)

    def getSerializer(self):
        return self.ser

    def serialize(self, testName):
        with open(
            os.path.join(self.basePath, self.testPath, "{}.tosa".format(testName)), "wb"
        ) as fd:
            fd.write(self.ser.serialize())

        with open(os.path.join(self.basePath, self.testPath, "desc.json"), "w") as fd:
            fd.write(self.ser.writeJson("{}.tosa".format(testName)))

    def getRandTensor(self, shape, dtype):
        if dtype == DType.BOOL:
            return np.bool_(self.rng.choice(a=[False, True], size=shape))
        # TOSA specific INT4 weight range from -7 to 7
        elif dtype == DType.INT4:
            return np.int32(self.rng.integers(low=-7, high=8, size=shape))
        elif dtype == DType.INT8:
            return np.int32(self.rng.integers(low=-128, high=128, size=shape))
        elif dtype == DType.UINT8:
            return np.int32(self.rng.integers(low=0, high=256, size=shape))
        elif dtype == DType.INT16:
            return np.int32(self.rng.integers(low=-32768, high=32768, size=shape))
        elif dtype == DType.INT32:
            return np.int32(
                self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape)
            )
        elif dtype == DType.INT48:
            return np.int64(
                self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape)
            )
        elif dtype == DType.FLOAT:
            return np.float32(self.rng.random(size=shape))
        else:
            raise Exception("Unrecognized Dtype: {}".format(dtype))

    def buildPlaceholderTensors(self, shape_list, dtype_list):
        placeholders = []

        assert len(shape_list) == len(dtype_list)

        for idx, shape in enumerate(shape_list):
            arr = self.getRandTensor(shape, dtype_list[idx])
            placeholders.append(self.ser.addPlaceholder(shape, dtype_list[idx], arr))

        return placeholders

    def buildConstTensors(self, shape_list, dtype_list):
        consts = []

        assert len(shape_list) == len(dtype_list)

        for idx, shape in enumerate(shape_list):
            arr = self.getRandTensor(shape, dtype_list[idx])
            consts.append(self.ser.addConst(shape, dtype_list[idx], arr))

        return consts

    def makeShape(self, rank):
        if self.targetted_shape:
            return np.int32(self.targetted_shape)
        return np.int32(
            self.rng.integers(
                low=self.args.tensor_shape_range[0],
                high=self.args.tensor_shape_range[1],
                size=rank,
            )
        )

    def setTargetShape(self, shape):
        self.targetted_shape = shape

    def randInt(self, low=0, high=256):
        return np.int32(self.rng.integers(low=low, high=high, size=1))[0]

    def getRandNumberDType(self, dtype):
        if dtype == DType.FLOAT:
            return self.rng.random()
        elif dtype == DType.BOOL:
            return self.rng.choice([False, True])
        # TOSA specific INT4 weight range from -7 to 7
        elif dtype == DType.INT4:
            low, high = (-7, 8)
        elif dtype == DType.INT8:
            low, high = (-128, 128)
        elif dtype == DType.INT16:
            low, high = (-32768, 32768)
        elif dtype == DType.INT32:
            low, high = (-(1 << 31), (1 << 31))
        elif dtype == DType.INT48:
            low, high = (-(1 << 47), (1 << 47))
            # Special size
            return np.int64(self.rng.integers(low, high, size=1))[0]
        else:
            raise Exception("Unknown dtype: {}".format(dtype))

        return np.int32(self.rng.integers(low, high, size=1))[0]

    def shapeStr(self, shape):

        sStr = []
        # Convert to strings
        for i in shape:
            sStr.append(str(i))

        return "x".join(sStr)

    def typeStr(self, t):
        if isinstance(t, list):
            assert len(t) >= 2
            return "{}x{}".format(self.typeStr(t[0]), self.typeStr(t[1]))
        else:
            if t == DType.BOOL:
                return "b"
            elif t == DType.INT4:
                return "i4"
            elif t == DType.INT8:
                return "i8"
            elif t == DType.UINT8:
                return "u8"
            elif t == DType.INT16:
                return "i16"
            elif t == DType.INT32:
                return "i32"
            elif t == DType.INT48:
                return "i48"
            elif t == DType.FLOAT:
                return "float"
            else:
                raise Exception("Unknown dtype, cannot convert to string: {}".format(t))

    def typeWidth(self, t):
        """Get the datatype width for integer types"""
        if t == DType.INT4:
            return 4
        elif t == DType.INT8:
            return 8
        elif t == DType.UINT8:
            return 8
        elif t == DType.INT16:
            return 16
        elif t == DType.INT32:
            return 32
        elif t == DType.INT48:
            return 48
        else:
            raise Exception("Unknown dtype, cannot determine width: {}".format(t))

    # Argument generators
    # Returns a list of tuples (stringDescriptor, [build_fcn_arg_list])
    # where the string descriptor is used to generate the test name and
    # the build_fcn_arg_list is expanded and passed to the operator test
    # build function

    def build_unary(self, op, a, qinfo=None):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_binary_broadcast(self, op, a, b):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_binary_nonbroadcast(self, op, a, b):
        result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_arithmetic_right_shift(self, op, a, b, round):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)

        attr = ts.TosaSerializerAttribute()
        attr.ArithmeticRightShiftAttribute(round)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens

    def build_mul(self, op, a, b, shift):
        result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)

        # Special for multiply:
        # Force the result to INT32 for INT types
        if a.dtype != DType.FLOAT:
            result_tens.setDtype(DType.INT32)

        attr = ts.TosaSerializerAttribute()
        attr.MulAttribute(shift)

        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
        return result_tens

    def build_table(self, op, a):
        # Constant size depending on type, random values
        if a.dtype == DType.INT16:
            table_dtype = DType.INT16
            table_arr = self.getRandTensor([513], table_dtype)
        else:
            assert a.dtype == DType.INT8
            table_dtype = DType.INT8
            table_arr = self.getRandTensor([256], table_dtype)

        table_tens = self.ser.addConst(table_arr.shape, table_dtype, table_arr)
        result_tens = OutputShaper.tableOp(self.ser, a, table_dtype)
        self.ser.addOperator(op, [a.name, table_tens.name], [result_tens.name], None)

        return result_tens
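
    # Table size note (an assumption based on the TOSA TABLE definition): INT16
    # lookups use 513 entries (512 segments plus a final endpoint for
    # interpolation), while INT8 lookups map each of the 256 input values
    # directly.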

    def build_select(self, op, cond, a, b):
        result_tens = OutputShaper.selectOp(self.ser, cond, a, b)
        self.ser.addOperator(op, [cond.name, a.name, b.name], [result_tens.name])
        return result_tens

    def build_comparison(self, op, a, b):
        result_tens = OutputShaper.binaryComparisonOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
        return result_tens

    def build_argmax(self, op, a, axis):
        result_tens = OutputShaper.argmaxOp(self.ser, a, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_pool2d(self, op, input, kernel, stride, pad, qinfo=None):
        result_tens = OutputShaper.pool2dOp(self.ser, input, kernel, stride, pad)

        attr = ts.TosaSerializerAttribute()
        attr.Pool2dAttribute(kernel, stride, pad)

        self.ser.addOperator(op, [input.name], [result_tens.name], attr, qinfo)
        return result_tens

    def build_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
        assert len(padding) == 4
        result_tens = OutputShaper.conv2dOp(
            self.ser, ifm, filter, strides, padding, dilations
        )

        attr = ts.TosaSerializerAttribute()
        attr.Conv2dAttribute(padding, strides, dilations)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
        )
        return result_tens

    def build_transpose_conv2d(
        self, op, ifm, filter, bias, stride, outpad, dilation, output_shape, qinfo
    ):
        assert len(outpad) == 2
        result_tens = OutputShaper.transposeConv2DOp(self.ser, ifm, output_shape)

        attr = ts.TosaSerializerAttribute()
        attr.TransposeConv2DAttribute(outpad, stride, dilation, output_shape)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
        )
        return result_tens

    def build_depthwise_conv2d(
        self, op, ifm, filter, bias, strides, padding, dilations, qinfo
    ):
        result_tens = OutputShaper.depthwiseConv2dOp(
            self.ser, ifm, filter, strides, padding, dilations
        )

        attr = ts.TosaSerializerAttribute()
        attr.Conv2dAttribute(padding, strides, dilations)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
        )
        return result_tens

    def build_fully_connected(self, op, ifm, filter, bias, qinfo):
        result_tens = OutputShaper.fullyConnectedOp(self.ser, ifm, filter)

        self.ser.addOperator(
            op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo
        )
        return result_tens

    def build_matmul(self, op, a, b, qinfo):
        result_tens = OutputShaper.matmulOp(self.ser, a, b)
        self.ser.addOperator(op, [a.name, b.name], [result_tens.name], None, qinfo)
        return result_tens

    def build_reduce(self, op, a, axis):
        result_tens = OutputShaper.reduceOp(self.ser, a, axis)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_clamp(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()
        v = [self.getRandNumberDType(a.dtype), self.getRandNumberDType(a.dtype)]

        if a.dtype == DType.FLOAT:
            attr.ClampAttribute(0, 0, min(v), max(v))
        else:
            attr.ClampAttribute(min(v), max(v), 0, 0)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_leaky_relu(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        attr = ts.TosaSerializerAttribute()

        attr.LeakyReluAttribute(self.getRandNumberDType(DType.FLOAT))

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    # Needs an additional type/input
    def build_prelu(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_relun(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()

        if a.dtype == DType.FLOAT:
            attr.ReluNAttribute(0, self.getRandNumberDType(a.dtype))
        else:
            attr.ReluNAttribute(self.getRandNumberDType(a.dtype), 0)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_sigmoid(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_tanh(self, op, a):
        result_tens = OutputShaper.unaryOp(self.ser, a)
        self.ser.addOperator(op, [a.name], [result_tens.name])
        return result_tens

    def build_concat(self, op, *a):
        assert type(a[-1]) == int

        # To store a variable-length list of input tensors we need to store the
        # axis along with it
        axis = a[-1]
        a = a[:-1]

        result_tens = OutputShaper.concatOp(self.ser, axis, *a)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        input_tensor_names = []
        for tensor in a:
            input_tensor_names.append(tensor.name)

        self.ser.addOperator(op, input_tensor_names, [result_tens.name], attr)
        return result_tens
1343 def build_pad(self, op, a, padding, qinfo):
1344 result_tens = OutputShaper.padOp(self.ser, a, padding)
1345
1346 # Need to turn the padding array into a TOSA tensor here.
1347 # This is one of the few tensor operands that does not get
1348 # randomly generated
Kevin Cheng550ccc52021-03-03 11:21:43 -08001349 padding_tens = self.ser.addConst(padding.shape, DType.INT32, padding)
Eric Kunzee5e26762020-10-13 16:11:07 -07001350
Kevin Cheng550ccc52021-03-03 11:21:43 -08001351 self.ser.addOperator(
1352 op, [a.name, padding_tens.name], [result_tens.name], None, qinfo
1353 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001354
    def build_reshape(self, op, a, newShape):
        result_tens = OutputShaper.reshapeOp(self.ser, a, newShape)

        attr = ts.TosaSerializerAttribute()
        attr.ReshapeAttribute(newShape)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_reverse(self, op, a, axis):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_transpose(self, op, a, perms):
        result_tens = OutputShaper.transposeOp(self.ser, a, perms)

        perms_tens = self.ser.addConst([len(perms)], DType.INT32, np.int32(perms))

        self.ser.addOperator(op, [a.name, perms_tens.name], [result_tens.name])
        return result_tens

    def build_slice(self, op, a, begin, size):
        result_tens = OutputShaper.sliceOp(self.ser, a, begin, size)

        attr = ts.TosaSerializerAttribute()
        attr.SliceAttribute(begin, size)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_tile(self, op, a, multiples):
        result_tens = OutputShaper.tileOp(self.ser, a, multiples)

        attr = ts.TosaSerializerAttribute()
        attr.TileAttribute(multiples)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

Kevin Cheng77d0f762020-11-24 10:26:32 -08001399 def build_gather(self, op, values):
Eric Kunzee5e26762020-10-13 16:11:07 -07001400
1401 # Create a new indicies tensor
1402 # here with data that doesn't exceed the dimensions of the values tensor
1403
Kevin Cheng550ccc52021-03-03 11:21:43 -08001404 K = values.shape[1] # K
1405 W = self.randInt(
1406 self.args.tensor_shape_range[0], self.args.tensor_shape_range[1]
1407 ) # W
1408 indicies_arr = np.int32(
1409 self.rng.integers(low=0, high=K, size=[values.shape[0], W])
1410 ) # (N, W)
1411 indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001412
Kevin Cheng77d0f762020-11-24 10:26:32 -08001413 result_tens = OutputShaper.gatherOp(self.ser, values, indicies)
Eric Kunzee5e26762020-10-13 16:11:07 -07001414
Kevin Cheng77d0f762020-11-24 10:26:32 -08001415 self.ser.addOperator(op, [values.name, indicies.name], [result_tens.name])
Eric Kunzee5e26762020-10-13 16:11:07 -07001416
1417 return result_tens
1418
    def build_scatter(self, op, values_in, input):

        # Create a new indices tensor
        # here with data that doesn't exceed the dimensions of the values_in tensor

        K = values_in.shape[1]  # K
        W = input.shape[1]  # W
        indices_arr = np.int32(
            self.rng.integers(low=0, high=K, size=[values_in.shape[0], W])
        )  # (N, W)
        indices = self.ser.addConst(indices_arr.shape, DType.INT32, indices_arr)

        result_tens = OutputShaper.scatterOp(self.ser, values_in, indices, input)

        self.ser.addOperator(
            op, [values_in.name, indices.name, input.name], [result_tens.name]
        )

        return result_tens

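    # Shapes (illustrative): values_in is (N, K, C) and input is (N, W, C);
    # the (N, W) indices select which of the K rows each of the W input rows
    # overwrites, so the output keeps the (N, K, C) shape of values_in.
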
    def build_resize(
        self,
        op,
        input,
        mode,
        stride,
        offset,
        shift,
        stride_fp,
        offset_fp,
        output_dims,
        input_dtype,
        output_dtype,
    ):
        result_tens = OutputShaper.resizeOp(
            self.ser,
            input,
            mode,
            stride,
            offset,
            shift,
            stride_fp,
            offset_fp,
            output_dims,
            input_dtype,
            output_dtype,
        )

        attr = ts.TosaSerializerAttribute()

        attr.ResizeAttribute(
            output_dims, stride, offset, shift, stride_fp, offset_fp, mode
        )

        self.ser.addOperator(op, [input.name], [result_tens.name], attr)
        return result_tens

    def build_identityn(self, op, val, val2):

        result_tens = OutputShaper.unaryOp(self.ser, val)
        result_tens2 = OutputShaper.unaryOp(self.ser, val2)
        self.ser.addOperator(
            op, [val.name, val2.name], [result_tens.name, result_tens2.name]
        )
        return result_tens

    def build_placeholder(self, op, val):
        # Add an identity op to avoid warning in the reference model
        return self.build_unary(Op.IDENTITY, val)

    # Type Conversion
    def build_cast(self, op, val, out_dtype):
        result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
        self.ser.addOperator(op, [val.name], [result_tens.name])
        return result_tens

    def build_rescale(self, op, val, out_dtype, scale32, double_round, per_channel):
        result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)

        if per_channel:
            nc = val.shape[-1]
        else:
            nc = 1

        in_type_width = self.typeWidth(val.dtype)
        out_type_width = self.typeWidth(out_dtype)

        if val.dtype == DType.INT8:
            input_zp = self.randInt(-128, 128)
            in_type_width = in_type_width + 1
        elif val.dtype == DType.UINT8:
            input_zp = self.randInt(0, 256)
            in_type_width = in_type_width + 1
        else:
            input_zp = 0

        if out_dtype == DType.INT8:
            output_zp = self.randInt(-128, 128)
            out_type_width = out_type_width + 1
        elif out_dtype == DType.UINT8:
            output_zp = self.randInt(0, 256)
            out_type_width = out_type_width + 1
        else:
            output_zp = 0

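        # The "+ 1" width adjustments above account for the nonzero zero
        # point, which widens the effective value range of an INT8/UINT8
        # tensor by roughly one bit when deriving the random scale below.
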
        # Calculate scale based on:
        # scale = a * (2^output_width) / (2^input_width)

        a = np.float32(self.rng.random(size=[nc]))
        scale_arr = a * np.float32((1 << out_type_width) / (1 << in_type_width))

        if scale32:
            # Cap the scaling at 2^31 - 1 for scale32
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1)
        else:
            # Cap the scaling at 2^15 - 1 for scale16
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)

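        # Worked example (illustrative): for an INT8 -> INT32 rescale,
        # in_type_width = 8 + 1 (zero point) and out_type_width = 32, so a
        # random a = 0.5 gives scale = 0.5 * 2^32 / 2^9 = 2^22.
        # computeMultiplierAndShift() then encodes each scale value as
        # multiplier * 2^-shift, i.e. multiplier is roughly scale * 2^shift.
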
        # print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))

        multiplier_arr = np.int32(np.zeros(shape=[nc]))
        shift_arr = np.int32(np.zeros(shape=[nc]))

        for i in range(nc):
            multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(
                scale_arr[i], scale32
            )
            if shift_arr[i] < 2 or shift_arr[i] > 62:
                self.ser.setExpectedReturnCode(
                    TosaReturnCode.UNPREDICTABLE, "OpRescale: invalid shift value"
                )

        # print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp))

        attr = ts.TosaSerializerAttribute()
        attr.RescaleAttribute(
            input_zp,
            output_zp,
            multiplier_arr,
            shift_arr,
            scale32,
            double_round,
            per_channel,
        )

        self.ser.addOperator(op, [val.name], [result_tens.name], attr)
        return result_tens

    def build_cond_if_const(self, op, then_tens, else_tens, cond):
        # For cond_if with constants, we're supplied with then/else tensors that we ignore
        # (except for the generated shape) and the condition. Build Then/Else blocks
        # and fill them with const nodes for the body.

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, [cond])

        # Make then/else tensors
        out_shape = then_tens.shape
        then_arr = np.int32(self.rng.integers(0, 256, size=out_shape))
        else_arr = np.int32(self.rng.integers(0, 256, size=out_shape))

        # And the result tensor based on any of the outputs
        result_tens = self.ser.addOutput(out_shape, DType.INT32)

        # Create the attribute with the names of the then/else blocks
        then_block = "THEN_BLOCK"
        else_block = "ELSE_BLOCK"
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(op, [cond_tens.name], [result_tens.name], attr)

        self.ser.startBasicBlock(then_block)
        # Build the actual then/else tensors inside their blocks
        then_tens = self.ser.addConst(out_shape, DType.INT32, then_arr)
        self.ser.addOutputTensor(then_tens)

        self.ser.startBasicBlock(else_block)
        else_tens = self.ser.addConst(out_shape, DType.INT32, else_arr)
        self.ser.addOutputTensor(else_tens)

        return result_tens

    def build_cond_if_binary(self, op, a, b, cond):
        # For cond_if with a binary op in the then/else blocks, take a and b and
        # alternately add or subtract them based on the condition

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, [cond])

        result_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.currBasicBlock.addOutput(result_tens.name)

        # Create the attribute with the names of the then/else blocks
        then_block = "THEN_BLOCK"
        else_block = "ELSE_BLOCK"
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(
            op, [cond_tens.name, a.name, b.name], [result_tens.name], attr
        )

        self.ser.startBasicBlock(then_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        then_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.addOperator(Op.ADD, [a.name, b.name], [then_tens.name])

        self.ser.startBasicBlock(else_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        else_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.addOperator(Op.SUB, [a.name, b.name], [else_tens.name])

        return result_tens

    def build_while_loop(self, op, a, iter_val):
        iter = self.ser.addPlaceholder([], DType.INT32, [np.int32(iter_val)])

        cond_block = "COND_BLOCK"
        body_block = "BODY_BLOCK"

        attr = ts.TosaSerializerAttribute()
        attr.WhileLoopAttribute(cond_block, body_block)

        # Accumulator tensor
        # acc = self.ser.addOutput(a.shape, a.dtype)
        acc_init_val = np.int32(np.zeros(a.shape))
        acc = self.ser.addPlaceholder(a.shape, a.dtype, acc_init_val)

        # Intermediate/output tensors for everything going through the loop
        iter_out = self.ser.addIntermediate(iter.shape, iter.dtype)
        a_out = self.ser.addIntermediate(a.shape, a.dtype)
        acc_out = self.ser.addIntermediate(acc.shape, acc.dtype)

        # While_loop operator
        self.ser.addOperator(
            op,
            [iter.name, a.name, acc.name],
            [iter_out.name, a_out.name, acc_out.name],
            attr,
        )

        # COND block (inputs: iter, a, acc; output: cond_tens)
        self.ser.startBasicBlock(cond_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        zero_tens = self.ser.addConst([], DType.INT32, [np.int32(0)])
        cond_tens = self.ser.addOutput([], DType.BOOL)
        self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name], [cond_tens.name])

        # BODY block (inputs: iter, a, acc; outputs: iter, a, acc)
        # Note that local intermediate tensors need to be declared here for the outputs
        self.ser.startBasicBlock(body_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        one_tens = self.ser.addConst([], DType.INT32, [np.int32(1)])
        iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype)
        acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype)
        self.ser.addOperator(Op.ADD, [a.name, acc.name], [acc_body_out.name])
        self.ser.addOperator(Op.SUB, [iter.name, one_tens.name], [iter_body_out.name])
        self.ser.addOutputTensor(iter_body_out)
        self.ser.addOutputTensor(a)
        self.ser.addOutputTensor(acc_body_out)

        return acc_out

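    # Net effect (illustrative): the BODY block adds `a` to the accumulator
    # and decrements `iter` once per pass, and the COND block loops while
    # iter > 0, so the returned tensor is a * iter_val computed by repeated
    # addition.
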
    def genOpTestList(
        self, opName, shapeFilter=[None], rankFilter=None, dtypeFilter=None
    ):

        try:
            op = self.TOSA_OP_LIST[opName]
        except KeyError:
            raise Exception("Cannot find op with name {}".format(opName))

        # Initialize a new random number generator
        self.rng = np.random.default_rng(self.random_seed)

        build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]

        # Generate the lists of arguments
        rmin, rmax = op["rank"]

        # Create a default testing rank range, 1-4 inclusive to keep test sizes reasonably small.
        default_test_rank_range = range(1, 5)

        # Test list consists of tuples of:
        # (opName, testNameStr, dtype, shapeList, argumentsList)
        testList = []

        if not shapeFilter:
            shapeFilter = [None]

        for r in range(rmin, rmax + 1):

            # Filter out the rank?
            if rankFilter is not None and r not in rankFilter:
                continue
            if (
                rankFilter is None
                and shapeFilter[0] is None
                and r not in default_test_rank_range
            ):
                continue

            for t in op["types"]:

                # Filter tests based on dtype?
                if dtypeFilter is not None:
                    if not (
                        t in dtypeFilter
                        or (isinstance(t, list) and t[0] in dtypeFilter)
                    ):
                        continue

                # Create the placeholder and const tensors
                for shape in shapeFilter:
                    # A None shape chooses a random shape of a given rank

                    # Filter out by rank
                    if shape is not None and len(shape) != r:
                        continue

                    self.setTargetShape(shape)
                    shapeList = tgen_fcn(self, op, r)

                    shapeStr = self.shapeStr(shapeList[0])
                    typeStr = self.typeStr(t)

                    # The argument list consists of (str, args) tuples: the string
                    # representation and the build function's argument list
                    argList = []
                    if agen_fcn:
                        argList = agen_fcn(self, opName, shapeList, t)
                    else:
                        argList = [("", [])]

                    for argStr, args in argList:
                        if argStr:
                            testStr = "{}_{}_{}_{}".format(
                                opName, shapeStr, typeStr, argStr
                            )
                        else:
                            testStr = "{}_{}_{}".format(opName, shapeStr, typeStr)

                        testList.append((opName, testStr, t, shapeList, args))

        return testList

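    # Example (illustrative): one returned entry might look like
    #   ("add", "add_13x21x3_i32", DType.INT32, [[13, 21, 3], [13, 21, 3]], [])
    # with the exact shape digits coming from the random shape generator.
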
    def serializeTest(self, opName, testStr, dtype_or_dtypeList, shapeList, testArgs):
        try:
            op = self.TOSA_OP_LIST[opName]
        except KeyError:
            raise Exception("Cannot find op with name {}".format(opName))

        # Create a serializer
        self.createSerializer(opName, testStr)

        build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
        pCount, cCount = op["operands"]
        num_operands = pCount + cCount

        if isinstance(dtype_or_dtypeList, list):
            dtypeList = dtype_or_dtypeList
        elif op["op"] == Op.CONCAT:
            dtypeList = [dtype_or_dtypeList] * len(shapeList)
        else:
            dtypeList = [dtype_or_dtypeList] * num_operands

        if op["op"] != Op.CONCAT:
            assert (
                len(shapeList) == num_operands
            ), "shapeList length {} must match number of operands {}".format(
                len(shapeList), num_operands
            )
            assert (
                len(dtypeList) == num_operands
            ), "dtypeList length {} must match number of operands {}".format(
                len(dtypeList), num_operands
            )

        try:
            qgen = op["qgen"]
        except KeyError:
            qgen = None

        # Build the random tensor operands and the test
        tens = []

        # If test is ArithmeticRightShift, force value of operand[1] to be within [0, num_bits]
        if op["op"] == Op.ARITHMETIC_RIGHT_SHIFT:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"

            placeholders = []
            for idx, shape in enumerate(shapeList[:]):
                if idx == 1:
                    if dtypeList[idx] == DType.INT8:
                        arr = np.int32(self.rng.integers(low=0, high=8, size=shape))
                    elif dtypeList[idx] == DType.INT16:
                        arr = np.int32(self.rng.integers(low=0, high=16, size=shape))
                    elif dtypeList[idx] == DType.INT32:
                        arr = np.int32(self.rng.integers(low=0, high=32, size=shape))
                    else:
                        raise Exception("OpArithmeticRightShift: invalid input dtype")
                else:
                    arr = self.getRandTensor(shape, dtypeList[idx])
                placeholders.append(self.ser.addPlaceholder(shape, dtypeList[idx], arr))

            tens.extend(placeholders)
        elif op["op"] == Op.SELECT:
            # Set datatype of condition tensor to boolean
            dtypeList[0] = DType.BOOL
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
            )
            tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))
        elif op["op"] == Op.DIV:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.Div must have 2 placeholders, 0 consts"

            placeholders = []

            # Two invalid cases for Op.DIV:
            # 1. divisor == 0
            # 2. dividend == -(1<<31) and divisor == -1
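            #    (case 2 would overflow: -(1 << 31) / -1 = 1 << 31, which is
            #    not representable in int32)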
            while True:
                dividend_arr = self.getRandTensor(shapeList[0], dtypeList[0])
                divisor_arr = self.getRandTensor(shapeList[1], dtypeList[1])

                if (divisor_arr == 0).any():
                    continue

                if (dividend_arr == -(2 ** 31)).any() and (divisor_arr == -1).any():
                    continue

                break

            placeholders.append(
                self.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
            )
            placeholders.append(
                self.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
            )

            tens.extend(placeholders)
        elif op["op"] == Op.MUL:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.MUL must have 2 placeholders, 0 consts"

            if dtypeList[0] == DType.FLOAT:
                tens.extend(self.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
            else:
                placeholders = []

                # Make sure the multiply result fits in int32 range
                shift = testArgs[0]
                if dtypeList[0] == DType.INT8:
                    num_bits = 8
                elif dtypeList[0] == DType.INT16:
                    num_bits = 16
                elif dtypeList[0] == DType.INT32:
                    num_bits = 32
                else:
                    raise Exception("OpMul: invalid input dtype")

                low = -(2 ** (num_bits - 1))
                high = (2 ** (num_bits - 1)) - 1

                a_arr = np.int32(
                    self.rng.integers(low=low, high=high, size=shapeList[0])
                )
                b_arr = np.int32(
                    self.rng.integers(low=low, high=high, size=shapeList[1])
                )

                # Halve both operands until every (rounded, shifted) product
                # fits in the int32 range
                while True:
                    a_arr_64 = a_arr.astype(np.int64)
                    b_arr_64 = b_arr.astype(np.int64)

                    if shift > 0:
                        rounding = 1 << (shift - 1)
                        result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
                    else:
                        result_arr = a_arr_64 * b_arr_64

                    if (result_arr > -(2 ** 31)).all() and (
                        result_arr <= ((2 ** 31) - 1)
                    ).all():
                        break

                    a_arr = a_arr // 2
                    b_arr = b_arr // 2

                placeholders.append(
                    self.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
                )
                placeholders.append(
                    self.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
                )

                tens.extend(placeholders)
        elif op["op"] == Op.CONCAT:
            count = len(shapeList) - self.args.num_const_inputs_concat
            if count < 1:
                count = 1
            if self.args.num_const_inputs_concat == 0:
                count = len(shapeList)

            shapeList = TosaTensorGen.tgConcatConstInput(self, shapeList, testArgs[0])
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:count], dtypeList[0:count])
            )
            tens.extend(self.buildConstTensors(shapeList[count:], dtypeList[count:]))
        else:
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
            )
            tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))

        if qgen is not None:
            qinfo = qgen(self, op, dtype_or_dtypeList)
        else:
            qinfo = None

        try:
            if qinfo is not None:
                resultName = build_fcn(self, op["op"], *tens, *testArgs, qinfo)
            else:
                resultName = build_fcn(self, op["op"], *tens, *testArgs)
        except TypeError as e:
            print(
                "build_fcn: {}\nTensors: {}\nArgs: {}\n".format(
                    build_fcn, tens, testArgs
                )
            )
            raise e

        # Save the serialized test
        self.serialize("test")

    def createDynamicOpLists(self):

        # Dynamically create op lists for convolutions with a list of kernel sizes
        KERNELS = [[1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3]]

        for k in KERNELS:
            testName = "conv2d_{}x{}".format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST["conv2d_TEMPLATE"].copy()
            self.TOSA_OP_LIST[testName]["filter"] = k
            self.TOSA_OP_LIST[testName]["template"] = False

            testName = "depthwise_conv2d_{}x{}".format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
                "depthwise_conv2d_TEMPLATE"
            ].copy()
            self.TOSA_OP_LIST[testName]["filter"] = k
            self.TOSA_OP_LIST[testName]["template"] = False

            testName = "transpose_conv2d_{}x{}".format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
                "transpose_conv2d_TEMPLATE"
            ].copy()
            self.TOSA_OP_LIST[testName]["filter"] = k
            self.TOSA_OP_LIST[testName]["template"] = False

        # Delete any templates after having created any dynamic ops
        # This is a two-pass operation because it's bad practice to delete
        # keys from dictionaries while iterating
        keyList = []
        for k in self.TOSA_OP_LIST:
            try:
                if self.TOSA_OP_LIST[k]["template"]:
                    keyList.append(k)
                    continue
            except KeyError:
                pass

        for k in keyList:
            del self.TOSA_OP_LIST[k]

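    # Example (illustrative): after this runs, TOSA_OP_LIST gains entries such
    # as "conv2d_3x3" and "depthwise_conv2d_1x3", each a copy of the matching
    # *_TEMPLATE entry with its "filter" field filled in, and the *_TEMPLATE
    # entries themselves have been removed.
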
    def initOpListDefaults(self):
        """Fill in default fields for ops if they aren't already specified.
        Look for missing required fields (datastructure linting)."""
        for op in self.TOSA_OP_LIST:

            # Required fields
            try:
                pl, c = self.TOSA_OP_LIST[op]["operands"]
            except (KeyError, ValueError, TypeError):
                raise Exception(
                    "Op {} is missing a valid operand tuple in TOSA_OP_LIST".format(op)
                )

            try:
                fcn, tgen, arggen = self.TOSA_OP_LIST[op]["build_fcn"]
            except (KeyError, ValueError, TypeError):
                raise Exception(
                    "Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST".format(
                        op
                    )
                )

            try:
                types = self.TOSA_OP_LIST[op]["types"]
            except KeyError:
                raise Exception(
                    "Op {} is missing a valid type list in TOSA_OP_LIST".format(op)
                )

            try:
                opcode = self.TOSA_OP_LIST[op]["op"]
            except KeyError:
                raise Exception(
                    "Op {} is missing the Op field in TOSA_OP_LIST".format(op)
                )

            # Put in default rank range, if missing
            try:
                rank = self.TOSA_OP_LIST[op]["rank"]
            except KeyError:
                self.TOSA_OP_LIST[op]["rank"] = self.DEFAULT_RANK_RANGE

    # Tensor operator list
    # 'op': op name
    # 'operands': tuple of (placeholder, const) operands
    # 'rank': optional, restricts rank to the inclusive tuple (min, max);
    #    if not specified, defaults to DEFAULT_RANK_RANGE
    # 'build_fcn': tuple of (build function, TensorGen function, ArgGen function)
    # 'types': array of datatypes to be tested
    TYPE_FP = [DType.FLOAT]

    TYPE_INT = [DType.INT8, DType.INT16, DType.INT32]  # Excludes INT4
    TYPE_INT_FP = [DType.INT8, DType.INT16, DType.INT32, DType.FLOAT]  # Excludes INT4

    TYPE_BOOL = [DType.BOOL]
    TYPE_FI32 = [DType.FLOAT, DType.INT32]
    TYPE_FIB = [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL]
    TYPE_FI16 = [DType.FLOAT, DType.INT16]

    TYPE_NARROW_INT_FP = [DType.INT8, DType.INT16, DType.FLOAT]

    TYPE_CONV2D = [
        [DType.INT8, DType.INT4, DType.INT32],
        [DType.INT8, DType.INT8, DType.INT32],
        [DType.INT16, DType.INT8, DType.INT48],
        DType.FLOAT,
    ]

    DEFAULT_RANK_RANGE = (1, TOSA_TENSOR_MAX_RANK)

    TOSA_OP_LIST = {
        # Tensor operators
        "argmax": {
            "op": Op.ARGMAX,
            "operands": (1, 0),
            "build_fcn": (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_NARROW_INT_FP,
        },
        "avg_pool2d": {
            "op": Op.AVG_POOL2D,
            "operands": (1, 0),
            "rank": (4, 4),
            "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
            "qgen": TosaQuantGen.qgUnary,
            "types": TYPE_NARROW_INT_FP,
        },
        # Templated operator. Filled in by createDynamicOpLists
        "conv2d_TEMPLATE": {
            "op": Op.CONV2D,
            "operands": (1, 2),
            "rank": (4, 4),
            "build_fcn": (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV2D,
            "template": True,
        },
        # Conv3d TBD
        # Templated operator. Filled in by createDynamicOpLists
        "depthwise_conv2d_TEMPLATE": {
            "op": Op.DEPTHWISE_CONV2D,
            "operands": (1, 2),
            "filter": [1, 1],
            "rank": (4, 4),
            "build_fcn": (
                build_depthwise_conv2d,
                TosaTensorGen.tgDepthwiseConv2D,
                TosaArgGen.agConv2D,
            ),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV2D,
            "template": True,
        },
        "fully_connected": {
            "op": Op.FULLY_CONNECTED,
            "operands": (1, 2),
            "rank": (2, 2),
            "build_fcn": (build_fully_connected, TosaTensorGen.tgFullyConnected, None),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV2D,
        },
        "matmul": {
            "op": Op.MATMUL,
            "operands": (2, 0),
            "rank": (3, 3),
            "build_fcn": (build_matmul, TosaTensorGen.tgMatmul, None),
            "qgen": TosaQuantGen.qgMatmul,
            "types": TYPE_NARROW_INT_FP,
        },
        "max_pool2d": {
            "op": Op.MAX_POOL2D,
            "operands": (1, 0),
            "rank": (4, 4),
            "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
            "types": TYPE_NARROW_INT_FP,
        },
        # Templated operator. Filled in by createDynamicOpLists
        "transpose_conv2d_TEMPLATE": {
            "op": Op.TRANSPOSE_CONV2D,
            "operands": (1, 2),
            "rank": (4, 4),
            "build_fcn": (
                build_transpose_conv2d,
                TosaTensorGen.tgTransposeConv2D,
                TosaArgGen.agTransposeConv2D,
            ),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV2D,
            "template": True,
        },
        # Activation functions
        "clamp": {
            "op": Op.CLAMP,
            "operands": (1, 0),
            "build_fcn": (build_clamp, TosaTensorGen.tgBasic, None),
            "types": TYPE_NARROW_INT_FP,
        },
        "relun": {
            "op": Op.RELUN,
            "operands": (1, 0),
            "build_fcn": (build_relun, TosaTensorGen.tgBasic, None),
            "types": TYPE_FI32,
        },
        "sigmoid": {
            "op": Op.SIGMOID,
            "operands": (1, 0),
            "build_fcn": (build_sigmoid, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "tanh": {
            "op": Op.TANH,
            "operands": (1, 0),
            "build_fcn": (build_tanh, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        # Elementwise Binary Operators
        "add": {
            "op": Op.ADD,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "arithmetic_right_shift": {
            "op": Op.ARITHMETIC_RIGHT_SHIFT,
            "operands": (2, 0),
            "build_fcn": (
                build_arithmetic_right_shift,
                TosaTensorGen.tgBroadcastFuzz,
                TosaArgGen.agArithmeticRightShift,
            ),
            "types": TYPE_INT,
        },
        "bitwise_and": {
            "op": Op.BITWISE_AND,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "bitwise_or": {
            "op": Op.BITWISE_OR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "bitwise_xor": {
            "op": Op.BITWISE_XOR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "div": {
            "op": Op.DIV,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": [DType.INT32],
        },
        "logical_and": {
            "op": Op.LOGICAL_AND,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_BOOL,
        },
        "logical_left_shift": {
            "op": Op.LOGICAL_LEFT_SHIFT,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "logical_right_shift": {
            "op": Op.LOGICAL_RIGHT_SHIFT,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "logical_or": {
            "op": Op.LOGICAL_OR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_BOOL,
        },
        "logical_xor": {
            "op": Op.LOGICAL_XOR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_BOOL,
        },
        "maximum": {
            "op": Op.MAXIMUM,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "minimum": {
            "op": Op.MINIMUM,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "mul": {
            "op": Op.MUL,
            "operands": (2, 0),
            "build_fcn": (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul),
            "types": TYPE_INT_FP,
        },
        "pow": {
            "op": Op.POW,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "sub": {
            "op": Op.SUB,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "table": {
            "op": Op.TABLE,
            # Use the automatic generation functions to create the input array
            # but create the table tensor in the build function, as it may be
            # a different type from the input
            "operands": (1, 0),
            "build_fcn": (build_table, TosaTensorGen.tgBasic, None),
            "types": [DType.INT8, DType.INT16],
        },
        # Elementwise Unary operators
        "abs": {
            "op": Op.ABS,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FI32,
        },
        "bitwise_not": {
            "op": Op.BITWISE_NOT,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_INT,
        },
        "ceil": {
            "op": Op.CEIL,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "clz": {
            "op": Op.CLZ,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": [DType.INT32],
        },
        "exp": {
            "op": Op.EXP,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "floor": {
            "op": Op.FLOOR,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "log": {
            "op": Op.LOG,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "logical_not": {
            "op": Op.LOGICAL_NOT,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_BOOL,
        },
        "negate": {
            "op": Op.NEGATE,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "qgen": TosaQuantGen.qgUnary,
            "types": TYPE_INT_FP,
        },
        "reciprocal": {
            "op": Op.RECIPROCAL,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "rsqrt": {
            "op": Op.RSQRT,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        # Elementwise Ternary operators
        "select": {
            "op": Op.SELECT,
            "operands": (3, 0),
            "build_fcn": (build_select, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FIB,
        },
        # Comparison operators
        "equal": {
            "op": Op.EQUAL,
            "operands": (2, 0),
            "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "greater_equal": {
            "op": Op.GREATER_EQUAL,
            "operands": (2, 0),
            "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "greater": {
            "op": Op.GREATER,
            "operands": (2, 0),
            "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        # Reduction operators
        "reduce_all": {
            "op": Op.REDUCE_ALL,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_BOOL,
        },
        "reduce_any": {
            "op": Op.REDUCE_ANY,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_BOOL,
        },
        "reduce_max": {
            "op": Op.REDUCE_MAX,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_INT_FP,
        },
        "reduce_min": {
            "op": Op.REDUCE_MIN,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_INT_FP,
        },
        "reduce_product": {
            "op": Op.REDUCE_PRODUCT,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_FP,
        },
        "reduce_sum": {
            "op": Op.REDUCE_SUM,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_FI32,
        },
        # Data layout operators
        "concat": {
            "op": Op.CONCAT,
            "operands": (2, 0),
            "build_fcn": (build_concat, TosaTensorGen.tgConcat, TosaArgGen.agAxis),
            "types": TYPE_FIB,
        },
        "pad": {
            "op": Op.PAD,
            "operands": (1, 0),
            "build_fcn": (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad),
            "qgen": TosaQuantGen.qgPad,
            "types": TYPE_FIB,
        },
        "reshape": {
            "op": Op.RESHAPE,
            "operands": (1, 0),
            "build_fcn": (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
            "types": TYPE_FIB,
        },
        "reverse": {
            "op": Op.REVERSE,
            "operands": (1, 0),
            "build_fcn": (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_FIB,
        },
        "slice": {
            "op": Op.SLICE,
            "operands": (1, 0),
            "build_fcn": (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
            "types": TYPE_FIB,
        },
        "tile": {
            "op": Op.TILE,
            "operands": (1, 0),
            "build_fcn": (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
            "types": TYPE_FIB,
        },
        "transpose": {
            "op": Op.TRANSPOSE,
            "operands": (1, 0),
            "rank": (1, 4),
            "build_fcn": (
                build_transpose,
                TosaTensorGen.tgBasic,
                TosaArgGen.agTranspose,
            ),
            "types": TYPE_FIB,
        },
        # Data nodes
        "const": {
            "op": Op.CONST,
            "operands": (1, 0),
            "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None),
            "types": TYPE_FIB,
        },
        "identity": {
            "op": Op.IDENTITY,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FIB,
        },
        # Scatter/Gather
        "gather": {
            "op": Op.GATHER,
            # Only specify 'values' tensor here. 'indices' is generated in op building stage
            "operands": (1, 0),
            "rank": (3, 3),
            "build_fcn": (build_gather, TosaTensorGen.tgBasic, None),
            "types": TYPE_INT_FP,
        },
        "scatter": {
            "op": Op.SCATTER,
            # Only specify 'values_in' tensor here.
            # 'indices' and 'input' are generated in op building stage
            "operands": (2, 0),
            "rank": (3, 3),
            "build_fcn": (build_scatter, TosaTensorGen.tgScatter, None),
            "types": TYPE_INT_FP,
        },
        # Image operations
        "resize": {
            "op": Op.RESIZE,
            "operands": (1, 0),
            "rank": (4, 4),
            "build_fcn": (build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
            "types": [DType.INT8, DType.INT16, DType.FLOAT],
        },
        # Type conversion
        "cast": {
            "op": Op.CAST,
            "operands": (1, 0),
            "build_fcn": (build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast),
            "types": [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL],
        },
        "rescale": {
            "op": Op.RESCALE,
            "operands": (1, 0),
            "build_fcn": (build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale),
            "types": [DType.UINT8, DType.INT8, DType.INT16, DType.INT32, DType.INT48],
        },
        # Custom
        # Not implemented.
        # Control flow operators
        # Two variants of cond_if: one that generates one of two constant tensors (no
        # inputs to the basic blocks, one output) and another that either adds or subtracts two tensors
        # (two inputs to the basic blocks, one output)
        "cond_if_const": {
            "op": Op.COND_IF,
            "operands": (0, 2),
            "build_fcn": (
                build_cond_if_const,
                TosaTensorGen.tgBasic,
                TosaArgGen.agCondIf,
            ),
            "types": [DType.BOOL],
        },
        "cond_if_binary": {
            "op": Op.COND_IF,
            "operands": (2, 0),
            "build_fcn": (
                build_cond_if_binary,
                TosaTensorGen.tgBasic,
                TosaArgGen.agCondIf,
            ),
            "types": TYPE_FI32,
        },
        # while_loop
        "while_loop": {
            "op": Op.WHILE_LOOP,
            "operands": (0, 1),
            "build_fcn": (
                build_while_loop,
                TosaTensorGen.tgBasic,
                TosaArgGen.agWhileLoop,
            ),
            "types": [DType.INT32],
        },
    }


class OutputShaper:
    # Methods in this class compute the expected output shape and datatype
    # for common classes of operations
    def __init__(self):
        pass

    # These methods return arguments that can be used for
    # creating a new output tensor
    @staticmethod
    def binaryBroadcastOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            if a.shape[i] == 1:
                shape.append(b.shape[i])
            else:
                shape.append(a.shape[i])

        return ser.addOutput(shape, a.dtype)

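    # Example (illustrative): a.shape = [1, 3] and b.shape = [4, 3] give an
    # output shape of [4, 3]. Note that, as written, only size-1 dimensions
    # of `a` are expanded; the tensor generators are expected to place the
    # broadcast dimension on the first operand.
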
    @staticmethod
    def binaryNonBroadcastOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            assert a.shape[i] == b.shape[i]
            shape.append(a.shape[i])

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def unaryOp(ser, a):
        return ser.addOutput(a.shape, a.dtype)

    @staticmethod
    def selectOp(ser, cond, a, b):
        assert len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            shape.append(max(cond.shape[i], a.shape[i], b.shape[i]))

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def binaryComparisonOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        # Do broadcast
        shape = []
        for i in range(len(a.shape)):
            if a.shape[i] == 1:
                shape.append(b.shape[i])
            else:
                shape.append(a.shape[i])

        # Force the output type to bool
        return ser.addOutput(shape, DType.BOOL)

    @staticmethod
    def reduceOp(ser, a, axis):

        shape = a.shape.copy()

        shape[axis] = 1

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def argmaxOp(ser, a, axis):
        shape = a.shape.copy()
        del shape[axis]
        return ser.addOutput(shape, DType.INT32)

    @staticmethod
    def conv2dOp(ser, ifm, filter, strides, padding, dilations):

        # IFM: NHWC
        # Filter: OHWI
        # OFM: NHWC

        if len(padding) == 2:
            # Expand padding to 4 parameters in the case of transpose_conv2d
            # From H,W to T,B,L,R
            padding = [padding[0], padding[0], padding[1], padding[1]]

        h = (
            ifm.shape[1]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        w = (
            ifm.shape[2]
            - filter.shape[2]
            - (filter.shape[2] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1

        if h <= 0 or w <= 0:
            # Invalid test parameters?
            h = 0
            w = 0
            ser.setExpectedReturnCode(
                TosaReturnCode.UNPREDICTABLE, "Invalid combination of conv2d parameters"
            )

        ofm_shape = [ifm.shape[0], h, w, filter.shape[0]]

        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)

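    # Worked example (illustrative): ifm NHWC [1, 32, 32, 8], filter OHWI
    # [16, 3, 3, 8], strides [1, 1], padding [0, 0, 0, 0], dilations [1, 1]:
    # h = (32 - 3 - 0 + 0 + 0) // 1 + 1 = 30 and likewise w = 30, so the
    # OFM shape is [1, 30, 30, 16].
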
    @staticmethod
    def depthwiseConv2dOp(ser, ifm, filter, strides, padding, dilations):
        # IFM: NHWC
        # Filter: HWCM
        # OFM: NHW C*M
        h = (
            ifm.shape[1]
            - filter.shape[0]
            - (filter.shape[0] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        w = (
            ifm.shape[2]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1

        if h <= 0 or w <= 0:
            # Invalid test parameters?
            h = 0
            w = 0
            ser.setExpectedReturnCode(
                TosaReturnCode.UNPREDICTABLE,
                "Invalid combination of depthwise_conv2d parameters",
            )

        ofm_shape = [ifm.shape[0], h, w, filter.shape[2] * filter.shape[3]]

        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)

    @staticmethod
    def pool2dOp(ser, ifm, kernel, stride, pad):
        # input: NHWC
        h = (ifm.shape[1] + pad[0] + pad[1] + stride[0] - kernel[0]) // stride[0]
        w = (ifm.shape[2] + pad[2] + pad[3] + stride[1] - kernel[1]) // stride[1]

        if h <= 0 or w <= 0:
            # Invalid test parameters?
            h = 0
            w = 0
            ser.setExpectedReturnCode(
                TosaReturnCode.UNPREDICTABLE, "Invalid combination of pool2d parameters"
            )

        ofm_shape = [ifm.shape[0], h, w, ifm.shape[3]]
        return ser.addOutput(ofm_shape, ifm.dtype)

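    # Worked example (illustrative): ifm [1, 32, 32, 8], kernel [2, 2],
    # stride [2, 2], pad [0, 0, 0, 0]: h = (32 + 0 + 0 + 2 - 2) // 2 = 16,
    # likewise w = 16, so the OFM shape is [1, 16, 16, 8].
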
    @staticmethod
    def fullyConnectedOp(ser, input, filter):
        # input: N, IC
        # filter: OC, IC
        # output: N, OC

        output_shape = [input.shape[0], filter.shape[0]]

        if input.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif input.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif input.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(input.dtype))

        return ser.addOutput(output_shape, out_dtype)

    @staticmethod
    def matmulOp(ser, a, b):
        # a: N, H, C
        # b: N, C, W
        # out: N, H, W

        output_shape = [a.shape[0], a.shape[1], b.shape[2]]

        if a.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif a.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif a.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype for matmul: {}".format(a.dtype))

        return ser.addOutput(output_shape, out_dtype)

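    # Example (illustrative): a of shape [N, H, C] = [2, 3, 5] multiplied by
    # b of shape [N, C, W] = [2, 5, 7] yields [N, H, W] = [2, 3, 7].
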
    @staticmethod
    def concatOp(ser, axis, *a):
        input1 = a[0]
        remaining_inputs = a[1:]

        output_shape = input1.shape.copy()

        for tensor in remaining_inputs:
            output_shape[axis] += tensor.shape[axis]

        return ser.addOutput(output_shape, input1.dtype)

    @staticmethod
    def padOp(ser, a, padding):

        output_shape = a.shape.copy()

        for i in range(len(output_shape)):
            output_shape[i] = padding[i][0] + padding[i][1] + output_shape[i]

        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def reshapeOp(ser, a, shape):
        output_shape = shape.copy()

        totalElements = 1
        for i in a.shape:
            totalElements *= i

        # If there are any -1 elements, figure out what that dimension must be
        totalOutputElements = 1
        for i in output_shape:
            if i != -1:
                totalOutputElements *= i

        # And fill it in
        for i in range(len(output_shape)):
            if output_shape[i] == -1:
                output_shape[i] = totalElements // totalOutputElements

        return ser.addOutput(output_shape, a.dtype)

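    # Example (illustrative): reshaping a [2, 3, 4] input (24 elements) with
    # shape [4, -1] infers the -1 dimension as 24 // 4 = 6, giving [4, 6].
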
    @staticmethod
    def sliceOp(ser, a, begin, size):
        # The output shape is just the requested size; begin only
        # positions the window within the input

        output_shape = size.copy()
        return ser.addOutput(output_shape, a.dtype)

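    # Illustrative example (added for clarity, not in the original source):
    # slicing an [8, 8] tensor with begin=[2, 0] and size=[4, 6] yields an
    # output of shape [4, 6]; begin does not affect the output shape.
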
    @staticmethod
    def tileOp(ser, a, multiples):

        output_shape = a.shape.copy()
        assert len(multiples) == len(output_shape)

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[i] * multiples[i]

        return ser.addOutput(output_shape, a.dtype)

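    # Illustrative example (added for clarity, not in the original source):
    # tiling a [2, 3] tensor with multiples=[2, 4] yields an output of shape
    # [2 * 2, 3 * 4] = [4, 12].
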
    @staticmethod
    def transposeOp(ser, a, perms):
        output_shape = a.shape.copy()
        assert len(perms) == len(output_shape)

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[perms[i]]

        return ser.addOutput(output_shape, a.dtype)

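    # Illustrative example (added for clarity, not in the original source):
    # transposing a [2, 3, 4] tensor with perms=[2, 0, 1] yields an output of
    # shape [4, 2, 3], since output dimension i takes its size from input
    # dimension perms[i].
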
    @staticmethod
    def gatherOp(ser, values, indices):
        # values: N, K, C
        # indices: N, W
        # output: N, W, C
        assert len(values.shape) == 3
        assert len(indices.shape) == 2
        assert values.shape[0] == indices.shape[0]

        output_shape = [values.shape[0], indices.shape[1], values.shape[2]]

        return ser.addOutput(output_shape, values.dtype)

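    # Illustrative example (added for clarity, not in the original source):
    # gathering from values of shape [2, 10, 4] with indices of shape [2, 3]
    # yields an output of shape [2, 3, 4]: the K dimension (10) is replaced
    # by the W dimension of the indices (3).
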
    @staticmethod
    def scatterOp(ser, values_in, indices, input):
        assert len(values_in.shape) == 3
        assert len(indices.shape) == 2
        assert len(input.shape) == 3
        assert values_in.shape[0] == indices.shape[0]  # N
        assert input.shape[1] == indices.shape[1]  # W
        assert values_in.shape[2] == input.shape[2]  # C

        output_shape = values_in.shape

        return ser.addOutput(output_shape, values_in.dtype)

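    # Illustrative example (added for clarity, not in the original source):
    # scattering input of shape [2, 3, 4] into values_in of shape [2, 10, 4]
    # using indices of shape [2, 3] yields an output matching values_in,
    # [2, 10, 4]; scatter never changes the shape of the tensor it updates.
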
    @staticmethod
    def tableOp(ser, input, table_dtype):
        # Same shape as the input, but dtype dependent on table dtype
        assert table_dtype == DType.INT16 or table_dtype == DType.INT8
        output_dtype = DType.INT32 if table_dtype == DType.INT16 else DType.INT8
        return ser.addOutput(input.shape, output_dtype)

    @staticmethod
    def resizeOp(
        ser,
        input,
        mode,
        stride,
        offset,
        shift,
        stride_fp,
        offset_fp,
        output_dims,
        input_dtype,
        output_dtype,
    ):

        output_dims = [input.shape[0], output_dims[0], output_dims[1], input.shape[3]]

        if input_dtype == DType.FLOAT:
            if stride_fp[0] <= 0 or stride_fp[1] <= 0:
                ser.setExpectedReturnCode(
                    TosaReturnCode.ERROR, "Negative or zero stride"
                )
        else:
            if stride[0] <= 0 or stride[1] <= 0:
                ser.setExpectedReturnCode(
                    TosaReturnCode.ERROR, "Negative or zero stride"
                )

        if mode == ResizeMode.BILINEAR:
            if input_dtype == DType.INT8:
                if output_dtype != DType.INT32:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            elif input_dtype == DType.INT16:
                if output_dtype != DType.INT48:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            elif input_dtype == DType.FLOAT:
                if output_dtype != DType.FLOAT:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            else:
                ser.setExpectedReturnCode(
                    TosaReturnCode.ERROR, "Invalid input data type"
                )

        elif mode == ResizeMode.NEAREST:
            if input_dtype == DType.INT8:
                if output_dtype != DType.INT8:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            elif input_dtype == DType.INT16:
                if output_dtype != DType.INT16:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            elif input_dtype == DType.FLOAT:
                if output_dtype != DType.FLOAT:
                    ser.setExpectedReturnCode(
                        TosaReturnCode.ERROR, "Invalid output data type"
                    )
            else:
                ser.setExpectedReturnCode(
                    TosaReturnCode.ERROR, "Invalid input data type"
                )

        else:
            ser.setExpectedReturnCode(TosaReturnCode.ERROR, "Invalid resize mode")

        return ser.addOutput(output_dims, output_dtype)

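    # Summary (added for clarity, not in the original source) of the
    # mode/dtype pairs the checks above accept without flagging an error:
    #   BILINEAR: INT8 -> INT32, INT16 -> INT48, FLOAT -> FLOAT
    #   NEAREST:  INT8 -> INT8,  INT16 -> INT16, FLOAT -> FLOAT
    # Any other combination marks the test as expecting TosaReturnCode.ERROR.
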
    @staticmethod
    def typeConversionOp(ser, val, out_dtype):
        # Shape is unchanged; only the element dtype changes
        return ser.addOutput(val.shape, out_dtype)

    @staticmethod
    def transposeConv2DOp(ser, ifm, output_shape):
        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        if output_shape[1] <= 0 or output_shape[2] <= 0:
            ser.setExpectedReturnCode(
                TosaReturnCode.UNPREDICTABLE, "Negative output shape"
            )

        return ser.addOutput(output_shape, out_dtype)
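
    # Note (added for clarity, not in the original source): output_shape is
    # supplied explicitly by the caller for TRANSPOSE_CONV2D, and a
    # non-positive output height or width marks the test as expecting
    # TosaReturnCode.UNPREDICTABLE rather than a hard error.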