#!/usr/bin/env python3

# Copyright (c) 2020-2021, ARM Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import argparse
import sys
import re
import os
import subprocess
import shlex
import json
import glob
import math
import queue
import threading
import traceback
import itertools

from enum import IntEnum, Enum, unique
from tosa_ref_run import TosaReturnCode

# Include the ../thirdparty/serialization_lib/python directory in PYTHONPATH
parent_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(
    os.path.join(parent_dir, "..", "thirdparty", "serialization_lib", "python")
)
import tosa_serializer as ts
from tosa_serializer import *
import tosa

# Convenience variables to the flatc-generated types that should be enums, but aren't
DType = tosa.DType.DType()
Op = tosa.Op.Op()
ResizeMode = tosa.ResizeMode.ResizeMode()

class TosaQuantGen:
    """QuantizedInfo random generator helper functions.
    Specify with 'qgen': in the operator definition."""

    def __init__(self):
        pass

    @staticmethod
    def getQinfo(testGen, dtype):
        if dtype == DType.INT8:
            return testGen.randInt(-128, 128)
        if dtype == DType.UINT8:
            return testGen.randInt(0, 256)
        return 0

    @staticmethod
    def qgUnary(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.UnaryQuantInfo(
            TosaQuantGen.getQinfo(testGen, dtype), TosaQuantGen.getQinfo(testGen, dtype)
        )
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype_or_dtypeList):
        qinfo = ts.TosaSerializerQuantInfo()
        if isinstance(dtype_or_dtypeList, list):
            # a list of [input, weights, accumulator] dtypes
            dtypeList = dtype_or_dtypeList
        else:
            # an int, so the [input, weights, accumulator] dtypes are all the same
            dtypeList = [dtype_or_dtypeList] * 3
        input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0])
        weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1])
        qinfo.ConvQuantInfo(input_zp, weights_zp)
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.MatMulQuantInfo(
            TosaQuantGen.getQinfo(testGen, dtype), TosaQuantGen.getQinfo(testGen, dtype)
        )
        return qinfo

    @staticmethod
    def qgPad(testGen, op, dtype):
        qinfo = ts.TosaSerializerQuantInfo()
        qinfo.PadQuantInfo(TosaQuantGen.getQinfo(testGen, dtype))
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        # Derived from computeMultiplierAndShiftTosaScale32
        # Provide a floating-point scaling factor and the scale32 parameter
        # to compute the multiplier and shift

        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert multiplier <= (1 << scaleBits)

        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits
        # print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(scaleFp, scaleBits, m, multiplier, shift))

        # Adjust multiplier such that shift is in the allowed value range.
        if shift == 0:
            multiplier = multiplier // 4
            shift = shift + 2
        elif shift == 1:
            multiplier = multiplier // 2
            shift = shift + 1
        elif shift == 63:
            multiplier = multiplier * 2
            shift = shift - 1

        assert multiplier <= (1 << scaleBits)
        assert shift >= 2 and shift <= 62

        return multiplier, shift

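# Illustrative example for computeMultiplierAndShift: for scaleFp = 0.8 with
# scale32=True, math.frexp(0.8) returns (0.8, 0), so the multiplier is
# round(0.8 * (1 << 31)) = 1717986918 and the returned shift is 0 + 31 = 31;
# a runtime computing (x * 1717986918) >> 31 then approximates x * 0.8.
#
#     TosaQuantGen.computeMultiplierAndShift(0.8, True)  # -> (1717986918, 31)
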
class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const
    tensor data operands for the operator.  The actual random data is
    generated separately for each test."""

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank):
        pl, const = opName["operands"]

        assert rank == 4

        shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            shape[0] = (shape[0] % testGen.args.max_batch_size) + 1

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgScatter(testGen, opName, rank):
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        assert rank == 3

        values_in_shape = testGen.makeShape(rank)

        # Ignore max batch size if a target shape is set
        if testGen.args.max_batch_size and not testGen.args.target_shapes:
            values_in_shape[0] = (values_in_shape[0] % testGen.args.max_batch_size) + 1

        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        # Constrict W if one dimension is too large to keep tensor size reasonable
        if max(values_in_shape) > 5000:
            W = testGen.randInt(0, 16)

        input_shape = [values_in_shape[0], W, values_in_shape[2]]

        shape_list = []
        shape_list.append(values_in_shape.copy())
        shape_list.append(input_shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank):
        shape = testGen.makeShape(rank)

        pl, const = op["operands"]

        shape_list = []

        # Choose one of the inputs to broadcast
        bcast_idx = testGen.randInt(0, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If this is the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list

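    # Illustrative example for tgBroadcastFuzz: with rank 3, two operands
    # (pl=2, const=0) and a base shape of [4, 5, 6], a possible result is
    # [[4, 5, 6], [4, 1, 6]]: the second input was chosen for broadcasting
    # and its dimension 1 was fuzzed down to 1.
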
    @staticmethod
    def tgConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Get the filter height/width from the operator parameters
        # Filter is H, W, C, M
        filter_hw = op["filter"]

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (
            testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgFullyConnected(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 2

        input_shape = testGen.makeShape(rank)
        filter_oc = testGen.rng.integers(
            low=testGen.args.tensor_shape_range[0],
            high=testGen.args.tensor_shape_range[1],
            size=1,
        )[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank):
        pl, const = op["operands"]

        assert rank == 3
        assert pl == 2 and const == 0

        a_shape = testGen.makeShape(rank)
        # Get a random number for b_oc even if a target shape is defined
        b_oc = np.int32(
            testGen.rng.integers(
                low=testGen.args.tensor_shape_range[0],
                high=testGen.args.tensor_shape_range[1],
                size=1,
            )
        )[0]
        # If N or H is large, let b_oc be 1 to reduce the output tensor size
        if max(a_shape) > 1000:
            b_oc = 1

        b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])
        return [a_shape, b_shape]

    @staticmethod
    def tgConcat(testGen, opName, rank):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Create extra tensors to concat.
        # Take into account the value of pl when getting the maximum number of concats
        num_tensors = testGen.randInt(0, 4)
        shape_list = []
        for i in range(pl + const + num_tensors):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgConcatConstInput(testGen, shapeList, axis):
        # Split the concat shape along the axis to allow for multiple const
        # inputs without making too many large tensors
        shape = shapeList[0]
        if len(shapeList) == 2 or shape[axis] < len(shapeList):
            return shapeList

        new_shapeList = [shape.copy()]
        length_on_axis = shape[axis]
        remaining_length = length_on_axis
        for i in range(len(shapeList) - 2):
            # Calculate the split on the axis and the remaining value
            split_shape_val = int(shape[axis] / 2)
            remaining_length = remaining_length - split_shape_val

            # Append the new shape, and set the remaining shape
            shape[axis] = split_shape_val
            new_shapeList.append(shape.copy())
            shape[axis] = remaining_length
            if i == len(shapeList) - 3:
                new_shapeList.append(shape.copy())

        return new_shapeList

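    # Illustrative example for tgConcatConstInput: given four identical shapes
    # [2, 8, 3] and axis=1, the axis length is halved on each pass and the
    # function returns [[2, 8, 3], [2, 4, 3], [2, 2, 3], [2, 2, 3]], where the
    # later axis lengths split the original 8 into 4 + 2 + 2.
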

class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for
    operators that take attributes or other parameters.  The return value is a
    list of (descriptive_name, [arglist]) tuples where the descriptive_name is
    appended to the test name and the arglist is expanded as arguments to the
    operator build function."""

    def __init__(self):
        pass

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype):
        """A trivial argument generator for operators that don't take any
        non-tensor arguments"""
        return [("", [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype):
        """Build the axis argument for operators that take a single axis"""
        axes = []

        shape = shapeList[0]

        for a in range(0, len(shape)):
            axes.append(("axis{}".format(a), [a]))
        return axes

    @staticmethod
    def agConv2D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert len(ifm_shape) == 4
        assert len(filter_shape) == 4

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for padding in range(0, (maxPadding) ** 4):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    p = [
                        (padding // (maxPadding * 4)) % maxPadding,
                        (padding // (maxPadding * 2)) % maxPadding,
                        (padding // (maxPadding * 1)) % maxPadding,
                        padding % maxPadding,
                    ]
                    d = [dilation // maxDilation + 1, dilation % maxDilation + 1]

                    # 4 padding parameters for regular conv2d
                    arg_list.append(
                        (
                            "st{}{}_pad{}{}{}{}_dilat{}{}".format(
                                s[0], s[1], p[0], p[1], p[2], p[3], d[0], d[1]
                            ),
                            [s, p, d],
                        )
                    )
        return arg_list

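    # Illustrative decoding example for the loops above: with
    # max_conv_stride = 2, the stride index runs 0..3 and
    # s = [stride // 2 + 1, stride % 2 + 1] yields the four stride pairs
    # [1, 1], [1, 2], [2, 1], [2, 2]; the padding and dilation loop indices
    # are unpacked into p and d analogously.
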
    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        assert len(ifm_shape) == 4
        assert len(filter_shape) == 4

        maxStride = testGen.args.max_conv_stride
        maxPadding = testGen.args.max_conv_padding + 1
        maxDilation = testGen.args.max_conv_dilation

        # Strides, padding, dilations
        for stride in range(0, maxStride ** 2):
            for out_padding in range(0, (maxPadding) ** 2):
                for dilation in range(0, maxDilation ** 2):

                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    p = [
                        (out_padding // (maxPadding * 1)) % maxPadding,
                        out_padding % maxPadding,
                    ]
                    d = [dilation // maxDilation + 1, dilation % maxDilation + 1]

                    oh = (
                        ifm_shape[1]
                        - filter_shape[1]
                        - (filter_shape[1] - 1) * (d[0] - 1)
                        + 2 * p[0]
                    ) // s[0] + 1

                    ow = (
                        ifm_shape[2]
                        - filter_shape[2]
                        - (filter_shape[2] - 1) * (d[1] - 1)
                        + 2 * p[1]
                    ) // s[1] + 1

                    # Output shape
                    os = [ifm_shape[0], oh, ow, filter_shape[0]]

                    arg_list.append(
                        (
                            "st{}{}_outpad{}{}_dilat{}{}_os{}x{}x{}x{}".format(
                                s[0],
                                s[1],
                                p[0],
                                p[1],
                                d[0],
                                d[1],
                                os[0],
                                os[1],
                                os[2],
                                os[3],
                            ),
                            [s, p, d, os],
                        )
                    )

        return arg_list

    @staticmethod
    def agPad(testGen, opName, shapeList, dtype):
        arg_list = []
        rank = len(shapeList[0])

        # Exhaustively test combinations of padding on each side of each dimension
        # - the range of padding values is defined by pad_min and pad_max
        # - for padding >9, the name format needs to be more distinctive
        pad_min, pad_max = 0, 1
        pad_values = [x for x in range(pad_min, pad_max + 1)]
        axis_pad_values = [x for x in itertools.product(pad_values, pad_values)]
        shape_pad_values = itertools.product(*([axis_pad_values] * rank))

        for paddings in shape_pad_values:
            name = "pad"
            for r in range(rank):
                before, after = paddings[r]
                name = f"{name}{before}{after}"
            arg_list.append((name, [np.array(paddings)]))

        return arg_list

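    # Illustrative example for agPad: with pad_min, pad_max = 0, 1 and a
    # rank-2 input, axis_pad_values is [(0, 0), (0, 1), (1, 0), (1, 1)], so
    # the generator emits 4 ** 2 = 16 argument tuples named "pad0000"
    # through "pad1111".
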
    @staticmethod
    def agPooling(testGen, opName, shapeList, dtype):
        arg_list = []

        shape = shapeList[0]
        assert len(shape) == 4

        maxStride = testGen.args.max_pooling_stride
        maxKernel = testGen.args.max_pooling_kernel
        maxPadding = testGen.args.max_pooling_padding + 1

        for kernel in range(0, maxKernel ** 2):
            for stride in range(0, maxStride ** 2):
                for padding in range(0, maxPadding ** 4):
                    s = [stride // maxStride + 1, stride % maxStride + 1]
                    k = [(kernel // maxKernel) + 2, (kernel % maxKernel) + 2]
                    p = [
                        (padding // (maxPadding * 4)) % maxPadding,
                        (padding // (maxPadding * 2)) % maxPadding,
                        (padding // (maxPadding * 1)) % maxPadding,
                        padding % maxPadding,
                    ]

                    arg_list.append(
                        (
                            "st{}{}_kern{}{}_pad{}{}{}{}".format(
                                s[0], s[1], k[0], k[1], p[0], p[1], p[2], p[3]
                            ),
                            [s, p, k],
                        )
                    )
        return arg_list

    @staticmethod
    def agCast(testGen, opName, shapeList, inDtype):
        arg_list = []

        # Enumerate the output types here
        if inDtype == DType.INT8:
            dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT16:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT32:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        elif inDtype == DType.BOOL:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif inDtype == DType.FLOAT:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        else:
            raise Exception("Unexpected input dtype: {}".format(inDtype))

        for dtype in dtypeList:
            arg_list.append(("out{}".format(DTypeNames[dtype]), [dtype]))

        return arg_list

    @staticmethod
    def agRescale(testGen, opName, shapeList, inDtype):
        arg_list = []

        # Enumerate the output types here
        for dtype in [DType.UINT8, DType.INT8, DType.INT16, DType.INT32]:
            if inDtype == DType.UINT8 and dtype != DType.INT8:
                # The only output dtype for UINT8 input is INT8, skip all other combinations
                continue
            if inDtype != DType.INT8 and dtype == DType.UINT8:
                # The only input dtype that can produce UINT8 output is INT8, skip all other combinations
                continue

            for scale32 in [False, True]:
                for double_round in [False, True]:
                    for per_channel in [False, True]:

                        if inDtype == DType.INT48 and scale32:
                            # Illegal condition.  Must be scale32=False
                            continue
                        if double_round and not scale32:
                            # Illegal condition.  ERROR_IF(!scale32 && double_round)
                            continue

                        arg_list.append(
                            (
                                "out{}_sc{}_dr{}_pc{}".format(
                                    DTypeNames[dtype],
                                    int(scale32),
                                    int(double_round),
                                    int(per_channel),
                                ),
                                [dtype, scale32, double_round, per_channel],
                            )
                        )

        return arg_list

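    # Illustrative example for agRescale: with inDtype = DType.INT16, the
    # INT8 output with scale32=True, double_round=True and per_channel=False
    # is emitted as ("out{}_sc1_dr1_pc0".format(DTypeNames[DType.INT8]),
    # [DType.INT8, True, True, False]); double_round=True with scale32=False
    # is always skipped as an illegal combination.
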
    @staticmethod
    def agMul(testGen, opName, shapeList, dtype):
        arg_list = []

        if dtype == DType.INT32:
            for p in range(testGen.args.num_rand_permutations):

                shift = testGen.randInt(0, 32)

                arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
        else:
            arg_list.append(("perm0_shift0", [0]))

        return arg_list

    @staticmethod
    def agArithmeticRightShift(testGen, opName, shapeList, dtype):
        arg_list = []

        arg_list.append(("roundTrue", [True]))
        arg_list.append(("roundFalse", [False]))

        return arg_list

    # Helper function for reshape.  Gets some factors of a larger number.
    @staticmethod
    def getFactors(val, start=1):
        factors = []

        for i in range(start, int(np.sqrt(val)) + 1):
            if (val % i) == 0:
                factors.append(i)

        return factors

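    # Illustrative example for getFactors: getFactors(24) returns
    # [1, 2, 3, 4], i.e. only the factors up to sqrt(24); agReshape recovers
    # the co-factor as the remaining element count when it builds a new shape.
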
    @staticmethod
    def agReshape(testGen, opName, shapeList, dtype):
        arg_list = []

        origShape = shapeList[0]

        totalElements = 1
        for s in origShape:
            totalElements *= s

        # This code is NOT fast.  Fortunately, the numbers are fairly small.
        factors = TosaArgGen.getFactors(totalElements)

        for p in range(testGen.args.num_rand_permutations):
            newRank = testGen.randInt(1, 7)
            if len(factors) < newRank:
                continue

            found = True
            # escape_counter breaks the while loop if it runs for too long
            escape_counter = 0
            while found:
                newShape = []
                # Generate newShape, ensuring it isn't a duplicate
                remainingElements = totalElements
                shuffledFactors = testGen.rng.permutation(factors)
                for i in range(1, newRank):
                    # pick rank - 1 factors
                    newShape.append(shuffledFactors[0])
                    remainingElements = remainingElements // shuffledFactors[0]
                    shuffledFactors = testGen.rng.permutation(
                        TosaArgGen.getFactors(remainingElements)
                    )
                newShape.append(remainingElements)

                # Toss in a -1 sometimes
                minusOne = testGen.randInt(0, newRank * 4)
                if minusOne < newRank:
                    newShape[minusOne] = -1

                # Check for duplicates
                found = False
                for name, other_shape in arg_list:
                    if other_shape[0] == newShape:
                        found = True
                        break

                escape_counter += 1
                if escape_counter >= 100:
                    break

            if not found:
                arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))

        return arg_list

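    # Illustrative example for agReshape: for origShape [2, 3, 4]
    # (24 elements), one pass might pick newRank = 2 and emit
    # ("perm0_rank2", [[4, 6]]); with probability newRank / (newRank * 4)
    # = 1/4 one dimension is replaced by -1, e.g. [[4, -1]], exercising the
    # inferred-dimension path of RESHAPE.
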
    @staticmethod
    def agTranspose(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]

        # Get all permutations
        permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]

        # Limit to possible permutations from shape dimension or argument setting
        limit = min(len(permutations), testGen.args.num_rand_permutations)

        # Get a random permutation generator that uses all permutations
        random_permutations = testGen.rng.permutation(permutations)

        # Create a list of the required number of permutations
        arg_list = [
            ("perm{}".format(p), [random_permutations[p].tolist()])
            for p in range(limit)
        ]
        return arg_list

    @staticmethod
    def agSlice(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):
            begin = []
            size = []

            valid = True

            for i in range(rank):
                if ifm_shape[i] > 1:
                    begin.append(testGen.randInt(0, ifm_shape[i]))
                    size.append(testGen.randInt(0, ifm_shape[i] - begin[i]))

                    # Invalid slice size?
                    if size[i] == 0:
                        valid = False
                else:
                    begin.append(0)
                    size.append(1)

            if valid:
                arg_list.append(("perm{}".format(p), [begin, size]))
        return arg_list

    @staticmethod
    def agTile(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):

            # Pick a few random, but small multiple values
            # because otherwise this has a tendency to generate
            # enormous tensors
            multiples = []
            for i in range(rank):
                if ifm_shape[i] > 1000:
                    # Multiple of 1 if the ifm_shape dimension is large, to reduce tensor size
                    multiples.append(1)
                elif max(ifm_shape) > 1000:
                    multiples.append(2)
                else:
                    multiples.append(testGen.randInt(1, 4))
            arg_list.append(("perm{}".format(p), [multiples]))

        return arg_list

    @staticmethod
    def agResize(testGen, opName, shapeList, dtype):
        arg_list = []

        ifm_shape = shapeList[0]

        for m in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:

            # Exclude illegal {mode, type} configurations.  Pick legal output types
            if m == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif m == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif m == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            elif dtype == DType.FLOAT:
                outputDTypeList = [DType.FLOAT]
            else:
                continue

            for outputDType in outputDTypeList:
                for perm in range(testGen.args.num_rand_permutations):

                    # Randomly generate legal output dimensions and shift
                    # and then compute the stride and offset based on them
                    output_dims = [testGen.randInt(1), testGen.randInt(1)]
                    in_center_h = (ifm_shape[1] - 1) / 2.0
                    in_center_w = (ifm_shape[2] - 1) / 2.0
                    out_center_h = (output_dims[0] - 1) / 2.0
                    out_center_w = (output_dims[1] - 1) / 2.0

                    fp_stride_y = float(ifm_shape[1]) / float(output_dims[0])
                    fp_stride_x = float(ifm_shape[2]) / float(output_dims[1])
                    fp_offset_y = in_center_h - fp_stride_y * out_center_h
                    fp_offset_x = in_center_w - fp_stride_x * out_center_w

                    if outputDType == DType.FLOAT:
                        shift = 0
                        stride = [0, 0]
                        offset = [0, 0]
                        stride_fp = [fp_stride_y, fp_stride_x]
                        offset_fp = [fp_offset_y, fp_offset_x]
                        arg_list.append(
                            (
                                "mode{}_odim{}x{}_out{}_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}".format(
                                    "N" if m == ResizeMode.NEAREST else "B",
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride_fp[0],
                                    stride_fp[1],
                                    offset_fp[0],
                                    offset_fp[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )
                    else:
                        shift = 11
                        unit = float(1 << shift)
                        stride_y = int(round(fp_stride_y * unit))
                        stride_x = int(round(fp_stride_x * unit))
                        offset_y = int(round(fp_offset_y * unit))
                        offset_x = int(round(fp_offset_x * unit))

                        while (
                            stride_y >= 32768
                            or stride_x >= 32768
                            or offset_y >= 32768
                            or offset_x >= 32768
                            or offset_y < -32768
                            or offset_x < -32768
                        ):
                            shift = shift - 1
                            unit = float(1 << shift)
                            stride_y = int(round(fp_stride_y * unit))
                            stride_x = int(round(fp_stride_x * unit))
                            offset_y = int(round(fp_offset_y * unit))
                            offset_x = int(round(fp_offset_x * unit))

                        stride = [stride_y, stride_x]
                        offset = [offset_y, offset_x]

                        stride_fp = [0.0, 0.0]
                        offset_fp = [0.0, 0.0]

                        arg_list.append(
                            (
                                "mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}".format(
                                    "N" if m == ResizeMode.NEAREST else "B",
                                    shift,
                                    output_dims[0],
                                    output_dims[1],
                                    testGen.typeStr(outputDType),
                                    stride[0],
                                    stride[1],
                                    offset[0],
                                    offset[1],
                                ),
                                [
                                    m,
                                    stride,
                                    offset,
                                    shift,
                                    stride_fp,
                                    offset_fp,
                                    output_dims,
                                    dtype,
                                    outputDType,
                                ],
                            )
                        )

        return arg_list

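    # Illustrative example for the fixed-point path in agResize: resizing a
    # 16-row input to 10 output rows gives fp_stride_y = 16 / 10 = 1.6; with
    # the initial shift of 11, stride_y = round(1.6 * 2048) = 3277, which
    # fits the signed 16-bit window, so the shift-reduction loop never runs.
    # The offsets are quantized with the same unit.
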
    @staticmethod
    def agCondIf(testGen, opName, shapeList, dtype):
        # CondIf generates the condition values here.
        # Convert to tensors in the build function, along with the
        # then and else blocks
        arg_list = []

        for c in [False, True]:
            arg_list.append(("cond{}".format(int(c)), [c]))

        return arg_list

    @staticmethod
    def agWhileLoop(testGen, opName, shapeList, dtype):
        # While loop: 0 iterations, 1, more than 1
        arg_list = []

        for iter in [0, 1, 4]:
            arg_list.append(("iter{}".format(iter), [iter]))

        return arg_list


class TosaInvalidValidator:

    @staticmethod
    def ivWrongDataTypeOrModeResize(**kwargs):
        input_dtype = kwargs["input_dtype"]
        args = kwargs["args"]
        mode = args[0]
        stride = args[1]
        stride_fp = args[4]
        output_dtype = args[8]

        if mode == ResizeMode.BILINEAR:
            # Invalid output data type / invalid input datatype
            return not (
                (input_dtype == DType.INT8 and output_dtype == DType.INT32)
                or (input_dtype == DType.INT16 and output_dtype == DType.INT48)
                or (input_dtype == DType.FLOAT and output_dtype == DType.FLOAT)
            )
        elif mode == ResizeMode.NEAREST:
            # Invalid output data type / invalid input datatype
            return (input_dtype != output_dtype) or (
                input_dtype not in [DType.INT8, DType.INT16, DType.FLOAT]
            )
        else:
            # Invalid resize mode
            return True

    @staticmethod
    def ivBadStride(**kwargs):
        input_dtype = kwargs["input_dtype"]
        args = kwargs["args"]
        stride_x = args[1][0]
        stride_y = args[1][1]
        stride_fp_x = args[4][0]
        stride_fp_y = args[4][1]

        if input_dtype == DType.FLOAT:
            if stride_fp_x <= 0 or stride_fp_y <= 0:
                # Negative or zero stride
                return True
        else:
            if stride_x <= 0 or stride_y <= 0:
                # Negative or zero stride
                return True
        return False

    @staticmethod
    def ivHeightWidthSmallerZero(**kwargs):
        opName = kwargs["opName"]

        inputShapes = kwargs["shapeList"]
        input = inputShapes[0]
        if not opName.endswith("pool2d"):
            filter = inputShapes[1]

        args = kwargs["args"]
        strides = args[0]
        padding = args[1]
        dilations = args[2]
        if opName.endswith("pool2d"):
            kernel = args[2]

        if opName.startswith("conv2d"):
            h = (
                input[1]
                - filter[1]
                - (filter[1] - 1) * (dilations[0] - 1)
                + padding[0]
                + padding[1]
            ) // strides[0] + 1

            w = (
                input[2]
                - filter[2]
                - (filter[2] - 1) * (dilations[1] - 1)
                + padding[2]
                + padding[3]
            ) // strides[1] + 1
        elif opName.startswith("depthwise_conv2d"):
            h = (
                input[1]
                - filter[0]
                - (filter[0] - 1) * (dilations[0] - 1)
                + padding[0]
                + padding[1]
            ) // strides[0] + 1

            w = (
                input[2]
                - filter[1]
                - (filter[1] - 1) * (dilations[1] - 1)
                + padding[2]
                + padding[3]
            ) // strides[1] + 1
        elif opName.endswith("pool2d"):
            h = (input[1] + padding[0] + padding[1] + strides[0] - kernel[0]) // strides[0]
            w = (input[2] + padding[2] + padding[3] + strides[1] - kernel[1]) // strides[1]
        else:
            assert False, "Unrecognized Op"

        if h <= 0 or w <= 0:
            # Invalid parameter combination
            return True
        return False

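    # Illustrative example for ivHeightWidthSmallerZero: a pool2d with input
    # height 4, zero padding, stride 2 and kernel 5 gives
    # h = (4 + 0 + 0 + 2 - 5) // 2 = 0, so the argument combination is
    # reported as invalid.
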
    @staticmethod
    def ivNonPositiveOutputShape(**kwargs):
        args = kwargs["args"]
        output_shape = args[3]
        if output_shape[1] <= 0 or output_shape[2] <= 0:
            # Non-positive output shape
            return True
        return False


class TosaTestGen:
    # Maximum rank of tensor supported by test generator.
    TOSA_TENSOR_MAX_RANK = 6

    def __init__(self, args):
        self.args = args
        self.basePath = args.output_dir
        self.random_seed = args.random_seed
        self.ser = None
        self.rng = np.random.default_rng(self.random_seed)
        self.createDynamicOpLists()
        self.initOpListDefaults()
        self.quantGen = TosaQuantGen()
        # Force makeShape to do a specific starting shape
        self.targetted_shape = None

    def createSerializer(self, opName, testPath):
        self.testPath = os.path.join(opName, testPath)

        fullPath = os.path.join(self.basePath, self.testPath)
        os.makedirs(fullPath, exist_ok=True)
        self.ser = ts.TosaSerializer(fullPath)

    def getSerializer(self):
        return self.ser

    def serialize(self, testName):
        with open(
            os.path.join(self.basePath, self.testPath, "{}.tosa".format(testName)), "wb"
        ) as fd:
            fd.write(self.ser.serialize())

        with open(os.path.join(self.basePath, self.testPath, "desc.json"), "w") as fd:
            fd.write(self.ser.writeJson("{}.tosa".format(testName)))

    def resetRNG(self, seed=None):
        if seed is None:
            seed = self.random_seed + 1
        self.rng = np.random.default_rng(seed)

    def getRandTensor(self, shape, dtype):
        if dtype == DType.BOOL:
            return np.bool_(self.rng.choice(a=[False, True], size=shape))
        # TOSA specific INT4 weight range from -7 to 7
        elif dtype == DType.INT4:
            return np.int32(self.rng.integers(low=-7, high=8, size=shape))
        elif dtype == DType.INT8:
            return np.int32(self.rng.integers(low=-128, high=128, size=shape))
        elif dtype == DType.UINT8:
            return np.int32(self.rng.integers(low=0, high=256, size=shape))
        elif dtype == DType.INT16:
            return np.int32(self.rng.integers(low=-32768, high=32768, size=shape))
        elif dtype == DType.INT32:
            return np.int32(
                self.rng.integers(low=-(1 << 31), high=(1 << 31), size=shape)
            )
        elif dtype == DType.INT48:
            return np.int64(
                self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape)
            )
        elif dtype == DType.FLOAT:
            return np.float32(self.rng.random(size=shape))
        else:
            raise Exception("Unrecognized Dtype: {}".format(dtype))

    def buildPlaceholderTensors(self, shape_list, dtype_list):
        placeholders = []

        assert len(shape_list) == len(dtype_list)

        for idx, shape in enumerate(shape_list):
            arr = self.getRandTensor(shape, dtype_list[idx])
            placeholders.append(self.ser.addPlaceholder(shape, dtype_list[idx], arr))

        return placeholders

    def buildConstTensors(self, shape_list, dtype_list):
        consts = []

        assert len(shape_list) == len(dtype_list)

        for idx, shape in enumerate(shape_list):
            arr = self.getRandTensor(shape, dtype_list[idx])
            consts.append(self.ser.addConst(shape, dtype_list[idx], arr))

        return consts

    def makeShape(self, rank):
        if self.targetted_shape:
            return np.int32(self.targetted_shape)
        return np.int32(
            self.rng.integers(
                low=self.args.tensor_shape_range[0],
                high=self.args.tensor_shape_range[1],
                size=rank,
            )
        )

    def setTargetShape(self, shape):
        self.targetted_shape = shape

    def randInt(self, low=0, high=256):
        return np.int32(self.rng.integers(low=low, high=high, size=1))[0]

    def getRandNumberDType(self, dtype):
        if dtype == DType.FLOAT:
            return self.rng.random()
        elif dtype == DType.BOOL:
            return self.rng.choice([False, True])
        # TOSA specific INT4 weight range from -7 to 7
        elif dtype == DType.INT4:
            low, high = (-7, 8)
        elif dtype == DType.INT8:
            low, high = (-128, 128)
        elif dtype == DType.INT16:
            low, high = (-32768, 32768)
        elif dtype == DType.INT32:
            low, high = (-(1 << 31), (1 << 31))
        elif dtype == DType.INT48:
            low, high = (-(1 << 47), (1 << 47))
            # Special size
            return np.int64(self.rng.integers(low, high, size=1))[0]
        else:
            raise Exception("Unknown dtype: {}".format(dtype))

        return np.int32(self.rng.integers(low, high, size=1))[0]

    def shapeStr(self, shape):

        sStr = []
        # Convert to strings
        for i in shape:
            sStr.append(str(i))

        return "x".join(sStr)

    def typeStr(self, t):
        if isinstance(t, list):
            assert len(t) >= 2
            return "{}x{}".format(self.typeStr(t[0]), self.typeStr(t[1]))
        else:
            if t == DType.BOOL:
                return "b"
            elif t == DType.INT4:
                return "i4"
            elif t == DType.INT8:
                return "i8"
            elif t == DType.UINT8:
                return "u8"
            elif t == DType.INT16:
                return "i16"
            elif t == DType.INT32:
                return "i32"
            elif t == DType.INT48:
                return "i48"
            elif t == DType.FLOAT:
                return "float"
            else:
                raise Exception("Unknown dtype, cannot convert to string: {}".format(t))

    def typeWidth(self, t):
        """Get the datatype width for integer types"""
        if t == DType.INT4:
            return 4
        elif t == DType.INT8:
            return 8
        elif t == DType.UINT8:
            return 8
        elif t == DType.INT16:
            return 16
        elif t == DType.INT32:
            return 32
        elif t == DType.INT48:
            return 48
        else:
            raise Exception("Unknown dtype, cannot determine width: {}".format(t))

1259 # Argument generators
1260 # Returns a list of tuples (stringDescriptor, [build_fcn_arg_list])
1261 # Where the string descriptor is used to generate the test name and
1262 # The build_fcn_arg_list is expanded and passed to the operator test
1263 # build function
1264
Kevin Cheng550ccc52021-03-03 11:21:43 -08001265 def build_unary(self, op, a, qinfo=None):
Eric Kunzee5e26762020-10-13 16:11:07 -07001266 result_tens = OutputShaper.unaryOp(self.ser, a)
1267 self.ser.addOperator(op, [a.name], [result_tens.name], None, qinfo)
1268 return result_tens
1269
1270 def build_binary_broadcast(self, op, a, b):
1271 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1272 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1273 return result_tens
1274
1275 def build_binary_nonbroadcast(self, op, a, b):
1276 result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b)
1277 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1278 return result_tens
1279
Kevin Chengaee1fac2020-11-11 13:54:06 -08001280 def build_arithmetic_right_shift(self, op, a, b, round):
1281 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1282
1283 attr = ts.TosaSerializerAttribute()
1284 attr.ArithmeticRightShiftAttribute(round)
1285
1286 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
1287 return result_tens
1288
1289 def build_mul(self, op, a, b, shift):
Eric Kunzee5e26762020-10-13 16:11:07 -07001290 result_tens = OutputShaper.binaryBroadcastOp(self.ser, a, b)
1291
1292 # Special for multiply:
1293 # Force the result to INT32 for INT types
1294 if a.dtype != DType.FLOAT:
1295 result_tens.setDtype(DType.INT32)
1296
Kevin Chengaee1fac2020-11-11 13:54:06 -08001297 attr = ts.TosaSerializerAttribute()
1298 attr.MulAttribute(shift)
1299
1300 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], attr)
Eric Kunzee5e26762020-10-13 16:11:07 -07001301 return result_tens
1302
1303 def build_table(self, op, a):
Jeremy Johnsonf54d8a22021-07-20 16:01:06 +01001304 # Constant size depending on type, random values
1305 if a.dtype == DType.INT16:
Kevin Chengacb550f2021-06-29 15:32:19 -07001306 table_dtype = DType.INT16
Jeremy Johnsonf54d8a22021-07-20 16:01:06 +01001307 table_arr = self.getRandTensor([513], table_dtype)
1308 else:
1309 assert a.dtype == DType.INT8
1310 table_dtype = DType.INT8
1311 table_arr = self.getRandTensor([256], table_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001312
Jeremy Johnsonf54d8a22021-07-20 16:01:06 +01001313 table_tens = self.ser.addConst(table_arr.shape, table_dtype, table_arr)
1314 result_tens = OutputShaper.tableOp(self.ser, a, table_dtype)
Eric Kunzee5e26762020-10-13 16:11:07 -07001315 self.ser.addOperator(op, [a.name, table_tens.name], [result_tens.name], None)
1316
1317 return result_tens
1318
1319 def build_select(self, op, cond, a, b):
Eric Kunzee5e26762020-10-13 16:11:07 -07001320 result_tens = OutputShaper.selectOp(self.ser, cond, a, b)
1321 self.ser.addOperator(op, [cond.name, a.name, b.name], [result_tens.name])
Eric Kunzee5e26762020-10-13 16:11:07 -07001322 return result_tens
1323
1324 def build_comparison(self, op, a, b):
1325 result_tens = OutputShaper.binaryComparisonOp(self.ser, a, b)
1326 self.ser.addOperator(op, [a.name, b.name], [result_tens.name])
1327 return result_tens
1328
1329 def build_argmax(self, op, a, axis):
1330 result_tens = OutputShaper.argmaxOp(self.ser, a, axis)
1331
1332 attr = ts.TosaSerializerAttribute()
1333 attr.AxisAttribute(axis)
1334
1335 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1336 return result_tens
1337
Matthew Haddonb724efc2021-08-25 16:40:29 +01001338 def build_pool2d(self, op, input, stride, pad, kernel, qinfo=None):
Eric Kunzee5e26762020-10-13 16:11:07 -07001339 result_tens = OutputShaper.pool2dOp(self.ser, input, kernel, stride, pad)
1340
1341 attr = ts.TosaSerializerAttribute()
1342 attr.Pool2dAttribute(kernel, stride, pad)
Eric Kunzee5e26762020-10-13 16:11:07 -07001343
1344 self.ser.addOperator(op, [input.name], [result_tens.name], attr, qinfo)
1345 return result_tens
1346
1347 def build_conv2d(self, op, ifm, filter, bias, strides, padding, dilations, qinfo):
Kevin Cheng550ccc52021-03-03 11:21:43 -08001348 assert len(padding) == 4
1349 result_tens = OutputShaper.conv2dOp(
1350 self.ser, ifm, filter, strides, padding, dilations
1351 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001352
1353 attr = ts.TosaSerializerAttribute()
1354 attr.Conv2dAttribute(padding, strides, dilations)
1355
Kevin Cheng550ccc52021-03-03 11:21:43 -08001356 self.ser.addOperator(
1357 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
1358 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001359 return result_tens
1360
Kevin Cheng550ccc52021-03-03 11:21:43 -08001361 def build_transpose_conv2d(
Kevin Cheng989cb052021-04-28 16:29:44 -07001362 self, op, ifm, filter, bias, stride, outpad, dilation, output_shape, qinfo
Kevin Cheng550ccc52021-03-03 11:21:43 -08001363 ):
1364 assert len(outpad) == 2
Eric Kunzee5e26762020-10-13 16:11:07 -07001365 result_tens = OutputShaper.transposeConv2DOp(self.ser, ifm, output_shape)
1366
1367 attr = ts.TosaSerializerAttribute()
1368 attr.TransposeConv2DAttribute(outpad, stride, dilation, output_shape)
1369
Kevin Cheng550ccc52021-03-03 11:21:43 -08001370 self.ser.addOperator(
Kevin Cheng989cb052021-04-28 16:29:44 -07001371 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
Kevin Cheng550ccc52021-03-03 11:21:43 -08001372 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001373 return result_tens
1374
Kevin Cheng550ccc52021-03-03 11:21:43 -08001375 def build_depthwise_conv2d(
1376 self, op, ifm, filter, bias, strides, padding, dilations, qinfo
1377 ):
1378 result_tens = OutputShaper.depthwiseConv2dOp(
1379 self.ser, ifm, filter, strides, padding, dilations
1380 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001381
1382 attr = ts.TosaSerializerAttribute()
1383 attr.Conv2dAttribute(padding, strides, dilations)
1384
Kevin Cheng550ccc52021-03-03 11:21:43 -08001385 self.ser.addOperator(
1386 op, [ifm.name, filter.name, bias.name], [result_tens.name], attr, qinfo
1387 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001388 return result_tens
1389
1390 def build_fully_connected(self, op, ifm, filter, bias, qinfo):
1391 result_tens = OutputShaper.fullyConnectedOp(self.ser, ifm, filter)
1392
Kevin Cheng550ccc52021-03-03 11:21:43 -08001393 self.ser.addOperator(
1394 op, [ifm.name, filter.name, bias.name], [result_tens.name], None, qinfo
1395 )
Eric Kunzee5e26762020-10-13 16:11:07 -07001396 return result_tens
1397
1398 def build_matmul(self, op, a, b, qinfo):
1399 result_tens = OutputShaper.matmulOp(self.ser, a, b)
1400 self.ser.addOperator(op, [a.name, b.name], [result_tens.name], None, qinfo)
1401 return result_tens
1402
1403 def build_reduce(self, op, a, axis):
1404 result_tens = OutputShaper.reduceOp(self.ser, a, axis)
1405
1406 attr = ts.TosaSerializerAttribute()
1407 attr.AxisAttribute(axis)
1408
1409 self.ser.addOperator(op, [a.name], result_tens.name, attr)
1410 return result_tens
1411
1412 def build_clamp(self, op, a):
1413 result_tens = OutputShaper.unaryOp(self.ser, a)
1414
1415 attr = ts.TosaSerializerAttribute()
Jeremy Johnson18e26662021-07-22 16:15:29 +01001416 v = [self.getRandNumberDType(a.dtype), self.getRandNumberDType(a.dtype)]
Eric Kunzee5e26762020-10-13 16:11:07 -07001417
1418 if a.dtype == DType.FLOAT:
1419 attr.ClampAttribute(0, 0, min(v), max(v))
1420 else:
1421 attr.ClampAttribute(min(v), max(v), 0, 0)
1422
1423 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1424 return result_tens
1425
1426 def build_leaky_relu(self, op, a):
1427 result_tens = OutputShaper.unaryOp(self.ser, a)
1428 attr = ts.TosaSerializerAttribute()
1429
1430 attr.LeakyReluAttribute(self.getRandNumberDType(DType.FLOAT))
1431
1432 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1433 return result_tens
1434
1435 # Needs an additional type/input
1436 def build_prelu(self, op, a):
1437 result_tens = OutputShaper.unaryOp(self.ser, a)
1438
1439 self.ser.addOperator(op, [a.name], [result_tens.name])
1440 return result_tens
1441
1442 def build_relun(self, op, a):
1443 result_tens = OutputShaper.unaryOp(self.ser, a)
1444
1445 attr = ts.TosaSerializerAttribute()
1446
1447 if a.dtype == DType.FLOAT:
1448 attr.ReluNAttribute(0, self.getRandNumberDType(a.dtype))
1449 else:
1450 attr.ReluNAttribute(self.getRandNumberDType(a.dtype), 0)
1451
1452 self.ser.addOperator(op, [a.name], [result_tens.name], attr)
1453 return result_tens
1454
1455 def build_sigmoid(self, op, a):
1456 result_tens = OutputShaper.unaryOp(self.ser, a)
1457 self.ser.addOperator(op, [a.name], [result_tens.name])
1458 return result_tens
1459
1460 def build_tanh(self, op, a):
1461 result_tens = OutputShaper.unaryOp(self.ser, a)
1462 self.ser.addOperator(op, [a.name], [result_tens.name])
1463 return result_tens
1464
    def build_concat(self, op, *a):
        assert isinstance(a[-1], int)

        # To store a variable-length list of input tensors, the axis is
        # passed along with them as the final argument
        axis = a[-1]
        a = a[:-1]

        result_tens = OutputShaper.concatOp(self.ser, axis, *a)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        input_tensor_names = []
        for tensor in a:
            input_tensor_names.append(tensor.name)

        self.ser.addOperator(op, input_tensor_names, [result_tens.name], attr)
        return result_tens

    def build_pad(self, op, a, padding, qinfo):
        result_tens = OutputShaper.padOp(self.ser, a, padding)

        # Need to turn the padding array into a TOSA tensor here.
        # This is one of the few tensor operands that does not get
        # randomly generated.
        padding_tens = self.ser.addConst(padding.shape, DType.INT32, padding)

        self.ser.addOperator(
            op, [a.name, padding_tens.name], [result_tens.name], None, qinfo
        )
        return result_tens

    def build_reshape(self, op, a, newShape):
        result_tens = OutputShaper.reshapeOp(self.ser, a, newShape)

        attr = ts.TosaSerializerAttribute()
        attr.ReshapeAttribute(newShape)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_reverse(self, op, a, axis):
        result_tens = OutputShaper.unaryOp(self.ser, a)

        attr = ts.TosaSerializerAttribute()
        attr.AxisAttribute(axis)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_transpose(self, op, a, perms):
        result_tens = OutputShaper.transposeOp(self.ser, a, perms)

        perms_tens = self.ser.addConst([len(perms)], DType.INT32, np.int32(perms))

        self.ser.addOperator(op, [a.name, perms_tens.name], [result_tens.name])
        return result_tens

    def build_slice(self, op, a, begin, size):
        result_tens = OutputShaper.sliceOp(self.ser, a, begin, size)

        attr = ts.TosaSerializerAttribute()
        attr.SliceAttribute(begin, size)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_tile(self, op, a, multiples):
        result_tens = OutputShaper.tileOp(self.ser, a, multiples)

        attr = ts.TosaSerializerAttribute()
        attr.TileAttribute(multiples)

        self.ser.addOperator(op, [a.name], [result_tens.name], attr)
        return result_tens

    def build_gather(self, op, values):

        # Create a new indices tensor here with data that doesn't exceed
        # the dimensions of the values tensor

        K = values.shape[1]  # K
        W = self.randInt(
            self.args.tensor_shape_range[0], self.args.tensor_shape_range[1]
        )  # W
        indices_arr = np.int32(
            self.rng.integers(low=0, high=K, size=[values.shape[0], W])
        )  # (N, W)
        indices = self.ser.addConst(indices_arr.shape, DType.INT32, indices_arr)

        result_tens = OutputShaper.gatherOp(self.ser, values, indices)

        self.ser.addOperator(op, [values.name, indices.name], [result_tens.name])

        return result_tens

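    # GATHER shapes (rank is pinned to 3 in TOSA_OP_LIST): per the TOSA spec,
    # 'values' is (N, K, C), the generated 'indices' tensor is (N, W), and the
    # gather output is (N, W, C). W is drawn at random above, so the output
    # width is independent of K.
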
    def build_scatter(self, op, values_in, input):

        # Create a new indices tensor here with data that doesn't exceed
        # the dimensions of the values_in tensor

        K = values_in.shape[1]  # K
        W = input.shape[1]  # W
        indices_arr = np.int32(
            self.rng.integers(low=0, high=K, size=[values_in.shape[0], W])
        )  # (N, W)
        indices = self.ser.addConst(indices_arr.shape, DType.INT32, indices_arr)

        result_tens = OutputShaper.scatterOp(self.ser, values_in, indices, input)

        self.ser.addOperator(
            op, [values_in.name, indices.name, input.name], [result_tens.name]
        )

        return result_tens

    def build_resize(
        self,
        op,
        input,
        mode,
        stride,
        offset,
        shift,
        stride_fp,
        offset_fp,
        output_dims,
        input_dtype,
        output_dtype,
    ):
        result_tens = OutputShaper.resizeOp(
            self.ser,
            input,
            mode,
            stride,
            offset,
            shift,
            stride_fp,
            offset_fp,
            output_dims,
            input_dtype,
            output_dtype,
        )

        attr = ts.TosaSerializerAttribute()

        attr.ResizeAttribute(
            output_dims, stride, offset, shift, stride_fp, offset_fp, mode
        )

        self.ser.addOperator(op, [input.name], [result_tens.name], attr)
        return result_tens

    def build_identityn(self, op, val, val2):

        result_tens = OutputShaper.unaryOp(self.ser, val)
        result_tens2 = OutputShaper.unaryOp(self.ser, val2)
        self.ser.addOperator(
            op, [val.name, val2.name], [result_tens.name, result_tens2.name]
        )
        return result_tens

    def build_placeholder(self, op, val):
        # Add an identity op to avoid warning in the reference model
        return self.build_unary(Op.IDENTITY, val)

    # Type Conversion
    def build_cast(self, op, val, out_dtype):
        result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)
        self.ser.addOperator(op, [val.name], [result_tens.name])
        return result_tens

    def build_rescale(self, op, val, out_dtype, scale32, double_round, per_channel):
        result_tens = OutputShaper.typeConversionOp(self.ser, val, out_dtype)

        if per_channel:
            nc = val.shape[-1]
        else:
            nc = 1

        in_type_width = self.typeWidth(val.dtype)
        out_type_width = self.typeWidth(out_dtype)

        # int8/uint8 tensors get a random zero point; a non-zero zero point
        # adds one bit to the effective value range, hence the width adjustment
        if val.dtype == DType.INT8:
            input_zp = self.randInt(-128, 128)
            in_type_width = in_type_width + 1
        elif val.dtype == DType.UINT8:
            input_zp = self.randInt(0, 256)
            in_type_width = in_type_width + 1
        else:
            input_zp = 0

        if out_dtype == DType.INT8:
            output_zp = self.randInt(-128, 128)
            out_type_width = out_type_width + 1
        elif out_dtype == DType.UINT8:
            output_zp = self.randInt(0, 256)
            out_type_width = out_type_width + 1
        else:
            output_zp = 0

        # Calculate scale based on:
        # scale = a * (2^output_width) / (2^input_width)

        a = np.float32(self.rng.random(size=[nc]))
        scale_arr = a * np.float32((1 << out_type_width) / (1 << in_type_width))

        if scale32:
            # Cap the scaling at 2^31 - 1 for scale32
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), (1 << 31) - 1)
        else:
            # Cap the scaling at 2^15 - 1 for scale16
            scale_arr = np.clip(scale_arr, 1.0 / (1 << 31), 32767.0)

        # print('{} {} -> {}'.format(out_type_width, in_type_width, scale_arr))

        multiplier_arr = np.int32(np.zeros(shape=[nc]))
        shift_arr = np.int32(np.zeros(shape=[nc]))

        for i in range(nc):
            multiplier_arr[i], shift_arr[i] = TosaQuantGen.computeMultiplierAndShift(
                scale_arr[i], scale32
            )

        # print('multiplier {} shift {} inzp {} outzp {}'.format(multiplier_arr, shift_arr, input_zp, output_zp))

        attr = ts.TosaSerializerAttribute()
        attr.RescaleAttribute(
            input_zp,
            output_zp,
            multiplier_arr,
            shift_arr,
            scale32,
            double_round,
            per_channel,
        )

        self.ser.addOperator(op, [val.name], [result_tens.name], attr)
        return result_tens

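    # Informal sketch of how the multiplier/shift pair encodes a scale (the
    # authoritative rounding rules live in TosaQuantGen.computeMultiplierAndShift
    # and the TOSA spec): a real scale s is approximated as
    #     s ~= multiplier * 2^(-shift)
    # with the multiplier normalized to use most of its 32 (scale32) or 16
    # (scale16) bits. For example, s = 0.75 can be encoded exactly as
    # multiplier = 0x60000000 (i.e. 0.75 * 2^31) with shift = 31.
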
    def build_cond_if_const(self, op, then_tens, else_tens, cond):
        # For cond_if with constants, we're supplied with then/else tensors that we ignore
        # (except for the generated shape) and the condition. Build Then/Else blocks
        # and fill them with const nodes for the body.

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, [cond])

        # Make then/else tensors
        out_shape = then_tens.shape
        then_arr = np.int32(self.rng.integers(0, 256, size=out_shape))
        else_arr = np.int32(self.rng.integers(0, 256, size=out_shape))

        # And the result tensor based on any of the outputs
        result_tens = self.ser.addOutput(out_shape, DType.INT32)

        # Create the attribute with the names of the then/else blocks
        then_block = "THEN_BLOCK"
        else_block = "ELSE_BLOCK"
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(op, [cond_tens.name], [result_tens.name], attr)

        self.ser.startBasicBlock(then_block)
        # Build the actual then/else tensors inside their blocks
        then_tens = self.ser.addConst(out_shape, DType.INT32, then_arr)
        self.ser.addOutputTensor(then_tens)

        self.ser.startBasicBlock(else_block)
        else_tens = self.ser.addConst(out_shape, DType.INT32, else_arr)
        self.ser.addOutputTensor(else_tens)

        return result_tens

    def build_cond_if_binary(self, op, a, b, cond):
        # For cond_if with a binary op in the then/else blocks, take a and b and
        # alternately add or subtract them based on the condition

        # Condition tensor
        cond_tens = self.ser.addConst([], DType.BOOL, [cond])

        result_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.currBasicBlock.addOutput(result_tens.name)

        # Create the attribute with the names of the then/else blocks
        then_block = "THEN_BLOCK"
        else_block = "ELSE_BLOCK"
        attr = ts.TosaSerializerAttribute()
        attr.CondIfAttribute(then_block, else_block)

        # Finally, build the op and the two blocks
        self.ser.addOperator(
            op, [cond_tens.name, a.name, b.name], [result_tens.name], attr
        )

        self.ser.startBasicBlock(then_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        then_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.addOperator(Op.ADD, [a.name, b.name], [then_tens.name])

        self.ser.startBasicBlock(else_block)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(b)
        else_tens = self.ser.addOutput(a.shape, a.dtype)
        self.ser.addOperator(Op.SUB, [a.name, b.name], [else_tens.name])

        return result_tens

    def build_while_loop(self, op, a, iter_val):
        iter = self.ser.addPlaceholder([], DType.INT32, [np.int32(iter_val)])

        cond_block = "COND_BLOCK"
        body_block = "BODY_BLOCK"

        attr = ts.TosaSerializerAttribute()
        attr.WhileLoopAttribute(cond_block, body_block)

        # Accumulator tensor
        # acc = self.ser.addOutput(a.shape, a.dtype)
        acc_init_val = np.int32(np.zeros(a.shape))
        acc = self.ser.addPlaceholder(a.shape, a.dtype, acc_init_val)

        # Intermediate/output tensors for everything going through the loop
        iter_out = self.ser.addIntermediate(iter.shape, iter.dtype)
        a_out = self.ser.addIntermediate(a.shape, a.dtype)
        acc_out = self.ser.addIntermediate(acc.shape, acc.dtype)

        # While_loop operator
        self.ser.addOperator(
            op,
            [iter.name, a.name, acc.name],
            [iter_out.name, a_out.name, acc_out.name],
            attr,
        )
        self.ser.addOutputTensor(acc_out)

        # COND block (input: iter, output: cond_tens)
        self.ser.startBasicBlock(cond_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        zero_tens = self.ser.addConst([], DType.INT32, [np.int32(0)])
        cond_tens = self.ser.addOutput([], DType.BOOL)
        self.ser.addOperator(Op.GREATER, [iter.name, zero_tens.name], [cond_tens.name])

        # BODY block (input: a, acc, iter, output: a, acc, iter)
        # Note that local intermediate tensors need to be declared here for the outputs
        self.ser.startBasicBlock(body_block)
        self.ser.addInputTensor(iter)
        self.ser.addInputTensor(a)
        self.ser.addInputTensor(acc)
        one_tens = self.ser.addConst([], DType.INT32, [np.int32(1)])
        iter_body_out = self.ser.addIntermediate(iter.shape, iter.dtype)
        acc_body_out = self.ser.addIntermediate(acc.shape, acc.dtype)
        self.ser.addOperator(Op.ADD, [a.name, acc.name], [acc_body_out.name])
        self.ser.addOperator(Op.SUB, [iter.name, one_tens.name], [iter_body_out.name])
        self.ser.addOutputTensor(iter_body_out)
        self.ser.addOutputTensor(a)
        self.ser.addOutputTensor(acc_body_out)

        return acc_out

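    # The generated while_loop computes, in pseudocode:
    #     while iter > 0:
    #         acc += a
    #         iter -= 1
    # so for a trip count of iter_val the final accumulator equals
    # a * iter_val elementwise, which is what the reference output should show.
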
    def genOpTestList(
        self, opName, shapeFilter=[None], rankFilter=None, dtypeFilter=None, testType="positive"
    ):

        try:
            op = self.TOSA_OP_LIST[opName]
        except KeyError as e:
            raise Exception("Cannot find op with name {}".format(opName))

        # Initialize a new random number generator
        self.rng = np.random.default_rng(self.random_seed)

        build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]

        # Generate the lists of arguments
        rmin, rmax = op["rank"]

        # Create a default testing rank range, 1-4 inclusive, to keep test sizes reasonably small
        default_test_rank_range = range(1, 5)

        # Each test list entry is a tuple of:
        # (opName, testNameStr, dtype, shapeList, argumentsList)
        testList = []

        if not shapeFilter:
            shapeFilter = [None]

        # Positive test loop
        if testType in ["positive", "both"]:
            for r in range(rmin, rmax + 1):

                # Filter out the rank?
                if rankFilter is not None and r not in rankFilter:
                    continue
                if (
                    rankFilter is None
                    and shapeFilter[0] is None
                    and r not in default_test_rank_range
                ):
                    continue

                for t in op["types"]:

                    # Filter tests based on dtype?
                    if dtypeFilter is not None:
                        if not (
                            t in dtypeFilter
                            or (isinstance(t, list) and t[0] in dtypeFilter)
                        ):
                            continue

                    # Create the placeholder and const tensors
                    for shape in shapeFilter:
                        # A None shape chooses a random shape of a given rank

                        # Filter out by rank
                        if shape is not None and len(shape) != r:
                            continue

                        self.setTargetShape(shape)
                        shapeList = tgen_fcn(self, op, r)

                        shapeStr = self.shapeStr(shapeList[0])
                        typeStr = self.typeStr(t)

                        # Argument lists consist of tuples of the (str, []) string
                        # representation and the build function argument list
                        argList = []
                        if agen_fcn:
                            argList = agen_fcn(self, opName, shapeList, t)
                        else:
                            argList = [("", [])]

                        for argStr, args in argList:
                            if argStr:
                                testStr = "{}_{}_{}_{}".format(
                                    opName, shapeStr, typeStr, argStr
                                )
                            else:
                                testStr = "{}_{}_{}".format(opName, shapeStr, typeStr)

                            testList.append((opName, testStr, t, shapeList, args))

        # Remove tests which are expected to fail but don't correlate to an ERROR_IF statement
        if "invalid_test_validators" in op:
            invalid_test_validators = op["invalid_test_validators"]
            clean_testList = []
            for test in testList:
                # Initialize the flag once per test so that any validator can
                # mark the test for removal
                remove_test = False
                for validator_fcn in invalid_test_validators:
                    if validator_fcn(
                        opName=test[0],
                        input_dtype=test[2],
                        shapeList=test[3],
                        args=test[4],
                    ):
                        remove_test = True
                if not remove_test:
                    clean_testList.append(test)
            testList = clean_testList

        # Reset RNG so both positive and negative tests are reproducible
        self.resetRNG()
        # Negative test loop
        if testType in ["negative", "both"]:
            print("Negative tests unsupported")

        return testList

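    # A returned testList entry is a tuple of
    #     (opName, testStr, dtype, shapeList, args)
    # e.g. conceptually ("add", "add_<shapeStr>_<typeStr>", DType.INT32,
    # [shapeA, shapeB], []). The shape and type substrings here are
    # placeholders; actual shapes are randomized per seed.
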
    def serializeTest(self, opName, testStr, dtype_or_dtypeList, shapeList, testArgs):
        try:
            op = self.TOSA_OP_LIST[opName]
        except KeyError as e:
            raise Exception("Cannot find op with name {}".format(opName))

        # Create a serializer
        self.createSerializer(opName, testStr)

        build_fcn, tgen_fcn, agen_fcn = op["build_fcn"]
        pCount, cCount = op["operands"]
        num_operands = pCount + cCount

        if isinstance(dtype_or_dtypeList, list):
            dtypeList = dtype_or_dtypeList
        elif op["op"] == Op.CONCAT:
            dtypeList = [dtype_or_dtypeList] * len(shapeList)
        else:
            dtypeList = [dtype_or_dtypeList] * num_operands

        if op["op"] != Op.CONCAT:
            assert (
                len(shapeList) == num_operands
            ), "shapeList length {} must match number of operands {}".format(
                len(shapeList), num_operands
            )
            assert (
                len(dtypeList) == num_operands
            ), "dtypeList length {} must match number of operands {}".format(
                len(dtypeList), num_operands
            )

        try:
            qgen = op["qgen"]
        except KeyError:
            qgen = None

        # Build the random tensor operands and the test
        tens = []

        # If test is ArithmeticRightShift, force value of operand[1] to be within [0, num_bits]
        if op["op"] == Op.ARITHMETIC_RIGHT_SHIFT:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"

            placeholders = []
            for idx, shape in enumerate(shapeList[:]):
                if idx == 1:
                    if dtypeList[idx] == DType.INT8:
                        arr = np.int32(self.rng.integers(low=0, high=8, size=shape))
                    elif dtypeList[idx] == DType.INT16:
                        arr = np.int32(self.rng.integers(low=0, high=16, size=shape))
                    elif dtypeList[idx] == DType.INT32:
                        arr = np.int32(self.rng.integers(low=0, high=32, size=shape))
                    else:
                        raise Exception("OpArithmeticRightShift: invalid input dtype")
                else:
                    arr = self.getRandTensor(shape, dtypeList[idx])
                placeholders.append(self.ser.addPlaceholder(shape, dtypeList[idx], arr))

            tens.extend(placeholders)
        elif op["op"] == Op.SELECT:
            # Set datatype of condition tensor to boolean
            dtypeList[0] = DType.BOOL
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
            )
            tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))
        elif op["op"] == Op.INTDIV:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.INTDIV must have 2 placeholders, 0 consts"

            placeholders = []

            # Two invalid cases for Op.INTDIV:
            # 1. divisor == 0
            # 2. dividend == -(1<<31) and divisor == -1
            while True:
                dividend_arr = self.getRandTensor(shapeList[0], dtypeList[0])
                divisor_arr = self.getRandTensor(shapeList[1], dtypeList[1])

                if (divisor_arr == 0).any():
                    continue

                if (dividend_arr == -(2 ** 31)).any() and (divisor_arr == -1).any():
                    continue

                break

            placeholders.append(
                self.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
            )
            placeholders.append(
                self.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
            )

            tens.extend(placeholders)
        elif op["op"] == Op.MUL:
            assert (
                pCount == 2 and cCount == 0
            ), "Op.MUL must have 2 placeholders, 0 consts"

            if dtypeList[0] == DType.FLOAT:
                tens.extend(self.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
            else:
                placeholders = []

                # Make sure the multiply result fits in the int32 range
                shift = testArgs[0]
                if dtypeList[0] == DType.INT8:
                    num_bits = 8
                elif dtypeList[0] == DType.INT16:
                    num_bits = 16
                elif dtypeList[0] == DType.INT32:
                    num_bits = 32
                else:
                    raise Exception("OpMul: invalid input dtype")

                low = -(2 ** (num_bits - 1))
                high = (2 ** (num_bits - 1)) - 1

                a_arr = np.int32(
                    self.rng.integers(low=low, high=high, size=shapeList[0])
                )
                b_arr = np.int32(
                    self.rng.integers(low=low, high=high, size=shapeList[1])
                )

                # Halve both operands until the (rounded, shifted) product
                # fits in the int32 range
                while True:
                    a_arr_64 = a_arr.astype(np.int64)
                    b_arr_64 = b_arr.astype(np.int64)

                    if shift > 0:
                        rounding = 1 << (shift - 1)
                        result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
                    else:
                        result_arr = a_arr_64 * b_arr_64

                    if (result_arr > -(2 ** 31)).all() and (
                        result_arr <= ((2 ** 31) - 1)
                    ).all():
                        break

                    a_arr = a_arr // 2
                    b_arr = b_arr // 2

                placeholders.append(
                    self.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
                )
                placeholders.append(
                    self.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
                )

                tens.extend(placeholders)
        elif op["op"] == Op.CONCAT:
            count = len(shapeList) - self.args.num_const_inputs_concat
            if count < 1:
                count = 1
            if self.args.num_const_inputs_concat == 0:
                count = len(shapeList)

            shapeList = TosaTensorGen.tgConcatConstInput(self, shapeList, testArgs[0])
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:count], dtypeList[0:count])
            )
            tens.extend(self.buildConstTensors(shapeList[count:], dtypeList[count:]))
        else:
            tens.extend(
                self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
            )
            tens.extend(self.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))

        if qgen is not None:
            qinfo = qgen(self, op, dtype_or_dtypeList)
        else:
            qinfo = None

        try:
            if qinfo is not None:
                resultName = build_fcn(self, op["op"], *tens, *testArgs, qinfo)
            else:
                resultName = build_fcn(self, op["op"], *tens, *testArgs)
        except TypeError as e:
            print(
                "build_fcn: {}\nTensors: {}\nArgs: {}\n".format(
                    build_fcn, tens, testArgs
                )
            )
            raise e

        # Save the serialized test
        self.serialize("test")

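    # Typical driver flow (sketch; the actual command-line handling lives
    # elsewhere in this script): instantiate the generator, expand the
    # templated ops, then generate and serialize each test, roughly:
    #     ttg.createDynamicOpLists()
    #     for opName, testStr, dtype, shapes, args in ttg.genOpTestList("add"):
    #         ttg.serializeTest(opName, testStr, dtype, shapes, args)
    # ('ttg' being an instance of this class; the name is illustrative.)
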
    def createDynamicOpLists(self):

        # Dynamically create op lists for convolutions with a list of kernel sizes
        KERNELS = [[1, 1], [2, 2], [3, 3], [5, 5], [3, 1], [1, 3]]

        for k in KERNELS:
            testName = "conv2d_{}x{}".format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST["conv2d_TEMPLATE"].copy()
            self.TOSA_OP_LIST[testName]["filter"] = k
            self.TOSA_OP_LIST[testName]["template"] = False

            testName = "depthwise_conv2d_{}x{}".format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
                "depthwise_conv2d_TEMPLATE"
            ].copy()
            self.TOSA_OP_LIST[testName]["filter"] = k
            self.TOSA_OP_LIST[testName]["template"] = False

            testName = "transpose_conv2d_{}x{}".format(k[0], k[1])
            self.TOSA_OP_LIST[testName] = self.TOSA_OP_LIST[
                "transpose_conv2d_TEMPLATE"
            ].copy()
            self.TOSA_OP_LIST[testName]["filter"] = k
            self.TOSA_OP_LIST[testName]["template"] = False

        # Delete any templates after having created any dynamic ops
        # This is a two-pass operation because it's bad practice to delete
        # keys from dictionaries while iterating
        keyList = []
        for k in self.TOSA_OP_LIST:
            try:
                if self.TOSA_OP_LIST[k]["template"]:
                    keyList.append(k)
                    continue
            except KeyError:
                pass

        for k in keyList:
            del self.TOSA_OP_LIST[k]

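    # After createDynamicOpLists() runs, the *_TEMPLATE entries are gone and
    # concrete per-kernel entries exist instead, e.g. "conv2d_1x1",
    # "conv2d_3x3", "depthwise_conv2d_5x5", "transpose_conv2d_1x3".
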
    def initOpListDefaults(self):
        """Fill in default fields for ops if they aren't already specified.
        Look for missing required fields (datastructure linting)."""
        for op in self.TOSA_OP_LIST:

            # Required fields
            try:
                pl, c = self.TOSA_OP_LIST[op]["operands"]
            except (KeyError, ValueError, TypeError):
                raise Exception(
                    "Op {} is missing a valid operand tuple in TOSA_OP_LIST".format(op)
                )

            try:
                fcn, tgen, arggen = self.TOSA_OP_LIST[op]["build_fcn"]
            except (KeyError, ValueError, TypeError):
                raise Exception(
                    "Op {} is missing a valid build_fcn tuple in TOSA_OP_LIST".format(
                        op
                    )
                )

            try:
                types = self.TOSA_OP_LIST[op]["types"]
            except KeyError as e:
                raise Exception(
                    "Op {} is missing a valid type list in TOSA_OP_LIST".format(op)
                )

            try:
                opcode = self.TOSA_OP_LIST[op]["op"]
            except KeyError as e:
                raise Exception(
                    "Op {} is missing the Op field in TOSA_OP_LIST".format(op)
                )

            # Put in default rank range, if missing
            try:
                rank = self.TOSA_OP_LIST[op]["rank"]
            except KeyError:
                self.TOSA_OP_LIST[op]["rank"] = self.DEFAULT_RANK_RANGE

    # Tensor operator list
    # 'op': op name
    # 'operands': tuple of (placeholder, const) operands
    # 'rank': optional, restricts rank to tuple inclusive of (min, max);
    #         if not specified, defaults to DEFAULT_RANK_RANGE (test generation
    #         further trims this to ranks 1-4 unless a rank/shape filter is given)
    # 'build_fcn': tuple of (build_operator function, TensorGen function, ArgGen function)
    # 'types': array of datatypes to be tested
    TYPE_FP = [DType.FLOAT]

    TYPE_INT = [DType.INT8, DType.INT16, DType.INT32]  # Excludes INT4
    TYPE_INT_FP = [DType.INT8, DType.INT16, DType.INT32, DType.FLOAT]  # Excludes INT4

    TYPE_BOOL = [DType.BOOL]
    TYPE_FI32 = [DType.FLOAT, DType.INT32]
    TYPE_FIB = [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL]
    TYPE_FI16 = [DType.FLOAT, DType.INT16]

    TYPE_NARROW_INT_FP = [DType.INT8, DType.INT16, DType.FLOAT]

    # Per-entry dtypes for convolution-style ops: [input, weights, accumulator]
    TYPE_CONV2D = [
        [DType.INT8, DType.INT4, DType.INT32],
        [DType.INT8, DType.INT8, DType.INT32],
        [DType.INT16, DType.INT8, DType.INT48],
        DType.FLOAT,
    ]

    DEFAULT_RANK_RANGE = (1, TOSA_TENSOR_MAX_RANK)

    TOSA_OP_LIST = {
        # Tensor operators
        "argmax": {
            "op": Op.ARGMAX,
            "operands": (1, 0),
            "build_fcn": (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_NARROW_INT_FP,
        },
        "avg_pool2d": {
            "op": Op.AVG_POOL2D,
            "operands": (1, 0),
            "rank": (4, 4),
            "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
            "qgen": TosaQuantGen.qgUnary,
            "types": TYPE_NARROW_INT_FP,
            "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,),
        },
        # Templated operator. Filled in by createDynamicOpLists
        "conv2d_TEMPLATE": {
            "op": Op.CONV2D,
            "operands": (1, 2),
            "rank": (4, 4),
            "build_fcn": (build_conv2d, TosaTensorGen.tgConv2D, TosaArgGen.agConv2D),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV2D,
            "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,),
            "template": True,
        },
        # Conv3d TBD
        # Templated operator. Filled in by createDynamicOpLists
        "depthwise_conv2d_TEMPLATE": {
            "op": Op.DEPTHWISE_CONV2D,
            "operands": (1, 2),
            "filter": [1, 1],
            "rank": (4, 4),
            "build_fcn": (
                build_depthwise_conv2d,
                TosaTensorGen.tgDepthwiseConv2D,
                TosaArgGen.agConv2D,
            ),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV2D,
            "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,),
            "template": True,
        },
        "fully_connected": {
            "op": Op.FULLY_CONNECTED,
            "operands": (1, 2),
            "rank": (2, 2),
            "build_fcn": (build_fully_connected, TosaTensorGen.tgFullyConnected, None),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV2D,
        },
        "matmul": {
            "op": Op.MATMUL,
            "operands": (2, 0),
            "rank": (3, 3),
            "build_fcn": (build_matmul, TosaTensorGen.tgMatmul, None),
            "qgen": TosaQuantGen.qgMatmul,
            "types": TYPE_NARROW_INT_FP,
        },
        "max_pool2d": {
            "op": Op.MAX_POOL2D,
            "operands": (1, 0),
            "rank": (4, 4),
            "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling),
            "types": TYPE_NARROW_INT_FP,
            "invalid_test_validators": (TosaInvalidValidator.ivHeightWidthSmallerZero,),
        },
        # Templated operator. Filled in by createDynamicOpLists
        "transpose_conv2d_TEMPLATE": {
            "op": Op.TRANSPOSE_CONV2D,
            "operands": (1, 2),
            "rank": (4, 4),
            "build_fcn": (
                build_transpose_conv2d,
                TosaTensorGen.tgTransposeConv2D,
                TosaArgGen.agTransposeConv2D,
            ),
            "qgen": TosaQuantGen.qgConv,
            "types": TYPE_CONV2D,
            "invalid_test_validators": (TosaInvalidValidator.ivNonPositiveOutputShape,),
            "template": True,
        },
        # Activation functions
        "clamp": {
            "op": Op.CLAMP,
            "operands": (1, 0),
            "build_fcn": (build_clamp, TosaTensorGen.tgBasic, None),
            "types": TYPE_NARROW_INT_FP,
        },
        "relun": {
            "op": Op.RELUN,
            "operands": (1, 0),
            "build_fcn": (build_relun, TosaTensorGen.tgBasic, None),
            "types": TYPE_FI32,
        },
        "sigmoid": {
            "op": Op.SIGMOID,
            "operands": (1, 0),
            "build_fcn": (build_sigmoid, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "tanh": {
            "op": Op.TANH,
            "operands": (1, 0),
            "build_fcn": (build_tanh, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        # Elementwise Binary Operators
        "add": {
            "op": Op.ADD,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "arithmetic_right_shift": {
            "op": Op.ARITHMETIC_RIGHT_SHIFT,
            "operands": (2, 0),
            "build_fcn": (
                build_arithmetic_right_shift,
                TosaTensorGen.tgBroadcastFuzz,
                TosaArgGen.agArithmeticRightShift,
            ),
            "types": TYPE_INT,
        },
        "bitwise_and": {
            "op": Op.BITWISE_AND,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "bitwise_or": {
            "op": Op.BITWISE_OR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "bitwise_xor": {
            "op": Op.BITWISE_XOR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "intdiv": {
            "op": Op.INTDIV,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": [DType.INT32],
        },
        "logical_and": {
            "op": Op.LOGICAL_AND,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_BOOL,
        },
        "logical_left_shift": {
            "op": Op.LOGICAL_LEFT_SHIFT,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "logical_right_shift": {
            "op": Op.LOGICAL_RIGHT_SHIFT,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_INT,
        },
        "logical_or": {
            "op": Op.LOGICAL_OR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_BOOL,
        },
        "logical_xor": {
            "op": Op.LOGICAL_XOR,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_BOOL,
        },
        "maximum": {
            "op": Op.MAXIMUM,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "minimum": {
            "op": Op.MINIMUM,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "mul": {
            "op": Op.MUL,
            "operands": (2, 0),
            "build_fcn": (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul),
            "types": TYPE_INT_FP,
        },
        "pow": {
            "op": Op.POW,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "sub": {
            "op": Op.SUB,
            "operands": (2, 0),
            "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "table": {
            "op": Op.TABLE,
            # Use the automatic generation functions to create the input array
            # but create the table tensor in the build function, as it may be
            # a different type from the input
            "operands": (1, 0),
            "build_fcn": (build_table, TosaTensorGen.tgBasic, None),
            "types": [DType.INT8, DType.INT16],
        },
        # Elementwise Unary operators
        "abs": {
            "op": Op.ABS,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FI32,
        },
        "bitwise_not": {
            "op": Op.BITWISE_NOT,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_INT,
        },
        "ceil": {
            "op": Op.CEIL,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "clz": {
            "op": Op.CLZ,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": [DType.INT32],
        },
        "exp": {
            "op": Op.EXP,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "floor": {
            "op": Op.FLOOR,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "log": {
            "op": Op.LOG,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "logical_not": {
            "op": Op.LOGICAL_NOT,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_BOOL,
        },
        "negate": {
            "op": Op.NEGATE,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "qgen": TosaQuantGen.qgUnary,
            "types": TYPE_INT_FP,
        },
        "reciprocal": {
            "op": Op.RECIPROCAL,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        "rsqrt": {
            "op": Op.RSQRT,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FP,
        },
        # Elementwise Ternary operators
        "select": {
            "op": Op.SELECT,
            "operands": (3, 0),
            "build_fcn": (build_select, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FIB,
        },
        # Comparison operators
        "equal": {
            "op": Op.EQUAL,
            "operands": (2, 0),
            "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "greater_equal": {
            "op": Op.GREATER_EQUAL,
            "operands": (2, 0),
            "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        "greater": {
            "op": Op.GREATER,
            "operands": (2, 0),
            "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None),
            "types": TYPE_FI32,
        },
        # Reduction operators
        "reduce_all": {
            "op": Op.REDUCE_ALL,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_BOOL,
        },
        "reduce_any": {
            "op": Op.REDUCE_ANY,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_BOOL,
        },
        "reduce_max": {
            "op": Op.REDUCE_MAX,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_INT_FP,
        },
        "reduce_min": {
            "op": Op.REDUCE_MIN,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_INT_FP,
        },
        "reduce_product": {
            "op": Op.REDUCE_PRODUCT,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_FP,
        },
        "reduce_sum": {
            "op": Op.REDUCE_SUM,
            "operands": (1, 0),
            "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_FI32,
        },
        # Data layout operators
        "concat": {
            "op": Op.CONCAT,
            "operands": (2, 0),
            "build_fcn": (build_concat, TosaTensorGen.tgConcat, TosaArgGen.agAxis),
            "types": TYPE_FIB,
        },
        "pad": {
            "op": Op.PAD,
            "operands": (1, 0),
            "build_fcn": (build_pad, TosaTensorGen.tgBasic, TosaArgGen.agPad),
            "qgen": TosaQuantGen.qgPad,
            "types": TYPE_FIB,
        },
        "reshape": {
            "op": Op.RESHAPE,
            "operands": (1, 0),
            "build_fcn": (build_reshape, TosaTensorGen.tgBasic, TosaArgGen.agReshape),
            "types": TYPE_FIB,
        },
        "reverse": {
            "op": Op.REVERSE,
            "operands": (1, 0),
            "build_fcn": (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis),
            "types": TYPE_FIB,
        },
        "slice": {
            "op": Op.SLICE,
            "operands": (1, 0),
            "build_fcn": (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice),
            "types": TYPE_FIB,
        },
        "tile": {
            "op": Op.TILE,
            "operands": (1, 0),
            "build_fcn": (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile),
            "types": TYPE_FIB,
        },
        "transpose": {
            "op": Op.TRANSPOSE,
            "operands": (1, 0),
            "rank": (1, 4),
            "build_fcn": (
                build_transpose,
                TosaTensorGen.tgBasic,
                TosaArgGen.agTranspose,
            ),
            "types": TYPE_FIB,
        },
        # Data nodes
        "const": {
            "op": Op.CONST,
            "operands": (1, 0),
            "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None),
            "types": TYPE_FIB,
        },
        "identity": {
            "op": Op.IDENTITY,
            "operands": (1, 0),
            "build_fcn": (build_unary, TosaTensorGen.tgBasic, None),
            "types": TYPE_FIB,
        },
        # Scatter/Gather
        "gather": {
            "op": Op.GATHER,
            # Only specify 'values' tensor here. 'indices' is generated in op building stage
            "operands": (1, 0),
            "rank": (3, 3),
            "build_fcn": (build_gather, TosaTensorGen.tgBasic, None),
            "types": TYPE_INT_FP,
        },
        "scatter": {
            "op": Op.SCATTER,
            # Only specify 'values_in' tensor here.
            # 'indices' and 'input' are generated in op building stage
            "operands": (2, 0),
            "rank": (3, 3),
            "build_fcn": (build_scatter, TosaTensorGen.tgScatter, None),
            "types": TYPE_INT_FP,
        },
        # Image operations
        "resize": {
            "op": Op.RESIZE,
            "operands": (1, 0),
            "rank": (4, 4),
            "build_fcn": (build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize),
            "types": [DType.INT8, DType.INT16, DType.FLOAT],
            "invalid_test_validators": (
                TosaInvalidValidator.ivWrongDataTypeOrModeResize,
                TosaInvalidValidator.ivBadStride,
            ),
        },
        # Type conversion
        "cast": {
            "op": Op.CAST,
            "operands": (1, 0),
            "build_fcn": (build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast),
            "types": [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL],
        },
        "rescale": {
            "op": Op.RESCALE,
            "operands": (1, 0),
            "build_fcn": (build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale),
            "types": [DType.UINT8, DType.INT8, DType.INT16, DType.INT32, DType.INT48],
        },
        # Custom
        # Not implemented.
        # Control flow operators
        # Two variants of cond_if, one that generates one of two constant tensors (no
        # inputs to the basic blocks, one output) and another that either adds or subtracts two tensors
        # (two inputs to the basic blocks, one output)
        "cond_if_const": {
            "op": Op.COND_IF,
            "operands": (0, 2),
            "build_fcn": (
                build_cond_if_const,
                TosaTensorGen.tgBasic,
                TosaArgGen.agCondIf,
            ),
            "types": [DType.BOOL],
        },
        "cond_if_binary": {
            "op": Op.COND_IF,
            "operands": (2, 0),
            "build_fcn": (
                build_cond_if_binary,
                TosaTensorGen.tgBasic,
                TosaArgGen.agCondIf,
            ),
            "types": TYPE_FI32,
        },
        # while_loop
        "while_loop": {
            "op": Op.WHILE_LOOP,
            "operands": (0, 1),
            "build_fcn": (
                build_while_loop,
                TosaTensorGen.tgBasic,
                TosaArgGen.agWhileLoop,
            ),
            "types": [DType.INT32],
        },
    }


class OutputShaper:
    # Methods in this class compute the expected output shape and datatype
    # for common classes of operations
    def __init__(self):
        pass

    # These methods return arguments that can be used for
    # creating a new output tensor
    @staticmethod
    def binaryBroadcastOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            if a.shape[i] == 1:
                shape.append(b.shape[i])
            else:
                shape.append(a.shape[i])

        return ser.addOutput(shape, a.dtype)

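    # Example: shapes [1, 3] and [4, 3] broadcast to [4, 3]. Only size-1
    # dimensions of 'a' pick up the matching dimension of 'b', so this helper
    # assumes the tensor generator never produces incompatible shape pairs.
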
    @staticmethod
    def binaryNonBroadcastOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            assert a.shape[i] == b.shape[i]
            shape.append(a.shape[i])

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def unaryOp(ser, a):
        return ser.addOutput(a.shape, a.dtype)

    @staticmethod
    def selectOp(ser, cond, a, b):
        assert len(a.shape) == len(b.shape) and len(a.shape) == len(cond.shape)
        assert a.dtype == b.dtype

        shape = []
        for i in range(len(a.shape)):
            shape.append(max(cond.shape[i], a.shape[i], b.shape[i]))

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def binaryComparisonOp(ser, a, b):
        assert len(a.shape) == len(b.shape)
        assert a.dtype == b.dtype

        # Do broadcast
        shape = []
        for i in range(len(a.shape)):
            if a.shape[i] == 1:
                shape.append(b.shape[i])
            else:
                shape.append(a.shape[i])

        # Force the output type to bool
        return ser.addOutput(shape, DType.BOOL)

    @staticmethod
    def reduceOp(ser, a, axis):

        shape = a.shape.copy()

        # The reduced axis is kept, with size 1
        shape[axis] = 1

        return ser.addOutput(shape, a.dtype)

    @staticmethod
    def argmaxOp(ser, a, axis):
        shape = a.shape.copy()
        del shape[axis]
        return ser.addOutput(shape, DType.INT32)

    @staticmethod
    def conv2dOp(ser, ifm, filter, strides, padding, dilations):

        # IFM:    NHWC
        # Filter: OHWI
        # OFM:    NHWC

        if len(padding) == 2:
            # Expand padding to 4 parameters in the case of transpose_conv2d
            # From H,W to T,B,L,R
            padding = [padding[0], padding[0], padding[1], padding[1]]

        h = (
            ifm.shape[1]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        w = (
            ifm.shape[2]
            - filter.shape[2]
            - (filter.shape[2] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1
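        # Worked example with illustrative sizes: ifm NHWC [1, 32, 32, 8],
        # filter OHWI [16, 3, 3, 8], strides [1, 1], padding [0, 0, 0, 0],
        # dilations [1, 1]:
        #   h = (32 - 3 - (3 - 1) * 0 + 0 + 0) // 1 + 1 = 30
        # and likewise w = 30, so ofm_shape = [1, 30, 30, 16].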

        ofm_shape = [ifm.shape[0], h, w, filter.shape[0]]

        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)

    @staticmethod
    def depthwiseConv2dOp(ser, ifm, filter, strides, padding, dilations):
        # IFM:    NHWC
        # Filter: HWCM
        # OFM:    NHW C*M
        h = (
            ifm.shape[1]
            - filter.shape[0]
            - (filter.shape[0] - 1) * (dilations[0] - 1)
            + padding[0]
            + padding[1]
        ) // strides[0] + 1

        w = (
            ifm.shape[2]
            - filter.shape[1]
            - (filter.shape[1] - 1) * (dilations[1] - 1)
            + padding[2]
            + padding[3]
        ) // strides[1] + 1

        ofm_shape = [ifm.shape[0], h, w, filter.shape[2] * filter.shape[3]]
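        # e.g. ifm [1, 16, 16, 4] with filter HWCM [3, 3, 4, 2], unit
        # strides/dilations and zero padding gives h = w = 14 and
        # C * M = 8 output channels: ofm_shape = [1, 14, 14, 8].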

        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(ofm_shape, out_dtype)

    @staticmethod
    def pool2dOp(ser, ifm, kernel, stride, pad):
        # input: NHWC
        h = (ifm.shape[1] + pad[0] + pad[1] + stride[0] - kernel[0]) // stride[0]
        w = (ifm.shape[2] + pad[2] + pad[3] + stride[1] - kernel[1]) // stride[1]
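        # e.g. ifm [1, 32, 32, 8], kernel [2, 2], stride [2, 2] and zero
        # padding: h = (32 + 0 + 0 + 2 - 2) // 2 = 16, and likewise w = 16.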

        ofm_shape = [ifm.shape[0], h, w, ifm.shape[3]]
        return ser.addOutput(ofm_shape, ifm.dtype)

    @staticmethod
    def fullyConnectedOp(ser, input, filter):
        # input: N, IC
        # filter: OC, IC
        # output: N, OC

        output_shape = [input.shape[0], filter.shape[0]]
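        # e.g. input [8, 128] with filter [64, 128] yields output [8, 64].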

        if input.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif input.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif input.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(input.dtype))

        return ser.addOutput(output_shape, out_dtype)

    @staticmethod
    def matmulOp(ser, a, b):
        # a:   N, H, C
        # b:   N, C, W
        # out: N, H, W

        output_shape = [a.shape[0], a.shape[1], b.shape[2]]
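        # e.g. a [4, 8, 16] multiplied by b [4, 16, 32] yields [4, 8, 32];
        # the batch dimension N is carried through unchanged.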

        if a.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif a.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif a.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype for matmul: {}".format(a.dtype))

        return ser.addOutput(output_shape, out_dtype)

    @staticmethod
    def concatOp(ser, axis, *a):
        input1 = a[0]
        remaining_inputs = a[1:]

        output_shape = input1.shape.copy()

        output_shape[axis] = input1.shape[axis]

        for tensor in remaining_inputs:
            output_shape[axis] += tensor.shape[axis]
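        # e.g. concatenating two [2, 3, 4] tensors along axis=1 yields
        # [2, 6, 4]; only the concatenation axis grows.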

        return ser.addOutput(output_shape, input1.dtype)

    @staticmethod
    def padOp(ser, a, padding):

        output_shape = a.shape.copy()

        for i in range(len(output_shape)):
            output_shape[i] = padding[i][0] + padding[i][1] + output_shape[i]
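        # e.g. a.shape [2, 3] with padding [[0, 1], [2, 2]] yields [3, 7];
        # each dimension grows by its before/after padding amounts.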

        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def reshapeOp(ser, a, shape):
        output_shape = shape.copy()

        totalElements = 1
        for i in a.shape:
            totalElements *= i

        # If there are any -1 elements, figure out what that dimension must be
        totalOutputElements = 1
        for i in output_shape:
            if i != -1:
                totalOutputElements *= i

        # And fill it in
        for i in range(len(output_shape)):
            if output_shape[i] == -1:
                output_shape[i] = totalElements // totalOutputElements
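        # e.g. a.shape [2, 3, 4] (24 elements) reshaped to [4, -1] infers
        # the -1 as 24 // 4 = 6, giving output_shape [4, 6].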

        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def sliceOp(ser, a, begin, size):

        output_shape = size.copy()
        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def tileOp(ser, a, multiples):

        output_shape = a.shape.copy()
        assert len(multiples) == len(output_shape)

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[i] * multiples[i]
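        # e.g. a.shape [2, 3] with multiples [3, 1] yields [6, 3].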

        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def transposeOp(ser, a, perms):
        output_shape = a.shape.copy()
        assert len(perms) == len(output_shape)

        for i in range(len(output_shape)):
            output_shape[i] = a.shape[perms[i]]
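        # e.g. a.shape [1, 2, 3] with perms [2, 0, 1] yields [3, 1, 2],
        # since output dim i takes its size from input dim perms[i].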

        return ser.addOutput(output_shape, a.dtype)

    @staticmethod
    def gatherOp(ser, values, indices):
        assert len(values.shape) == 3
        assert len(indices.shape) == 2
        assert values.shape[0] == indices.shape[0]

        output_shape = [values.shape[0], indices.shape[1], values.shape[2]]
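        # e.g. values [2, 5, 3] (N, K, C) with indices [2, 4] (N, W)
        # yields output [2, 4, 3] (N, W, C).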

        return ser.addOutput(output_shape, values.dtype)

    @staticmethod
    def scatterOp(ser, values_in, indices, input):
        assert len(values_in.shape) == 3
        assert len(indices.shape) == 2
        assert len(input.shape) == 3
        assert values_in.shape[0] == indices.shape[0]  # N
        assert input.shape[1] == indices.shape[1]  # W
        assert values_in.shape[2] == input.shape[2]  # C

        output_shape = values_in.shape
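        # The output keeps the values_in shape: e.g. values_in [2, 5, 3],
        # indices [2, 4] and input [2, 4, 3] yield output [2, 5, 3].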

        return ser.addOutput(output_shape, values_in.dtype)

    @staticmethod
    def tableOp(ser, input, table_dtype):
        # Same shape as the input, but dtype dependent on table dtype
        assert table_dtype == DType.INT16 or table_dtype == DType.INT8
        output_dtype = DType.INT32 if table_dtype == DType.INT16 else DType.INT8
        return ser.addOutput(input.shape, output_dtype)

    @staticmethod
    def resizeOp(
        ser,
        input,
        mode,
        stride,
        offset,
        shift,
        stride_fp,
        offset_fp,
        output_dims,
        input_dtype,
        output_dtype,
    ):

        output_dims = [input.shape[0], output_dims[0], output_dims[1], input.shape[3]]
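        # e.g. input [1, 16, 16, 8] resized to output_dims [32, 32]
        # yields [1, 32, 32, 8]; batch and channels are unchanged.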

        return ser.addOutput(output_dims, output_dtype)

    @staticmethod
    def typeConversionOp(ser, val, out_dtype):
        return ser.addOutput(val.shape, out_dtype)

    @staticmethod
    def transposeConv2DOp(ser, ifm, output_shape):
        if ifm.dtype == DType.INT8:
            out_dtype = DType.INT32
        elif ifm.dtype == DType.INT16:
            out_dtype = DType.INT48
        elif ifm.dtype == DType.FLOAT:
            out_dtype = DType.FLOAT
        else:
            raise Exception("Unsupported input dtype: {}".format(ifm.dtype))

        return ser.addOutput(output_shape, out_dtype)