# Copyright (c) 2020-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import enum

import numpy as np
import tensorflow as tf

# FIXME: replace hardcoded '* 2' with random integers, where possible

# The scaling factor for random numbers generated in input tensors. The
# random numbers are calculated as:
# (np.random.rand() - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
# FIXME: improve range here
RAND_SCALE_FACTOR = 4.0
# Amount subtracted from the random numbers before scaling
RAND_SHIFT_FACTOR = 0.5
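# With these defaults, ALL_RANGE float values fall in [-2.0, 2.0).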

RAND_INT_MIN = -128
RAND_INT_MAX = 128


class ElemSignedness(enum.Enum):
    ALL_RANGE = 1
    POSITIVE = 2
    NEGATIVE = 3


class TGen:
    """A collection of functions to build tensor value arguments for an operator"""

    def __init__(self):
        pass

    @staticmethod
    def getRand(shape, dtype, rng, elem_signedness=ElemSignedness.ALL_RANGE):
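        # The local shift overrides the module default: POSITIVE gives values
        # in [0.0, RAND_SCALE_FACTOR), NEGATIVE in [-RAND_SCALE_FACTOR, 0.0).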
        if elem_signedness == ElemSignedness.POSITIVE:
            RAND_SHIFT_FACTOR = 0
        elif elem_signedness == ElemSignedness.NEGATIVE:
            RAND_SHIFT_FACTOR = 1
        else:
            RAND_SHIFT_FACTOR = 0.5

        if dtype == tf.float32:
            return (
                np.float32(
                    (rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
                )
                if shape != ()
                else np.float32(rng.random())
            )
        if dtype == tf.float16:
            return np.float16(
                (rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
            )
        if dtype == tf.int32:
            return np.int32(
                rng.integers(low=RAND_INT_MIN, high=RAND_INT_MAX, size=shape)
            )
        if dtype == tf.uint32:
            return np.uint32(rng.integers(low=0, high=RAND_INT_MAX, size=shape))
        if dtype == tf.bool:
            return np.bool_(rng.choice(a=[False, True], size=shape))
        if dtype == tf.complex64:
            return TGen.getRand(shape, tf.float32, rng) + 1j * TGen.getRand(
                shape, tf.float32, rng
            )

        raise Exception("Unsupported type: {}".format(dtype))

    @staticmethod
    def tgBasicPositive(op, shape, dtype, rng, elem_signedness=ElemSignedness.POSITIVE):
        return TGen.tgBasic(op, shape, dtype, rng, elem_signedness)

    @staticmethod
    def tgBasic(op, shape, dtype, rng, elem_signedness=ElemSignedness.ALL_RANGE):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]

        tf_placeholders = []
        tf_consts = []

        for i in range(pl):
            tf_placeholders.append(
                (
                    "placeholder_{}".format(i),
                    TGen.getRand(shape, dtype, rng, elem_signedness),
                )
            )

        for i in range(const):
            tf_consts.append(
                ("const_{}".format(i), TGen.getRand(shape, dtype, rng, elem_signedness))
            )

        return tf_placeholders, tf_consts
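
    # For example, op = {"operands": (2, 1)} with shape (2, 3) yields two
    # "placeholder_*" args and one "const_0" arg, each a random 2x3 array.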

    @staticmethod
    def tgBFuzz(op, shape, dtype, rng, for_tflite_converter=True):
        # Build random tensor placeholder node args of a given shape, optionally
        # fuzzing the arguments with random 1's to force broadcasting

        pl, const = op["operands"]

        assert const == 0

        if not for_tflite_converter:
            fuzz_arg = rng.integers(0, pl + const)
            fuzz_idx = rng.integers(0, len(shape))

        tf_placeholders = []
        tf_consts = []

        for i in range(pl):
            if not for_tflite_converter and i == fuzz_arg:
                # Insert the broadcast in one dimension index
                s_fuzz = list(shape)
                s_fuzz[fuzz_idx] = 1
                s_fuzz = tuple(s_fuzz)
                i_shape = s_fuzz
            else:
                i_shape = shape

            tf_placeholders.append(
                ("placeholder_{}".format(i), TGen.getRand(i_shape, dtype, rng))
            )

        return tf_placeholders, tf_consts
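
    # For example, with shape (4, 5, 6) and for_tflite_converter=False, the
    # fuzzed placeholder could get shape (4, 1, 6), forcing the op under test
    # to broadcast it against the full-shape inputs.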

    @staticmethod
    def tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng):

        # Take the shape and generate an input and filter
        tf_placeholders = []
        tf_consts = []
        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
        tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))

        bias = op.get("bias", False)

        if bias:
            # bias is 1D and size == output channels
            bias_shape = (out_channels,)
            tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))

        return tf_placeholders, tf_consts

    @staticmethod
    def tgConv2d(op, ifm_shape, dtype, rng):

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # TODO: Hard-code the test by making the OFM depth 2x the IFM depth.
        # Could randomize this in the future.
        out_channels = ifm_shape[3] * 2
        filter_shape = (filter_h, filter_w, ifm_shape[3], out_channels)

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)
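
    # For example, ifm_shape (1, 8, 8, 4) with op["filter"] = (3, 3) gives
    # filter_shape (3, 3, 4, 8), and a bias of shape (8,) if op["bias"] is set.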

    @staticmethod
    def tgDepthwiseConv2d(op, ifm_shape, dtype, rng):

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # TODO: Hard-code the test by making the channel_multiplier=2.
        # Could randomize this in the future.
        filter_shape = (filter_h, filter_w, ifm_shape[3], 2)
        out_channels = ifm_shape[3] * 2

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)

    @staticmethod
    def tgTransposeConv2d(op, ifm_shape, dtype, rng):

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # TODO: Hard-code the test by making the OFM depth 2x the IFM depth.
        # Could randomize this in the future.
        out_channels = ifm_shape[3] * 2
        filter_shape = (filter_h, filter_w, out_channels, ifm_shape[3])

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)

    @staticmethod
    def tgConv3d(op, ifm_shape, dtype, rng):

        # Require rank 5 shape
        if len(ifm_shape) != 5:
            return [], []

        filter_d, filter_h, filter_w = op["filter"]

        # TODO: Hard-code the test by making the OFM depth 2x the IFM depth.
        # Could randomize this in the future.
        in_channels = ifm_shape[4]
        out_channels = in_channels * 2
        filter_shape = (filter_d, filter_h, filter_w, in_channels, out_channels)

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)

    @staticmethod
    def tgPooling(op, shapes, dtype, rng):
        # Pooling does nothing special except filter out non-rank-4 tensors
        if len(shapes) != 4:
            return [], []

        return TGen.tgBasic(op, shapes, dtype, rng)

    @staticmethod
    def tgMatmul(op, ifm_shape, dtype, rng):
        # Take the shape and generate the lhs and rhs inputs
        tf_placeholders = []
        tf_consts = []

        if len(ifm_shape) < 2:
            return [], []

        # For ifm_shape = [..., N, K],
        # generate an rhs tensor with shape [..., K, 2 * N]
        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))

        shape_rhs = list(ifm_shape)
        shape_rhs[-2] = ifm_shape[-1]
        shape_rhs[-1] = ifm_shape[-2] * 2
        tf_placeholders.append(
            (
                "placeholder_1",
                TGen.getRand(shape_rhs, dtype, rng),
            )
        )

        return tf_placeholders, tf_consts
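
    # For example, ifm_shape (2, 3, 4) gives an rhs placeholder of shape
    # (2, 4, 6), so the resulting matmul output has shape (2, 3, 6).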

    @staticmethod
    def tgOneHot(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]

        assert pl == 3 and const == 1

        tf_placeholders = []
        tf_consts = []

        # depth
        depth = np.int32(rng.integers(low=1, high=32, size=None))
        tf_consts.append(("const_0", depth))

        # indices
        indices = np.int32(rng.integers(low=0, high=depth, size=shape))
        tf_placeholders.append(("placeholder_0", indices))

        # on_value
        tf_placeholders.append(("placeholder_1", TGen.getRand(None, dtype, rng)))

        # off_value
        tf_placeholders.append(("placeholder_2", TGen.getRand(None, dtype, rng)))

        return tf_placeholders, tf_consts

    @staticmethod
    def tgSelect(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]
        assert pl == 3 and const == 0

        tf_placeholders = []
        tf_consts = []

        # selector
        tf_placeholders.append(("placeholder_0", TGen.getRand(None, tf.bool, rng)))
        # inputs
        tf_placeholders.append(("placeholder_1", TGen.getRand(shape, dtype, rng)))
        tf_placeholders.append(("placeholder_2", TGen.getRand(shape, dtype, rng)))

        return tf_placeholders, tf_consts

    @staticmethod
    def tgRecurrent(op, ifm_shape, dtype, rng):
        # Require rank 3 shape for recurrent networks
        if len(ifm_shape) != 3:
            return [], []

        pl, const = op["operands"]

        tf_placeholders = []
        tf_consts = []

        for i in range(pl):
            tf_placeholders.append(
                ("placeholder_{}".format(i), TGen.getRand(ifm_shape, dtype, rng))
            )

        for i in range(const):
            tf_consts.append(
                ("const_{}".format(i), TGen.getRand(ifm_shape, dtype, rng))
            )

        return tf_placeholders, tf_consts

    @staticmethod
    def tgRFFT2d(op, shape, dtype, rng):
        # Require rank 3 shape
        if len(shape) != 3:
            return [], []

        return TGen.tgBasic(op, shape, dtype, rng)

    @staticmethod
    def tgComplexComponents(op, shape, dtype, rng):
        # Temporarily require up to rank 3 shape, due to
        # the slice maximum rank limitation.
        if len(shape) > 3:
            return [], []

        return TGen.tgBasic(op, shape, dtype, rng)

    @staticmethod
    def tgBroadcastTo(op, shape, dtype, rng):

        pl, const = op["operands"]

        assert pl == 1
        assert const == 1

        tf_placeholders = []
        tf_consts = []

        shape_list = list(shape)
        t_shape_list = []
        s_shape_list = []
        for i in range(len(shape)):
            dim = shape_list[i]
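            # FIXME: rng.integers(0, 1) always returns 0 (high is exclusive),
            # so the else branch below is never taken and t_shape_list always
            # ends up as [1]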
            if rng.integers(0, 1) == 0:
                # append dim to s_shape_list, and 1 to t_shape_list unless it is still empty
                s_shape_list.append(dim)
                if len(t_shape_list) > 0:
                    t_shape_list.append(1)
            else:
                # append 1 to s_shape_list, and dim to t_shape_list
                s_shape_list.append(1)
                t_shape_list.append(dim)

        # if t_shape_list is empty, then insert 1
        if len(t_shape_list) == 0:
            t_shape_list.append(1)

        tf_placeholders.append(
            ("placeholder_0", TGen.getRand(tuple(t_shape_list), dtype, rng))
        )

        tf_consts.append(("shape", tuple(s_shape_list)))

        return tf_placeholders, tf_consts
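

if __name__ == "__main__":
    # Minimal usage sketch, not part of the test suite: the op dictionaries
    # below are hypothetical stand-ins for the generator's real op
    # descriptions, shown only to illustrate the (name, value) tuples that
    # TGen returns.
    rng = np.random.default_rng(seed=0)

    basic_op = {"operands": (2, 1)}  # assumed: 2 placeholders, 1 const
    placeholders, consts = TGen.tgBasic(basic_op, (2, 3), tf.float32, rng)
    print([name for name, _ in placeholders])  # ['placeholder_0', 'placeholder_1']
    print([name for name, _ in consts])  # ['const_0']

    matmul_op = {"operands": (2, 0)}  # assumed operand counts
    (lhs, rhs), _ = TGen.tgMatmul(matmul_op, (2, 3, 4), tf.float32, rng)
    print(lhs[1].shape, rhs[1].shape)  # (2, 3, 4) (2, 4, 6)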