# Copyright (c) 2020-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import enum

import numpy as np
import tensorflow as tf

# FIXME: replace hardcoded '* 2' with random integers, where possible

# The scaling factor for random numbers generated in input tensors. The
# random numbers are calculated as:
# (np.random.rand() - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
# FIXME: improve range here
RAND_SCALE_FACTOR = 4.0
# Amount subtracted from random numbers to shift their range
RAND_SHIFT_FACTOR = 0.5

RAND_INT_MIN = -128
RAND_INT_MAX = 128
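
# With the defaults above, generated floats fall in
# ([0.0, 1.0) - 0.5) * 4.0 == [-2.0, 2.0), and generated integers in
# [RAND_INT_MIN, RAND_INT_MAX) == [-128, 128), since numpy's
# Generator.integers() treats the upper bound as exclusive.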


class ElemSignedness(enum.Enum):
    ALL_RANGE = 1
    POSITIVE = 2
    NEGATIVE = 3


class TGen:
    """A collection of functions to build tensor value arguments for an operator"""

    def __init__(self):
        pass

    @staticmethod
    def getRand(shape, dtype, rng, elem_signedness=ElemSignedness.ALL_RANGE):
        # These assignments bind a local RAND_SHIFT_FACTOR that shadows the
        # module-level constant for the remainder of this call.
        if elem_signedness == ElemSignedness.POSITIVE:
            RAND_SHIFT_FACTOR = 0
        elif elem_signedness == ElemSignedness.NEGATIVE:
            RAND_SHIFT_FACTOR = 1
        else:
            RAND_SHIFT_FACTOR = 0.5

        if dtype == tf.float32:
            return np.float32(
                (rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
            )
        if dtype == tf.float16:
            return np.float16(
                (rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
            )
        if dtype == tf.int32:
            return np.int32(
                rng.integers(low=RAND_INT_MIN, high=RAND_INT_MAX, size=shape)
            )
        if dtype == tf.uint32:
            return np.uint32(rng.integers(low=0, high=RAND_INT_MAX, size=shape))
        if dtype == tf.bool:
            return np.bool_(rng.choice(a=[False, True], size=shape))
        if dtype == tf.complex64:
            return TGen.getRand(shape, np.float32, rng) + 1j * TGen.getRand(
                shape, np.float32, rng
            )

        raise Exception("Unsupported type: {}".format(dtype))

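    # A usage sketch (illustrative, not part of the test suite; assumes
    # `rng` is a numpy Generator such as np.random.default_rng(0)):
    #
    #   x = TGen.getRand((2, 3), tf.float32, rng)
    #   # x.shape == (2, 3), x.dtype == np.float32, values in [-2.0, 2.0)
    #
    # ElemSignedness.POSITIVE shifts the range to [0.0, 4.0), and
    # ElemSignedness.NEGATIVE to [-4.0, 0.0).
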
    @staticmethod
    def tgBasicPositive(op, shape, dtype, rng, elem_signedness=ElemSignedness.POSITIVE):
        return TGen.tgBasic(op, shape, dtype, rng, elem_signedness)

    @staticmethod
    def tgBasic(op, shape, dtype, rng, elem_signedness=ElemSignedness.ALL_RANGE):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]

        tf_placeholders = []
        tf_consts = []

        for i in range(pl):
            tf_placeholders.append(
                (
                    "placeholder_{}".format(i),
                    TGen.getRand(shape, dtype, rng, elem_signedness),
                )
            )

        for i in range(const):
            tf_consts.append(
                ("const_{}".format(i), TGen.getRand(shape, dtype, rng, elem_signedness))
            )

        return tf_placeholders, tf_consts

    @staticmethod
    def tgBFuzz(op, shape, dtype, rng, fuzzed=[]):
        # Build random tensor placeholder node args of a given shape, optionally
        # fuzzing the arguments with random 1's to force broadcasting.
        # Note: `fuzzed` is a mutable default argument that doubles as an
        # in/out record of which argument indices have been fuzzed, so state
        # persists across calls that rely on the default; callers that want
        # independent fuzzing should pass their own list.

        pl, const = op["operands"]

        assert const == 0

        fuzz_arg = rng.integers(0, pl + const)
        fuzz_idx = rng.integers(0, len(shape))

        tf_placeholders = []
        tf_consts = []
        for i in range(pl):
            if not fuzzed and i == fuzz_arg:
                # Insert the broadcast in one dimension index
                s_fuzz = list(shape)
                s_fuzz[fuzz_idx] = 1
                s_fuzz = tuple(s_fuzz)
                i_shape = s_fuzz
                # Record the fuzzed index.
                fuzzed.append(i)
            else:
                i_shape = shape
            tf_placeholders.append(
                ("placeholder_{}".format(i), TGen.getRand(i_shape, dtype, rng))
            )

        return tf_placeholders, tf_consts

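    # For example (illustrative values, not from the test suite): with
    # shape == (1, 4, 5), pl == 2, and the RNG drawing fuzz_arg == 1 and
    # fuzz_idx == 2, tgBFuzz yields placeholder_0 with shape (1, 4, 5) and
    # placeholder_1 with shape (1, 4, 1), forcing a broadcast along the
    # last dimension.
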
    @staticmethod
    def tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng):

        # Take the shape and generate an input and filter
        tf_placeholders = []
        tf_consts = []
        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
        tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))

        bias = op.get("bias", False)

        if bias:
            # bias is 1D and size == output channels
            bias_shape = (out_channels,)
            tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))

        return tf_placeholders, tf_consts

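    # The filter layouts below follow TensorFlow's conventions and are read
    # straight from the shapes each generator constructs:
    #   tgConv2d:          (filter_h, filter_w, in_channels, out_channels)
    #   tgDepthwiseConv2d: (filter_h, filter_w, in_channels, channel_multiplier)
    #   tgTransposeConv2d: (filter_h, filter_w, out_channels, in_channels)
    #   tgConv3d:          (filter_d, filter_h, filter_w, in_channels, out_channels)
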
    @staticmethod
    def tgConv2d(op, ifm_shape, dtype, rng):

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # TODO: Hard-code the test by making the OFM depth 2x the IFM depth.
        # Could randomize this in the future.
        out_channels = ifm_shape[3] * 2
        filter_shape = (filter_h, filter_w, ifm_shape[3], out_channels)

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)

    @staticmethod
    def tgDepthwiseConv2d(op, ifm_shape, dtype, rng):

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # TODO: Hard-code the test by making the channel_multiplier=2.
        # Could randomize this in the future.
        filter_shape = (filter_h, filter_w, ifm_shape[3], 2)
        out_channels = ifm_shape[3] * 2

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)

    @staticmethod
    def tgTransposeConv2d(op, ifm_shape, dtype, rng):

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # TODO: Hard-code the test by making the OFM depth 2x the IFM depth.
        # Could randomize this in the future.
        out_channels = ifm_shape[3] * 2
        filter_shape = (filter_h, filter_w, out_channels, ifm_shape[3])

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)

    @staticmethod
    def tgConv3d(op, ifm_shape, dtype, rng):

        # Require rank 5 shape
        if len(ifm_shape) != 5:
            return [], []

        filter_d, filter_h, filter_w = op["filter"]

        # TODO: Hard-code the test by making the OFM depth 2x the IFM depth.
        # Could randomize this in the future.
        in_channels = ifm_shape[4]
        out_channels = in_channels * 2
        filter_shape = (filter_d, filter_h, filter_w, in_channels, out_channels)

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)

    @staticmethod
    def tgPooling(op, shapes, dtype, rng):
        # Pooling does nothing special except filter out non-rank-4 tensors
        if len(shapes) != 4:
            return [], []

        return TGen.tgBasic(op, shapes, dtype, rng)

    @staticmethod
    def tgMatmul(op, ifm_shape, dtype, rng):
        # Take the shape and generate an input and filter
        tf_placeholders = []
        tf_consts = []

        if len(ifm_shape) < 2:
            return [], []

        # For ifm_shape = [..., N, K],
        # generate an rhs tensor with shape [..., K, 2 * N]
        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))

        shape_rhs = list(ifm_shape)
        shape_rhs[-2] = ifm_shape[-1]
        shape_rhs[-1] = ifm_shape[-2] * 2
        tf_placeholders.append(
            (
                "placeholder_1",
                TGen.getRand(shape_rhs, dtype, rng),
            )
        )

        return tf_placeholders, tf_consts

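    # Worked example (illustrative): ifm_shape == (2, 3, 4) gives N == 3 and
    # K == 4, so placeholder_0 has shape (2, 3, 4) and placeholder_1 has
    # shape (2, 4, 6); a batched matmul of the two then produces (2, 3, 6).
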
    @staticmethod
    def tgOneHot(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]

        assert pl == 3 and const == 1

        tf_placeholders = []
        tf_consts = []

        # depth
        depth = np.int32(rng.integers(low=1, high=32, size=None))
        tf_consts.append(("const_0", depth))

        # indices
        indices = np.int32(rng.integers(low=0, high=depth, size=shape))
        tf_placeholders.append(("placeholder_0", indices))

        # on_value
        tf_placeholders.append(("placeholder_1", TGen.getRand(None, dtype, rng)))

        # off_value
        tf_placeholders.append(("placeholder_2", TGen.getRand(None, dtype, rng)))

        return tf_placeholders, tf_consts

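    # For example (illustrative): with shape == (2, 3) and the RNG drawing
    # depth == 5, the indices placeholder holds ints in [0, 5) while the
    # on/off values are random scalars; tf.one_hot with these arguments
    # appends a trailing axis of length `depth`, giving output shape (2, 3, 5).
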
    @staticmethod
    def tgSelect(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]
        assert pl == 3 and const == 0

        tf_placeholders = []
        tf_consts = []

        # selector
        tf_placeholders.append(("placeholder_0", TGen.getRand(None, tf.bool, rng)))
        # inputs
        tf_placeholders.append(("placeholder_1", TGen.getRand(shape, dtype, rng)))
        tf_placeholders.append(("placeholder_2", TGen.getRand(shape, dtype, rng)))

        return tf_placeholders, tf_consts

    @staticmethod
    def tgRecurrent(op, ifm_shape, dtype, rng):
        # Require rank 3 shape for recurrent networks
        if len(ifm_shape) != 3:
            return [], []
        pl, const = op["operands"]

        tf_placeholders = []
        tf_consts = []

        for i in range(pl):
            tf_placeholders.append(
                ("placeholder_{}".format(i), TGen.getRand(ifm_shape, dtype, rng))
            )

        for i in range(const):
            tf_consts.append(
                ("const_{}".format(i), TGen.getRand(ifm_shape, dtype, rng))
            )

        return tf_placeholders, tf_consts

    @staticmethod
    def tgRFFT2d(op, shape, dtype, rng):
        # Require rank 3 shape
        if len(shape) != 3:
            return [], []

        return TGen.tgBasic(op, shape, dtype, rng)

    @staticmethod
    def tgComplexComponents(op, shape, dtype, rng):
        # Temporarily require up to rank 3 shape, due to
        # slice maximum rank limitation.
        if len(shape) > 3:
            return [], []

        return TGen.tgBasic(op, shape, dtype, rng)

    @staticmethod
    def tgBroadcastTo(op, shape, dtype, rng):

        pl, const = op["operands"]

        assert pl == 1
        assert const == 1

        tf_placeholders = []
        tf_consts = []

        shape_list = list(shape)
        t_shape_list = []
        s_shape_list = []
        for i in range(len(shape)):
            dim = shape_list[i]
            # Note: numpy's Generator.integers() treats the upper bound as
            # exclusive, so this draw always returns 0 and only the first
            # branch is taken; the else branch is currently dead code.
            if rng.integers(0, 1) == 0:
                # append dim in s_shape_list, and 1 in t_shape_list unless it is still empty
                s_shape_list.append(dim)
                if len(t_shape_list) > 0:
                    t_shape_list.append(1)
            else:
                # append 1 in s_shape_list, and dim in t_shape_list
                s_shape_list.append(1)
                t_shape_list.append(dim)

        # if t_shape_list is empty, then insert 1
        if len(t_shape_list) == 0:
            t_shape_list.append(1)

        tf_placeholders.append(
            ("placeholder_0", TGen.getRand(tuple(t_shape_list), dtype, rng))
        )

        tf_consts.append(("shape", tuple(s_shape_list)))

        return tf_placeholders, tf_consts
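
# A worked example of tgBroadcastTo (illustrative): because the draw above
# always lands on the first branch, shape == (2, 3, 5) produces
# placeholder_0 with shape (1,) and a "shape" const of (2, 3, 5), i.e. a
# rank-1 tensor broadcast up to the full shape.

# A minimal smoke test, assuming only this module and its imports; the
# operand counts below are illustrative, not taken from a real op list.
if __name__ == "__main__":
    demo_rng = np.random.default_rng(42)
    demo_op = {"operands": (2, 1)}  # two placeholders, one const
    placeholders, consts = TGen.tgBasic(demo_op, (2, 3), tf.float32, demo_rng)
    for name, value in placeholders + consts:
        print(name, value.shape, value.dtype)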