# Copyright (c) 2020-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import enum

import numpy as np
import tensorflow as tf

# FIXME: replace hardcoded '* 2' with random integers, where possible

# The scaling factor for random numbers generated in input tensors. The
# random numbers are calculated as:
# (rng.random() - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
# FIXME: improve range here
RAND_SCALE_FACTOR = 4.0
# Amount subtracted from random numbers to shift them around zero
RAND_SHIFT_FACTOR = 0.5

RAND_INT_MIN = -128
RAND_INT_MAX = 128
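# Note: rng.integers() excludes the high endpoint by default, so generated
# integers span [RAND_INT_MIN, RAND_INT_MAX - 1]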


class ElemSignedness(enum.Enum):
    ALL_RANGE = 1
    POSITIVE = 2
    NEGATIVE = 3


class TGen:
    """A collection of functions to build tensor value arguments for an operator"""

    def __init__(self):
        pass

    @staticmethod
    def getRand(shape, dtype, rng, elem_signedness=ElemSignedness.ALL_RANGE):
        # Pick the shift so the generated floats land in the requested range:
        # [0, 4) for POSITIVE, [-4, 0) for NEGATIVE and [-2, 2) for ALL_RANGE,
        # given the default RAND_SCALE_FACTOR of 4.0. A local name is used so
        # the module-level RAND_SHIFT_FACTOR constant is not shadowed.
        if elem_signedness == ElemSignedness.POSITIVE:
            shift_factor = 0
        elif elem_signedness == ElemSignedness.NEGATIVE:
            shift_factor = 1
        else:
            shift_factor = RAND_SHIFT_FACTOR

        if dtype == tf.float32:
            return np.float32(
                (rng.random(size=shape) - shift_factor) * RAND_SCALE_FACTOR
            )
        if dtype == tf.float16:
            return np.float16(
                (rng.random(size=shape) - shift_factor) * RAND_SCALE_FACTOR
            )
        if dtype == tf.int32:
            return np.int32(
                rng.integers(low=RAND_INT_MIN, high=RAND_INT_MAX, size=shape)
            )
        if dtype == tf.uint32:
            return np.uint32(rng.integers(low=0, high=RAND_INT_MAX, size=shape))
        if dtype == tf.bool:
            return np.bool_(rng.choice(a=[False, True], size=shape))
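        # Compose complex64 data from two float32 draws (real + imaginary parts)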
        if dtype == tf.complex64:
            return TGen.getRand(shape, np.float32, rng) + 1j * TGen.getRand(
                shape, np.float32, rng
            )

        raise Exception("Unsupported type: {}".format(dtype))
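
    # tgBasicPositive restricts generated float data to the non-negative
    # range [0, RAND_SCALE_FACTOR); see ElemSignedness above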
    @staticmethod
    def tgBasicPositive(op, shape, dtype, rng, elem_signedness=ElemSignedness.POSITIVE):
        return TGen.tgBasic(op, shape, dtype, rng, elem_signedness)

    @staticmethod
    def tgBasic(op, shape, dtype, rng, elem_signedness=ElemSignedness.ALL_RANGE):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]

        tf_placeholders = []
        tf_consts = []

        for i in range(pl):
            tf_placeholders.append(
                (
                    "placeholder_{}".format(i),
                    TGen.getRand(shape, dtype, rng, elem_signedness),
                )
            )

        for i in range(const):
            tf_consts.append(
                ("const_{}".format(i), TGen.getRand(shape, dtype, rng, elem_signedness))
            )

        return tf_placeholders, tf_consts

    @staticmethod
    def tgBFuzz(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape, optionally
        # fuzzing the arguments with random 1's to force broadcasting
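        # e.g. for shape (2, 3, 4), one randomly chosen input may instead get
        # shape (2, 1, 4), forcing the operator to broadcast that dimension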

        pl, const = op["operands"]

        assert const == 0

        fuzz_arg = rng.integers(0, pl + const)
        fuzz_idx = rng.integers(0, len(shape))

        tf_placeholders = []
        tf_consts = []
        for i in range(pl):
            if i == fuzz_arg:
                # Insert the broadcast in one dimension index
                s_fuzz = list(shape)
                s_fuzz[fuzz_idx] = 1
                s_fuzz = tuple(s_fuzz)
                i_shape = s_fuzz
            else:
                i_shape = shape
            tf_placeholders.append(
                ("placeholder_{}".format(i), TGen.getRand(i_shape, dtype, rng))
            )

        return tf_placeholders, tf_consts

    @staticmethod
    def tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng):

        # Take the shape and generate an input and filter
        tf_placeholders = []
        tf_consts = []
        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
        tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))

        bias = op.get("bias", False)

        if bias:
            # bias is 1D and size == output channels
            bias_shape = (out_channels,)
            tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))

        return tf_placeholders, tf_consts
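
    # The filter layouts built below match the TensorFlow kernel conventions:
    # HWIO for conv2d, HWIM for depthwise conv2d (M = channel_multiplier),
    # HWOI for transpose conv2d and DHWIO for conv3d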

    @staticmethod
    def tgConv2d(op, ifm_shape, dtype, rng):

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # The test is currently hard-coded with the OFM depth 2x the IFM depth.
        # TODO: Could randomize this in the future.
        out_channels = ifm_shape[3] * 2
        filter_shape = (filter_h, filter_w, ifm_shape[3], out_channels)

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)

    @staticmethod
    def tgDepthwiseConv2d(op, ifm_shape, dtype, rng):

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # The test is currently hard-coded with channel_multiplier=2, so the
        # OFM depth is 2x the IFM depth. TODO: Could randomize this in the future.
        filter_shape = (filter_h, filter_w, ifm_shape[3], 2)
        out_channels = ifm_shape[3] * 2

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)

    @staticmethod
    def tgTransposeConv2d(op, ifm_shape, dtype, rng):

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # The test is currently hard-coded with the OFM depth 2x the IFM depth;
        # note the transpose filter layout swaps the channel order.
        # TODO: Could randomize this in the future.
        out_channels = ifm_shape[3] * 2
        filter_shape = (filter_h, filter_w, out_channels, ifm_shape[3])

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)

    @staticmethod
    def tgConv3d(op, ifm_shape, dtype, rng):

        # Require rank 5 shape
        if len(ifm_shape) != 5:
            return [], []

        filter_d, filter_h, filter_w = op["filter"]

        # The test is currently hard-coded with the OFM depth 2x the IFM depth.
        # TODO: Could randomize this in the future.
        in_channels = ifm_shape[4]
        out_channels = in_channels * 2
        filter_shape = (filter_d, filter_h, filter_w, in_channels, out_channels)

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)

    @staticmethod
    def tgPooling(op, shapes, dtype, rng):
        # Pooling does nothing special except filter out non-rank-4 tensors
        if len(shapes) != 4:
            return [], []

        return TGen.tgBasic(op, shapes, dtype, rng)

    @staticmethod
    def tgMatmul(op, ifm_shape, dtype, rng):
        # Take the shape and generate an input and filter
        tf_placeholders = []
        tf_consts = []

        if len(ifm_shape) < 2:
            return [], []

        # For ifm_shape = [..., N, K],
        # generate a rhs tensor with shape [..., K, 2 * N]
        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))

        shape_rhs = list(ifm_shape)
        shape_rhs[-2] = ifm_shape[-1]
        shape_rhs[-1] = ifm_shape[-2] * 2
        tf_placeholders.append(
            (
                "placeholder_1",
                TGen.getRand(shape_rhs, dtype, rng),
            )
        )

        return tf_placeholders, tf_consts

    @staticmethod
    def tgOneHot(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]

        assert pl == 3 and const == 1

        tf_placeholders = []
        tf_consts = []

        # depth
        depth = np.int32(rng.integers(low=1, high=32, size=None))
        tf_consts.append(("const_0", depth))

        # indices
        indices = np.int32(rng.integers(low=0, high=depth, size=shape))
        tf_placeholders.append(("placeholder_0", indices))

        # on_value
        tf_placeholders.append(("placeholder_1", TGen.getRand(None, dtype, rng)))

        # off_value
        tf_placeholders.append(("placeholder_2", TGen.getRand(None, dtype, rng)))

        return tf_placeholders, tf_consts

    @staticmethod
    def tgSelect(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]
        assert pl == 3 and const == 0

        tf_placeholders = []
        tf_consts = []

        # selector
        tf_placeholders.append(("placeholder_0", TGen.getRand(None, tf.bool, rng)))
        # inputs
        tf_placeholders.append(("placeholder_1", TGen.getRand(shape, dtype, rng)))
        tf_placeholders.append(("placeholder_2", TGen.getRand(shape, dtype, rng)))

        return tf_placeholders, tf_consts

    @staticmethod
    def tgRecurrent(op, ifm_shape, dtype, rng):
        # Require rank 3 shape for recurrent networks
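        # (a rank 3 input is typically interpreted as [batch, time, features])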
        if len(ifm_shape) != 3:
            return [], []
        pl, const = op["operands"]

        tf_placeholders = []
        tf_consts = []

        for i in range(pl):
            tf_placeholders.append(
                ("placeholder_{}".format(i), TGen.getRand(ifm_shape, dtype, rng))
            )

        for i in range(const):
            tf_consts.append(
                ("const_{}".format(i), TGen.getRand(ifm_shape, dtype, rng))
            )

        return tf_placeholders, tf_consts

    @staticmethod
    def tgRFFT2d(op, shape, dtype, rng):
        # Require rank 3 shape
        if len(shape) != 3:
            return [], []

        return TGen.tgBasic(op, shape, dtype, rng)

    @staticmethod
    def tgComplexComponents(op, shape, dtype, rng):
        # Temporarily require up to rank 3 shape, due to
        # slice maximum rank limitation.
        if len(shape) > 3:
            return [], []

        return TGen.tgBasic(op, shape, dtype, rng)
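

# A minimal usage sketch (illustrative only; in the real test framework the
# "op" dictionaries and the numpy Generator come from the test runner):
#
#   rng = np.random.default_rng(42)
#   op = {"operands": (2, 0)}  # two placeholders, no consts
#   placeholders, consts = TGen.tgBasic(op, (1, 8, 8, 3), tf.float32, rng)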