# Copyright (c) 2020-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import tensorflow as tf

# FIXME: replace hardcoded '* 2' with random integers, where possible

# The scaling factor for random numbers generated in input tensors. The
# random numbers are calculated as:
# (rng.random() - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
# FIXME: improve range here
RAND_SCALE_FACTOR = 4.0
# Amount subtracted from the random numbers before scaling, to centre them
# around zero
RAND_SHIFT_FACTOR = 0.5

# Bounds for random integers; note that rng.integers() treats the high
# bound as exclusive, so int32 values fall in [-128, 128)
RAND_INT_MIN = -128
RAND_INT_MAX = 128


class TGen:
    """A collection of functions to build tensor value arguments for an operator"""

    def __init__(self):
        pass

    @staticmethod
    def getRand(shape, dtype, rng):
        if dtype == tf.float32:
            return np.float32(
                (rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
            )
        if dtype == tf.float16:
            return np.float16(
                (rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
            )
        if dtype == tf.int32:
            return np.int32(
                rng.integers(low=RAND_INT_MIN, high=RAND_INT_MAX, size=shape)
            )
        if dtype == tf.uint32:
            return np.uint32(rng.integers(low=0, high=RAND_INT_MAX, size=shape))
        if dtype == tf.bool:
            return np.bool_(rng.choice(a=[False, True], size=shape))

        raise ValueError("Unsupported type: {}".format(dtype))
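
    # A minimal usage sketch (the seeded generator and the shape are
    # illustrative assumptions, not part of this module):
    #
    #   rng = np.random.default_rng(42)
    #   t = TGen.getRand((2, 3), tf.float32, rng)
    #
    # With RAND_SHIFT_FACTOR = 0.5 and RAND_SCALE_FACTOR = 4.0, the float
    # values fall in [-2.0, 2.0).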

    @staticmethod
    def tgBasic(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]

        tf_placeholders = []
        tf_consts = []

        for i in range(pl):
            tf_placeholders.append(
                ("placeholder_{}".format(i), TGen.getRand(shape, dtype, rng))
            )

        for i in range(const):
            tf_consts.append(("const_{}".format(i), TGen.getRand(shape, dtype, rng)))

        return tf_placeholders, tf_consts
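
    # Hedged sketch of the op dictionary contract (the dictionary comes from
    # the wider test framework; only the "operands" key is used here):
    #
    #   op = {"operands": (2, 1)}  # 2 placeholder inputs, 1 constant
    #   placeholders, consts = TGen.tgBasic(op, (1, 8, 8, 4), tf.float32, rng)
    #   # -> [("placeholder_0", ...), ("placeholder_1", ...)], [("const_0", ...)]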

    @staticmethod
    def tgBFuzz(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape, optionally
        # fuzzing the arguments with random 1's to force broadcasting

        pl, const = op["operands"]

        assert const == 0

        fuzz_arg = rng.integers(0, pl + const)
        fuzz_idx = rng.integers(0, len(shape))

        tf_placeholders = []
        tf_consts = []
        for i in range(pl):
            if i == fuzz_arg:
                # Set one randomly chosen dimension to 1 to force a broadcast
                s_fuzz = list(shape)
                s_fuzz[fuzz_idx] = 1
                s_fuzz = tuple(s_fuzz)
                i_shape = s_fuzz
            else:
                i_shape = shape
            tf_placeholders.append(
                ("placeholder_{}".format(i), TGen.getRand(i_shape, dtype, rng))
            )

        return tf_placeholders, tf_consts
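
    # For example, with pl == 2 and shape == (1, 8, 8, 4), one randomly
    # chosen placeholder may instead get shape (1, 8, 1, 4), forcing the op
    # under test to broadcast it against the full-shape operand.
    # (Illustrative shapes only.)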

    @staticmethod
    def tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng):

        # Take the shape and generate an input and filter
        tf_placeholders = []
        tf_consts = []
        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
        tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))

        bias = op.get("bias", False)

        if bias:
            # bias is 1D and size == output channels
            bias_shape = (out_channels,)
            tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))

        return tf_placeholders, tf_consts

    @staticmethod
    def tgConv2d(op, ifm_shape, dtype, rng):

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # The test is hard-coded to make the OFM depth 2x the IFM depth.
        # TODO: randomize this in the future.
        out_channels = ifm_shape[3] * 2
        filter_shape = (filter_h, filter_w, ifm_shape[3], out_channels)

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)
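
    # For example, ifm_shape == (1, 8, 8, 4) with op["filter"] == (3, 3)
    # yields filter_shape == (3, 3, 4, 8): TensorFlow's HWIO filter layout
    # with the OFM depth fixed at twice the IFM depth. (Illustrative shapes.)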

    @staticmethod
    def tgDepthwiseConv2d(op, ifm_shape, dtype, rng):

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # The test is hard-coded to use channel_multiplier=2, so the OFM
        # depth is 2x the IFM depth.
        # TODO: randomize this in the future.
        filter_shape = (filter_h, filter_w, ifm_shape[3], 2)
        out_channels = ifm_shape[3] * 2

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)
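
    # Note the depthwise filter layout differs from tgConv2d: the last
    # dimension is the channel multiplier rather than the OFM depth, so
    # ifm_shape == (1, 8, 8, 4) with op["filter"] == (3, 3) gives
    # filter_shape == (3, 3, 4, 2) and an OFM depth of 8. (Illustrative shapes.)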

    @staticmethod
    def tgTransposeConv2d(op, ifm_shape, dtype, rng):

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # The test is hard-coded to make the OFM depth 2x the IFM depth;
        # note the transpose-conv filter layout is (H, W, out_channels,
        # in_channels).
        # TODO: randomize this in the future.
        out_channels = ifm_shape[3] * 2
        filter_shape = (filter_h, filter_w, out_channels, ifm_shape[3])

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)

    @staticmethod
    def tgConv3d(op, ifm_shape, dtype, rng):

        # Require rank 5 shape
        if len(ifm_shape) != 5:
            return [], []

        filter_d, filter_h, filter_w = op["filter"]

        # The test is hard-coded to make the OFM depth 2x the IFM depth.
        # TODO: randomize this in the future.
        in_channels = ifm_shape[4]
        out_channels = in_channels * 2
        filter_shape = (filter_d, filter_h, filter_w, in_channels, out_channels)

        return TGen.tgConvCommon(op, ifm_shape, filter_shape, out_channels, dtype, rng)

    @staticmethod
    def tgPooling(op, shapes, dtype, rng):
        # Pooling does nothing special except filter out non-rank-4 tensors
        if len(shapes) != 4:
            return [], []

        return TGen.tgBasic(op, shapes, dtype, rng)

    @staticmethod
    def tgMatmul(op, ifm_shape, dtype, rng):
        # Take the shape and generate an input and filter
        tf_placeholders = []
        tf_consts = []

        if len(ifm_shape) < 2:
            return [], []

        # For ifm_shape = [..., N, K], generate an rhs tensor with
        # shape [..., K, 2 * N]
        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))

        shape_rhs = list(ifm_shape)
        shape_rhs[-2] = ifm_shape[-1]
        shape_rhs[-1] = ifm_shape[-2] * 2
        tf_placeholders.append(
            (
                "placeholder_1",
                TGen.getRand(shape_rhs, dtype, rng),
            )
        )

        return tf_placeholders, tf_consts
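
    # For example, ifm_shape == (4, 6, 5) (so N == 6, K == 5) produces an
    # rhs of shape (4, 5, 12); lhs @ rhs is then well defined and yields an
    # output of shape (4, 6, 12). (Illustrative shapes.)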

    @staticmethod
    def tgOneHot(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]

        assert pl == 3 and const == 1

        tf_placeholders = []
        tf_consts = []

        # depth
        depth = np.int32(rng.integers(low=1, high=32, size=None))
        tf_consts.append(("const_0", depth))

        # indices
        indices = np.int32(rng.integers(low=0, high=depth, size=shape))
        tf_placeholders.append(("placeholder_0", indices))

        # on_value
        tf_placeholders.append(("placeholder_1", TGen.getRand(None, dtype, rng)))

        # off_value
        tf_placeholders.append(("placeholder_2", TGen.getRand(None, dtype, rng)))

        return tf_placeholders, tf_consts
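
    # These four tensors line up with tf.one_hot(indices, depth, on_value,
    # off_value): depth is emitted as a constant and the other three as
    # placeholders, matching the pl == 3 / const == 1 assertion above.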

    @staticmethod
    def tgSelect(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]
        assert pl == 3 and const == 0

        tf_placeholders = []
        tf_consts = []

        # scalar boolean selector (shape None yields a single random value)
        tf_placeholders.append(("placeholder_0", TGen.getRand(None, tf.bool, rng)))
        # inputs
        tf_placeholders.append(("placeholder_1", TGen.getRand(shape, dtype, rng)))
        tf_placeholders.append(("placeholder_2", TGen.getRand(shape, dtype, rng)))

        return tf_placeholders, tf_consts

    @staticmethod
    def tgRecurrent(op, ifm_shape, dtype, rng):
        # Require rank 3 shape for recurrent networks
        if len(ifm_shape) != 3:
            return [], []

        pl, const = op["operands"]

        tf_placeholders = []
        tf_consts = []

        for i in range(pl):
            tf_placeholders.append(
                ("placeholder_{}".format(i), TGen.getRand(ifm_shape, dtype, rng))
            )

        for i in range(const):
            tf_consts.append(
                ("const_{}".format(i), TGen.getRand(ifm_shape, dtype, rng))
            )

        return tf_placeholders, tf_consts

    @staticmethod
    def tgRFFT2d(op, shape, dtype, rng):
        # Require rank 3 shape
        if len(shape) != 3:
            return [], []

        tf_placeholders = [("placeholder_0", TGen.getRand(shape, dtype, rng))]
        return tf_placeholders, []
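

# A minimal smoke test, assuming this module is run directly with numpy and
# TensorFlow installed; the seed, op dictionary, and shape below are
# illustrative choices, not part of the surrounding test framework.
if __name__ == "__main__":
    demo_rng = np.random.default_rng(0)
    demo_op = {"operands": (2, 1)}
    placeholders, consts = TGen.tgBasic(demo_op, (1, 8, 8, 4), tf.float32, demo_rng)
    for name, value in placeholders + consts:
        print(name, value.shape, value.dtype)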