# Copyright (c) 2020-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
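"""Tensor value generators for building TensorFlow operator test inputs.

Each tg* function returns (tf_placeholders, tf_consts): lists of
(name, numpy value) tuples naming the placeholder and constant tensors
for one operator under test.
"""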
import numpy as np
import tensorflow as tf

# FIXME: replace hardcoded '* 2' with random integers, where possible

# The scaling factor for random numbers generated in input tensors. The
# random numbers are calculated as:
# (rng.random() - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
# FIXME: improve range here
RAND_SCALE_FACTOR = 4.0
# Amount to subtract from random numbers to center them around zero
RAND_SHIFT_FACTOR = 0.5

RAND_INT_MIN = -128
RAND_INT_MAX = 128
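# Note: rng.integers() treats 'high' as exclusive, so generated integer
# values fall in [RAND_INT_MIN, RAND_INT_MAX)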


class TGen:
    """A collection of functions to build tensor value arguments for an operator"""

    def __init__(self):
        pass

    @staticmethod
    def getRand(shape, dtype, rng):
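        # Return a random numpy array of the given shape and TF dtype:
        #   float32/float16: uniform values in [-2.0, 2.0)
        #   int32:           integers in [-128, 128)
        #   uint32:          integers in [0, 128)
        #   bool:            uniformly chosen True/False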
        if dtype == tf.float32:
            return np.float32(
                (rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
            )
        if dtype == tf.float16:
            return np.float16(
                (rng.random(size=shape) - RAND_SHIFT_FACTOR) * RAND_SCALE_FACTOR
            )
        if dtype == tf.int32:
            return np.int32(
                rng.integers(low=RAND_INT_MIN, high=RAND_INT_MAX, size=shape)
            )
        if dtype == tf.uint32:
            return np.uint32(rng.integers(low=0, high=RAND_INT_MAX, size=shape))
        if dtype == tf.bool:
            return np.bool_(rng.choice(a=[False, True], size=shape))

        raise ValueError("Unsupported type: {}".format(dtype))

    @staticmethod
    def tgBasic(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]

        tf_placeholders = []
        tf_consts = []

        for i in range(pl):
            tf_placeholders.append(
                ("placeholder_{}".format(i), TGen.getRand(shape, dtype, rng))
            )

        for i in range(const):
            tf_consts.append(("const_{}".format(i), TGen.getRand(shape, dtype, rng)))

        return tf_placeholders, tf_consts

    @staticmethod
    def tgBFuzz(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape, optionally
        # fuzzing the arguments with random 1's to force broadcasting

        pl, const = op["operands"]

        assert const == 0

        fuzz_arg = rng.integers(0, pl + const)
        fuzz_idx = rng.integers(0, len(shape))

        tf_placeholders = []
        tf_consts = []
        for i in range(pl):
            if i == fuzz_arg:
                # Set one randomly chosen dimension of this argument to 1 to
                # force broadcasting against the full-size arguments
                s_fuzz = list(shape)
                s_fuzz[fuzz_idx] = 1
                s_fuzz = tuple(s_fuzz)
                i_shape = s_fuzz
            else:
                i_shape = shape
            tf_placeholders.append(
                ("placeholder_{}".format(i), TGen.getRand(i_shape, dtype, rng))
            )

        return tf_placeholders, tf_consts

    @staticmethod
    def tgConv2d(op, ifm_shape, dtype, rng):

        # Take the shape and generate an input and filter
        tf_placeholders = []
        tf_consts = []

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # TODO: Hard-code the test by making the OFM depth 2x the IFM depth.
        # Could randomize this in the future.
        filter_shape = (filter_h, filter_w, ifm_shape[3], ifm_shape[3] * 2)
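        # tf.nn.conv2d expects filters of shape
        # [filter_height, filter_width, in_channels, out_channels]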

        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
        tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))

        bias = op.get("bias", False)

        if bias:
            # bias is 1D and size == output channels
            bias_shape = (ifm_shape[3] * 2,)
            tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))

        return tf_placeholders, tf_consts

    @staticmethod
    def tgDepthwiseConv2d(op, ifm_shape, dtype, rng):

        # Take the shape and generate an input and filter
        tf_placeholders = []
        tf_consts = []

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # TODO: Hard-code the test by making the channel_multiplier=2. Could randomize
        # this in the future.
        filter_shape = (filter_h, filter_w, ifm_shape[3], 2)
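        # tf.nn.depthwise_conv2d expects filters of shape
        # [filter_height, filter_width, in_channels, channel_multiplier];
        # output channels = in_channels * channel_multiplier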

        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
        tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))

        bias = op.get("bias", False)

        if bias:
            # bias is 1D and size == output channels
            bias_shape = (ifm_shape[3] * 2,)
            tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))

        return tf_placeholders, tf_consts

    @staticmethod
    def tgTransposeConv2d(op, ifm_shape, dtype, rng):

        # Take the shape and generate an input and filter
        tf_placeholders = []
        tf_consts = []

        # Require rank 4 shape
        if len(ifm_shape) != 4:
            return [], []

        filter_h, filter_w = op["filter"]

        # TODO: Hard-code the test by making the OFM depth 2x the IFM depth.
        # Could randomize this in the future.
        filter_shape = (filter_h, filter_w, ifm_shape[3] * 2, ifm_shape[3])
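        # tf.nn.conv2d_transpose expects filters of shape
        # [filter_height, filter_width, out_channels, in_channels]; note the
        # transposed channel order relative to tf.nn.conv2d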

        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))
        tf_consts.append(("const_0", TGen.getRand(filter_shape, dtype, rng)))

        bias = op.get("bias", False)

        if bias:
            # bias is 1D and size == output channels
            bias_shape = (ifm_shape[3] * 2,)
            tf_consts.append(("const_1", TGen.getRand(bias_shape, dtype, rng)))

        return tf_placeholders, tf_consts

    @staticmethod
    def tgPooling(op, shapes, dtype, rng):
        # Pooling does nothing special except filter out non-rank-4 tensors
        if len(shapes) != 4:
            return [], []

        return TGen.tgBasic(op, shapes, dtype, rng)

    @staticmethod
    def tgMatmul(op, ifm_shape, dtype, rng):
        # Take the shape and generate lhs and rhs inputs
        tf_placeholders = []
        tf_consts = []

        if len(ifm_shape) < 2:
            return [], []

        # For ifm_shape = [..., N, K]
        # generate a rhs tensor with shape [..., K, 2 * N]
        tf_placeholders.append(("placeholder_0", TGen.getRand(ifm_shape, dtype, rng)))

        shape_rhs = list(ifm_shape)
        shape_rhs[-2] = ifm_shape[-1]
        shape_rhs[-1] = ifm_shape[-2] * 2
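        # The resulting matmul output will have shape [..., N, 2 * N]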
        tf_placeholders.append(
            (
                "placeholder_1",
                TGen.getRand(shape_rhs, dtype, rng),
            )
        )

        return tf_placeholders, tf_consts

    @staticmethod
    def tgOneHot(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]

        assert pl == 3 and const == 1

        tf_placeholders = []
        tf_consts = []

        # depth
        depth = np.int32(rng.integers(low=1, high=32, size=None))
        tf_consts.append(("const_0", depth))

        # indices
        indices = np.int32(rng.integers(low=0, high=depth, size=shape))
        tf_placeholders.append(("placeholder_0", indices))
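        # Drawing indices in [0, depth) ensures each element selects a valid
        # one-hot position; tf.one_hot maps out-of-range indices to all
        # off_value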

        # on_value
        tf_placeholders.append(("placeholder_1", TGen.getRand(None, dtype, rng)))

        # off_value
        tf_placeholders.append(("placeholder_2", TGen.getRand(None, dtype, rng)))
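        # on_value and off_value are scalars (shape None), matching
        # tf.one_hot's scalar fill-value arguments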

        return tf_placeholders, tf_consts

    @staticmethod
    def tgSelect(op, shape, dtype, rng):
        # Build random tensor placeholder node args of a given shape
        pl, const = op["operands"]
        assert pl == 3 and const == 0

        tf_placeholders = []
        tf_consts = []

        # selector
        tf_placeholders.append(("placeholder_0", TGen.getRand(None, tf.bool, rng)))
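        # A shape of None yields a rank-0 (scalar) boolean selector, which
        # (as with tf.where) selects one of the two inputs in its entirety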
        # inputs
        tf_placeholders.append(("placeholder_1", TGen.getRand(shape, dtype, rng)))
        tf_placeholders.append(("placeholder_2", TGen.getRand(shape, dtype, rng)))

        return tf_placeholders, tf_consts
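

# Example usage: an illustrative sketch only. The op dicts below are
# hypothetical; real op dicts are supplied by the test framework that
# drives these generators.
if __name__ == "__main__":
    rng = np.random.default_rng(seed=42)

    # Hypothetical op description: two placeholder inputs, no constants
    example_op = {"operands": (2, 0)}
    placeholders, consts = TGen.tgBasic(example_op, (1, 4, 4, 3), tf.float32, rng)
    for name, value in placeholders:
        print(name, value.shape, value.dtype)

    # Hypothetical conv2d-style op: 3x3 filter plus a bias constant
    conv_op = {"operands": (1, 2), "filter": (3, 3), "bias": True}
    placeholders, consts = TGen.tgConv2d(conv_op, (1, 8, 8, 4), tf.float32, rng)
    for name, value in consts:
        print(name, value.shape, value.dtype)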