# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Utilities used in vela unit tests
import numpy as np

from ethosu.vela import architecture_features
from ethosu.vela.data_type import DataType
from ethosu.vela.nn_graph import Subgraph
from ethosu.vela.operation import Operation
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import QuantizationParameters
from ethosu.vela.tensor import Tensor


def create_arch():
    return architecture_features.ArchitectureFeatures(
        vela_config=None,
        system_config=None,
        accelerator_config=architecture_features.Accelerator.Ethos_U55_128.value,
        override_block_config=None,
        block_config_limit=None,
        global_memory_clock_scale=1.0,
        max_blockdep=0,
        weight_estimation_scaling=1.0,
    )
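# Illustrative usage (not executed here): tests typically build this default
# Ethos-U55-128 test architecture once and pass it to the code under test, e.g.
#     arch = create_arch()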


def default_quant_params():
    qp = QuantizationParameters()
    qp.scale_f32 = np.float32(1)
    qp.zero_point = 0
    return qp
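# The defaults above describe identity quantization (scale 1.0, zero point 0).
# Illustrative usage (not executed here):
#     qp = default_quant_params()
#     qp.zero_point = 128  # hypothetical tweak for a specific test case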


def create_elemwise_op(
    op_type,
    name,
    ifm_shape,
    ifm2_shape,
    ofm_shape,
    datatype=DataType.uint8,
    ifm_quant=default_quant_params(),
    ifm2_quant=default_quant_params(),
    ofm_quant=default_quant_params(),
):
    # Creates elementwise operation with constant IFM/IFM2
    if datatype.size_in_bytes() == 1:
        np_type = np.uint8
    elif datatype.size_in_bytes() == 2:
        np_type = np.int16
    else:
        np_type = np.int32
    op = Operation(op_type, name)
    op.add_input_tensor(
        create_const_tensor(name + "_ifm", ifm_shape, datatype, np.zeros(ifm_shape), np_type, quantization=ifm_quant)
    )
    if ifm2_shape is not None:
        op.add_input_tensor(
            create_const_tensor(
                name + "_ifm2", ifm2_shape, datatype, np.zeros(ifm2_shape), np_type, quantization=ifm2_quant
            )
        )
    ofm = Tensor(ofm_shape, datatype, name + "_ofm")
    ofm.quantization = ofm_quant
    op.set_output_tensor(ofm)
    return op
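# Illustrative usage (not executed here); op_type is whatever the Operation
# constructor accepts in this vela version, and the shapes are made up:
#     op = create_elemwise_op(op_type, "add1", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])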


def create_op_with_quant_tensors(
    op_type, ifm_shape, ofm_shape, weights_shape=None, bias_shape=None, datatype=DataType.uint8
):
    ifm = Tensor(ifm_shape, datatype, "in")
    ifm.quantization = default_quant_params()
    ofm = Tensor(ofm_shape, datatype, "out")
    ofm.quantization = default_quant_params()
    op = Operation(op_type, "op")
    op.add_input_tensor(ifm)
    op.set_output_tensor(ofm)
    # Optional weight tensor
    if weights_shape is not None:
        if datatype.size_in_bytes() == 1:
            np_type = np.uint8
        elif datatype.size_in_bytes() == 2:
            np_type = np.int16
        else:
            np_type = np.int32
        qp = default_quant_params()
        qp.zero_point = np.zeros(weights_shape)
        weights = create_const_tensor(
            "weights", weights_shape, datatype, np.zeros(weights_shape), np_type, quantization=qp
        )
        op.add_input_tensor(weights)
    # Optional bias tensor
    if bias_shape is not None:
        qp = default_quant_params()
        qp.zero_point = np.zeros(bias_shape)
        bias = create_const_tensor("bias", bias_shape, DataType.int32, np.zeros(bias_shape), np.int32, quantization=qp)
        op.add_input_tensor(bias)
    return op
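# Illustrative usage (not executed here); shapes are hypothetical NHWC examples:
#     op = create_op_with_quant_tensors(op_type, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 8])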


def create_op(op_type, inputs, output, attrs=None):
    op = Operation(op_type, output.name + "_op")
    op.inputs = inputs
    op.outputs = [output]
    if attrs is not None:
        op.attrs = attrs
    return op
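# Illustrative usage (not executed here), wiring pre-built tensors into an op;
# the attrs contents are hypothetical and depend on the op being tested:
#     op = create_op(op_type, [ifm, weights], ofm, attrs={"stride_w": 1, "stride_h": 1})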


def create_subgraph(op_list):
    # Creates subgraph using the given list of operations
    sg = Subgraph()
    all_inputs = set(tens for op in op_list for tens in op.inputs)
    # Reversing, so that the resulting subgraph has same order as op_list
    for op in op_list[::-1]:
        for tens in op.outputs:
            if tens not in all_inputs and tens not in sg.output_tensors:
                sg.output_tensors.append(tens)
    return sg
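# Illustrative usage (not executed here), chaining the helpers above: the ofm of
# the single op is not consumed by any other op, so it becomes a subgraph output:
#     op = create_op_with_quant_tensors(op_type, [1, 8, 8, 8], [1, 8, 8, 8])
#     sg = create_subgraph([op])
#     assert sg.output_tensors == op.outputs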