blob: 7cdd4f5ed2d1e100403cf92745a5a928d66dec89 [file] [log] [blame]
Louis Verhaard0b8268a2020-08-05 16:11:29 +02001# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
2#
3# SPDX-License-Identifier: Apache-2.0
4#
5# Licensed under the Apache License, Version 2.0 (the License); you may
6# not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an AS IS BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16# Description:
17# Utilities used in vela unit tests
18import numpy as np
19
20from ethosu.vela import architecture_features
21from ethosu.vela.data_type import DataType
22from ethosu.vela.nn_graph import Subgraph
Louis Verhaard0b8268a2020-08-05 16:11:29 +020023from ethosu.vela.operation import Operation
24from ethosu.vela.tensor import create_const_tensor
Dwight Lidman8359a472020-09-28 15:53:40 +020025from ethosu.vela.tensor import QuantizationParameters
Louis Verhaard0b8268a2020-08-05 16:11:29 +020026from ethosu.vela.tensor import Tensor
27
28
def create_arch():
    """Return an ArchitectureFeatures instance with the default test configuration.

    Targets the Ethos-U55-128 accelerator with no config files, no block-config
    overrides and block dependencies disabled, which keeps unit tests deterministic.
    """
    default_config = architecture_features.ArchitectureFeatures.DEFAULT_CONFIG
    settings = {
        "vela_config_files": None,
        "accelerator_config": architecture_features.Accelerator.Ethos_U55_128.value,
        "system_config": default_config,
        "memory_mode": default_config,
        "override_block_config": None,
        "block_config_limit": None,
        "max_blockdep": 0,
        "weight_estimation_scaling": 1.0,
        "verbose_config": False,
    }
    return architecture_features.ArchitectureFeatures(**settings)
41
42
Michael McGeagh65fd9982020-10-20 11:49:28 +010043def default_quant_params():
44 qp = QuantizationParameters()
45 qp.scale_f32 = np.float32(1)
46 qp.zero_point = 0
47 return qp
48
49
Dwight Lidman8359a472020-09-28 15:53:40 +020050def create_elemwise_op(
Michael McGeagh65fd9982020-10-20 11:49:28 +010051 op_type,
Dwight Lidman8359a472020-09-28 15:53:40 +020052 name,
53 ifm_shape,
54 ifm2_shape,
55 ofm_shape,
56 datatype=DataType.uint8,
Michael McGeagh65fd9982020-10-20 11:49:28 +010057 ifm_quant=default_quant_params(),
58 ifm2_quant=default_quant_params(),
59 ofm_quant=default_quant_params(),
Dwight Lidman8359a472020-09-28 15:53:40 +020060):
Louis Verhaard0b8268a2020-08-05 16:11:29 +020061 # Creates elementwise operation with constant IFM/IFM2
62 if datatype.size_in_bytes() == 1:
63 np_type = np.uint8
64 elif datatype.size_in_bytes() == 2:
65 np_type = np.int16
66 else:
67 np_type = np.int32
Michael McGeagh65fd9982020-10-20 11:49:28 +010068 op = Operation(op_type, name)
Dwight Lidman8359a472020-09-28 15:53:40 +020069 op.add_input_tensor(
70 create_const_tensor(name + "_ifm", ifm_shape, datatype, np.zeros(ifm_shape), np_type, quantization=ifm_quant)
71 )
Michael McGeagh65fd9982020-10-20 11:49:28 +010072 if ifm2_shape is not None:
73 op.add_input_tensor(
74 create_const_tensor(
75 name + "_ifm2", ifm2_shape, datatype, np.zeros(ifm2_shape), np_type, quantization=ifm2_quant
76 )
Dwight Lidman8359a472020-09-28 15:53:40 +020077 )
Louis Verhaard0b8268a2020-08-05 16:11:29 +020078 ofm = Tensor(ofm_shape, datatype, name + "_ofm")
Dwight Lidman8359a472020-09-28 15:53:40 +020079 ofm.quantization = ofm_quant
Louis Verhaard0b8268a2020-08-05 16:11:29 +020080 op.set_output_tensor(ofm)
Louis Verhaard0b8268a2020-08-05 16:11:29 +020081 return op
82
83
Dwight Lidmanc7187432020-11-16 17:40:46 +010084def create_op_with_quant_tensors(
85 op_type, ifm_shape, ofm_shape, weights_shape=None, bias_shape=None, datatype=DataType.uint8
86):
Michael McGeagh1f951fc2020-10-14 09:30:02 +010087 ifm = Tensor(ifm_shape, datatype, "in")
Michael McGeagh65fd9982020-10-20 11:49:28 +010088 ifm.quantization = default_quant_params()
Michael McGeagh1f951fc2020-10-14 09:30:02 +010089 ofm = Tensor(ofm_shape, datatype, "out")
Michael McGeagh65fd9982020-10-20 11:49:28 +010090 ofm.quantization = default_quant_params()
Michael McGeagh1f951fc2020-10-14 09:30:02 +010091 op = Operation(op_type, "op")
92 op.add_input_tensor(ifm)
93 op.set_output_tensor(ofm)
94 # Optional weight tensor
95 if weights_shape is not None:
96 if datatype.size_in_bytes() == 1:
97 np_type = np.uint8
98 elif datatype.size_in_bytes() == 2:
99 np_type = np.int16
100 else:
101 np_type = np.int32
Michael McGeagh65fd9982020-10-20 11:49:28 +0100102 qp = default_quant_params()
Michael McGeagh1f951fc2020-10-14 09:30:02 +0100103 qp.zero_point = np.zeros(weights_shape)
104 weights = create_const_tensor(
105 "weights", weights_shape, datatype, np.zeros(weights_shape), np_type, quantization=qp
106 )
107 op.add_input_tensor(weights)
Dwight Lidmanc7187432020-11-16 17:40:46 +0100108 # Optional bias tensor
109 if bias_shape is not None:
110 qp = default_quant_params()
111 qp.zero_point = np.zeros(bias_shape)
112 bias = create_const_tensor("bias", bias_shape, DataType.int32, np.zeros(bias_shape), np.int32, quantization=qp)
113 op.add_input_tensor(bias)
Michael McGeagh1f951fc2020-10-14 09:30:02 +0100114 return op
115
116
Louis Verhaardfa2f92a2020-09-21 11:56:18 +0200117def create_op(op_type, inputs, output, attrs=dict()):
118 op = Operation(op_type, output.name + "_op")
119 op.inputs = inputs
120 op.outputs = [output]
121 op.attrs = attrs
122 return op
123
124
Louis Verhaard0b8268a2020-08-05 16:11:29 +0200125def create_subgraph(op_list):
126 # Creates subgraph using the given list of operations
127 sg = Subgraph()
128 all_inputs = set(tens for op in op_list for tens in op.inputs)
129 # Reversing, so that the resulting subgraph has same order as op_list
130 for op in op_list[::-1]:
131 for tens in op.outputs:
132 if tens not in all_inputs and tens not in sg.output_tensors:
133 sg.output_tensors.append(tens)
134 return sg