/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph.h"
#include "arm_compute/graph/Types.h"
#include "support/ToolchainSupport.h"
#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"

using namespace arm_compute::utils;
using namespace arm_compute::graph;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;

/** Example demonstrating how to implement DeepSpeech v0.4.1's network using the Compute Library's graph API */
class GraphDeepSpeechExample : public Example
{
public:
    GraphDeepSpeechExample()
        : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "DeepSpeech v0.4.1")
    {
    }
    bool do_setup(int argc, char **argv) override
    {
        // Parse arguments
        cmd_parser.parse(argc, argv);
        cmd_parser.validate();

        // Consume common parameters
        common_params = consume_common_graph_parameters(common_opts);

        // Return when help menu is requested
        if(common_params.help)
        {
            cmd_parser.print_help(argv[0]);
            return false;
        }

        // Print parameter values
        std::cout << common_params << std::endl;

        // Get trainable parameters data path
        std::string       data_path  = common_params.data_path;
        const std::string model_path = "/cnn_data/deepspeech_model/";

        if(!data_path.empty())
        {
            data_path += model_path;
        }

        // Number of timesteps to process at once; higher values increase latency.
        // Note that this also determines how many unrolled LSTM cells are instantiated below.
        const unsigned int n_steps = 16;

        // ReLU clipping value for non-recurrent layers
        const float cell_clip = 20.f;

        // Create input descriptor
        const TensorShape tensor_shape     = permute_shape(TensorShape(26U, 19U, n_steps, 1U), DataLayout::NHWC, common_params.data_layout);
        TensorDescriptor  input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
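        // Each timestep presumably carries 26 MFCC features over a 19-frame context window
        // (26 * 19 = 494 values), which matches the Reshape to 494 inputs further below.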

        // Set weights trained layout
        const DataLayout weights_layout = DataLayout::NHWC;

        graph << common_params.target
              << common_params.fast_math_hint
              << InputLayer(input_descriptor,
                            get_weights_accessor(data_path, "input_values_x" + std::to_string(n_steps) + ".npy", weights_layout))
              .set_name("input_node");

        if(common_params.data_layout == DataLayout::NCHW)
        {
            graph << PermuteLayer(PermutationVector(2U, 0U, 1U), common_params.data_layout).set_name("permute_to_nhwc");
        }

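        // Flatten each timestep's 26x19 feature window into a 494-element vector, giving a
        // (494, n_steps) tensor; the fully connected layers below then act per timestep.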
        graph << ReshapeLayer(TensorShape(494U, n_steps)).set_name("Reshape_input")
              // Layer 1
              << FullyConnectedLayer(
                  2048U,
                  get_weights_accessor(data_path, "h1_transpose.npy", weights_layout),
                  get_weights_accessor(data_path, "MatMul_bias.npy"))
              .set_name("fc0")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, cell_clip))
              .set_name("Relu")
              // Layer 2
              << FullyConnectedLayer(
                  2048U,
                  get_weights_accessor(data_path, "h2_transpose.npy", weights_layout),
                  get_weights_accessor(data_path, "MatMul_1_bias.npy"))
              .set_name("fc1")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, cell_clip))
              .set_name("Relu_1")
              // Layer 3
              << FullyConnectedLayer(
                  2048U,
                  get_weights_accessor(data_path, "h3_transpose.npy", weights_layout),
                  get_weights_accessor(data_path, "MatMul_2_bias.npy"))
              .set_name("fc2")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, cell_clip))
              .set_name("Relu_2")
              // Layer 4
              << ReshapeLayer(TensorShape(2048U, 1U, n_steps)).set_name("Reshape_1");

        // Unstack Layer (using SplitLayerNode)
        NodeParams unstack_params = { "unstack", graph.hints().target_hint };
        NodeID     unstack_nid    = GraphBuilder::add_split_node(graph.graph(), unstack_params, { graph.tail_node(), 0 }, n_steps, 2);
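        // add_split_node slices the (2048, 1, n_steps) tensor along axis 2 into n_steps
        // per-timestep sub-tensors, mirroring TensorFlow's tf.unstack; output
        // (unstack_nid, i) feeds the i-th unrolled LSTM cell below.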

        // Create input state descriptor
        TensorDescriptor state_descriptor = TensorDescriptor(TensorShape(2048U), common_params.data_type).set_layout(common_params.data_layout);
        SubStream        previous_state(graph);
        SubStream        add_y(graph);

        // Initial state for LSTM is all zeroes for both state_h and state_c, therefore only one input is created
        previous_state << InputLayer(state_descriptor,
                                     get_weights_accessor(data_path, "zeros.npy"))
                       .set_name("previous_state_c_h");
        add_y << InputLayer(state_descriptor,
                            get_weights_accessor(data_path, "ones.npy"))
              .set_name("add_y");

        // Create LSTM Fully Connected weights and bias descriptors
        TensorDescriptor lstm_weights_descriptor = TensorDescriptor(TensorShape(4096U, 8192U), common_params.data_type).set_layout(common_params.data_layout);
        TensorDescriptor lstm_bias_descriptor    = TensorDescriptor(TensorShape(8192U), common_params.data_type).set_layout(common_params.data_layout);
        SubStream        lstm_fc_weights(graph);
        SubStream        lstm_fc_bias(graph);
        lstm_fc_weights << ConstantLayer(lstm_weights_descriptor,
                                         get_weights_accessor(data_path, "rnn_lstm_cell_kernel_transpose.npy", weights_layout))
                        .set_name("h5/transpose");
        lstm_fc_bias << ConstantLayer(lstm_bias_descriptor,
                                      get_weights_accessor(data_path, "rnn_lstm_cell_MatMul_bias.npy"))
                     .set_name("MatMul_3_bias");

        // LSTM Block
        std::pair<SubStream, SubStream> new_state_1  = add_lstm_cell(unstack_nid, 0, previous_state, previous_state, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_2  = add_lstm_cell(unstack_nid, 1, new_state_1.first, new_state_1.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_3  = add_lstm_cell(unstack_nid, 2, new_state_2.first, new_state_2.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_4  = add_lstm_cell(unstack_nid, 3, new_state_3.first, new_state_3.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_5  = add_lstm_cell(unstack_nid, 4, new_state_4.first, new_state_4.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_6  = add_lstm_cell(unstack_nid, 5, new_state_5.first, new_state_5.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_7  = add_lstm_cell(unstack_nid, 6, new_state_6.first, new_state_6.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_8  = add_lstm_cell(unstack_nid, 7, new_state_7.first, new_state_7.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_9  = add_lstm_cell(unstack_nid, 8, new_state_8.first, new_state_8.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_10 = add_lstm_cell(unstack_nid, 9, new_state_9.first, new_state_9.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_11 = add_lstm_cell(unstack_nid, 10, new_state_10.first, new_state_10.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_12 = add_lstm_cell(unstack_nid, 11, new_state_11.first, new_state_11.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_13 = add_lstm_cell(unstack_nid, 12, new_state_12.first, new_state_12.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_14 = add_lstm_cell(unstack_nid, 13, new_state_13.first, new_state_13.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_15 = add_lstm_cell(unstack_nid, 14, new_state_14.first, new_state_14.second, add_y, lstm_fc_weights, lstm_fc_bias);
        std::pair<SubStream, SubStream> new_state_16 = add_lstm_cell(unstack_nid, 15, new_state_15.first, new_state_15.second, add_y, lstm_fc_weights, lstm_fc_bias);

        // Stack the new hidden states along the height axis (axis 1)
        const int axis = 1;
        graph << StackLayer(axis,
                            std::move(new_state_1.second),
                            std::move(new_state_2.second),
                            std::move(new_state_3.second),
                            std::move(new_state_4.second),
                            std::move(new_state_5.second),
                            std::move(new_state_6.second),
                            std::move(new_state_7.second),
                            std::move(new_state_8.second),
                            std::move(new_state_9.second),
                            std::move(new_state_10.second),
                            std::move(new_state_11.second),
                            std::move(new_state_12.second),
                            std::move(new_state_13.second),
                            std::move(new_state_14.second),
                            std::move(new_state_15.second),
                            std::move(new_state_16.second))
              .set_name("concat");
        graph << FullyConnectedLayer(
                  2048U,
                  get_weights_accessor(data_path, "h5_transpose.npy", weights_layout),
                  get_weights_accessor(data_path, "MatMul_3_bias.npy"))
              .set_name("fc3")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, cell_clip))
              .set_name("Relu3")
              << FullyConnectedLayer(
                  29U,
                  get_weights_accessor(data_path, "h6_transpose.npy", weights_layout),
                  get_weights_accessor(data_path, "MatMul_4_bias.npy"))
              .set_name("fc4")
              << SoftmaxLayer().set_name("logits");

        graph << OutputLayer(get_output_accessor(common_params, 5));

        // Finalize graph
        GraphConfig config;
        config.num_threads = common_params.threads;
        config.use_tuner   = common_params.enable_tuner;
        config.tuner_file  = common_params.tuner_file;
        config.mlgo_file   = common_params.mlgo_file;
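        // No quantized weights are shipped for this model, so for quantized data types the
        // graph presumably runs on synthetic tensors of the requested type instead.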
        config.use_synthetic_type = arm_compute::is_data_type_quantized(common_params.data_type);
        config.synthetic_type     = common_params.data_type;

        graph.finalize(common_params.target, config);

        return true;
    }
    void do_run() override
    {
        // Run graph
        graph.run();
    }

private:
    CommandLineParser  cmd_parser;
    CommonGraphOptions common_opts;
    CommonGraphParams  common_params;
    Stream             graph;

    Status set_node_params(Graph &g, NodeID nid, NodeParams &params)
    {
        INode *node = g.node(nid);
        ARM_COMPUTE_RETURN_ERROR_ON(!node);

        node->set_common_node_parameters(params);

        return Status{};
    }

    std::pair<SubStream, SubStream> add_lstm_cell(NodeID       unstack_nid,
                                                  unsigned int unstack_idx,
                                                  SubStream    previous_state_c,
                                                  SubStream    previous_state_h,
                                                  SubStream    add_y,
                                                  SubStream    lstm_fc_weights,
                                                  SubStream    lstm_fc_bias)
    {
        const std::string         cell_name("rnn/lstm_cell_" + std::to_string(unstack_idx));
        const DataLayoutDimension concat_dim = (common_params.data_layout == DataLayout::NHWC) ? DataLayoutDimension::CHANNEL : DataLayoutDimension::WIDTH;

        // Concatenate result of Unstack with previous_state_h
        NodeParams concat_params = { cell_name + "/concat", graph.hints().target_hint };
        NodeID     concat_nid    = graph.graph().add_node<ConcatenateLayerNode>(2, concat_dim);
        graph.graph().add_connection(unstack_nid, unstack_idx, concat_nid, 0);
        graph.graph().add_connection(previous_state_h.tail_node(), 0, concat_nid, 1);
        set_node_params(graph.graph(), concat_nid, concat_params);
        graph.forward_tail(concat_nid);
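
        // The concatenation joins x_t (2048 values from the current timestep) with h_{t-1}
        // (2048 values), forming the 4096-element input to the shared gate computation.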
        graph << FullyConnectedLayer(
                  8192U,
                  lstm_fc_weights,
                  lstm_fc_bias)
              .set_name(cell_name + "/BiasAdd");

        // Split Layer
        const unsigned int num_splits = 4;
        const unsigned int split_axis = 0;

        NodeParams split_params = { cell_name + "/split", graph.hints().target_hint };
        NodeID     split_nid    = GraphBuilder::add_split_node(graph.graph(), split_params, { graph.tail_node(), 0 }, num_splits, split_axis);
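        // The 8192 gate pre-activations are split along axis 0 into four 2048-wide chunks;
        // the wiring below suggests TensorFlow's (i, j, f, o) ordering: input gate,
        // candidate state, forget gate and output gate.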

        NodeParams sigmoid_1_params = { cell_name + "/Sigmoid_1", graph.hints().target_hint };
        NodeParams add_params       = { cell_name + "/add", graph.hints().target_hint };
        NodeParams sigmoid_2_params = { cell_name + "/Sigmoid_2", graph.hints().target_hint };
        NodeParams tanh_params      = { cell_name + "/Tanh", graph.hints().target_hint };

        // Sigmoid 1 (first split)
        NodeID sigmoid_1_nid = graph.graph().add_node<ActivationLayerNode>(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
        graph.graph().add_connection(split_nid, 0, sigmoid_1_nid, 0);
        set_node_params(graph.graph(), sigmoid_1_nid, sigmoid_1_params);

        // Tanh (second split)
        NodeID tanh_nid = graph.graph().add_node<ActivationLayerNode>(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH, 1.f, 1.f));
        graph.graph().add_connection(split_nid, 1, tanh_nid, 0);
        set_node_params(graph.graph(), tanh_nid, tanh_params);

        SubStream tanh_ss(graph);
        tanh_ss.forward_tail(tanh_nid);

        // Add (third split)
        NodeID add_nid = graph.graph().add_node<EltwiseLayerNode>(descriptors::EltwiseLayerDescriptor{ EltwiseOperation::Add });
        graph.graph().add_connection(split_nid, 2, add_nid, 0);
        graph.graph().add_connection(add_y.tail_node(), 0, add_nid, 1);
        set_node_params(graph.graph(), add_nid, add_params);

        // Sigmoid 2 (fourth split)
        NodeID sigmoid_2_nid = graph.graph().add_node<ActivationLayerNode>(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
        graph.graph().add_connection(split_nid, 3, sigmoid_2_nid, 0);
        set_node_params(graph.graph(), sigmoid_2_nid, sigmoid_2_params);
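
        // The elementwise ops below appear to wire up the standard LSTM update:
        //   c_t = sigmoid(f + 1) * c_{t-1} + sigmoid(i) * tanh(j)
        //   h_t = sigmoid(o) * tanh(c_t)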
        SubStream sigmoid_1_ss(graph);
        sigmoid_1_ss.forward_tail(sigmoid_1_nid);
        SubStream mul_1_ss(sigmoid_1_ss);
        mul_1_ss << EltwiseLayer(std::move(sigmoid_1_ss), std::move(tanh_ss), EltwiseOperation::Mul)
                 .set_name(cell_name + "/mul_1");

        SubStream tanh_1_ss_tmp(graph);
        tanh_1_ss_tmp.forward_tail(add_nid);

        tanh_1_ss_tmp << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC))
                      .set_name(cell_name + "/Sigmoid");
        SubStream tanh_1_ss_tmp2(tanh_1_ss_tmp);
        tanh_1_ss_tmp2 << EltwiseLayer(std::move(tanh_1_ss_tmp), std::move(previous_state_c), EltwiseOperation::Mul)
                       .set_name(cell_name + "/mul");
        SubStream tanh_1_ss(tanh_1_ss_tmp2);
        tanh_1_ss << EltwiseLayer(std::move(tanh_1_ss_tmp2), std::move(mul_1_ss), EltwiseOperation::Add)
                  .set_name(cell_name + "/new_state_c");
        SubStream new_state_c(tanh_1_ss);

        tanh_1_ss << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH, 1.f, 1.f))
                  .set_name(cell_name + "/Tanh_1");

        SubStream sigmoid_2_ss(graph);
        sigmoid_2_ss.forward_tail(sigmoid_2_nid);
        graph << EltwiseLayer(std::move(sigmoid_2_ss), std::move(tanh_1_ss), EltwiseOperation::Mul)
              .set_name(cell_name + "/new_state_h");

        SubStream new_state_h(graph);
        return std::pair<SubStream, SubStream>(new_state_c, new_state_h);
    }
};

/** Main program for DeepSpeech v0.4.1
 *
 * Model is based on:
 *      https://arxiv.org/abs/1412.5567
 *      "Deep Speech: Scaling up end-to-end speech recognition"
 *      Awni Hannun, Carl Case, Jared Casper, Bryan Catanzaro, Greg Diamos, Erich Elsen, Ryan Prenger, Sanjeev Satheesh, Shubho Sengupta, Adam Coates, Andrew Y. Ng
 *
 * Provenance: https://github.com/mozilla/DeepSpeech
 *
 * @note To list all the possible arguments execute the binary appended with the --help option
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments
 *
 * @return Return code
 */
int main(int argc, char **argv)
{
    return arm_compute::utils::run_example<GraphDeepSpeechExample>(argc, argv);
}