# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

"""Automatic speech recognition with PyArmNN demo for processing audio clips to text."""

6import sys
7import os
8from argparse import ArgumentParser
9
10script_dir = os.path.dirname(__file__)
11sys.path.insert(1, os.path.join(script_dir, '..', 'common'))
12
13from network_executor import ArmnnNetworkExecutor
14from utils import dict_labels
15from preprocess import MFCCParams, Preprocessor, MFCC
16from audio_capture import AudioCapture, ModelParams
17from audio_utils import decode_text, prepare_input_tensors, display_text
18
19
def parse_args():
    """Build the command-line interface for the ASR demo and parse sys.argv.

    Returns:
        argparse.Namespace with audio_file_path, model_file_path,
        labels_file_path and preferred_backends attributes.
    """
    parser = ArgumentParser(description="ASR with PyArmNN")

    # The three mandatory path arguments share the same shape; declare them
    # in one table so the flag/help pairs stay easy to scan.
    required_path_args = (
        ("--audio_file_path", "Path to the audio file to perform ASR"),
        ("--model_file_path", "Path to ASR model to use"),
        ("--labels_file_path", "Path to text file containing labels to map to model output"),
    )
    for flag, help_text in required_path_args:
        parser.add_argument(flag, required=True, type=str, help=help_text)

    parser.add_argument(
        "--preferred_backends",
        type=str,
        nargs="+",
        default=["CpuAcc", "CpuRef"],
        help="""List of backends in order of preference for optimizing
        subgraphs, falling back to the next backend in the list on unsupported
        layers. Defaults to [CpuAcc, CpuRef]""",
    )
    return parser.parse_args()
50
51
def main(args):
    """Transcribe one audio clip: window it, run inference per window,
    decode and print the accumulated text.

    Args:
        args: Parsed command-line namespace (see parse_args).
    """
    model = ModelParams(args.model_file_path)
    labels = dict_labels(args.labels_file_path)

    # Inference runner targeting the requested ArmNN backends, in order.
    network = ArmnnNetworkExecutor(model.path, args.preferred_backends)

    # Split the clip into model-sized windows of samples.
    capture = AudioCapture(model)
    audio_windows = capture.from_audio_file(args.audio_file_path)

    # MFCC feature-extraction configuration feeding the preprocessor.
    mfcc_params = MFCCParams(sampling_freq=16000, num_fbank_bins=128, mel_lo_freq=0, mel_hi_freq=8000,
                             num_mfcc_feats=13, frame_len=512, use_htk_method=False, n_FFT=512)
    preprocessor = Preprocessor(MFCC(mfcc_params), model_input_size=296, stride=160)

    text = ""
    current_r_context = ""

    print("Processing Audio Frames...")
    for window_index, audio_data in enumerate(audio_windows):
        # Extract features and wrap them as input tensors for the network.
        input_tensors = prepare_input_tensors(audio_data, network.input_binding_info, preprocessor)

        output_result = network.run(input_tensors)

        # Only the first window keeps its full left context; later windows
        # are sliced, and the right context is carried to the next iteration.
        current_r_context, text = decode_text(window_index == 0, labels, output_result)

        display_text(text)

    # Flush the trailing right context of the final window.
    print(current_r_context, flush=True)
91
if __name__ == "__main__":
    # Parse CLI arguments and run the demo only when executed as a script.
    main(parse_args())