# Copyright © 2020-2021 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

"""
Object detection demo that takes a video file, runs inference on each frame producing
bounding boxes and labels around detected objects, and saves the processed video.
"""

import os
import sys
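# Make the shared helper modules in ../common (ssd, yolo, utils, cv_utils,
# network_executor) importable before they are imported below.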
script_dir = os.path.dirname(__file__)
sys.path.insert(1, os.path.join(script_dir, '..', 'common'))

import cv2
from tqdm import tqdm
from argparse import ArgumentParser

from ssd import ssd_processing, ssd_resize_factor
from yolo import yolo_processing, yolo_resize_factor
from utils import dict_labels
from cv_utils import init_video_file_capture, preprocess, draw_bounding_boxes
from network_executor import ArmnnNetworkExecutor


def get_model_processing(model_name: str, video: cv2.VideoCapture, input_binding_info: tuple):
    """
    Gets model-specific processing information: the output decoding function and the
    resize factor used to scale bounding boxes back to the original frame.
    The user can include their own network by adding another elif branch below.

    Args:
        model_name: Name of the supported model type.
        video: Video capture object, contains information about data source.
        input_binding_info: Contains shape of model input layer, used for scaling bounding boxes.

    Returns:
        Model-specific output decoding function and resize factor for the bounding boxes.
    """
    if model_name == 'ssd_mobilenet_v1':
        return ssd_processing, ssd_resize_factor(video)
    elif model_name == 'yolo_v3_tiny':
        return yolo_processing, yolo_resize_factor(video, input_binding_info)
    else:
        raise ValueError(f'{model_name} is not a valid model name')


def main(args):
    video, video_writer, frame_count = init_video_file_capture(args.video_file_path, args.output_video_file_path)

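    # Create the inference executor for the chosen model; the preferred backends
    # are tried in the order given (e.g. CpuAcc before CpuRef).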
    executor = ArmnnNetworkExecutor(args.model_file_path, args.preferred_backends)
    process_output, resize_factor = get_model_processing(args.model_name, video, executor.input_binding_info)
    labels = dict_labels(args.label_path, include_rgb=True)

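    # Per-frame pipeline: read a frame, preprocess it to the model's input shape,
    # run inference, decode the detections, draw labelled boxes and write the
    # frame to the output video.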
    for _ in tqdm(frame_count, desc='Processing frames'):
        frame_present, frame = video.read()
        if not frame_present:
            continue
        # The third preprocess argument is a model-specific flag: True for
        # ssd_mobilenet_v1, False for yolo_v3_tiny (unchanged from the original
        # branching logic).
        input_tensors = preprocess(frame, executor.input_binding_info,
                                   args.model_name == 'ssd_mobilenet_v1')
        output_result = executor.run(input_tensors)
        detections = process_output(output_result)
        draw_bounding_boxes(frame, detections, resize_factor, labels)
        video_writer.write(frame)
    print('Finished processing frames')
    video.release()
    video_writer.release()


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--video_file_path', required=True, type=str,
                        help='Path to the video file to run object detection on')
    parser.add_argument('--model_file_path', required=True, type=str,
                        help='Path to the Object Detection model to use')
    parser.add_argument('--model_name', required=True, type=str,
                        help='The name of the model being used. Accepted options: ssd_mobilenet_v1, yolo_v3_tiny')
    parser.add_argument('--label_path', required=True, type=str,
                        help='Path to the labelset for the provided model file')
    parser.add_argument('--output_video_file_path', type=str,
                        help='Path to the output video file with detections added in')
    parser.add_argument('--preferred_backends', type=str, nargs='+', default=['CpuAcc', 'CpuRef'],
                        help='Takes the preferred backends in preference order, separated by whitespace, '
                             'for example: CpuAcc GpuAcc CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]. '
                             'Defaults to [CpuAcc, CpuRef]')
    args = parser.parse_args()
    main(args)
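# Example invocation (illustrative paths and file names, assuming this script is
# saved as run_video_file.py next to the 'common' helper directory):
#   python3 run_video_file.py --video_file_path sample.mp4 \
#       --model_file_path ssd_mobilenet_v1.tflite --model_name ssd_mobilenet_v1 \
#       --label_path labels.txt --output_video_file_path detected.mp4 \
#       --preferred_backends CpuAcc CpuRef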