# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

"""
Object detection demo that takes a video stream from a device, runs inference
on each frame producing bounding boxes and labels around detected objects,
and displays a window with the latest processed frame.
"""

import os
import sys

script_dir = os.path.dirname(__file__)
sys.path.insert(1, os.path.join(script_dir, '..', 'common'))

import cv2
from argparse import ArgumentParser
from ssd import ssd_processing, ssd_resize_factor
from yolo import yolo_processing, yolo_resize_factor
from utils import dict_labels, Profiling
from cv_utils import init_video_stream_capture, preprocess, draw_bounding_boxes
import style_transfer


def get_model_processing(model_name: str, video: cv2.VideoCapture, input_data_shape: tuple):
    """
    Gets model-specific decoding and processing information for a supported network.
    The user can include their own network and functions by adding another statement
    (see the hypothetical example after this function).

    Args:
        model_name: Name of type of supported model.
        video: Video capture object, contains information about data source.
        input_data_shape: Contains shape of model input layer, used for scaling bounding boxes.

    Returns:
        Model output processing function and the resize factor used to scale
        bounding boxes back to the original frame.
    """
    if model_name == 'ssd_mobilenet_v1':
        return ssd_processing, ssd_resize_factor(video)
    elif model_name == 'yolo_v3_tiny':
        return yolo_processing, yolo_resize_factor(video, input_data_shape)
    else:
        raise ValueError(f'{model_name} is not a valid model name')
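# Hypothetical extension example (not part of the demo): to add another detector,
# insert a further branch above, e.g.
#   elif model_name == 'my_detector':
#       return my_detector_processing, my_detector_resize_factor(video)
# where my_detector_processing decodes the network's raw output tensors into
# detections and my_detector_resize_factor returns the scale used to map the
# bounding boxes back onto the original frame.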


def main(args):

    enable_profile = args.profiling_enabled == "true"
    action_profiler = Profiling(enable_profile)
    action_profiler.profiling_start()

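    # With --tflite_delegate_path set, the model runs through the TFLite-based executor
    # loading the given external delegate (.so); otherwise the PyArmNN executor is used.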
    if args.tflite_delegate_path is not None:
        from network_executor_tflite import TFLiteNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends, args.tflite_delegate_path)
    else:
        from network_executor import ArmnnNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends)

    executor = NetworkExecutor(*exec_input_args)
    action_profiler.profiling_stop_and_print_us("Executor initialization")

    action_profiler.profiling_start()
    video = init_video_stream_capture(args.video_source)
    action_profiler.profiling_stop_and_print_us("Video initialization")
    model_name = args.model_name
    process_output, resize_factor = get_model_processing(args.model_name, video, executor.get_shape())
    labels = dict_labels(args.label_path, include_rgb=True)

    if all(element is not None for element in [args.style_predict_model_file_path,
                                               args.style_transfer_model_file_path,
                                               args.style_image_path, args.style_transfer_class]):
        style_image = cv2.imread(args.style_image_path)
        action_profiler.profiling_start()
        style_transfer_executor = style_transfer.StyleTransfer(args.style_predict_model_file_path,
                                                               args.style_transfer_model_file_path,
                                                               style_image, args.preferred_backends,
                                                               args.tflite_delegate_path)
        action_profiler.profiling_stop_and_print_us("Style Transfer Executor initialization")

    while True:
        frame_present, frame = video.read()
        if not frame_present:
            raise RuntimeError('Error reading frame from video stream')
        frame = cv2.flip(frame, 1)  # Horizontally flip the frame

        action_profiler.profiling_start()
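        # SSD MobileNet and YOLO expect differently prepared inputs; the final flag
        # passed to preprocess selects the model-specific handling (see cv_utils.preprocess).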
        if model_name == "ssd_mobilenet_v1":
            input_data = preprocess(frame, executor.get_data_type(), executor.get_shape(), True)
        else:
            input_data = preprocess(frame, executor.get_data_type(), executor.get_shape(), False)

        output_result = executor.run([input_data])
        if not enable_profile:
            print("Running inference...")
        action_profiler.profiling_stop_and_print_us("Running inference...")
        detections = process_output(output_result)
        if all(element is not None for element in [args.style_predict_model_file_path,
                                                   args.style_transfer_model_file_path,
                                                   args.style_image_path, args.style_transfer_class]):
            action_profiler.profiling_start()
            frame = style_transfer.create_stylized_detection(style_transfer_executor, args.style_transfer_class,
                                                             frame, detections, resize_factor, labels)
            action_profiler.profiling_stop_and_print_us("Running Style Transfer")
        else:
            draw_bounding_boxes(frame, detections, resize_factor, labels)
        cv2.imshow('PyArmNN Object Detection Demo', frame)
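        # 27 is the ASCII code for the Esc key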
        if cv2.waitKey(1) == 27:
            print('\nExit key activated. Closing video...')
            break
    video.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--video_source', type=int, default=0,
                        help='Device index to access video stream. Defaults to primary device camera at index 0')
    parser.add_argument('--model_file_path', required=True, type=str,
                        help='Path to the Object Detection model to use')
    parser.add_argument('--model_name', required=True, type=str,
                        help='The name of the model being used. Accepted options: ssd_mobilenet_v1, yolo_v3_tiny')
    parser.add_argument('--label_path', required=True, type=str,
                        help='Path to the labelset for the provided model file')
    parser.add_argument('--preferred_backends', type=str, nargs='+', default=['CpuAcc', 'CpuRef'],
                        help='Takes the preferred backends in preference order, separated by whitespace, '
                             'for example: CpuAcc GpuAcc CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]. '
                             'Defaults to [CpuAcc, CpuRef]')
    parser.add_argument('--tflite_delegate_path', type=str,
                        help='Enter TensorFlow Lite Delegate file path (.so file). If not entered, '
                             'the Arm NN executor will be used')
    parser.add_argument('--profiling_enabled', type=str,
                        help='[OPTIONAL] Enabling this option will print important ML related milestones timing '
                             'information in microseconds. By default, this option is disabled. '
                             'Accepted options are true/false.')
    parser.add_argument('--style_predict_model_file_path', type=str,
                        help='Path to the style prediction model to use')
    parser.add_argument('--style_transfer_model_file_path', type=str,
                        help='Path to the style transfer model to use')
    parser.add_argument('--style_image_path', type=str,
                        help='Path to the style image to create stylized frames')
    parser.add_argument('--style_transfer_class', type=str,
                        help='Name of the detected class whose bounding boxes will be stylized')

    args = parser.parse_args()
    main(args)