# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

"""
Object detection demo that takes a video stream from a device, runs inference
on each frame producing bounding boxes and labels around detected objects,
and displays a window with the latest processed frame.
"""

import os
import sys

script_dir = os.path.dirname(__file__)
sys.path.insert(1, os.path.join(script_dir, '..', 'common'))

import cv2
from argparse import ArgumentParser

from ssd import ssd_processing, ssd_resize_factor
from yolo import yolo_processing, yolo_resize_factor
from utils import dict_labels, Profiling
from cv_utils import init_video_stream_capture, preprocess, draw_bounding_boxes
import style_transfer


def get_model_processing(model_name: str, video: cv2.VideoCapture, input_data_shape: tuple):
    """
    Gets the model-specific output decoding/processing function and the resize factor
    used for scaling bounding boxes. The user can include their own network and
    functions by adding another branch to the statement below.

    Args:
        model_name: Name of the type of supported model.
        video: Video capture object, contains information about the data source.
        input_data_shape: Shape of the model input layer, used for scaling bounding boxes.

    Returns:
        Model output processing function and resize factor.
    """
    if model_name == 'ssd_mobilenet_v1':
        return ssd_processing, ssd_resize_factor(video)
    elif model_name == 'yolo_v3_tiny':
        return yolo_processing, yolo_resize_factor(video, input_data_shape)
    else:
        raise ValueError(f'{model_name} is not a valid model name')


def main(args):

    enable_profile = args.profiling_enabled == "true"
    action_profiler = Profiling(enable_profile)
    action_profiler.profiling_start()

    if args.tflite_delegate_path is not None:
        from network_executor_tflite import TFLiteNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends, args.tflite_delegate_path)
    else:
        from network_executor import ArmnnNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends)

    executor = NetworkExecutor(*exec_input_args)
    action_profiler.profiling_stop_and_print_us("Executor initialization")

    action_profiler.profiling_start()
    video = init_video_stream_capture(args.video_source)
    action_profiler.profiling_stop_and_print_us("Video initialization")
    model_name = args.model_name

    process_output, resize_factor = get_model_processing(args.model_name, video, executor.get_shape())
    labels = dict_labels(args.label_path, include_rgb=True)

    # Set up the style transfer executor only if all style-transfer arguments were provided
    if all(element is not None for element in [args.style_predict_model_file_path,
                                               args.style_transfer_model_file_path,
                                               args.style_image_path, args.style_transfer_class]):
        style_image = cv2.imread(args.style_image_path)
        action_profiler.profiling_start()
        style_transfer_executor = style_transfer.StyleTransfer(args.style_predict_model_file_path,
                                                               args.style_transfer_model_file_path,
                                                               style_image, args.preferred_backends,
                                                               args.tflite_delegate_path)
        action_profiler.profiling_stop_and_print_us("Style Transfer Executor initialization")

    while True:
        frame_present, frame = video.read()
        if not frame_present:
            raise RuntimeError('Error reading frame from video stream')
        frame = cv2.flip(frame, 1)  # Horizontally flip the frame

        action_profiler.profiling_start()
        if model_name == "ssd_mobilenet_v1":
            input_data = preprocess(frame, executor.get_data_type(), executor.get_shape(), True)
        else:
            input_data = preprocess(frame, executor.get_data_type(), executor.get_shape(), False)

        output_result = executor.run([input_data])
        if not enable_profile:
            print("Running inference...")
        action_profiler.profiling_stop_and_print_us("Running inference...")
        detections = process_output(output_result)
        if all(element is not None for element in [args.style_predict_model_file_path,
                                                   args.style_transfer_model_file_path,
                                                   args.style_image_path, args.style_transfer_class]):
            action_profiler.profiling_start()
            frame = style_transfer.create_stylized_detection(style_transfer_executor, args.style_transfer_class,
                                                             frame, detections, resize_factor, labels)
            action_profiler.profiling_stop_and_print_us("Running Style Transfer")
        else:
            draw_bounding_boxes(frame, detections, resize_factor, labels)

        cv2.imshow('PyArmNN Object Detection Demo', frame)
        if cv2.waitKey(1) == 27:
            print('\nExit key activated. Closing video...')
            break
    video.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--video_source', type=int, default=0,
                        help='Device index to access video stream. Defaults to primary device camera at index 0')
    parser.add_argument('--model_file_path', required=True, type=str,
                        help='Path to the Object Detection model to use')
    parser.add_argument('--model_name', required=True, type=str,
                        help='The name of the model being used. Accepted options: ssd_mobilenet_v1, yolo_v3_tiny')
    parser.add_argument('--label_path', required=True, type=str,
                        help='Path to the labelset for the provided model file')
    parser.add_argument('--preferred_backends', type=str, nargs='+', default=['CpuAcc', 'CpuRef'],
                        help='Takes the preferred backends in preference order, separated by whitespace, '
                             'for example: CpuAcc GpuAcc CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]. '
                             'Defaults to [CpuAcc, CpuRef]')
    parser.add_argument('--tflite_delegate_path', type=str,
                        help='Path to the TensorFlow Lite Delegate file (.so). If not provided, '
                             'the Arm NN executor will be used')
    parser.add_argument('--profiling_enabled', type=str,
                        help='[OPTIONAL] Enabling this option will print the timing of important '
                             'ML-related milestones in microseconds. By default, this option is disabled. '
                             'Accepted options are true/false.')
    parser.add_argument('--style_predict_model_file_path', type=str,
                        help='Path to the style prediction model to use')
    parser.add_argument('--style_transfer_model_file_path', type=str,
                        help='Path to the style transfer model to use')
    parser.add_argument('--style_image_path', type=str,
                        help='Path to the style image used to create stylized frames')
    parser.add_argument('--style_transfer_class', type=str,
                        help='Name of the detected object class to apply the style transfer to')

    args = parser.parse_args()
    main(args)
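
# Example invocation (a minimal sketch): the flags below are all defined by the argument
# parser above, but the script name and the model/label file paths are placeholders and
# depend on your setup.
#
#   python3 run_video_stream.py \
#       --model_file_path models/ssd_mobilenet_v1.tflite \
#       --model_name ssd_mobilenet_v1 \
#       --label_path labels/coco_labels.txt \
#       --preferred_backends CpuAcc CpuRef \
#       --profiling_enabled true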