# Copyright © 2020-2022 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

"""
Object detection demo that takes a video file, runs inference on each frame producing
bounding boxes and labels around detected objects, and saves the processed video.
"""
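
# Example invocation (the file names below are placeholders, substitute your own):
#   python run_video_file.py \
#       --video_file_path sample.mp4 \
#       --model_file_path ssd_mobilenet_v1.tflite \
#       --model_name ssd_mobilenet_v1 \
#       --label_path labels.txt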

import os
import sys

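# Make the shared helper modules in ../common importable.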
script_dir = os.path.dirname(__file__)
sys.path.insert(1, os.path.join(script_dir, '..', 'common'))

import cv2
from tqdm import tqdm
from argparse import ArgumentParser

from ssd import ssd_processing, ssd_resize_factor
from yolo import yolo_processing, yolo_resize_factor
from utils import dict_labels, Profiling
from cv_utils import init_video_file_capture, preprocess, draw_bounding_boxes
import style_transfer


def get_model_processing(model_name: str, video: cv2.VideoCapture, input_data_shape: tuple):
    """
    Gets model-specific information such as the output decoding and processing functions.
    The user can include their own network and functions by adding another branch,
    as sketched below.

    Args:
        model_name: Name of the type of supported model.
        video: Video capture object; contains information about the data source.
        input_data_shape: Shape of the model's input layer.

    Returns:
        The model's output processing function and the resize factor for its detections.
    """
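    # Hypothetical sketch of such a branch (my_processing and my_resize_factor are
    # placeholder names for user-supplied functions, not part of this example):
    #   elif model_name == 'my_model':
    #       return my_processing, my_resize_factor(video)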
    if model_name == 'ssd_mobilenet_v1':
        return ssd_processing, ssd_resize_factor(video)
    elif model_name == 'yolo_v3_tiny':
        return yolo_processing, yolo_resize_factor(video, input_data_shape)
    else:
        raise ValueError(f'{model_name} is not a valid model name')


def main(args):
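    # action_profiler times individual stages and overall_profiler the whole run;
    # timings are only reported when --profiling_enabled true is passed.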
    enable_profile = args.profiling_enabled == "true"
    action_profiler = Profiling(enable_profile)
    overall_profiler = Profiling(enable_profile)
    overall_profiler.profiling_start()
    action_profiler.profiling_start()

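    # Run through the TensorFlow Lite runtime with the given delegate (.so) when
    # --tflite_delegate_path is supplied, otherwise through the Arm NN executor.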
    if args.tflite_delegate_path is not None:
        from network_executor_tflite import TFLiteNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends, args.tflite_delegate_path)
    else:
        from network_executor import ArmnnNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends)

    executor = NetworkExecutor(*exec_input_args)
    action_profiler.profiling_stop_and_print_us("Executor initialization")

    action_profiler.profiling_start()
    video, video_writer, frame_count = init_video_file_capture(args.video_file_path, args.output_video_file_path)
    process_output, resize_factor = get_model_processing(args.model_name, video, executor.get_shape())
    action_profiler.profiling_stop_and_print_us("Video initialization")

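    # Map class indices to label names; include_rgb also attaches a colour to each
    # class for annotating the output frames.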
    labels = dict_labels(args.label_path, include_rgb=True)

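    # Style transfer is optional: it runs only when all four --style_* arguments
    # are supplied.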
    use_style_transfer = all(element is not None for element in [args.style_predict_model_file_path,
                                                                 args.style_transfer_model_file_path,
                                                                 args.style_image_path, args.style_transfer_class])
    if use_style_transfer:
        style_image = cv2.imread(args.style_image_path)
        action_profiler.profiling_start()
        style_transfer_executor = style_transfer.StyleTransfer(args.style_predict_model_file_path,
                                                               args.style_transfer_model_file_path,
                                                               style_image, args.preferred_backends,
                                                               args.tflite_delegate_path)
        action_profiler.profiling_stop_and_print_us("Style Transfer Executor initialization")

    for _ in tqdm(frame_count, desc='Processing frames'):
        frame_present, frame = video.read()
        if not frame_present:
            continue
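        # Resize, reorder channels and convert the frame to match the model's
        # input layer; the final preprocess flag is model specific.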
        if args.model_name == "ssd_mobilenet_v1":
            input_data = preprocess(frame, executor.get_data_type(), executor.get_shape(), True)
        else:
            input_data = preprocess(frame, executor.get_data_type(), executor.get_shape(), False)

        action_profiler.profiling_start()
        output_result = executor.run([input_data])
        action_profiler.profiling_stop_and_print_us("Running inference")

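        # Decode the raw network output into detections: bounding boxes, class
        # indices and confidence scores.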
        detections = process_output(output_result)

        if use_style_transfer:
            action_profiler.profiling_start()
            frame = style_transfer.create_stylized_detection(style_transfer_executor, args.style_transfer_class,
                                                             frame, detections, resize_factor, labels)
            action_profiler.profiling_stop_and_print_us("Running Style Transfer")
        else:
            draw_bounding_boxes(frame, detections, resize_factor, labels)

        video_writer.write(frame)
    print('Finished processing frames')
    overall_profiler.profiling_stop_and_print_us("Total compute time")
    video.release()
    video_writer.release()


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--video_file_path', required=True, type=str,
                        help='Path to the video file to run object detection on')
    parser.add_argument('--model_file_path', required=True, type=str,
                        help='Path to the Object Detection model to use')
    parser.add_argument('--model_name', required=True, type=str,
                        help='The name of the model being used. Accepted options: ssd_mobilenet_v1, yolo_v3_tiny')
    parser.add_argument('--label_path', required=True, type=str,
                        help='Path to the label set for the provided model file')
    parser.add_argument('--output_video_file_path', type=str,
                        help='Path to the output video file with detections added')
    parser.add_argument('--preferred_backends', type=str, nargs='+', default=['CpuAcc', 'CpuRef'],
                        help='Takes the preferred backends in preference order, separated by whitespace, '
                             'for example: CpuAcc GpuAcc CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]. '
                             'Defaults to [CpuAcc, CpuRef]')
    parser.add_argument('--tflite_delegate_path', type=str,
                        help='Path to the TensorFlow Lite delegate file (.so). If not provided, '
                             'the Arm NN executor is used instead')
    parser.add_argument('--profiling_enabled', type=str,
                        help='[OPTIONAL] Prints timing information, in microseconds, for important '
                             'ML-related milestones. Disabled by default. Accepted options: true/false')
    parser.add_argument('--style_predict_model_file_path', type=str,
                        help='Path to the style prediction model to use')
    parser.add_argument('--style_transfer_model_file_path', type=str,
                        help='Path to the style transfer model to use')
    parser.add_argument('--style_image_path', type=str,
                        help='Path to the style image used to create stylized frames')
    parser.add_argument('--style_transfer_class', type=str,
                        help='Name of the detected class to apply style transfer to')

    args = parser.parse_args()
    main(args)