Amazing-Python-Scripts

Форк
0
/
intruder_detector_jupyter.py 
710 строк · 25.0 Кб
1
#!/usr/bin/env python3
2
"""
3
* Copyright (c) 2018 Intel Corporation.
4
*
5
* Permission is hereby granted, free of charge, to any person obtaining
6
* a copy of this software and associated documentation files (the
7
* "Software"), to deal in the Software without restriction, including
8
* without limitation the rights to use, copy, modify, merge, publish,
9
* distribute, sublicense, and/or sell copies of the Software, and to
10
* permit persons to whom the Software is furnished to do so, subject to
11
* the following conditions:
12
*
13
* The above copyright notice and this permission notice shall be
14
* included in all copies or substantial portions of the Software.
15
*
16
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23
"""
24

25

26
from __future__ import print_function
27
import sys
28
import os
29
from argparse import ArgumentParser
30
import cv2
31
import numpy
32
import time
33
import collections
34
import queue
35
import signal
36
import json
37
import pathlib
38
from inference import Network
39

40
# CONSTANTS
41
CONFIG_FILE = '../resources/config.json'
42
EVENT_FILE = "../UI/resources/video_data/events.json"
43
DATA_FILE = "../UI/resources/video_data/data.json"
44
TARGET_DEVICE = "CPU"
45
OUTPUT_VIDEO_PATH = "../UI/resources/videos"
46
CPU_EXTENSION = ""
47
LOOP_VIDEO = False
48
UI = False
49
CONF_THRESHOLD_VALUE = 0.55
50
LOG_FILE_PATH = "./intruders.log"
51
LOG_WIN_HEIGHT = 432
52
LOG_WIN_WIDTH = 410
53
CONF_CANDIDATE_CONFIDENCE = 4
54
CODEC = 0x31637661
55

56
# Opencv windows per each row
57
CONF_WINDOW_COLUMNS = 2
58

59
# Global variables
60
model_xml = ''
61
model_bin = ''
62
conf_labels_file_path = ''
63
video_caps = []
64
accepted_devices = ["CPU", "GPU", "HETERO:FPGA,CPU", "MYRIAD", "HDDL"]
65
is_async_mode = True
66

67
# Event class to store the intruder details
68

69

70
class Event:
    """Record of a single intruder detection.

    Attributes:
        time: Wall-clock timestamp string of the detection.
        intruder: Label name of the detected intruder.
        count: Running total of intruders detected so far.
        frame: Frame number at which the detection occurred.
    """

    def __init__(self, event_time=None, intruder=None, count=None, frame=None):
        # Store all event details on the instance in one unpacking step.
        self.time, self.intruder, self.count, self.frame = (
            event_time, intruder, count, frame)
76

77

78
# VideoCap class to manage the input source
79
# VideoCap class to manage the input source
class VideoCap:
    """Holds one input stream (camera or video file) plus its per-label
    detection state and the VideoWriter used for UI output."""

    def __init__(self, vc, cam_name, cams, is_cam):
        """
        :param vc: cv2.VideoCapture object for this stream
        :param cam_name: display name used for the OpenCV window
        :param cams: 1-based stream index, used to name the output video
        :param is_cam: True when the source is a live camera (not a file)
        """
        # 3 == cv2.CAP_PROP_FRAME_WIDTH, 4 == cv2.CAP_PROP_FRAME_HEIGHT
        self.input_width = vc.get(3)
        self.input_height = vc.get(4)
        self.vc = vc
        self.cam_name = cam_name
        self.is_cam = is_cam
        self.no_of_labels = 0
        # Per-label counters; sized by init() once the labels are known.
        self.last_correct_count = []
        self.total_count = []
        self.current_count = []
        self.changed_count = []
        self.candidate_count = []
        self.candidate_confidence = []
        self.frame = None
        self.loop_frames = 0
        self.frame_count = 0
        self.events = []
        self.video_name = 'video{}.mp4'.format(cams)
        self.vw = None

    def init(self, size):
        """Initialize the per-label counters for `size` labels.

        Fix: assign fresh lists instead of appending, so calling init()
        more than once no longer grows the lists past `size` entries.

        :param size: number of labels in the model's label file
        """
        self.no_of_labels = size
        self.last_correct_count = [0] * size
        self.total_count = [0] * size
        self.changed_count = [False] * size
        self.current_count = [0] * size
        self.candidate_count = [0] * size
        self.candidate_confidence = [0] * size

    def init_vw(self, h, w):
        """Create the VideoWriter for this stream.

        :param h: output frame height in pixels
        :param w: output frame width in pixels
        :return: (0, '') on success, (-1, video_name) when the writer
                 could not be opened
        """
        self.vw = cv2.VideoWriter(os.path.join(OUTPUT_VIDEO_PATH, self.video_name), CODEC,
                                  self.vc.get(cv2.CAP_PROP_FPS), (w, h), True)
        if not self.vw.isOpened():
            return -1, self.video_name
        return 0, ''
116

117

118
def parse_args():
    """
    Read the application configuration from environment variables.

    Populates the module-level globals: model paths (MODEL), label file
    (LABEL_FILE), CPU extension (CPU_EXTENSION), loop/UI flags
    (LOOP_VIDEO, UI), target device (DEVICE) and async mode (FLAG).

    :return status: 0 on success, negative value on failure
        (-2: MODEL not set, -3: LABEL_FILE not set)
    """
    global LOOP_VIDEO
    global conf_labels_file_path
    global model_xml
    global model_bin
    global CPU_EXTENSION
    global UI
    global TARGET_DEVICE
    global is_async_mode

    def _env_bool(var_name):
        """Parse a True/False environment variable; default to False."""
        raw = os.environ.get(var_name, "False")
        if raw in ("True", "true"):
            return True
        if raw not in ("False", "false"):
            print("Invalid input for {}. Defaulting to {} = False".format(
                var_name, var_name))
        return False

    # The model path is mandatory; derive the .bin weights path from it.
    try:
        model_xml = os.environ["MODEL"]
    except KeyError:
        return -2
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # The label file is mandatory as well.
    try:
        conf_labels_file_path = os.environ["LABEL_FILE"]
    except KeyError:
        return -3

    # None (not "") when no extension library is supplied.
    CPU_EXTENSION = os.environ.get('CPU_EXTENSION')

    LOOP_VIDEO = _env_bool("LOOP_VIDEO")

    if 'DEVICE' in os.environ.keys():
        TARGET_DEVICE = os.environ['DEVICE']

    UI = _env_bool("UI")

    # FLAG == "sync" selects synchronous inference; anything else -> async.
    if 'FLAG' in os.environ.keys():
        is_async_mode = os.environ['FLAG'] != "sync"
    return 0
180

181

182
def check_args():
    """
    Validate the configuration gathered from the environment.

    :return status: 0 on success, negative value on failure
    """
    global model_xml
    global conf_labels_file_path

    # The model and label-file paths are mandatory.
    if model_xml == '':
        return -2
    if conf_labels_file_path == '':
        return -3

    if 'MULTI' in TARGET_DEVICE:
        # Every device listed after "MULTI:" must be supported.
        for multi_device in TARGET_DEVICE.split(':')[1].split(','):
            if multi_device not in accepted_devices:
                print("Unsupported device: " + TARGET_DEVICE)
                return -17
    elif TARGET_DEVICE not in accepted_devices:
        # Single-device string must itself be supported.
        print("Unsupported device: " + TARGET_DEVICE)
        return -17
    return 0
206

207

208
def get_used_labels(req_labels):
    """
    Read the model's label file and get the position of labels required by the application.

    :param req_labels: intruders to be detected in the input source
    :return [status, labels, used_labels]:
        status: 0 on success, negative value on failure
        labels: on success, list of labels present in the model's label file
        used_labels: on success, list of bools, True where the label at
                     that position is used by the application
    """
    global conf_labels_file_path

    if not conf_labels_file_path:
        return [-6, [], []]

    # open() raises on failure, so the original "if not label_file"
    # check (status -4) was dead code and has been removed.
    with open(conf_labels_file_path, 'r') as label_file:
        labels = [line.strip() for line in label_file]

    if not labels:
        return [-5, [], []]

    used_labels = [label in req_labels for label in labels]
    return [0, labels, used_labels]
240

241

242
def get_input():
    """
    Parse the configuration file and open every input source.

    :return [status, payload]:
        status: 0 on success, negative value on failure
        payload: on success, the labels (intruders) to be detected;
                 on failure, a one-element list naming the bad source
    """
    global CONFIG_FILE
    global video_caps
    labels = []

    assert os.path.isfile(
        CONFIG_FILE), "{} file doesn't exist".format(CONFIG_FILE)
    # Context manager closes the config file handle (the original leaked
    # it via json.loads(open(...).read())).
    with open(CONFIG_FILE) as config_file:
        config = json.load(config_file)

    for item in config['inputs']:
        for idx, video in enumerate(item['video']):
            cams = idx + 1
            cam_name = "Cam {}".format(idx)
            if video.isdigit():
                # Numeric entry -> live camera index.
                video_cap = VideoCap(cv2.VideoCapture(
                    int(video)), cam_name, cams, is_cam=True)
            elif os.path.isfile(video):
                video_cap = VideoCap(cv2.VideoCapture(
                    video), cam_name, cams, is_cam=False)
            else:
                return [-8, [video]]
            video_caps.append(video_cap)
        labels = item['label']

    for video_cap in video_caps:
        if not video_cap.vc.isOpened():
            return [-9, [video_cap.cam_name]]
        # Size the per-label counters now that the labels are known.
        video_cap.init(len(labels))
    return [0, labels]
280

281

282
def save_json():
    """
    Write the video results to the event and data JSON files.

    The JSON is assembled by hand to match the exact layout the UI
    expects (tab-indented, one "video1" section plus totals).

    :return status: 0 on success (failures in open() propagate as OSError)
    """
    global video_caps
    global EVENT_FILE
    global DATA_FILE
    events = []
    if video_caps:
        events = video_caps[0].events
    total = 0
    # Context managers guarantee both files are closed even when a write
    # raises; the original leaked the event file handle if opening the
    # data file failed, and never closed either file on an exception.
    with open(EVENT_FILE, 'w') as event_json, open(DATA_FILE, 'w') as data_json:
        data_json.write("{\n\t\"video1\": {\n")
        event_json.write("{\n\t\"video1\": {\n")
        events_size = len(events) - 1
        if events:
            fps = video_caps[0].vc.get(cv2.CAP_PROP_FPS)
            # All events but the last are followed by a comma.
            for i in range(events_size):
                event_json.write("\t\t\"%d\":{\n" % (i))
                event_json.write("\t\t\t\"time\":\"%s\",\n" % events[i].time)
                event_json.write("\t\t\t\"content\":\"%s\",\n" %
                                 events[i].intruder)
                event_json.write("\t\t\t\"videoTime\":\"%d\"\n" %
                                 float(events[i].frame / fps))
                event_json.write("\t\t},\n")
                data_json.write("\t\t\"%d\": \"%d\",\n" %
                                (float(events[i].frame / fps), events[i].count))
            # Final event entry: same layout, no trailing comma.
            event_json.write("\t\t\"%d\":{\n" % events_size)
            event_json.write("\t\t\t\"time\":\"%s\",\n" % events[events_size].time)
            event_json.write("\t\t\t\"content\":\"%s\",\n" %
                             events[events_size].intruder)
            event_json.write("\t\t\t\"videoTime\":\"%d\"\n" %
                             float(events[events_size].frame / fps))
            event_json.write("\t\t}\n")
            data_json.write("\t\t\"%d\": \"%d\"\n" % (
                float(events[events_size].frame / fps), events[events_size].count))
            total = events[events_size].count
        event_json.write("\t}\n")
        event_json.write("}")
        data_json.write("\t},\n")
        data_json.write("\t\"totals\":{\n")
        data_json.write("\t\t\"video1\": \"%d\"\n" % total)
        data_json.write("\t}\n")
        data_json.write("}")
    return 0
338

339

340
def arrange_windows():
    """
    Arranges the windows so that they are not overlapping.

    Places the intruder log window at the top-left corner and the video
    windows in a grid of CONF_WINDOW_COLUMNS columns to its right.

    :return: None
    """
    global CONF_WINDOW_COLUMNS
    global video_caps
    spacer = 470
    row_spacer = 250
    cols = 0
    rows = 0
    window_width = 768
    window_height = 432

    # Arrange log window
    cv2.namedWindow("Intruder Log", cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow("Intruder Log", 0, 0)

    # Arrange video windows
    for idx in range(len(video_caps)):
        if cols == CONF_WINDOW_COLUMNS:
            # Row is full: wrap to the first column of the next row.
            rows += 1
            cols = 1
        else:
            cols += 1
        # Window creation/placement is identical for both branches
        # (deduplicated from the original copy-pasted code).
        cv2.namedWindow(video_caps[idx].cam_name, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(video_caps[idx].cam_name,
                         window_width, window_height)
        cv2.moveWindow(video_caps[idx].cam_name,
                       spacer * cols, row_spacer * rows)
376

377

378
# Signal handler
379
def signal_handler(sig, frame):
    """Handle SIGINT: persist the results, release resources and exit."""
    global video_caps
    global EVENT_FILE
    global DATA_FILE
    if video_caps:
        # Save whatever has been collected so far before shutting down.
        ret = save_json()
        if ret == -10:
            print("Could not create event JSON file " + EVENT_FILE + "!")
        elif ret == -11:
            print("Could not create data JSON file " + DATA_FILE + "!")

    clean_up()
    sys.exit(0)
393

394

395
def clean_up():
    """
    Destroys all the opencv windows and releases the objects of videoCapture and videoWriter
    """
    global video_caps
    cv2.destroyAllWindows()
    for video_cap in video_caps:
        # Release the writer first, then the capture, skipping unset ones.
        for resource in (video_cap.vw, video_cap.vc):
            if resource:
                resource.release()
406

407

408
def intruder_detector():
    """
    Process the input source frame by frame and detects intruder, if any.

    Reads configuration from environment variables and the config file,
    loads the model onto the target device, then loops over every video
    capture, running inference per frame, drawing bounding boxes around
    detected intruders, logging events, saving snapshots and (optionally)
    writing output video. Esc (27) quits; Tab (9) toggles async/sync.

    :return status: 0 on success, negative value on failure, paired with
            an auxiliary value used in the caller's error messages
    """
    global CONF_CANDIDATE_CONFIDENCE
    global LOG_WIN_HEIGHT
    global LOG_WIN_WIDTH
    global CONFIG_FILE
    global video_caps
    global conf_labels_file_path
    global is_async_mode
    global UI
    global LOOP_VIDEO

    # NOTE(review): parse_args()'s return status is ignored here;
    # check_args() below catches the missing-variable cases.
    parse_args()
    ret = check_args()
    if ret != 0:
        return ret, ""

    if not os.path.isfile(CONFIG_FILE):
        return -12, ""

    if not os.path.isfile(conf_labels_file_path):
        return -13, ""

    # Creates subdirectory to save output snapshots
    pathlib.Path(os.getcwd() + '/output/').mkdir(parents=True, exist_ok=True)

    # Read the configuration file (populates video_caps)
    ret, req_labels = get_input()
    if ret != 0:
        return ret, req_labels[0]

    if not video_caps:
        return -14, ''

    # Get the labels that are used in the application
    ret, label_names, used_labels = get_used_labels(req_labels)
    if ret != 0:
        return ret, ''
    if True not in used_labels:
        return -15, ''

    # Init a rolling log to store events; sized to the number of text
    # rows that fit in the log window (20 px per row, 15 px top margin)
    rolling_log_size = int((LOG_WIN_HEIGHT - 15) / 20)
    log_list = collections.deque(maxlen=rolling_log_size)

    # Open a file for intruder logs
    log_file = open(LOG_FILE_PATH, 'w')
    if not log_file:
        return -16, ''

    # Initializing VideoWriter for each source
    if UI and not LOOP_VIDEO:
        for video_cap in video_caps:
            ret, ret_value = video_cap.init_vw(
                int(video_cap.input_height), int(video_cap.input_width))
            if ret != 0:
                return ret, ret_value

    # Initialise the inference wrapper class
    infer_network = Network()
    # Load the network to IE plugin to get shape of input layer
    # NOTE(review): the literal 2 presumably requests two infer requests
    # for the async ping-pong below — confirm against inference.Network
    n, c, h, w = infer_network.load_model(
        model_xml, TARGET_DEVICE, 1, 1, 2, CPU_EXTENSION)[1]
    # Arrange windows so that they are not overlapping
    arrange_windows()

    # Slowest stream's FPS: faster streams read proportionally more
    # frames per iteration so all streams advance in step
    min_fps = min([i.vc.get(cv2.CAP_PROP_FPS) for i in video_caps])
    signal.signal(signal.SIGINT, signal_handler, )
    no_more_data = [False] * len(video_caps)
    start_time = time.time()
    inf_time = 0
    # Two request IDs are ping-ponged in async mode
    next_request_id = 1
    cur_request_id = 0
    # Main loop starts here. Loop over all the video captures

    if is_async_mode:
        print("Application running in async mode...")
    else:
        print("Application running in sync mode...")

    while True:
        for idx, video_cap in enumerate(video_caps):
            # Get a new frame (faster streams skip ahead, see min_fps)
            vfps = int(round(video_cap.vc.get(cv2.CAP_PROP_FPS)))
            for i in range(0, int(round(vfps / min_fps))):
                if is_async_mode:
                    ret, video_cap.next_frame = video_cap.vc.read()
                else:
                    ret, video_cap.frame = video_cap.vc.read()
                video_cap.loop_frames += 1
                # If no new frame or error in reading a frame, exit the loop
                if not ret:
                    no_more_data[idx] = True
                    break
            if no_more_data[idx]:
                # Show a black "stream ended" placeholder for this source
                stream_end_frame = numpy.zeros((int(video_cap.input_height), int(video_cap.input_width), 1),
                                               dtype='uint8')
                stream_end_message = "Stream from {} has ended.".format(
                    video_cap.cam_name)
                cv2.putText(stream_end_frame, stream_end_message, (int(video_cap.input_width / 2) - 30,
                                                                   int(video_cap.input_height / 2) - 30),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
                cv2.imshow(video_cap.cam_name, stream_end_frame)
                continue
            # Reset the per-frame detection counters
            for i in range(video_cap.no_of_labels):
                video_cap.current_count[i] = 0
                video_cap.changed_count[i] = False

            # Resize to expected size (in model .xml file)
            # Input frame is resized to infer resolution
            if is_async_mode:
                in_frame = cv2.resize(video_cap.next_frame, (w, h))
                in_frame = in_frame.transpose((2, 0, 1))
                in_frame = in_frame.reshape((n, c, h, w))

                # Start asynchronous inference for specified request.
                infer_network.exec_net(next_request_id, in_frame)
                video_cap.frame = video_cap.next_frame
                # Async enabled and only one video capture
                if len(video_caps) == 1:
                    videoCapResult = video_cap
                # Async enabled and more than one video capture
                else:
                    # Get previous index: the result retrieved this
                    # iteration belongs to the previous capture's request
                    videoCapResult = video_caps[idx -
                                                1 if idx - 1 >= 0 else len(video_caps) - 1]

            else:
                in_frame = cv2.resize(video_cap.frame, (w, h))
                in_frame = in_frame.transpose((2, 0, 1))
                in_frame = in_frame.reshape((n, c, h, w))

                # Start synchronous inference for specified request.
                infer_network.exec_net(cur_request_id, in_frame)
                videoCapResult = video_cap

            inf_start = time.time()
            # Wait for the result
            if infer_network.wait(cur_request_id) == 0:
                inf_time = time.time() - inf_start
                # Results of the output layer of the network
                res = infer_network.get_output(cur_request_id)
                for obj in res[0][0]:
                    # obj[1] is the label id (1-based), obj[2] the
                    # confidence, obj[3:7] the normalized box corners
                    label = int(obj[1]) - 1
                    # Draw the bounding box around the object when the probability is more than specified threshold
                    if obj[2] > CONF_THRESHOLD_VALUE and used_labels[label]:
                        videoCapResult.current_count[label] += 1
                        xmin = int(obj[3] * videoCapResult.input_width)
                        ymin = int(obj[4] * videoCapResult.input_height)
                        xmax = int(obj[5] * videoCapResult.input_width)
                        ymax = int(obj[6] * videoCapResult.input_height)
                        # Draw bounding box around the intruder detected
                        cv2.rectangle(videoCapResult.frame, (xmin, ymin),
                                      (xmax, ymax), (0, 255, 0), 4, 16)

                for i in range(videoCapResult.no_of_labels):
                    # Debounce: a new count must persist for
                    # CONF_CANDIDATE_CONFIDENCE frames before being accepted
                    if videoCapResult.candidate_count[i] == videoCapResult.current_count[i]:
                        videoCapResult.candidate_confidence[i] += 1
                    else:
                        videoCapResult.candidate_confidence[i] = 0
                        videoCapResult.candidate_count[i] = videoCapResult.current_count[i]

                    if videoCapResult.candidate_confidence[i] == CONF_CANDIDATE_CONFIDENCE:
                        videoCapResult.candidate_confidence[i] = 0
                        videoCapResult.changed_count[i] = True
                    else:
                        continue

                    # Count only increases: new intruders entered the scene
                    if videoCapResult.current_count[i] > videoCapResult.last_correct_count[i]:
                        videoCapResult.total_count[i] += videoCapResult.current_count[i] - \
                            videoCapResult.last_correct_count[i]
                        det_objs = videoCapResult.current_count[i] - \
                            videoCapResult.last_correct_count[i]
                        total_count = sum(videoCapResult.total_count)
                        # Log one event per newly detected intruder
                        for det_obj in range(det_objs):
                            current_time = time.strftime("%H:%M:%S")
                            log = "{} - Intruder {} detected on {}".format(current_time, label_names[i],
                                                                           videoCapResult.cam_name)
                            log_list.append(log)
                            log_file.write(log + "\n")
                            event = Event(event_time=current_time, intruder=label_names[i], count=total_count,
                                          frame=videoCapResult.frame_count)
                            videoCapResult.events.append(event)

                        # Save a snapshot of the frame with the intruder
                        snapshot_name = "output/intruder_{}.png".format(
                            total_count)
                        cv2.imwrite(snapshot_name, videoCapResult.frame)
                    videoCapResult.last_correct_count[i] = videoCapResult.current_count[i]

                # Create intruder log window, add logs to the frame and display it
                log_window = numpy.zeros(
                    (LOG_WIN_HEIGHT, LOG_WIN_WIDTH, 1), dtype='uint8')
                for i, log in enumerate(log_list):
                    cv2.putText(log_window, log, (10, 20 * i + 15),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                cv2.imshow("Intruder Log", log_window)
                videoCapResult.frame_count += 1

                # Video output
                if UI and not LOOP_VIDEO:
                    videoCapResult.vw.write(videoCapResult.frame)

                # Overlay runtime statistics on the frame
                log_message = "Async mode is on." if is_async_mode else \
                    "Async mode is off."
                cv2.putText(videoCapResult.frame, log_message, (10, int(videoCapResult.input_height) - 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200, 10, 10), 1)
                inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
                    "Inference time: {:.3f} ms".format(inf_time * 1000)
                cv2.putText(videoCapResult.frame, inf_time_message, (10, int(videoCapResult.input_height) - 30),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
                fps_time = time.time() - start_time
                fps_message = "FPS: {:.3f} fps".format(1/fps_time)
                cv2.putText(videoCapResult.frame, fps_message, (10, int(videoCapResult.input_height) - 10),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)

                # Display the video output
                cv2.imshow(videoCapResult.cam_name, videoCapResult.frame)

            start_time = time.time()

            # Loop video to mimic continuous input if LOOP_VIDEO flag is True
            if LOOP_VIDEO and not videoCapResult.is_cam:
                vfps = int(round(videoCapResult.vc.get(cv2.CAP_PROP_FPS)))
                # If a video capture has ended restart it
                if videoCapResult.loop_frames > videoCapResult.vc.get(cv2.CAP_PROP_FRAME_COUNT) - int(round(vfps / min_fps)):
                    videoCapResult.loop_frames = 0
                    videoCapResult.vc.set(cv2.CAP_PROP_POS_FRAMES, 0)

            if is_async_mode:
                # Swap infer request IDs
                cur_request_id, next_request_id = next_request_id, cur_request_id

        # Esc key (27) exits the application
        if cv2.waitKey(1) == 27:
            break

        # Tab key (9) toggles between async and sync inference
        if cv2.waitKey(1) == 9:
            is_async_mode = not is_async_mode
            print("Switched to {} mode".format(
                "async" if is_async_mode else "sync"))

        # Stop once every stream has run out of frames
        if False not in no_more_data:
            break

    ret = save_json()
    if ret != 0:
        return ret, ''

    infer_network.clean()
    log_file.close()
    # NOTE(review): returns a list here while error paths return tuples;
    # the caller unpacks both the same way, so either works
    return [0, '']
662

663

664
if __name__ == '__main__':
    status, value = intruder_detector()

    # Map each status code to the line(s) to print. Built after the call
    # so the messages can include `value` and the configured paths.
    messages = {
        0: ["Success!"],
        -1: ["Could not open for write" + value + "!"],
        -2: ["Path to the .xml file not specified!",
             "Specify it using %env MODEL"],
        -3: ["You need to specify the path to the labels file",
             "Specify it using %env LABEL_FILE"],
        -4: ["Error in opening labels file!"],
        -5: ["No labels found in label file!"],
        -6: ["Labels file not found!"],
        -7: ["Error in opening Configuration file " + CONFIG_FILE + "!"],
        -8: ["Could not find the video file " + value + "!"],
        -9: ["\nCould not open " + value + " for reading!"],
        -10: ["Could not create event JSON file " + EVENT_FILE + "!"],
        -11: ["Could not create data JSON file " + DATA_FILE + "!"],
        -12: [CONFIG_FILE + " configuration file not found!"],
        -13: [conf_labels_file_path + " label file not found!"],
        -14: ["No input source found in configuration file!"],
        -15: ["Error: No labels currently in use. Please edit " +
              CONFIG_FILE + " file!"],
        -16: ["Error in opening intruder log file!"],
        -17: ["Path to cpu extensions library path not specified",
              "Specify it using %env CPU_EXTENSION"],
    }
    for line in messages.get(status, ["Unknown error occurred!"]):
        print(line)

    clean_up()
711

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.