#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import platform
import re
import io
import psutil
from time import time, asctime, localtime, sleep
import warnings
import sys
import argparse
import signal
import traceback
from collections import deque
import numpy as np
import cv2
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
# pylint: disable=g-import-not-at-top
# Be capable of running partial functionality even without all dependencies installed
try:
    from tools.ssd_mobilenet import SSD_MOBILENET
except ImportError:
    pass
try:
    from tools.tflite import TFLITE
except ImportError:
    pass
try:
    from tools.yolo import YOLO
except ImportError:
    pass
try:
    from tools.yolov5 import YOLOV5
except ImportError:
    pass
try:
    from tools.saved_model import SAVED_MODEL
except ImportError:
    pass
# pylint: enable=g-import-not-at-top
from tools.intersection import any_intersection, intersection
import cameratransform as ct
from deep_sort import preprocessing
from deep_sort import nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
from deep_sort.detection import Detection as ddet
from deepdish.framerecords import FrameRecords
import asyncio
import uvloop
import aiofiles
import concurrent.futures
from gmqtt import Client as MQTTClient
import json
import xml.etree.ElementTree as ET
from quart import Quart, Response, current_app
from hypercorn.asyncio import serve
from hypercorn.config import Config
import threading
import faulthandler
faulthandler.enable()
##################################################
# Video capture thread
class MBox:
    """Thread-safe single-slot mailbox used to pass the latest frame
    from the capture thread to the asyncio pipeline."""
    def __init__(self):
        self.message = None
        self.lock = threading.Lock()
    def get_message(self):
        with self.lock:
            return self.message
    def set_message(self, message):
        with self.lock:
            self.message = message
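# A minimal usage sketch (illustrative only, not part of the pipeline):
# the producer thread keeps overwriting the slot while a consumer polls
# it, so only the freshest message is ever observed.
#
#   box = MBox()
#   box.set_message((1, frame, time(), 0.01))   # capture thread
#   msg = box.get_message()                     # consumer; None if empty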
def capthread_f(cap, kickstart, box, everyframe, interframe_interval, simcam):
count = 0
# interframe_interval here is already converted to seconds
delay = interframe_interval
try:
kickstart.wait()
prev_t = time()
ret = True
while ret:
t1 = time()
ret, frame = cap.read()
if not ret:
frame = None
elif simcam:
frame = cv2.resize(frame, simcam)
t2 = time()
capthread_delta_t = t2 - prev_t
prev_t = t2
count += 1
box.set_message((count,frame,t2,t2-t1))
# If we are ensuring every frame is processed then wait for
# synchronising event to be triggered
if everyframe is not None:
everyframe.wait()
everyframe.clear()
elif interframe_interval is not None and frame is not None:
                # Nudge 'delay' by 1 ms per frame so that the measured
                # capthread_delta_t converges on the target interval
                if capthread_delta_t < interframe_interval:
                    delay += 0.001
                elif capthread_delta_t > interframe_interval:
                    delay -= 0.001
                delay = max(0, delay)
                sleep(delay)
finally:
cap.release()
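# Sketch of the expected wiring for this thread (assumed for illustration;
# the real call site appears elsewhere in this file):
#
#   kickstart = threading.Event()
#   box = MBox()
#   t = threading.Thread(target=capthread_f,
#                        args=(cv2.VideoCapture(0), kickstart, box,
#                              None, 1/30.0, None),
#                        daemon=True)
#   t.start()
#   kickstart.set()   # release once the pipeline has warmed up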
##################################################
# Quart web app
class Error(Exception):
def __init__(self, msg):
self.message = msg
webapp = Quart(__name__)
# Concurrency-safe box for passing along video frames to the web stream
class StreamingInfo:
def __init__(self):
self.lock = asyncio.Lock()
self.frame = None
async def get_frame(self):
async with self.lock:
return self.frame
async def set_frame(self, frame):
async with self.lock:
self.frame = frame
streaminfo = StreamingInfo()
# Yield successive frames to the web stream
async def generate(si):
# loop over frames from the output stream
while True:
        await asyncio.sleep(0.003) # cooperatively yield to other tasks
# wait until the lock is acquired
frame = await si.get_frame()
# check if the output frame is available, otherwise skip
# the iteration of the loop
if frame is None:
continue
t1=time()
# encode the frame in JPEG format
(flag, encodedImage) = cv2.imencode(".jpg", frame)
t2=time()
#print("imencode={:.0f}ms".format((t2-t1)*1000))
# ensure the frame was successfully encoded
if not flag:
continue
# yield the output frame in the byte format
t1=time()
yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(encodedImage) + b'\r\n')
t2=time()
#print("yield={:.0f}ms".format((t2-t1)*1000))
@webapp.route("/")
async def video_feed():
# return the response generated along with the specific media
# type (mime type)
return Response(generate(streaminfo), mimetype = "multipart/x-mixed-replace; boundary=frame")
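# The route above serves an MJPEG stream (multipart/x-mixed-replace) that
# browsers render as live video. One way to peek at it from the command
# line (bind address and port depend on the Hypercorn config; assumed here):
#
#   curl -N http://localhost:8000/ --output - | head -c 1000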
##################################################
# Utility classes
class FreshQueue(asyncio.Queue):
"""A subclass of queue that keeps only one, fresh item"""
def _init(self, maxsize):
self._queue = []
def _put(self, item):
self._queue = [item]
def _get(self):
item = self._queue[0]
self._queue = []
return item
def full(self):
return False
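# Behaviour sketch: unlike a plain asyncio.Queue, a second put before a
# get replaces the queued item, so consumers always see the newest frame.
#
#   q = FreshQueue()
#   q.put_nowait('stale')
#   q.put_nowait('fresh')
#   assert q.get_nowait() == 'fresh'   # 'stale' was silently dropped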
class FontLib:
    def __init__(self, display_w, fontbasedirs = ['.', '/usr/local/share', '/usr/share']):
        tinysize = int(24.0 / 640.0 * display_w)
        smallsize = int(40.0 / 640.0 * display_w)
        largesize = int(48.0 / 640.0 * display_w)
        fontfile = None
        for bd in fontbasedirs:
            f = os.path.join(bd, 'fonts/truetype/freefont/FreeSansBold.ttf')
            if os.path.exists(f):
                fontfile = f
                break
        if fontfile is None:
            # Fall back to PIL's built-in bitmap font rather than crash
            # when FreeSansBold.ttf cannot be found in any base directory
            fallback = ImageFont.load_default()
            self.table = {'tiny': fallback, 'small': fallback, 'large': fallback}
        else:
            self.table = {'tiny': ImageFont.truetype(fontfile, tinysize),
                          'small': ImageFont.truetype(fontfile, smallsize),
                          'large': ImageFont.truetype(fontfile, largesize)}
def fetch(self, name):
if name in self.table:
return self.table[name]
else:
return self.table['large']
# Details for drawing things on a buffer
class RenderInfo:
def __init__(self, ratio, fontlib, draw, buffer):
self.ratio = ratio
self.fontlib = fontlib
self.draw = draw
self.buffer = buffer
##################################################
# Output elements - things that are rendered in video or text output
class FrameInfo:
"""Basics about the current video frame"""
def __init__(self, t_frame, framenum):
self.t_frame = t_frame
self.framenum = framenum
self.priority = 0
def do_text(self, handle, elements):
handle.write('Frame {}:'.format(self.framenum))
for e in elements:
if isinstance(e, TimingInfo):
handle.write(' {}={:.0f}ms'.format(e.short_label, e.delta_t*1000))
elif isinstance(e, TempInfo):
handle.write(' temp={:.0f}C'.format(e.temp))
elif isinstance(e, PipelineInfo):
handle.write(' pipe={} cpup={:.0f}%'.format(e.count, e.cpup))
handle.write('\n')
def do_json(self, json):
json['framenum'] = self.framenum
json['acp_ts'] = str(self.t_frame)
class TimingInfo:
"""Various categories of profiling (time)"""
def __init__(self, desc, short_label, delta_t):
self.description = desc
self.short_label = short_label
self.delta_t = delta_t
self.priority = 1
def do_json(self, json):
if 'timing' not in json: json['timing'] = {}
json['timing'][self.short_label]=round(self.delta_t*1000)
class TempInfo:
"""CPU Temp"""
def __init__(self, temp):
self.temp = temp
self.priority = 2
def do_json(self, json):
json['temp']=self.temp
class PipelineInfo:
"""Pipeline profiling info - sizes of queues, number of frames in flight, cpu % and cpu freq."""
def __init__(self, count, qsizes, cpup, freq):
self.count = count
self.priority = 3
self.qsizes = qsizes
self.cpup = cpup # cpu %
self.freq = freq # cpu freq
def do_json(self, json):
json['pipe']=self.count
json['qsizes']=self.qsizes
json['cpup']=self.cpup
json['freq']=self.freq
class DetectedObject:
"""A detected object - simply the information conveyed by the object detector"""
def __init__(self, bbox):
self.bbox = bbox
self.priority = 5
self.outline = (255, 0, 0)
def do_render(self, render):
pts = list(np.int32(np.array(self.bbox).reshape(-1,2) * render.ratio).reshape(-1))
render.draw.rectangle(pts, outline=self.outline)
def do_json(self, json):
if 'detections' not in json: json['detections'] = []
json['detections'].append({'bbox': self.bbox.astype(np.int32).tolist()})
class TrackedObject:
"""A tracked object based on the output of the tracker"""
def __init__(self, bbox, txt, lbl, conf, track_id, ratios):
self.bbox = bbox
self.txt = txt
self.label = lbl
self.track_id = track_id
self.confidence = conf
self.ratios = ratios
self.priority = 6
self.outline = (255, 255, 255)
self.font_fill = (0, 255, 0)
self.font = 'tiny'
def do_render(self, render):
pts = list(np.int32(np.array(self.bbox).reshape(-1,2) * render.ratio).reshape(-1))
render.draw.rectangle(pts, outline=self.outline)
render.draw.text(self.bbox[:2],str(self.txt), fill=self.font_fill, font=render.fontlib.fetch(self.font))
def do_json(self, json):
if 'tracks' not in json: json['tracks'] = []
wr, hr = self.ratios
bbox = self.bbox.astype(np.float32) * [wr,hr,wr,hr]
json['tracks'].append({'bbox': bbox.astype(np.int32).tolist(), 'label': self.label, 'confidence': self.confidence, 'track_id': self.track_id})
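# An emitted record from the two classes above looks like this
# (illustrative values only):
#   {"detections": [{"bbox": [118, 42, 60, 160]}],
#    "tracks": [{"bbox": [120, 40, 180, 200], "label": "person",
#                "confidence": 0.87, "track_id": 3}]}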
class Line:
"""Base class for graphical elements that draw a line"""
def do_render(self, render):
pts = list(np.int32(np.array(self.pts).reshape(-1,2) * render.ratio).reshape(-1))
render.draw.line(pts, fill=self.fill, width=self.width)
class TrackedPath(Line):
def __init__(self, pts):
self.pts = pts
self.priority = 3
self.width = 3
self.fill = (255, 0, 255)
class TrackedPathIntersection(Line):
def __init__(self, pts):
self.pts = pts
self.priority = 4
self.width = 5
self.fill = (0, 0, 255)
class CameraCountLine(Line):
def __init__(self, pts):
self.pts = pts
self.priority = 2
self.width = 3
self.fill = (0, 0, 255)
class CameraImage:
"""The background image"""
def __init__(self, image):
self.image = image
self.priority = 1
self.raw = True
def do_render(self, render):
render.buffer.paste(self.image)
class FGMask:
"""Apply a foreground mask if desired"""
def __init__(self, fgMask):
self.fgMask = fgMask
self.priority = 2
def do_render(self, render):
image = Image.fromarray(self.fgMask)
render.buffer.paste(image)
class CountingStats:
"""Stats about the objects being counted in the scene"""
def __init__(self, negcount, poscount):
self.negcount = negcount
self.poscount = poscount
self.priority = 10
self.font_fill_negcount = (255, 0, 0)
self.font_fill_abscount = (0, 255, 0)
self.font_fill_poscount = (0, 0, 255)
self.font = 'tiny'
self.labels = list(negcount.keys())
self.labels.reverse()
def do_render(self, render):
font = render.fontlib.fetch(self.font)
[w, h] = render.buffer.size
cursor = h
for lbl in self.labels:
(_, dy) = font.getsize(str(self.negcount[lbl]))
cursor -= dy
render.draw.text((0, cursor), str(self.negcount[lbl]), fill=self.font_fill_negcount, font=font)
#central = str(abs(self.negcount[lbl]-self.poscount[lbl]))
central = lbl
(dx, dy) = font.getsize(central)
render.draw.text(((w - dx)/2, cursor), central, fill=self.font_fill_abscount, font=font)
(dx, dy) = font.getsize(str(self.poscount[lbl]))
render.draw.text((w - dx, cursor), str(self.poscount[lbl]), fill=self.font_fill_poscount, font=font)
class TopDownView:
"""Show a top-down viewport if enabled"""
def __init__(self, topdownview):
(viewpos, viewsize) = topdownview
self.viewpos = np.array(viewpos,dtype=int)
self.viewsize = np.array(viewsize,dtype=int)
self.priority = 9
def do_render(self, render):
pts = list(np.array([self.viewpos, self.viewpos + self.viewsize]).reshape(-1))
render.draw.rectangle(pts, fill=(0, 0, 0))
class TopDownObj():
"""Show an object in the top-down viewport if enabled"""
def __init__(self, topdownview, pts):
(viewpos, viewsize) = topdownview
self.viewpos = np.array(viewpos,dtype=int)
self.viewsize = np.array(viewsize,dtype=int)
# transform points into top-down view window
self.pts = pts.reshape(-1, 2) * np.array([1, -1]) + viewsize * np.array([0.5, 1]) + viewpos
self.priority = 10
self.fill = (0, 255, 0)
self.width = 2
def do_render(self, render):
half = np.array([self.width/2.0, self.width/2.0])
pt0 = self.pts[-1]
rectpts = list(np.array([pt0 - half, pt0 + half],dtype=int).reshape(-1))
render.draw.rectangle(rectpts, fill=self.fill)
linepts = list(np.array(self.pts.reshape(-1),dtype=int))
render.draw.line(linepts, fill=self.fill, width=self.width)
##################################################
# Main pipeline of video input, object detection, feature encoding,
# tracking and output. See Danish, et al. (2022) for more details.
class Pipeline:
"""Object detection and tracking pipeline"""
def __init__(self, args):
self.args = args
# Track the current process
self.process = psutil.Process()
# Initialise camera & camera viewport
self.init_camera()
# Initialise output
self.init_output(self.args.output)
# Process comma-separated list of wanted labels
self.wanted_labels = self.args.wanted_labels.strip().split(',')
# Open annotations XML file if it exists
self.xmltree = None
if self.args.input_cvat_dir is not None:
self.annotationfile = os.path.join(self.args.input_cvat_dir, "annotations.xml")
try:
self.xmltree = ET.parse(self.annotationfile)
except FileNotFoundError:
self.xmltree = None
self.basedir = self.args.basedir
model = os.path.join(self.basedir, self.args.model)
if self.args.labels:
labels = os.path.join(self.basedir, self.args.labels)
else:
labels = None
# Initialise object detector (for some reason it has to happen
# here & not within detect_objects(), or else the inference engine
# gets upset and starts throwing NaNs at me. Thanks, Python.)
use_edgetpu = 'edgetpu' in self.args.model and not self.args.disable_edgetpu
if 'yolov5' in self.args.model:
self.object_detector = YOLOV5(wanted_labels=self.wanted_labels, model_file=model, label_file=labels, num_threads=self.args.num_threads, edgetpu=use_edgetpu)
elif 'yolo' in self.args.model:
self.object_detector = YOLO(wanted_labels=self.wanted_labels, model_file=model, label_file=labels, num_threads=self.args.num_threads)
elif 'saved_model' in self.args.model:
self.object_detector = SAVED_MODEL(wanted_labels=self.wanted_labels, model_file=model, label_file=labels, num_threads=self.args.num_threads)
elif 'mobilenet' in self.args.model:
self.object_detector = SSD_MOBILENET(wanted_labels=self.wanted_labels, model_file=model, label_file=labels,
num_threads=self.args.num_threads, edgetpu=use_edgetpu)
elif 'tflite' in self.args.model:
self.object_detector = TFLITE(wanted_labels=self.wanted_labels, model_file=model, label_file=labels,
num_threads=self.args.num_threads, edgetpu=use_edgetpu)
elif use_edgetpu:
from tools.edgetpu import EDGETPU
self.object_detector = EDGETPU(wanted_labels=self.wanted_labels, model_file=model, label_file=labels,
num_threads=self.args.num_threads, edgetpu=use_edgetpu)
else:
print('Unsure what to do with model file {}'.format(self.args.model))
sys.exit(1)
# Initialise feature encoder
if self.args.encoder_model is None:
model_filename = os.path.join(self.args.deepsorthome, 'mars-64x32x3.pb')
else:
model_filename = os.path.join(self.args.deepsorthome, self.args.encoder_model)
self.encoder = gdet.create_box_encoder(model_filename, batch_size=self.args.encoder_batch_size, num_threads=self.args.num_threads)
self.background_subtraction = not self.args.disable_background_subtraction
# Initialise tracker
nn_budget = None
metric = nn_matching.NearestNeighborDistanceMetric("cosine", self.args.max_cosine_distance, nn_budget)
self.tracker = Tracker(metric,max_iou_distance=self.args.max_iou_distance, max_age=self.args.max_age)
# Initialise database
self.db = {}
self.data_lock = asyncio.Lock()
self.framenum_committed = 0 # The frame number associated with
# the information kept in the
# following variables
self.delcount = dict([(lbl, 0) for lbl in self.wanted_labels])
self.intcount = dict([(lbl, 0) for lbl in self.wanted_labels])
self.poscount = dict([(lbl, 0) for lbl in self.wanted_labels])
self.negcount = dict([(lbl, 0) for lbl in self.wanted_labels])
self.mqtt = None
self.topic = self.args.mqtt_topic
self.mqtt_acp_id = self.args.mqtt_acp_id
self.heartbeat_delay_secs = self.args.heartbeat_delay_secs
self.pipeline_sem = asyncio.Semaphore()
self.final_frame = None # Not set until the final frame is
# reached and known, if ever.
self.frame_count = 0 # self.frame_count is only used to assign
# the next 'framenum' number. It should
# not be read otherwise because
# pipelining means there could be
# overlapping stages & race conditions.
self.log = self.args.log
if self.log is not None:
if self.args.restore_from_log and os.path.exists(self.log):
with open(self.log, mode='r') as f:
q = deque(f, 1)
if len(q) > 0:
last_line = q.pop()
data = json.loads(last_line)
for lbl in self.wanted_labels:
self.poscount[lbl] = data.get('poscount_'+lbl, 0)
self.negcount[lbl] = data.get('negcount_'+lbl, 0)
self.delcount[lbl] = data.get('delcount_'+lbl, 0)
self.intcount[lbl] = data.get('intcount_'+lbl, 0)
self.frame_count = data.get('frame_count', 0)
else:
with open(self.log, mode='w+') as f:
f.truncate()
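        # A restored log line is expected to carry counters keyed per label,
        # e.g. (illustrative values):
        #   {"poscount_person": 4, "negcount_person": 2, "delcount_person": 6,
        #    "intcount_person": 6, "frame_count": 1234}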
self.loop = asyncio.get_event_loop()
self.t_prev = None # frame to frame times
self.cpu_temp_file = '/sys/class/thermal/thermal_zone0/temp'
if self.args.cpu_temp_file is not None:
self.cpu_temp_file = self.args.cpu_temp_file
if not os.path.exists(self.cpu_temp_file):
self.cpu_temp_file = None
self.cpu_freq_file = '/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq'
if self.args.cpu_freq_file is not None:
self.cpu_freq_file = self.args.cpu_freq_file
if not os.path.exists(self.cpu_freq_file):
self.cpu_freq_file = None
self.cpu_gov_file = None
else:
self.cpu_gov_file = os.path.join(os.path.dirname(self.cpu_freq_file), 'scaling_governor')
if not os.path.exists(self.cpu_gov_file):
self.cpu_gov_file = None
self.powersave_delay = 0
self.powersave_delay_maximum = float(self.args.powersave_delay_maximum) / 1000.0
if self.args.disable_powersaving:
self.powersave_delay_increment = 0
else:
self.powersave_delay_increment = float(self.args.powersave_delay_increment) / 1000.0
self.cam = None
self.topdownview = None
self.topdownview_scalefactors = None
if self.args.three_d:
if self.args.focallength_mm is not None and self.args.sensor_width_mm is not None and self.args.sensor_height_mm is not None and self.args.elevation_m is not None and self.args.tilt_deg is not None:
(w, h) = self.input_size
self.cam = ct.Camera(ct.RectilinearProjection(focallength_mm=self.args.focallength_mm,
sensor=(self.args.sensor_width_mm, self.args.sensor_height_mm),
image=self.input_size),
ct.SpatialOrientation(elevation_m=self.args.elevation_m,
tilt_deg=self.args.tilt_deg,
roll_deg=self.args.roll_deg))
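                # (With this calibration cameratransform can project image
                # pixels onto the ground plane in metres, e.g.
                # cam.spaceFromImage([[x, y]], Z=0) -- an illustrative call,
                # assuming the cameratransform API.)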
defaultviewsize = ((0, 0), (w/4, h/4))
if self.args.topdownview_size_m is not None:
size = np.array(list(map(int,self.args.topdownview_size_m.strip().split(','))),dtype=float)
scalefactors = np.array(defaultviewsize[1],dtype=float) / size
self.topdownview = defaultviewsize
self.topdownview_scalefactors = scalefactors
else:
self.topdownview = defaultviewsize
self.topdownview_scalefactors = np.array([1,1])
else:
raise Error('3-D transform requires focallength, sensor size, camera elevation and tilt.')
# Initialise frame recording system
self.framerec = FrameRecords(self.object_detector.labels)
# Examine CVAT-format XML file if given
if self.xmltree is not None:
# Compare labels in XML spec to labels in labelfile
full_labels = self.object_detector.labels
labels_to_id = {v: k for k, v in full_labels.items()}
for l in self.xmltree.getroot().findall('./meta/task/labels/label'):
name = l.find('name').text
id = labels_to_id.get(name, None)
color = l.find('color').text
# print("Annotation label '{}' mapped to detector label '{}' ID '{}', assigned color '{}'".format(name, full_labels.get(id, None), id, color))
# print("To change this try argument: --remap-annotation-labels 'annotation label:detector label,...'")
self.framerec.add_annotation_label_info(name, id, color)
for t in self.xmltree.getroot().findall('./track'):
lblname = t.get('label')
track_id = int(t.get('id'))
# print("Track {} label='{}'".format(track_id, lblname))
for b in t.findall('box'):
frame=int(b.get('frame'))
outside=b.get('outside')=='1'
occluded=b.get('occluded')=='1'
keyframe=b.get('keyframe')=='1'
pts = np.array([b.get('xtl'), b.get('ytl'), b.get('xbr'), b.get('ybr')], dtype=float)
z_order = int(b.get('z_order'))
# print("box frame={} outside={:d} occluded={:d} keyframe={:d} pts={} z_order={}".format(frame,outside,occluded,keyframe,pts,z_order))
self.framerec.add_annotated_track(frame, track_id, lblname, pts, outside, occluded, keyframe, z_order)
def on_mqtt_connect(self, client, flags, rc, properties):
self.mqtt_connect_event.set()
if self.args.mqtt_verbosity > 1:
payload = {'acp_ts': str(time()), 'acp_event': 'initialisation', 'acp_id': self.mqtt_acp_id,
'model': self.args.model, 'model_class': type(self.object_detector).__name__,
'encoder_model': self.args.encoder_model, 'encoder_model_class': type(self.encoder.image_encoder).__name__,
'input': self.input,
'use_edgetpu': self.object_detector.use_edgetpu,
'input_shape': [self.object_detector.width, self.object_detector.height],
'encoder_input_shape': [self.encoder.width, self.encoder.height],
'num_threads': self.object_detector.num_threads,
'max_age': self.args.max_age,
'max_iou_distance': self.args.max_iou_distance,
'nms_max_overlap': self.args.nms_max_overlap,
'max_cosine_distance': self.args.max_cosine_distance,
'background_subtraction': None if self.args.disable_background_subtraction else self.args.background_subtraction_ratio,
'powersaving': None if self.args.disable_powersaving else (self.args.powersave_delay_increment, self.args.powersave_delay_maximum),
'cpu_governor': self.cpu_governor,
'object_detector_skip_frames': self.args.object_detector_skip_frames,
'interframe_interval': self.args.interframe_interval,
'simulate_camera': self.args.simulate_camera
}
self.mqtt.publish(self.topic, json.dumps(payload))
async def init_mqtt(self):
self.cpu_governor = await self.get_cpu_governor() # only used in MQTT messages anyway
if self.args.mqtt_broker is not None:
self.mqtt = MQTTClient('deepdish-'+platform.node())
if self.topic is None:
self.topic = 'default/topic'
self.mqtt_connect_event = asyncio.Event()
self.mqtt.on_connect = self.on_mqtt_connect
self.mqtt.set_config({'reconnect_retries': 10, 'reconnect_delay': 1})
if self.args.mqtt_user is not None:
self.mqtt.set_auth_credentials(self.args.mqtt_user, self.args.mqtt_pass)
print('Waiting to connect to MQTT broker.')
await self.mqtt.connect(self.args.mqtt_broker, self.args.mqtt_port)
await self.mqtt_connect_event.wait()
def init_camera(self):
self.input = self.args.input
self.simcam = None
if self.args.input_cvat_dir is not None:
# Set up frame-by-frame from files in input CVAT directory
self.input = os.path.join(self.args.input_cvat_dir, "images/frame_%06d.jpg")
# Open test file
with Image.open(self.input % 1) as im:
self.input_size = im.size
# Capture every frame from the video file / dir
self.everyframe = threading.Event()
# Disable power-saving delay mechanism
self.args.disable_powersaving = True
self.powersave_delay_increment = 0
elif self.input is None:
self.input_size = (self.args.camera_width, self.args.camera_height)
if self.args.gstreamer is not None:
src = self.args.gstreamer
elif self.args.gstreamer_nvidia:
src = "nvarguscamerasrc ! video/x-raw(memory:NVMM), width=(int){}, height=(int){}, format=(string)NV12, framerate=(fraction)30/1 ! nvvidconv flip-method=0 ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink drop=true".format(*self.input_size)
else:
src = self.args.camera
self.input = src
# Allow live camera frames to be dropped
self.everyframe = None
else:
# 'live camera simulation'
if self.args.simulate_camera:
simcam = self.args.simulate_camera
simcam = [int(n) for n in simcam]
# if only one dimension is specified, assume square input_size
if len(simcam) == 1:
simcam = [simcam[0], simcam[0]]
self.simcam = simcam[0:2]
else:
self.simcam = None
if self.args.interframe_interval is None:
# Capture every frame from the video file in self.input
self.everyframe = threading.Event()
# Disable power-saving delay mechanism
self.args.disable_powersaving = True
self.powersave_delay_increment = 0
# Set up the OpenCV video capture
self.cap = cv2.VideoCapture(self.input)
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
# Just in case input_size wasn't already set up
self.input_size = (int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
if self.simcam:
# in case we are simulating a camera, data should be
# scaled back to original coordinates
self.trackdata_ratios = (float(self.input_size[0])/float(self.simcam[0]),
float(self.input_size[1])/float(self.simcam[1]))
else:
self.trackdata_ratios = (1, 1)
# Configure the 'counting line' in the camera viewport
if self.args.line is None:
w, h = self.input_size
self.countline = np.array([[w/2,0],[w/2,h]],dtype=int)
else:
self.countline = np.array(list(map(int,self.args.line.strip().split(','))),dtype=int).reshape(2,2)
self.cameracountline = self.countline.astype(float)
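        # Example: '--line 320,0,320,480' yields countline [[320, 0], [320, 480]],
        # a vertical counting line down the middle of a 640x480 view
        # (illustrative values; without --line the default is the vertical
        # mid-line computed above).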
def init_output(self, output):
if self.args.disable_graphics:
self.output = None
return
self.color_mode = None # fixme
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
if self.cap is not None:
fps = self.cap.get(cv2.CAP_PROP_FPS)
else:
fps = 15 # FIXME: no way of determining FPS
w, h = self.input_size
self.backbuf = Image.new("RGBA", self.input_size, (0,0,0,0))
self.draw = ImageDraw.Draw(self.backbuf)
if self.args.output_cvat_dir is None:
self.output = cv2.VideoWriter(self.args.output,fourcc, fps, self.input_size)
else:
# write individual frame files in CVAT format
outpath = os.path.join(self.args.output_cvat_dir,'images','frame_%06d.jpg')
os.makedirs(os.path.dirname(outpath), exist_ok=True)
self.output = cv2.VideoWriter(outpath, 0, 0, self.input_size)
if not self.args.framebuffer:
self.framebufdev = None
else:
self.framebufdev = self.args.framebuffer_device
fbX = self.framebufdev[-3:]
vsizefile = '/sys/class/graphics/{}/virtual_size'.format(fbX)
if not os.path.exists(self.framebufdev) or not os.path.exists(vsizefile):
#raise Error('Invalid framebuffer device: {}'.format(self.framebufdev))
print('Invalid framebuffer device: {}'.format(self.framebufdev))
self.framebufdev = None
if self.framebufdev is not None:
# Framebuffer size can be different than input/output size
(w, h) = (self.args.framebuffer_width, self.args.framebuffer_height)
if w is None or h is None:
                with open(vsizefile) as vf:
                    nums = re.findall('(.*),(.*)', vf.read())[0]
if w is None:
w = int(nums[0])
if h is None:
h = int(nums[1])
self.framebufres = (w, h)
print('Framebuffer device: {} resolution: {},{}'.format(self.framebufdev,w,h))
def shutdown(self):
global shutdown_event
self.running = False
print('Shutting down pipeline.')
if self.args.output_cvat_dir is not None:
print('Writing CVAT output.')
# Write CVAT-format annotations XML file if possible
if self.xmltree is not None:
meta = self.xmltree.getroot().find('./meta')
else:
meta = None
xmlout = self.framerec.xml_output(meta=meta)
xmloutfile = os.path.join(self.args.output_cvat_dir, "annotations.xml")
with open(xmloutfile, mode='wb') as f:
xmlout.write(f, xml_declaration=True, encoding='utf-8', short_empty_elements=False)
if cmdserver:
print('Shutting down command server.')
cmdserver.close()
if self.mqtt:
print('Shutting down MQTT client.')
if self.args.mqtt_verbosity > 1:
payload = {'acp_ts': str(time()), 'acp_event': 'shutdown', 'acp_id': self.mqtt_acp_id, 'model': self.args.model, 'input': self.input}
self.mqtt.publish(self.topic, json.dumps(payload))
print('Shutting down Quart server.')
shutdown_event.set()
async def get_cpu_temp(self):
if not self.cpu_temp_file: return None
async with aiofiles.open(self.cpu_temp_file, mode='r') as f:
line = await f.read()
temp = float(line)
return temp/1000
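    # (sysfs thermal_zone files report temperature in millidegrees Celsius,
    # hence the division by 1000; cpufreq's scaling_cur_freq is in kHz.)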
async def get_cpu_freq(self):
if not self.cpu_freq_file: return None
async with aiofiles.open(self.cpu_freq_file, mode='r') as f:
line = await f.read()
freq = int(line)
return freq
async def get_cpu_governor(self):
if not self.cpu_gov_file: return None
async with aiofiles.open(self.cpu_gov_file, mode='r') as f:
gov = await f.read()
return gov.strip()
async def capture(self, q, box):
try:
            # The purpose of this loop is to decouple the capthread from the
            # pipeline. When dealing with a live video stream (everyframe is
            # None) we must pull frames off the live camera as fast as they
            # appear, or else OpenCV starts queueing them up internally (a
            # rather poor design) and we fall behind 'real time'.
while self.running:
# Fetch frame from box where capthread has placed it
frame = None
msg = None
while msg is None:
await asyncio.sleep(0.003) # cooperative yield
# note that .sleep(0) doesn't work right, causing severely inconsistent timings
msg = box.get_message()
(orig_framenum, frame, t_frame, dt_cap) = msg
if self.everyframe:
box.set_message(None) # avoid repeating frames
if frame is None:
self.final_frame = self.frame_count
break
if self.args.camera_flip:
# If we need to flip the image vertically
frame = cv2.flip(frame, 0)
# Ensure frame is proper size
frame = cv2.resize(frame, self.input_size)
# q is a 1-element FreshQueue that overwrites the existing element if there is one
q.put_nowait((orig_framenum, frame, dt_cap, t_frame, time()))
# slow down pipeline if trying to save power
if self.powersave_delay > 0:
await asyncio.sleep(self.powersave_delay)
finally:
if self.cap is not None:
self.cap.release()
def run_object_detector(self, frame):
t1 = time()
image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGRA2RGBA))
(boxes, labels, scores) = self.object_detector.detect_image(image)
t2 = time()
return (boxes, labels, scores, t2 - t1)
async def detect_objects(self, q_in, q_out):
# Initialise background subtractor
backSub = cv2.createBackgroundSubtractorMOG2()
# Initialise vars for skipping a set number of frames between objd invocations:
skip_rem = 0
prev_objd_result = None
# Feed some dummy data to warm-up the object detector and feature encoder
dummyframe = np.zeros((self.input_size[1], self.input_size[0], 3), dtype=np.uint8)
await self.loop.run_in_executor(None, self.run_object_detector, dummyframe)
await self.loop.run_in_executor(None, self.encoder, dummyframe, [(0, 0, self.encoder.width, self.encoder.height)])
# Now we're ready to start the capthread:
self.kickstart.set()
while self.running:
# Obtain next video frame
(orig_framenum, frame, dt_cap, t_frame, t_prev) = await q_in.get()
if orig_framenum <= self.frame_count:
# We've already seen this frame
await asyncio.sleep(0.003) # cooperative yield
continue
t_frame_recv = time()
framenum = orig_framenum
self.frame_count = orig_framenum
# Frame num. 'framenum' begins its journey through the pipeline here
self.pipeline_sem.release()
if self.everyframe:
# Notify other side that this frame is in the pipeline
self.everyframe.set()
# Apply background subtraction to find image-mask of areas of motion
if self.background_subtraction:
fgMask = backSub.apply(frame)
if self.args.enable_background_masking:
frame = cv2.bitwise_and(frame,frame,mask = fgMask)
# Convert to PIL Image
#image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGRA2RGBA))
t_backsub = time()
if skip_rem > 0 and prev_objd_result is not None:
(boxes0, labels0, scores0, delta_t) = prev_objd_result
skip_rem-=1
objd_skipped = True
else:
# Run object detection engine within a Thread Pool
(boxes0, labels0, scores0, delta_t) = await self.loop.run_in_executor(None, self.run_object_detector, frame)
prev_objd_result = (boxes0, labels0, scores0, delta_t)
skip_rem = self.args.object_detector_skip_frames or 0
objd_skipped = False
# Filter object detection boxes, including only those with areas of motion
t1 = time()
boxes = []
labels = []
scores = []
max_x, max_y = self.input_size
            for ((x,y,w,h), lbl, scr) in zip(boxes0, labels0, scores0):
                if np.any(np.isnan((x, y, w, h))):
                    # Drop any rubbish results for this box only
                    continue
x, y = int(np.clip(x,0,max_x)), int(np.clip(y,0,max_y))
w, h = int(np.clip(w,0,max_x-x)), int(np.clip(h,0,max_y-y))
# Check if the box is almost as large as the camera viewport
if w * h > 0.9 * max_x * max_y:
# reject as spurious
continue
# Check if the box includes sufficient detected motion
if not self.background_subtraction or np.count_nonzero(fgMask[y:y+h,x:x+w]) >= self.args.background_subtraction_ratio * w * h:
boxes.append((x,y,w,h))
labels.append(lbl)
scores.append(scr)
t2 = time()
# start slowing down the pipeline if there are no objects in scene
if not self.args.disable_powersaving and len(boxes) == 0:
self.powersave_delay += self.powersave_delay_increment
if self.powersave_delay > self.powersave_delay_maximum:
self.powersave_delay = self.powersave_delay_maximum
else:
self.powersave_delay = 0
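            # (Additive-increase backoff: each frame with no detections adds
            # powersave_delay_increment, capped at powersave_delay_maximum;
            # any detection resets the delay to zero.)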
# Send results to next step in pipeline
elements = [FrameInfo(t_frame, framenum),
CameraImage(Image.fromarray(cv2.cvtColor(frame,cv2.COLOR_BGRA2RGB), mode='RGB')),
CameraCountLine(self.cameracountline),
TimingInfo('Frame capture latency', 'fcap', dt_cap),
TimingInfo('Frame return [Q0] latency', 'fram', t_prev - t_frame),
TimingInfo('Frame / Q1 item received latency', 'q1', t_frame_recv - t_prev),
#TimingInfo('Frame prep latency', 'prep', t_prep - t_frame_recv),
TimingInfo('Background subtraction latency', 'bsub', t_backsub - t_frame_recv)]
if not objd_skipped:
elements.append(TimingInfo('Object detection latency', 'objd', delta_t+(t2-t1)))
await q_out.put((frame, framenum, boxes, labels, scores, objd_skipped, elements, time()))
async def encode_features(self, q_in, q_out):
with concurrent.futures.ThreadPoolExecutor() as pool:
prev_features = None
while self.running:
# Obtain next video frame and object detection boxes
(frame, framenum, boxes, labels, scores, objd_skipped, elements, t_prev) = await q_in.get()
t1 = time()
# Run non-max suppression to eliminate spurious boxes
boxesA0 = np.array(boxes)
scoresA0 = np.array(scores)
indices = preprocessing.non_max_suppression(boxesA0, self.args.nms_max_overlap, scoresA0)
boxesA1 = boxesA0[indices]
scoresA1 = scoresA0[indices]
labels1 = [labels[i] for i in indices]
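                # (non_max_suppression returns the indices of boxes to keep:
                # any box overlapping a higher-scoring box by more than
                # nms_max_overlap is suppressed, collapsing duplicate
                # detections of the same object.)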
# Consider and modify boxes based on info contained in the frame record