-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathapp.py
executable file
·89 lines (68 loc) · 3 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import time
from typing import List
import edgeiq
import numpy as np
def stack_frames(marked_frames: List[np.ndarray]) -> np.ndarray:
    """
    Tile frames into a 2-wide grid:
        | cam1 | cam2 |
        | cam3 | cam4 |

    An odd frame count is padded with an all-black frame of the same
    shape so every row has exactly two columns.
    """
    frames = list(marked_frames)
    if len(frames) % 2:
        # Pad the trailing row with a black frame matching the last frame.
        frames.append(np.zeros_like(frames[-1]))
    rows = [np.hstack(frames[j:j + 2]) for j in range(0, len(frames), 2)]
    return np.vstack(rows)
def main():
    """Run batched object detection over two looping video files.

    Loads a batch-of-4 YOLOv3 model on TensorRT, reads one frame from
    each of two file streams per iteration, duplicates them to fill the
    4-frame batch, marks up each frame with its detections, and sends a
    2x2 tiled composite plus detection text to the local Streamer UI.
    Runs until the Streamer signals exit; always reports FPS on the way
    out via the ``finally`` block.
    """
    obj_detect = edgeiq.ObjectDetection("alwaysai/yolo_v3_xavier_nx_batch4")
    obj_detect.load(engine=edgeiq.Engine.TENSOR_RT)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.FileVideoStream('videos/sample1.mp4') as video_stream0, \
                edgeiq.FileVideoStream('videos/sample2.mp4') as video_stream1, \
                edgeiq.Streamer(max_image_width=1080, max_image_height=760) as streamer:
            # Give the file streams time to buffer before reading.
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame0 = video_stream0.read()
                frame1 = video_stream1.read()
                # Duplicate the two source frames to fill the model's
                # batch-of-4 input (model id ends in "batch4").
                frames = [frame0, frame1, frame0, frame1]
                results = obj_detect.detect_objects_batch(frames,
                                                          confidence_level=.1)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                        "Inference time: {:1.3f} s".format(results[0].duration))
                text.append("Objects:")

                # Mark up each frame with its corresponding detections
                # and accumulate per-frame result text.
                for index, result in enumerate(results):
                    text.append("Results-{}".format(index))
                    frames[index] = edgeiq.markup_image(
                            frames[index], result.predictions,
                            colors=obj_detect.colors)
                    for prediction in result.predictions:
                        text.append("{}: {:2.2f}%".format(
                            prediction.label, prediction.confidence * 100))

                streamer.send_data(stack_frames(frames), text)
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

    print("Program Ending")
# Standard script entry-point guard: run only when executed directly.
if __name__ == "__main__":
    main()