#
# Created on Oct, 2017
#
# @author: naxvm
#
# It receives images from a live video and classifies them into digits
# employing a convolutional neural network, based on TensorFlow Deep Learning middleware.
# It shows the live video and the results in a GUI.
#
# Based on @nuriaoyaga code:
# https://github.com/RoboticsURJC-students/2016-tfg-nuria-oyaga/blob/
# master/numberclassifier.py
# and @dpascualhe's:
# https://github.com/RoboticsURJC-students/2016-tfg-david-pascual/blob/
# master/digitclassifier.py
#
#
import sys
import signal
import yaml
from PyQt5 import QtWidgets
from Camera.threadcamera import ThreadCamera
from GUI.gui import GUI
from GUI.threadgui import ThreadGUI
from Net.threadnetwork import ThreadNetwork
# Restore the default SIGINT behavior so Ctrl+C terminates the process
# immediately (presumably so the Qt event loop below does not absorb it —
# NOTE(review): confirm this is the intent).
signal.signal(signal.SIGINT, signal.SIG_DFL)
def selectVideoSource(cfg):
    """
    Build the camera object matching the configured video source.

    @param cfg: configuration dictionary (parsed YML)
    @return cam: camera object for the selected source
    @raise SystemExit in case of unsupported video source
    """
    source = cfg['ObjectDetector']['Source']
    kind = source.lower()

    if kind == 'local':
        # Live capture from a local camera device.
        from Camera.local_camera import Camera
        device_no = cfg['ObjectDetector']['Local']['DeviceNo']
        print(' Chosen source: local camera (index %d)' % (device_no))
        return Camera(device_no)

    if kind == 'video':
        # Playback of a video file on disk.
        from Camera.local_video import Camera
        video_path = cfg['ObjectDetector']['Video']['Path']
        print(' Chosen source: local video (%s)' % (video_path))
        return Camera(video_path)

    if kind == 'stream':
        # comm already prints the source technology (ICE/ROS)
        import comm
        import config
        stream_cfg = config.load(sys.argv[1])
        jdrc = comm.init(stream_cfg, 'ObjectDetector')
        proxy = jdrc.getCameraClient('ObjectDetector.Stream')
        from Camera.stream_camera import Camera
        return Camera(proxy)

    raise SystemExit(('%s not supported! Supported source: Local, Video, Stream') % (source))
def selectNetwork(cfg):
    """
    Resolve the configured deep-learning framework and import its network class.

    @param cfg: configuration dictionary (parsed YML)
    @return net_prop, DetectionNetwork: network properties and Network class
    @raise SystemExit in case of invalid network
    """
    net_prop = cfg['ObjectDetector']['Network']
    framework = net_prop['Framework']
    chosen = framework.lower()

    if chosen == 'tensorflow':
        from Net.TensorFlow.network import DetectionNetwork
    elif chosen == 'keras':
        # The Keras backend lives outside the package root; make it importable.
        sys.path.append('Net/Keras')
        from Net.Keras.network import DetectionNetwork
    else:
        raise SystemExit(('%s not supported! Supported frameworks: Keras, TensorFlow') % (framework))

    return net_prop, DetectionNetwork
def readConfig():
    """
    Read and parse the YML configuration file given as the first CLI argument.

    @return: parsed configuration (as returned by yaml.safe_load)
    @raise SystemExit: on YAML syntax errors, on a missing CLI argument,
                       or when the file cannot be opened.
    """
    try:
        with open(sys.argv[1], 'r') as stream:
            return yaml.safe_load(stream)
    except yaml.YAMLError as exc:
        print(exc)
        raise SystemExit('Error: Cannot read/parse YML file. Check YAML syntax.')
    # Narrowed from a bare `except:`: only a missing argv entry (IndexError)
    # or an unreadable file (IOError/OSError) should print the usage line;
    # any other exception is a real bug and must surface with its traceback.
    except (IndexError, IOError):
        raise SystemExit('\n\tUsage: python2 objectdetector.py objectdetector.yml\n')
if __name__ == '__main__':
    # Load the YML config and build the camera and network from it.
    cfg = readConfig()
    cam = selectVideoSource(cfg)
    net_prop, DetectionNetwork = selectNetwork(cfg)

    # Threading the camera...
    t_cam = ThreadCamera(cam)
    t_cam.start()

    # The network reads frames from the camera object it is given here.
    network = DetectionNetwork(net_prop)
    network.setCamera(cam)
    t_network = ThreadNetwork(network)
    t_network.start()

    # Qt GUI: it is wired to both the camera and the network threads so it
    # can display the live feed and the detection results.
    app = QtWidgets.QApplication(sys.argv)
    window = GUI()
    window.setCamera(cam, t_cam)
    window.setNetwork(network, t_network)
    window.show()

    # Threading GUI
    t_gui = ThreadGUI(window)
    t_gui.start()

    # Report each thread's requested cycle period (milliseconds).
    print("")
    print("Requested timers:")
    print(" Camera: %d ms" % (t_cam.t_cycle))
    print(" GUI: %d ms" % (t_gui.t_cycle))
    print(" Network: %d ms" % (t_network.t_cycle))
    print("")

    # Block on the Qt event loop; exit with its return code.
    sys.exit(app.exec_())