-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdetector.py
More file actions
116 lines (84 loc) · 4.19 KB
/
detector.py
File metadata and controls
116 lines (84 loc) · 4.19 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
import cv2
import numpy as np
import time
import os
# Fixed seed so the per-class box colors drawn by Detector are reproducible
# across runs (colorList is sampled from np.random in readClasses).
np.random.seed(20)
class Detector:
    """Object detector built on OpenCV's DNN DetectionModel.

    Loads an SSD-style network (frozen weights + config) and a class-name
    file, then annotates images or video frames with labelled bounding
    boxes, per-class colors, and thick corner accents.
    """

    def __init__(self, videoPath, configPath, modelPath, classesPath):
        """Store paths, build the network, and load class names/colors.

        videoPath   -- video file for onVideo() or image file for onImage()
        configPath  -- network configuration file (e.g. a .pbtxt)
        modelPath   -- frozen model weights (e.g. a .pb)
        classesPath -- text file with one class name per line
        """
        self.videoPath = videoPath
        self.configPath = configPath
        self.modelPath = modelPath
        self.classesPath = classesPath
        self.net = cv2.dnn_DetectionModel(self.modelPath, self.configPath)
        # Standard SSD-MobileNet preprocessing: 320x320 input, pixels scaled
        # to roughly [-1, 1] around a per-channel mean, BGR->RGB swap.
        self.net.setInputSize(320, 320)
        self.net.setInputScale(1.0 / 127.5)
        # Bug fix: original mean was (127.5, 125.5, 127.5); the 125.5 in the
        # green channel looks like a typo for the uniform 127.5 that matches
        # the 1/127.5 input scale above.
        self.net.setInputMean((127.5, 127.5, 127.5))
        self.net.setInputSwapRB(True)
        self.readClasses()

    def readClasses(self):
        """Read class names from classesPath and assign a color per class."""
        with open(self.classesPath, 'r') as f:
            self.classesList = f.read().splitlines()
        # SSD-style models reserve label id 0 for the background class.
        self.classesList.insert(0, '__Background__')
        # One random (module-seeded) color per class, used when drawing.
        self.colorList = np.random.uniform(low=0, high=255, size=(len(self.classesList), 3))

    def _drawCornerAccents(self, image, x, y, w, h, color):
        """Draw thick L-shaped accents on the four corners of a box.

        Replaces eight near-duplicate cv2.line calls: each corner gets a
        horizontal and a vertical segment pointing into the box.
        """
        lineWidth = min(int(w * 0.3), int(h * 0.3))
        # (corner point, (horizontal direction, vertical direction))
        corners = [
            ((x, y), (1, 1)),            # top-left: right and down
            ((x + w, y), (-1, 1)),       # top-right: left and down
            ((x, y + h), (1, -1)),       # bottom-left: right and up
            ((x + w, y + h), (-1, -1)),  # bottom-right: left and up
        ]
        for (cx, cy), (dx, dy) in corners:
            cv2.line(image, (cx, cy), (cx + dx * lineWidth, cy), color, thickness=5)
            cv2.line(image, (cx, cy), (cx, cy + dy * lineWidth), color, thickness=5)

    def detectObjects(self, image):
        """Detect objects in a BGR image and annotate it in place.

        Returns (image, detectedClasses) where detectedClasses is the set
        of class-name strings that survived non-maximum suppression.
        """
        classLabelIDs, confidences, bboxs = self.net.detect(image, confThreshold=0.5)
        detectedClasses = set()
        # Robustness: with no raw detections, skip the reshape/NMS below,
        # which would otherwise operate on empty arrays.
        if len(bboxs) == 0:
            return image, detectedClasses
        bboxs = list(bboxs)
        confidences = [float(c) for c in np.array(confidences).reshape(-1)]
        # Non-maximum suppression drops overlapping duplicate boxes.
        bboxIdx = cv2.dnn.NMSBoxes(bboxs, confidences, score_threshold=0.5, nms_threshold=0.2)
        if len(bboxIdx) == 0:
            return image, detectedClasses
        for rawIdx in np.array(bboxIdx).flatten():
            idx = int(rawIdx)
            bbox = bboxs[idx]
            classConfidence = confidences[idx]
            # int() guards against NumPy scalar / 1-element-array label ids,
            # which would not be valid list indices.
            classLabelID = int(np.squeeze(classLabelIDs[idx]))
            classLabel = self.classesList[classLabelID]
            classColor = [int(c) for c in self.colorList[classLabelID]]
            detectedClasses.add(classLabel)
            displayText = "{}:{:.2f}".format(classLabel, classConfidence)
            x, y, w, h = bbox
            cv2.rectangle(image, (x, y), (x + w, y + h), color=classColor, thickness=1)
            cv2.putText(image, displayText, (x, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, classColor, 2)
            self._drawCornerAccents(image, x, y, w, h, classColor)
        return image, detectedClasses

    def onVideo(self):
        """Run detection over self.videoPath frame by frame; 'q' quits."""
        cap = cv2.VideoCapture(self.videoPath)
        if not cap.isOpened():
            print("Error opening video file...")
            return
        # Bug fix: seeding with time.time() (original used 0) keeps the
        # first frame's FPS readout from being a meaningless value.
        prevTime = time.time()
        while True:
            success, image = cap.read()
            if not success:
                break
            currentTime = time.time()
            elapsed = currentTime - prevTime
            # Guard against a zero interval on very fast consecutive reads.
            fps = 1.0 / elapsed if elapsed > 0 else 0.0
            prevTime = currentTime
            image, _ = self.detectObjects(image)
            cv2.putText(image, "FPS: " + str(int(fps)), (20, 70), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
            cv2.imshow("Result", image)
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()

    def onImage(self):
        """Run detection on the single image at self.videoPath and show it."""
        image = cv2.imread(self.videoPath)
        if image is None:
            print("Error opening image file...")
            return
        image, detectedClasses = self.detectObjects(image)
        print("Detected objects in the image:", detectedClasses)
        cv2.imshow("Result", image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()