import cv2 as cv
import time
# Detection confidence and non-maximum-suppression thresholds
Conf_threshold = 0.5
NMS_threshold = 0.4
# Colors cycled through per class id when drawing boxes and labels
COLORS = [(0, 255, 0), (0, 0, 255), (255, 0, 0),
          (255, 255, 0), (255, 0, 255), (0, 255, 255)]
# Load the class names (one per line) from classes.txt
class_name = []
with open('classes.txt', 'r') as f:
    class_name = [cname.strip() for cname in f.readlines()]
# print(class_name)
# Load the YOLO network; the yolov4-tiny / yolov4 lines are alternative models
#net = cv.dnn.readNet('yolov4-tiny.weights', 'yolov4-tiny.cfg')
#net = cv.dnn.readNet('yolov4.weights', 'yolov4.cfg')
net = cv.dnn.readNet('yolov3.weights', 'yolov3.cfg')
# Uncomment to run on the GPU if OpenCV was built with CUDA support
#net.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
#net.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA_FP16)
# Open the default webcam (index 0); a video file can be used instead
cap = cv.VideoCapture(0)
#cap = cv.VideoCapture('output.avi')
# Read one frame up front to get the capture dimensions
ret, frame = cap.read()
img_size_h, img_size_w, channels = frame.shape
# Wrap the network in a DetectionModel: pixels scaled to [0, 1], BGR swapped to RGB.
# Note: YOLO input width/height should be multiples of 32 (416x416 is the usual choice).
model = cv.dnn_DetectionModel(net)
#model.setInputParams(size=(416, 416), scale=1/255, swapRB=True)
model.setInputParams(size=(img_size_w, img_size_h), scale=1/255, swapRB=True)
# Track elapsed time and frame count to compute FPS
starting_time = time.time()
frame_counter = 0
while True:
    ret, frame = cap.read()
    frame_counter += 1
    if not ret:
        break
    # Run detection: returns class ids, confidence scores, and [x, y, w, h] boxes
    classes, scores, boxes = model.detect(frame, Conf_threshold, NMS_threshold)
    for (classid, score, box) in zip(classes, scores, boxes):
        # Only draw detections of the "person" class
        if class_name[int(classid)] != "person":
            continue
        color = COLORS[int(classid) % len(COLORS)]
        label = "%s : %.2f" % (class_name[int(classid)], score * 100)
        #label = "%f" % round(score * 100, 2)
        cv.rectangle(frame, box, color, 1)
        cv.putText(frame, label, (box[0], box[1] - 10),
                   cv.FONT_HERSHEY_COMPLEX, 0.3, color, 1)
    # Average FPS since the start of the loop, drawn in the top-left corner
    endingTime = time.time() - starting_time
    fps = "FPS: %.2f" % (frame_counter / endingTime)
    # print(fps)
    cv.putText(frame, fps, (20, 50),
               cv.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 0), 2)
    cv.imshow('frame', frame)
    key = cv.waitKey(1)
    if key == ord('q'):
        break
cap.release()
cv.destroyAllWindows()
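
A note on prerequisites: the script expects yolov3.weights, yolov3.cfg, and classes.txt (one class name per line, e.g. the COCO labels) in the working directory. The commented-out lines show how to swap in yolov4 or yolov4-tiny, read from a video file instead of the webcam, or enable the CUDA backend if your OpenCV build supports it. As a minimal sketch (using the same file names as above), a pre-flight check before loading the network could look like this:

import os

# Hypothetical pre-flight check: fail early if any model file is missing
for fname in ('yolov3.weights', 'yolov3.cfg', 'classes.txt'):
    if not os.path.isfile(fname):
        raise FileNotFoundError('Missing required file: %s' % fname)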