The code is as follows:
import os
import yolov3
import cv2
import numpy as np
import threading
from timeit import default_timer as timer
def detect_camera(videoPath=None, loop=None, output_path="", trackingAlt=True):
"""
检测本地视频目标或者检测开启摄像头后视频的目标
:param videoPath: 视频的本地地址;默认或者为0表示开启摄像头
:param loop: 每隔(loop-1)帧做一次检测
:param output_path: 是否保存地址
:param trackingAlt: 是否使用跟踪算法
:return:
"""
    if videoPath is None or videoPath == 0:
        cap = cv2.VideoCapture(0)
        if not cap.isOpened():
            print("The camera is not opening!")
            return
    else:
        cap = cv2.VideoCapture(videoPath)
        if not cap.isOpened():
            print("Invalid address entered!")
            return
    if loop is None:
        loop = 5
    if loop < 1:
        print("Please enter an integer greater than or equal to one. loop: {}".format(loop))
        return
    video_FourCC = int(cap.get(cv2.CAP_PROP_FOURCC))
    video_fps = cap.get(cv2.CAP_PROP_FPS)
    video_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = output_path != ""
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    loc = []       # box locations: x, y, w, h
    scores = []    # confidence scores
    lab = []       # object labels
    frame = 0      # frame counter, controls the detect/track alternation
    initImg = []   # frame used to initialize the tracking algorithm
    while True:
        return_value, img = cap.read()
        if not return_value:
            print("The video is over!")
            break
        pro_time = timer()
        if frame % loop == 0:
            loc, scores, lab = yolov3.yolo_detect(img)  # object detection
            initImg = img
        else:
            if trackingAlt:
                loc, scores, lab = tracking(initImg, img, loc, scores, lab, frame)  # object tracking
            else:
                loc, scores, lab = yolov3.yolo_detect(img)
        post_time = timer()
        exec_time = post_time - pro_time
        curr_fps = 1 / exec_time
        fps = '{}: {:.3f}'.format('FPS', curr_fps)
        (fps_w, fps_h), baseline = cv2.getTextSize(fps, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)
        cv2.rectangle(img, (2, 20 - fps_h - baseline), (2 + fps_w, 18), color=(0, 0, 0), thickness=-1)
        cv2.putText(img, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.5, color=(255, 255, 255), thickness=2)
        cv2.imshow("result", img)
        if isOutput:
            out.write(img)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q') or key == 27:  # quit on 'q' or Esc
            break
        frame = (frame + 1) % loop
    cap.release()
    if isOutput:
        out.release()
    cv2.destroyAllWindows()
tracker = cv2.MultiTracker_create()  # global multi-object tracker (re-created on every re-initialization)
COLORS = np.random.randint(0, 255, size=(80, 3), dtype='uint8')  # random colors for drawing boxes
def tracking(initImg, img, loc, scores, lab, frame):
"""
使用跟踪算法跟踪目标
:param initImg: 初始化的图片
:param img: 需要跟踪的图片
:param loc: 初始化的位置信息
:param scores: 初始化的置信度
:param lab: 初始化的类别
:param frame: 判断是否需要初始化
:return:
"""
# 初始化跟踪器
if frame == 1:
global tracker # 这步很重要,每次初始化跟踪时需要清除原先所跟踪的目标;否则,跟踪的目标会累加
tracker = cv2.MultiTracker_create() # tracker.clear()不能清除原先所跟踪的目标;暂时只能写成这样
for i, newbox in enumerate(loc):
# TrackerCSRT_create(),TrackerKCF_create(),TrackerMOSSE_create()
ok = tracker.add(cv2.TrackerKCF_create(), initImg, (newbox[0], newbox[1], newbox[2], newbox[3]))
if not ok:
print("The tracker initialization failed!")
return
    ok, boxes = tracker.update(img)
    if ok:
        loc = boxes
        for i, newbox in enumerate(boxes):
            color = [int(c) for c in COLORS[i]]
            x, y, w, h = int(newbox[0]), int(newbox[1]), int(newbox[2]), int(newbox[3])
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
            text = '{}: {:.3f}'.format(lab[i], scores[i])
            (text_w, text_h), baseline = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)
            cv2.rectangle(img, (x, y - text_h - baseline), (x + text_w, y), color, -1)
            cv2.putText(img, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
    return loc, scores, lab
if __name__ == "__main__":
    detect_camera(videoPath="./video/aa.avi", loop=10, trackingAlt=True, output_path="aa1.avi")  # videoPath="./video/12.avi"
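
The script imports a local yolov3 module and only assumes that yolov3.yolo_detect(img) returns three parallel lists: box locations as (x, y, w, h), confidence scores, and class labels. That module is not shown here; to exercise the detect/track loop without a trained model, a minimal hypothetical stub along these lines would satisfy the assumed interface (the single hard-coded box is purely illustrative):

# yolov3.py -- hypothetical stub matching the interface assumed by detect_camera()
# A real implementation would load YOLOv3 weights and run inference; this just returns one fixed box.
def yolo_detect(img):
    h, w = img.shape[:2]
    loc = [(w // 4, h // 4, w // 2, h // 2)]  # one box around the image center: x, y, w, h
    scores = [0.99]                           # dummy confidence score
    lab = ["object"]                          # dummy class label
    return loc, scores, lab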
Results (frame size is 1080*1920): the lower FPS readings are from the detection frames, the higher ones from the frames handled by the tracking algorithm (no GPU, just a Pentium CPU [facepalm]).
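
One compatibility note: cv2.MultiTracker_create() and the per-object tracker constructors used above come from opencv-contrib-python. In OpenCV 4.5.1 and later the multi-tracker API was moved into the legacy namespace, so on a newer installation the constructors would presumably need to be resolved along these lines (untested sketch, assuming an opencv-contrib-python build):

# Assumption: newer opencv-contrib-python builds expose the old tracker API under cv2.legacy
if hasattr(cv2, "legacy"):
    MultiTracker_create = cv2.legacy.MultiTracker_create
    TrackerKCF_create = cv2.legacy.TrackerKCF_create
else:  # older OpenCV 3.x / early 4.x contrib builds
    MultiTracker_create = cv2.MultiTracker_create
    TrackerKCF_create = cv2.TrackerKCF_create
# tracker = MultiTracker_create() and tracker.add(TrackerKCF_create(), ...) then work in both cases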