Face Tracking and Object Tracking with dlib (demo)

Posted by 月疯

Assuming dlib is already installed, let's go straight to the code. The focus here is tracking in video. There are still plenty of problems: this library falls well short of YOLO-based detectors, but it is worth studying.
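A minimal sketch to verify the environment before running the demos, assuming dlib and opencv-python were installed via pip (the pip package names are my assumption, not from the original post):

# Quick environment check (hypothetical, not part of the original demo)
# pip install opencv-python dlib
import cv2
import dlib

print("OpenCV version:", cv2.__version__)
print("dlib version:", dlib.__version__)

# The HOG frontal face detector ships with dlib, so no model download is needed
detector = dlib.get_frontal_face_detector()
print("face detector ready:", detector is not None)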

# Face tracking
# 1. Import libraries
import cv2
import dlib

# 2. Main function
def main():
    # 3. Open the camera
    # capture = cv2.VideoCapture(0)
    # Read a local video file instead
    capture = cv2.VideoCapture("../videos/2925646b8bb765b59b79b3ef78cbea49.mp4")

    # 4. Get the dlib frontal face detector
    detector = dlib.get_frontal_face_detector()

    # 5. Create the dlib correlation tracker
    tracker = dlib.correlation_tracker()

    # 6. Tracking state
    tracking_state = False

    # 7. Read frames in a loop
    while True:
        ret, frame = capture.read()
        if not ret:  # end of the video or camera error
            break

        # 8. If not tracking yet, run the detector and start the tracker
        if tracking_state is False:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = detector(gray, 1)  # detected faces
            if len(faces) > 0:
                tracker.start_track(frame, faces[0])
                tracking_state = True

        # 9. While tracking, update the tracker, get the face position and draw it
        if tracking_state is True:
            tracker.update(frame)  # update with the new frame
            position = tracker.get_position()  # current face rectangle
            cv2.rectangle(frame, (int(position.left()), int(position.top())),
                          (int(position.right()), int(position.bottom())), (0, 255, 0), 3)

        cv2.imshow("face tracking", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
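
One weakness of the loop above is drift: once the correlation tracker locks onto a wrong region it never recovers, which is part of why the results lag far behind YOLO-style detectors. dlib's correlation_tracker.update() returns a confidence score (a peak-to-side-lobe ratio), so the loop can fall back to re-detection when the score drops. A sketch of that idea as a helper function; the function name and the 7.0 threshold are my own assumptions, not from the original post:

import cv2
import dlib


def track_or_redetect(frame, detector, tracker, tracking_state, psr_threshold=7.0):
    # Hypothetical helper: update the tracker and, when its confidence
    # (peak-to-side-lobe ratio) falls below psr_threshold, re-run the detector.
    # Returns (tracking_state, rect_or_None).
    if tracking_state:
        confidence = tracker.update(frame)
        if confidence >= psr_threshold:
            return True, tracker.get_position()
        tracking_state = False  # confidence too low, treat the face as lost
    # Not tracking (or just lost): detect again and restart the tracker
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, 1)
    if len(faces) > 0:
        tracker.start_track(frame, faces[0])
        return True, tracker.get_position()
    return False, None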

# Face tracking with on-screen status messages,
# saving the camera video to a file
# 1. Import libraries
import cv2
import dlib


# Show status messages; the frame and the tracking state are passed in
def show_info(frame, tracking_state):
    # Positions of the status text
    pos1 = (20, 40)
    pos2 = (20, 80)
    cv2.putText(frame, "'1':reset", pos1, cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255))  # position, font, scale, color
    # Show a different message depending on the tracking state
    if tracking_state is True:
        cv2.putText(frame, "tracking now ......", pos2, cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 0, 0))
    else:
        cv2.putText(frame, "no tracking  ......", pos2, cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0))


# 2. Main function
def main():
    # 3. Open the camera
    capture = cv2.VideoCapture(0)

    # 4. Get the dlib frontal face detector
    detector = dlib.get_frontal_face_detector()

    # 5. Create the dlib correlation tracker
    tracker = dlib.correlation_tracker()

    # 6. Tracking state
    tracking_state = False

    # Extra feature: save the video
    frame_width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    frame_height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frame_fps = capture.get(cv2.CAP_PROP_FPS)
    # Video codec
    fourcc = cv2.VideoWriter_fourcc(*"XVID")
    # Output writer: (file name, codec, frames per second, (width, height), True for color / False for grayscale)
    out_put = cv2.VideoWriter("record.avi", fourcc, int(frame_fps), (int(frame_width), int(frame_height)), True)

    # 7. Read frames in a loop
    while True:
        ret, frame = capture.read()
        if not ret:
            break
        # Show the status messages
        show_info(frame, tracking_state)
        # 8. If not tracking yet, run the detector and start the tracker
        if tracking_state is False:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = detector(gray, 1)  # detected faces
            if len(faces) > 0:
                tracker.start_track(frame, faces[0])
                tracking_state = True

        # 9. While tracking, update the tracker, get the face position and draw it
        if tracking_state is True:
            tracker.update(frame)  # update with the new frame
            position = tracker.get_position()  # current face rectangle
            cv2.rectangle(frame, (int(position.left()), int(position.top())),
                          (int(position.right()), int(position.bottom())), (0, 255, 0), 3)

        cv2.imshow("face tracking", frame)
        # Save the frame
        out_put.write(frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break

    out_put.release()
    capture.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
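
A caveat with the recording code: on many webcams capture.get(cv2.CAP_PROP_FPS) returns 0, which makes the VideoWriter produce an empty or unplayable file. A small sketch of a guard; the helper name and the 20 fps fallback are my own assumptions:

import cv2


def make_writer(capture, path="record.avi", fallback_fps=20.0):
    # Hypothetical helper: build a VideoWriter matched to the capture and fall
    # back to a fixed frame rate when the camera reports 0 fps.
    width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = capture.get(cv2.CAP_PROP_FPS)
    if fps <= 0:
        fps = fallback_fps
    fourcc = cv2.VideoWriter_fourcc(*"XVID")
    return cv2.VideoWriter(path, fourcc, float(fps), (width, height), True)

# Usage inside main(), replacing the writer setup above:
# out_put = make_writer(capture)
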
Saving the video captured from the camera:

# Face tracking, saving the camera video to a file
# 1. Import libraries
import cv2
import dlib


# 2. Main function
def main():
    # 3. Open the camera
    capture = cv2.VideoCapture(0)

    # 4. Get the dlib frontal face detector
    detector = dlib.get_frontal_face_detector()

    # 5. Create the dlib correlation tracker
    tracker = dlib.correlation_tracker()

    # 6. Tracking state
    tracking_state = False

    # Extra feature: save the video
    frame_width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    frame_height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frame_fps = capture.get(cv2.CAP_PROP_FPS)
    # Video codec
    fourcc = cv2.VideoWriter_fourcc(*"XVID")
    # Output writer: (file name, codec, frames per second, (width, height), True for color / False for grayscale)
    out_put = cv2.VideoWriter("record.avi", fourcc, int(frame_fps), (int(frame_width), int(frame_height)), True)

    # 7. Read frames in a loop
    while True:
        ret, frame = capture.read()
        if not ret:
            break

        # 8. If not tracking yet, run the detector and start the tracker
        if tracking_state is False:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = detector(gray, 1)  # detected faces
            if len(faces) > 0:
                tracker.start_track(frame, faces[0])
                tracking_state = True

        # 9. While tracking, update the tracker, get the face position and draw it
        if tracking_state is True:
            tracker.update(frame)  # update with the new frame
            position = tracker.get_position()  # current face rectangle
            cv2.rectangle(frame, (int(position.left()), int(position.top())),
                          (int(position.right()), int(position.bottom())), (0, 255, 0), 3)

        cv2.imshow("face tracking", frame)
        # Save the frame
        out_put.write(frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break

    out_put.release()
    capture.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()

Object tracking:

Workflow: draw a box around the object region with the mouse, then have the tracker follow the object as it moves.

# Object detection and tracking

# 1. Import libraries
import cv2
import dlib


# Define a method to show the on-screen instructions
def show_info(frame, tracking_state):
    pos1 = (10, 20)
    pos2 = (10, 40)
    pos3 = (10, 60)

    info1 = "press left button, drag to select an area, then start tracking"
    info2 = "'1': start tracking, '2': stop tracking, 'q': exit"
    cv2.putText(frame, info1, pos1, cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255))
    cv2.putText(frame, info2, pos2, cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255))
    if tracking_state:
        cv2.putText(frame, "tracking now...", pos3, cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0))
    else:
        cv2.putText(frame, "stop tracking ...", pos3, cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 0, 0))


points = []  # stores the mouse coordinates


# Define a method: handle mouse click events
def mouse_event_handler(event, x, y, flags, params):
    global points  # global variable
    if event == cv2.EVENT_LBUTTONDOWN:  # left button pressed
        points = [(x, y)]
    elif event == cv2.EVENT_LBUTTONUP:  # left button released
        points.append((x, y))


# 2. Open the camera
capture = cv2.VideoCapture(0)

# 3. Window name
nameWindow = "Object Tracking"
# 4. Bind the mouse callback to the window
cv2.namedWindow(nameWindow)
cv2.setMouseCallback(nameWindow, mouse_event_handler)

# 5. Create the tracker: dlib.correlation_tracker()
tracker = dlib.correlation_tracker()

# 6. Initial tracking state
tracking_state = False

# 7. Read the video stream in a loop
while True:
    # 8. Grab a frame
    ret, frame = capture.read()
    if not ret:
        break
    # 9. Show the instructions
    show_info(frame, tracking_state)
    # 10. Once two points have been captured, draw the selection rectangle and
    #     build the dlib.rectangle the tracker will be started on
    if len(points) == 2:
        cv2.rectangle(frame, points[0], points[1], (0, 255, 0), 3)
        dlib_rect = dlib.rectangle(points[0][0], points[0][1], points[1][0], points[1][1])
    # 11. If tracking, update the tracker, get the position and draw the rectangle
    if tracking_state is True:
        tracker.update(frame)  # update with the new frame
        pos = tracker.get_position()  # current object rectangle
        cv2.rectangle(frame, (int(pos.left()), int(pos.top())), (int(pos.right()), int(pos.bottom())), (0, 255, 0), 3)

    # 12. Handle the keys '1', '2' and 'q'
    key = cv2.waitKey(1) & 0xFF

    if key == ord('1'):
        if len(points) == 2:
            tracker.start_track(frame, dlib_rect)
            tracking_state = True
            points = []
    if key == ord('2'):
        points = []
        tracking_state = False

    if key == ord('q'):
        break

    # 13. Show the result
    cv2.imshow(nameWindow, frame)

# Clean up
capture.release()
cv2.destroyAllWindows()
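
dlib.rectangle expects left < right and top < bottom, so the selection above only works when the box is dragged from the top-left corner toward the bottom-right. A small sketch that normalizes the two mouse points before building the rectangle; the helper name is my own, not from the original post:

import dlib


def rect_from_points(p1, p2):
    # Hypothetical helper: build a dlib.rectangle from two corner points
    # regardless of the direction the box was dragged in.
    left, right = min(p1[0], p2[0]), max(p1[0], p2[0])
    top, bottom = min(p1[1], p2[1]), max(p1[1], p2[1])
    return dlib.rectangle(left, top, right, bottom)

# Usage inside the loop, replacing the direct dlib.rectangle(...) call:
# dlib_rect = rect_from_points(points[0], points[1])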
