基于旭日派的Ros系统小车开发——实时人脸识别脚本

1. 基于MiPi相机的人脸识别

        在没有看过我写的MiPi相机相关作品之前,先看这篇文章:基于旭日派的Ros系统小车的再开发——使用python脚本调用MIPI相机(学习笔记)-CSDN博客

        其他相关作品:

基于旭日派的Ros系统小车的再开发——使用python脚本调用深度相机(学习笔记)-CSDN博客

Linux下快速执行.sh文件的脚本-CSDN博客

1.1 Jupyter Notebook代码:

        没有jupyter环境的,首先还是一样去找方法去安装,C站很多教程教你怎么安装jupyter环境,这里不多做赘述。直接先上代码:

@ 人脸检测器(默认):haarcascade_frontalface_default.xml

@ 人脸检测器(侧视):haarcascade_profileface.xml 

        这两个文件统一说一下,私信我或者去我主页找付费下载即可。后面的代码均是基于这两个xml文件来进行的。

###下面的代码jupyter内应该是n个模块,这里为方便起见,我直接放一块了,统一说明,中间用回车空三行的就是不同的模块,注意使用时去区分。###

# Language toggle for widget labels: False -> English, True -> Chinese.
g_ENABLE_CHINESE = False

# Each entry maps a widget key to an (English, Chinese) label pair; the pair
# is indexed directly with g_ENABLE_CHINESE (False == 0, True == 1).
Name_widgets = {
    'Start': ("Start", "开始"),
    'Close_Camera': ("Close_Camera", "关闭摄像头"),
}



# 显示摄像头组件 Camera display widgets
import ipywidgets.widgets as widgets
from IPython.display import display

import cv2
import time
import numpy as np
import threading



def bgr8_to_jpeg(value, quality=75):
    """Encode a BGR8 image array as JPEG bytes.

    Args:
        value: BGR uint8 image (numpy array) as produced by the camera.
        quality: JPEG quality 0-100. Bug fix: the original accepted this
            parameter but never used it, so every frame was encoded at
            OpenCV's default quality.

    Returns:
        bytes: the JPEG-encoded image.

    Raises:
        ValueError: if OpenCV fails to encode the frame (the original
            ignored the success flag returned by imencode).
    """
    ok, buf = cv2.imencode('.jpg', value,
                           [int(cv2.IMWRITE_JPEG_QUALITY), int(quality)])
    if not ok:
        raise ValueError("JPEG encoding failed")
    return bytes(buf)



from SunriseRobotLib import Mipi_Camera

# Capture resolution shared by the camera and the display widget.
img_width = 320
img_height = 240
g_camera = Mipi_Camera(img_width, img_height)

# Report whether the MIPI camera opened successfully.
print("Open Camera OK" if g_camera.isOpened() else "Fail To Open Camera")

# Flags shared between the UI callbacks and the capture thread.
g_stop_program = False    # set True to make the capture loop exit
g_start_function = False  # True while face detection is enabled
g_car_runtime = -1



# Camera preview widget; display size mirrors the capture resolution so
# frames are shown 1:1 inside the notebook.
DISPLAY_WIDTH = img_width
DISPLAY_HEIGHT = img_height
image_widget = widgets.Image(format='jpeg',
                             width=DISPLAY_WIDTH,
                             height=DISPLAY_HEIGHT)



# Start button: toggles face detection; its icon ('uncheck'/'check') is used
# as the toggle state by the click handlers.
Button_Start = widgets.Button(
    description=Name_widgets['Start'][g_ENABLE_CHINESE],
    button_style='success', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Description',
    icon='uncheck' )

# Close_Camera button: stops the display thread and releases the camera.
# Bug fix: the original passed value=False, but ipywidgets Button has no
# `value` trait — newer ipywidgets versions reject unknown keyword arguments.
Button_Close_Camera = widgets.Button(
    description=Name_widgets['Close_Camera'][g_ENABLE_CHINESE],
    button_style='danger', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Description',
    icon='uncheck' )



# Click handler for the Close_Camera button.
def on_button_clicked(b):
    """Stop the capture loop and release the camera when Close_Camera is pressed."""
    global g_stop_program
    b.icon = 'check'
    with output:
        print("Button clicked:", b.description)
    if b.description != Name_widgets['Close_Camera'][g_ENABLE_CHINESE]:
        return
    Button_Start.icon = 'uncheck'  # visually reset the Start toggle
    g_stop_program = True          # signal the capture thread to exit
    time.sleep(.1)                 # give the thread a moment to observe the flag
    g_camera.release()
    b.icon = 'uncheck'
        

# Click handler for the Start button: flips face detection on/off.
def on_button_function_clicked(b):
    """Toggle g_start_function; the button icon mirrors the toggle state."""
    global g_start_function
    if b.description == Name_widgets['Start'][g_ENABLE_CHINESE]:
        enabling = (b.icon == 'uncheck')
        b.icon = 'check' if enabling else 'uncheck'
        g_start_function = enabling
    with output:
        print("Button clicked:", b.description, g_start_function)



# Wire each button's press event to its handler.
Button_Start.on_click(on_button_function_clicked)
Button_Close_Camera.on_click(on_button_clicked)



# Frontal-face Haar cascade; swap the file for haarcascade_profileface.xml
# to detect profile (side-view) faces instead.
face_haar = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

def face_detect(image):
    """Detect the first face in a BGR image.

    Returns ("1", (x, y, w, h)) for the first face found, or
    (None, (0, 0, 0, 0)) when no face is detected.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_haar.detectMultiScale(gray, 1.1, 3)
    if len(faces) > 0:
        x, y, w, h = faces[0]
        return "1", (x, y, w, h)
    return None, (0, 0, 0, 0)



# NOTE(review): this cell is an exact duplicate of the previous face_detect
# cell in the listing; the second definition simply shadows the first.
face_haar = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# face_haar = cv2.CascadeClassifier("haarcascade_profileface.xml")

def face_detect(image):
    # Returns ("1", bbox) for the first detected face, else (None, zeros).
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_haar.detectMultiScale(gray_img, 1.1, 3)
    for face_x,face_y,face_w,face_h in faces:
        # The loop returns on its first iteration, i.e. only the first face.
        return "1", (face_x,face_y,face_w,face_h)
    return None, (0,0,0,0)



# Build the UI BEFORE starting the worker thread: the button callbacks and
# the thread both write into `output`, which the original created only after
# the thread had already been started.
image_container = widgets.HBox([image_widget])
button_group = widgets.HBox([Button_Start, Button_Close_Camera])
output = widgets.Output()
box_display = widgets.VBox([image_container, button_group, output])

# Start the camera display task in a background daemon thread.
# NOTE(review): task_processing is not defined anywhere in this listing —
# the cell that defines it appears to be missing from the article; confirm
# before running.
# Bug fix: Thread.setDaemon() is deprecated; pass daemon=True instead.
thread1 = threading.Thread(target=task_processing, daemon=True)
thread1.start()

display(box_display)

        这里Jupyter Notebook代码相比于后面的几个代码来说有一个很好的一点就是,Jupyter这里做到了多线程去进行识别:点击界面中的Start按钮后,实时显示的图像才会进行人脸识别;关闭状态下,仅仅起到图像采集的作用。这样做的好处是避免了资源损耗,避免机器长时间进行识别而导致机器寿命下降等一系列问题。

1.2 Python代码:

import cv2
import time
import ipywidgets.widgets as widgets
from IPython.display import display
from SunriseRobotLib import Mipi_Camera


# Load the Haar cascade used for frontal-face detection.
face_haar = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

# Open the MIPI camera at 320x240 with debug logging enabled.
width = 320
height = 240
g_camera = Mipi_Camera(width, height, debug=True)

print("Open Camera OK!" if g_camera.isOpened() else "Fail To Open Camera!")

def bgr8_to_jpeg(value, quality=75):
    """Encode a BGR8 frame as JPEG bytes for the Jupyter image widget.

    Bug fix: the original accepted `quality` but never used it; it is now
    passed to OpenCV via IMWRITE_JPEG_QUALITY, and the encode success flag
    is checked instead of being ignored.
    """
    ok, buf = cv2.imencode('.jpg', value,
                           [int(cv2.IMWRITE_JPEG_QUALITY), int(quality)])
    if not ok:
        raise ValueError("JPEG encoding failed")
    return bytes(buf)

# In-notebook preview widget sized to the capture resolution.
image_widget = widgets.Image(format='jpeg', width=320, height=240)

# display(image_widget)  # uncomment to render the preview inside Jupyter

try:
    m_fps = 0
    t_start = time.time()
    saved_count = 1  # sequence number prepended to the next saved file
    while g_camera.isOpened():
        ret, frame = g_camera.read()
        if not ret:
            print("Camera Read Fail!")
            break

        # Detect faces on a grayscale copy of the frame.
        gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_haar.detectMultiScale(gray_img, 1.1, 3)

        # Outline every detected face.
        for (face_x, face_y, face_w, face_h) in faces:
            cv2.rectangle(frame, (face_x, face_y), (face_x + face_w, face_y + face_h),
                          (0, 255, 255), 1)

        # Rolling FPS estimate, re-based every 2 seconds.
        m_fps = m_fps + 1
        fps = m_fps / (time.time() - t_start)
        if (time.time() - t_start) >= 2:
            m_fps = fps
            t_start = time.time() - 1

        text = "FPS: " + str(int(fps))
        cv2.putText(frame, text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
        image_widget.value = bgr8_to_jpeg(frame)

        cv2.imshow('Frame', frame)

        # Read the keyboard ONCE per frame. Bug fix: the original called
        # waitKey twice per iteration, so a key press could be consumed by
        # the wrong check and lost.
        k = cv2.waitKey(1) & 0xFF
        if k == ord('s'):
            # Save the frame as NNNN_YYYYmmdd_HHMMSS.jpg in the working dir.
            timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
            filename = f"{saved_count:04d}_{timestamp}.jpg"
            cv2.imwrite(filename, frame)
            # Bug fix: the original printed the literal text "(unknown)"
            # instead of interpolating the saved filename.
            print(f"Saved {filename} successfully!")
            saved_count += 1
        elif k == ord(' '):
            # Space exits the loop.
            break

except Exception as e:
    print("Exception: ", e)
finally:
    # Bug fix: the original never released the camera or closed the preview
    # window, leaving the device locked after exit.
    g_camera.release()
    cv2.destroyAllWindows()

        这里的python代码写得也很简单,并没有做复杂的多线程化处理,毕竟是python脚本嘛,一切从简即可。所以相较于上面的jupyter代码,不足之处就是没有做优化处理,执行之后可能会出现掉帧或者卡顿的情况(是否出现取决于机器的配置);好处则是使用python脚本会很方便,易于修改、更为直接。不过个人觉得python和jupyter各有特色,并没有站队的意思。

2. 基于系统相机的人脸识别

1. 系统相机:

import cv2
import time
import ipywidgets.widgets as widgets
from IPython.display import display

# Preview window size (capture itself is 320x240; the window upscales it).
window_width = 640
window_height = 480

# Load the frontal-face Haar cascade shipped with OpenCV.
face_haar = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Open the system default camera (index 0).
g_camera = cv2.VideoCapture(0)

# Request a 320x240 capture.
# Improvement: use the named property constants instead of the magic numbers
# 3 and 4 (CAP_PROP_FRAME_WIDTH == 3, CAP_PROP_FRAME_HEIGHT == 4).
width = 320
height = 240
g_camera.set(cv2.CAP_PROP_FRAME_WIDTH, width)
g_camera.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

# Resizable preview window.
cv2.namedWindow('Frame', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Frame', window_width, window_height)

def bgr8_to_jpeg(value, quality=75):
    """Encode a BGR8 frame as JPEG bytes for the Jupyter image widget."""
    return bytes(cv2.imencode('.jpg', value)[1])

# In-notebook preview widget sized to the capture resolution.
image_widget = widgets.Image(format='jpeg', width=320, height=240)
display(image_widget)

try:
    m_fps = 0
    t_start = time.time()
    saved_count = 1  # sequence number for snapshots saved with the 's' key

    while True:
        ret, frame = g_camera.read()
        if not ret:
            print("Camera Read Fail!")
            break

        # Detect faces on a grayscale copy of the frame.
        gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_haar.detectMultiScale(gray_img, 1.1, 3)

        # Outline every detected face.
        for (face_x, face_y, face_w, face_h) in faces:
            cv2.rectangle(frame, (face_x, face_y), (face_x + face_w, face_y + face_h), (0, 255, 255), 1)

        # Rolling FPS estimate, re-based every 2 seconds.
        m_fps = m_fps + 1
        fps = m_fps / (time.time() - t_start)
        if (time.time() - t_start) >= 2:
            m_fps = fps
            t_start = time.time() - 1

        text = "FPS: " + str(int(fps))
        cv2.putText(frame, text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
        image_widget.value = bgr8_to_jpeg(frame)

        cv2.imshow('Frame', frame)

        # Single keyboard read per frame. Fix: the surrounding article states
        # that 's' saves a snapshot, but the original loop never implemented
        # it — added here, consistent with the MIPI-camera script above.
        k = cv2.waitKey(1) & 0xFF
        if k == ord('s'):
            timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
            filename = f"{saved_count:04d}_{timestamp}.jpg"
            cv2.imwrite(filename, frame)
            print(f"Saved {filename} successfully!")
            saved_count += 1
        elif k == ord(' '):
            # Space exits the loop.
            break

except Exception as e:
    print("Exception: ", e)

# Release the camera device handle so other processes can open it.
g_camera.release()

# Close every OpenCV window created by this script.
cv2.destroyAllWindows()

        这里g_camera = cv2.VideoCapture(0)表示调用默认系统的摄像头,其他的代码属性写法和MiPi相机的很像,都可以实时采集识别人脸,也可以按下键盘的‘s’键进行保存,按下键盘的空格键进行退出程序。