Implementing a Face "Beauty Score" with OpenCV and MediaPipe
Contents
- Beauty scoring
- Real-time beauty scoring from the webcam
- Da Vinci metrics
- Real-time Da Vinci metrics from the webcam
Beauty scoring
Define an image-visualization function
Load the 3D facial landmark detection model
Import the drawing utilities and drawing style
Feed the image to the model and get the prediction results
Convert BGR to RGB
Run the RGB image through the model and get the predictions
Count the number of detected faces
Get the coordinates of the facial contour landmarks, annotate them on the image, and draw connecting lines between the annotated points (for example, the left corner of the left eye is landmark 33). A minimal sketch of the five-eye computation follows, then the full script.
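Before the full script, here is a minimal sketch of the "five eyes" computation it performs. It only assumes the x-coordinates of the six key points used below (face left edge, the four eye corners, face right edge); the helper name `five_eye_metric` and the sample numbers are purely illustrative.

```python
import numpy as np

def five_eye_metric(six_x):
    """Five-eye score from the x-coordinates of six points ordered left to right:
    face left edge, left-eye corners, right-eye corners, face right edge."""
    six_x = np.asarray(six_x, dtype=float)
    face_width = six_x[-1] - six_x[0]
    # the five gaps between the six points, as percentages of the face width
    gaps = 100 * np.diff(six_x) / face_width
    # mean width of the two eyes (gaps 1 and 3)
    eye_mean = np.mean((gaps[1], gaps[3]))
    # L2 norm of the deviations; smaller means closer to the "five equal eyes" ideal
    return np.linalg.norm(gaps - eye_mean)

# Hypothetical pixel x-coordinates of a perfectly proportioned face: the score is 0
print(five_eye_metric([0, 100, 200, 300, 400, 500]))  # 0.0
```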
```python
# Beauty scoring -- "five eyes" metric
import cv2 as cv
import mediapipe as mp
import numpy as np
import matplotlib.pyplot as plt

# Visualization helper: show a BGR image with matplotlib
def look_img(img):
    img_RGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    plt.imshow(img_RGB)
    plt.show()

# Load the 3D facial landmark detection model
mp_face_mesh = mp.solutions.face_mesh
# help(mp_face_mesh.FaceMesh)
model = mp_face_mesh.FaceMesh(
    static_image_mode=True,        # True: static images / False: real-time webcam frames
    refine_landmarks=True,         # use the Attention Mesh model
    max_num_faces=40,
    min_detection_confidence=0.5,  # detection confidence threshold (closer to 1 is stricter)
    min_tracking_confidence=0.5,   # tracking confidence threshold
)

# Drawing utilities and drawing style
mp_drawing = mp.solutions.drawing_utils
# mp_drawing_styles = mp.solutions.drawing_styles
draw_spec = mp_drawing.DrawingSpec(thickness=2, circle_radius=1, color=(66, 77, 229))

img = cv.imread("img.png")

# Feed the image to the model and get the prediction results
# BGR to RGB
img_RGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)
h, w = img.shape[0], img.shape[1]

# Run the RGB image through the model
results = model.process(img_RGB)

# # Number of detected faces
# print(len(results.multi_face_landmarks))

if results.multi_face_landmarks:
    for face_landmarks in results.multi_face_landmarks:
        mp_drawing.draw_landmarks(
            image=img,
            landmark_list=face_landmarks,
            connections=mp_face_mesh.FACEMESH_CONTOURS,
            landmark_drawing_spec=draw_spec,
            connection_drawing_spec=draw_spec
        )
        for idx, coord in enumerate(face_landmarks.landmark):
            cx = int(coord.x * w)
            cy = int(coord.y * h)
            img = cv.putText(img, 'FACE DETECTED', (25, 50), cv.FONT_HERSHEY_SIMPLEX, 0.1, (218, 112, 214), 1)
            img = cv.putText(img, str(idx), (cx, cy), cv.FONT_HERSHEY_SIMPLEX, 0.3, (218, 112, 214), 1)
else:
    img = cv.putText(img, 'NO FACE DETECTED', (25, 50), cv.FONT_HERSHEY_SIMPLEX, 1.25, (218, 112, 214), 1)
look_img(img)
cv.imwrite('face_id.jpg', img)

# Leftmost point of the face contour (landmark 234)
FL = results.multi_face_landmarks[0].landmark[234]
FL_X, FL_Y = int(FL.x * w), int(FL.y * h); FL_Color = (234, 0, 255)
img = cv.circle(img, (FL_X, FL_Y), 20, FL_Color, -1)
look_img(img)
# Top edge of the face (landmark 10)
FT = results.multi_face_landmarks[0].landmark[10]
FT_X, FT_Y = int(FT.x * w), int(FT.y * h); FT_Color = (231, 141, 181)
img = cv.circle(img, (FT_X, FT_Y), 20, FT_Color, -1)
look_img(img)
# Bottom edge of the face (landmark 152)
FB = results.multi_face_landmarks[0].landmark[152]
FB_X, FB_Y = int(FB.x * w), int(FB.y * h); FB_Color = (231, 141, 181)
img = cv.circle(img, (FB_X, FB_Y), 20, FB_Color, -1)
look_img(img)
# Rightmost point of the face contour (landmark 454)
FR = results.multi_face_landmarks[0].landmark[454]
FR_X, FR_Y = int(FR.x * w), int(FR.y * h); FR_Color = (0, 255, 0)
img = cv.circle(img, (FR_X, FR_Y), 20, FR_Color, -1)
look_img(img)
# Left corner of the left eye (landmark 33)
ELL = results.multi_face_landmarks[0].landmark[33]
ELL_X, ELL_Y = int(ELL.x * w), int(ELL.y * h); ELL_Color = (0, 255, 0)
img = cv.circle(img, (ELL_X, ELL_Y), 20, ELL_Color, -1)
look_img(img)
# Right corner of the left eye (landmark 133)
ELR = results.multi_face_landmarks[0].landmark[133]
ELR_X, ELR_Y = int(ELR.x * w), int(ELR.y * h); ELR_Color = (0, 255, 0)
img = cv.circle(img, (ELR_X, ELR_Y), 20, ELR_Color, -1)
look_img(img)
# Left corner of the right eye (landmark 362)
ERL = results.multi_face_landmarks[0].landmark[362]
ERL_X, ERL_Y = int(ERL.x * w), int(ERL.y * h); ERL_Color = (233, 255, 128)
img = cv.circle(img, (ERL_X, ERL_Y), 20, ERL_Color, -1)
look_img(img)
# Right corner of the right eye (landmark 263)
ERR = results.multi_face_landmarks[0].landmark[263]
ERR_X, ERR_Y = int(ERR.x * w), int(ERR.y * h); ERR_Color = (23, 255, 128)
img = cv.circle(img, (ERR_X, ERR_Y), 20, ERR_Color, -1)
look_img(img)

# X coordinates of the six points from left to right
Six_X = np.array([FL_X, ELL_X, ELR_X, ERL_X, ERR_X, FR_X])
# Total distance from the leftmost to the rightmost point
Left_Right = FR_X - FL_X
# The five gaps between the six points, normalized to percentages of the face width
Five_Distance = 100 * np.diff(Six_X) / Left_Right
# Mean width of the two eyes
Eye_Width_Mean = np.mean((Five_Distance[1], Five_Distance[3]))
# Difference between each of the five gaps and the mean eye width
Five_Eye_Diff = Five_Distance - Eye_Width_Mean
# L2 norm of the differences, used as the beauty metric (smaller is closer to the ideal)
Five_Eye_Metrics = np.linalg.norm(Five_Eye_Diff)

cv.line(img, (FL_X, FT_Y), (FL_X, FB_Y), FL_Color, 3)
cv.line(img, (ELL_X, FT_Y), (ELL_X, FB_Y), ELL_Color, 3)
cv.line(img, (ELR_X, FT_Y), (ELR_X, FB_Y), ELR_Color, 3)
cv.line(img, (ERL_X, FT_Y), (ERL_X, FB_Y), ERL_Color, 3)
cv.line(img, (ERR_X, FT_Y), (ERR_X, FB_Y), ERR_Color, 3)
cv.line(img, (FR_X, FT_Y), (FR_X, FB_Y), FR_Color, 3)
cv.line(img, (FL_X, FT_Y), (FR_X, FT_Y), FT_Color, 3)
cv.line(img, (FL_X, FB_Y), (FR_X, FB_Y), FB_Color, 3)

img = cv.putText(img, 'Five Eye Metrics {:.2f}'.format(Five_Eye_Metrics), (25, 50), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 6)
img = cv.putText(img, 'Distance 1 {:.2f}'.format(Five_Eye_Diff[0]), (25, 100), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 5)
img = cv.putText(img, 'Distance 3 {:.2f}'.format(Five_Eye_Diff[2]), (25, 150), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 4)
img = cv.putText(img, 'Distance 5 {:.2f}'.format(Five_Eye_Diff[4]), (25, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
look_img(img)
cv.imwrite("yanzhi.jpg", img)
```
Real-time beauty scoring from the webcam
The last part of the code is a webcam-capture template that can be reused as-is; a trimmed-down version of the loop is sketched below.
The key steps are noted in the code comments.
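The core of that template boils down to this capture loop (a minimal sketch of the code below; `process_frame` is the per-frame function defined in the full script):

```python
import cv2 as cv

cap = cv.VideoCapture(0)               # open the default camera
while cap.isOpened():
    success, frame = cap.read()        # grab one BGR frame
    if not success:
        break
    frame = process_frame(frame)       # draw landmarks and metrics on the frame
    cv.imshow('my_window', frame)      # show the processed frame
    if (cv.waitKey(1) & 0xFF) == ord('q'):  # quit on 'q'
        break
cap.release()
cv.destroyAllWindows()
```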
```python
import cv2 as cv
import mediapipe as mp
import numpy as np
import time
import matplotlib.pyplot as plt

# Visualization helper: show a BGR image with matplotlib
def look_img(img):
    img_RGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    plt.imshow(img_RGB)
    plt.show()

# Load the 3D facial landmark detection model
mp_face_mesh = mp.solutions.face_mesh
model = mp_face_mesh.FaceMesh(
    static_image_mode=False,       # True: static images / False: real-time webcam frames
    refine_landmarks=True,         # use the Attention Mesh model
    max_num_faces=5,
    min_detection_confidence=0.5,  # detection confidence threshold (closer to 1 is stricter)
    min_tracking_confidence=0.5,   # tracking confidence threshold
)

# Drawing utilities and drawing styles
mp_drawing = mp.solutions.drawing_utils
draw_spec = mp_drawing.DrawingSpec(thickness=2, circle_radius=1, color=(66, 77, 229))
landmark_drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=2, color=(66, 77, 229))
# Contour style
connection_drawing_spec = mp_drawing.DrawingSpec(thickness=2, circle_radius=1, color=(233, 155, 6))

# Process a single frame
def process_frame(img):
    start_time = time.time()
    scaler = 1
    h, w = img.shape[0], img.shape[1]
    img_RGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    results = model.process(img_RGB)
    if results.multi_face_landmarks:
        # Leftmost point of the face contour (landmark 234)
        FL = results.multi_face_landmarks[0].landmark[234]
        FL_X, FL_Y = int(FL.x * w), int(FL.y * h); FL_Color = (234, 0, 255)
        img = cv.circle(img, (FL_X, FL_Y), 5, FL_Color, -1)
        # Top edge of the face (landmark 10)
        FT = results.multi_face_landmarks[0].landmark[10]
        FT_X, FT_Y = int(FT.x * w), int(FT.y * h); FT_Color = (231, 141, 181)
        img = cv.circle(img, (FT_X, FT_Y), 5, FT_Color, -1)
        # Bottom edge of the face (landmark 152)
        FB = results.multi_face_landmarks[0].landmark[152]
        FB_X, FB_Y = int(FB.x * w), int(FB.y * h); FB_Color = (231, 141, 181)
        img = cv.circle(img, (FB_X, FB_Y), 5, FB_Color, -1)
        # Rightmost point of the face contour (landmark 454)
        FR = results.multi_face_landmarks[0].landmark[454]
        FR_X, FR_Y = int(FR.x * w), int(FR.y * h); FR_Color = (0, 255, 0)
        img = cv.circle(img, (FR_X, FR_Y), 5, FR_Color, -1)
        # Left corner of the left eye (landmark 33)
        ELL = results.multi_face_landmarks[0].landmark[33]
        ELL_X, ELL_Y = int(ELL.x * w), int(ELL.y * h); ELL_Color = (0, 255, 0)
        img = cv.circle(img, (ELL_X, ELL_Y), 5, ELL_Color, -1)
        # Right corner of the left eye (landmark 133)
        ELR = results.multi_face_landmarks[0].landmark[133]
        ELR_X, ELR_Y = int(ELR.x * w), int(ELR.y * h); ELR_Color = (0, 255, 0)
        img = cv.circle(img, (ELR_X, ELR_Y), 5, ELR_Color, -1)
        # Left corner of the right eye (landmark 362)
        ERL = results.multi_face_landmarks[0].landmark[362]
        ERL_X, ERL_Y = int(ERL.x * w), int(ERL.y * h); ERL_Color = (233, 255, 128)
        img = cv.circle(img, (ERL_X, ERL_Y), 5, ERL_Color, -1)
        # Right corner of the right eye (landmark 263)
        ERR = results.multi_face_landmarks[0].landmark[263]
        ERR_X, ERR_Y = int(ERR.x * w), int(ERR.y * h); ERR_Color = (23, 255, 128)
        img = cv.circle(img, (ERR_X, ERR_Y), 5, ERR_Color, -1)
        # X coordinates of the six points from left to right
        Six_X = np.array([FL_X, ELL_X, ELR_X, ERL_X, ERR_X, FR_X])
        # Total distance from the leftmost to the rightmost point
        Left_Right = FR_X - FL_X
        # The five gaps between the six points, normalized to percentages of the face width
        Five_Distance = 100 * np.diff(Six_X) / Left_Right
        # Mean width of the two eyes
        Eye_Width_Mean = np.mean((Five_Distance[1], Five_Distance[3]))
        # Difference between each gap and the mean eye width
        Five_Eye_Diff = Five_Distance - Eye_Width_Mean
        # L2 norm of the differences, used as the beauty metric
        Five_Eye_Metrics = np.linalg.norm(Five_Eye_Diff)
        cv.line(img, (FL_X, FT_Y), (FL_X, FB_Y), FL_Color, 3)
        cv.line(img, (ELL_X, FT_Y), (ELL_X, FB_Y), ELL_Color, 3)
        cv.line(img, (ELR_X, FT_Y), (ELR_X, FB_Y), ELR_Color, 3)
        cv.line(img, (ERL_X, FT_Y), (ERL_X, FB_Y), ERL_Color, 3)
        cv.line(img, (ERR_X, FT_Y), (ERR_X, FB_Y), ERR_Color, 3)
        cv.line(img, (FR_X, FT_Y), (FR_X, FB_Y), FR_Color, 3)
        cv.line(img, (FL_X, FT_Y), (FR_X, FT_Y), FT_Color, 3)
        cv.line(img, (FL_X, FB_Y), (FR_X, FB_Y), FB_Color, 3)
        img = cv.putText(img, 'Five Eye Metrics {:.2f}'.format(Five_Eye_Metrics), (25, 50), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 2)
        img = cv.putText(img, 'Distance 1 {:.2f}'.format(Five_Eye_Diff[0]), (25, 100), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 2)
        img = cv.putText(img, 'Distance 3 {:.2f}'.format(Five_Eye_Diff[2]), (25, 150), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 2)
        img = cv.putText(img, 'Distance 5 {:.2f}'.format(Five_Eye_Diff[4]), (25, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 2)
    else:
        img = cv.putText(img, 'NO FACE DETECTED', (25, 50), cv.FONT_HERSHEY_SIMPLEX, 1.25, (218, 112, 214), 1)
    # Time when this frame finished processing
    end_time = time.time()
    # Frames processed per second (FPS)
    FPS = 1 / (end_time - start_time)
    img = cv.putText(img, 'FPS ' + str(int(FPS)), (25 * scaler, 300 * scaler), cv.FONT_HERSHEY_SIMPLEX, 1.25 * scaler, (0, 0, 255), 1)
    return img

# Open the webcam
cap = cv.VideoCapture(0)
cap.open(0)
# Loop until 'q' is pressed
while cap.isOpened():
    success, frame = cap.read()
    if not success:
        print('ERROR')
        break
    frame = process_frame(frame)
    # Show the processed three-channel frame
    cv.imshow('my_window', frame)
    if (cv.waitKey(1) & 0xFF) == ord('q'):
        break
cap.release()
cv.destroyAllWindows()
```
It seems it can even detect a husky.
Da Vinci metrics
This builds on the code above and adds more metrics; a compact sketch of the new metrics follows, then the full script.
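The two new metric families are the "three sections" (三庭) proportions and the Da Vinci mouth-to-nose ratio. Here is a minimal sketch of how they are computed in the script below, assuming the y-coordinates are ordered from the top of the face to the chin; the helper names are illustrative only.

```python
import numpy as np

def three_section_metrics(six_y):
    """six_y: y-coordinates of face top, brow center, nose bottom,
    lip center, lip bottom, chin, ordered from top to bottom."""
    six_y = np.asarray(six_y, dtype=float)
    gaps = 100 * np.diff(six_y) / (six_y[-1] - six_y[0])
    lower = gaps[2:].sum()               # nose bottom to chin (the lower section)
    a = abs(gaps[1] - lower)             # middle section vs. lower section
    b = abs(gaps[2] - lower / 3)         # nose bottom to lip center vs. 1/3 of lower section
    c = abs(gaps[3:].sum() - lower / 2)  # lip center to chin vs. 1/2 of lower section
    return a, b, c

def da_vinci_ratio(mouth_left_x, mouth_right_x, nose_left_x, nose_right_x):
    """Mouth width divided by nose width; roughly 1.5-1.6 is considered ideal."""
    return (mouth_right_x - mouth_left_x) / (nose_right_x - nose_left_x)
```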
```python
import cv2 as cv
import mediapipe as mp
import numpy as np
import matplotlib.pyplot as plt

# Visualization helper: show a BGR image with matplotlib
def look_img(img):
    img_RGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    plt.imshow(img_RGB)
    plt.show()

# Load the 3D facial landmark detection model
mp_face_mesh = mp.solutions.face_mesh
model = mp_face_mesh.FaceMesh(
    static_image_mode=True,        # True: static images / False: real-time webcam frames
    refine_landmarks=True,         # use the Attention Mesh model
    max_num_faces=40,
    min_detection_confidence=0.2,  # detection confidence threshold (closer to 1 is stricter)
    min_tracking_confidence=0.5,   # tracking confidence threshold
)

# Drawing utilities and drawing style
mp_drawing = mp.solutions.drawing_utils
draw_spec = mp_drawing.DrawingSpec(thickness=2, circle_radius=1, color=(223, 155, 6))

# Read the image
img = cv.imread("img.png")
# width = img1.shape[1]
# height = img1.shape[0]
# img = cv.resize(img1, (width * 10, height * 10))
# look_img(img)

# Feed the image to the model and get the prediction results
# BGR to RGB
img_RGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)
# Run the RGB image through the model
results = model.process(img_RGB)

radius = 12
lw = 2
h, w = img.shape[0], img.shape[1]

# # Number of detected faces
# print(len(results.multi_face_landmarks))

if results.multi_face_landmarks:
    for face_landmarks in results.multi_face_landmarks:
        mp_drawing.draw_landmarks(
            image=img,
            landmark_list=face_landmarks,
            connections=mp_face_mesh.FACEMESH_CONTOURS,
            landmark_drawing_spec=draw_spec,
            connection_drawing_spec=draw_spec
        )
        for idx, coord in enumerate(face_landmarks.landmark):
            cx = int(coord.x * w)
            cy = int(coord.y * h)
            img = cv.putText(img, 'FACE DETECTED', (25, 50), cv.FONT_HERSHEY_SIMPLEX, 0.1, (218, 112, 214), 1)
            img = cv.putText(img, str(idx), (cx, cy), cv.FONT_HERSHEY_SIMPLEX, 0.3, (218, 112, 214), 1)
else:
    img = cv.putText(img, 'NO FACE DETECTED', (25, 50), cv.FONT_HERSHEY_SIMPLEX, 1.25, (218, 112, 214), 1)
look_img(img)

# Leftmost point of the face contour (landmark 234)
FL = results.multi_face_landmarks[0].landmark[234]
FL_X, FL_Y = int(FL.x * w), int(FL.y * h); FL_Color = (234, 0, 255)
img = cv.circle(img, (FL_X, FL_Y), radius, FL_Color, -1)
look_img(img)
# Top edge of the face (landmark 10)
FT = results.multi_face_landmarks[0].landmark[10]
FT_X, FT_Y = int(FT.x * w), int(FT.y * h); FT_Color = (231, 141, 181)
img = cv.circle(img, (FT_X, FT_Y), radius, FT_Color, -1)
look_img(img)
# Bottom edge of the face (landmark 152)
FB = results.multi_face_landmarks[0].landmark[152]
FB_X, FB_Y = int(FB.x * w), int(FB.y * h); FB_Color = (231, 141, 181)
img = cv.circle(img, (FB_X, FB_Y), radius, FB_Color, -1)
look_img(img)
# Rightmost point of the face contour (landmark 454)
FR = results.multi_face_landmarks[0].landmark[454]
FR_X, FR_Y = int(FR.x * w), int(FR.y * h); FR_Color = (0, 255, 0)
img = cv.circle(img, (FR_X, FR_Y), radius, FR_Color, -1)
look_img(img)
# Left corner of the left eye (landmark 33)
ELL = results.multi_face_landmarks[0].landmark[33]
ELL_X, ELL_Y = int(ELL.x * w), int(ELL.y * h); ELL_Color = (0, 255, 0)
img = cv.circle(img, (ELL_X, ELL_Y), radius, ELL_Color, -1)
look_img(img)
# Right corner of the left eye (landmark 133)
ELR = results.multi_face_landmarks[0].landmark[133]
ELR_X, ELR_Y = int(ELR.x * w), int(ELR.y * h); ELR_Color = (0, 255, 0)
img = cv.circle(img, (ELR_X, ELR_Y), radius, ELR_Color, -1)
look_img(img)
# Left corner of the right eye (landmark 362)
ERL = results.multi_face_landmarks[0].landmark[362]
ERL_X, ERL_Y = int(ERL.x * w), int(ERL.y * h); ERL_Color = (233, 255, 128)
img = cv.circle(img, (ERL_X, ERL_Y), radius, ERL_Color, -1)
look_img(img)
# Right corner of the right eye (landmark 263)
ERR = results.multi_face_landmarks[0].landmark[263]
ERR_X, ERR_Y = int(ERR.x * w), int(ERR.y * h); ERR_Color = (23, 255, 128)
img = cv.circle(img, (ERR_X, ERR_Y), radius, ERR_Color, -1)
look_img(img)

# X coordinates of the six points from left to right
Six_X = np.array([FL_X, ELL_X, ELR_X, ERL_X, ERR_X, FR_X])
# Total distance from the leftmost to the rightmost point
Left_Right = FR_X - FL_X
# The five gaps between the six points, normalized to percentages of the face width
Five_Distance = 100 * np.diff(Six_X) / Left_Right
# Mean width of the two eyes
Eye_Width_Mean = np.mean((Five_Distance[1], Five_Distance[3]))
# Difference between each gap and the mean eye width
Five_Eye_Diff = Five_Distance - Eye_Width_Mean
# L2 norm of the differences, used as the beauty metric
Five_Eye_Metrics = np.linalg.norm(Five_Eye_Diff)

# "Three sections" (三庭)
# Brow center (landmark 9)
MX = results.multi_face_landmarks[0].landmark[9]
MX_X, MX_Y = int(MX.x * w), int(MX.y * h); MX_Color = (29, 123, 234)
img = cv.circle(img, (MX_X, MX_Y), radius, MX_Color, -1)
look_img(img)
# Bottom edge of the nose (landmark 2)
NB = results.multi_face_landmarks[0].landmark[2]
NB_X, NB_Y = int(NB.x * w), int(NB.y * h); NB_Color = (180, 187, 28)
img = cv.circle(img, (NB_X, NB_Y), radius, NB_Color, -1)
look_img(img)
# Center of the lips (landmark 13)
LC = results.multi_face_landmarks[0].landmark[13]
LC_X, LC_Y = int(LC.x * w), int(LC.y * h); LC_Color = (0, 0, 255)
img = cv.circle(img, (LC_X, LC_Y), radius, LC_Color, -1)
look_img(img)
# Bottom edge of the lips (landmark 17)
LB = results.multi_face_landmarks[0].landmark[17]
LB_X, LB_Y = int(LB.x * w), int(LB.y * h); LB_Color = (139, 0, 0)
img = cv.circle(img, (LB_X, LB_Y), radius, LB_Color, -1)
look_img(img)

Six_Y = np.array([FT_Y, MX_Y, NB_Y, LC_Y, LB_Y, FB_Y])
Top_Down = FB_Y - FT_Y
# The five vertical gaps, normalized to percentages of the face height
Three_Section_Distance = 100 * np.diff(Six_Y) / Top_Down
# Middle section vs. lower section
Three_Section_Metric_A = np.abs(Three_Section_Distance[1] - sum(Three_Section_Distance[2:]))
# Nose bottom to lip center should be one third of the lower section
Three_Section_Metric_B = np.abs(Three_Section_Distance[2] - sum(Three_Section_Distance[2:]) / 3)
# Lip center to chin tip should be half of the lower section
Three_Section_Metric_C = np.abs(sum(Three_Section_Distance[3:]) - sum(Three_Section_Distance[2:]) / 2)

# Da Vinci ratio
# Left corner of the mouth (landmark 61)
LL = results.multi_face_landmarks[0].landmark[61]
LL_X, LL_Y = int(LL.x * w), int(LL.y * h); LL_Color = (255, 255, 255)
img = cv.circle(img, (LL_X, LL_Y), radius, LL_Color, -1)
look_img(img)
# Right corner of the mouth (landmark 291)
LR = results.multi_face_landmarks[0].landmark[291]
LR_X, LR_Y = int(LR.x * w), int(LR.y * h); LR_Color = (255, 255, 255)
img = cv.circle(img, (LR_X, LR_Y), radius, LR_Color, -1)
look_img(img)
# Left edge of the nose (landmark 129)
NL = results.multi_face_landmarks[0].landmark[129]
NL_X, NL_Y = int(NL.x * w), int(NL.y * h); NL_Color = (255, 255, 255)
img = cv.circle(img, (NL_X, NL_Y), radius, NL_Color, -1)
look_img(img)
# Right edge of the nose (landmark 358)
NR = results.multi_face_landmarks[0].landmark[358]
NR_X, NR_Y = int(NR.x * w), int(NR.y * h); NR_Color = (255, 255, 255)
img = cv.circle(img, (NR_X, NR_Y), radius, NR_Color, -1)
look_img(img)
# Mouth width should be about 1.5-1.6 times the nose width
Da_Vinci = (LR.x - LL.x) / (NR.x - NL.x)

# Eyebrows
# Left corner of the left eyebrow (landmark 46)
EBLL = results.multi_face_landmarks[0].landmark[46]
EBLL_X, EBLL_Y = int(EBLL.x * w), int(EBLL.y * h); EBLL_Color = (255, 255, 155)
img = cv.circle(img, (EBLL_X, EBLL_Y), radius, EBLL_Color, -1)
look_img(img)
# Peak of the left eyebrow (landmark 105)
EBLT = results.multi_face_landmarks[0].landmark[105]
EBLT_X, EBLT_Y = int(EBLT.x * w), int(EBLT.y * h); EBLT_Color = (255, 255, 155)
img = cv.circle(img, (EBLT_X, EBLT_Y), radius, EBLT_Color, -1)
look_img(img)
# Right corner of the left eyebrow (landmark 107)
EBLR = results.multi_face_landmarks[0].landmark[107]
EBLR_X, EBLR_Y = int(EBLR.x * w), int(EBLR.y * h); EBLR_Color = (255, 255, 155)
img = cv.circle(img, (EBLR_X, EBLR_Y), radius, EBLR_Color, -1)
look_img(img)
# Left corner of the right eyebrow (landmark 336)
EBRL = results.multi_face_landmarks[0].landmark[336]
EBRL_X, EBRL_Y = int(EBRL.x * w), int(EBRL.y * h); EBRL_Color = (255, 255, 105)
img = cv.circle(img, (EBRL_X, EBRL_Y), radius, EBRL_Color, -1)
look_img(img)
# Peak of the right eyebrow (landmark 334)
EBRT = results.multi_face_landmarks[0].landmark[334]
EBRT_X, EBRT_Y = int(EBRT.x * w), int(EBRT.y * h); EBRT_Color = (255, 155, 155)
img = cv.circle(img, (EBRT_X, EBRT_Y), radius, EBRT_Color, -1)
look_img(img)
# Right corner of the right eyebrow (landmark 276)
EBRR = results.multi_face_landmarks[0].landmark[276]
EBRR_X, EBRR_Y = int(EBRR.x * w), int(EBRR.y * h); EBRR_Color = (155, 255, 195)
img = cv.circle(img, (EBRR_X, EBRR_Y), radius, EBRR_Color, -1)
look_img(img)

# Is the inner end of each eyebrow directly above the inner eye corner?
EB_Metric_A = (EBLR_X - ELR_X) / Left_Right
EB_Metric_B = (EBRL_X - ERL_X) / Left_Right
EB_Metric_C = (EBLT_X - ELL_X) / Left_Right
EB_Metric_D = (EBRT_X - ERR_X) / Left_Right
# Signed area of the triangle formed by eyebrow corner, eye corner and nose edge,
# normalized by the squared face width (collinear points give zero area)
EB_Metric_E = 0.5 * np.linalg.det([[EBLL_X, EBLL_Y, 1], [ELL_X, ELL_Y, 1], [NL_X, NL_Y, 1]]) / Left_Right ** 2
EB_Metric_F = 0.5 * np.linalg.det([[EBRR_X, EBRR_Y, 1], [ERR_X, ERR_Y, 1], [NR_X, NR_Y, 1]]) / Left_Right ** 2
cv.line(img, (EBLL_X, EBLL_Y), (ELL_X, ELL_Y), EBLL_Color, lw)
cv.line(img, (ELL_X, ELL_Y), (NL_X, NL_Y), EBLL_Color, lw)
cv.line(img, (EBLL_X, EBLL_Y), (NL_X, NL_Y), EBLL_Color, lw)
cv.line(img, (EBRR_X, EBRR_Y), (ERR_X, ERR_Y), EBLL_Color, lw)
cv.line(img, (ERR_X, ERR_Y), (NR_X, NR_Y), EBLL_Color, lw)
cv.line(img, (EBRR_X, EBRR_Y), (NR_X, NR_Y), EBLL_Color, lw)
look_img(img)

# Upper point of the left inner eye corner (landmark 157)
ELRT = results.multi_face_landmarks[0].landmark[157]
ELRT_X, ELRT_Y = int(ELRT.x * w), int(ELRT.y * h); ELRT_Color = (155, 255, 195)
img = cv.circle(img, (ELRT_X, ELRT_Y), radius, ELRT_Color, -1)
look_img(img)
# Lower point of the left inner eye corner (landmark 154)
ELRB = results.multi_face_landmarks[0].landmark[154]
ELRB_X, ELRB_Y = int(ELRB.x * w), int(ELRB.y * h); ELRB_Color = (155, 255, 195)
img = cv.circle(img, (ELRB_X, ELRB_Y), radius, ELRB_Color, -1)
look_img(img)
# Upper point of the right inner eye corner (landmark 384)
ERLT = results.multi_face_landmarks[0].landmark[384]
ERLT_X, ERLT_Y = int(ERLT.x * w), int(ERLT.y * h); ERLT_Color = (155, 255, 195)
img = cv.circle(img, (ERLT_X, ERLT_Y), radius, ERLT_Color, -1)
look_img(img)
# Lower point of the right inner eye corner (landmark 381)
ERRB = results.multi_face_landmarks[0].landmark[381]
ERRB_X, ERRB_Y = int(ERRB.x * w), int(ERRB.y * h); ERRB_Color = (155, 255, 195)
img = cv.circle(img, (ERRB_X, ERRB_Y), radius, ERRB_Color, -1)
look_img(img)

# Inner eye corner opening angles
vector_a = np.array([ELRT_X - ELR_X, ELRT_Y - ELR_Y])
vector_b = np.array([ELRB_X - ELR_X, ELRB_Y - ELR_Y])
cos = vector_a.dot(vector_b) / (np.linalg.norm(vector_a) * np.linalg.norm(vector_b))
EB_Metric_G = np.degrees(np.arccos(cos))
vector_a = np.array([ERLT_X - ERL_X, ERLT_Y - ERL_Y])
vector_b = np.array([ERRB_X - ERL_X, ERRB_Y - ERL_Y])
cos = vector_a.dot(vector_b) / (np.linalg.norm(vector_a) * np.linalg.norm(vector_b))
EB_Metric_H = np.degrees(np.arccos(cos))

# Visualization
cv.line(img, (FL_X, FT_Y), (FL_X, FB_Y), FL_Color, 3)
cv.line(img, (ELL_X, FT_Y), (ELL_X, FB_Y), ELL_Color, 3)
cv.line(img, (ELR_X, FT_Y), (ELR_X, FB_Y), ELR_Color, 3)
cv.line(img, (ERL_X, FT_Y), (ERL_X, FB_Y), ERL_Color, 3)
cv.line(img, (ERR_X, FT_Y), (ERR_X, FB_Y), ERR_Color, 3)
cv.line(img, (FR_X, FT_Y), (FR_X, FB_Y), FR_Color, 3)
cv.line(img, (FL_X, FT_Y), (FR_X, FT_Y), FT_Color, 3)
cv.line(img, (FL_X, FB_Y), (FR_X, FB_Y), FB_Color, 3)
cv.line(img, (FL_X, MX_Y), (FR_X, MX_Y), MX_Color, lw)
cv.line(img, (FL_X, NB_Y), (FR_X, NB_Y), NB_Color, lw)
cv.line(img, (FL_X, LC_Y), (FR_X, LC_Y), LC_Color, lw)
cv.line(img, (FL_X, LB_Y), (FR_X, LB_Y), LB_Color, lw)

img = cv.putText(img, 'Five Eye Metrics {:.2f}'.format(Five_Eye_Metrics), (25, 50), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
img = cv.putText(img, 'A {:.2f}'.format(Five_Eye_Diff[0]), (25, 100), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
img = cv.putText(img, 'B {:.2f}'.format(Five_Eye_Diff[2]), (25, 150), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
img = cv.putText(img, 'C {:.2f}'.format(Five_Eye_Diff[4]), (25, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
img = cv.putText(img, 'Three Section {:.2f}'.format(Three_Section_Metric_A), (25, 300), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
img = cv.putText(img, '1/3 {:.2f}'.format(Three_Section_Metric_B), (25, 400), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
img = cv.putText(img, '1/2 {:.2f}'.format(Three_Section_Metric_C), (25, 500), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
img = cv.putText(img, 'Da Vinci {:.2f}'.format(Da_Vinci), (25, 600), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
look_img(img)
```
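One detail worth calling out from the script above: EB_Metric_E and EB_Metric_F use the signed area of the triangle formed by an eyebrow corner, the matching eye corner, and the nose edge, computed as half the determinant of a 3x3 matrix of homogeneous coordinates; the closer the three points are to a straight line, the closer the value is to zero. A minimal check of that formula (the helper name and sample points are illustrative only):

```python
import numpy as np

def signed_triangle_area(p1, p2, p3):
    """Half the determinant of the homogeneous coordinate matrix:
    zero when the three points are collinear."""
    (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3
    return 0.5 * np.linalg.det([[x1, y1, 1], [x2, y2, 1], [x3, y3, 1]])

print(signed_triangle_area((0, 0), (1, 1), (2, 2)))  # 0.0 (collinear points)
print(signed_triangle_area((0, 0), (1, 0), (0, 1)))  # 0.5
```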
This image shows the more detailed metrics.
Real-time Da Vinci metrics from the webcam
The workflow is similar to the one above; refer to the earlier steps.
```python
import cv2 as cv
import mediapipe as mp
import numpy as np
import time
import matplotlib.pyplot as plt

# Visualization helper: show a BGR image with matplotlib
def look_img(img):
    img_RGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    plt.imshow(img_RGB)
    plt.show()

# Load the 3D facial landmark detection model
mp_face_mesh = mp.solutions.face_mesh
model = mp_face_mesh.FaceMesh(
    static_image_mode=False,       # True: static images / False: real-time webcam frames
    refine_landmarks=True,         # use the Attention Mesh model
    max_num_faces=5,
    min_detection_confidence=0.5,  # detection confidence threshold (closer to 1 is stricter)
    min_tracking_confidence=0.5,   # tracking confidence threshold
)

# Drawing utilities and drawing styles
mp_drawing = mp.solutions.drawing_utils
draw_spec = mp_drawing.DrawingSpec(thickness=2, circle_radius=1, color=(66, 77, 229))
landmark_drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=2, color=(66, 77, 229))
# Contour style
connection_drawing_spec = mp_drawing.DrawingSpec(thickness=2, circle_radius=1, color=(233, 155, 6))

# Process a single frame
def process_frame(img):
    start_time = time.time()
    scaler = 1
    radius = 12
    lw = 2
    h, w = img.shape[0], img.shape[1]
    img_RGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    results = model.process(img_RGB)
    if results.multi_face_landmarks:
        # Face outline: leftmost (234), top (10), bottom (152), rightmost (454)
        FL = results.multi_face_landmarks[0].landmark[234]
        FL_X, FL_Y = int(FL.x * w), int(FL.y * h); FL_Color = (234, 0, 255)
        img = cv.circle(img, (FL_X, FL_Y), 5, FL_Color, -1)
        FT = results.multi_face_landmarks[0].landmark[10]
        FT_X, FT_Y = int(FT.x * w), int(FT.y * h); FT_Color = (231, 141, 181)
        img = cv.circle(img, (FT_X, FT_Y), 5, FT_Color, -1)
        FB = results.multi_face_landmarks[0].landmark[152]
        FB_X, FB_Y = int(FB.x * w), int(FB.y * h); FB_Color = (231, 141, 181)
        img = cv.circle(img, (FB_X, FB_Y), 5, FB_Color, -1)
        FR = results.multi_face_landmarks[0].landmark[454]
        FR_X, FR_Y = int(FR.x * w), int(FR.y * h); FR_Color = (0, 255, 0)
        img = cv.circle(img, (FR_X, FR_Y), 5, FR_Color, -1)
        # Eye corners: left eye (33, 133), right eye (362, 263)
        ELL = results.multi_face_landmarks[0].landmark[33]
        ELL_X, ELL_Y = int(ELL.x * w), int(ELL.y * h); ELL_Color = (0, 255, 0)
        img = cv.circle(img, (ELL_X, ELL_Y), 5, ELL_Color, -1)
        ELR = results.multi_face_landmarks[0].landmark[133]
        ELR_X, ELR_Y = int(ELR.x * w), int(ELR.y * h); ELR_Color = (0, 255, 0)
        img = cv.circle(img, (ELR_X, ELR_Y), 5, ELR_Color, -1)
        ERL = results.multi_face_landmarks[0].landmark[362]
        ERL_X, ERL_Y = int(ERL.x * w), int(ERL.y * h); ERL_Color = (233, 255, 128)
        img = cv.circle(img, (ERL_X, ERL_Y), 5, ERL_Color, -1)
        ERR = results.multi_face_landmarks[0].landmark[263]
        ERR_X, ERR_Y = int(ERR.x * w), int(ERR.y * h); ERR_Color = (23, 255, 128)
        img = cv.circle(img, (ERR_X, ERR_Y), 5, ERR_Color, -1)
        # Five-eye metric
        Six_X = np.array([FL_X, ELL_X, ELR_X, ERL_X, ERR_X, FR_X])
        Left_Right = FR_X - FL_X
        Five_Distance = 100 * np.diff(Six_X) / Left_Right
        Eye_Width_Mean = np.mean((Five_Distance[1], Five_Distance[3]))
        Five_Eye_Diff = Five_Distance - Eye_Width_Mean
        Five_Eye_Metrics = np.linalg.norm(Five_Eye_Diff)
        # "Three sections": brow center (9), nose bottom (2), lip center (13), lip bottom (17)
        MX = results.multi_face_landmarks[0].landmark[9]
        MX_X, MX_Y = int(MX.x * w), int(MX.y * h); MX_Color = (29, 123, 234)
        img = cv.circle(img, (MX_X, MX_Y), radius, MX_Color, -1)
        NB = results.multi_face_landmarks[0].landmark[2]
        NB_X, NB_Y = int(NB.x * w), int(NB.y * h); NB_Color = (180, 187, 28)
        img = cv.circle(img, (NB_X, NB_Y), radius, NB_Color, -1)
        LC = results.multi_face_landmarks[0].landmark[13]
        LC_X, LC_Y = int(LC.x * w), int(LC.y * h); LC_Color = (0, 0, 255)
        img = cv.circle(img, (LC_X, LC_Y), radius, LC_Color, -1)
        LB = results.multi_face_landmarks[0].landmark[17]
        LB_X, LB_Y = int(LB.x * w), int(LB.y * h); LB_Color = (139, 0, 0)
        img = cv.circle(img, (LB_X, LB_Y), radius, LB_Color, -1)
        Six_Y = np.array([FT_Y, MX_Y, NB_Y, LC_Y, LB_Y, FB_Y])
        Top_Down = FB_Y - FT_Y
        Three_Section_Distance = 100 * np.diff(Six_Y) / Top_Down
        # Middle section vs. lower section
        Three_Section_Metric_A = np.abs(Three_Section_Distance[1] - sum(Three_Section_Distance[2:]))
        # Nose bottom to lip center should be one third of the lower section
        Three_Section_Metric_B = np.abs(Three_Section_Distance[2] - sum(Three_Section_Distance[2:]) / 3)
        # Lip center to chin tip should be half of the lower section
        Three_Section_Metric_C = np.abs(sum(Three_Section_Distance[3:]) - sum(Three_Section_Distance[2:]) / 2)
        # Da Vinci ratio: mouth corners (61, 291), nose edges (129, 358)
        LL = results.multi_face_landmarks[0].landmark[61]
        LL_X, LL_Y = int(LL.x * w), int(LL.y * h); LL_Color = (255, 255, 255)
        img = cv.circle(img, (LL_X, LL_Y), radius, LL_Color, -1)
        LR = results.multi_face_landmarks[0].landmark[291]
        LR_X, LR_Y = int(LR.x * w), int(LR.y * h); LR_Color = (255, 255, 255)
        img = cv.circle(img, (LR_X, LR_Y), radius, LR_Color, -1)
        NL = results.multi_face_landmarks[0].landmark[129]
        NL_X, NL_Y = int(NL.x * w), int(NL.y * h); NL_Color = (255, 255, 255)
        img = cv.circle(img, (NL_X, NL_Y), radius, NL_Color, -1)
        NR = results.multi_face_landmarks[0].landmark[358]
        NR_X, NR_Y = int(NR.x * w), int(NR.y * h); NR_Color = (255, 255, 255)
        img = cv.circle(img, (NR_X, NR_Y), radius, NR_Color, -1)
        # Mouth width should be about 1.5-1.6 times the nose width
        Da_Vinci = (LR.x - LL.x) / (NR.x - NL.x)
        # Eyebrows: left eyebrow (46, 105, 107), right eyebrow (336, 334, 276)
        EBLL = results.multi_face_landmarks[0].landmark[46]
        EBLL_X, EBLL_Y = int(EBLL.x * w), int(EBLL.y * h); EBLL_Color = (255, 255, 155)
        img = cv.circle(img, (EBLL_X, EBLL_Y), radius, EBLL_Color, -1)
        EBLT = results.multi_face_landmarks[0].landmark[105]
        EBLT_X, EBLT_Y = int(EBLT.x * w), int(EBLT.y * h); EBLT_Color = (255, 255, 155)
        img = cv.circle(img, (EBLT_X, EBLT_Y), radius, EBLT_Color, -1)
        EBLR = results.multi_face_landmarks[0].landmark[107]
        EBLR_X, EBLR_Y = int(EBLR.x * w), int(EBLR.y * h); EBLR_Color = (255, 255, 155)
        img = cv.circle(img, (EBLR_X, EBLR_Y), radius, EBLR_Color, -1)
        EBRL = results.multi_face_landmarks[0].landmark[336]
        EBRL_X, EBRL_Y = int(EBRL.x * w), int(EBRL.y * h); EBRL_Color = (255, 255, 105)
        img = cv.circle(img, (EBRL_X, EBRL_Y), radius, EBRL_Color, -1)
        EBRT = results.multi_face_landmarks[0].landmark[334]
        EBRT_X, EBRT_Y = int(EBRT.x * w), int(EBRT.y * h); EBRT_Color = (255, 155, 155)
        img = cv.circle(img, (EBRT_X, EBRT_Y), radius, EBRT_Color, -1)
        EBRR = results.multi_face_landmarks[0].landmark[276]
        EBRR_X, EBRR_Y = int(EBRR.x * w), int(EBRR.y * h); EBRR_Color = (155, 255, 195)
        img = cv.circle(img, (EBRR_X, EBRR_Y), radius, EBRR_Color, -1)
        # Is the inner end of each eyebrow directly above the inner eye corner?
        EB_Metric_A = (EBLR_X - ELR_X) / Left_Right
        EB_Metric_B = (EBRL_X - ERL_X) / Left_Right
        EB_Metric_C = (EBLT_X - ELL_X) / Left_Right
        EB_Metric_D = (EBRT_X - ERR_X) / Left_Right
        # Signed triangle area of eyebrow corner, eye corner and nose edge, normalized by face width squared
        EB_Metric_E = 0.5 * np.linalg.det([[EBLL_X, EBLL_Y, 1], [ELL_X, ELL_Y, 1], [NL_X, NL_Y, 1]]) / Left_Right ** 2
        EB_Metric_F = 0.5 * np.linalg.det([[EBRR_X, EBRR_Y, 1], [ERR_X, ERR_Y, 1], [NR_X, NR_Y, 1]]) / Left_Right ** 2
        cv.line(img, (EBLL_X, EBLL_Y), (ELL_X, ELL_Y), EBLL_Color, lw)
        cv.line(img, (ELL_X, ELL_Y), (NL_X, NL_Y), EBLL_Color, lw)
        cv.line(img, (EBLL_X, EBLL_Y), (NL_X, NL_Y), EBLL_Color, lw)
        cv.line(img, (EBRR_X, EBRR_Y), (ERR_X, ERR_Y), EBLL_Color, lw)
        cv.line(img, (ERR_X, ERR_Y), (NR_X, NR_Y), EBLL_Color, lw)
        cv.line(img, (EBRR_X, EBRR_Y), (NR_X, NR_Y), EBLL_Color, lw)
        # Inner eye corner points: left (157, 154), right (384, 381)
        ELRT = results.multi_face_landmarks[0].landmark[157]
        ELRT_X, ELRT_Y = int(ELRT.x * w), int(ELRT.y * h); ELRT_Color = (155, 255, 195)
        img = cv.circle(img, (ELRT_X, ELRT_Y), radius, ELRT_Color, -1)
        ELRB = results.multi_face_landmarks[0].landmark[154]
        ELRB_X, ELRB_Y = int(ELRB.x * w), int(ELRB.y * h); ELRB_Color = (155, 255, 195)
        img = cv.circle(img, (ELRB_X, ELRB_Y), radius, ELRB_Color, -1)
        ERLT = results.multi_face_landmarks[0].landmark[384]
        ERLT_X, ERLT_Y = int(ERLT.x * w), int(ERLT.y * h); ERLT_Color = (155, 255, 195)
        img = cv.circle(img, (ERLT_X, ERLT_Y), radius, ERLT_Color, -1)
        ERRB = results.multi_face_landmarks[0].landmark[381]
        ERRB_X, ERRB_Y = int(ERRB.x * w), int(ERRB.y * h); ERRB_Color = (155, 255, 195)
        img = cv.circle(img, (ERRB_X, ERRB_Y), radius, ERRB_Color, -1)
        # Inner eye corner opening angles
        vector_a = np.array([ELRT_X - ELR_X, ELRT_Y - ELR_Y])
        vector_b = np.array([ELRB_X - ELR_X, ELRB_Y - ELR_Y])
        cos = vector_a.dot(vector_b) / (np.linalg.norm(vector_a) * np.linalg.norm(vector_b))
        EB_Metric_G = np.degrees(np.arccos(cos))
        vector_a = np.array([ERLT_X - ERL_X, ERLT_Y - ERL_Y])
        vector_b = np.array([ERRB_X - ERL_X, ERRB_Y - ERL_Y])
        cos = vector_a.dot(vector_b) / (np.linalg.norm(vector_a) * np.linalg.norm(vector_b))
        EB_Metric_H = np.degrees(np.arccos(cos))
        # Visualization
        cv.line(img, (FL_X, FT_Y), (FL_X, FB_Y), FL_Color, 3)
        cv.line(img, (ELL_X, FT_Y), (ELL_X, FB_Y), ELL_Color, 3)
        cv.line(img, (ELR_X, FT_Y), (ELR_X, FB_Y), ELR_Color, 3)
        cv.line(img, (ERL_X, FT_Y), (ERL_X, FB_Y), ERL_Color, 3)
        cv.line(img, (ERR_X, FT_Y), (ERR_X, FB_Y), ERR_Color, 3)
        cv.line(img, (FR_X, FT_Y), (FR_X, FB_Y), FR_Color, 3)
        cv.line(img, (FL_X, FT_Y), (FR_X, FT_Y), FT_Color, 3)
        cv.line(img, (FL_X, FB_Y), (FR_X, FB_Y), FB_Color, 3)
        cv.line(img, (FL_X, MX_Y), (FR_X, MX_Y), MX_Color, lw)
        cv.line(img, (FL_X, NB_Y), (FR_X, NB_Y), NB_Color, lw)
        cv.line(img, (FL_X, LC_Y), (FR_X, LC_Y), LC_Color, lw)
        cv.line(img, (FL_X, LB_Y), (FR_X, LB_Y), LB_Color, lw)
        img = cv.putText(img, 'Five Eye Metrics {:.2f}'.format(Five_Eye_Metrics), (25, 50), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
        img = cv.putText(img, 'A {:.2f}'.format(Five_Eye_Diff[0]), (25, 100), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
        img = cv.putText(img, 'B {:.2f}'.format(Five_Eye_Diff[2]), (25, 150), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
        img = cv.putText(img, 'C {:.2f}'.format(Five_Eye_Diff[4]), (25, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
        img = cv.putText(img, 'Three Section {:.2f}'.format(Three_Section_Metric_A), (25, 300), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
        img = cv.putText(img, '1/3 {:.2f}'.format(Three_Section_Metric_B), (25, 400), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
        img = cv.putText(img, '1/2 {:.2f}'.format(Three_Section_Metric_C), (25, 500), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
        img = cv.putText(img, 'Da Vinci {:.2f}'.format(Da_Vinci), (25, 600), cv.FONT_HERSHEY_SIMPLEX, 1, (218, 112, 214), 3)
    else:
        img = cv.putText(img, 'NO FACE DETECTED', (25, 50), cv.FONT_HERSHEY_SIMPLEX, 1.25, (218, 112, 214), 1)
    # Time when this frame finished processing
    end_time = time.time()
    # Frames processed per second (FPS)
    FPS = 1 / (end_time - start_time)
    img = cv.putText(img, 'FPS ' + str(int(FPS)), (25 * scaler, 700 * scaler), cv.FONT_HERSHEY_SIMPLEX, 1.25 * scaler, (0, 0, 255), 1)
    return img

# Open the webcam
cap = cv.VideoCapture(0)
cap.open(0)
# Loop until 'q' is pressed
while cap.isOpened():
    success, frame = cap.read()
    if not success:
        print('ERROR')
        break
    frame = process_frame(frame)
    # Show the processed three-channel frame
    cv.imshow('my_window', frame)
    if (cv.waitKey(1) & 0xFF) == ord('q'):
        break
cap.release()
cv.destroyAllWindows()
```
That concludes the detailed walkthrough of implementing a face beauty score with OpenCV and MediaPipe. For more material on OpenCV MediaPipe beauty scoring, please see our other related articles!