GitHub repository: https://github.com/menyifang/DCT-Net
1. Project Introduction
DCT-Net implements real-time cartoonization of human faces.
2. Environment Setup
Create a conda environment and install the dependencies (the torch/torchvision wheels below are local CUDA 11.3 wheels for Python 3.8 on Windows):
conda create -n dctnet python=3.8
conda activate dctnet
pip install torch-1.10.0+cu113-cp38-cp38-win_amd64.whl
pip install torchvision-0.11.0+cu113-cp38-cp38-win_amd64.whl
pip install opencv-python -i https://mirror.baidu.com/pypi/simple
pip install tensorflow==2.8.0 -i https://mirror.baidu.com/pypi/simple
pip install protobuf==3.20.1 -i https://mirror.baidu.com/pypi/simple
pip install easydict -i https://mirror.baidu.com/pypi/simple
pip install numpy==1.22 -i https://mirror.baidu.com/pypi/simple
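After installation, a quick sanity check helps confirm the environment before running anything heavier. This is a minimal sketch (the script name check_env.py is just an illustration); it only prints versions and whether PyTorch can see the GPU:
# check_env.py -- quick environment sanity check (illustrative)
import torch
import tensorflow as tf
import cv2
import numpy as np

print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())
print("tensorflow:", tf.__version__)
print("opencv:", cv2.__version__, "| numpy:", np.__version__)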
3. Running
Run the test script:
python test_video.py
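If you prefer to verify on a single image first, the following is a minimal sketch based on the same Cartoonizer API used in the script in section 4 below. The model folder name and the input/output file names are placeholders; point them at one of the downloaded style model directories and at your own image:
import cv2
import numpy as np
from source.cartoonize import Cartoonizer

# "models/anime_style" is a placeholder path; use one of the downloaded style folders
algo = Cartoonizer("models/anime_style")
img = cv2.imread("test.jpg")                 # BGR image read from disk
result = algo.cartoonize(img[..., ::-1])     # convert BGR -> RGB before cartoonizing, as in the video script
cv2.imwrite("result.jpg", np.array(result, dtype=np.uint8))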
4. Real-Time Cartoon Interaction
The script below grabs frames from the webcam, slightly boosts their brightness, cartoonizes each frame, and displays the result in real time (press q to quit):
import cv2
from source.cartoonize import Cartoonizer
import os
import numpy as np

def get_model_list(model_dir):
    # collect the paths of all style model folders under model_dir
    list_models = []
    m_dirs = os.listdir(model_dir)
    for name in m_dirs:
        path_model = os.path.join(model_dir, name)
        list_models.append(path_model)
    return list_models

def katong_video(i):
    list_models = get_model_list("models")
    print(list_models)
    algo = Cartoonizer(list_models[i])
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1080)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 640)
    while True:
        success, img = cap.read()
        if success:
            # Increase brightness
            # alpha = 1.1  # multiplicative gain
            # beta = 30    # additive offset
            # img = cv2.addWeighted(img, alpha, np.zeros(img.shape, dtype=img.dtype), 0, beta)
            brightness = 30  # brightness gain; try different values
            # scale pixel values by (1 + brightness/100), clipped to 255
            img = cv2.addWeighted(img, 1 + brightness / 100, img, 0, 0)
            # cartoonize expects an RGB image, so flip the BGR channels
            result = algo.cartoonize(img[..., ::-1])
            cv2.imshow('video', np.array(result, dtype=np.uint8))
        else:
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    # i selects which style model to use
    i = 5
    katong_video(i)
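To also save the cartoonized stream to disk, a cv2.VideoWriter can be wrapped around the same loop. This is a sketch on top of the script above; the output path and the 20 fps frame rate are assumptions, and the writer is created lazily from the first cartoonized frame so its size matches the actual output resolution:
import cv2
import numpy as np
from source.cartoonize import Cartoonizer

def katong_video_save(model_dir, out_path="cartoon_out.mp4"):
    algo = Cartoonizer(model_dir)
    cap = cv2.VideoCapture(0)
    writer = None
    while True:
        success, img = cap.read()
        if not success:
            break
        result = np.array(algo.cartoonize(img[..., ::-1]), dtype=np.uint8)
        if writer is None:
            # create the writer once the output resolution is known
            h, w = result.shape[:2]
            writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), 20.0, (w, h))
        writer.write(result)
        cv2.imshow('video', result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    if writer is not None:
        writer.release()
    cv2.destroyAllWindows()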