inference3_makeup.py
from os import listdir, path
from time import time
import numpy as np
import scipy, cv2, os, sys, argparse, audio
import json, subprocess, random, string
from tqdm import tqdm
from glob import glob
import torch, face_detection
from face_parsing import init_parser, swap_regions
from gfpgan import GFPGANer
from models import Wav2Lip
import platform
parser = argparse.ArgumentParser(description='Inference code to lip-sync videos in the wild using Wav2Lip models')

parser.add_argument('--checkpoint_path', type=str,
                    help='Name of saved checkpoint to load weights from', required=True)
parser.add_argument('--segmentation_path', type=str,
                    help='Name of saved checkpoint of segmentation network', required=True)
parser.add_argument('--gfpgan_path', type=str,
                    help='Name of saved checkpoint of the GFPGAN face restoration network', required=True)
parser.add_argument('--face', type=str,
                    help='Filepath of video/image that contains faces to use', required=True)
parser.add_argument('--audio', type=str,
                    help='Filepath of video/audio file to use as raw audio source', required=True)
parser.add_argument('--outfile', type=str, help='Video path to save result. See default for an e.g.',
                    default='results/result_voice.mp4')
parser.add_argument('--static', type=bool,
                    help='If True, then use only first video frame for inference', default=False)
parser.add_argument('--fps', type=float, help='Can be specified only if input is a static image (default: 25)',
                    default=25., required=False)
parser.add_argument('--pads', nargs='+', type=int, default=[0, 10, 0, 0],
                    help='Padding (top, bottom, left, right). Please adjust to include at least the chin')
parser.add_argument('--face_det_batch_size', type=int,
                    help='Batch size for face detection', default=16)
parser.add_argument('--wav2lip_batch_size', type=int, help='Batch size for Wav2Lip model(s)', default=128)
parser.add_argument('--resize_factor', default=1, type=int,
                    help='Reduce the resolution by this factor. Sometimes, best results are obtained at 480p or 720p')
parser.add_argument('--crop', nargs='+', type=int, default=[0, -1, 0, -1],
                    help='Crop video to a smaller region (top, bottom, left, right). Applied after resize_factor and rotate arg. '
                         'Useful if multiple faces are present. -1 implies the value will be auto-inferred based on height, width')
parser.add_argument('--box', nargs='+', type=int, default=[-1, -1, -1, -1],
                    help='Specify a constant bounding box for the face. Use only as a last resort if the face is not detected. '
                         'Also, might work only if the face is not moving around much. Syntax: (top, bottom, left, right).')
parser.add_argument('--rotate', default=False, action='store_true',
                    help='Sometimes videos taken from a phone can be flipped 90deg. If true, will rotate the video right by 90deg. '
                         'Use if you get a flipped result, despite feeding a normal looking video')
parser.add_argument('--nosmooth', default=False, action='store_true',
                    help='Prevent smoothing face detections over a short temporal window')
parser.add_argument('--no_segmentation', default=False, action='store_true',
                    help='Prevent using face segmentation')
parser.add_argument(
    '-s', '--upscale', type=int, default=2, help='The final upsampling scale of the image. Default: 2')
parser.add_argument('--only_center_face', action='store_true', help='Only restore the center face')
parser.add_argument('--aligned', action='store_true', help='Input are aligned faces')
parser.add_argument(
    '--bg_upsampler', type=str, default='realesrgan', help='Background upsampler. Default: realesrgan')
parser.add_argument(
    '--bg_tile',
    type=int,
    default=400,
    help='Tile size for background upsampler, 0 for no tile during testing. Default: 400')
parser.add_argument('-w', '--weight', type=float, default=0.5, help='Adjustable weights.')
args = parser.parse_args()
args.img_size = 96
if os.path.isfile(args.face) and args.face.split('.')[-1] in ['jpg', 'png', 'jpeg']:
    args.static = True

def get_smoothened_boxes(boxes, T):
    # Smooth detections with a moving average over a window of T boxes to reduce jitter.
    for i in range(len(boxes)):
        if i + T > len(boxes):
            window = boxes[len(boxes) - T:]
        else:
            window = boxes[i: i + T]
        boxes[i] = np.mean(window, axis=0)
    return boxes
def face_detect(images):
    # TODO: detect the face location in every frame
    detector = face_detection.FaceAlignment(face_detection.LandmarksType._2D,
                                            flip_input=False, device=device)

    batch_size = args.face_det_batch_size

    while 1:
        predictions = []
        try:
            for i in tqdm(range(0, len(images), batch_size)):
                predictions.extend(detector.get_detections_for_batch(np.array(images[i:i + batch_size])))
        except RuntimeError:
            if batch_size == 1:
                raise RuntimeError('Image too big to run face detection on GPU. Please use the --resize_factor argument')
            batch_size //= 2
            print('Recovering from OOM error; New batch size: {}'.format(batch_size))
            continue
        break

    head_exist = []
    results = []
    pady1, pady2, padx1, padx2 = args.pads

    # Find the first frame that actually contains a face; its box serves as a fallback
    # for leading frames where no face was detected.
    first_head_rect = None
    first_head_image = None
    for rect, image in zip(predictions, images):
        if rect is not None:
            first_head_rect = rect
            first_head_image = image
            break

    for rect, image in zip(predictions, images):
        if rect is None:
            head_exist.append(False)
            if len(results) == 0:
                y1 = max(0, first_head_rect[1] - pady1)
                y2 = min(first_head_image.shape[0], first_head_rect[3] + pady2)
                x1 = max(0, first_head_rect[0] - padx1)
                x2 = min(first_head_image.shape[1], first_head_rect[2] + padx2)
                results.append([x1, y1, x2, y2])
            else:
                results.append(results[-1])
            # cv2.imwrite('temp/faulty_frame.jpg', image)  # check this frame where the face was not detected.
            # raise ValueError('Face not detected! Ensure the video contains a face in all the frames.')
        else:
            head_exist.append(True)
            y1 = max(0, rect[1] - pady1)
            y2 = min(image.shape[0], rect[3] + pady2)
            x1 = max(0, rect[0] - padx1)
            x2 = min(image.shape[1], rect[2] + padx2)
            results.append([x1, y1, x2, y2])

    boxes = np.array(results)
    if not args.nosmooth:
        boxes = get_smoothened_boxes(boxes, T=5)
    results = [[image[y1:y2, x1:x2], (y1, y2, x1, x2)] for image, (x1, y1, x2, y2) in zip(images, boxes)]

    del detector
    return results, head_exist
def datagen(frames, mels):
    img_batch, head_exist_batch, mel_batch, frame_batch, coords_batch = [], [], [], [], []

    # *************************** 1. Locate the face box in each frame; frames without a detected face are flagged ***************************
    if args.box[0] == -1:
        if not args.static:
            face_det_results, head_exist = face_detect(frames)  # BGR2RGB for CNN face detection
        else:
            face_det_results, head_exist = face_detect([frames[0]])
    else:
        print('Using the specified bounding box instead of face detection...')
        y1, y2, x1, x2 = args.box
        face_det_results = [[f[y1: y2, x1:x2], (y1, y2, x1, x2)] for f in frames]
        head_exist = [True] * len(frames)

    for i, m in enumerate(mels):
        # Index of the video frame paired with this mel chunk
        idx = 0 if args.static else i % len(frames)
        # Frame paired with this mel chunk
        frame_to_save = frames[idx].copy()
        # Face crop and coordinates for that frame
        face, coords = face_det_results[idx].copy()

        face = cv2.resize(face, (args.img_size, args.img_size))

        head_exist_batch.append(head_exist[idx])
        img_batch.append(face)
        mel_batch.append(m)
        frame_batch.append(frame_to_save)
        coords_batch.append(coords)

        if len(img_batch) >= args.wav2lip_batch_size:
            img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)

            # Mask the lower half of each face; the model input is the masked face
            # concatenated with the reference face along the channel axis.
            img_masked = img_batch.copy()
            img_masked[:, args.img_size//2:] = 0

            img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.
            mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])

            yield img_batch, head_exist_batch, mel_batch, frame_batch, coords_batch
            img_batch, head_exist_batch, mel_batch, frame_batch, coords_batch = [], [], [], [], []

    if len(img_batch) > 0:
        img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)

        img_masked = img_batch.copy()
        img_masked[:, args.img_size//2:] = 0

        img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.
        mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])

        yield img_batch, head_exist_batch, mel_batch, frame_batch, coords_batch
mel_step_size = 16
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} for inference.'.format(device))
def _load(checkpoint_path):
    if device == 'cuda':
        checkpoint = torch.load(checkpoint_path)
    else:
        checkpoint = torch.load(checkpoint_path,
                                map_location=lambda storage, loc: storage)
    return checkpoint

def load_model(path):
    model = Wav2Lip()
    print("Load checkpoint from: {}".format(path))
    checkpoint = _load(path)
    s = checkpoint["state_dict"]
    new_s = {}
    for k, v in s.items():
        new_s[k.replace('module.', '')] = v
    model.load_state_dict(new_s)

    model = model.to(device)
    return model.eval()
def main():
    if not os.path.isfile(args.face):
        raise ValueError('--face argument must be a valid path to video/image file')

    elif args.face.split('.')[-1] in ['jpg', 'png', 'jpeg']:
        full_frames = [cv2.imread(args.face)]
        fps = args.fps

    else:
        video_stream = cv2.VideoCapture(args.face)
        fps = video_stream.get(cv2.CAP_PROP_FPS)

        print('Reading video frames...')

        full_frames = []
        while 1:
            still_reading, frame = video_stream.read()
            if not still_reading:
                video_stream.release()
                break
            if args.resize_factor > 1:
                frame = cv2.resize(frame, (frame.shape[1]//args.resize_factor, frame.shape[0]//args.resize_factor))

            if args.rotate:
                frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)

            y1, y2, x1, x2 = args.crop
            if x2 == -1: x2 = frame.shape[1]
            if y2 == -1: y2 = frame.shape[0]

            frame = frame[y1:y2, x1:x2]
            full_frames.append(frame)

    print("Number of frames available for inference: " + str(len(full_frames)))

    if not args.audio.endswith('.wav'):
        print('Extracting raw audio...')
        command = 'ffmpeg -y -i {} -strict -2 {}'.format(args.audio, 'temp/temp.wav')

        subprocess.call(command, shell=True)
        args.audio = 'temp/temp.wav'

    wav = audio.load_wav(args.audio, 16000)
    mel = audio.melspectrogram(wav)
    print(mel.shape)

    if np.isnan(mel.reshape(-1)).sum() > 0:
        raise ValueError('Mel contains nan! Using a TTS voice? Add a small epsilon noise to the wav file and try again')
    mel_chunks = []
    # TODO: align audio with video. The mel spectrogram has 80 frames per second, so each
    # video frame advances the mel index by 80/fps (e.g. 3.2 mel frames at 25 fps); each
    # chunk of mel_step_size = 16 mel frames (~0.2 s of audio) is paired with one video frame.
    mel_idx_multiplier = 80. / fps
    i = 0
    while 1:
        start_idx = int(i * mel_idx_multiplier)
        if start_idx + mel_step_size > len(mel[0]):
            mel_chunks.append(mel[:, len(mel[0]) - mel_step_size:])
            break
        mel_chunks.append(mel[:, start_idx: start_idx + mel_step_size])
        i += 1

    print("Length of mel chunks: {}".format(len(mel_chunks)))

    # TODO: match the video length to the audio: keep only as many frames as there are mel chunks
    full_frames = full_frames[:len(mel_chunks)]

    batch_size = args.wav2lip_batch_size
    gen = datagen(full_frames.copy(), mel_chunks)

    # Overwrite the corresponding frames (pixels in the head region)
    for i, (img_batch, exist_head_batch, mel_batch, frames, coords) in enumerate(tqdm(gen,
                                                        total=int(np.ceil(float(len(mel_chunks)) / batch_size)))):
        if i == 0:
            print("Loading segmentation network...")
            seg_net = init_parser(args.segmentation_path)

            arch = 'clean'
            channel_multiplier = 2
            # url = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth'

            if args.bg_upsampler == 'realesrgan':
                if not torch.cuda.is_available():  # CPU
                    import warnings
                    warnings.warn('The unoptimized RealESRGAN is slow on CPU. We do not use it. '
                                  'If you really want to use it, please modify the corresponding codes.')
                    bg_upsampler = None
                else:
                    from basicsr.archs.rrdbnet_arch import RRDBNet
                    from realesrgan import RealESRGANer
                    model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
                    bg_upsampler = RealESRGANer(
                        scale=2,
                        model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
                        model=model,
                        tile=args.bg_tile,
                        tile_pad=10,
                        pre_pad=0,
                        half=True)  # need to set False in CPU mode
            else:
                bg_upsampler = None

            restorer = GFPGANer(
                model_path=args.gfpgan_path,
                upscale=args.upscale,
                arch=arch,
                channel_multiplier=channel_multiplier,
                bg_upsampler=bg_upsampler)

            model = load_model(args.checkpoint_path)
            print("Model loaded")

            # frame_h, frame_w = full_frames[0].shape[:-1]
            out = None

        img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(device)
        mel_batch = torch.FloatTensor(np.transpose(mel_batch, (0, 3, 1, 2))).to(device)
        print("batch write message:", len(img_batch), len(frames), len(coords), len(exist_head_batch))

        with torch.no_grad():
            pred = model(mel_batch, img_batch)

        pred = pred.cpu().numpy().transpose(0, 2, 3, 1) * 255.
        # Update each frame and write it to the temporary video file
        j = 0
        for p, f, c, exist in zip(pred, frames, coords, exist_head_batch):
            y1, y2, x1, x2 = c
            p = cv2.resize(p.astype(np.uint8), (x2 - x1, y2 - y1))
            if exist:
                if not args.no_segmentation:
                    p = swap_regions(f[y1:y2, x1:x2], p, seg_net)
                head_high, head_width, _ = p.shape
                # f[y1:y2-int(head_high*0.1), x1:x2] = p[0:head_high-int(head_high*0.1), :]
                f[y1:y2, x1:x2] = p
            else:
                # No face detected in this frame: paste only the central strip of the
                # predicted patch to limit visible seams at the borders.
                head_high, head_width, _ = p.shape
                width_cut = int(head_width * 0.2)
                f[y1:y2, x1 + width_cut:x2 - width_cut] = p[:, width_cut:head_width - width_cut]

            cropped_faces, restored_faces, restored_img = restorer.enhance(
                f,
                has_aligned=args.aligned,
                only_center_face=args.only_center_face,
                paste_back=True,
                weight=args.weight)
            restored_img = np.clip(restored_img, 0, 255).astype(np.uint8)

            if i == 0 and j == 0:
                out = cv2.VideoWriter('temp/result.avi', cv2.VideoWriter_fourcc(*'DIVX'), int(fps),
                                      (restored_img.shape[1], restored_img.shape[0]))
            out.write(restored_img)
            j += 1

    out.release()

    command = 'ffmpeg -y -i {} -i {} -c:v libx264 -c:a aac -map 0:a:0 -map 1:v:0 {}'.format(args.audio, 'temp/result.avi', args.outfile)
    subprocess.call(command, shell=platform.system() != 'Windows')
if __name__ == '__main__':
    start_time = time()
    main()
    print("total time: ", time() - start_time)