Real-ESRGAN
/
inference_realesrgan_video.py
398 lines · 16.5 KB
1import argparse2import cv23import glob4import mimetypes5import numpy as np6import os7import shutil8import subprocess9import torch10from basicsr.archs.rrdbnet_arch import RRDBNet11from basicsr.utils.download_util import load_file_from_url12from os import path as osp13from tqdm import tqdm14
15from realesrgan import RealESRGANer16from realesrgan.archs.srvgg_arch import SRVGGNetCompact17
# ffmpeg-python is an optional dependency; install it on the fly if missing.
try:
    import ffmpeg
except ImportError:
    import subprocess
    import sys
    # `pip.main` is not a supported programmatic API and can leave pip in a
    # broken state; the documented way is to invoke pip as a subprocess of
    # the current interpreter.
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--user', 'ffmpeg-python'])
    import ffmpeg
25
def get_video_meta_info(video_path):
    """Probe a video with ffmpeg and return its basic metadata.

    Args:
        video_path (str): Path to the video file.

    Returns:
        dict: Keys ``width``, ``height``, ``fps`` (float), ``audio``
            (ffmpeg audio input stream, or None when the file has no audio
            track) and ``nb_frames`` (int).
    """
    from fractions import Fraction  # local: only needed for fps parsing

    ret = {}
    probe = ffmpeg.probe(video_path)
    video_streams = [stream for stream in probe['streams'] if stream['codec_type'] == 'video']
    has_audio = any(stream['codec_type'] == 'audio' for stream in probe['streams'])
    ret['width'] = video_streams[0]['width']
    ret['height'] = video_streams[0]['height']
    # avg_frame_rate is a rational string such as '30000/1001'; parse it with
    # Fraction instead of eval() so arbitrary probe output is never executed.
    ret['fps'] = float(Fraction(video_streams[0]['avg_frame_rate']))
    ret['audio'] = ffmpeg.input(video_path).audio if has_audio else None
    ret['nb_frames'] = int(video_streams[0]['nb_frames'])
    return ret
38
def get_sub_video(args, num_process, process_idx):
    """Cut the input video into `num_process` equal-duration parts with ffmpeg.

    Args:
        args: Parsed CLI namespace (uses input, output, video_name, ffmpeg_bin).
        num_process (int): Total number of worker processes.
        process_idx (int): Index of this worker's part.

    Returns:
        str: Path of the sub-video for this worker (or the original input
            when there is only one process).
    """
    if num_process == 1:
        return args.input
    meta = get_video_meta_info(args.input)
    duration = int(meta['nb_frames'] / meta['fps'])
    part_time = duration // num_process
    print(f'duration: {duration}, part_time: {part_time}')
    os.makedirs(osp.join(args.output, f'{args.video_name}_inp_tmp_videos'), exist_ok=True)
    out_path = osp.join(args.output, f'{args.video_name}_inp_tmp_videos', f'{process_idx:03d}.mp4')
    # Build the command as a proper argument list (one token per element) and
    # run it without a shell: the original fused flags and values into single
    # strings (e.g. '-i <path>') and joined them through `shell=True`, which
    # breaks on paths with spaces and allows shell injection.
    cmd = [args.ffmpeg_bin, '-i', args.input, '-ss', f'{part_time * process_idx}']
    if process_idx != num_process - 1:  # the last part runs to the end of the video
        cmd += ['-to', f'{part_time * (process_idx + 1)}']
    cmd += ['-async', '1', out_path, '-y']
    print(' '.join(cmd))
    subprocess.call(cmd)
    return out_path
56
class Reader:
    """Yields BGR frames from a video (via an ffmpeg pipe), a single image, or a folder of images."""

    def __init__(self, args, total_workers=1, worker_idx=0):
        self.args = args
        guessed = mimetypes.guess_type(args.input)[0]
        self.input_type = 'folder' if guessed is None else guessed
        self.paths = []  # populated only for image & folder inputs
        self.audio = None
        self.input_fps = None
        if self.input_type.startswith('video'):
            video_path = get_sub_video(args, total_workers, worker_idx)
            # Decode to raw bgr24 frames on ffmpeg's stdout so frames can be
            # read as fixed-size byte chunks.
            self.stream_reader = (
                ffmpeg.input(video_path).output('pipe:', format='rawvideo', pix_fmt='bgr24',
                                                loglevel='error').run_async(
                                                    pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin))
            meta = get_video_meta_info(video_path)
            self.width = meta['width']
            self.height = meta['height']
            self.input_fps = meta['fps']
            self.audio = meta['audio']
            self.nb_frames = meta['nb_frames']
        else:
            if self.input_type.startswith('image'):
                self.paths = [args.input]
            else:
                all_paths = sorted(glob.glob(os.path.join(args.input, '*')))
                total = len(all_paths)
                # Ceil-divide frames across workers; each worker takes a
                # contiguous slice.
                per_worker = total // total_workers + (1 if total % total_workers else 0)
                self.paths = all_paths[per_worker * worker_idx:per_worker * (worker_idx + 1)]

            self.nb_frames = len(self.paths)
            assert self.nb_frames > 0, 'empty folder'
            from PIL import Image
            probe_img = Image.open(self.paths[0])
            self.width, self.height = probe_img.size
        self.idx = 0  # cursor into self.paths for list-based reading

    def get_resolution(self):
        """Return (height, width) of the input frames."""
        return self.height, self.width

    def get_fps(self):
        """Return the output fps: CLI override first, then source fps, else 24."""
        if self.args.fps is not None:
            return self.args.fps
        if self.input_fps is not None:
            return self.input_fps
        return 24

    def get_audio(self):
        """Return the source audio stream (or None)."""
        return self.audio

    def __len__(self):
        return self.nb_frames

    def get_frame_from_stream(self):
        """Read one raw frame from the ffmpeg pipe; None at end of stream."""
        raw = self.stream_reader.stdout.read(self.width * self.height * 3)  # bgr24: 3 bytes per pixel
        if not raw:
            return None
        return np.frombuffer(raw, np.uint8).reshape([self.height, self.width, 3])

    def get_frame_from_list(self):
        """Read the next image from self.paths; None when exhausted."""
        if self.idx >= self.nb_frames:
            return None
        frame = cv2.imread(self.paths[self.idx])
        self.idx += 1
        return frame

    def get_frame(self):
        """Dispatch to the stream or list reader depending on the input type."""
        if self.input_type.startswith('video'):
            return self.get_frame_from_stream()
        return self.get_frame_from_list()

    def close(self):
        """Shut down the ffmpeg reader process (no-op for image/folder input)."""
        if self.input_type.startswith('video'):
            self.stream_reader.stdin.close()
            self.stream_reader.wait()
136
class Writer:
    """Pipes BGR frames into ffmpeg, encoding an H.264 mp4 (muxing the source audio when present)."""

    def __init__(self, args, audio, height, width, video_save_path, fps):
        out_width = int(width * args.outscale)
        out_height = int(height * args.outscale)
        if out_height > 2160:
            print('You are generating video that is larger than 4K, which will be very slow due to IO speed.',
                  'We highly recommend to decrease the outscale(aka, -s).')

        # Frames arrive on stdin as raw bgr24 at the upscaled resolution.
        frame_source = ffmpeg.input(
            'pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{out_width}x{out_height}', framerate=fps)
        if audio is not None:
            # Stream-copy the original audio track alongside the encoded video.
            self.stream_writer = (
                frame_source.output(
                    audio,
                    video_save_path,
                    pix_fmt='yuv420p',
                    vcodec='libx264',
                    loglevel='error',
                    acodec='copy').overwrite_output().run_async(
                        pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin))
        else:
            self.stream_writer = (
                frame_source.output(
                    video_save_path, pix_fmt='yuv420p', vcodec='libx264',
                    loglevel='error').overwrite_output().run_async(
                        pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin))

    def write_frame(self, frame):
        """Serialize one frame as uint8 bytes and feed it to the encoder."""
        self.stream_writer.stdin.write(frame.astype(np.uint8).tobytes())

    def close(self):
        """Flush stdin and wait for the ffmpeg encoder to finish."""
        self.stream_writer.stdin.close()
        self.stream_writer.wait()
172
def _get_model_and_urls(model_name):
    """Map a known model name to (network, native upscale factor, weight URLs).

    Raises:
        ValueError: If `model_name` is not one of the supported models
            (the original code would fail later with a NameError).
    """
    if model_name == 'RealESRGAN_x4plus':  # x4 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth']
    elif model_name == 'RealESRNet_x4plus':  # x4 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth']
    elif model_name == 'RealESRGAN_x4plus_anime_6B':  # x4 RRDBNet model with 6 blocks
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth']
    elif model_name == 'RealESRGAN_x2plus':  # x2 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
        netscale = 2
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth']
    elif model_name == 'realesr-animevideov3':  # x4 VGG-style model (XS size)
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth']
    elif model_name == 'realesr-general-x4v3':  # x4 VGG-style model (S size)
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
        netscale = 4
        file_url = [
            'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth',
            'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth'
        ]
    else:
        raise ValueError(f'Unknown model name: {model_name}')
    return model, netscale, file_url


def inference_video(args, video_save_path, device=None, total_workers=1, worker_idx=0):
    """Upscale one video (or one worker's share of it) and write the result.

    Args:
        args: Parsed CLI namespace.
        video_save_path (str): Output mp4 path for this worker.
        device (torch.device | None): Device to run on; None lets the
            restorer pick its default.
        total_workers (int): Number of parallel worker processes.
        worker_idx (int): Index of this worker.
    """
    # ---------------------- determine models according to model names ---------------------- #
    args.model_name = args.model_name.split('.pth')[0]
    model, netscale, file_url = _get_model_and_urls(args.model_name)

    # ---------------------- determine model paths ---------------------- #
    model_path = os.path.join('weights', args.model_name + '.pth')
    if not os.path.isfile(model_path):
        ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
        for url in file_url:
            # model_path will be updated; the last URL wins for multi-file models
            model_path = load_file_from_url(
                url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None)

    # use dni (deep network interpolation) to control the denoise strength
    dni_weight = None
    if args.model_name == 'realesr-general-x4v3' and args.denoise_strength != 1:
        wdn_model_path = model_path.replace('realesr-general-x4v3', 'realesr-general-wdn-x4v3')
        model_path = [model_path, wdn_model_path]
        dni_weight = [args.denoise_strength, 1 - args.denoise_strength]

    # restorer
    upsampler = RealESRGANer(
        scale=netscale,
        model_path=model_path,
        dni_weight=dni_weight,
        model=model,
        tile=args.tile,
        tile_pad=args.tile_pad,
        pre_pad=args.pre_pad,
        half=not args.fp32,
        device=device,
    )

    if 'anime' in args.model_name and args.face_enhance:
        print('face_enhance is not supported in anime models, we turned this option off for you. '
              'if you insist on turning it on, please manually comment the relevant lines of code.')
        args.face_enhance = False

    if args.face_enhance:  # Use GFPGAN for face enhancement
        from gfpgan import GFPGANer
        face_enhancer = GFPGANer(
            model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth',
            upscale=args.outscale,
            arch='clean',
            channel_multiplier=2,
            bg_upsampler=upsampler)  # TODO support custom device
    else:
        face_enhancer = None

    reader = Reader(args, total_workers, worker_idx)
    audio = reader.get_audio()
    height, width = reader.get_resolution()
    fps = reader.get_fps()
    writer = Writer(args, audio, height, width, video_save_path, fps)

    pbar = tqdm(total=len(reader), unit='frame', desc='inference')
    while True:
        img = reader.get_frame()
        if img is None:
            break

        try:
            if args.face_enhance:
                _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
            else:
                output, _ = upsampler.enhance(img, outscale=args.outscale)
        except RuntimeError as error:
            # Best-effort: report and skip the frame rather than abort the run.
            print('Error', error)
            print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
        else:
            writer.write_frame(output)

        # Guarded so CPU-only environments don't crash on the CUDA call.
        if torch.cuda.is_available():
            torch.cuda.synchronize(device)
        pbar.update(1)

    reader.close()
    writer.close()
278
def run(args):
    """Upscale args.input end-to-end and write ``<video_name>_<suffix>.mp4`` to args.output.

    With multiple GPUs/processes the input is split by time, each worker
    writes a sub-video, and the parts are concatenated with ffmpeg's concat
    demuxer; temporary artifacts are removed afterwards.
    """
    args.video_name = osp.splitext(os.path.basename(args.input))[0]
    video_save_path = osp.join(args.output, f'{args.video_name}_{args.suffix}.mp4')

    if args.extract_frame_first:
        tmp_frames_folder = osp.join(args.output, f'{args.video_name}_inp_tmp_frames')
        os.makedirs(tmp_frames_folder, exist_ok=True)
        # Use an argument list and the configured ffmpeg binary instead of an
        # os.system() shell string: the f-string form broke on paths with
        # spaces and allowed shell injection, and it ignored --ffmpeg_bin.
        subprocess.call([
            args.ffmpeg_bin, '-i', args.input, '-qscale:v', '1', '-qmin', '1', '-qmax', '1', '-vsync', '0',
            osp.join(tmp_frames_folder, 'frame%08d.png')
        ])
        args.input = tmp_frames_folder

    num_gpus = torch.cuda.device_count()
    num_process = num_gpus * args.num_process_per_gpu
    if num_process == 1:
        inference_video(args, video_save_path)
        return

    # Fan out: each worker upscales one time-slice of the input into its own mp4.
    ctx = torch.multiprocessing.get_context('spawn')
    pool = ctx.Pool(num_process)
    os.makedirs(osp.join(args.output, f'{args.video_name}_out_tmp_videos'), exist_ok=True)
    pbar = tqdm(total=num_process, unit='sub_video', desc='inference')
    for i in range(num_process):
        sub_video_save_path = osp.join(args.output, f'{args.video_name}_out_tmp_videos', f'{i:03d}.mp4')
        pool.apply_async(
            inference_video,
            args=(args, sub_video_save_path, torch.device(i % num_gpus), num_process, i),
            callback=lambda arg: pbar.update(1))
    pool.close()
    pool.join()

    # combine sub videos: prepare vidlist.txt for ffmpeg's concat demuxer
    with open(f'{args.output}/{args.video_name}_vidlist.txt', 'w') as f:
        for i in range(num_process):
            f.write(f'file \'{args.video_name}_out_tmp_videos/{i:03d}.mp4\'\n')

    cmd = [
        args.ffmpeg_bin, '-f', 'concat', '-safe', '0', '-i', f'{args.output}/{args.video_name}_vidlist.txt', '-c',
        'copy', f'{video_save_path}'
    ]
    print(' '.join(cmd))
    subprocess.call(cmd)
    # clean up temporary sub-videos and the concat list
    shutil.rmtree(osp.join(args.output, f'{args.video_name}_out_tmp_videos'))
    if osp.exists(osp.join(args.output, f'{args.video_name}_inp_tmp_videos')):
        shutil.rmtree(osp.join(args.output, f'{args.video_name}_inp_tmp_videos'))
    os.remove(f'{args.output}/{args.video_name}_vidlist.txt')
325
def main():
    """Inference demo for Real-ESRGAN.
    It mainly for restoring anime videos.

    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str, default='inputs', help='Input video, image or folder')
    parser.add_argument(
        '-n',
        '--model_name',
        type=str,
        default='realesr-animevideov3',
        help=('Model names: realesr-animevideov3 | RealESRGAN_x4plus_anime_6B | RealESRGAN_x4plus | RealESRNet_x4plus |'
              ' RealESRGAN_x2plus | realesr-general-x4v3'
              'Default:realesr-animevideov3'))
    parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
    parser.add_argument(
        '-dn',
        '--denoise_strength',
        type=float,
        default=0.5,
        help=('Denoise strength. 0 for weak denoise (keep noise), 1 for strong denoise ability. '
              'Only used for the realesr-general-x4v3 model'))
    parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
    parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored video')
    parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
    parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
    parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
    parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
    parser.add_argument(
        '--fp32', action='store_true', help='Use fp32 precision during inference. Default: fp16 (half precision).')
    parser.add_argument('--fps', type=float, default=None, help='FPS of the output video')
    parser.add_argument('--ffmpeg_bin', type=str, default='ffmpeg', help='The path to ffmpeg')
    parser.add_argument('--extract_frame_first', action='store_true')
    parser.add_argument('--num_process_per_gpu', type=int, default=1)

    parser.add_argument(
        '--alpha_upsampler',
        type=str,
        default='realesrgan',
        help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
    parser.add_argument(
        '--ext',
        type=str,
        default='auto',
        help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
    args = parser.parse_args()

    # strip a trailing path separator so basename/splitext work as expected
    args.input = args.input.rstrip('/').rstrip('\\')
    os.makedirs(args.output, exist_ok=True)

    mime = mimetypes.guess_type(args.input)[0]
    is_video = mime is not None and mime.startswith('video')

    if is_video and args.input.endswith('.flv'):
        # Remux flv to mp4 (stream copy). Replace only the trailing extension:
        # str.replace('.flv', '.mp4') would also rewrite '.flv' occurring
        # anywhere else in the path. Run ffmpeg as an argument list (no shell)
        # and honor --ffmpeg_bin.
        mp4_path = args.input[:-len('.flv')] + '.mp4'
        subprocess.call([args.ffmpeg_bin, '-i', args.input, '-codec', 'copy', mp4_path])
        args.input = mp4_path

    # extracting frames only makes sense for video input
    if args.extract_frame_first and not is_video:
        args.extract_frame_first = False

    run(args)

    if args.extract_frame_first:
        tmp_frames_folder = osp.join(args.output, f'{args.video_name}_inp_tmp_frames')
        shutil.rmtree(tmp_frames_folder)
396
# Script entry point: only run inference when executed directly, not on import.
if __name__ == '__main__':
    main()