import torch
from collections import Counter
from os import path as osp
from torch import distributed as dist
from tqdm import tqdm

from basicsr.metrics import calculate_metric
from basicsr.utils import get_root_logger, imwrite, tensor2img
from basicsr.utils.dist_util import get_dist_info
from basicsr.utils.registry import MODEL_REGISTRY
from .sr_model import SRModel


@MODEL_REGISTRY.register()
class VideoBaseModel(SRModel):
    """Base video SR model."""

    def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
        dataset = dataloader.dataset
        dataset_name = dataset.opt['name']
        with_metrics = self.opt['val']['metrics'] is not None
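        # For reference, a typical (hypothetical) metrics section of the option
        # file looks like:
        #   val:
        #     metrics:
        #       psnr:  # metric name, used as the key in metric_results
        #         type: calculate_psnr
        #         crop_border: 0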
        # initialize self.metric_results
        # It is a dict: {
        #    'folder1': tensor (num_frame x len(metrics)),
        #    'folder2': tensor (num_frame x len(metrics))
        # }
        if with_metrics:
            if not hasattr(self, 'metric_results'):  # only execute in the first run
                self.metric_results = {}
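                # Counter gives the number of frames per clip, so each folder
                # gets a (num_frame x num_metrics) accumulator on the GPU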
                num_frame_each_folder = Counter(dataset.data_info['folder'])
                for folder, num_frame in num_frame_each_folder.items():
                    self.metric_results[folder] = torch.zeros(
                        num_frame, len(self.opt['val']['metrics']), dtype=torch.float32, device='cuda')
            # initialize the best metric results
            self._initialize_best_metric_results(dataset_name)
        # zero self.metric_results
        rank, world_size = get_dist_info()
        if with_metrics:
            for _, tensor in self.metric_results.items():
                tensor.zero_()

        metric_data = dict()
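
        # frames are sharded round-robin across ranks (idx = rank, rank +
        # world_size, ...), so every GPU evaluates a disjoint subset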
        # record all frames (border and center frames)
        if rank == 0:
            pbar = tqdm(total=len(dataset), unit='frame')
        for idx in range(rank, len(dataset), world_size):
            val_data = dataset[idx]
            val_data['lq'].unsqueeze_(0)
            val_data['gt'].unsqueeze_(0)
            folder = val_data['folder']
            frame_idx, max_idx = val_data['idx'].split('/')
            lq_path = val_data['lq_path']

            self.feed_data(val_data)
            self.test()
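            # (test(), inherited from SRModel, runs the forward pass with
            # gradients disabled and stores the result in self.output)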
            visuals = self.get_current_visuals()
            result_img = tensor2img([visuals['result']])
            metric_data['img'] = result_img
            if 'gt' in visuals:
                gt_img = tensor2img([visuals['gt']])
                metric_data['img2'] = gt_img
                del self.gt

            # free intermediate tensors to avoid running out of GPU memory
            del self.lq
            del self.output
            torch.cuda.empty_cache()

            if save_img:
                if self.opt['is_train']:
                    raise NotImplementedError('saving image is not supported during training.')
                else:
                    if 'vimeo' in dataset_name.lower():  # vimeo90k dataset
                        split_result = lq_path.split('/')
                        img_name = f'{split_result[-3]}_{split_result[-2]}_{split_result[-1].split(".")[0]}'
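                        # e.g., a path like '.../00001/0266/im4.png' yields
                        # img_name '00001_0266_im4'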
                    else:  # other datasets, e.g., REDS, Vid4
                        img_name = osp.splitext(osp.basename(lq_path))[0]

                    if self.opt['val']['suffix']:
                        save_img_path = osp.join(self.opt['path']['visualization'], dataset_name, folder,
                                                 f'{img_name}_{self.opt["val"]["suffix"]}.png')
                    else:
                        save_img_path = osp.join(self.opt['path']['visualization'], dataset_name, folder,
                                                 f'{img_name}_{self.opt["name"]}.png')
                    imwrite(result_img, save_img_path)

            if with_metrics:
                # calculate metrics
                for metric_idx, opt_ in enumerate(self.opt['val']['metrics'].values()):
                    result = calculate_metric(metric_data, opt_)
                    self.metric_results[folder][int(frame_idx), metric_idx] += result
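
            # progress bar: rank 0 advances it by world_size per frame it
            # processes, covering the frames handled concurrently by other ranks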
            if rank == 0:
                for _ in range(world_size):
                    pbar.update(1)
                    pbar.set_description(f'Test {folder}: {int(frame_idx) + world_size}/{max_idx}')
        if rank == 0:
            pbar.close()

        if with_metrics:
            if self.opt['dist']:
                # collect data among GPUs
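                # (the default reduce op is SUM; each frame's row was written by
                # exactly one rank, so rank 0 ends up with every frame's metrics)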
                for _, tensor in self.metric_results.items():
                    dist.reduce(tensor, 0)
                dist.barrier()
            else:
                pass  # assume a single GPU in non-dist testing

            if rank == 0:
                self._log_validation_metric_values(current_iter, dataset_name, tb_logger)

    def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
        logger = get_root_logger()
        logger.warning('nondist_validation is not implemented. Run dist_validation.')
        self.dist_validation(dataloader, current_iter, tb_logger, save_img)

    def _log_validation_metric_values(self, current_iter, dataset_name, tb_logger):
        # ----------------- calculate the average values for each folder, and for each metric ----------------- #
        # average all frames for each sub-folder
        # metric_results_avg is a dict: {
        #    'folder1': tensor (len(metrics)),
        #    'folder2': tensor (len(metrics))
        # }
        metric_results_avg = {
            folder: torch.mean(tensor, dim=0).cpu()
            for (folder, tensor) in self.metric_results.items()
        }
        # total_avg_results is a dict: {
        #    'metric1': float,
        #    'metric2': float
        # }
        total_avg_results = {metric: 0 for metric in self.opt['val']['metrics'].keys()}
        for folder, tensor in metric_results_avg.items():
            for idx, metric in enumerate(total_avg_results.keys()):
                total_avg_results[metric] += metric_results_avg[folder][idx].item()
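        # frames are first averaged within each folder, then folders are
        # averaged with equal weight, so clips of different lengths count equally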
        # average among folders
        for metric in total_avg_results.keys():
            total_avg_results[metric] /= len(metric_results_avg)
            # update the best metric result
            self._update_best_metric_result(dataset_name, metric, total_avg_results[metric], current_iter)

        # ------------------------------------------ log the metric ------------------------------------------ #
        log_str = f'Validation {dataset_name}\n'
        for metric_idx, (metric, value) in enumerate(total_avg_results.items()):
            log_str += f'\t # {metric}: {value:.4f}'
            for folder, tensor in metric_results_avg.items():
                log_str += f'\t # {folder}: {tensor[metric_idx].item():.4f}'
            if hasattr(self, 'best_metric_results'):
                log_str += (f'\n\t Best: {self.best_metric_results[dataset_name][metric]["val"]:.4f} @ '
                            f'{self.best_metric_results[dataset_name][metric]["iter"]} iter')
        logger = get_root_logger()
        logger.info(log_str)
        if tb_logger:
            for metric_idx, (metric, value) in enumerate(total_avg_results.items()):
                tb_logger.add_scalar(f'metrics/{metric}', value, current_iter)
                for folder, tensor in metric_results_avg.items():
                    tb_logger.add_scalar(f'metrics/{metric}/{folder}', tensor[metric_idx].item(), current_iter)
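

# Usage sketch (hypothetical, for illustration): in BasicSR this model is
# normally built from the option dict and driven by the training loop, e.g.:
#   model = build_model(opt)  # with opt['model_type'] = 'VideoBaseModel'
#   model.validation(val_loader, current_iter, tb_logger, save_img=False)
# where BaseModel.validation dispatches to dist_validation/nondist_validation
# depending on opt['dist'].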