predict_inner_features.py
#!/usr/bin/env python3

# Example command:
# ./bin/predict_inner_features.py \
#       model.path=<path to checkpoint, prepared by make_checkpoint.py> \
#       indir=<path to input data> \
#       outdir=<where to store predicts>

import logging
import os
import sys
import traceback

from saicinpainting.evaluation.utils import move_to_device

# Pin every math backend to a single thread; this must happen before
# numpy/torch initialize their thread pools.
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['VECLIB_MAXIMUM_THREADS'] = '1'
os.environ['NUMEXPR_NUM_THREADS'] = '1'

import cv2
import hydra
import numpy as np
import torch
import tqdm
import yaml
from omegaconf import OmegaConf
from torch.utils.data._utils.collate import default_collate

from saicinpainting.training.data.datasets import make_default_val_dataset
from saicinpainting.training.trainers import load_checkpoint, DefaultInpaintingTrainingModule
from saicinpainting.utils import register_debug_signal_handlers, get_shape

LOGGER = logging.getLogger(__name__)


@hydra.main(config_path='../configs/prediction', config_name='default_inner_features.yaml')
def main(predict_config: OmegaConf):
    try:
        if sys.platform != 'win32':
            register_debug_signal_handlers()  # kill -10 <pid> will result in traceback dumped into log

        device = torch.device(predict_config.device)

        train_config_path = os.path.join(predict_config.model.path, 'config.yaml')
        with open(train_config_path, 'r') as f:
            train_config = OmegaConf.create(yaml.safe_load(f))

        checkpoint_path = os.path.join(predict_config.model.path, 'models', predict_config.model.checkpoint)
        model = load_checkpoint(train_config, checkpoint_path, strict=False)
        model.freeze()
        model.to(device)

        assert isinstance(model, DefaultInpaintingTrainingModule), 'Only DefaultInpaintingTrainingModule is supported'
        assert isinstance(getattr(model.generator, 'model', None), torch.nn.Sequential)
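        # The second assert is what enables the level-by-level pass below: the
        # generator is consumed one sub-module at a time, so it must expose its
        # layers as an nn.Sequential rather than an opaque forward().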

        if not predict_config.indir.endswith('/'):
            predict_config.indir += '/'

        dataset = make_default_val_dataset(predict_config.indir, **predict_config.dataset)

        max_level = max(predict_config.levels)
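        # max_level lets the loop below stop as soon as every requested level
        # has been dumped, instead of running the whole generator.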

        with torch.no_grad():
            for img_i in tqdm.trange(len(dataset)):
                mask_fname = dataset.mask_filenames[img_i]
                cur_out_fname = os.path.join(predict_config.outdir, os.path.splitext(mask_fname[len(predict_config.indir):])[0])
                os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True)

                batch = move_to_device(default_collate([dataset[img_i]]), device)

                img = batch['image']
                mask = batch['mask']
                # Ignore the dataset mask: carve a synthetic square hole of side
                # 2 * hole_radius in the center of the image instead.
                mask[:] = 0
                mask_h, mask_w = mask.shape[-2:]
                mask[:, :,
                    mask_h // 2 - predict_config.hole_radius : mask_h // 2 + predict_config.hole_radius,
                    mask_w // 2 - predict_config.hole_radius : mask_w // 2 + predict_config.hole_radius] = 1

                # LaMa's input convention: the masked image and the mask stacked
                # channel-wise into a single 4-channel tensor.
                masked_img = torch.cat([img * (1 - mask), mask], dim=1)
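
                # Feed the input through the generator one block at a time and
                # dump a visualization of the activations at every level listed
                # in predict_config.levels.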
                feats = masked_img
                for level_i, level in enumerate(model.generator.model):
                    feats = level(feats)
                    if level_i in predict_config.levels:
                        # Some levels emit a tuple (the local and global FFC
                        # branches, where a branch may be a plain 0); keep the
                        # tensor parts and concatenate them channel-wise.
                        cur_feats = torch.cat([f for f in feats if torch.is_tensor(f)], dim=1) \
                            if isinstance(feats, tuple) else feats

                        if predict_config.slice_channels:
                            cur_feats = cur_feats[:, slice(*predict_config.slice_channels)]

                        # Collapse channels to an RMS map, contrast-stretch it
                        # (shift min to 0, scale by std, clamp to [0, 1]) and
                        # save as an 8-bit grayscale image.
                        cur_feat = cur_feats.pow(2).mean(1).pow(0.5).clone()
                        cur_feat -= cur_feat.min()
                        cur_feat /= cur_feat.std()
                        cur_feat = cur_feat.clamp(0, 1)
                        cur_feat = cur_feat.cpu().numpy()[0]
                        cur_feat *= 255
                        cur_feat = np.clip(cur_feat, 0, 255).astype('uint8')
                        cv2.imwrite(cur_out_fname + f'_lev{level_i:02d}_norm.png', cur_feat)
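                        # Outputs mirror indir's layout under outdir, named
                        # <mask name without extension>_lev{NN}_norm.png.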

                        # for channel_i in predict_config.channels:
                        #
                        #     cur_feat = cur_feats[0, channel_i].clone().detach().cpu().numpy()
                        #     cur_feat -= cur_feat.min()
                        #     cur_feat /= cur_feat.max()
                        #     cur_feat *= 255
                        #     cur_feat = np.clip(cur_feat, 0, 255).astype('uint8')
                        #     cv2.imwrite(cur_out_fname + f'_lev{level_i}_ch{channel_i}.png', cur_feat)
                    elif level_i >= max_level:
                        # All requested levels have been saved; skip the rest of the generator.
                        break
    except KeyboardInterrupt:
        LOGGER.warning('Interrupted by user')
    except Exception as ex:
        LOGGER.critical(f'Prediction failed due to {ex}:\n{traceback.format_exc()}')
        sys.exit(1)


if __name__ == '__main__':
    main()
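
# A minimal invocation sketch with inline Hydra overrides (`levels` and
# `hole_radius` are read from the prediction config; the values below are
# placeholders, and list overrides may need quoting in your shell):
#
# ./bin/predict_inner_features.py \
#       model.path=/path/to/checkpoint_dir \
#       indir=/path/to/input_images \
#       outdir=/path/to/feature_dumps \
#       levels=[0,4,8] hole_radius=32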