stable-diffusion-webui

Fork
0
119 lines · 4.3 KB
1
from functools import wraps
2
import html
3
import time
4

5
from modules import shared, progress, errors, devices, fifo_lock
6

7
# Global FIFO lock: serializes queued jobs so only one runs at a time,
# in first-come-first-served order.
queue_lock = fifo_lock.FIFOLock()
8

9

10
def wrap_queued_call(func):
    """Wrap *func* so that calls are serialized through the global queue_lock.

    Only one queued job runs at a time; the wrapped function's return value
    is passed through unchanged.
    """
    # @wraps added for consistency with wrap_gradio_gpu_call below, so the
    # wrapper does not clobber func's name/docstring.
    @wraps(func)
    def f(*args, **kwargs):
        with queue_lock:
            res = func(*args, **kwargs)

        return res

    return f
18

19

20
def wrap_gradio_gpu_call(func, extra_outputs=None):
    """Wrap a gradio handler that performs GPU work.

    Serializes execution through queue_lock, registers the call with the
    progress-tracking module when the first argument is a "task(...)" job id,
    and delegates error handling/stats reporting to wrap_gradio_call.
    """
    @wraps(func)
    def f(*args, **kwargs):

        # if the first argument is a string that says "task(...)", it is treated as a job id
        # (isinstance instead of type()==str: idiomatic and accepts str subclasses)
        if args and isinstance(args[0], str) and args[0].startswith("task(") and args[0].endswith(")"):
            id_task = args[0]
            progress.add_task_to_queue(id_task)
        else:
            id_task = None

        with queue_lock:
            shared.state.begin(job=id_task)
            progress.start_task(id_task)

            try:
                res = func(*args, **kwargs)
                progress.record_results(id_task, res)
            finally:
                # always mark the task finished, even if func raised
                progress.finish_task(id_task)

            shared.state.end()

        return res

    return wrap_gradio_call(f, extra_outputs=extra_outputs, add_stats=True)
46

47

48
def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
    """Wrap a gradio handler with error reporting and optional run statistics.

    On exception, the error is reported via errors.report and the outputs are
    replaced by extra_outputs plus an HTML error panel. Shared job state is
    always reset afterwards. When add_stats is True, elapsed time and (if the
    memory monitor is enabled) VRAM statistics are appended to the last
    output, which is assumed to be HTML.
    """
    @wraps(func)
    def f(*args, extra_outputs_array=extra_outputs, **kwargs):
        run_memmon = shared.opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats
        if run_memmon:
            shared.mem_mon.monitor()
        t = time.perf_counter()

        try:
            res = list(func(*args, **kwargs))
        except Exception as e:
            # When printing out our debug argument list,
            # do not print out more than 128 KiB of text
            max_debug_str_len = 131072
            message = "Error completing request"
            full_arg_str = f"Arguments: {args} {kwargs}"
            arg_str = full_arg_str[:max_debug_str_len]
            # bug fix: compare against the untruncated length; the previous
            # code checked the already-sliced string, so the truncation
            # notice could never be appended
            if len(full_arg_str) > max_debug_str_len:
                arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(full_arg_str)} characters)"
            errors.report(f"{message}\n{arg_str}", exc_info=True)

            shared.state.job = ""
            shared.state.job_count = 0

            if extra_outputs_array is None:
                extra_outputs_array = [None, '']

            error_message = f'{type(e).__name__}: {e}'
            res = extra_outputs_array + [f"<div class='error'>{html.escape(error_message)}</div>"]

        devices.torch_gc()

        # reset per-job state regardless of success or failure
        shared.state.skipped = False
        shared.state.interrupted = False
        shared.state.stopping_generation = False
        shared.state.job_count = 0

        if not add_stats:
            return tuple(res)

        elapsed = time.perf_counter() - t
        elapsed_m = int(elapsed // 60)
        elapsed_s = elapsed % 60
        elapsed_text = f"{elapsed_s:.1f} sec."
        if elapsed_m > 0:
            elapsed_text = f"{elapsed_m} min. "+elapsed_text

        if run_memmon:
            # convert bytes to MB, rounding up (ceiling division)
            mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
            active_peak = mem_stats['active_peak']
            reserved_peak = mem_stats['reserved_peak']
            sys_peak = mem_stats['system_peak']
            sys_total = mem_stats['total']
            # max(..., 1) guards against division by zero if total is unknown
            sys_pct = sys_peak/max(sys_total, 1) * 100

            # typo fixes in user-facing text: "amout" -> "amount"
            tooltip_a = "Active: peak amount of video memory used during generation (excluding cached data)"
            tooltip_r = "Reserved: total amount of video memory allocated by the Torch library "
            tooltip_sys = "System: peak amount of video memory allocated by all running programs, out of total capacity"

            text_a = f"<abbr title='{tooltip_a}'>A</abbr>: <span class='measurement'>{active_peak/1024:.2f} GB</span>"
            text_r = f"<abbr title='{tooltip_r}'>R</abbr>: <span class='measurement'>{reserved_peak/1024:.2f} GB</span>"
            text_sys = f"<abbr title='{tooltip_sys}'>Sys</abbr>: <span class='measurement'>{sys_peak/1024:.1f}/{sys_total/1024:g} GB</span> ({sys_pct:.1f}%)"

            vram_html = f"<p class='vram'>{text_a}, <wbr>{text_r}, <wbr>{text_sys}</p>"
        else:
            vram_html = ''

        # last item is always HTML
        res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr><span class='measurement'>{elapsed_text}</span></p>{vram_html}</div>"

        return tuple(res)

    return f
120

Cookie usage

We use cookies in accordance with the Privacy Policy and the Cookie Policy.

By clicking "Accept", you give AO "SberTech" consent to process your personal data in order to improve our website and the GitVerse service, and to make them more convenient to use.

You can disable cookies yourself in your browser settings.