# stable-diffusion-webui
import logging

import torch
from torch import Tensor
import platform
from modules.sd_hijack_utils import CondFunc
from packaging import version
from modules import shared

log = logging.getLogger(__name__)


# Before torch 1.13, has_mps was only available in nightly PyTorch on macOS 12.3+,
# so check for it with `getattr` and verify by trying an allocation.
# torch 1.13 introduced backends.mps.is_available() and backends.mps.is_built() to check MPS availability;
# since the torch 2.0.1+ nightly builds, getattr(torch, 'has_mps', False) has been deprecated, see https://github.com/pytorch/pytorch/pull/103279
def check_for_mps() -> bool:
    if version.parse(torch.__version__) <= version.parse("2.0.1"):
        if not getattr(torch, 'has_mps', False):
            return False
        try:
            torch.zeros(1).to(torch.device("mps"))
            return True
        except Exception:
            return False
    else:
        return torch.backends.mps.is_available() and torch.backends.mps.is_built()


has_mps = check_for_mps()


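# A hedged usage sketch, not part of the original module: callers typically
# gate device selection on this flag. The helper below is illustrative only;
# the webui's real device-selection logic lives in modules/devices.py.
def _example_device_name() -> str:
    # prefer CUDA, then MPS, then plain CPU
    if torch.cuda.is_available():
        return "cuda"
    if has_mps:
        return "mps"
    return "cpu"

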
def torch_mps_gc() -> None:
    try:
        if shared.state.current_latent is not None:
            log.debug("`current_latent` is set, skipping MPS garbage collection")
            return
        from torch.mps import empty_cache
        empty_cache()
    except Exception:
        log.warning("MPS garbage collection failed", exc_info=True)


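# Hedged sketch of how this hooks into a generic GC path; in the webui the
# caller is modules/devices.torch_gc, but the wrapper below is illustrative,
# not the actual implementation.
def _example_torch_gc() -> None:
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    if has_mps:
        torch_mps_gc()

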
# MPS workaround for https://github.com/pytorch/pytorch/issues/89784
def cumsum_fix(input, cumsum_func, *args, **kwargs):
    if input.device.type == 'mps':
        output_dtype = kwargs.get('dtype', input.dtype)
        if output_dtype == torch.int64:
            return cumsum_func(input.cpu(), *args, **kwargs).to(input.device)
        elif output_dtype == torch.bool or (cumsum_needs_int_fix and (output_dtype == torch.int8 or output_dtype == torch.int16)):
            return cumsum_func(input.to(torch.int32), *args, **kwargs).to(torch.int64)
    return cumsum_func(input, *args, **kwargs)


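# Illustrative only (requires an MPS device; `cumsum_needs_int_fix` is set
# further down, where the patch is registered): how the fix reroutes dtypes.
#   t = torch.tensor([True, False, True], device="mps")
#   cumsum_fix(t, torch.cumsum, 0)   # computed in int32, returned as int64
#   i = torch.ones(3, dtype=torch.int64, device="mps")
#   cumsum_fix(i, torch.cumsum, 0)   # computed on CPU, moved back to MPS

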
# MPS workaround for https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046
def interpolate_with_fp32_fallback(orig_func, *args, **kwargs) -> Tensor:
    try:
        return orig_func(*args, **kwargs)
    except RuntimeError as e:
        # retry in float32 when the op lacks a Half (fp16) kernel on MPS
        if "not implemented for" in str(e) and "Half" in str(e):
            input_tensor = args[0]
            return orig_func(input_tensor.to(torch.float32), *args[1:], **kwargs).to(input_tensor.dtype)
        # anything else is a genuine error; don't swallow it and return None
        raise

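# Illustrative only (assumes an MPS device and an op without a Half kernel):
#   x = torch.randn(1, 3, 8, 8, dtype=torch.float16, device="mps")
#   y = interpolate_with_fp32_fallback(torch.nn.functional.interpolate,
#                                      x, scale_factor=2, mode="bicubic")
#   y.dtype   # torch.float16 again; the upscale itself ran in float32
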
if has_mps:
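    # For reference, a sketch of the CondFunc contract assumed below (the real
    # implementation is in modules.sd_hijack_utils): CondFunc(path, sub, cond)
    # monkey-patches the callable at `path` so that sub(orig, *args, **kwargs)
    # runs when cond(orig, *args, **kwargs) is truthy or cond is None, and the
    # original callable runs otherwise.
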
    if platform.mac_ver()[0].startswith("13.2."):
        # MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk (https://github.com/explosion/curated-transformers/pull/124)
        CondFunc('torch.nn.functional.linear', lambda _, input, weight, bias: (torch.matmul(input, weight.t()) + bias) if bias is not None else torch.matmul(input, weight.t()), lambda _, input, weight, bias: input.numel() > 10485760)

    if version.parse(torch.__version__) < version.parse("1.13"):
        # PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working

        # MPS workaround for https://github.com/pytorch/pytorch/issues/79383
        CondFunc('torch.Tensor.to', lambda orig_func, self, *args, **kwargs: orig_func(self.contiguous(), *args, **kwargs),
                 lambda _, self, *args, **kwargs: self.device.type != 'mps' and (args and isinstance(args[0], torch.device) and args[0].type == 'mps' or isinstance(kwargs.get('device'), torch.device) and kwargs['device'].type == 'mps'))
        # MPS workaround for https://github.com/pytorch/pytorch/issues/80800
        CondFunc('torch.nn.functional.layer_norm', lambda orig_func, *args, **kwargs: orig_func(*([args[0].contiguous()] + list(args[1:])), **kwargs),
                 lambda _, *args, **kwargs: args and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps')
        # MPS workaround for https://github.com/pytorch/pytorch/issues/90532
        CondFunc('torch.Tensor.numpy', lambda orig_func, self, *args, **kwargs: orig_func(self.detach(), *args, **kwargs), lambda _, self, *args, **kwargs: self.requires_grad)
    elif version.parse(torch.__version__) > version.parse("1.13.1"):
        cumsum_needs_int_fix = not torch.Tensor([1, 2]).to(torch.device("mps")).equal(torch.ShortTensor([1, 1]).to(torch.device("mps")).cumsum(0))
        cumsum_fix_func = lambda orig_func, input, *args, **kwargs: cumsum_fix(input, orig_func, *args, **kwargs)
        CondFunc('torch.cumsum', cumsum_fix_func, None)
        CondFunc('torch.Tensor.cumsum', cumsum_fix_func, None)
        CondFunc('torch.narrow', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).clone(), None)

    # MPS workaround for https://github.com/pytorch/pytorch/issues/96113
    CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda _, input, *args, **kwargs: len(args) == 4 and input.device.type == 'mps')

    # MPS workaround for https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046
    CondFunc('torch.nn.functional.interpolate', interpolate_with_fp32_fallback, None)

    # MPS workaround for https://github.com/pytorch/pytorch/issues/92311
    if platform.processor() == 'i386':
        for funcName in ['torch.argmax', 'torch.Tensor.argmax']:
            CondFunc(funcName, lambda _, input, *args, **kwargs: torch.max(input.float() if input.dtype == torch.int64 else input, *args, **kwargs)[1], lambda _, input, *args, **kwargs: input.device.type == 'mps')
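        # Illustrative only: with the patch above, argmax on an int64 MPS
        # tensor is computed as torch.max(input.float())[1], sidestepping the
        # missing int64 kernel described in pytorch#92311.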