import re

import torch._C as C


"""
7
PythonDispatcher class is a thin python-binding to C++ dispatcher and it
8
is designed to show how dispatcher precompute works. In particular,
9
it shows for a certain op `foo`, what the computed dispatch table looks
10
like after user register their kernels to certains dispatch keys.
11

12
In the real C++ dispatcher we support many dispatch keys for different
13
functionalities. For simplicity PythonDispatcher only supports dispatch
14
keys for a single example of each use case. These use cases are listed below:
15

16
- CPU/AutogradCPU: represents in-tree backends which we usually have dedicated inference &
17
    autograd kernel in pytorch core library.
18
    E.g. CPU, CUDA
19
- FPGA/AutogradOther: represents in-tree backends which we usually have backend specific
20
    inference kernels, but they share the same autograd kernel specified in AutogradOther.
21
    E.g. FPGA, SparseCsrCPU
22
- XLA/AutogradXLA: represents out-of-tree backends which we don't have either inference or autograd
23
    kernel defined in pytorch core library. Backend owner is responsible for registering both
24
    inference & autograd kernels in their extensions(e.g. torch-xla) for the operators they support.
25
    E.g. XLA, XPU, MPS
26
- CompositeExplicitAutograd: alias key mapped to inference kernels of all backends like CPU, CUDA, XLA etc.
27
    Kernels registered to this key MUST work for inference for all backends.
28
- Autograd: alias key mapped to autograd of all backends like AutogradCPU, AutogradXLA, AutogradOther.
29
    Kernels registered to this key MUST work for autograd for all backends.
30
- CompositeImplicitAutograd: alias key CompositeImplicitAutograd = CompositeExplicitAutograd + Autograd
31
    Kernels registered to this key MUST work for both inference + autograd for all backends.
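
For example (an illustrative sketch; the exact table text is not reproduced here),
registering only to this alias key makes every runtime key above resolve to the
same auto-generated kernel:

  dispatcher = PythonDispatcher()
  dispatcher.register(["CompositeImplicitAutograd"])
  # dispatchTable() now reports fn_CompositeImplicitAutograd for CPU,
  # AutogradCPU, XLA, AutogradXLA, and the other runtime keys.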

Note we only allow registrations to alias keys inside the pytorch core library.
E.g. you shouldn't register a CompositeImplicitAutograd or CompositeExplicitAutograd
kernel from the torch-xla extension; instead you should upstream the kernel into the
pytorch/pytorch repo so that it's available for all backends and continuously
tested even without the extension.

Usage:
  dispatcher = PythonDispatcher()
  dispatcher.register(["CPU", "XLA", "CompositeImplicitAutograd"])
  print(dispatcher.dispatchTable()) # This tells you exactly which kernel is used for each backend.
  # For more debugging information
  # print(dispatcher.keys())
  # print(dispatcher.registrations())
  # print(dispatcher.rawRegistrations())
  # print(dispatcher.rawDispatchTable())

PythonDispatcher calls the C++ dispatcher under the hood to precompute the dispatch
table. This file only provides a simplified API for developers; the relevant test
code is located in test/test_dispatch.py.
"""


class PythonDispatcher:
    namespace = "__test__"
    name = "foo"
    # fmt: off
    runtime_keys = [
        "CPU", "AutogradCPU",
        "FPGA", "AutogradOther",
        "XLA", "AutogradXLA",
        "Lazy", "AutogradLazy",
    ]
    # fmt: on
    alias_keys = [
        "CompositeExplicitAutograd",
        "Autograd",
        "CompositeImplicitAutograd",
    ]
    supported_keys = runtime_keys + alias_keys

    def __init__(self):
        C._dispatch_check_invariants(self.name)  # type: ignore[attr-defined]
        self.ref = C._dispatch_library("FRAGMENT", self.namespace, "")
        self.ref.def_("foo(Tensor x) -> Tensor")

    """
    Returns a list of dispatch keys supported by PythonDispatcher.
    You can register kernels to these keys.
    """

    def keys(self):
        return self.supported_keys

    """
    Register kernels to the target dispatchKeys.
    dispatchKeys (list[str]): a list of dispatch keys to which you want to register
      your own kernel. Note that you don't need to write the kernel yourself in
      this PythonDispatcher. E.g. for the CPU key, a kernel (e.g. fn_CPU) is
      automatically generated and registered.
    """

    def register(self, dispatchKeys):
        # Overriding is not supported and triggers a warning in the C++ dispatcher.
        if len(set(dispatchKeys)) != len(dispatchKeys):
            raise RuntimeError(
                f"Overriding is not allowed but found duplicates in {dispatchKeys}."
            )
        # We currently forbid this in codegen instead of the C++ dispatcher.
        if (
            "CompositeImplicitAutograd" in dispatchKeys
            and "CompositeExplicitAutograd" in dispatchKeys
        ):
            raise RuntimeError(
                "Registration to both CompositeImplicitAutograd and CompositeExplicitAutograd is not allowed."
            )
        for key in dispatchKeys:
            if key not in self.supported_keys:
                raise RuntimeError(
                    f"{key} is not supported, please select a dispatch key in {self.supported_keys}."
                )
            # Register an auto-generated kernel (named fn_<key> for debugging) to this key.
            self.ref.impl_t_t("foo", dispatch=key, debug="fn_" + key)

    """
    Helper function to format (key, kernel).
    """

    def _format_line(self, key, kernel):
        return f"{key:<15} {kernel}\n"

    """
    Helper function to print a table header.
    """

    def _format_header(self, header):
        s = f"""
{header}
"""
        s += self._format_line("key", "kernel")
        s += "---------------------------\n"
        return s

    """
    Returns raw output of all registration info for debugging only.
    Use registrations() for a simplified version.
    """

    def rawRegistrations(self):
        return C._dispatch_dump(f"{self.namespace}::{self.name}")  # type: ignore[attr-defined]

    """
    Returns raw output of computed dispatch table for debugging only.
    Use dispatchTable() for a simplified version.
    """

    def rawDispatchTable(self):
        return C._dispatch_dump_table(f"{self.namespace}::{self.name}")  # type: ignore[attr-defined]

    """
    Returns a table (str) including all the registrations from users.
    Note this includes registrations to both runtime keys and alias keys.
    """

    def registrations(self):
        output = self._format_header("Registered Kernels")
        state = self.rawRegistrations()
        state_entries = state.split("\n")
        for line in state_entries:
            first = line.split(":")[0]
            if any(first.startswith(k) for k in self.supported_keys):
                # Raw entries look roughly like "<key>: <kernel> :: <schema> ...";
                # keep only the kernel name for the simplified table.
                kernel = line.split("::")[0].split(" ")[1]
                output += self._format_line(first, kernel)
        return output

    """
    Returns the computed dispatch table (str). Note this only includes
    runtime keys; registrations to alias keys have been decoded to their
    mapped runtime keys.
    """

    def dispatchTable(self):
        output = self._format_header("Computed Dispatch Table")
        table = self.rawDispatchTable()
        table_entries = table.split("\n")
        # Strip the "registered at .../FallbackKernel.cpp" location prefix that the
        # raw dump prints for fallback entries, keeping only the bracketed annotation.
        regex = re.compile(r"registered at .*FallbackKernel\.cpp.*(\[)")
        for line in table_entries:
            k = line.split(":")[0]
            if k in self.runtime_keys:
                entry = regex.sub("[", line)
                output += self._format_line(k, entry.split(": ")[1])
        return output
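

# Minimal illustrative demo: it exercises the usage shown in the module docstring,
# using only the methods defined above. The printed tables are computed by the
# C++ dispatcher, so no particular output is assumed here.
if __name__ == "__main__":
    dispatcher = PythonDispatcher()
    # Register auto-generated kernels to an in-tree backend key, an out-of-tree
    # backend key, and an alias key.
    dispatcher.register(["CPU", "XLA", "CompositeImplicitAutograd"])
    print(dispatcher.keys())  # dispatch keys you may register to
    print(dispatcher.registrations())  # what was registered, per key
    print(dispatcher.dispatchTable())  # computed kernel per runtime key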