# pytorch / parse_logs.py — page header from the hosting site removed
# (original chrome read: "Форк / 0 / parse_logs.py / 197 строк · 5.7 Кб",
# i.e. "Fork / 0 / parse_logs.py / 197 lines · 5.7 KB").
import csv
import os
import re
import sys

# This script takes the logs produced by the benchmark scripts (e.g.,
# torchbench.py) and parses it into a CSV file that summarizes what
# is failing and why.  It is kept separate from the benchmark script
# emitting a more structured output as it is often more convenient
# to iterate quickly on log files offline instead of having to make
# a change to the benchmark script and then do a full sweep to see
# the updates.
#
# This script is not very well written, feel free to rewrite it as necessary

# Validate argv explicitly: a bare `assert` is stripped under `python -O`
# and gives the user no usage hint.
if len(sys.argv) != 2:
    print(f"usage: {sys.argv[0]} LOGFILE", file=sys.stderr)
    sys.exit(1)

# Read the whole log eagerly; the context manager guarantees the handle is
# closed, and errors="replace" keeps a stray non-UTF-8 byte in a benchmark
# log from aborting the entire parse.
with open(sys.argv[1], encoding="utf-8", errors="replace") as log_file:
    full_log = log_file.read()

# If the log contains a gist URL, extract it so we can include it in the CSV
gist_url = ""
m = re.search(r"https://gist.github.com/[a-f0-9]+", full_log)
if m is not None:
    gist_url = m.group(0)

# Split the log into an entry per benchmark.  Because the pattern has two
# capturing groups, every entry contributes (run-name, load-failure-name)
# where exactly one of the two is non-None, followed by the entry text.
entries = re.split(
    r"(?:cuda (?:train|eval) +([^ ]+)|WARNING:root:([^ ]+) failed to load)", full_log
)[1:]
# Entries schema example:
# `['hf_Bert', None, '
#  PASS\nTIMING: entire_frame_compile:1.80925 backend_compile:6e-05\nDynamo produced 1 graph(s) covering 367 ops\n']`
def chunker(seq, size):
    """Lazily yield consecutive ``size``-length slices of ``seq``.

    The final slice is shorter when ``len(seq)`` is not a multiple of
    ``size``.  Returns a generator, so nothing is materialized up front.
    """

    def _walk():
        start = 0
        stop = len(seq)
        while start < stop:
            yield seq[start : start + size]
            start += size

    return _walk()
# `c` counts entries we failed to classify; `i` counts benchmark rows emitted.
c = 0
i = 0

# Column layout of the summary CSV that is streamed to stdout.
_CSV_COLUMNS = [
    "bench",
    "name",
    "result",
    "component",
    "context",
    "explain",
    "frame_time",
    "backend_time",
    "graph_count",
    "op_count",
    "graph_breaks",
    "unique_graph_breaks",
]

out = csv.DictWriter(sys.stdout, _CSV_COLUMNS, dialect="excel")
out.writeheader()
# One extra row right under the header carrying only the gist URL (if any)
# in the "explain" column.
out.writerow({"explain": gist_url})
# Sometimes backtraces will be in third party code, which results
# in very long file names.  Delete the absolute path in this case.
def normalize_file(f):
    """Shorten a traceback file path for the CSV.

    Paths under a ``site-packages`` directory are reported relative to it
    (the absolute prefix varies per machine and is just noise); anything
    else is made relative to the current working directory.
    """
    if "site-packages/" in f:
        # maxsplit=1 keeps everything after the *first* occurrence.  The old
        # maxsplit=2 with index [1] silently truncated paths containing the
        # marker twice (e.g. a venv nested under another site-packages tree).
        return f.split("site-packages/", 1)[1]
    else:
        return os.path.relpath(f)
# Assume we run torchbench, huggingface, timm_models in that order
# (as output doesn't say which suite the benchmark is part of)
# TODO: make this more robust

bench = "torchbench"

# 3 = 1 + number of matches in the entries split regex
for name, name2, log in chunker(entries, 3):
    # Exactly one of the two capture groups matched; prefer the run name,
    # fall back to the "failed to load" name.
    if name is None:
        name = name2
    # The first model of each suite marks the transition to the next suite.
    if name.startswith("Albert"):
        bench = "huggingface"
    elif name.startswith("adv_inc"):
        bench = "timm_models"

    # Payload that will go into the csv
    r = "UNKNOWN"
    explain = ""
    component = ""
    context = ""

    if "PASS" in log:
        r = "PASS"
    if "TIMEOUT" in log:
        r = "FAIL TIMEOUT"
    if "Accuracy failed" in log:
        r = "FAIL ACCURACY"

    # Attempt to extract out useful information from the traceback:
    # keep only the innermost traceback (drop chained-exception context),
    # then pull out the "File ..., line N" frame, the source line, and the
    # exception message.
    log = log.split(
        "The above exception was the direct cause of the following exception"
    )[0]
    split = log.split("Traceback (most recent call last)", maxsplit=1)
    if len(split) == 2:
        log = split[1]
    log = log.split("Original traceback:")[0]
    m = re.search(
        r'File "([^"]+)", line ([0-9]+), in .+\n +(.+)\n([A-Za-z]+(?:Error|Exception|NotImplementedError): ?.*)',
        log,
    )

    if m is not None:
        r = "FAIL"
        component = f"{normalize_file(m.group(1))}:{m.group(2)}"
        context = m.group(3)
        explain = f"{m.group(4)}"
    else:
        # A bare AssertionError carries no ": message" suffix, so the first
        # pattern misses it; match it separately.
        m = re.search(
            r'File "([^"]+)", line ([0-9]+), in .+\n +(.+)\nAssertionError', log
        )
        if m is not None:
            r = "FAIL"
            component = f"{normalize_file(m.group(1))}:{m.group(2)}"
            context = m.group(3)
            explain = "AssertionError"

    # Sometimes, the benchmark will say FAIL without any useful info
    # See https://github.com/pytorch/torchdynamo/issues/1910
    if "FAIL" in log:
        r = "FAIL"

    if r == "UNKNOWN":
        c += 1

    backend_time = None
    frame_time = None
    # BUGFIX: the old pattern "TIMING:(.*)\n" required a trailing newline,
    # so a TIMING line at the very end of the file made re.search return
    # None and the script crashed on .group(1).  "." never matches "\n",
    # so dropping the "\n" captures exactly the same text otherwise.
    timing_match = re.search(r"TIMING:(.*)", log)
    if timing_match is not None:
        result = timing_match.group(1)
        split_str = result.split("backend_compile:")
        if len(split_str) == 2:
            backend_time = float(split_str[1])
            frame_time = float(split_str[0].split("entire_frame_compile:")[1])

    # Same trailing-newline fix as for TIMING above.
    stats_match = re.search(r"STATS:(.*)", log)
    if stats_match is not None:
        result = stats_match.group(1)
        # call_* op count: 970 | FakeTensor.__torch_dispatch__:35285 | ProxyTorchDispatchMode.__torch_dispatch__:13339
        split_all = result.split("|")
        # TODO: rewrite this to work with arbitrarily many stats

    graph_count = None
    op_count = None
    graph_breaks = None
    unique_graph_breaks = None
    if m := re.search(
        r"Dynamo produced (\d+) graphs covering (\d+) ops with (\d+) graph breaks \((\d+) unique\)",
        log,
    ):
        graph_count = m.group(1)
        op_count = m.group(2)
        graph_breaks = m.group(3)
        unique_graph_breaks = m.group(4)

    # If the context string is too long, don't put it in the CSV.
    # This is a hack to try to make it more likely that Google Sheets will
    # offer to split columns
    if len(context) > 78:
        context = ""

    # Temporary file names are meaningless, report it's generated code in this
    # case
    if "/tmp/" in component:
        component = "generated code"
        context = ""

    out.writerow(
        {
            "bench": bench,
            "name": name,
            "result": r,
            "component": component,
            "context": context,
            "explain": explain,
            "frame_time": frame_time,
            "backend_time": backend_time,
            "graph_count": graph_count,
            "op_count": op_count,
            "graph_breaks": graph_breaks,
            "unique_graph_breaks": unique_graph_breaks,
        }
    )
    i += 1

if c:
    print(f"failed to classify {c} entries", file=sys.stderr)

# (Trailing page footer removed: a Russian-language cookie-consent notice
# from the hosting site, not part of the source file.)