# Unlike the rest of PyTorch, this file must be Python 2 compliant.
# This script outputs relevant system environment info.
# Run it with `python collect_env.py` or `python -m torch.utils.collect_env`.
import datetime
import locale
import re
import subprocess
import sys
import os
from collections import namedtuple


try:
    import torch
    TORCH_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
    TORCH_AVAILABLE = False

# System Environment Information
SystemEnv = namedtuple('SystemEnv', [
    'torch_version',
    'is_debug_build',
    'cuda_compiled_version',
    'gcc_version',
    'clang_version',
    'cmake_version',
    'os',
    'libc_version',
    'python_version',
    'python_platform',
    'is_cuda_available',
    'cuda_runtime_version',
    'cuda_module_loading',
    'nvidia_driver_version',
    'nvidia_gpu_models',
    'cudnn_version',
    'pip_version',  # 'pip' or 'pip3'
    'pip_packages',
    'conda_packages',
    'hip_compiled_version',
    'hip_runtime_version',
    'miopen_runtime_version',
    'caching_allocator_config',
    'is_xnnpack_available',
    'cpu_info',
])
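
# One SystemEnv instance is filled in per run: get_env_info() below collects
# the values, and pretty_str() renders them through the env_info_fmt template.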

DEFAULT_CONDA_PATTERNS = {
    "torch",
    "numpy",
    "cudatoolkit",
    "soumith",
    "mkl",
    "magma",
    "triton",
    "optree",
}

DEFAULT_PIP_PATTERNS = {
    "torch",
    "numpy",
    "mypy",
    "flake8",
    "triton",
    "optree",
    "onnx",
}


def run(command):
    """Return (return-code, stdout, stderr)."""
    shell = isinstance(command, str)
    p = subprocess.Popen(command, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, shell=shell)
    raw_output, raw_err = p.communicate()
    rc = p.returncode
    if get_platform() == 'win32':
        enc = 'oem'
    else:
        enc = locale.getpreferredencoding()
    output = raw_output.decode(enc)
    err = raw_err.decode(enc)
    return rc, output.strip(), err.strip()
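
# Example (illustrative): `run` accepts either a shell string or an argv list.
#   run('echo hello')        # a str is executed through the shell -> (0, 'hello', '')
#   run(['echo', 'hello'])   # a list is executed directly, without a shell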


def run_and_read_all(run_lambda, command):
    """Run command using run_lambda; return the entire output if rc is 0."""
    rc, out, _ = run_lambda(command)
    if rc != 0:
        return None
    return out


def run_and_parse_first_match(run_lambda, command, regex):
    """Run command using run_lambda; return the first regex match if it exists."""
    rc, out, _ = run_lambda(command)
    if rc != 0:
        return None
    match = re.search(regex, out)
    if match is None:
        return None
    return match.group(1)


def run_and_return_first_line(run_lambda, command):
    """Run command using run_lambda; return the first line of output if rc is 0."""
    rc, out, _ = run_lambda(command)
    if rc != 0:
        return None
    return out.split('\n')[0]


def get_conda_packages(run_lambda, patterns=None):
    if patterns is None:
        patterns = DEFAULT_CONDA_PATTERNS
    conda = os.environ.get('CONDA_EXE', 'conda')
    out = run_and_read_all(run_lambda, "{} list".format(conda))
    if out is None:
        return out

    return "\n".join(
        line
        for line in out.splitlines()
        if not line.startswith("#")
        and any(name in line for name in patterns)
    )
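
# Illustrative filtering: header lines from `conda list` start with '#' and are
# dropped; a line such as
#   numpy  1.26.0  py310h5f9d8c6_0
# survives because it contains the pattern "numpy", while unrelated packages
# are filtered out.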


def get_gcc_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'gcc --version', r'gcc (.*)')


def get_clang_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'clang --version', r'clang version (.*)')


def get_cmake_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'cmake --version', r'cmake (.*)')


def get_nvidia_driver_version(run_lambda):
    if get_platform() == 'darwin':
        cmd = 'kextstat | grep -i cuda'
        return run_and_parse_first_match(run_lambda, cmd,
                                         r'com[.]nvidia[.]CUDA [(](.*?)[)]')
    smi = get_nvidia_smi()
    return run_and_parse_first_match(run_lambda, smi, r'Driver Version: (.*?) ')


def get_gpu_info(run_lambda):
    if get_platform() == 'darwin' or (TORCH_AVAILABLE and hasattr(torch.version, 'hip') and torch.version.hip is not None):
        if TORCH_AVAILABLE and torch.cuda.is_available():
            if torch.version.hip is not None:
                prop = torch.cuda.get_device_properties(0)
                if hasattr(prop, "gcnArchName"):
                    gcnArch = " ({})".format(prop.gcnArchName)
                else:
                    gcnArch = "NoGCNArchNameOnOldPyTorch"
            else:
                gcnArch = ""
            return torch.cuda.get_device_name(None) + gcnArch
        return None
    smi = get_nvidia_smi()
    uuid_regex = re.compile(r' \(UUID: .+?\)')
    rc, out, _ = run_lambda(smi + ' -L')
    if rc != 0:
        return None
    # Anonymize GPUs by removing their UUID
    return re.sub(uuid_regex, '', out)


def get_running_cuda_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'nvcc --version', r'release .+ V(.*)')
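
# Illustrative: `nvcc --version` typically ends with a line such as
#   Cuda compilation tools, release 12.1, V12.1.105
# from which the regex above captures '12.1.105'.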


def get_cudnn_version(run_lambda):
    """Return a list of libcudnn.so; it's hard to tell which one is being used."""
    if get_platform() == 'win32':
        system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
        cuda_path = os.environ.get('CUDA_PATH', "%CUDA_PATH%")
        where_cmd = os.path.join(system_root, 'System32', 'where')
        cudnn_cmd = '{} /R "{}\\bin" cudnn*.dll'.format(where_cmd, cuda_path)
    elif get_platform() == 'darwin':
        # CUDA libraries and drivers can be found in /usr/local/cuda/. See
        # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install
        # https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac
        # Use CUDNN_LIBRARY when the cudnn library is installed elsewhere.
        cudnn_cmd = 'ls /usr/local/cuda/lib/libcudnn*'
    else:
        cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev'
    rc, out, _ = run_lambda(cudnn_cmd)
    # `where` and `ls` return 1 if there are permission errors or nothing is found
    if len(out) == 0 or (rc != 1 and rc != 0):
        lib = os.environ.get('CUDNN_LIBRARY')
        if lib is not None and os.path.isfile(lib):
            return os.path.realpath(lib)
        return None
    files_set = set()
    for fn in out.split('\n'):
        fn = os.path.realpath(fn)  # eliminate symbolic links
        if os.path.isfile(fn):
            files_set.add(fn)
    if not files_set:
        return None
    # Alphabetize the result because the order is non-deterministic otherwise
    files = sorted(files_set)
    if len(files) == 1:
        return files[0]
    result = '\n'.join(files)
    return 'Probably one of the following:\n{}'.format(result)


def get_nvidia_smi():
    # Note: nvidia-smi is currently available only on Windows and Linux
    smi = 'nvidia-smi'
    if get_platform() == 'win32':
        system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
        program_files_root = os.environ.get('PROGRAMFILES', 'C:\\Program Files')
        legacy_path = os.path.join(program_files_root, 'NVIDIA Corporation', 'NVSMI', smi)
        new_path = os.path.join(system_root, 'System32', smi)
        smis = [new_path, legacy_path]
        for candidate_smi in smis:
            if os.path.exists(candidate_smi):
                smi = '"{}"'.format(candidate_smi)
                break
    return smi


# Example outputs of CPU info:
#  * linux
#    Architecture:            x86_64
#      CPU op-mode(s):        32-bit, 64-bit
#      Address sizes:         46 bits physical, 48 bits virtual
#      Byte Order:            Little Endian
#    CPU(s):                  128
#      On-line CPU(s) list:   0-127
#    Vendor ID:               GenuineIntel
#      Model name:            Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
#        CPU family:          6
#        Model:               106
#        Thread(s) per core:  2
#        Core(s) per socket:  32
#        Socket(s):           2
#        Stepping:            6
#        BogoMIPS:            5799.78
#        Flags:               fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr
#                             sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl
#                             xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq monitor ssse3 fma cx16
#                             pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand
#                             hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced
#                             fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap
#                             avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1
#                             xsaves wbnoinvd ida arat avx512vbmi pku ospke avx512_vbmi2 gfni vaes vpclmulqdq
#                             avx512_vnni avx512_bitalg tme avx512_vpopcntdq rdpid md_clear flush_l1d arch_capabilities
#    Virtualization features:
#      Hypervisor vendor:     KVM
#      Virtualization type:   full
#    Caches (sum of all):
#      L1d:                   3 MiB (64 instances)
#      L1i:                   2 MiB (64 instances)
#      L2:                    80 MiB (64 instances)
#      L3:                    108 MiB (2 instances)
#    NUMA:
#      NUMA node(s):          2
#      NUMA node0 CPU(s):     0-31,64-95
#      NUMA node1 CPU(s):     32-63,96-127
#    Vulnerabilities:
#      Itlb multihit:         Not affected
#      L1tf:                  Not affected
#      Mds:                   Not affected
#      Meltdown:              Not affected
#      Mmio stale data:       Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
#      Retbleed:              Not affected
#      Spec store bypass:     Mitigation; Speculative Store Bypass disabled via prctl and seccomp
#      Spectre v1:            Mitigation; usercopy/swapgs barriers and __user pointer sanitization
#      Spectre v2:            Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence
#      Srbds:                 Not affected
#      Tsx async abort:       Not affected
#  * win32
#    Architecture=9
#    CurrentClockSpeed=2900
#    DeviceID=CPU0
#    Family=179
#    L2CacheSize=40960
#    L2CacheSpeed=
#    Manufacturer=GenuineIntel
#    MaxClockSpeed=2900
#    Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
#    ProcessorType=3
#    Revision=27142
#
#    Architecture=9
#    CurrentClockSpeed=2900
#    DeviceID=CPU1
#    Family=179
#    L2CacheSize=40960
#    L2CacheSpeed=
#    Manufacturer=GenuineIntel
#    MaxClockSpeed=2900
#    Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
#    ProcessorType=3
#    Revision=27142
def get_cpu_info(run_lambda):
    rc, out, err = 0, '', ''
    if get_platform() == 'linux':
        rc, out, err = run_lambda('lscpu')
    elif get_platform() == 'win32':
        rc, out, err = run_lambda(
            'wmic cpu get Name,Manufacturer,Family,Architecture,ProcessorType,'
            'DeviceID,CurrentClockSpeed,MaxClockSpeed,L2CacheSize,L2CacheSpeed,'
            'Revision /VALUE')
    elif get_platform() == 'darwin':
        rc, out, err = run_lambda("sysctl -n machdep.cpu.brand_string")
    return out if rc == 0 else err


def get_platform():
    if sys.platform.startswith('linux'):
        return 'linux'
    elif sys.platform.startswith('win32'):
        return 'win32'
    elif sys.platform.startswith('cygwin'):
        return 'cygwin'
    elif sys.platform.startswith('darwin'):
        return 'darwin'
    else:
        return sys.platform


def get_mac_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'sw_vers -productVersion', r'(.*)')


def get_windows_version(run_lambda):
    system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
    wmic_cmd = os.path.join(system_root, 'System32', 'Wbem', 'wmic')
    findstr_cmd = os.path.join(system_root, 'System32', 'findstr')
    return run_and_read_all(run_lambda, '{} os get Caption | {} /v Caption'.format(wmic_cmd, findstr_cmd))


def get_lsb_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'lsb_release -a', r'Description:\t(.*)')


def check_release_file(run_lambda):
    return run_and_parse_first_match(run_lambda, 'cat /etc/*-release',
                                     r'PRETTY_NAME="(.*)"')


def get_os(run_lambda):
    from platform import machine
    platform = get_platform()

    if platform == 'win32' or platform == 'cygwin':
        return get_windows_version(run_lambda)

    if platform == 'darwin':
        version = get_mac_version(run_lambda)
        if version is None:
            return None
        return 'macOS {} ({})'.format(version, machine())

    if platform == 'linux':
        # Ubuntu/Debian based
        desc = get_lsb_version(run_lambda)
        if desc is not None:
            return '{} ({})'.format(desc, machine())

        # Try reading /etc/*-release
        desc = check_release_file(run_lambda)
        if desc is not None:
            return '{} ({})'.format(desc, machine())

        return '{} ({})'.format(platform, machine())

    # Unknown platform
    return platform


def get_python_platform():
    import platform
    return platform.platform()


def get_libc_version():
    import platform
    if get_platform() != 'linux':
        return 'N/A'
    return '-'.join(platform.libc_ver())


def get_pip_packages(run_lambda, patterns=None):
    """Return `pip list` output. Note: will also find conda-installed pytorch and numpy packages."""
    if patterns is None:
        patterns = DEFAULT_PIP_PATTERNS

    # People generally have `pip` as `pip` or `pip3`
    # But here it is invoked as `python -mpip`
    def run_with_pip(pip):
        out = run_and_read_all(run_lambda, pip + ["list", "--format=freeze"])
        if out is None:
            return None
        return "\n".join(
            line
            for line in out.splitlines()
            if any(name in line for name in patterns)
        )

    pip_version = 'pip3' if sys.version[0] == '3' else 'pip'
    out = run_with_pip([sys.executable, '-mpip'])

    return pip_version, out
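
# Example (illustrative):
#   pip_version, out = get_pip_packages(run)
#   # pip_version -> 'pip3' under Python 3
#   # out -> matching `pip list --format=freeze` lines, e.g. 'torch==2.1.0'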


def get_cachingallocator_config():
    ca_config = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', '')
    return ca_config


def get_cuda_module_loading_config():
    if TORCH_AVAILABLE and torch.cuda.is_available():
        torch.cuda.init()
        config = os.environ.get('CUDA_MODULE_LOADING', '')
        return config
    else:
        return "N/A"


def is_xnnpack_available():
    if TORCH_AVAILABLE:
        import torch.backends.xnnpack
        return str(torch.backends.xnnpack.enabled)  # type: ignore[attr-defined]
    else:
        return "N/A"


def get_env_info():
    run_lambda = run
    pip_version, pip_list_output = get_pip_packages(run_lambda)

    if TORCH_AVAILABLE:
        version_str = torch.__version__
        debug_mode_str = str(torch.version.debug)
        cuda_available_str = str(torch.cuda.is_available())
        cuda_version_str = torch.version.cuda
        if not hasattr(torch.version, 'hip') or torch.version.hip is None:  # cuda version
            hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
        else:  # HIP version
            def get_version_or_na(cfg, prefix):
                _lst = [s.rsplit(None, 1)[-1] for s in cfg if prefix in s]
                return _lst[0] if _lst else 'N/A'

            cfg = torch._C._show_config().split('\n')
            hip_runtime_version = get_version_or_na(cfg, 'HIP Runtime')
            miopen_runtime_version = get_version_or_na(cfg, 'MIOpen')
            cuda_version_str = 'N/A'
            hip_compiled_version = torch.version.hip
    else:
        version_str = debug_mode_str = cuda_available_str = cuda_version_str = 'N/A'
        hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'

    sys_version = sys.version.replace("\n", " ")

    conda_packages = get_conda_packages(run_lambda)

    return SystemEnv(
        torch_version=version_str,
        is_debug_build=debug_mode_str,
        python_version='{} ({}-bit runtime)'.format(sys_version, sys.maxsize.bit_length() + 1),
        python_platform=get_python_platform(),
        is_cuda_available=cuda_available_str,
        cuda_compiled_version=cuda_version_str,
        cuda_runtime_version=get_running_cuda_version(run_lambda),
        cuda_module_loading=get_cuda_module_loading_config(),
        nvidia_gpu_models=get_gpu_info(run_lambda),
        nvidia_driver_version=get_nvidia_driver_version(run_lambda),
        cudnn_version=get_cudnn_version(run_lambda),
        hip_compiled_version=hip_compiled_version,
        hip_runtime_version=hip_runtime_version,
        miopen_runtime_version=miopen_runtime_version,
        pip_version=pip_version,
        pip_packages=pip_list_output,
        conda_packages=conda_packages,
        os=get_os(run_lambda),
        libc_version=get_libc_version(),
        gcc_version=get_gcc_version(run_lambda),
        clang_version=get_clang_version(run_lambda),
        cmake_version=get_cmake_version(run_lambda),
        caching_allocator_config=get_cachingallocator_config(),
        is_xnnpack_available=is_xnnpack_available(),
        cpu_info=get_cpu_info(run_lambda),
    )


env_info_fmt = """
PyTorch version: {torch_version}
Is debug build: {is_debug_build}
CUDA used to build PyTorch: {cuda_compiled_version}
ROCM used to build PyTorch: {hip_compiled_version}

OS: {os}
GCC version: {gcc_version}
Clang version: {clang_version}
CMake version: {cmake_version}
Libc version: {libc_version}

Python version: {python_version}
Python platform: {python_platform}
Is CUDA available: {is_cuda_available}
CUDA runtime version: {cuda_runtime_version}
CUDA_MODULE_LOADING set to: {cuda_module_loading}
GPU models and configuration: {nvidia_gpu_models}
Nvidia driver version: {nvidia_driver_version}
cuDNN version: {cudnn_version}
HIP runtime version: {hip_runtime_version}
MIOpen runtime version: {miopen_runtime_version}
Is XNNPACK available: {is_xnnpack_available}

CPU:
{cpu_info}

Versions of relevant libraries:
{pip_packages}
{conda_packages}
""".strip()


def pretty_str(envinfo):
    def replace_nones(dct, replacement='Could not collect'):
        for key in dct.keys():
            if dct[key] is not None:
                continue
            dct[key] = replacement
        return dct

    def replace_bools(dct, true='Yes', false='No'):
        for key in dct.keys():
            if dct[key] is True:
                dct[key] = true
            elif dct[key] is False:
                dct[key] = false
        return dct

    def prepend(text, tag='[prepend]'):
        lines = text.split('\n')
        updated_lines = [tag + line for line in lines]
        return '\n'.join(updated_lines)

    def replace_if_empty(text, replacement='No relevant packages'):
        if text is not None and len(text) == 0:
            return replacement
        return text

    def maybe_start_on_next_line(string):
        # If `string` is multiline, prepend a \n to it.
        if string is not None and len(string.split('\n')) > 1:
            return '\n{}\n'.format(string)
        return string

    mutable_dict = envinfo._asdict()

    # If nvidia_gpu_models is multiline, start on the next line
    mutable_dict['nvidia_gpu_models'] = \
        maybe_start_on_next_line(envinfo.nvidia_gpu_models)

    # If the machine doesn't have CUDA, report some fields as 'No CUDA'
    dynamic_cuda_fields = [
        'cuda_runtime_version',
        'nvidia_gpu_models',
        'nvidia_driver_version',
    ]
    all_cuda_fields = dynamic_cuda_fields + ['cudnn_version']
    all_dynamic_cuda_fields_missing = all(
        mutable_dict[field] is None for field in dynamic_cuda_fields)
    if TORCH_AVAILABLE and not torch.cuda.is_available() and all_dynamic_cuda_fields_missing:
        for field in all_cuda_fields:
            mutable_dict[field] = 'No CUDA'
        if envinfo.cuda_compiled_version is None:
            mutable_dict['cuda_compiled_version'] = 'None'

    # Replace True with Yes, False with No
    mutable_dict = replace_bools(mutable_dict)

    # Replace all None objects with 'Could not collect'
    mutable_dict = replace_nones(mutable_dict)

    # If either of these are '', replace with 'No relevant packages'
    mutable_dict['pip_packages'] = replace_if_empty(mutable_dict['pip_packages'])
    mutable_dict['conda_packages'] = replace_if_empty(mutable_dict['conda_packages'])

    # Tag conda and pip packages with a prefix
    # If they were previously None, they'll show up as e.g. '[conda] Could not collect'
    if mutable_dict['pip_packages']:
        mutable_dict['pip_packages'] = prepend(mutable_dict['pip_packages'],
                                               '[{}] '.format(envinfo.pip_version))
    if mutable_dict['conda_packages']:
        mutable_dict['conda_packages'] = prepend(mutable_dict['conda_packages'],
                                                 '[conda] ')
    mutable_dict['cpu_info'] = envinfo.cpu_info
    return env_info_fmt.format(**mutable_dict)
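
# Illustrative transformations applied by pretty_str:
#   True           -> 'Yes'                    (replace_bools)
#   None           -> 'Could not collect'      (replace_nones)
#   ''             -> 'No relevant packages'   (pip/conda package fields only)
#   'torch==2.1.0' -> '[pip3] torch==2.1.0'    (prepend)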


def get_pretty_env_info():
    return pretty_str(get_env_info())


def main():
    print("Collecting environment information...")
    output = get_pretty_env_info()
    print(output)

    if TORCH_AVAILABLE and hasattr(torch, 'utils') and hasattr(torch.utils, '_crash_handler'):
        minidump_dir = torch.utils._crash_handler.DEFAULT_MINIDUMP_DIR
        if sys.platform == "linux" and os.path.exists(minidump_dir):
            dumps = [os.path.join(minidump_dir, dump) for dump in os.listdir(minidump_dir)]
            # Guard against an empty minidump directory; max() would raise otherwise
            if dumps:
                latest = max(dumps, key=os.path.getctime)
                ctime = os.path.getctime(latest)
                creation_time = datetime.datetime.fromtimestamp(ctime).strftime('%Y-%m-%d %H:%M:%S')
                msg = "\n*** Detected a minidump at {} created on {}, ".format(latest, creation_time) + \
                      "if this is related to your bug please include it when you file a report ***"
                print(msg, file=sys.stderr)


if __name__ == '__main__':
    main()
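
# Example invocation (values below are illustrative):
#   $ python collect_env.py
#   Collecting environment information...
#   PyTorch version: 2.1.0
#   Is debug build: False
#   CUDA used to build PyTorch: 12.1
#   ...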