vision / setup.py
import distutils.command.clean
import distutils.spawn
import glob
import os
import shutil
import subprocess
import sys
import warnings
from pathlib import Path

import torch
from pkg_resources import DistributionNotFound, get_distribution, parse_version
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDA_HOME, CUDAExtension, ROCM_HOME

FORCE_CUDA = os.getenv("FORCE_CUDA", "0") == "1"
FORCE_MPS = os.getenv("FORCE_MPS", "0") == "1"
DEBUG = os.getenv("DEBUG", "0") == "1"
USE_PNG = os.getenv("TORCHVISION_USE_PNG", "1") == "1"
USE_JPEG = os.getenv("TORCHVISION_USE_JPEG", "1") == "1"
USE_WEBP = os.getenv("TORCHVISION_USE_WEBP", "1") == "1"
USE_HEIC = os.getenv("TORCHVISION_USE_HEIC", "0") == "1"  # TODO enable by default!
USE_AVIF = os.getenv("TORCHVISION_USE_AVIF", "0") == "1"  # TODO enable by default!
USE_NVJPEG = os.getenv("TORCHVISION_USE_NVJPEG", "1") == "1"
NVCC_FLAGS = os.getenv("NVCC_FLAGS", None)
# Note: the GPU video decoding stuff used to be called "video codec", which
# isn't an accurate or descriptive name considering there are at least 2 other
# video decoding backends in torchvision. I'm renaming this to "gpu video
# decoder" where possible, keeping user-facing names (like the env var below) to
# the old scheme for BC.
USE_GPU_VIDEO_DECODER = os.getenv("TORCHVISION_USE_VIDEO_CODEC", "1") == "1"
# Same here: "use ffmpeg" was used to denote "use cpu video decoder".
USE_CPU_VIDEO_DECODER = os.getenv("TORCHVISION_USE_FFMPEG", "1") == "1"

TORCHVISION_INCLUDE = os.environ.get("TORCHVISION_INCLUDE", "")
TORCHVISION_LIBRARY = os.environ.get("TORCHVISION_LIBRARY", "")
TORCHVISION_INCLUDE = TORCHVISION_INCLUDE.split(os.pathsep) if TORCHVISION_INCLUDE else []
TORCHVISION_LIBRARY = TORCHVISION_LIBRARY.split(os.pathsep) if TORCHVISION_LIBRARY else []
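# Illustrative usage (paths are hypothetical): all build switches are plain environment
# variables, and TORCHVISION_INCLUDE / TORCHVISION_LIBRARY take os.pathsep-separated lists, e.g.
#   TORCHVISION_INCLUDE=/opt/libjpeg-turbo/include \
#   TORCHVISION_LIBRARY=/opt/libjpeg-turbo/lib \
#   FORCE_CUDA=1 python setup.py develop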

ROOT_DIR = Path(__file__).absolute().parent
CSRS_DIR = ROOT_DIR / "torchvision/csrc"
IS_ROCM = (torch.version.hip is not None) and (ROCM_HOME is not None)
BUILD_CUDA_SOURCES = (torch.cuda.is_available() and ((CUDA_HOME is not None) or IS_ROCM)) or FORCE_CUDA

PACKAGE_NAME = "torchvision"

print("Torchvision build configuration:")
print(f"{FORCE_CUDA = }")
print(f"{FORCE_MPS = }")
print(f"{DEBUG = }")
print(f"{USE_PNG = }")
print(f"{USE_JPEG = }")
print(f"{USE_WEBP = }")
print(f"{USE_HEIC = }")
print(f"{USE_AVIF = }")
print(f"{USE_NVJPEG = }")
print(f"{NVCC_FLAGS = }")
print(f"{USE_CPU_VIDEO_DECODER = }")
print(f"{USE_GPU_VIDEO_DECODER = }")
print(f"{TORCHVISION_INCLUDE = }")
print(f"{TORCHVISION_LIBRARY = }")
print(f"{IS_ROCM = }")
print(f"{BUILD_CUDA_SOURCES = }")


def get_version():
    with open(ROOT_DIR / "version.txt") as f:
        version = f.readline().strip()
    sha = "Unknown"

    try:
        sha = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=str(ROOT_DIR)).decode("ascii").strip()
    except Exception:
        pass

    if os.getenv("BUILD_VERSION"):
        version = os.getenv("BUILD_VERSION")
    elif sha != "Unknown":
        version += "+" + sha[:7]

    return version, sha
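# For reference (values are illustrative): built from a git checkout with version.txt containing
# "0.20.0a0" and no BUILD_VERSION set, this returns ("0.20.0a0+abc1234", "<full commit sha>"),
# i.e. the version plus the short commit hash.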


def write_version_file(version, sha):
    # Exists for BC, probably completely useless.
    with open(ROOT_DIR / "torchvision/version.py", "w") as f:
        f.write(f"__version__ = '{version}'\n")
        f.write(f"git_version = {repr(sha)}\n")
        f.write("from torchvision.extension import _check_cuda_version\n")
        f.write("if _check_cuda_version() > 0:\n")
        f.write("    cuda = _check_cuda_version()\n")


def get_requirements():
    def get_dist(pkgname):
        try:
            return get_distribution(pkgname)
        except DistributionNotFound:
            return None

    pytorch_dep = "torch"
    if os.getenv("PYTORCH_VERSION"):
        pytorch_dep += "==" + os.getenv("PYTORCH_VERSION")

    requirements = [
        "numpy",
        pytorch_dep,
    ]

    # Excluding 8.3.* because of https://github.com/pytorch/vision/issues/4934
    pillow_ver = " >= 5.3.0, !=8.3.*"
    pillow_req = "pillow-simd" if get_dist("pillow-simd") is not None else "pillow"
    requirements.append(pillow_req + pillow_ver)

    return requirements
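# For reference (values are illustrative): with PYTORCH_VERSION=2.4.0 set and pillow-simd not
# installed, this returns ["numpy", "torch==2.4.0", "pillow >= 5.3.0, !=8.3.*"].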


def get_macros_and_flags():
    define_macros = []
    extra_compile_args = {"cxx": []}
    if BUILD_CUDA_SOURCES:
        if IS_ROCM:
            define_macros += [("WITH_HIP", None)]
            nvcc_flags = []
        else:
            define_macros += [("WITH_CUDA", None)]
            if NVCC_FLAGS is None:
                nvcc_flags = []
            else:
                nvcc_flags = NVCC_FLAGS.split(" ")
        extra_compile_args["nvcc"] = nvcc_flags

    if sys.platform == "win32":
        define_macros += [("torchvision_EXPORTS", None)]
        extra_compile_args["cxx"].append("/MP")

    if DEBUG:
        extra_compile_args["cxx"].append("-g")
        extra_compile_args["cxx"].append("-O0")
        if "nvcc" in extra_compile_args:
            # remove any existing "-O<X>" and "-g" flags before appending ours
            nvcc_flags = extra_compile_args["nvcc"]
            extra_compile_args["nvcc"] = [f for f in nvcc_flags if not ("-O" in f or "-g" in f)]
            extra_compile_args["nvcc"].append("-O0")
            extra_compile_args["nvcc"].append("-g")
    else:
        extra_compile_args["cxx"].append("-g0")

    return define_macros, extra_compile_args
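# For reference (shape only, exact values depend on the environment): a non-debug CUDA build on
# Linux with NVCC_FLAGS unset returns roughly
#   ([("WITH_CUDA", None)], {"cxx": ["-g0"], "nvcc": []})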


def make_C_extension():
    print("Building _C extension")

    sources = (
        list(CSRS_DIR.glob("*.cpp"))
        + list(CSRS_DIR.glob("ops/*.cpp"))
        + list(CSRS_DIR.glob("ops/autocast/*.cpp"))
        + list(CSRS_DIR.glob("ops/autograd/*.cpp"))
        + list(CSRS_DIR.glob("ops/cpu/*.cpp"))
        + list(CSRS_DIR.glob("ops/quantized/cpu/*.cpp"))
    )
    mps_sources = list(CSRS_DIR.glob("ops/mps/*.mm"))

    if IS_ROCM:
        from torch.utils.hipify import hipify_python

        hipify_python.hipify(
            project_directory=str(ROOT_DIR),
            output_directory=str(ROOT_DIR),
            includes="torchvision/csrc/ops/cuda/*",
            show_detailed=True,
            is_pytorch_extension=True,
        )
        cuda_sources = list(CSRS_DIR.glob("ops/hip/*.hip"))
        for header in CSRS_DIR.glob("ops/cuda/*.h"):
            shutil.copy(str(header), str(CSRS_DIR / "ops/hip"))
    else:
        cuda_sources = list(CSRS_DIR.glob("ops/cuda/*.cu"))

    if BUILD_CUDA_SOURCES:
        Extension = CUDAExtension
        sources += cuda_sources
    else:
        Extension = CppExtension
        if torch.backends.mps.is_available() or FORCE_MPS:
            sources += mps_sources

    define_macros, extra_compile_args = get_macros_and_flags()
    return Extension(
        name="torchvision._C",
        sources=sorted(str(s) for s in sources),
        include_dirs=[CSRS_DIR],
        define_macros=define_macros,
        extra_compile_args=extra_compile_args,
    )


def find_libpng():
    # Returns (found, include dir, library dir, library name)
    if sys.platform in ("linux", "darwin"):
        libpng_config = shutil.which("libpng-config")
        if libpng_config is None:
            warnings.warn("libpng-config not found")
            return False, None, None, None
        min_version = parse_version("1.6.0")
        png_version = parse_version(
            subprocess.run([libpng_config, "--version"], stdout=subprocess.PIPE).stdout.strip().decode("utf-8")
        )
        if png_version < min_version:
            warnings.warn(f"libpng version {png_version} is less than minimum required version {min_version}")
            return False, None, None, None

        include_dir = (
            subprocess.run([libpng_config, "--I_opts"], stdout=subprocess.PIPE)
            .stdout.strip()
            .decode("utf-8")
            .split("-I")[1]
        )
        library_dir = subprocess.run([libpng_config, "--libdir"], stdout=subprocess.PIPE).stdout.strip().decode("utf-8")
        library = "png"
    else:  # Windows
        pngfix = shutil.which("pngfix")
        if pngfix is None:
            warnings.warn("pngfix not found")
            return False, None, None, None
        pngfix_dir = Path(pngfix).absolute().parent.parent

        library_dir = str(pngfix_dir / "lib")
        include_dir = str(pngfix_dir / "include/libpng16")
        library = "libpng"

    return True, include_dir, library_dir, library
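# For reference (output varies per system): `libpng-config --version` prints the installed version
# (e.g. "1.6.40"), `--I_opts` prints an include flag such as "-I/usr/include/libpng16", and
# `--libdir` prints the library directory, which is what the parsing above assumes.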


def find_library(header):
    # returns (found, include dir, library dir)
    # if include dir or library dir is None, it means that the library is in
    # standard paths and doesn't need to be added to compiler / linker search
    # paths

    searching_for = f"Searching for {header}"

    for folder in TORCHVISION_INCLUDE:
        if (Path(folder) / header).exists():
            print(f"{searching_for} in {Path(folder) / header}. Found in TORCHVISION_INCLUDE.")
            return True, None, None
    print(f"{searching_for}. Didn't find in TORCHVISION_INCLUDE.")

    # Try conda-related prefixes. If BUILD_PREFIX is set it means conda-build is
    # being run. If CONDA_PREFIX is set then we're in a conda environment.
    for prefix_env_var in ("BUILD_PREFIX", "CONDA_PREFIX"):
        if (prefix := os.environ.get(prefix_env_var)) is not None:
            prefix = Path(prefix)
            if sys.platform == "win32":
                prefix = prefix / "Library"
            include_dir = prefix / "include"
            library_dir = prefix / "lib"
            if (include_dir / header).exists():
                print(f"{searching_for}. Found in {prefix_env_var}.")
                return True, str(include_dir), str(library_dir)
        print(f"{searching_for}. Didn't find in {prefix_env_var}.")

    if sys.platform == "linux":
        for prefix in (Path("/usr/include"), Path("/usr/local/include")):
            if (prefix / header).exists():
                print(f"{searching_for}. Found in {prefix}.")
                return True, None, None
            print(f"{searching_for}. Didn't find in {prefix}")

    return False, None, None
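# Example outcomes (paths are hypothetical): find_library("jpeglib.h") returns (True, None, None)
# when the header is already on a default search path or listed in TORCHVISION_INCLUDE, and
# (True, "/opt/conda/include", "/opt/conda/lib") when it is only found under CONDA_PREFIX.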


def make_image_extension():
    print("Building image extension")

    include_dirs = TORCHVISION_INCLUDE.copy()
    library_dirs = TORCHVISION_LIBRARY.copy()

    libraries = []
    define_macros, extra_compile_args = get_macros_and_flags()

    image_dir = CSRS_DIR / "io/image"
    sources = list(image_dir.glob("*.cpp")) + list(image_dir.glob("cpu/*.cpp")) + list(image_dir.glob("cpu/giflib/*.c"))

    if IS_ROCM:
        sources += list(image_dir.glob("hip/*.cpp"))
        # we need to exclude this in favor of the hipified source
        sources.remove(image_dir / "image.cpp")
    else:
        sources += list(image_dir.glob("cuda/*.cpp"))

    Extension = CppExtension

    if USE_PNG:
        png_found, png_include_dir, png_library_dir, png_library = find_libpng()
        if png_found:
            print("Building torchvision with PNG support")
            print(f"{png_include_dir = }")
            print(f"{png_library_dir = }")
            include_dirs.append(png_include_dir)
            library_dirs.append(png_library_dir)
            libraries.append(png_library)
            define_macros += [("PNG_FOUND", 1)]
        else:
            warnings.warn("Building torchvision without PNG support")

    if USE_JPEG:
        jpeg_found, jpeg_include_dir, jpeg_library_dir = find_library(header="jpeglib.h")
        if jpeg_found:
            print("Building torchvision with JPEG support")
            print(f"{jpeg_include_dir = }")
            print(f"{jpeg_library_dir = }")
            if jpeg_include_dir is not None and jpeg_library_dir is not None:
                # if those are None it means they come from standard paths that are already in the search paths, which we don't need to re-add.
                include_dirs.append(jpeg_include_dir)
                library_dirs.append(jpeg_library_dir)
            libraries.append("jpeg")
            define_macros += [("JPEG_FOUND", 1)]
        else:
            warnings.warn("Building torchvision without JPEG support")

    if USE_WEBP:
        webp_found, webp_include_dir, webp_library_dir = find_library(header="webp/decode.h")
        if webp_found:
            print("Building torchvision with WEBP support")
            print(f"{webp_include_dir = }")
            print(f"{webp_library_dir = }")
            if webp_include_dir is not None and webp_library_dir is not None:
                # if those are None it means they come from standard paths that are already in the search paths, which we don't need to re-add.
                include_dirs.append(webp_include_dir)
                library_dirs.append(webp_library_dir)
            webp_library = "libwebp" if sys.platform == "win32" else "webp"
            libraries.append(webp_library)
            define_macros += [("WEBP_FOUND", 1)]
        else:
            warnings.warn("Building torchvision without WEBP support")

    if USE_HEIC:
        heic_found, heic_include_dir, heic_library_dir = find_library(header="libheif/heif.h")
        if heic_found:
            print("Building torchvision with HEIC support")
            print(f"{heic_include_dir = }")
            print(f"{heic_library_dir = }")
            if heic_include_dir is not None and heic_library_dir is not None:
                # if those are None it means they come from standard paths that are already in the search paths, which we don't need to re-add.
                include_dirs.append(heic_include_dir)
                library_dirs.append(heic_library_dir)
            libraries.append("heif")
            define_macros += [("HEIC_FOUND", 1)]
        else:
            warnings.warn("Building torchvision without HEIC support")

    if USE_AVIF:
        avif_found, avif_include_dir, avif_library_dir = find_library(header="avif/avif.h")
        if avif_found:
            print("Building torchvision with AVIF support")
            print(f"{avif_include_dir = }")
            print(f"{avif_library_dir = }")
            if avif_include_dir is not None and avif_library_dir is not None:
                # if those are None it means they come from standard paths that are already in the search paths, which we don't need to re-add.
                include_dirs.append(avif_include_dir)
                library_dirs.append(avif_library_dir)
            libraries.append("avif")
            define_macros += [("AVIF_FOUND", 1)]
        else:
            warnings.warn("Building torchvision without AVIF support")

    if USE_NVJPEG and torch.cuda.is_available():
        nvjpeg_found = CUDA_HOME is not None and (Path(CUDA_HOME) / "include/nvjpeg.h").exists()

        if nvjpeg_found:
            print("Building torchvision with NVJPEG image support")
            libraries.append("nvjpeg")
            define_macros += [("NVJPEG_FOUND", 1)]
            Extension = CUDAExtension
        else:
            warnings.warn("Building torchvision without NVJPEG support")

    return Extension(
        name="torchvision.image",
        sources=sorted(str(s) for s in sources),
        include_dirs=include_dirs,
        library_dirs=library_dirs,
        define_macros=define_macros,
        libraries=libraries,
        extra_compile_args=extra_compile_args,
    )
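# Roughly speaking, the *_FOUND macros defined above are consumed by the C++ sources under
# torchvision/csrc/io/image: when a macro is absent the corresponding decoder is compiled out and
# requesting that format at runtime raises an error instead.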


def make_video_decoders_extensions():
    print("Building video decoder extensions")

    build_without_extensions_msg = "Building without video decoder extensions."
    if sys.platform != "linux" or (sys.version_info.major == 3 and sys.version_info.minor == 9):
        # FIXME: Building torchvision with ffmpeg on MacOS or with Python 3.9
        # FIXME: causes a crash. See the following GitHub issues for more details.
        # FIXME: https://github.com/pytorch/pytorch/issues/65000
        # FIXME: https://github.com/pytorch/vision/issues/3367
        print("Can only build video decoder extensions on linux and Python != 3.9")
        return []

    ffmpeg_exe = shutil.which("ffmpeg")
    if ffmpeg_exe is None:
        print(f"{build_without_extensions_msg} Couldn't find ffmpeg binary.")
        return []

    def find_ffmpeg_libraries():
        ffmpeg_libraries = {"libavcodec", "libavformat", "libavutil", "libswresample", "libswscale"}

        ffmpeg_bin = os.path.dirname(ffmpeg_exe)
        ffmpeg_root = os.path.dirname(ffmpeg_bin)
        ffmpeg_include_dir = os.path.join(ffmpeg_root, "include")
        ffmpeg_library_dir = os.path.join(ffmpeg_root, "lib")

        gcc = os.environ.get("CC", shutil.which("gcc"))
        platform_tag = subprocess.run([gcc, "-print-multiarch"], stdout=subprocess.PIPE)
        platform_tag = platform_tag.stdout.strip().decode("utf-8")

        if platform_tag:
            # Most probably a Debian-based distribution
            ffmpeg_include_dir = [ffmpeg_include_dir, os.path.join(ffmpeg_include_dir, platform_tag)]
            ffmpeg_library_dir = [ffmpeg_library_dir, os.path.join(ffmpeg_library_dir, platform_tag)]
        else:
            ffmpeg_include_dir = [ffmpeg_include_dir]
            ffmpeg_library_dir = [ffmpeg_library_dir]

        for library in ffmpeg_libraries:
            library_found = False
            for search_path in ffmpeg_include_dir + TORCHVISION_INCLUDE:
                full_path = os.path.join(search_path, library, "*.h")
                library_found |= len(glob.glob(full_path)) > 0

            if not library_found:
                print(f"{build_without_extensions_msg}")
                print(f"{library} header files were not found.")
                return None, None

        return ffmpeg_include_dir, ffmpeg_library_dir

    ffmpeg_include_dir, ffmpeg_library_dir = find_ffmpeg_libraries()
    if ffmpeg_include_dir is None or ffmpeg_library_dir is None:
        return []

    print("Found ffmpeg:")
    print(f"  ffmpeg include path: {ffmpeg_include_dir}")
    print(f"  ffmpeg library_dir: {ffmpeg_library_dir}")

    extensions = []
    if USE_CPU_VIDEO_DECODER:
        print("Building with CPU video decoder support")

        # TorchVision base decoder + video reader
        video_reader_src_dir = os.path.join(ROOT_DIR, "torchvision", "csrc", "io", "video_reader")
        video_reader_src = glob.glob(os.path.join(video_reader_src_dir, "*.cpp"))
        base_decoder_src_dir = os.path.join(ROOT_DIR, "torchvision", "csrc", "io", "decoder")
        base_decoder_src = glob.glob(os.path.join(base_decoder_src_dir, "*.cpp"))
        # Torchvision video API
        videoapi_src_dir = os.path.join(ROOT_DIR, "torchvision", "csrc", "io", "video")
        videoapi_src = glob.glob(os.path.join(videoapi_src_dir, "*.cpp"))
        # exclude tests
        base_decoder_src = [x for x in base_decoder_src if "_test.cpp" not in x]

        combined_src = video_reader_src + base_decoder_src + videoapi_src

        extensions.append(
            CppExtension(
                # This is an awful name. It should be "cpu_video_decoder". Keeping for BC.
                "torchvision.video_reader",
                combined_src,
                include_dirs=[
                    base_decoder_src_dir,
                    video_reader_src_dir,
                    videoapi_src_dir,
                    str(CSRS_DIR),
                    *ffmpeg_include_dir,
                    *TORCHVISION_INCLUDE,
                ],
                library_dirs=ffmpeg_library_dir + TORCHVISION_LIBRARY,
                libraries=[
                    "avcodec",
                    "avformat",
                    "avutil",
                    "swresample",
                    "swscale",
                ],
                extra_compile_args=["-std=c++17"] if os.name != "nt" else ["/std:c++17", "/MP"],
                extra_link_args=["-std=c++17" if os.name != "nt" else "/std:c++17"],
            )
        )

    if USE_GPU_VIDEO_DECODER:
        # Locating GPU video decoder headers and libraries
        # CUDA_HOME should be set to the cuda root directory.
        # TORCHVISION_INCLUDE and TORCHVISION_LIBRARY should include the locations
        # of the headers and libraries below
        if not (
            BUILD_CUDA_SOURCES
            and CUDA_HOME is not None
            and any([os.path.exists(os.path.join(folder, "cuviddec.h")) for folder in TORCHVISION_INCLUDE])
            and any([os.path.exists(os.path.join(folder, "nvcuvid.h")) for folder in TORCHVISION_INCLUDE])
            and any([os.path.exists(os.path.join(folder, "libnvcuvid.so")) for folder in TORCHVISION_LIBRARY])
            and any([os.path.exists(os.path.join(folder, "libavcodec", "bsf.h")) for folder in ffmpeg_include_dir])
        ):
            print("Could not find necessary dependencies. Refer to setup.py to check which ones are needed.")
            print("Building without GPU video decoder support")
            return extensions
        print("Building torchvision with GPU video decoder support")

        gpu_decoder_path = os.path.join(CSRS_DIR, "io", "decoder", "gpu")
        gpu_decoder_src = glob.glob(os.path.join(gpu_decoder_path, "*.cpp"))
        cuda_libs = os.path.join(CUDA_HOME, "lib64")
        cuda_inc = os.path.join(CUDA_HOME, "include")

        _, extra_compile_args = get_macros_and_flags()
        extensions.append(
            CUDAExtension(
                "torchvision.gpu_decoder",
                gpu_decoder_src,
                include_dirs=[CSRS_DIR] + TORCHVISION_INCLUDE + [gpu_decoder_path] + [cuda_inc] + ffmpeg_include_dir,
                library_dirs=ffmpeg_library_dir + TORCHVISION_LIBRARY + [cuda_libs],
                libraries=[
                    "avcodec",
                    "avformat",
                    "avutil",
                    "swresample",
                    "swscale",
                    "nvcuvid",
                    "cuda",
                    "cudart",
                    "z",
                    "pthread",
                    "dl",
                    "nppicc",
                ],
                extra_compile_args=extra_compile_args,
            )
        )

    return extensions


class clean(distutils.command.clean.clean):
    def run(self):
        with open(".gitignore") as f:
            ignores = f.read()
            for wildcard in filter(None, ignores.split("\n")):
                for filename in glob.glob(wildcard):
                    try:
                        os.remove(filename)
                    except OSError:
                        shutil.rmtree(filename, ignore_errors=True)

        # It's an old-style class in Python 2.7...
        distutils.command.clean.clean.run(self)


if __name__ == "__main__":
    version, sha = get_version()
    write_version_file(version, sha)

    print(f"Building wheel {PACKAGE_NAME}-{version}")

    with open("README.md") as f:
        readme = f.read()

    extensions = [
        make_C_extension(),
        make_image_extension(),
        *make_video_decoders_extensions(),
    ]

    setup(
        name=PACKAGE_NAME,
        version=version,
        author="PyTorch Core Team",
        author_email="soumith@pytorch.org",
        url="https://github.com/pytorch/vision",
        description="image and video datasets and models for torch deep learning",
        long_description=readme,
        long_description_content_type="text/markdown",
        license="BSD",
        packages=find_packages(exclude=("test",)),
        package_data={PACKAGE_NAME: ["*.dll", "*.dylib", "*.so", "prototype/datasets/_builtin/*.categories"]},
        zip_safe=False,
        install_requires=get_requirements(),
        extras_require={
            "gdown": ["gdown>=4.7.3"],
            "scipy": ["scipy"],
        },
        ext_modules=extensions,
        python_requires=">=3.8",
        cmdclass={
            "build_ext": BuildExtension.with_options(no_python_abi_suffix=True),
            "clean": clean,
        },
    )
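# Typical invocations (assuming a compatible torch is already installed and the commands are run
# from the repository root):
#   python setup.py develop        # editable install, builds the C/C++/CUDA extensions in place
#   python setup.py bdist_wheel    # builds a wheel under dist/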