# Welcome to the PyTorch setup.py.
# Environment variables you are probably interested in:
#
#   DEBUG
#     build with -O0 and -g (debug symbols)
#
#   REL_WITH_DEB_INFO
#     build with optimizations and -g (debug symbols)
#
#   USE_CUSTOM_DEBINFO="path/to/file1.cpp;path/to/file2.cpp"
#     build with debug info only for specified files
#
#   MAX_JOBS
#     maximum number of compile jobs we should use to compile your code
#
#   USE_CUDA=0
#     disables CUDA build
#
#   CFLAGS
#     flags to apply to both C and C++ files to be compiled (a quirk of setup.py
#     which we have faithfully adhered to in our build system is that CFLAGS
#     also applies to C++ files (unless CXXFLAGS is set), in contrast to the
#     default behavior of autogoo and cmake build systems.)
#
#   CC
#     the C/C++ compiler to use
#
# Environment variables for feature toggles:
#
#   DEBUG_CUDA=1
#     if used in conjunction with DEBUG or REL_WITH_DEB_INFO, will also
#     build CUDA kernels with -lineinfo --source-in-ptx. Note that
#     on CUDA 12 this may cause nvcc to OOM, so this is disabled by default.
#
#   USE_CUDNN=0
#     disables the cuDNN build
#
#   USE_CUSPARSELT=0
#     disables the cuSPARSELt build
#
#   USE_CUDSS=0
#     disables the cuDSS build
#
#   USE_CUFILE=0
#     disables the cuFile build
#
#   USE_FBGEMM=0
#     disables the FBGEMM build
#
#   USE_KINETO=0
#     disables usage of libkineto library for profiling
#
#   USE_NUMPY=0
#     disables the NumPy build
#
#   BUILD_TEST=0
#     disables the test build
#
#   USE_MKLDNN=0
#     disables use of MKLDNN
#
#   USE_MKLDNN_ACL
#     enables use of Compute Library backend for MKLDNN on Arm;
#     USE_MKLDNN must be explicitly enabled.
#
#   MKLDNN_CPU_RUNTIME
#     MKL-DNN threading mode: TBB or OMP (default)
#
#   USE_STATIC_MKL
#     Prefer to link with MKL statically - Unix only
#
#   USE_ITT=0
#     disable use of Intel(R) VTune Profiler's ITT functionality
#
#   USE_NNPACK=0
#     disables NNPACK build
#
#   USE_DISTRIBUTED=0
#     disables distributed (c10d, gloo, mpi, etc.) build
#
#   USE_TENSORPIPE=0
#     disables distributed Tensorpipe backend build
#
#   USE_GLOO=0
#     disables distributed gloo backend build
#
#   USE_MPI=0
#     disables distributed MPI backend build
#
#   USE_SYSTEM_NCCL=0
#     disables use of system-wide nccl (we will use our submoduled
#     copy in third_party/nccl)
#
#   USE_OPENMP=0
#     disables use of OpenMP for parallelization
#
#   USE_FLASH_ATTENTION=0
#     disables building flash attention for scaled dot product attention
#
#   USE_MEM_EFF_ATTENTION=0
#     disables building memory efficient attention for scaled dot product attention
#
#   BUILD_BINARY
#     enables the additional binaries/ build
#
#   ATEN_AVX512_256=TRUE
#     ATen AVX2 kernels can use 32 ymm registers, instead of the default 16.
#     This option can be used if AVX512 doesn't perform well on a machine.
#     The FBGEMM library also uses AVX512_256 kernels on Xeon D processors,
#     but it also has some (optimized) assembly code.
#
#   PYTORCH_BUILD_VERSION
#   PYTORCH_BUILD_NUMBER
#     specify the version of PyTorch, rather than the hard-coded version
#     in this file; used when we're building binaries for distribution
#
#   TORCH_CUDA_ARCH_LIST
#     specify which CUDA architectures to build for.
#     ie `TORCH_CUDA_ARCH_LIST="6.0;7.0"`
#     These are not CUDA versions, instead, they specify what
#     classes of NVIDIA hardware we should generate PTX for.
#
#   PYTORCH_ROCM_ARCH
#     specify which AMD GPU targets to build for.
#     ie `PYTORCH_ROCM_ARCH="gfx900;gfx906"`
#
#   ONNX_NAMESPACE
#     specify a namespace for ONNX built here rather than the hard-coded
#     one in this file; needed to build with other frameworks that share ONNX.
#
#   BLAS
#     BLAS to be used by Caffe2. Can be MKL, Eigen, ATLAS, FlexiBLAS, or OpenBLAS. If set
#     then the build will fail if the requested BLAS is not found, otherwise
#     the BLAS will be chosen based on what is found on your system.
#
#   MKL_THREADING
#     MKL threading mode: SEQ, TBB or OMP (default)
#
#   USE_ROCM_KERNEL_ASSERT=1
#     Enable kernel assert in ROCm platform
#
# Environment variables we respect (these environment variables are
# conventional and are often understood/set by other software.)
#
#   CUDA_HOME (Linux/OS X)
#   CUDA_PATH (Windows)
#     specify where CUDA is installed; usually /usr/local/cuda or
#     /usr/local/cuda-x.y
#   CUDAHOSTCXX
#     specify a different compiler than the system one to use as the CUDA
#     host compiler for nvcc.
#
#   CUDA_NVCC_EXECUTABLE
#     Specify a NVCC to use. This is used in our CI to point to a cached nvcc
#
#   CUDNN_LIB_DIR
#   CUDNN_INCLUDE_DIR
#   CUDNN_LIBRARY
#     specify where cuDNN is installed
#
#   MIOPEN_LIB_DIR
#   MIOPEN_INCLUDE_DIR
#   MIOPEN_LIBRARY
#     specify where MIOpen is installed
#
#   NCCL_ROOT
#   NCCL_LIB_DIR
#   NCCL_INCLUDE_DIR
#     specify where nccl is installed
#
#   ACL_ROOT_DIR
#     specify where Compute Library is installed
#
#   LIBRARY_PATH
#   LD_LIBRARY_PATH
#     we will search for libraries in these paths
#
#   ATEN_THREADING
#     ATen parallel backend to use for intra- and inter-op parallelism
#     possible values:
#       OMP - use OpenMP for intra-op and native backend for inter-op tasks
#       NATIVE - use native thread pool for both intra- and inter-op tasks
#
#   USE_SYSTEM_LIBS (work in progress)
#     Use system-provided libraries to satisfy the build dependencies.
#     When turned on, the following cmake variables will be toggled as well:
#       USE_SYSTEM_CPUINFO=ON USE_SYSTEM_SLEEF=ON BUILD_CUSTOM_PROTOBUF=OFF
#
#   USE_MIMALLOC
#     Static link mimalloc into C10, and use mimalloc in alloc_cpu & alloc_free.
#     By default, it is only enabled on Windows.
#
#   USE_PRIORITIZED_TEXT_FOR_LD
#     Uses prioritized text from cmake/prioritized_text.txt for LD
#
#   BUILD_LIBTORCH_WHL
#     Builds libtorch.so and its dependencies as a wheel
#
#   BUILD_PYTHON_ONLY
#     Builds pytorch as a wheel using libtorch.so from a separate wheel

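# A minimal usage sketch (illustrative only; which flags are useful depends on
# your machine and toolchain): these are ordinary environment variables read at
# build time, so a CPU-only debug build might be invoked as
#
#   DEBUG=1 USE_CUDA=0 MAX_JOBS=8 python setup.py develop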
import os
import sys


if sys.platform == "win32" and sys.maxsize.bit_length() == 31:
    print(
        "32-bit Windows Python runtime is not supported. Please switch to 64-bit Python."
    )
    sys.exit(-1)

import platform


BUILD_LIBTORCH_WHL = os.getenv("BUILD_LIBTORCH_WHL", "0") == "1"
BUILD_PYTHON_ONLY = os.getenv("BUILD_PYTHON_ONLY", "0") == "1"

python_min_version = (3, 8, 0)
python_min_version_str = ".".join(map(str, python_min_version))
if sys.version_info < python_min_version:
    print(
        f"You are using Python {platform.python_version()}. Python >={python_min_version_str} is required."
    )
    sys.exit(-1)

import filecmp
import glob
import importlib
import importlib.util
import json
import shutil
import subprocess
import sysconfig
import time
from collections import defaultdict

import setuptools.command.build_ext
import setuptools.command.install
import setuptools.command.sdist
from setuptools import Extension, find_packages, setup
from setuptools.dist import Distribution
from tools.build_pytorch_libs import build_caffe2
from tools.generate_torch_version import get_torch_version
from tools.setup_helpers.cmake import CMake
from tools.setup_helpers.env import build_type, IS_DARWIN, IS_LINUX, IS_WINDOWS
from tools.setup_helpers.generate_linker_script import gen_linker_script


def _get_package_path(package_name):
    spec = importlib.util.find_spec(package_name)
    if spec:
        # The package might be a namespace package, so get_data may fail
        try:
            loader = spec.loader
            if loader is not None:
                file_path = loader.get_filename()  # type: ignore[attr-defined]
                return os.path.dirname(file_path)
        except AttributeError:
            pass
    return None


# set up appropriate env variables
if BUILD_LIBTORCH_WHL:
    # Set up environment variables for ONLY building libtorch.so and not libtorch_python.so
    # functorch is not supported without python
    os.environ["BUILD_FUNCTORCH"] = "OFF"


if BUILD_PYTHON_ONLY:
    os.environ["BUILD_LIBTORCHLESS"] = "ON"
    os.environ["LIBTORCH_LIB_PATH"] = f"{_get_package_path('torch')}/lib"

################################################################################
# Parameters parsed from environment
################################################################################

VERBOSE_SCRIPT = True
RUN_BUILD_DEPS = True
# see if the user passed a quiet flag to setup.py arguments and respect
# that in our parts of the build
EMIT_BUILD_WARNING = False
RERUN_CMAKE = False
CMAKE_ONLY = False
filtered_args = []
for i, arg in enumerate(sys.argv):
    if arg == "--cmake":
        RERUN_CMAKE = True
        continue
    if arg == "--cmake-only":
        # Stop once cmake terminates. Leave users a chance to adjust build
        # options.
        CMAKE_ONLY = True
        continue
    if arg == "rebuild" or arg == "build":
        arg = "build"  # rebuild is gone, make it build
        EMIT_BUILD_WARNING = True
    if arg == "--":
        filtered_args += sys.argv[i:]
        break
    if arg == "-q" or arg == "--quiet":
        VERBOSE_SCRIPT = False
    if arg in ["clean", "egg_info", "sdist"]:
        RUN_BUILD_DEPS = False
    filtered_args.append(arg)
sys.argv = filtered_args

if VERBOSE_SCRIPT:

    def report(*args):
        print(*args)

else:

    def report(*args):
        pass

    # Make distutils respect --quiet too
    setuptools.distutils.log.warn = report

# Constant known variables used throughout this file
cwd = os.path.dirname(os.path.abspath(__file__))
lib_path = os.path.join(cwd, "torch", "lib")
third_party_path = os.path.join(cwd, "third_party")
caffe2_build_dir = os.path.join(cwd, "build")

# CMAKE: full path to python library
if IS_WINDOWS:
    cmake_python_library = "{}/libs/python{}.lib".format(
        sysconfig.get_config_var("prefix"), sysconfig.get_config_var("VERSION")
    )
    # Fix virtualenv builds
    if not os.path.exists(cmake_python_library):
        cmake_python_library = "{}/libs/python{}.lib".format(
            sys.base_prefix, sysconfig.get_config_var("VERSION")
        )
else:
    cmake_python_library = "{}/{}".format(
        sysconfig.get_config_var("LIBDIR"), sysconfig.get_config_var("INSTSONAME")
    )
cmake_python_include_dir = sysconfig.get_path("include")


################################################################################
# Version, create_version_file, and package_name
################################################################################

package_name = os.getenv("TORCH_PACKAGE_NAME", "torch")
LIBTORCH_PKG_NAME = os.getenv("LIBTORCH_PACKAGE_NAME", "torch_no_python")
if BUILD_LIBTORCH_WHL:
    package_name = LIBTORCH_PKG_NAME


package_type = os.getenv("PACKAGE_TYPE", "wheel")
version = get_torch_version()
report(f"Building wheel {package_name}-{version}")

cmake = CMake()


def get_submodule_folders():
    git_modules_path = os.path.join(cwd, ".gitmodules")
    default_modules_path = [
        os.path.join(third_party_path, name)
        for name in [
            "gloo",
            "cpuinfo",
            "onnx",
            "fbgemm",
            "cutlass",
        ]
    ]
    if not os.path.exists(git_modules_path):
        return default_modules_path
    with open(git_modules_path) as f:
        return [
            os.path.join(cwd, line.split("=", 1)[1].strip())
            for line in f
            if line.strip().startswith("path")
        ]


def check_submodules():
    def check_for_files(folder, files):
        if not any(os.path.exists(os.path.join(folder, f)) for f in files):
            report("Could not find any of {} in {}".format(", ".join(files), folder))
            report("Did you run 'git submodule update --init --recursive'?")
            sys.exit(1)

    def not_exists_or_empty(folder):
        return not os.path.exists(folder) or (
            os.path.isdir(folder) and len(os.listdir(folder)) == 0
        )

    if bool(os.getenv("USE_SYSTEM_LIBS", False)):
        return
    folders = get_submodule_folders()
    # If none of the submodule folders exists, try to initialize them
    if all(not_exists_or_empty(folder) for folder in folders):
        try:
            print(" --- Trying to initialize submodules")
            start = time.time()
            subprocess.check_call(
                ["git", "submodule", "update", "--init", "--recursive"], cwd=cwd
            )
            end = time.time()
            print(f" --- Submodule initialization took {end - start:.2f} sec")
        except Exception:
            print(" --- Submodule initialization failed")
            print("Please run:\n\tgit submodule update --init --recursive")
            sys.exit(1)
    for folder in folders:
        check_for_files(
            folder,
            [
                "CMakeLists.txt",
                "Makefile",
                "setup.py",
                "LICENSE",
                "LICENSE.md",
                "LICENSE.txt",
            ],
        )
    check_for_files(
        os.path.join(third_party_path, "fbgemm", "third_party", "asmjit"),
        ["CMakeLists.txt"],
    )
    check_for_files(
        os.path.join(third_party_path, "onnx", "third_party", "benchmark"),
        ["CMakeLists.txt"],
    )


# Windows has very bad support for symbolic links.
# Instead of using symlinks, we're going to copy files over
def mirror_files_into_torchgen():
    # (new_path, orig_path)
    # Directories are OK and are recursively mirrored.
    paths = [
        (
            "torchgen/packaged/ATen/native/native_functions.yaml",
            "aten/src/ATen/native/native_functions.yaml",
        ),
        ("torchgen/packaged/ATen/native/tags.yaml", "aten/src/ATen/native/tags.yaml"),
        ("torchgen/packaged/ATen/templates", "aten/src/ATen/templates"),
        ("torchgen/packaged/autograd", "tools/autograd"),
        ("torchgen/packaged/autograd/templates", "tools/autograd/templates"),
    ]
    for new_path, orig_path in paths:
        # Create the dirs involved in new_path if they don't exist
        if not os.path.exists(new_path):
            os.makedirs(os.path.dirname(new_path), exist_ok=True)

        # Copy the files from the orig location to the new location
        if os.path.isfile(orig_path):
            shutil.copyfile(orig_path, new_path)
            continue
        if os.path.isdir(orig_path):
            if os.path.exists(new_path):
                # copytree fails if the tree exists already, so remove it.
                shutil.rmtree(new_path)
            shutil.copytree(orig_path, new_path)
            continue
        raise RuntimeError("Check the file paths in `mirror_files_into_torchgen()`")


# all the work we need to do _before_ setup runs
def build_deps():
    report("-- Building version " + version)

    check_submodules()
    check_pydep("yaml", "pyyaml")
    build_python = not BUILD_LIBTORCH_WHL
    build_caffe2(
        version=version,
        cmake_python_library=cmake_python_library,
        build_python=build_python,
        rerun_cmake=RERUN_CMAKE,
        cmake_only=CMAKE_ONLY,
        cmake=cmake,
    )

    if CMAKE_ONLY:
        report(
            'Finished running cmake. Run "ccmake build" or '
            '"cmake-gui build" to adjust build options and '
            '"python setup.py install" to build.'
        )
        sys.exit()

    # Use copies instead of symbolic files.
    # Windows has very poor support for them.
    sym_files = [
        "tools/shared/_utils_internal.py",
        "torch/utils/benchmark/utils/valgrind_wrapper/callgrind.h",
        "torch/utils/benchmark/utils/valgrind_wrapper/valgrind.h",
    ]
    orig_files = [
        "torch/_utils_internal.py",
        "third_party/valgrind-headers/callgrind.h",
        "third_party/valgrind-headers/valgrind.h",
    ]
    for sym_file, orig_file in zip(sym_files, orig_files):
        same = False
        if os.path.exists(sym_file):
            if filecmp.cmp(sym_file, orig_file):
                same = True
            else:
                os.remove(sym_file)
        if not same:
            shutil.copyfile(orig_file, sym_file)

################################################################################
# Building dependent libraries
################################################################################

missing_pydep = """
Missing build dependency: Unable to `import {importname}`.
Please install it via `conda install {module}` or `pip install {module}`
""".strip()


def check_pydep(importname, module):
    try:
        importlib.import_module(importname)
    except ImportError as e:
        raise RuntimeError(
            missing_pydep.format(importname=importname, module=module)
        ) from e


class build_ext(setuptools.command.build_ext.build_ext):
    def _embed_libomp(self):
        # Copy libiomp5.dylib/libomp.dylib inside the wheel package on MacOS
        lib_dir = os.path.join(self.build_lib, "torch", "lib")
        libtorch_cpu_path = os.path.join(lib_dir, "libtorch_cpu.dylib")
        if not os.path.exists(libtorch_cpu_path):
            return
        # Parse libtorch_cpu load commands
        otool_cmds = (
            subprocess.check_output(["otool", "-l", libtorch_cpu_path])
            .decode("utf-8")
            .split("\n")
        )
        rpaths, libs = [], []
        for idx, line in enumerate(otool_cmds):
            if line.strip() == "cmd LC_LOAD_DYLIB":
                lib_name = otool_cmds[idx + 2].strip()
                assert lib_name.startswith("name ")
                libs.append(lib_name.split(" ", 1)[1].rsplit("(", 1)[0][:-1])

            if line.strip() == "cmd LC_RPATH":
                rpath = otool_cmds[idx + 2].strip()
                assert rpath.startswith("path ")
                rpaths.append(rpath.split(" ", 1)[1].rsplit("(", 1)[0][:-1])

        omp_lib_name = (
            "libomp.dylib" if os.uname().machine == "arm64" else "libiomp5.dylib"
        )
        omp_rpath_lib_path = os.path.join("@rpath", omp_lib_name)
        if omp_rpath_lib_path not in libs:
            return

        # Copy libomp/libiomp5 from rpath locations
        for rpath in rpaths:
            source_lib = os.path.join(rpath, omp_lib_name)
            if not os.path.exists(source_lib):
                continue
            target_lib = os.path.join(self.build_lib, "torch", "lib", omp_lib_name)
            self.copy_file(source_lib, target_lib)
            # Delete old rpath and add @loader_lib to the rpath
            # This should prevent delocate from attempting to package another instance
            # of OpenMP library in torch wheel as well as loading two libomp.dylib into
            # the address space, as libraries are cached by their unresolved names
            subprocess.check_call(
                [
                    "install_name_tool",
                    "-rpath",
                    rpath,
                    "@loader_path",
                    libtorch_cpu_path,
                ]
            )
            break

        # Copy omp.h from OpenMP_C_FLAGS and copy it into include folder
        omp_cflags = get_cmake_cache_vars()["OpenMP_C_FLAGS"]
        if not omp_cflags:
            return
        for include_dir in [f[2:] for f in omp_cflags.split(" ") if f.startswith("-I")]:
            omp_h = os.path.join(include_dir, "omp.h")
            if not os.path.exists(omp_h):
                continue
            target_omp_h = os.path.join(self.build_lib, "torch", "include", "omp.h")
            self.copy_file(omp_h, target_omp_h)
            break

    def run(self):
        # Report build options. This is run after the build completes so
        # `CMakeCache.txt` exists and we can get an accurate report on what
        # is used and what is not.
        cmake_cache_vars = defaultdict(lambda: False, cmake.get_cmake_cache_variables())
        if cmake_cache_vars["USE_NUMPY"]:
            report("-- Building with NumPy bindings")
        else:
            report("-- NumPy not found")
        if cmake_cache_vars["USE_CUDNN"]:
            report(
                "-- Detected cuDNN at "
                + cmake_cache_vars["CUDNN_LIBRARY"]
                + ", "
                + cmake_cache_vars["CUDNN_INCLUDE_DIR"]
            )
        else:
            report("-- Not using cuDNN")
        if cmake_cache_vars["USE_CUDA"]:
            report("-- Detected CUDA at " + cmake_cache_vars["CUDA_TOOLKIT_ROOT_DIR"])
        else:
            report("-- Not using CUDA")
        if cmake_cache_vars["USE_XPU"]:
            report("-- Detected XPU runtime at " + cmake_cache_vars["SYCL_LIBRARY_DIR"])
        else:
            report("-- Not using XPU")
        if cmake_cache_vars["USE_MKLDNN"]:
            report("-- Using MKLDNN")
            if cmake_cache_vars["USE_MKLDNN_ACL"]:
                report("-- Using Compute Library for the Arm architecture with MKLDNN")
            else:
                report(
                    "-- Not using Compute Library for the Arm architecture with MKLDNN"
                )
            if cmake_cache_vars["USE_MKLDNN_CBLAS"]:
                report("-- Using CBLAS in MKLDNN")
            else:
                report("-- Not using CBLAS in MKLDNN")
        else:
            report("-- Not using MKLDNN")
        if cmake_cache_vars["USE_NCCL"] and cmake_cache_vars["USE_SYSTEM_NCCL"]:
            report(
                "-- Using system provided NCCL library at {}, {}".format(
                    cmake_cache_vars["NCCL_LIBRARIES"],
                    cmake_cache_vars["NCCL_INCLUDE_DIRS"],
                )
            )
        elif cmake_cache_vars["USE_NCCL"]:
            report("-- Building NCCL library")
        else:
            report("-- Not using NCCL")
        if cmake_cache_vars["USE_DISTRIBUTED"]:
            if IS_WINDOWS:
                report("-- Building without distributed package")
            else:
                report("-- Building with distributed package: ")
                report(
                    " -- USE_TENSORPIPE={}".format(cmake_cache_vars["USE_TENSORPIPE"])
                )
                report(" -- USE_GLOO={}".format(cmake_cache_vars["USE_GLOO"]))
                report(" -- USE_MPI={}".format(cmake_cache_vars["USE_OPENMPI"]))
        else:
            report("-- Building without distributed package")
        if cmake_cache_vars["STATIC_DISPATCH_BACKEND"]:
            report(
                "-- Using static dispatch with backend {}".format(
                    cmake_cache_vars["STATIC_DISPATCH_BACKEND"]
                )
            )
        if cmake_cache_vars["USE_LIGHTWEIGHT_DISPATCH"]:
            report("-- Using lightweight dispatch")
        if cmake_cache_vars["BUILD_EXECUTORCH"]:
            report("-- Building Executorch")

        if cmake_cache_vars["USE_ITT"]:
            report("-- Using ITT")
        else:
            report("-- Not using ITT")

        # Do not use clang to compile extensions if `-fstack-clash-protection` is defined
        # in system CFLAGS
        c_flags = str(os.getenv("CFLAGS", ""))
        if (
            IS_LINUX
            and "-fstack-clash-protection" in c_flags
            and "clang" in os.environ.get("CC", "")
        ):
            os.environ["CC"] = str(os.environ["CC"])

        # It's an old-style class in Python 2.7...
        setuptools.command.build_ext.build_ext.run(self)

        if IS_DARWIN and package_type != "conda":
            self._embed_libomp()

        # Copy the essential export library to compile C++ extensions.
        if IS_WINDOWS:
            build_temp = self.build_temp

            ext_filename = self.get_ext_filename("_C")
            lib_filename = ".".join(ext_filename.split(".")[:-1]) + ".lib"

            export_lib = os.path.join(
                build_temp, "torch", "csrc", lib_filename
            ).replace("\\", "/")

            build_lib = self.build_lib

            target_lib = os.path.join(build_lib, "torch", "lib", "_C.lib").replace(
                "\\", "/"
            )

            # Create "torch/lib" directory if not exists.
            # (It is not created yet in "develop" mode.)
            target_dir = os.path.dirname(target_lib)
            if not os.path.exists(target_dir):
                os.makedirs(target_dir)

            self.copy_file(export_lib, target_lib)

    def build_extensions(self):
        self.create_compile_commands()
        # The caffe2 extensions are created in
        # tmp_install/lib/pythonM.m/site-packages/caffe2/python/
        # and need to be copied to build/lib.linux.... , which will be a
        # platform dependent build folder created by the "build" command of
        # setuptools. Only the contents of this folder are installed in the
        # "install" command by default.
        # We only make this copy for Caffe2's pybind extensions
        caffe2_pybind_exts = [
            "caffe2.python.caffe2_pybind11_state",
            "caffe2.python.caffe2_pybind11_state_gpu",
            "caffe2.python.caffe2_pybind11_state_hip",
        ]
        if BUILD_LIBTORCH_WHL:
            caffe2_pybind_exts = []
        i = 0
        while i < len(self.extensions):
            ext = self.extensions[i]
            if ext.name not in caffe2_pybind_exts:
                i += 1
                continue
            fullname = self.get_ext_fullname(ext.name)
            filename = self.get_ext_filename(fullname)
            report(f"\nCopying extension {ext.name}")

            relative_site_packages = (
                sysconfig.get_path("purelib")
                .replace(sysconfig.get_path("data"), "")
                .lstrip(os.path.sep)
            )
            src = os.path.join("torch", relative_site_packages, filename)
            if not os.path.exists(src):
                report(f"{src} does not exist")
                del self.extensions[i]
            else:
                dst = os.path.join(os.path.realpath(self.build_lib), filename)
                report(f"Copying {ext.name} from {src} to {dst}")
                dst_dir = os.path.dirname(dst)
                if not os.path.exists(dst_dir):
                    os.makedirs(dst_dir)
                self.copy_file(src, dst)
                i += 1

        # Copy functorch extension
        for i, ext in enumerate(self.extensions):
            if ext.name != "functorch._C":
                continue
            fullname = self.get_ext_fullname(ext.name)
            filename = self.get_ext_filename(fullname)
            fileext = os.path.splitext(filename)[1]
            src = os.path.join(os.path.dirname(filename), "functorch" + fileext)
            dst = os.path.join(os.path.realpath(self.build_lib), filename)
            if os.path.exists(src):
                report(f"Copying {ext.name} from {src} to {dst}")
                dst_dir = os.path.dirname(dst)
                if not os.path.exists(dst_dir):
                    os.makedirs(dst_dir)
                self.copy_file(src, dst)

        setuptools.command.build_ext.build_ext.build_extensions(self)

    def get_outputs(self):
        outputs = setuptools.command.build_ext.build_ext.get_outputs(self)
        outputs.append(os.path.join(self.build_lib, "caffe2"))
        report(f"setup.py::get_outputs returning {outputs}")
        return outputs

    def create_compile_commands(self):
        def load(filename):
            with open(filename) as f:
                return json.load(f)

        ninja_files = glob.glob("build/*compile_commands.json")
        cmake_files = glob.glob("torch/lib/build/*/compile_commands.json")
        all_commands = [entry for f in ninja_files + cmake_files for entry in load(f)]

        # cquery does not like c++ compiles that start with gcc.
        # It forgets to include the c++ header directories.
        # We can work around this by replacing the gcc calls that python
        # setup.py generates with g++ calls instead
        for command in all_commands:
            if command["command"].startswith("gcc "):
                command["command"] = "g++ " + command["command"][4:]

        new_contents = json.dumps(all_commands, indent=2)
        contents = ""
        if os.path.exists("compile_commands.json"):
            with open("compile_commands.json") as f:
                contents = f.read()
        if contents != new_contents:
            with open("compile_commands.json", "w") as f:
                f.write(new_contents)


class concat_license_files:
    """Merge LICENSE and LICENSES_BUNDLED.txt as a context manager

    LICENSE is the main PyTorch license, LICENSES_BUNDLED.txt is auto-generated
    from all the licenses found in ./third_party/. We concatenate them so there
    is a single license file in the sdist and wheels with all of the necessary
    licensing info.
    """

    def __init__(self, include_files=False):
        self.f1 = "LICENSE"
        self.f2 = "third_party/LICENSES_BUNDLED.txt"
        self.include_files = include_files

    def __enter__(self):
        """Concatenate files"""

        old_path = sys.path
        sys.path.append(third_party_path)
        try:
            from build_bundled import create_bundled
        finally:
            sys.path = old_path

        with open(self.f1) as f1:
            self.bsd_text = f1.read()

        with open(self.f1, "a") as f1:
            f1.write("\n\n")
            create_bundled(
                os.path.relpath(third_party_path), f1, include_files=self.include_files
            )

    def __exit__(self, exception_type, exception_value, traceback):
        """Restore content of f1"""
        with open(self.f1, "w") as f:
            f.write(self.bsd_text)


try:
    from wheel.bdist_wheel import bdist_wheel
except ImportError:
    # This is useful when wheel is not installed and bdist_wheel is not
    # specified on the command line. If it _is_ specified, parsing the command
    # line will fail before wheel_concatenate is needed
    wheel_concatenate = None
else:
    # Need to create the proper LICENSE.txt for the wheel
    class wheel_concatenate(bdist_wheel):
        """check submodules on sdist to prevent incomplete tarballs"""

        def run(self):
            with concat_license_files(include_files=True):
                super().run()

        def write_wheelfile(self, *args, **kwargs):
            super().write_wheelfile(*args, **kwargs)

            if BUILD_LIBTORCH_WHL:
                # Remove extraneous files in the libtorch wheel
                for root, dirs, files in os.walk(self.bdist_dir):
                    for file in files:
                        if file.endswith((".a", ".so")) and os.path.isfile(
                            os.path.join(self.bdist_dir, file)
                        ):
                            os.remove(os.path.join(root, file))
                        elif file.endswith(".py"):
                            os.remove(os.path.join(root, file))
                # need an __init__.py file otherwise we wouldn't have a package
                open(os.path.join(self.bdist_dir, "torch", "__init__.py"), "w").close()


class install(setuptools.command.install.install):
    def run(self):
        super().run()


class clean(setuptools.Command):
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        import glob
        import re

        with open(".gitignore") as f:
            ignores = f.read()
            pat = re.compile(r"^#( BEGIN NOT-CLEAN-FILES )?")
            for wildcard in filter(None, ignores.split("\n")):
                match = pat.match(wildcard)
                if match:
                    if match.group(1):
                        # Marker is found and stop reading .gitignore.
                        break
                    # Ignore lines which begin with '#'.
                else:
                    # Don't remove absolute paths from the system
                    wildcard = wildcard.lstrip("./")

                    for filename in glob.glob(wildcard):
                        try:
                            os.remove(filename)
                        except OSError:
                            shutil.rmtree(filename, ignore_errors=True)


class sdist(setuptools.command.sdist.sdist):
    def run(self):
        with concat_license_files():
            super().run()

def get_cmake_cache_vars():
    try:
        return defaultdict(lambda: False, cmake.get_cmake_cache_variables())
    except FileNotFoundError:
        # CMakeCache.txt does not exist. Probably running "python setup.py clean" over a clean directory.
        return defaultdict(lambda: False)


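# Note: get_cmake_cache_vars() above falls back to defaultdict(lambda: False), so
# the configuration code below can look up arbitrary cache keys safely even before
# CMakeCache.txt has been generated (e.g. during "python setup.py clean").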
def configure_extension_build():
    r"""Configures extension build options according to system environment and user's choice.

    Returns:
      The input to parameters ext_modules, cmdclass, packages, and entry_points as required in setuptools.setup.
    """

    cmake_cache_vars = get_cmake_cache_vars()

    ################################################################################
    # Configure compile flags
    ################################################################################

    library_dirs = []
    extra_install_requires = []

    if IS_WINDOWS:
        # /NODEFAULTLIB makes sure we only link to DLL runtime
        # and matches the flags set for protobuf and ONNX
        extra_link_args = ["/NODEFAULTLIB:LIBCMT.LIB"]
        # /MD links against DLL runtime
        # and matches the flags set for protobuf and ONNX
        # /EHsc is about standard C++ exception handling
        extra_compile_args = ["/MD", "/FS", "/EHsc"]
    else:
        extra_link_args = []
        extra_compile_args = [
            "-Wall",
            "-Wextra",
            "-Wno-strict-overflow",
            "-Wno-unused-parameter",
            "-Wno-missing-field-initializers",
            "-Wno-unknown-pragmas",
            # Python 2.6 requires -fno-strict-aliasing, see
            # http://legacy.python.org/dev/peps/pep-3123/
            # We also depend on it in our code (even Python 3).
            "-fno-strict-aliasing",
        ]

    library_dirs.append(lib_path)

    main_compile_args = []
    main_libraries = ["torch_python"]

    main_link_args = []
    main_sources = ["torch/csrc/stub.c"]

    if BUILD_LIBTORCH_WHL:
        main_libraries = ["torch"]
        main_sources = []

    if build_type.is_debug():
        if IS_WINDOWS:
            extra_compile_args.append("/Z7")
            extra_link_args.append("/DEBUG:FULL")
        else:
            extra_compile_args += ["-O0", "-g"]
            extra_link_args += ["-O0", "-g"]

    if build_type.is_rel_with_deb_info():
        if IS_WINDOWS:
            extra_compile_args.append("/Z7")
            extra_link_args.append("/DEBUG:FULL")
        else:
            extra_compile_args += ["-g"]
            extra_link_args += ["-g"]

    # pypi cuda package that requires installation of cuda runtime, cudnn and cublas
    # should be included in all wheels uploaded to pypi
    pytorch_extra_install_requirements = os.getenv(
        "PYTORCH_EXTRA_INSTALL_REQUIREMENTS", ""
    )
    if pytorch_extra_install_requirements:
        report(
            f"pytorch_extra_install_requirements: {pytorch_extra_install_requirements}"
        )
        extra_install_requires += pytorch_extra_install_requirements.split("|")

    # Cross-compile for M1
    if IS_DARWIN:
        macos_target_arch = os.getenv("CMAKE_OSX_ARCHITECTURES", "")
        if macos_target_arch in ["arm64", "x86_64"]:
            macos_sysroot_path = os.getenv("CMAKE_OSX_SYSROOT")
            if macos_sysroot_path is None:
                macos_sysroot_path = (
                    subprocess.check_output(
                        ["xcrun", "--show-sdk-path", "--sdk", "macosx"]
                    )
                    .decode("utf-8")
                    .strip()
                )
            extra_compile_args += [
                "-arch",
                macos_target_arch,
                "-isysroot",
                macos_sysroot_path,
            ]
            extra_link_args += ["-arch", macos_target_arch]

    def make_relative_rpath_args(path):
        if IS_DARWIN:
            return ["-Wl,-rpath,@loader_path/" + path]
        elif IS_WINDOWS:
            return []
        else:
            return ["-Wl,-rpath,$ORIGIN/" + path]
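
    # Illustrative note (not used by the build logic): on Linux this helper yields
    # something like make_relative_rpath_args("lib") == ["-Wl,-rpath,$ORIGIN/lib"],
    # so the compiled extension can locate the bundled libtorch libraries at runtime.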

    ################################################################################
    # Declare extensions and package
    ################################################################################

    extensions = []
    excludes = ["tools", "tools.*"]
    if not cmake_cache_vars["BUILD_CAFFE2"]:
        excludes.extend(["caffe2", "caffe2.*"])
    if not cmake_cache_vars["BUILD_FUNCTORCH"]:
        excludes.extend(["functorch", "functorch.*"])
    packages = find_packages(exclude=excludes)
    C = Extension(
        "torch._C",
        libraries=main_libraries,
        sources=main_sources,
        language="c",
        extra_compile_args=main_compile_args + extra_compile_args,
        include_dirs=[],
        library_dirs=library_dirs,
        extra_link_args=extra_link_args
        + main_link_args
        + make_relative_rpath_args("lib"),
    )
    extensions.append(C)

    # These extensions are built by cmake and copied manually in build_extensions()
    # inside the build_ext implementation
    if cmake_cache_vars["BUILD_CAFFE2"]:
        extensions.append(
            Extension(name="caffe2.python.caffe2_pybind11_state", sources=[]),
        )
        if cmake_cache_vars["USE_CUDA"]:
            extensions.append(
                Extension(name="caffe2.python.caffe2_pybind11_state_gpu", sources=[]),
            )
        if cmake_cache_vars["USE_ROCM"]:
            extensions.append(
                Extension(name="caffe2.python.caffe2_pybind11_state_hip", sources=[]),
            )
    if cmake_cache_vars["BUILD_FUNCTORCH"]:
        extensions.append(
            Extension(name="functorch._C", sources=[]),
        )

    cmdclass = {
        "bdist_wheel": wheel_concatenate,
        "build_ext": build_ext,
        "clean": clean,
        "install": install,
        "sdist": sdist,
    }

    entry_points = {
        "console_scripts": [
            "convert-caffe2-to-onnx = caffe2.python.onnx.bin.conversion:caffe2_to_onnx",
            "convert-onnx-to-caffe2 = caffe2.python.onnx.bin.conversion:onnx_to_caffe2",
            "torchrun = torch.distributed.run:main",
        ],
        "torchrun.logs_specs": [
            "default = torch.distributed.elastic.multiprocessing:DefaultLogsSpecs",
        ],
    }

    if cmake_cache_vars["USE_DISTRIBUTED"]:
        # Only enable fr_trace command if distributed is enabled
        entry_points["console_scripts"].append(
            "torchfrtrace = tools.flight_recorder.fr_trace:main",
        )
    return extensions, cmdclass, packages, entry_points, extra_install_requires


# post run, warnings, printed at the end to make them more visible
build_update_message = """
It is no longer necessary to use the 'build' or 'rebuild' targets

To install:
  $ python setup.py install
To develop locally:
  $ python setup.py develop
To force cmake to re-generate native build files (off by default):
  $ python setup.py develop --cmake
"""


def print_box(msg):
    lines = msg.split("\n")
    size = max(len(l) + 1 for l in lines)
    print("-" * (size + 2))
    for l in lines:
        print("|{}{}|".format(l, " " * (size - len(l))))
    print("-" * (size + 2))


def main():
    if BUILD_LIBTORCH_WHL and BUILD_PYTHON_ONLY:
        raise RuntimeError(
            "Conflict: 'BUILD_LIBTORCH_WHL' and 'BUILD_PYTHON_ONLY' can't both be 1. Set one to 0 and rerun."
        )
    install_requires = [
        "filelock",
        "typing-extensions>=4.8.0",
        'setuptools ; python_version >= "3.12"',
        'sympy==1.12.1 ; python_version == "3.8"',
        'sympy==1.13.1 ; python_version >= "3.9"',
        "networkx",
        "jinja2",
        "fsspec",
    ]

    if BUILD_PYTHON_ONLY:
        install_requires.append(f"{LIBTORCH_PKG_NAME}=={get_torch_version()}")

    use_prioritized_text = str(os.getenv("USE_PRIORITIZED_TEXT_FOR_LD", ""))
    if (
        use_prioritized_text == ""
        and platform.system() == "Linux"
        and platform.processor() == "aarch64"
    ):
        print_box(
            """
            WARNING: we strongly recommend enabling linker script optimization for ARM + CUDA.
            To do so please export USE_PRIORITIZED_TEXT_FOR_LD=1
            """
        )
    if use_prioritized_text == "1" or use_prioritized_text == "True":
        gen_linker_script(
            filein="cmake/prioritized_text.txt", fout="cmake/linker_script.ld"
        )
        linker_script_path = os.path.abspath("cmake/linker_script.ld")
        os.environ["LDFLAGS"] = os.getenv("LDFLAGS", "") + f" -T{linker_script_path}"
        os.environ["CFLAGS"] = (
            os.getenv("CFLAGS", "") + " -ffunction-sections -fdata-sections"
        )
        os.environ["CXXFLAGS"] = (
            os.getenv("CXXFLAGS", "") + " -ffunction-sections -fdata-sections"
        )

    # Parse the command line and check the arguments before we proceed with
    # building deps and setup. We need to set values so `--help` works.
    dist = Distribution()
    dist.script_name = os.path.basename(sys.argv[0])
    dist.script_args = sys.argv[1:]
    try:
        dist.parse_command_line()
    except setuptools.distutils.errors.DistutilsArgError as e:
        print(e)
        sys.exit(1)

    mirror_files_into_torchgen()
    if RUN_BUILD_DEPS:
        build_deps()

    (
        extensions,
        cmdclass,
        packages,
        entry_points,
        extra_install_requires,
    ) = configure_extension_build()
    install_requires += extra_install_requires

    extras_require = {
        "optree": ["optree>=0.12.0"],
        "opt-einsum": ["opt-einsum>=3.3"],
    }

    # Read in README.md for our long_description
    with open(os.path.join(cwd, "README.md"), encoding="utf-8") as f:
        long_description = f.read()

    version_range_max = max(sys.version_info[1], 12) + 1
    torch_package_data = [
        "py.typed",
        "bin/*",
        "test/*",
        "*.pyi",
        "_C/*.pyi",
        "cuda/*.pyi",
        "fx/*.pyi",
        "optim/*.pyi",
        "autograd/*.pyi",
        "jit/*.pyi",
        "nn/*.pyi",
        "nn/modules/*.pyi",
        "nn/parallel/*.pyi",
        "utils/data/*.pyi",
        "utils/data/datapipes/*.pyi",
        "lib/*.pdb",
        "lib/*shm*",
        "lib/torch_shm_manager",
        "lib/*.h",
        "include/*.h",
        "include/ATen/*.h",
        "include/ATen/cpu/*.h",
        "include/ATen/cpu/vec/vec256/*.h",
        "include/ATen/cpu/vec/vec256/vsx/*.h",
        "include/ATen/cpu/vec/vec256/zarch/*.h",
        "include/ATen/cpu/vec/vec512/*.h",
        "include/ATen/cpu/vec/*.h",
        "include/ATen/core/*.h",
        "include/ATen/cuda/*.cuh",
        "include/ATen/cuda/*.h",
        "include/ATen/cuda/detail/*.cuh",
        "include/ATen/cuda/detail/*.h",
        "include/ATen/cuda/tunable/*.h",
        "include/ATen/cudnn/*.h",
        "include/ATen/functorch/*.h",
        "include/ATen/ops/*.h",
        "include/ATen/hip/*.cuh",
        "include/ATen/hip/*.h",
        "include/ATen/hip/detail/*.cuh",
        "include/ATen/hip/detail/*.h",
        "include/ATen/hip/impl/*.h",
        "include/ATen/hip/tunable/*.h",
        "include/ATen/mps/*.h",
        "include/ATen/miopen/*.h",
        "include/ATen/detail/*.h",
        "include/ATen/native/*.h",
        "include/ATen/native/cpu/*.h",
        "include/ATen/native/cuda/*.h",
        "include/ATen/native/cuda/*.cuh",
        "include/ATen/native/hip/*.h",
        "include/ATen/native/hip/*.cuh",
        "include/ATen/native/mps/*.h",
        "include/ATen/native/nested/*.h",
        "include/ATen/native/quantized/*.h",
        "include/ATen/native/quantized/cpu/*.h",
        "include/ATen/native/transformers/*.h",
        "include/ATen/native/sparse/*.h",
        "include/ATen/native/utils/*.h",
        "include/ATen/quantized/*.h",
        "include/ATen/xpu/*.h",
        "include/ATen/xpu/detail/*.h",
        "include/caffe2/serialize/*.h",
        "include/c10/*.h",
        "include/c10/macros/*.h",
        "include/c10/core/*.h",
        "include/ATen/core/boxing/*.h",
        "include/ATen/core/boxing/impl/*.h",
        "include/ATen/core/dispatch/*.h",
        "include/ATen/core/op_registration/*.h",
        "include/c10/core/impl/*.h",
        "include/c10/util/*.h",
        "include/c10/cuda/*.h",
        "include/c10/cuda/impl/*.h",
        "include/c10/hip/*.h",
        "include/c10/hip/impl/*.h",
        "include/c10/xpu/*.h",
        "include/c10/xpu/impl/*.h",
        "include/torch/*.h",
        "include/torch/csrc/*.h",
        "include/torch/csrc/api/include/torch/*.h",
        "include/torch/csrc/api/include/torch/data/*.h",
        "include/torch/csrc/api/include/torch/data/dataloader/*.h",
        "include/torch/csrc/api/include/torch/data/datasets/*.h",
        "include/torch/csrc/api/include/torch/data/detail/*.h",
        "include/torch/csrc/api/include/torch/data/samplers/*.h",
        "include/torch/csrc/api/include/torch/data/transforms/*.h",
        "include/torch/csrc/api/include/torch/detail/*.h",
        "include/torch/csrc/api/include/torch/detail/ordered_dict.h",
        "include/torch/csrc/api/include/torch/nn/*.h",
        "include/torch/csrc/api/include/torch/nn/functional/*.h",
        "include/torch/csrc/api/include/torch/nn/options/*.h",
        "include/torch/csrc/api/include/torch/nn/modules/*.h",
        "include/torch/csrc/api/include/torch/nn/modules/container/*.h",
        "include/torch/csrc/api/include/torch/nn/parallel/*.h",
        "include/torch/csrc/api/include/torch/nn/utils/*.h",
        "include/torch/csrc/api/include/torch/optim/*.h",
        "include/torch/csrc/api/include/torch/optim/schedulers/*.h",
        "include/torch/csrc/api/include/torch/serialize/*.h",
        "include/torch/csrc/autograd/*.h",
        "include/torch/csrc/autograd/functions/*.h",
        "include/torch/csrc/autograd/generated/*.h",
        "include/torch/csrc/autograd/utils/*.h",
        "include/torch/csrc/cuda/*.h",
        "include/torch/csrc/distributed/c10d/*.h",
        "include/torch/csrc/distributed/c10d/*.hpp",
        "include/torch/csrc/distributed/rpc/*.h",
        "include/torch/csrc/distributed/autograd/context/*.h",
        "include/torch/csrc/distributed/autograd/functions/*.h",
        "include/torch/csrc/distributed/autograd/rpc_messages/*.h",
        "include/torch/csrc/dynamo/*.h",
        "include/torch/csrc/inductor/*.h",
        "include/torch/csrc/inductor/aoti_runner/*.h",
        "include/torch/csrc/inductor/aoti_runtime/*.h",
        "include/torch/csrc/inductor/aoti_torch/*.h",
        "include/torch/csrc/inductor/aoti_torch/c/*.h",
        "include/torch/csrc/inductor/aoti_torch/generated/*.h",
        "include/torch/csrc/jit/*.h",
        "include/torch/csrc/jit/backends/*.h",
        "include/torch/csrc/jit/generated/*.h",
        "include/torch/csrc/jit/passes/*.h",
        "include/torch/csrc/jit/passes/quantization/*.h",
        "include/torch/csrc/jit/passes/utils/*.h",
        "include/torch/csrc/jit/runtime/*.h",
        "include/torch/csrc/jit/ir/*.h",
        "include/torch/csrc/jit/frontend/*.h",
        "include/torch/csrc/jit/api/*.h",
        "include/torch/csrc/jit/serialization/*.h",
        "include/torch/csrc/jit/python/*.h",
        "include/torch/csrc/jit/mobile/*.h",
        "include/torch/csrc/jit/testing/*.h",
        "include/torch/csrc/jit/tensorexpr/*.h",
        "include/torch/csrc/jit/tensorexpr/operators/*.h",
        "include/torch/csrc/jit/codegen/cuda/*.h",
        "include/torch/csrc/onnx/*.h",
        "include/torch/csrc/profiler/*.h",
        "include/torch/csrc/profiler/orchestration/*.h",
        "include/torch/csrc/profiler/standalone/*.h",
        "include/torch/csrc/profiler/stubs/*.h",
        "include/torch/csrc/profiler/unwind/*.h",
        "include/torch/csrc/profiler/python/*.h",
        "include/torch/csrc/utils/*.h",
        "include/torch/csrc/tensor/*.h",
        "include/torch/csrc/lazy/backend/*.h",
        "include/torch/csrc/lazy/core/*.h",
        "include/torch/csrc/lazy/core/internal_ops/*.h",
        "include/torch/csrc/lazy/core/ops/*.h",
        "include/torch/csrc/lazy/python/python_util.h",
        "include/torch/csrc/lazy/ts_backend/*.h",
        "include/torch/csrc/xpu/*.h",
        "include/pybind11/*.h",
        "include/pybind11/detail/*.h",
        "include/pybind11/eigen/*.h",
        "include/TH/*.h*",
        "include/TH/generic/*.h*",
        "include/THC/*.cuh",
        "include/THC/*.h*",
        "include/THC/generic/*.h",
        "include/THH/*.cuh",
        "include/THH/*.h*",
        "include/THH/generic/*.h",
        "include/sleef.h",
        "_inductor/codegen/*.h",
        "_inductor/codegen/aoti_runtime/*.cpp",
        "_export/serde/*.yaml",
        "share/cmake/ATen/*.cmake",
        "share/cmake/Caffe2/*.cmake",
        "share/cmake/Caffe2/public/*.cmake",
        "share/cmake/Caffe2/Modules_CUDA_fix/*.cmake",
        "share/cmake/Caffe2/Modules_CUDA_fix/upstream/*.cmake",
        "share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/*.cmake",
        "share/cmake/Gloo/*.cmake",
        "share/cmake/Tensorpipe/*.cmake",
        "share/cmake/Torch/*.cmake",
        "utils/benchmark/utils/*.cpp",
        "utils/benchmark/utils/valgrind_wrapper/*.cpp",
        "utils/benchmark/utils/valgrind_wrapper/*.h",
        "utils/model_dump/skeleton.html",
        "utils/model_dump/code.js",
        "utils/model_dump/*.mjs",
    ]

    if not BUILD_LIBTORCH_WHL:
        torch_package_data.extend(
            [
                "lib/libtorch_python.so",
                "lib/libtorch_python.dylib",
                "lib/libtorch_python.dll",
            ]
        )
    if not BUILD_PYTHON_ONLY:
        torch_package_data.extend(
            [
                "lib/*.so*",
                "lib/*.dylib*",
                "lib/*.dll",
                "lib/*.lib",
            ]
        )
    if get_cmake_cache_vars()["BUILD_CAFFE2"]:
        torch_package_data.extend(
            [
                "include/caffe2/**/*.h",
                "include/caffe2/utils/*.h",
                "include/caffe2/utils/**/*.h",
            ]
        )
    if get_cmake_cache_vars()["USE_TENSORPIPE"]:
        torch_package_data.extend(
            [
                "include/tensorpipe/*.h",
                "include/tensorpipe/channel/*.h",
                "include/tensorpipe/channel/basic/*.h",
                "include/tensorpipe/channel/cma/*.h",
                "include/tensorpipe/channel/mpt/*.h",
                "include/tensorpipe/channel/xth/*.h",
                "include/tensorpipe/common/*.h",
                "include/tensorpipe/core/*.h",
                "include/tensorpipe/transport/*.h",
                "include/tensorpipe/transport/ibv/*.h",
                "include/tensorpipe/transport/shm/*.h",
                "include/tensorpipe/transport/uv/*.h",
            ]
        )
    if get_cmake_cache_vars()["USE_KINETO"]:
        torch_package_data.extend(
            [
                "include/kineto/*.h",
            ]
        )
    torchgen_package_data = [
        # Recursive glob doesn't work in setup.py,
        # https://github.com/pypa/setuptools/issues/1806
        # To make this robust we should replace it with some code that
        # returns a list of everything under packaged/
        "packaged/ATen/*",
        "packaged/ATen/native/*",
        "packaged/ATen/templates/*",
        "packaged/autograd/*",
        "packaged/autograd/templates/*",
    ]
    package_data = {
        "torch": torch_package_data,
    }

    if not BUILD_LIBTORCH_WHL:
        package_data["torchgen"] = torchgen_package_data
        package_data["caffe2"] = [
            "python/serialized_test/data/operator_test/*.zip",
        ]
    else:
        # no extensions in BUILD_LIBTORCH_WHL mode
        extensions = []

    setup(
        name=package_name,
        version=version,
        description=(
            "Tensors and Dynamic neural networks in "
            "Python with strong GPU acceleration"
        ),
        long_description=long_description,
        long_description_content_type="text/markdown",
        ext_modules=extensions,
        cmdclass=cmdclass,
        packages=packages,
        entry_points=entry_points,
        install_requires=install_requires,
        extras_require=extras_require,
        package_data=package_data,
        url="https://pytorch.org/",
        download_url="https://github.com/pytorch/pytorch/tags",
        author="PyTorch Team",
        author_email="packages@pytorch.org",
        python_requires=f">={python_min_version_str}",
        # PyPI package information.
        classifiers=[
            "Development Status :: 5 - Production/Stable",
            "Intended Audience :: Developers",
            "Intended Audience :: Education",
            "Intended Audience :: Science/Research",
            "License :: OSI Approved :: BSD License",
            "Topic :: Scientific/Engineering",
            "Topic :: Scientific/Engineering :: Mathematics",
            "Topic :: Scientific/Engineering :: Artificial Intelligence",
            "Topic :: Software Development",
            "Topic :: Software Development :: Libraries",
            "Topic :: Software Development :: Libraries :: Python Modules",
            "Programming Language :: C++",
            "Programming Language :: Python :: 3",
        ]
        + [
            f"Programming Language :: Python :: 3.{i}"
            for i in range(python_min_version[1], version_range_max)
        ],
        license="BSD-3",
        keywords="pytorch, machine learning",
    )
    if EMIT_BUILD_WARNING:
        print_box(build_update_message)


if __name__ == "__main__":
    main()