"""Generates a matrix to be utilized through github actions

Will output a condensed version of the matrix if on a pull request that only
includes the latest version of python we support built on three different
architectures:
    * CPU
    * Latest CUDA
    * Latest ROCM
"""
import os
import re
from typing import Dict, List, Optional, Tuple
# Compute-architecture version strings we produce binaries for.
CUDA_ARCHES = ["11.8", "12.1"]

# Full CUDA toolkit version corresponding to each short arch string.
CUDA_ARCHES_FULL_VERSION = {"11.8": "11.8.0", "12.1": "12.1.1"}

# Major cuDNN version shipped alongside each CUDA arch.
CUDA_ARCHES_CUDNN_VERSION = {"11.8": "8", "12.1": "8"}

ROCM_ARCHES = ["5.7", "6.0"]

CPU_CXX11_ABI_ARCH = ["cpu-cxx11-abi"]

CPU_AARCH64_ARCH = ["cpu-aarch64"]
# Extra pip requirements attached to CUDA wheels so the NVIDIA runtime
# libraries are installed from PyPI rather than bundled into the wheel.
# Each value is a single string of "pkg==ver; env-marker" clauses joined by
# " | "; get_nccl_wheel_version splits on "[;|]" to parse it back apart.
# NOTE(review): the "11.8"/"12.1" keys were reconstructed from the cu11/cu12
# package suffixes and the [arch_version] lookups below — confirm.
PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
    "11.8": (
        "nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nccl-cu11==2.19.3; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'"
    ),
    "12.1": (
        "nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nccl-cu12==2.19.3; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'"
    ),
}
def get_nccl_submodule_version() -> str:
    """Return the NCCL version pinned by the checked-out nccl submodule.

    Parses the ``NCCL_MAJOR``/``NCCL_MINOR``/``NCCL_PATCH`` assignments from
    the submodule's version.mk and returns them as "MAJOR.MINOR.PATCH".

    Raises:
        RuntimeError: if the submodule is not checked out.
    """
    from pathlib import Path

    # NOTE(review): the path segments below version.mk were reconstructed
    # (SOURCE only shows the parent.parent.parent prefix) — confirm.
    nccl_version_mk = (
        Path(__file__).absolute().parent.parent.parent
        / "third_party"
        / "nccl"
        / "nccl"
        / "makefiles"
        / "version.mk"
    )
    if not nccl_version_mk.exists():
        raise RuntimeError(
            "Please make sure that nccl submodule is checked out when importing this script"
        )
    with nccl_version_mk.open("r") as f:
        content = f.read()
    d = {}
    # version.mk lines look like "NCCL_MAJOR := 2"; keep only those keys.
    for l in content.split("\n"):
        if not l.startswith("NCCL_"):
            continue
        (k, v) = l.split(":=")
        d[k.strip()] = v.strip()
    return f"{d['NCCL_MAJOR']}.{d['NCCL_MINOR']}.{d['NCCL_PATCH']}"
def get_nccl_wheel_version(arch_version: str) -> str:
    """Return the nvidia-nccl-cuXX version pinned for *arch_version* wheels.

    Splits the " | "-joined requirement string on ";" and "|", then extracts
    the version from the first "nvidia-nccl-cu..." clause.
    """
    import re

    requirements = map(
        str.strip, re.split("[;|]", PYTORCH_EXTRA_INSTALL_REQUIREMENTS[arch_version])
    )
    return next(x for x in requirements if x.startswith("nvidia-nccl-cu")).split("==")[
        1
    ]
def validate_nccl_dep_consistency(arch_version: str) -> None:
    """Fail if the wheel's NCCL pin disagrees with the nccl submodule.

    Raises:
        RuntimeError: when the two versions differ.
    """
    wheel_ver = get_nccl_wheel_version(arch_version)
    submodule_ver = get_nccl_submodule_version()
    if wheel_ver != submodule_ver:
        raise RuntimeError(
            f"NCCL submodule version {submodule_ver} differs from wheel version {wheel_ver}"
        )
def arch_type(arch_version: str) -> str:
    """Map an arch version string ("12.1", "5.7", "cpu", ...) to its family.

    NOTE(review): the "cuda"/"rocm"/"cpu-aarch64"/"cpu" return values were
    reconstructed to match translate_desired_cuda's keys — confirm.
    """
    if arch_version in CUDA_ARCHES:
        return "cuda"
    elif arch_version in ROCM_ARCHES:
        return "rocm"
    elif arch_version in CPU_CXX11_ABI_ARCH:
        return "cpu-cxx11-abi"
    elif arch_version in CPU_AARCH64_ARCH:
        return "cpu-aarch64"
    else:  # anything else is treated as a plain CPU build
        return "cpu"
# Docker tag of the builder images; overridable for release branches.
DEFAULT_TAG = os.getenv("RELEASE_VERSION_TAG", "main")

# arch version -> builder image for (many)wheel packages.
WHEEL_CONTAINER_IMAGES = {
    **{
        gpu_arch: f"pytorch/manylinux-builder:cuda{gpu_arch}-{DEFAULT_TAG}"
        for gpu_arch in CUDA_ARCHES
    },
    **{
        gpu_arch: f"pytorch/manylinux-builder:rocm{gpu_arch}-{DEFAULT_TAG}"
        for gpu_arch in ROCM_ARCHES
    },
    "cpu": f"pytorch/manylinux-builder:cpu-{DEFAULT_TAG}",
    "cpu-cxx11-abi": f"pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-{DEFAULT_TAG}",
    "cpu-aarch64": f"pytorch/manylinuxaarch64-builder:cpu-aarch64-{DEFAULT_TAG}",
}

# arch version -> builder image for conda packages (no ROCm conda builds).
CONDA_CONTAINER_IMAGES = {
    **{
        gpu_arch: f"pytorch/conda-builder:cuda{gpu_arch}-{DEFAULT_TAG}"
        for gpu_arch in CUDA_ARCHES
    },
    "cpu": f"pytorch/conda-builder:cpu-{DEFAULT_TAG}",
}

# The two C++ ABIs libtorch is built against.
PRE_CXX11_ABI = "pre-cxx11"
CXX11_ABI = "cxx11-abi"

# (arch version, abi) -> builder image for libtorch packages.
LIBTORCH_CONTAINER_IMAGES: Dict[Tuple[str, str], str] = {
    **{
        (
            gpu_arch,
            PRE_CXX11_ABI,
        ): f"pytorch/manylinux-builder:cuda{gpu_arch}-{DEFAULT_TAG}"
        for gpu_arch in CUDA_ARCHES
    },
    **{
        (
            gpu_arch,
            CXX11_ABI,
        ): f"pytorch/libtorch-cxx11-builder:cuda{gpu_arch}-{DEFAULT_TAG}"
        for gpu_arch in CUDA_ARCHES
    },
    **{
        (
            gpu_arch,
            PRE_CXX11_ABI,
        ): f"pytorch/manylinux-builder:rocm{gpu_arch}-{DEFAULT_TAG}"
        for gpu_arch in ROCM_ARCHES
    },
    **{
        (
            gpu_arch,
            CXX11_ABI,
        ): f"pytorch/libtorch-cxx11-builder:rocm{gpu_arch}-{DEFAULT_TAG}"
        for gpu_arch in ROCM_ARCHES
    },
    ("cpu", PRE_CXX11_ABI): f"pytorch/manylinux-builder:cpu-{DEFAULT_TAG}",
    ("cpu", CXX11_ABI): f"pytorch/libtorch-cxx11-builder:cpu-{DEFAULT_TAG}",
}

FULL_PYTHON_VERSIONS = ["3.8", "3.9", "3.10", "3.11", "3.12"]
def translate_desired_cuda(gpu_arch_type: str, gpu_arch_version: str) -> str:
    """Translate (arch family, version) into the short "desired_cuda" tag.

    e.g. ("cuda", "12.1") -> "cu121"; ("rocm", "5.7") -> "rocm5.7";
    cpu-aarch64 collapses to "cpu". Unknown families fall back to the raw
    version string.
    """
    return {
        "cpu": "cpu",
        "cpu-aarch64": "cpu",
        "cpu-cxx11-abi": "cpu-cxx11-abi",
        "cuda": f"cu{gpu_arch_version.replace('.', '')}",
        "rocm": f"rocm{gpu_arch_version}",
    }.get(gpu_arch_type, gpu_arch_version)
def list_without(in_list: List[str], without: List[str]) -> List[str]:
    """Return a copy of *in_list* with every element of *without* removed."""
    return [item for item in in_list if item not in without]
def generate_conda_matrix(os: str) -> List[Dict[str, str]]:
    """Build the GitHub Actions matrix entries for conda packages.

    One entry per (python version, arch). CUDA arches are only added for
    linux and windows; no ROCm conda packages are produced.
    """
    ret: List[Dict[str, str]] = []
    arches = ["cpu"]
    python_versions = FULL_PYTHON_VERSIONS
    if os == "linux" or os == "windows":
        arches += CUDA_ARCHES
    for python_version in python_versions:
        for arch_version in arches:
            gpu_arch_type = arch_type(arch_version)
            gpu_arch_version = "" if arch_version == "cpu" else arch_version
            ret.append(
                {
                    "python_version": python_version,
                    "gpu_arch_type": gpu_arch_type,
                    "gpu_arch_version": gpu_arch_version,
                    "desired_cuda": translate_desired_cuda(
                        gpu_arch_type, gpu_arch_version
                    ),
                    "container_image": CONDA_CONTAINER_IMAGES[arch_version],
                    "package_type": "conda",
                    # dots are not legal in workflow job names
                    "build_name": f"conda-py{python_version}-{gpu_arch_type}{gpu_arch_version}".replace(
                        ".", "_"
                    ),
                }
            )
    return ret
def generate_libtorch_matrix(
    os: str,
    abi_version: str,
    arches: Optional[List[str]] = None,
    libtorch_variants: Optional[List[str]] = None,
) -> List[Dict[str, str]]:
    """Build the GitHub Actions matrix entries for libtorch packages.

    NOTE(review): the ``os``/``abi_version`` parameters are reconstructed —
    the body references both, but their signature lines are missing from the
    visible source; confirm parameter order against callers.

    Args:
        os: target OS ("linux", "windows", ...).
        abi_version: PRE_CXX11_ABI or CXX11_ABI; also used verbatim as the
            windows libtorch_config.
        arches: arch versions to build; defaults per-OS when None.
        libtorch_variants: shared/static x with/without-deps; defaults to
            all four when None.
    """
    if arches is None:
        arches = ["cpu"]
        if os == "linux":
            arches += CUDA_ARCHES
            arches += ROCM_ARCHES
        elif os == "windows":
            arches += CUDA_ARCHES

    if libtorch_variants is None:
        libtorch_variants = [
            "shared-with-deps",
            "shared-without-deps",
            "static-with-deps",
            "static-without-deps",
        ]

    ret: List[Dict[str, str]] = []
    for arch_version in arches:
        for libtorch_variant in libtorch_variants:
            gpu_arch_type = arch_type(arch_version)
            gpu_arch_version = "" if arch_version == "cpu" else arch_version
            # ROCm builds skip the without-deps variants
            if gpu_arch_type == "rocm" and "without-deps" in libtorch_variant:
                continue
            ret.append(
                {
                    "gpu_arch_type": gpu_arch_type,
                    "gpu_arch_version": gpu_arch_version,
                    "desired_cuda": translate_desired_cuda(
                        gpu_arch_type, gpu_arch_version
                    ),
                    "libtorch_variant": libtorch_variant,
                    # windows encodes the ABI as libtorch_config; elsewhere
                    # it selects the devtoolset
                    "libtorch_config": abi_version if os == "windows" else "",
                    "devtoolset": abi_version if os != "windows" else "",
                    "container_image": LIBTORCH_CONTAINER_IMAGES[
                        (arch_version, abi_version)
                    ]
                    if os != "windows"
                    else "",
                    "package_type": "libtorch",
                    "build_name": f"libtorch-{gpu_arch_type}{gpu_arch_version}-{libtorch_variant}-{abi_version}".replace(
                        ".", "_"
                    ),
                }
            )
    return ret
def generate_wheels_matrix(
    os: str,
    arches: Optional[List[str]] = None,
    python_versions: Optional[List[str]] = None,
) -> List[Dict[str, str]]:
    """Build the GitHub Actions matrix entries for (many)wheel packages.

    NOTE(review): the leading ``os`` parameter is reconstructed — the body
    compares ``os`` throughout but the signature line is missing from the
    visible source; confirm against callers.

    Args:
        os: target OS ("linux", "windows", "linux-aarch64", ...); linux
            variants produce "manywheel" packages, everything else "wheel".
        arches: arch versions to build; defaults per-OS when None.
        python_versions: python versions to build; defaults to
            FULL_PYTHON_VERSIONS when None.
    """
    package_type = "wheel"
    if os == "linux" or os == "linux-aarch64":
        package_type = "manywheel"

    if python_versions is None:
        python_versions = FULL_PYTHON_VERSIONS

    if arches is None:
        arches = ["cpu"]
        if os == "linux":
            arches += CPU_CXX11_ABI_ARCH + CUDA_ARCHES + ROCM_ARCHES
        elif os == "windows":
            arches += CUDA_ARCHES
        elif os == "linux-aarch64":
            # aarch64 uses its own build scripts, so only the one arch
            arches = ["cpu-aarch64"]

    ret: List[Dict[str, str]] = []
    for python_version in python_versions:
        for arch_version in arches:
            gpu_arch_type = arch_type(arch_version)
            gpu_arch_version = (
                ""
                if arch_version == "cpu"
                or arch_version == "cpu-cxx11-abi"
                or arch_version == "cpu-aarch64"
                else arch_version
            )

            # linux CUDA wheels carry their matching extra install
            # requirements so the NVIDIA runtime libs come from PyPI
            if arch_version in ["12.1", "11.8"] and os == "linux":
                ret.append(
                    {
                        "python_version": python_version,
                        "gpu_arch_type": gpu_arch_type,
                        "gpu_arch_version": gpu_arch_version,
                        "desired_cuda": translate_desired_cuda(
                            gpu_arch_type, gpu_arch_version
                        ),
                        "devtoolset": "",
                        "container_image": WHEEL_CONTAINER_IMAGES[arch_version],
                        "package_type": package_type,
                        "pytorch_extra_install_requirements": PYTORCH_EXTRA_INSTALL_REQUIREMENTS[arch_version],
                        "build_name": f"{package_type}-py{python_version}-{gpu_arch_type}{gpu_arch_version}".replace(
                            ".", "_"
                        ),
                    }
                )
            else:
                ret.append(
                    {
                        "python_version": python_version,
                        "gpu_arch_type": gpu_arch_type,
                        "gpu_arch_version": gpu_arch_version,
                        "desired_cuda": translate_desired_cuda(
                            gpu_arch_type, gpu_arch_version
                        ),
                        "devtoolset": "cxx11-abi"
                        if arch_version == "cpu-cxx11-abi"
                        else "",
                        "container_image": WHEEL_CONTAINER_IMAGES[arch_version],
                        "package_type": package_type,
                        "build_name": f"{package_type}-py{python_version}-{gpu_arch_type}{gpu_arch_version}".replace(
                            ".", "_"
                        ),
                        # non-linux wheels default to the 12.1 requirements
                        "pytorch_extra_install_requirements":
                        PYTORCH_EXTRA_INSTALL_REQUIREMENTS["12.1"]
                        if os != "linux" else "",
                    }
                )
    return ret
# Sanity-check at import time that each CUDA arch's NCCL wheel pin matches
# the nccl submodule checked out in this repository.
validate_nccl_dep_consistency("12.1")
validate_nccl_dep_consistency("11.8")