onnxruntime
/
setup.py
773 lines · 31.1 KB
1# ------------------------------------------------------------------------
2# Copyright (c) Microsoft Corporation. All rights reserved.
3# Licensed under the MIT License.
4# ------------------------------------------------------------------------
5# pylint: disable=C0103
6
7import datetime
8import logging
9import platform
10import shlex
11import subprocess
12import sys
13from glob import glob, iglob
14from os import environ, getcwd, path, popen, remove
15from pathlib import Path
16from shutil import copyfile
17
18from packaging.tags import sys_tags
19from setuptools import Extension, setup
20from setuptools.command.build_ext import build_ext as _build_ext
21from setuptools.command.install import install as InstallCommandBase
22
# Package-level defaults; several are overwritten below based on the
# build flags found in sys.argv.
nightly_build = False
package_name = "onnxruntime"
wheel_name_suffix = None
# Root logger — messages from the custom build/packaging commands below.
logger = logging.getLogger()
27
28
def parse_arg_remove_boolean(argv, arg_name):
    """Return True if the flag *arg_name* is present in *argv*, removing it.

    Args:
        argv: Argument list to search and mutate in place (callers pass
            ``sys.argv`` so setuptools never sees the custom flag).
        arg_name: Exact flag string, e.g. ``"--nightly_build"``.

    Returns:
        True when the flag was found (and removed), False otherwise.
    """
    # Fix: the original tested membership against the global ``sys.argv``
    # while removing from the ``argv`` parameter. Use the parameter for
    # both so the function works on any argument list; behavior for the
    # existing callers (which pass sys.argv) is unchanged.
    arg_value = False
    if arg_name in argv:
        arg_value = True
        argv.remove(arg_name)

    return arg_value
36
37
def parse_arg_remove_string(argv, arg_name_equal):
    """Return the value of a ``--name=value`` argument and strip it from *argv*.

    Args:
        argv: Argument list to search and mutate in place (callers pass
            ``sys.argv``); element 0 (the program name) is never matched.
        arg_name_equal: Flag prefix including the equals sign, e.g.
            ``"--cuda_version="``.

    Returns:
        The text after the ``=`` of the first matching argument, or None
        if no argument starts with *arg_name_equal*.
    """
    # Fix: the original scanned and removed from the global ``sys.argv``
    # instead of the ``argv`` parameter. Use the parameter consistently;
    # existing callers (which pass sys.argv) see identical behavior.
    arg_value = None
    for arg in argv[1:]:
        if arg.startswith(arg_name_equal):
            arg_value = arg[len(arg_name_equal) :]
            argv.remove(arg)
            break  # only the first occurrence is consumed

    return arg_value
47
48
# Any combination of the following arguments can be applied

if parse_arg_remove_boolean(sys.argv, "--nightly_build"):
    package_name = "ort-nightly"
    nightly_build = True

wheel_name_suffix = parse_arg_remove_string(sys.argv, "--wheel_name_suffix=")

cuda_version = None
rocm_version = None
is_migraphx = False
is_rocm = False
is_openvino = False
# The following arguments are mutually exclusive
if wheel_name_suffix == "gpu":
    # "gpu" suffix implies the CUDA build; the version feeds the +cuXYZ
    # local version tag computed further below.
    # TODO: how to support multiple CUDA versions?
    cuda_version = parse_arg_remove_string(sys.argv, "--cuda_version=")
elif parse_arg_remove_boolean(sys.argv, "--use_rocm"):
    is_rocm = True
    rocm_version = parse_arg_remove_string(sys.argv, "--rocm_version=")
elif parse_arg_remove_boolean(sys.argv, "--use_migraphx"):
    is_migraphx = True
elif parse_arg_remove_boolean(sys.argv, "--use_openvino"):
    is_openvino = True
    package_name = "onnxruntime-openvino"
elif parse_arg_remove_boolean(sys.argv, "--use_dnnl"):
    package_name = "onnxruntime-dnnl"
elif parse_arg_remove_boolean(sys.argv, "--use_tvm"):
    package_name = "onnxruntime-tvm"
elif parse_arg_remove_boolean(sys.argv, "--use_vitisai"):
    package_name = "onnxruntime-vitisai"
elif parse_arg_remove_boolean(sys.argv, "--use_acl"):
    package_name = "onnxruntime-acl"
elif parse_arg_remove_boolean(sys.argv, "--use_armnn"):
    package_name = "onnxruntime-armnn"
elif parse_arg_remove_boolean(sys.argv, "--use_cann"):
    package_name = "onnxruntime-cann"
elif parse_arg_remove_boolean(sys.argv, "--use_azure"):
    # keep the same name since AzureEP will release with CpuEP by default.
    pass
elif parse_arg_remove_boolean(sys.argv, "--use_qnn"):
    package_name = "onnxruntime-qnn"

# ROCm and MIGraphX builds share one package name (nightly or stable).
if is_rocm or is_migraphx:
    package_name = "onnxruntime-rocm" if not nightly_build else "ort-rocm-nightly"
94
# PEP 513 defined manylinux1_x86_64 and manylinux1_i686
# PEP 571 defined manylinux2010_x86_64 and manylinux2010_i686
# PEP 599 defines the following platform tags:
# manylinux2014_x86_64
# manylinux2014_i686
# manylinux2014_aarch64
# manylinux2014_armv7l
# manylinux2014_ppc64
# manylinux2014_ppc64le
# manylinux2014_s390x
manylinux_tags = [
    "manylinux1_x86_64",
    "manylinux1_i686",
    "manylinux2010_x86_64",
    "manylinux2010_i686",
    "manylinux2014_x86_64",
    "manylinux2014_i686",
    "manylinux2014_aarch64",
    "manylinux2014_armv7l",
    "manylinux2014_ppc64",
    "manylinux2014_ppc64le",
    "manylinux2014_s390x",
    "manylinux_2_28_x86_64",
    "manylinux_2_28_aarch64",
]
# True when building inside a manylinux environment.
# NOTE(review): AUDITWHEEL_PLAT is presumably exported by the manylinux
# build images / CI pipeline — confirm against the build scripts.
is_manylinux = environ.get("AUDITWHEEL_PLAT", None) in manylinux_tags
121
122
class build_ext(_build_ext):  # noqa: N801
    """A no-compile build_ext: the native extension is prebuilt elsewhere,
    so "building" it is just copying the binary to the expected location."""

    def build_extension(self, ext):
        source = ext.sources[0]
        target = self.get_ext_fullpath(ext.name)
        logger.info("copying %s -> %s", source, target)
        copyfile(source, target)
128
129
try:
    from wheel.bdist_wheel import bdist_wheel as _bdist_wheel

    class bdist_wheel(_bdist_wheel):  # noqa: N801
        """Helper functions to create wheel package"""

        # get_tag is only overridden for manylinux OpenVINO builds: those
        # wheels skip auditwheel repair (see run() below), so the platform
        # tag must be derived from the build machine's glibc here instead.
        if is_openvino and is_manylinux:

            def get_tag(self):
                # Keep interpreter/abi from the default implementation;
                # only the platform portion is recomputed below.
                _, _, plat = _bdist_wheel.get_tag(self)
                if platform.system() == "Linux":
                    # Get the right platform tag by querying the linker version
                    # NOTE(review): assumes the last token of the first line of
                    # `ldd --version` is "major.minor" — glibc-specific; confirm
                    # this path can never run on a musl-based image.
                    glibc_major, glibc_minor = popen("ldd --version | head -1").read().split()[-1].split(".")
                    """# See https://github.com/mayeut/pep600_compliance/blob/master/
                    pep600_compliance/tools/manylinux-policy.json"""
                    if glibc_major == "2" and glibc_minor == "17":
                        # glibc 2.17 keeps the legacy manylinux2014 alias so older pips match.
                        plat = "manylinux_2_17_x86_64.manylinux2014_x86_64"
                    else:  # For manylinux2014 and above, no alias is required
                        plat = f"manylinux_{glibc_major}_{glibc_minor}_x86_64"
                tags = next(sys_tags())
                return (tags.interpreter, tags.abi, plat)

        def finalize_options(self):
            _bdist_wheel.finalize_options(self)
            if not is_manylinux:
                # The wheel contains native binaries, so outside manylinux it
                # must be tagged platform-specific (not "pure").
                self.root_is_pure = False

        def _rewrite_ld_preload(self, to_preload):
            # Append a CDLL(..., RTLD_GLOBAL) preload line per library to
            # _ld_preload.py so dependencies are resolvable before the
            # pybind extension module is imported.
            with open("onnxruntime/capi/_ld_preload.py", "a") as f:
                if len(to_preload) > 0:
                    f.write("from ctypes import CDLL, RTLD_GLOBAL\n")
                    for library in to_preload:
                        f.write('_{} = CDLL("{}", mode=RTLD_GLOBAL)\n'.format(library.split(".")[0], library))

        def _rewrite_ld_preload_cuda(self, to_preload):
            # Same as _rewrite_ld_preload, but wraps the loads in try/except
            # so a missing CUDA runtime only sets ORT_CUDA_UNAVAILABLE
            # instead of failing the import.
            with open("onnxruntime/capi/_ld_preload.py", "a") as f:
                if len(to_preload) > 0:
                    f.write("from ctypes import CDLL, RTLD_GLOBAL\n")
                    f.write("try:\n")
                    for library in to_preload:
                        f.write('    _{} = CDLL("{}", mode=RTLD_GLOBAL)\n'.format(library.split(".")[0], library))
                    f.write("except OSError:\n")
                    f.write("    import os\n")
                    f.write('    os.environ["ORT_CUDA_UNAVAILABLE"] = "1"\n')

        def _rewrite_ld_preload_tensorrt(self, to_preload):
            # TensorRT variant of the guarded preload: failure sets
            # ORT_TENSORRT_UNAVAILABLE rather than raising.
            with open("onnxruntime/capi/_ld_preload.py", "a", encoding="ascii") as f:
                if len(to_preload) > 0:
                    f.write("from ctypes import CDLL, RTLD_GLOBAL\n")
                    f.write("try:\n")
                    for library in to_preload:
                        f.write('    _{} = CDLL("{}", mode=RTLD_GLOBAL)\n'.format(library.split(".")[0], library))
                    f.write("except OSError:\n")
                    f.write("    import os\n")
                    f.write('    os.environ["ORT_TENSORRT_UNAVAILABLE"] = "1"\n')

        def run(self):
            """Build the wheel; on manylinux additionally rename the pybind
            module, rewrite _ld_preload.py, and run auditwheel repair with
            GPU/NPU runtime libraries excluded from vendoring."""
            if is_manylinux:
                source = "onnxruntime/capi/onnxruntime_pybind11_state.so"
                dest = "onnxruntime/capi/onnxruntime_pybind11_state_manylinux1.so"
                logger.info("copying %s -> %s", source, dest)
                copyfile(source, dest)

                # NOTE(review): these lists are never populated here, so the
                # _rewrite_ld_preload* calls below append nothing — presumably
                # kept for builds that inject entries; confirm.
                to_preload = []
                to_preload_cuda = []
                to_preload_tensorrt = []
                to_preload_cann = []

                # Libraries auditwheel must NOT vendor into the wheel: they are
                # provided at runtime by the CUDA/cuDNN/TensorRT installation.
                cuda_dependencies = [
                    "libcuda.so.1",
                    "libcublas.so.11",
                    "libcublas.so.12",
                    "libcublasLt.so.11",
                    "libcublasLt.so.12",
                    "libcudart.so.11.0",
                    "libcudart.so.12",
                    "libcudnn.so.8",
                    "libcudnn.so.9",
                    "libcufft.so.10",
                    "libcufft.so.11",
                    "libcurand.so.10",
                    "libcudnn_adv_infer.so.8",
                    "libcudnn_adv_train.so.8",
                    "libcudnn_cnn_infer.so.8",
                    "libcudnn_cnn_train.so.8",
                    "libcudnn_ops_infer.so.8",
                    "libcudnn_ops_train.so.8",
                    "libcudnn_adv.so.9",
                    "libcudnn_cnn.so.9",
                    "libcudnn_engines_precompiled.so.9",
                    "libcudnn_engines_runtime_compiled.so.9",
                    "libcudnn_graph.so.9",
                    "libcudnn_heuristic.so.9",
                    "libcudnn_ops.so.9",
                    "libnvJitLink.so.12",
                    "libnvrtc.so.11",
                    "libnvrtc.so.12",
                    "libnvrtc-builtins.so.11",
                    "libnvrtc-builtins.so.12",
                ]

                # ROCm/MIGraphX runtime libraries, likewise excluded.
                rocm_dependencies = [
                    "libamd_comgr.so.2",
                    "libamdhip64.so.5",
                    "libamdhip64.so.6",
                    "libdrm.so.2",
                    "libdrm_amdgpu.so.1",
                    "libelf.so.1",
                    "libhipfft.so.0",
                    "libhiprtc.so.5",
                    "libhiprtc.so.6",
                    "libhsa-runtime64.so.1",
                    "libMIOpen.so.1",
                    "libnuma.so.1",
                    "librccl.so.1",
                    "librocblas.so.3",
                    "librocblas.so.4",
                    "librocfft.so.0",
                    "libroctx64.so.4",
                    "librocm_smi64.so.5",
                    "librocm_smi64.so.6",
                    "libroctracer64.so.4",
                    "libtinfo.so.6",
                    "libmigraphx_c.so.3",
                    "libmigraphx.so.2",
                    "libmigraphx_onnx.so.2",
                    "libmigraphx_tf.so.2",
                ]

                tensorrt_dependencies = ["libnvinfer.so.10", "libnvinfer_plugin.so.10", "libnvonnxparser.so.10"]

                cann_dependencies = ["libascendcl.so", "libacl_op_compiler.so", "libfmk_onnx_parser.so"]

                # Make the OpenVINO provider library find its dependencies
                # next to itself ($ORIGIN) when present.
                dest = "onnxruntime/capi/libonnxruntime_providers_openvino.so"
                if path.isfile(dest):
                    subprocess.run(
                        ["patchelf", "--set-rpath", "$ORIGIN", dest, "--force-rpath"],
                        check=True,
                        stdout=subprocess.PIPE,
                        text=True,
                    )

                self._rewrite_ld_preload(to_preload)
                self._rewrite_ld_preload_cuda(to_preload_cuda)
                self._rewrite_ld_preload_tensorrt(to_preload_tensorrt)
                # CANN preloads use the plain (unguarded) writer.
                self._rewrite_ld_preload(to_preload_cann)

            else:
                pass

            _bdist_wheel.run(self)
            # OpenVINO wheels tag themselves in get_tag() and skip repair.
            if is_manylinux and not disable_auditwheel_repair and not is_openvino:
                assert self.dist_dir is not None
                file = glob(path.join(self.dist_dir, "*linux*.whl"))[0]
                logger.info("repairing %s for manylinux1", file)
                auditwheel_cmd = ["auditwheel", "-v", "repair", "-w", self.dist_dir, file]
                for i in cuda_dependencies + rocm_dependencies + tensorrt_dependencies + cann_dependencies:
                    auditwheel_cmd += ["--exclude", i]
                logger.info("Running %s", " ".join([shlex.quote(arg) for arg in auditwheel_cmd]))
                try:
                    subprocess.run(auditwheel_cmd, check=True, stdout=subprocess.PIPE)
                finally:
                    # Always remove the pre-repair wheel so only the repaired
                    # (retagged) wheel remains in dist_dir.
                    logger.info("removing %s", file)
                    remove(file)

except ImportError as error:
    # `wheel` not installed: disable the custom command; cmd_classes below
    # checks for the None sentinel before registering bdist_wheel.
    print("Error importing dependencies:")
    print(error)
    bdist_wheel = None
299
300
class InstallCommand(InstallCommandBase):
    """Install command that places everything in the platform-specific
    library directory, since the package ships native binaries."""

    def finalize_options(self):
        result = InstallCommandBase.finalize_options(self)
        # Redirect purelib installs into platlib so the Python sources sit
        # next to the native .so/.dll files.
        self.install_lib = self.install_platlib
        return result
306
307
# Base names of the execution-provider shared libraries; decorated with the
# platform-specific prefix/suffix below.
providers_cuda_or_rocm = "onnxruntime_providers_" + ("rocm" if is_rocm else "cuda")
providers_tensorrt_or_migraphx = "onnxruntime_providers_" + ("migraphx" if is_migraphx else "tensorrt")
providers_openvino = "onnxruntime_providers_openvino"
providers_cann = "onnxruntime_providers_cann"

if platform.system() == "Linux":
    providers_cuda_or_rocm = "lib" + providers_cuda_or_rocm + ".so"
    providers_tensorrt_or_migraphx = "lib" + providers_tensorrt_or_migraphx + ".so"
    providers_openvino = "lib" + providers_openvino + ".so"
    providers_cann = "lib" + providers_cann + ".so"
elif platform.system() == "Windows":
    providers_cuda_or_rocm = providers_cuda_or_rocm + ".dll"
    providers_tensorrt_or_migraphx = providers_tensorrt_or_migraphx + ".dll"
    providers_openvino = providers_openvino + ".dll"
    providers_cann = providers_cann + ".dll"
323
# Additional binaries
# libs: binaries bundled directly; dl_libs: binaries that on manylinux go
# through the glob-filtered `data` list instead (see the block below).
dl_libs = []
libs = []

if platform.system() == "Linux" or platform.system() == "AIX":
    libs = [
        "onnxruntime_pybind11_state.so",
        "libdnnl.so.2",
        "libmklml_intel.so",
        "libmklml_gnu.so",
        "libiomp5.so",
        "mimalloc.so",
        "libonnxruntime.so*",
    ]
    dl_libs = ["libonnxruntime_providers_shared.so"]
    dl_libs.append(providers_cuda_or_rocm)
    dl_libs.append(providers_tensorrt_or_migraphx)
    dl_libs.append(providers_cann)
    dl_libs.append("libonnxruntime.so*")
    # DNNL, TensorRT & OpenVINO EPs are built as shared libs
    libs.extend(["libonnxruntime_providers_shared.so"])
    libs.extend(["libonnxruntime_providers_dnnl.so"])
    libs.extend(["libonnxruntime_providers_openvino.so"])
    libs.extend(["libonnxruntime_providers_vitisai.so"])
    libs.append(providers_cuda_or_rocm)
    libs.append(providers_tensorrt_or_migraphx)
    libs.append(providers_cann)
    if nightly_build:
        libs.extend(["libonnxruntime_pywrapper.so"])
elif platform.system() == "Darwin":
    libs = [
        "onnxruntime_pybind11_state.so",
        "libdnnl.2.dylib",
        "mimalloc.so",
        "libonnxruntime*.dylib",
    ]  # TODO add libmklml and libiomp5 later.
    # DNNL & TensorRT EPs are built as shared libs
    libs.extend(["libonnxruntime_providers_shared.dylib"])
    libs.extend(["libonnxruntime_providers_dnnl.dylib"])
    libs.extend(["libonnxruntime_providers_tensorrt.dylib"])
    libs.extend(["libonnxruntime_providers_cuda.dylib"])
    libs.extend(["libonnxruntime_providers_vitisai.dylib"])
    if nightly_build:
        libs.extend(["libonnxruntime_pywrapper.dylib"])
else:
    # Windows (any remaining platform): .pyd/.dll binaries.
    libs = [
        "onnxruntime_pybind11_state.pyd",
        "dnnl.dll",
        "mklml.dll",
        "libiomp5md.dll",
        providers_cuda_or_rocm,
        providers_tensorrt_or_migraphx,
        providers_cann,
        "onnxruntime.dll",
    ]
    # DNNL, TensorRT & OpenVINO EPs are built as shared libs
    libs.extend(["onnxruntime_providers_shared.dll"])
    libs.extend(["onnxruntime_providers_dnnl.dll"])
    libs.extend(["onnxruntime_providers_tensorrt.dll"])
    libs.extend(["onnxruntime_providers_openvino.dll"])
    libs.extend(["onnxruntime_providers_cuda.dll"])
    libs.extend(["onnxruntime_providers_vitisai.dll"])
    # DirectML Libs
    libs.extend(["DirectML.dll"])
    # QNN V68/V73 dependencies
    qnn_deps = [
        "QnnCpu.dll",
        "QnnHtp.dll",
        "QnnSaver.dll",
        "QnnSystem.dll",
        "QnnHtpPrepare.dll",
        "QnnHtpV73Stub.dll",
        "libQnnHtpV73Skel.so",
        "libqnnhtpv73.cat",
        "QnnHtpV68Stub.dll",
        "libQnnHtpV68Skel.so",
    ]
    libs.extend(qnn_deps)
    if nightly_build:
        libs.extend(["onnxruntime_pywrapper.dll"])
404
# Decide what ships as package data vs. as an "extension module": on
# manylinux the prebuilt pybind .so is declared as an Extension so the wheel
# is tagged correctly (the custom build_ext above just copies it).
if is_manylinux:
    if is_openvino:
        ov_libs = [
            "libopenvino_intel_cpu_plugin.so",
            "libopenvino_intel_gpu_plugin.so",
            "libopenvino_auto_plugin.so",
            "libopenvino_hetero_plugin.so",
            "libtbb.so.2",
            "libtbbmalloc.so.2",
            "libopenvino.so",
            "libopenvino_c.so",
            "libopenvino_onnx_frontend.so",
        ]
        for x in ov_libs:
            y = "onnxruntime/capi/" + x
            # Point each bundled OpenVINO library at its neighbors.
            subprocess.run(
                ["patchelf", "--set-rpath", "$ORIGIN", y, "--force-rpath"],
                check=True,
                stdout=subprocess.PIPE,
                text=True,
            )
            dl_libs.append(x)
        dl_libs.append(providers_openvino)
        dl_libs.append("plugins.xml")
        dl_libs.append("usb-ma2x8x.mvcmd")
    data = ["capi/libonnxruntime_pywrapper.so"] if nightly_build else []
    # Only include files that actually exist in the build output.
    data += [path.join("capi", x) for x in dl_libs if glob(path.join("onnxruntime", "capi", x))]
    ext_modules = [
        Extension(
            "onnxruntime.capi.onnxruntime_pybind11_state",
            ["onnxruntime/capi/onnxruntime_pybind11_state_manylinux1.so"],
        ),
    ]
else:
    data = [path.join("capi", x) for x in libs if glob(path.join("onnxruntime", "capi", x))]
    ext_modules = []
441
# Additional examples
examples_names = ["mul_1.onnx", "logreg_iris.onnx", "sigmoid.onnx"]
examples = [path.join("datasets", x) for x in examples_names]

# Extra files such as EULA and ThirdPartyNotices (and Qualcomm License, only for QNN release packages)
extra = ["LICENSE", "ThirdPartyNotices.txt", "Privacy.md", "Qualcomm AI Hub Proprietary License.pdf"]

# Description
# Look for the README first in the cwd, then next to this setup.py.
readme_file = "docs/python/ReadMeOV.rst" if is_openvino else "docs/python/README.rst"
README = path.join(getcwd(), readme_file)
if not path.exists(README):
    this = path.dirname(__file__)
    README = path.join(this, readme_file)

if not path.exists(README):
    raise FileNotFoundError("Unable to find 'README.rst'")
with open(README, encoding="utf-8") as fdesc:
    long_description = fdesc.read()

# Include files in onnxruntime/external if --enable_external_custom_op_schemas build.sh command
# line option is specified.
# If the options is not specified this following condition fails as onnxruntime/external folder is not created in the
# build flow under the build binary directory.
if path.isdir(path.join("onnxruntime", "external")):
    # Gather all files under onnxruntime/external directory.
    # Paths are re-rooted relative to the "onnxruntime" package directory.
    extra.extend(
        list(
            str(Path(*Path(x).parts[1:]))
            for x in list(iglob(path.join(path.join("onnxruntime", "external"), "**/*.*"), recursive=True))
        )
    )
473
# Python packages always shipped in the wheel; training packages are
# appended later when --enable_training/--enable_training_apis is set.
packages = [
    "onnxruntime",
    "onnxruntime.backend",
    "onnxruntime.capi",
    "onnxruntime.datasets",
    "onnxruntime.tools",
    "onnxruntime.tools.mobile_helpers",
    "onnxruntime.tools.ort_format_model",
    "onnxruntime.tools.ort_format_model.ort_flatbuffers_py",
    "onnxruntime.tools.ort_format_model.ort_flatbuffers_py.fbs",
    "onnxruntime.tools.qdq_helpers",
    "onnxruntime.quantization",
    "onnxruntime.quantization.operators",
    "onnxruntime.quantization.CalTableFlatBuffers",
    "onnxruntime.quantization.fusions",
    "onnxruntime.quantization.execution_providers.qnn",
    "onnxruntime.transformers",
    "onnxruntime.transformers.models.bart",
    "onnxruntime.transformers.models.bert",
    "onnxruntime.transformers.models.gpt2",
    "onnxruntime.transformers.models.llama",
    "onnxruntime.transformers.models.longformer",
    "onnxruntime.transformers.models.phi2",
    "onnxruntime.transformers.models.t5",
    "onnxruntime.transformers.models.stable_diffusion",
    "onnxruntime.transformers.models.whisper",
]
501
package_data = {"onnxruntime.tools.mobile_helpers": ["*.md", "*.config"]}
data_files = []

# May be replaced with requirements-training.txt below.
requirements_file = "requirements.txt"

# PEP 440 local version segment (e.g. "+cu118"); set below for training builds.
local_version = None
enable_training = parse_arg_remove_boolean(sys.argv, "--enable_training")
enable_training_apis = parse_arg_remove_boolean(sys.argv, "--enable_training_apis")
enable_rocm_profiling = parse_arg_remove_boolean(sys.argv, "--enable_rocm_profiling")
disable_auditwheel_repair = parse_arg_remove_boolean(sys.argv, "--disable_auditwheel_repair")
default_training_package_device = parse_arg_remove_boolean(sys.argv, "--default_training_package_device")
513
# PyPI trove classifiers for the published package.
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Operating System :: POSIX :: Linux",
    "Topic :: Scientific/Engineering",
    "Topic :: Scientific/Engineering :: Mathematics",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
    "Topic :: Software Development",
    "Topic :: Software Development :: Libraries",
    "Topic :: Software Development :: Libraries :: Python Modules",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3 :: Only",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Operating System :: Microsoft :: Windows",
    "Operating System :: MacOS",
]
536
# Training builds add extra subpackages, C++ extension sources, a different
# requirements file, and training-specific package naming / local version.
if enable_training or enable_training_apis:
    packages.append("onnxruntime.training")
    if enable_training:
        packages.extend(
            [
                "onnxruntime.training.amp",
                "onnxruntime.training.experimental",
                "onnxruntime.training.experimental.gradient_graph",
                "onnxruntime.training.optim",
                "onnxruntime.training.ortmodule",
                "onnxruntime.training.ortmodule.experimental",
                "onnxruntime.training.ortmodule.experimental.json_config",
                "onnxruntime.training.ortmodule.experimental.hierarchical_ortmodule",
                "onnxruntime.training.ortmodule.torch_cpp_extensions",
                "onnxruntime.training.ortmodule.torch_cpp_extensions.cpu.aten_op_executor",
                "onnxruntime.training.ortmodule.torch_cpp_extensions.cpu.torch_interop_utils",
                "onnxruntime.training.ortmodule.torch_cpp_extensions.cuda.torch_gpu_allocator",
                "onnxruntime.training.ortmodule.torch_cpp_extensions.cuda.fused_ops",
                "onnxruntime.training.ortmodule.graph_optimizers",
                "onnxruntime.training.ortmodule.experimental.pipe",
                "onnxruntime.training.ort_triton",
                "onnxruntime.training.ort_triton.kernel",
                "onnxruntime.training.utils",
                "onnxruntime.training.utils.data",
                "onnxruntime.training.utils.hooks",
                "onnxruntime.training.api",
                "onnxruntime.training.onnxblock",
                "onnxruntime.training.onnxblock.loss",
                "onnxruntime.training.onnxblock.optim",
            ]
        )

        # Ship the torch C++ extension sources; they are compiled on the
        # user's machine (against their torch), not at wheel-build time.
        package_data["onnxruntime.training.ortmodule.torch_cpp_extensions.cpu.aten_op_executor"] = ["*.cc"]
        package_data["onnxruntime.training.ortmodule.torch_cpp_extensions.cpu.torch_interop_utils"] = ["*.cc", "*.h"]
        package_data["onnxruntime.training.ortmodule.torch_cpp_extensions.cuda.torch_gpu_allocator"] = ["*.cc"]
        package_data["onnxruntime.training.ortmodule.torch_cpp_extensions.cuda.fused_ops"] = [
            "*.cpp",
            "*.cu",
            "*.cuh",
            "*.h",
        ]

        requirements_file = "requirements-training.txt"
        # with training, we want to follow this naming convention:
        # stable:
        # onnxruntime-training-1.7.0+cu111-cp36-cp36m-linux_x86_64.whl
        # nightly:
        # onnxruntime-training-1.7.0.dev20210408+cu111-cp36-cp36m-linux_x86_64.whl
        # this is needed immediately by pytorch/ort so that the user is able to
        # install an onnxruntime training package with matching torch cuda version.
        if not is_openvino:
            # To support the package consisting of both openvino and training modules part of it
            package_name = "onnxruntime-training"

        disable_local_version = environ.get("ORT_DISABLE_PYTHON_PACKAGE_LOCAL_VERSION", "0")
        disable_local_version = (
            disable_local_version == "1"
            or disable_local_version.lower() == "true"
            or disable_local_version.lower() == "yes"
        )
        # local version should be disabled for internal feeds.
        if not disable_local_version:
            # we want put default training packages to pypi. pypi does not accept package with a local version.
            if not default_training_package_device or nightly_build:
                if cuda_version:
                    # removing '.' to make Cuda version number in the same form as Pytorch.
                    local_version = "+cu" + cuda_version.replace(".", "")
                elif rocm_version:
                    # removing '.' to make Rocm version number in the same form as Pytorch.
                    local_version = "+rocm" + rocm_version.replace(".", "")
                else:
                    # cpu version for documentation
                    local_version = "+cpu"
        else:
            if not (cuda_version or rocm_version):
                # Training CPU package for ADO feeds is called onnxruntime-training-cpu
                package_name = "onnxruntime-training-cpu"

            if rocm_version:
                # Training ROCM package for ADO feeds is called onnxruntime-training-rocm
                package_name = "onnxruntime-training-rocm"

if package_name == "onnxruntime-tvm":
    packages += ["onnxruntime.providers.tvm"]

# Final data payload of the main package: binaries + example models + notices.
package_data["onnxruntime"] = data + examples + extra
623
# Compute the final package version: base version from VERSION_NUMBER, plus
# a .devYYYYMMDDRRR suffix for nightlies and an optional local version tag.
version_number = ""
with open("VERSION_NUMBER") as f:
    version_number = f.readline().strip()
if nightly_build:
    # https://docs.microsoft.com/en-us/azure/devops/pipelines/build/variables
    build_suffix = environ.get("BUILD_BUILDNUMBER")
    if build_suffix is None:
        # The following line is only for local testing
        build_suffix = str(datetime.datetime.now().date().strftime("%Y%m%d"))
    else:
        build_suffix = build_suffix.replace(".", "")

    if len(build_suffix) > 8 and len(build_suffix) < 12:
        # we want to format the build_suffix to avoid (the 12th run on 20210630 vs the first run on 20210701):
        # 2021063012 > 202107011
        # in above 2021063012 is treated as the latest which is incorrect.
        # we want to convert the format to:
        # 20210630012 < 20210701001
        # where the first 8 digits are date. the last 3 digits are run count.
        # as long as there are less than 1000 runs per day, we will not have the problem.
        # to test this code locally, run:
        # NIGHTLY_BUILD=1 BUILD_BUILDNUMBER=202107011 python tools/ci_build/build.py --config RelWithDebInfo \
        # --enable_training --use_cuda --cuda_home /usr/local/cuda --cudnn_home /usr/lib/x86_64-linux-gnu/ \
        # --nccl_home /usr/lib/x86_64-linux-gnu/ --build_dir build/Linux --build --build_wheel --skip_tests \
        # --cuda_version 11.1
        def check_date_format(date_str):
            # True when date_str is a valid YYYYMMDD date.
            try:
                datetime.datetime.strptime(date_str, "%Y%m%d")
                return True
            except Exception:
                return False

        def reformat_run_count(count_str):
            # Zero-pad the run count to 3 digits; "" on any parse failure.
            # NOTE(review): the RuntimeError raised for count >= 1000 is
            # caught by this function's own `except Exception` and turned
            # into "" — the error never propagates; confirm this is intended.
            try:
                count = int(count_str)
                if count >= 0 and count < 1000:
                    return f"{count:03}"
                elif count >= 1000:
                    raise RuntimeError(f"Too many builds for the same day: {count}")
                return ""
            except Exception:
                return ""

        build_suffix_is_date_format = check_date_format(build_suffix[:8])
        build_suffix_run_count = reformat_run_count(build_suffix[8:])
        if build_suffix_is_date_format and build_suffix_run_count:
            build_suffix = build_suffix[:8] + build_suffix_run_count
    elif len(build_suffix) >= 12:
        raise RuntimeError(f'Incorrect build suffix: "{build_suffix}"')

    if enable_training:
        from packaging import version
        from packaging.version import Version

        # with training package, we need to bump up version minor number so that
        # nightly releases take precedence over the latest release when --pre is used during pip install.
        # eventually this shall be the behavior of all onnxruntime releases.
        # alternatively we may bump up version number right after every release.
        ort_version = version.parse(version_number)
        if isinstance(ort_version, Version):
            # TODO: this is the last time we have to do this!!!
            # We shall bump up release number right after release cut.
            if ort_version.major == 1 and ort_version.minor == 8 and ort_version.micro == 0:
                version_number = f"{ort_version.major}.{ort_version.minor + 1}.{ort_version.micro}"

    version_number = version_number + ".dev" + build_suffix

if local_version:
    version_number = version_number + local_version
if is_rocm and enable_rocm_profiling:
    version_number = version_number + ".profiling"

if wheel_name_suffix:
    if not (enable_training and wheel_name_suffix == "gpu"):
        # for training packages, local version is used to indicate device types
        package_name = f"{package_name}-{wheel_name_suffix}"
700
# Register the custom setuptools commands; bdist_wheel is None when the
# `wheel` package could not be imported (see except clause above).
cmd_classes = {}
if bdist_wheel is not None:
    cmd_classes["bdist_wheel"] = bdist_wheel
cmd_classes["install"] = InstallCommand
cmd_classes["build_ext"] = build_ext

# Read install_requires from the requirements file, looked up first in the
# cwd and then next to this setup.py.
requirements_path = path.join(getcwd(), requirements_file)
if not path.exists(requirements_path):
    this = path.dirname(__file__)
    requirements_path = path.join(this, requirements_file)
if not path.exists(requirements_path):
    raise FileNotFoundError("Unable to find " + requirements_file)
with open(requirements_path) as f:
    install_requires = f.read().splitlines()
715
716
if enable_training:

    def save_build_and_package_info(package_name, version_number, cuda_version, rocm_version):
        """Write onnxruntime/capi/build_and_package_info.py recording the
        package name, version, and detected CUDA/cudart or ROCm versions."""
        # Make the in-tree helper module importable for this build step.
        sys.path.append(path.join(path.dirname(__file__), "onnxruntime", "python"))
        from onnxruntime_collect_build_info import find_cudart_versions

        version_path = path.join("onnxruntime", "capi", "build_and_package_info.py")
        with open(version_path, "w") as f:
            f.write(f"package_name = '{package_name}'\n")
            f.write(f"__version__ = '{version_number}'\n")

            if cuda_version:
                f.write(f"cuda_version = '{cuda_version}'\n")

                # cudart_versions are integers
                cudart_versions = find_cudart_versions(build_env=True)
                if cudart_versions and len(cudart_versions) == 1:
                    f.write(f"cudart_version = {cudart_versions[0]}\n")
                else:
                    # Ambiguous or missing cudart: report but do not fail the build.
                    print(
                        "Error getting cudart version. ",
                        (
                            "did not find any cudart library"
                            if not cudart_versions or len(cudart_versions) == 0
                            else "found multiple cudart libraries"
                        ),
                    )
            elif rocm_version:
                f.write(f"rocm_version = '{rocm_version}'\n")

    save_build_and_package_info(package_name, version_number, cuda_version, rocm_version)
748
# Setup
# Everything computed above feeds the single setuptools entry point.
setup(
    name=package_name,
    version=version_number,
    description="ONNX Runtime is a runtime accelerator for Machine Learning models",
    long_description=long_description,
    author="Microsoft Corporation",
    author_email="onnxruntime@microsoft.com",
    cmdclass=cmd_classes,
    license="MIT License",
    packages=packages,
    ext_modules=ext_modules,
    package_data=package_data,
    url="https://onnxruntime.ai",
    download_url="https://github.com/microsoft/onnxruntime/tags",
    data_files=data_files,
    install_requires=install_requires,
    keywords="onnx machine learning",
    entry_points={
        "console_scripts": [
            "onnxruntime_test = onnxruntime.tools.onnxruntime_test:main",
        ]
    },
    classifiers=classifiers,
)
774