# pytorch/build.bzl
# Pull in the ufunc source-list helpers shared with the cmake/Buck builds.
load(
    ":ufunc_defs.bzl",
    "aten_ufunc_generated_cpu_kernel_sources",
    "aten_ufunc_generated_cpu_sources",
    "aten_ufunc_generated_cuda_sources",
)

def define_targets(rules):
    """Define build targets shared by the OSS Bazel and Meta-internal builds.

    Args:
        rules: a struct of rule/macro callables (cc_library, genrule,
            cmake_configure_file, glob, is_cpu_static_dispatch_build, ...)
            injected by the caller, so the same target definitions work
            under both build systems.
    """
    rules.cc_library(
        name = "caffe2_core_macros",
        hdrs = [":caffe2_core_macros_h"],
    )

    rules.cmake_configure_file(
        name = "caffe2_core_macros_h",
        src = "caffe2/core/macros.h.in",
        out = "caffe2/core/macros.h",
        definitions = [
            "CAFFE2_BUILD_SHARED_LIBS",
            "CAFFE2_PERF_WITH_AVX",
            "CAFFE2_PERF_WITH_AVX2",
            "CAFFE2_USE_EXCEPTION_PTR",
            "CAFFE2_USE_CUDNN",
            "USE_MKLDNN",
            "CAFFE2_USE_ITT",
            "USE_ROCM_KERNEL_ASSERT",
            "EIGEN_MPL2_ONLY",
        ],
    )

    rules.cc_library(
        name = "caffe2_serialize",
        srcs = [
            "caffe2/serialize/file_adapter.cc",
            "caffe2/serialize/inline_container.cc",
            "caffe2/serialize/istream_adapter.cc",
            "caffe2/serialize/read_adapter_interface.cc",
        ],
        copts = ["-fexceptions"],
        tags = [
            "-fbcode",
            "supermodule:android/default/pytorch",
            "supermodule:ios/default/public.pytorch",
            "xplat",
        ],
        visibility = ["//visibility:public"],
        deps = [
            ":caffe2_headers",
            "//c10",
            "//third_party/miniz-2.1.0:miniz",
            "@com_github_glog//:glog",
        ],
    )

    #
    # ATen generated code
    # You need to keep this in sync with the files written out
    # by gen.py (in the cmake build system, we track generated files
    # via generated_cpp.txt and generated_cpp.txt-cuda)
    #
    # Sure would be nice to use gen.py to create this list dynamically
    # instead of hardcoding, no? Well, we can't, as discussed in this
    # thread:
    # https://fb.facebook.com/groups/askbuck/permalink/1924258337622772/

    gen_aten_srcs = [
        "aten/src/ATen/native/native_functions.yaml",
        "aten/src/ATen/native/tags.yaml",
    ] + rules.glob(["aten/src/ATen/templates/*"])

    gen_aten_cmd = " ".join([
        "$(execpath //torchgen:gen)",
        "--install_dir=$(RULEDIR)",
        "--source-path aten/src/ATen",
        "--aoti_install_dir=$(RULEDIR)/torch/csrc/inductor/aoti_torch/generated",
    ] + (["--static_dispatch_backend CPU"] if rules.is_cpu_static_dispatch_build() else []))

    gen_aten_outs_cuda = (
        GENERATED_H_CUDA + GENERATED_CPP_CUDA + GENERATED_AOTI_CUDA_CPP +
        aten_ufunc_generated_cuda_sources()
    )

    gen_aten_outs = (
        GENERATED_H + GENERATED_H_CORE +
        GENERATED_CPP + GENERATED_CPP_CORE +
        GENERATED_AOTI_CPP +
        aten_ufunc_generated_cpu_sources() +
        aten_ufunc_generated_cpu_kernel_sources() + [
            "Declarations.yaml",
        ] + gen_aten_outs_cuda
    )

    rules.genrule(
        name = "gen_aten",
        srcs = gen_aten_srcs,
        outs = gen_aten_outs,
        cmd = gen_aten_cmd,
        tools = ["//torchgen:gen"],
    )

    # HIP/ROCm variant: regenerates only the CUDA-flavored outputs with --rocm.
    rules.genrule(
        name = "gen_aten_hip",
        srcs = gen_aten_srcs,
        outs = gen_aten_outs_cuda,
        cmd = gen_aten_cmd + " --rocm",
        features = ["-create_bazel_outputs"],
        tags = ["-bazel"],
        tools = ["//torchgen:gen"],
    )

    rules.genrule(
        name = "generate-code",
        srcs = [
            ":DispatchKeyNativeFunctions.cpp",
            ":DispatchKeyNativeFunctions.h",
            ":LazyIr.h",
            ":LazyNonNativeIr.h",
            ":RegisterDispatchDefinitions.ini",
            ":RegisterDispatchKey.cpp",
            ":native_functions.yaml",
            ":shape_inference.h",
            ":tags.yaml",
            ":ts_native_functions.cpp",
            ":ts_native_functions.yaml",
        ],
        outs = GENERATED_AUTOGRAD_CPP + GENERATED_AUTOGRAD_PYTHON + GENERATED_TESTING_PY,
        cmd = "$(execpath //tools/setup_helpers:generate_code) " +
              "--gen-dir=$(RULEDIR) " +
              "--native-functions-path $(location :native_functions.yaml) " +
              "--tags-path=$(location :tags.yaml) " +
              "--gen_lazy_ts_backend",
        tools = ["//tools/setup_helpers:generate_code"],
    )

    rules.cc_library(
        name = "generated-autograd-headers",
        hdrs = [":{}".format(h) for h in _GENERATED_AUTOGRAD_CPP_HEADERS + _GENERATED_AUTOGRAD_PYTHON_HEADERS],
        visibility = ["//visibility:public"],
    )

    rules.genrule(
        name = "version_h",
        srcs = [
            ":torch/csrc/api/include/torch/version.h.in",
            ":version.txt",
        ],
        outs = ["torch/csrc/api/include/torch/version.h"],
        cmd = "$(execpath //tools/setup_helpers:gen_version_header) " +
              "--template-path $(location :torch/csrc/api/include/torch/version.h.in) " +
              "--version-path $(location :version.txt) --output-path $@ ",
        tools = ["//tools/setup_helpers:gen_version_header"],
    )

#
# ATen generated code
# You need to keep this in sync with the files written out
# by gen.py (in the cmake build system, we track generated files
# via generated_cpp.txt and generated_cpp.txt-cuda)
#
# Sure would be nice to use gen.py to create this list dynamically
# instead of hardcoding, no? Well, we can't, as discussed in this
# thread:
# https://fb.facebook.com/groups/askbuck/permalink/1924258337622772/

# Generated ATen headers installed for every build flavor.
GENERATED_H = [
    "Functions.h",
    "NativeFunctions.h",
    "NativeMetaFunctions.h",
    "FunctionalInverses.h",
    "RedispatchFunctions.h",
    "RegistrationDeclarations.h",
    "VmapGeneratedPlumbing.h",
]

GENERATED_H_CORE = [
    "Operators.h",
    # CPUFunctions.h (and likely similar headers) need to be part of core because
    # of the static dispatch build: TensorBody.h directly includes CPUFunctions.h.
    # The distinction looks pretty arbitrary though; maybe we can kill core
    # and merge the two?
    "CPUFunctions.h",
    "CPUFunctions_inl.h",
    "CompositeExplicitAutogradFunctions.h",
    "CompositeExplicitAutogradFunctions_inl.h",
    "CompositeExplicitAutogradNonFunctionalFunctions.h",
    "CompositeExplicitAutogradNonFunctionalFunctions_inl.h",
    "CompositeImplicitAutogradFunctions.h",
    "CompositeImplicitAutogradFunctions_inl.h",
    "CompositeImplicitAutogradNestedTensorFunctions.h",
    "CompositeImplicitAutogradNestedTensorFunctions_inl.h",
    "MetaFunctions.h",
    "MetaFunctions_inl.h",
    "core/TensorBody.h",
    "MethodOperators.h",
    "core/aten_interned_strings.h",
    "core/enum_tag.h",
]

GENERATED_H_CUDA = [
    "CUDAFunctions.h",
    "CUDAFunctions_inl.h",
]

GENERATED_CPP_CUDA = [
    "RegisterCUDA.cpp",
    "RegisterNestedTensorCUDA.cpp",
    "RegisterSparseCUDA.cpp",
    "RegisterSparseCsrCUDA.cpp",
    "RegisterQuantizedCUDA.cpp",
]

GENERATED_CPP = [
    "Functions.cpp",
    "RegisterBackendSelect.cpp",
    "RegisterCPU.cpp",
    "RegisterQuantizedCPU.cpp",
    "RegisterNestedTensorCPU.cpp",
    "RegisterSparseCPU.cpp",
    "RegisterSparseCsrCPU.cpp",
    "RegisterMkldnnCPU.cpp",
    "RegisterCompositeImplicitAutograd.cpp",
    "RegisterCompositeImplicitAutogradNestedTensor.cpp",
    "RegisterZeroTensor.cpp",
    "RegisterMeta.cpp",
    "RegisterQuantizedMeta.cpp",
    "RegisterNestedTensorMeta.cpp",
    "RegisterSparseMeta.cpp",
    "RegisterCompositeExplicitAutograd.cpp",
    "RegisterCompositeExplicitAutogradNonFunctional.cpp",
    "CompositeViewCopyKernels.cpp",
    "RegisterSchema.cpp",
    "RegisterFunctionalization_0.cpp",
    "RegisterFunctionalization_1.cpp",
    "RegisterFunctionalization_2.cpp",
    "RegisterFunctionalization_3.cpp",
]

GENERATED_CPP_CORE = [
    "Operators_0.cpp",
    "Operators_1.cpp",
    "Operators_2.cpp",
    "Operators_3.cpp",
    "Operators_4.cpp",
    "core/ATenOpList.cpp",
    "core/TensorMethods.cpp",
]

# These lists are temporarily living in and exported from the shared
# structure so that an internal build that lives under a different
# root can access them. They could technically live in a separate
# file in the same directory, but that would require extra work to
# ensure that file is synced to both Meta-internal repositories and
# GitHub. This problem will go away when the targets downstream of
# generate-code that use these lists are moved into the shared
# structure as well.

_GENERATED_AUTOGRAD_PYTHON_HEADERS = [
    "torch/csrc/autograd/generated/python_functions.h",
    "torch/csrc/autograd/generated/python_return_types.h",
]

_GENERATED_AUTOGRAD_CPP_HEADERS = [
    "torch/csrc/autograd/generated/Functions.h",
    "torch/csrc/autograd/generated/VariableType.h",
    "torch/csrc/autograd/generated/ViewFuncs.h",
    "torch/csrc/autograd/generated/variable_factories.h",
]

GENERATED_TESTING_PY = [
    "torch/testing/_internal/generated/annotated_fn_args.py",
]

GENERATED_LAZY_H = [
    "torch/csrc/lazy/generated/LazyIr.h",
    "torch/csrc/lazy/generated/LazyNonNativeIr.h",
    "torch/csrc/lazy/generated/LazyNativeFunctions.h",
]

_GENERATED_AUTOGRAD_PYTHON_CPP = [
    "torch/csrc/autograd/generated/python_functions_0.cpp",
    "torch/csrc/autograd/generated/python_functions_1.cpp",
    "torch/csrc/autograd/generated/python_functions_2.cpp",
    "torch/csrc/autograd/generated/python_functions_3.cpp",
    "torch/csrc/autograd/generated/python_functions_4.cpp",
    "torch/csrc/autograd/generated/python_nn_functions.cpp",
    "torch/csrc/autograd/generated/python_nested_functions.cpp",
    "torch/csrc/autograd/generated/python_fft_functions.cpp",
    "torch/csrc/autograd/generated/python_linalg_functions.cpp",
    "torch/csrc/autograd/generated/python_return_types.cpp",
    "torch/csrc/autograd/generated/python_enum_tag.cpp",
    "torch/csrc/autograd/generated/python_sparse_functions.cpp",
    "torch/csrc/autograd/generated/python_special_functions.cpp",
    "torch/csrc/autograd/generated/python_torch_functions_0.cpp",
    "torch/csrc/autograd/generated/python_torch_functions_1.cpp",
    "torch/csrc/autograd/generated/python_torch_functions_2.cpp",
    "torch/csrc/autograd/generated/python_variable_methods.cpp",
]

GENERATED_AUTOGRAD_PYTHON = _GENERATED_AUTOGRAD_PYTHON_HEADERS + _GENERATED_AUTOGRAD_PYTHON_CPP

GENERATED_AUTOGRAD_CPP = [
    "torch/csrc/autograd/generated/Functions.cpp",
    "torch/csrc/autograd/generated/VariableType_0.cpp",
    "torch/csrc/autograd/generated/VariableType_1.cpp",
    "torch/csrc/autograd/generated/VariableType_2.cpp",
    "torch/csrc/autograd/generated/VariableType_3.cpp",
    "torch/csrc/autograd/generated/VariableType_4.cpp",
    "torch/csrc/autograd/generated/ViewFuncs.cpp",
    "torch/csrc/autograd/generated/TraceType_0.cpp",
    "torch/csrc/autograd/generated/TraceType_1.cpp",
    "torch/csrc/autograd/generated/TraceType_2.cpp",
    "torch/csrc/autograd/generated/TraceType_3.cpp",
    "torch/csrc/autograd/generated/TraceType_4.cpp",
    "torch/csrc/autograd/generated/ADInplaceOrViewType_0.cpp",
    "torch/csrc/autograd/generated/ADInplaceOrViewType_1.cpp",
    "torch/csrc/lazy/generated/LazyNativeFunctions.cpp",
    "torch/csrc/lazy/generated/RegisterAutogradLazy.cpp",
    "torch/csrc/lazy/generated/RegisterLazy.cpp",
] + _GENERATED_AUTOGRAD_CPP_HEADERS + GENERATED_LAZY_H

GENERATED_AOTI_CPP = [
    "torch/csrc/inductor/aoti_torch/generated/c_shim_cpu.cpp",
]

GENERATED_AOTI_CUDA_CPP = [
    "torch/csrc/inductor/aoti_torch/generated/c_shim_cuda.cpp",
]