2
# PyTorch documentation build configuration file, created by
3
# sphinx-quickstart on Fri Dec 23 13:31:47 2016.
5
# This file is execfile()d with the current directory set to its containing dir.
8
# Note that not all possible configuration values are present in this autogenerated file.
11
# All configuration values have a default; values that are commented out
12
# serve to show the default.
14
# If extensions (or modules to document with autodoc) are in another directory,
15
# add these directories to sys.path here. If the directory is relative to the
16
# documentation root, use os.path.abspath to make it absolute, like shown here.
25
# source code directory, relative to this file, for sphinx-autobuild
26
# sys.path.insert(0, os.path.abspath('../..'))
31
# NOTE(review): this region looks damaged by extraction — the bare integer
# lines (e.g. "35") appear to be leaked source line numbers, and the warn
# call below presumably sat inside a `try/except ImportError` around the
# torchvision import originally — confirm against the upstream conf.py.
# Imported for its side effects so autodoc can resolve torchvision objects.
import torchvision  # noqa: F401
35
warnings.warn('unable to load "torchvision" package')
37
# Truthy when the RELEASE environment variable is set; defaults to False
# when the variable is unset.
RELEASE = os.environ.get("RELEASE", False)
39
# Package providing the PyTorch Sphinx HTML theme.
import pytorch_sphinx_theme
41
# -- General configuration ------------------------------------------------
43
# If your documentation needs a minimal Sphinx version, state it here.
47
# Add any Sphinx extension module names here, as strings. They can be
48
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
52
"sphinx.ext.autosummary",
54
"sphinx.ext.intersphinx",
56
"sphinx.ext.coverage",
57
"sphinx.ext.napoleon",
58
"sphinx.ext.viewcode",
59
"sphinxcontrib.katex",
60
"sphinx.ext.autosectionlabel",
66
# build the templated autosummary files
67
# Have sphinx.ext.autosummary generate stub pages for documented objects
# instead of requiring the stubs to be checked in.
autosummary_generate = True
68
# Disable numpydoc's automatic listing of class members.
numpydoc_show_class_members = False
70
# Theme has bootstrap already
71
panels_add_bootstrap_css = False
73
# autosectionlabel throws warnings if section names are duplicated.
74
# The following tells autosectionlabel to not throw a warning for
75
# duplicated section names that are in different documents.
76
autosectionlabel_prefix_document = True
84
# Have Napoleon render instance attributes with the :ivar: role.
napoleon_use_ivar = True
86
# Add any paths that contain templates here, relative to this directory.
87
templates_path = ["_templates"]
89
# TODO: document these and remove them from here.
91
coverage_ignore_functions = [
98
# torch.cuda._sanitizer
101
# torch.distributed.autograd
103
# torch.distributed.checkpoint.state_dict
106
# torch.distributed.elastic.events
107
"construct_and_record_rdzv_event",
109
# torch.distributed.elastic.metrics
110
"initialize_metrics",
111
# torch.distributed.elastic.rendezvous.registry
112
"get_rendezvous_handler",
113
# torch.distributed.launch
117
# torch.distributed.rpc
119
# torch.distributed.run
121
"determine_local_world_size",
127
"parse_min_max_nnodes",
130
# torch.distributions.constraints
136
# torch.jit.unsupported_tensor_ops
139
"unregister_custom_op_symbolic",
140
# torch.ao.quantization
143
"disable_global_flags",
145
# torch.distributed.algorithms.ddp_comm_hooks
146
"register_ddp_comm_hook",
150
"DistributedDataParallelCPU",
153
# torch.utils.model_dump
155
"get_info_and_burn_skeleton",
156
"get_inline_skeleton",
159
"hierarchical_pickle",
160
# torch.amp.autocast_mode
161
"autocast_decorator",
162
# torch.ao.nn.quantized.dynamic.modules.rnn
165
# torch.ao.nn.quantized.reference.modules.rnn
166
"get_quantized_weight",
167
# torch.ao.ns.fx.graph_matcher
168
"get_matching_subgraph_pairs",
169
# torch.ao.ns.fx.graph_passes
170
"add_loggers_to_model",
171
"create_a_shadows_b",
172
# torch.ao.ns.fx.mappings
173
"add_op_to_sets_of_related_ops",
174
"get_base_name_for_op",
175
"get_base_name_to_sets_of_related_ops",
176
"get_node_type_to_io_type_map",
177
"get_unmatchable_types_map",
178
# torch.ao.ns.fx.n_shadows_utils
179
"create_add_loggers_graph",
180
"create_n_transformed_and_logged_copies_of_subgraph",
181
"create_one_transformed_and_logged_copy_of_subgraph",
182
"create_results_comparison",
183
"create_submodule_from_subgraph",
184
"extract_weight_comparison",
185
"group_results_by_subgraph",
186
"print_n_shadows_summary",
187
# torch.ao.ns.fx.pattern_utils
188
"end_node_matches_reversed_fusion",
189
"get_reversed_fusions",
190
"get_type_a_related_to_b",
191
# torch.ao.ns.fx.utils
192
"get_arg_indices_of_inputs_to_log",
193
"get_node_first_input_and_output_type",
194
"get_node_input_qparams",
195
"get_normalized_nth_input",
196
"get_number_of_non_param_args",
197
"get_target_type_str",
198
"maybe_add_missing_fqns",
199
"maybe_dequantize_first_two_tensor_args_and_handle_tuples",
200
"op_type_supports_shadowing",
201
"rekey_logger_info_on_node_name_of_model",
202
"return_first_non_observer_node",
203
# torch.ao.ns.fx.weight_utils
204
"extract_weight_from_node",
205
"get_conv_fun_weight",
206
"get_conv_mod_weight",
207
"get_linear_fun_weight",
208
"get_linear_mod_weight",
209
"get_lstm_mod_weights",
211
"get_op_to_type_to_weight_extraction_fn",
212
"get_qconv_fun_weight",
213
"get_qlinear_fun_weight",
215
"mod_0_weight_detach",
218
# torch.ao.pruning.sparsifier.utils
220
"get_arg_info_from_tensor_fqn",
221
"module_contains_param",
224
# torch.ao.quantization.backend_config.executorch
225
"get_executorch_backend_config",
226
# torch.ao.quantization.backend_config.fbgemm
227
"get_fbgemm_backend_config",
228
# torch.ao.quantization.backend_config.native
229
"get_native_backend_config",
230
"get_native_backend_config_dict",
231
"get_test_only_legacy_native_backend_config",
232
"get_test_only_legacy_native_backend_config_dict",
233
# torch.ao.quantization.backend_config.onednn
234
"get_onednn_backend_config",
235
# torch.ao.quantization.backend_config.qnnpack
236
"get_qnnpack_backend_config",
237
# torch.ao.quantization.backend_config.tensorrt
238
"get_tensorrt_backend_config",
239
"get_tensorrt_backend_config_dict",
240
# torch.ao.quantization.backend_config.utils
241
"entry_to_pretty_str",
242
"get_fused_module_classes",
243
"get_fuser_method_mapping",
244
"get_fusion_pattern_to_extra_inputs_getter",
245
"get_fusion_pattern_to_root_node_getter",
246
"get_module_to_qat_module",
247
"get_pattern_to_dtype_configs",
248
"get_pattern_to_input_type_to_index",
249
"get_qat_module_classes",
250
"get_root_module_to_quantized_reference_module",
251
"pattern_to_human_readable",
252
"remove_boolean_dispatch_from_name",
253
# torch.ao.quantization.backend_config.x86
254
"get_x86_backend_config",
255
# torch.ao.quantization.fuse_modules
256
"fuse_known_modules",
258
# torch.ao.quantization.fuser_method_mappings
261
"fuse_convtranspose_bn",
264
"get_fuser_method_new",
265
# torch.ao.quantization.fx.convert
267
"convert_custom_module",
268
"convert_standalone_module",
269
"convert_weighted_module",
270
# torch.ao.quantization.fx.fuse
272
# torch.ao.quantization.fx.lower_to_fbgemm
274
# torch.ao.quantization.fx.lower_to_qnnpack
276
# torch.ao.quantization.fx.pattern_utils
277
"get_default_fusion_patterns",
278
"get_default_output_activation_post_process_map",
279
"get_default_quant_patterns",
280
# torch.ao.quantization.fx.prepare
281
"insert_observers_for_model",
283
"propagate_dtypes_for_known_nodes",
284
# torch.ao.quantization.fx.utils
285
"all_node_args_except_first",
286
"all_node_args_have_no_tensors",
287
"assert_and_get_unique_device",
288
"collect_producer_nodes",
289
"create_getattr_from_value",
290
"create_node_from_old_node_preserve_meta",
291
"get_custom_module_class_keys",
292
"get_linear_prepack_op_for_dtype",
293
"get_new_attr_name_with_prefix",
294
"get_non_observable_arg_indexes_and_types",
295
"get_qconv_prepack_op",
296
"get_skipped_module_name_and_classes",
297
"graph_module_from_producer_nodes",
298
"maybe_get_next_module",
300
"node_arg_is_weight",
302
# torch.ao.quantization.pt2e.graph_utils
303
"find_sequential_partitions",
304
"get_equivalent_types",
305
"update_equivalent_types_dict",
306
# torch.ao.quantization.pt2e.prepare
308
# torch.ao.quantization.pt2e.representation.rewrite
309
"reference_representation_rewrite",
310
# torch.ao.quantization.pt2e.utils
311
"fold_bn_weights_into_conv_node",
312
"get_aten_graph_module",
313
"remove_tensor_overload_for_qdq_ops",
314
# torch.ao.quantization.qconfig
315
"get_default_qat_qconfig",
316
"get_default_qat_qconfig_dict",
317
"get_default_qconfig",
318
"get_default_qconfig_dict",
320
# torch.ao.quantization.quantization_mappings
321
"get_default_compare_output_module_list",
322
"get_default_dynamic_quant_module_mappings",
323
"get_default_dynamic_sparse_quant_module_mappings",
324
"get_default_float_to_quantized_operator_mappings",
325
"get_default_qat_module_mappings",
326
"get_default_qconfig_propagation_list",
327
"get_default_static_quant_module_mappings",
328
"get_default_static_quant_reference_module_mappings",
329
"get_default_static_sparse_quant_module_mappings",
330
"get_dynamic_quant_module_class",
331
"get_embedding_qat_module_mappings",
332
"get_embedding_static_quant_module_mappings",
333
"get_quantized_operator",
334
"get_static_quant_module_class",
336
# torch.ao.quantization.quantize
337
"get_default_custom_config_dict",
338
# torch.ao.quantization.quantize_fx
339
"attach_preserved_attrs_to_model",
340
"convert_to_reference_fx",
341
# torch.ao.quantization.quantize_jit
342
"convert_dynamic_jit",
345
"prepare_dynamic_jit",
347
"quantize_dynamic_jit",
350
"script_qconfig_dict",
351
# torch.ao.quantization.quantize_pt2e
355
# torch.ao.quantization.quantizer.embedding_quantizer
356
"get_embedding_operators_config",
357
# torch.ao.quantization.quantizer.xnnpack_quantizer_utils
359
"get_input_act_qspec",
360
"get_output_act_qspec",
362
"propagate_annotation",
363
"register_annotator",
364
# torch.ao.quantization.utils
366
"activation_is_dynamically_quantized",
367
"activation_is_int32_quantized",
368
"activation_is_int8_quantized",
369
"activation_is_statically_quantized",
370
"calculate_qmin_qmax",
371
"check_min_max_valid",
375
"get_fqn_to_example_inputs",
376
"get_qconfig_dtypes",
379
"get_swapped_custom_module_class",
381
"has_no_children_ignoring_parametrizations",
384
"op_is_int8_dynamically_quantized",
385
"to_underlying_dtype",
386
"validate_qmin_qmax",
388
"weight_is_quantized",
389
"weight_is_statically_quantized",
390
# torch.backends.cudnn.rnn
392
"init_dropout_state",
393
# torch.backends.xeon.run_cpu
395
# torch.cuda.amp.autocast_mode
398
# torch.cuda.amp.common
399
"amp_definitely_not_available",
402
"is_current_stream_capturing",
403
"make_graphed_callables",
405
"caching_allocator_alloc",
406
"caching_allocator_delete",
407
"change_current_allocator",
409
"get_allocator_backend",
410
"list_gpu_processes",
411
"max_memory_allocated",
413
"max_memory_reserved",
420
"memory_stats_as_nested_dict",
422
"reset_accumulated_memory_stats",
423
"reset_max_memory_allocated",
424
"reset_max_memory_cached",
425
"reset_peak_memory_stats",
426
"set_per_process_memory_fraction",
440
# torch.cuda.profiler
455
# torch.distributed.algorithms.ddp_comm_hooks.ddp_zero_hook
456
"hook_with_zero_step",
457
"hook_with_zero_step_interleaved",
458
# torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook
459
"post_localSGD_hook",
460
# torch.distributed.algorithms.ddp_comm_hooks.quantization_hooks
461
"quantization_perchannel_hook",
462
"quantization_pertensor_hook",
463
# torch.distributed.algorithms.model_averaging.utils
464
"average_parameters",
465
"average_parameters_or_parameter_groups",
466
"get_params_to_average",
467
# torch.distributed.checkpoint.default_planner
468
"create_default_global_load_plan",
469
"create_default_global_save_plan",
470
"create_default_local_load_plan",
471
"create_default_local_save_plan",
472
# torch.distributed.checkpoint.optimizer
473
"load_sharded_optimizer_state_dict",
474
# torch.distributed.checkpoint.planner_helpers
475
"create_read_items_for_chunk_list",
476
# torch.distributed.checkpoint.state_dict_loader
478
# torch.distributed.checkpoint.state_dict_saver
480
# torch.distributed.checkpoint.utils
481
"find_state_dict_object",
483
# torch.distributed.collective_utils
485
"all_gather_object_enforce_type",
487
# torch.distributed.distributed_c10d
489
"all_gather_coalesced",
490
"all_gather_into_tensor",
493
"all_reduce_coalesced",
499
"broadcast_object_list",
500
"destroy_process_group",
504
"get_backend_config",
507
"get_process_group_ranks",
510
"init_process_group",
512
"is_backend_available",
517
"is_torchelastic_launched",
523
"new_subgroups_by_enumeration",
527
"reduce_scatter_tensor",
529
"scatter_object_list",
532
# torch.distributed.elastic.events.handlers
533
"get_logging_handler",
534
# torch.distributed.elastic.metrics.api
537
"get_elapsed_time_ms",
542
# torch.distributed.elastic.multiprocessing.api
545
# torch.distributed.elastic.multiprocessing.errors.handlers
547
# torch.distributed.elastic.multiprocessing.redirects
550
# torch.distributed.elastic.multiprocessing.tail_log
552
# torch.distributed.elastic.rendezvous.dynamic_rendezvous
554
# torch.distributed.elastic.rendezvous.etcd_rendezvous
555
"create_rdzv_handler",
556
# torch.distributed.elastic.rendezvous.etcd_server
559
# torch.distributed.elastic.rendezvous.etcd_store
561
# torch.distributed.elastic.rendezvous.static_tcp_rendezvous
562
"create_rdzv_handler",
563
# torch.distributed.elastic.rendezvous.utils
564
"parse_rendezvous_endpoint",
565
# torch.distributed.elastic.timer.api
568
# torch.distributed.elastic.utils.api
569
"get_env_variable_or_raise",
570
"get_socket_with_port",
571
# torch.distributed.elastic.utils.distributed
574
"get_socket_with_port",
575
# torch.distributed.elastic.utils.log_level
577
# torch.distributed.elastic.utils.logging
579
# torch.distributed.elastic.utils.store
583
# torch.distributed.fsdp.wrap
584
"always_wrap_policy",
586
"lambda_auto_wrap_policy",
587
"size_based_auto_wrap_policy",
588
"transformer_auto_wrap_policy",
590
# torch.distributed.nn.functional
600
# torch.distributed.nn.jit.instantiator
601
"get_arg_return_types_from_interface",
602
"instantiate_non_scriptable_remote_module_template",
603
"instantiate_scriptable_remote_module_template",
604
# torch.distributed.nn.jit.templates.remote_module_template
605
"get_remote_module_template",
606
# torch.distributed.optim.utils
607
"as_functional_optim",
608
"register_functional_optim",
609
# torch.distributed.pipeline.sync.checkpoint
611
"enable_checkpointing",
612
"enable_recomputing",
615
"restore_rng_states",
617
# torch.distributed.pipeline.sync.dependency
620
# torch.distributed.pipeline.sync.microbatch
624
# torch.distributed.pipeline.sync.phony
626
# torch.distributed.pipeline.sync.skip.layout
627
"inspect_skip_layout",
628
# torch.distributed.pipeline.sync.skip.tracker
629
"current_skip_tracker",
631
# torch.distributed.pipeline.sync.stream
642
# torch.distributed.pipeline.sync.utils
644
# torch.distributed.pipeline.sync.worker
648
# torch.distributed.rendezvous
649
"register_rendezvous_handler",
651
# torch.distributed.rpc.api
659
# torch.distributed.rpc.backend_registry
660
"backend_registered",
661
"construct_rpc_backend_options",
664
# torch.distributed.rpc.internal
667
# torch.distributed.tensor.parallel.api
668
"parallelize_module",
669
# torch.distributed.tensor.parallel.input_reshard
671
# torch.distributed.tensor.parallel.loss
673
# torch.distributed.tensor.parallel.style
674
"make_sharded_output_tensor",
675
# torch.distributions.utils
680
"tril_matrix_to_vec",
681
"vec_to_tril_matrix",
701
"unique_consecutive",
705
# torch.fx.experimental.accelerator_partitioner
707
"combine_two_partitions",
708
"get_bfs_level_partition",
709
"get_device_partition_stats",
710
"get_device_to_partitions_mapping",
711
"get_logical_id_to_device",
712
"get_node_to_partition_mapping",
713
"reorganize_partitions",
714
"reset_partition_device",
715
"set_parents_and_children",
716
# torch.fx.experimental.const_fold
717
"get_unique_attr_name_in_module",
718
"split_const_subgraphs",
719
# torch.fx.experimental.debug
721
# torch.fx.experimental.graph_gradual_typechecker
722
"adaptiveavgpool2d_check",
723
"adaptiveavgpool2d_inference_rule",
724
"add_inference_rule",
726
"bn2d_inference_rule",
728
"calculate_out_dimension",
729
"conv2d_inference_rule",
730
"conv_refinement_rule",
733
"expand_to_tensor_dim",
736
"flatten_inference_rule",
737
"flatten_refinement_rule",
738
"get_attr_inference_rule",
739
"get_greatest_upper_bound",
742
"linear_inference_rule",
743
"linear_refinement_rule",
745
"maxpool2d_inference_rule",
746
"register_algebraic_expressions_inference_rule",
747
"register_inference_rule",
748
"register_refinement_rule",
749
"relu_inference_rule",
750
"reshape_inference_rule",
751
"transpose_inference_rule",
752
# torch.fx.experimental.merge_matmul
753
"are_nodes_independent",
756
"split_result_tensors",
757
# torch.fx.experimental.meta_tracer
758
"embedding_override",
759
"functional_relu_override",
760
"gen_constructor_wrapper",
761
"nn_layernorm_override",
764
"torch_abs_override",
765
"torch_nn_relu_override",
766
"torch_relu_override",
767
"torch_where_override",
768
# torch.fx.experimental.migrate_gradual_types.constraint
769
"is_algebraic_expression",
772
# torch.fx.experimental.migrate_gradual_types.constraint_generator
773
"adaptive_inference_rule",
774
"add_layer_norm_constraints",
775
"add_linear_constraints",
776
"arange_inference_rule",
777
"assert_inference_rule",
778
"batchnorm_inference_rule",
779
"bmm_inference_rule",
780
"broadcasting_inference_rule",
781
"conv2d_inference_rule",
782
"cumsum_inference_rule",
783
"embedding_inference_rule",
784
"embedding_inference_rule_functional",
786
"equality_inference_rule",
787
"expand_inference_rule",
788
"flatten_inference_rule",
789
"full_inference_rule",
790
"gen_broadcasting_constraints",
791
"gen_embedding_rules",
792
"gen_layer_norm_constraints",
793
"generate_flatten_constraints",
794
"get_attr_inference_rule",
795
"getitem_inference_rule",
797
"index_select_inference_rule",
798
"layer_norm_functional",
799
"layer_norm_inference_rule",
800
"linear_constraints",
801
"linear_inference_rule",
803
"masked_fill_inference_rule",
804
"maxpool_inference_rule",
805
"neq_inference_rule",
807
"register_inference_rule",
808
"relu_inference_rule",
809
"reshape_inference_rule",
810
"size_inference_rule",
811
"tensor_inference_rule",
812
"torch_dim_inference_rule",
813
"torch_linear_inference_rule",
814
"transpose_inference_rule",
815
"type_inference_rule",
816
"view_inference_rule",
817
# torch.fx.experimental.migrate_gradual_types.constraint_transformation
820
"calc_last_two_dims",
821
"create_equality_constraints_for_broadcasting",
822
"gen_all_reshape_possibilities",
823
"gen_broadcasting_constraints",
824
"gen_consistency_constraints",
825
"gen_greatest_upper_bound",
827
"generate_all_broadcasting_possibilities_no_padding",
828
"generate_all_int_dyn_dim_possibilities",
829
"generate_binconstraint_d",
830
"generate_binconstraint_t",
831
"generate_broadcasting",
832
"generate_calc_conv",
833
"generate_calc_maxpool",
834
"generate_calc_product",
840
"is_dim_div_by_target",
841
"is_target_div_by_dim",
842
"no_broadcast_dim_with_index",
843
"register_transformation_rule",
844
"transform_constraint",
845
"transform_get_item",
846
"transform_get_item_tensor",
847
"transform_index_select",
848
"transform_transpose",
850
"valid_index_tensor",
851
# torch.fx.experimental.migrate_gradual_types.transform_to_z3
852
"evaluate_conditional_with_constraints",
853
# torch.fx.experimental.migrate_gradual_types.util
856
"gen_nat_constraints",
859
# torch.fx.experimental.optimization
863
"matches_module_pattern",
865
"optimize_for_inference",
867
"replace_node_module",
870
# torch.fx.experimental.partitioner_utils
871
"get_comm_latency_between",
873
"get_latency_of_one_partition",
874
"get_latency_of_partitioned_graph",
875
"get_partition_to_latency_mapping",
876
# torch.fx.experimental.proxy_tensor
878
"disable_autocast_cache",
879
"disable_proxy_modes_tracing",
884
"fetch_object_proxy",
885
"get_innermost_proxy_mode",
886
"get_isolated_graphmodule",
888
"get_torch_dispatch_modes",
892
"maybe_disable_fake_tensor_mode",
893
"maybe_handle_decomp",
896
"set_original_aten_op",
903
"wrapper_and_args_for_make_fx",
904
# torch.fx.experimental.recording
905
"record_shapeenv_event",
906
"replay_shape_env_events",
907
"shape_env_check_state_equal",
908
# torch.fx.experimental.sym_node
912
"method_to_operator",
913
"sympy_is_channels_last_contiguous_2d",
914
"sympy_is_channels_last_contiguous_3d",
915
"sympy_is_channels_last_strides_2d",
916
"sympy_is_channels_last_strides_3d",
917
"sympy_is_channels_last_strides_generic",
918
"sympy_is_contiguous",
919
"sympy_is_contiguous_generic",
924
# torch.fx.experimental.symbolic_shapes
926
"cast_symbool_to_symint_guardless",
930
"eval_is_non_overlapping_and_dense",
932
"find_symbol_binding_fx_nodes",
934
"free_unbacked_symbols",
935
"fx_placeholder_targets",
936
"fx_placeholder_vals",
942
"has_symbolic_sizes_strides",
943
"is_channels_last_contiguous_2d",
944
"is_channels_last_contiguous_3d",
945
"is_channels_last_strides_2d",
946
"is_channels_last_strides_3d",
948
"is_non_overlapping_and_dense_indicator",
950
"is_symbol_binding_fx_node",
952
# torch.fx.experimental.unification.core
954
# torch.fx.experimental.unification.match
959
# torch.fx.experimental.unification.more
963
# torch.fx.experimental.unification.multipledispatch.conflict
971
# torch.fx.experimental.unification.multipledispatch.core
974
# torch.fx.experimental.unification.multipledispatch.dispatcher
980
"variadic_signature_matches",
981
"variadic_signature_matches_iter",
983
# torch.fx.experimental.unification.multipledispatch.utils
988
# torch.fx.experimental.unification.multipledispatch.variadic
990
# torch.fx.experimental.unification.unification_tools
1007
# torch.fx.experimental.unification.utils
1014
# torch.fx.experimental.unification.variable
1017
# torch.fx.experimental.unify_refinements
1018
"check_for_type_equality",
1020
"infer_symbolic_types",
1021
"infer_symbolic_types_single_pass",
1022
"substitute_all_types",
1023
"substitute_solution_one_type",
1025
# torch.fx.experimental.validator
1027
"translation_validation_enabled",
1028
"translation_validation_timeout",
1031
# torch.fx.graph_module
1032
"reduce_deploy_graph_module",
1033
"reduce_graph_module",
1034
"reduce_package_graph_module",
1039
# torch.fx.operator_schemas
1040
"check_for_mutable_operation",
1042
"get_signature_for_torch_op",
1043
"normalize_function",
1046
# torch.fx.passes.annotate_getitem_nodes
1047
"annotate_getitem_nodes",
1048
# torch.fx.passes.backends.cudagraphs
1049
"partition_cudagraphs",
1050
# torch.fx.passes.dialect.common.cse_pass
1051
"get_CSE_banned_ops",
1052
# torch.fx.passes.graph_manipulation
1053
"get_size_of_all_nodes",
1056
"replace_target_nodes_with",
1057
# torch.fx.passes.infra.pass_manager
1058
"pass_result_wrapper",
1059
"this_before_that_pass_constraint",
1060
# torch.fx.passes.operator_support
1063
"create_op_support",
1064
# torch.fx.passes.param_fetch
1066
"extract_attrs_for_lowering",
1067
"lift_lowering_attrs_to_nodes",
1068
# torch.fx.passes.pass_manager
1072
"these_before_those_pass_constraint",
1073
"this_before_that_pass_constraint",
1074
# torch.fx.passes.reinplace
1076
# torch.fx.passes.split_module
1078
# torch.fx.passes.split_utils
1079
"getattr_recursive",
1080
"setattr_recursive",
1082
# torch.fx.passes.splitter_base
1083
"generate_inputs_for_submodules",
1084
# torch.fx.passes.tools_common
1087
"is_node_output_tensor",
1089
# torch.fx.passes.utils.common
1091
"lift_subgraph_as_module",
1092
# torch.fx.passes.utils.fuser_utils
1094
"fuse_as_graphmodule",
1095
"fuse_by_partitions",
1098
"validate_partition",
1099
# torch.fx.passes.utils.source_matcher_utils
1100
"check_subgraphs_connected",
1101
"get_source_partitions",
1104
# torch.fx.subgraph_rewriter
1106
"replace_pattern_with_filters",
1107
# torch.fx.tensor_type
1110
# torch.fx.traceback
1113
"has_preserved_node_meta",
1114
"preserve_node_meta",
1115
"reset_grad_fn_seq_nr",
1117
"set_grad_fn_seq_nr",
1119
# torch.jit.annotations
1122
"get_enum_value_type",
1126
"is_function_or_method",
1132
"try_real_annotations",
1133
# torch.jit.frontend
1136
"build_ignore_context_manager",
1142
"get_class_assigns",
1143
"get_class_properties",
1145
"get_default_args_for_class",
1146
"get_jit_class_def",
1149
"is_torch_jit_ignore_context_manager",
1150
# torch.jit.generate_bytecode
1152
"generate_upgraders_bytecode",
1153
# torch.jit.quantized
1154
"apply_permutation",
1155
"quantize_linear_modules",
1156
"quantize_rnn_cell_modules",
1157
"quantize_rnn_modules",
1163
# torch.masked.maskedtensor.core
1165
# torch.masked.maskedtensor.creation
1168
# torch.multiprocessing.pool
1170
# torch.multiprocessing.reductions
1173
"rebuild_cuda_tensor",
1175
"rebuild_nested_tensor",
1176
"rebuild_sparse_coo_tensor",
1177
"rebuild_sparse_compressed_tensor",
1178
"rebuild_storage_empty",
1179
"rebuild_storage_fd",
1180
"rebuild_storage_filename",
1182
"rebuild_typed_storage",
1183
"rebuild_typed_storage_child",
1187
"reduce_typed_storage",
1188
"reduce_typed_storage_child",
1189
"storage_from_cache",
1190
# torch.multiprocessing.spawn
1192
# torch.nn.functional
1193
"adaptive_max_pool1d_with_indices",
1194
"adaptive_max_pool2d_with_indices",
1195
"adaptive_max_pool3d_with_indices",
1196
"assert_int_or_pair",
1197
"fractional_max_pool2d_with_indices",
1198
"fractional_max_pool3d_with_indices",
1199
"max_pool1d_with_indices",
1200
"max_pool2d_with_indices",
1201
"max_pool3d_with_indices",
1202
"multi_head_attention_forward",
1222
# torch.nn.modules.rnn
1223
"apply_permutation",
1224
# torch.nn.modules.utils
1225
"consume_prefix_in_state_dict_if_present",
1226
# torch.nn.parallel.comm
1228
"broadcast_coalesced",
1231
"reduce_add_coalesced",
1233
# torch.nn.parallel.data_parallel
1235
# torch.nn.parallel.parallel_apply
1238
# torch.nn.parallel.replicate
1240
# torch.nn.parallel.scatter_gather
1245
# torch.nn.parameter
1247
# torch.nn.utils.clip_grad
1251
# torch.nn.utils.convert_parameters
1252
"parameters_to_vector",
1253
"vector_to_parameters",
1254
# torch.nn.utils.fusion
1255
"fuse_conv_bn_eval",
1256
"fuse_conv_bn_weights",
1257
"fuse_linear_bn_eval",
1258
"fuse_linear_bn_weights",
1259
# torch.nn.utils.init
1261
# torch.nn.utils.memory_format
1262
"convert_conv2d_weight_memory_format",
1263
# torch.nn.utils.parametrizations
1265
# torch.nn.utils.parametrize
1266
"transfer_parametrizations_and_params",
1267
"type_before_parametrizations",
1268
# torch.nn.utils.rnn
1270
"invert_permutation",
1271
# torch.nn.utils.spectral_norm
1272
"remove_spectral_norm",
1274
# torch.nn.utils.weight_norm
1275
"remove_weight_norm",
1277
# torch.onnx.operators
1278
"reshape_from_tensor_shape",
1280
# torch.onnx.symbolic_caffe2
1293
"quantize_per_tensor",
1294
"register_quantized_ops",
1299
"upsample_nearest2d",
1300
# torch.onnx.symbolic_helper
1301
"args_have_same_dtype",
1302
"check_training_mode",
1303
"dequantize_helper",
1304
"is_caffe2_aten_fallback",
1308
"requantize_bias_helper",
1309
# torch.onnx.symbolic_opset10
1313
"fake_quantize_per_tensor_affine",
1319
"quantize_per_tensor",
1321
"quantized_add_relu",
1324
"quantized_conv1d_relu",
1326
"quantized_conv2d_relu",
1328
"quantized_conv3d_relu",
1329
"quantized_conv_transpose1d",
1330
"quantized_conv_transpose2d",
1331
"quantized_conv_transpose3d",
1332
"quantized_group_norm",
1333
"quantized_hardswish",
1334
"quantized_instance_norm",
1335
"quantized_layer_norm",
1336
"quantized_leaky_relu",
1338
"quantized_linear_relu",
1340
"quantized_sigmoid",
1344
# torch.onnx.symbolic_opset11
1373
"linalg_vector_norm",
1383
"prim_constant_chunk",
1402
# torch.onnx.symbolic_opset12
1405
"binary_cross_entropy_with_logits",
1407
"cross_entropy_loss",
1420
# torch.onnx.symbolic_opset13
1422
"fake_quantize_per_channel_affine",
1423
"fake_quantize_per_tensor_affine",
1428
"quantized_conv1d_relu",
1430
"quantized_conv2d_relu",
1432
"quantized_conv3d_relu",
1433
"quantized_conv_transpose1d",
1434
"quantized_conv_transpose2d",
1435
"quantized_conv_transpose3d",
1437
"quantized_linear_relu",
1438
"repeat_interleave",
1448
"unsafe_split_with_sizes",
1450
# torch.onnx.symbolic_opset14
1453
"quantized_hardswish",
1455
"scaled_dot_product_attention",
1458
# torch.onnx.symbolic_opset15
1462
"prim_unchecked_cast",
1463
# torch.onnx.symbolic_opset16
1467
# torch.onnx.symbolic_opset17
1470
# torch.onnx.symbolic_opset18
1472
# torch.onnx.symbolic_opset7
1475
# torch.onnx.symbolic_opset8
1493
# torch.onnx.symbolic_opset9
1496
"adaptive_avg_pool1d",
1497
"adaptive_avg_pool2d",
1498
"adaptive_avg_pool3d",
1499
"adaptive_max_pool1d",
1500
"adaptive_max_pool2d",
1501
"adaptive_max_pool3d",
1526
"broadcast_tensors",
1545
"convert_element_type",
1548
"cosine_similarity",
1578
"get_pool_ceil_padding",
1595
"is_floating_point",
1606
"linalg_matrix_norm",
1608
"linalg_vector_norm",
1631
"max_pool1d_with_indices",
1633
"max_pool2d_with_indices",
1635
"max_pool3d_with_indices",
1648
"native_layer_norm",
1657
"noop_complex_operators",
1665
"overload_by_arg_count",
1667
"pairwise_distance",
1674
"prim_constant_chunk",
1675
"prim_constant_split",
1681
"prim_list_construct",
1688
"prim_tuple_construct",
1690
"prim_unchecked_cast",
1691
"prim_uninitialized",
1704
"repeat_interleave",
1754
"unsafe_split_with_sizes",
1756
"unsupported_complex_operators",
1758
"upsample_bilinear2d",
1759
"upsample_linear1d",
1760
"upsample_nearest1d",
1761
"upsample_nearest2d",
1762
"upsample_nearest3d",
1763
"upsample_trilinear3d",
1769
"wrap_logical_op_with_cast_to",
1770
"wrap_logical_op_with_negation",
1775
"disable_apex_o2_state_dict_hook",
1777
"export_to_pretty_string",
1779
"is_in_onnx_export",
1781
"register_custom_op_symbolic",
1782
"select_model_mode_for_export",
1783
"setup_onnx_logging",
1784
"unconvertible_ops",
1785
"unpack_quantized_tensor",
1786
"warn_on_static_input_change",
1787
# torch.onnx.verification
1788
"check_export_model_diff",
1790
"verify_aten_graph",
1791
# torch.optim.adadelta
1793
# torch.optim.adagrad
1797
# torch.optim.adamax
1805
# torch.optim.optimizer
1806
"register_optimizer_step_post_hook",
1807
"register_optimizer_step_pre_hook",
1810
# torch.optim.rmsprop
1816
# torch.optim.swa_utils
1818
"get_ema_multi_avg_fn",
1820
"get_swa_multi_avg_fn",
1823
"enable_reentrant_dispatch",
1824
# torch.package.analyze.find_first_use_of_broken_modules
1825
"find_first_use_of_broken_modules",
1826
# torch.package.analyze.is_from_package
1828
# torch.package.analyze.trace_dependencies
1829
"trace_dependencies",
1830
# torch.profiler.itt
1832
# torch.profiler.profiler
1834
"supported_activities",
1835
"tensorboard_trace_handler",
1836
# torch.return_types
1837
"pytree_register_structseq",
1838
# torch.serialization
1839
"check_module_version_greater_or_equal",
1840
"default_restore_location",
1844
"normalize_storage_type",
1846
"storage_to_tensor_type",
1847
"validate_cuda_device",
1848
"validate_hpu_device",
1849
# torch.signal.windows.windows
1861
# torch.sparse.semi_structured
1862
"to_sparse_semi_structured",
1863
# torch.utils.backend_registration
1864
"generate_methods_for_privateuse1_backend",
1865
"rename_privateuse1_backend",
1866
# torch.utils.benchmark.examples.blas_compare_setup
1868
# torch.utils.benchmark.examples.op_benchmark
1869
"assert_dicts_equal",
1870
# torch.utils.benchmark.op_fuzzers.spectral
1872
# torch.utils.benchmark.utils.common
1875
"set_torch_threads",
1878
# torch.utils.benchmark.utils.compare
1880
# torch.utils.benchmark.utils.compile
1883
"benchmark_compile",
1884
# torch.utils.benchmark.utils.cpp_jit
1885
"compile_callgrind_template",
1886
"compile_timeit_template",
1887
"get_compat_bindings",
1888
# torch.utils.benchmark.utils.fuzzer
1891
# torch.utils.benchmark.utils.timer
1893
# torch.utils.benchmark.utils.valgrind_wrapper.timer_interface
1894
"wrapper_singleton",
1895
# torch.utils.bundled_inputs
1896
"augment_many_model_functions_with_bundled_inputs",
1897
"augment_model_with_bundled_inputs",
1899
"bundle_large_tensor",
1901
# torch.utils.checkpoint
1902
"check_backward_validity",
1904
"get_device_states",
1906
"set_checkpoint_early_stop",
1907
"set_device_states",
1908
# torch.utils.collect_env
1909
"check_release_file",
1910
"get_cachingallocator_config",
1911
"get_clang_version",
1912
"get_cmake_version",
1913
"get_conda_packages",
1915
"get_cuda_module_loading_config",
1916
"get_cudnn_version",
1923
"get_nvidia_driver_version",
1928
"get_pretty_env_info",
1929
"get_python_platform",
1930
"get_running_cuda_version",
1931
"get_windows_version",
1932
"is_xnnpack_available",
1934
# torch.utils.cpp_backtrace
1935
"get_cpp_backtrace",
1936
# torch.utils.cpp_extension
1937
"check_compiler_is_gcc",
1938
"check_compiler_ok_for_platform",
1940
"get_default_build_root",
1942
"remove_extension_h_precompiler_headers",
1943
# torch.utils.data.backward_compatibility
1945
# torch.utils.data.datapipes.dataframe.dataframe_wrapper
1956
# torch.utils.data.datapipes.dataframe.dataframes
1959
# torch.utils.data.datapipes.gen_pyi
1960
"extract_class_name",
1961
"extract_method_name",
1963
"gen_from_template",
1964
"get_method_definitions",
1965
"materialize_lines",
1966
"parse_datapipe_file",
1967
"parse_datapipe_files",
1968
"process_signature",
1969
"split_outside_bracket",
1970
# torch.utils.data.datapipes.map.callable
1972
# torch.utils.data.datapipes.utils.common
1973
"get_file_binaries_from_pathnames",
1974
"get_file_pathnames_from_root",
1976
"validate_input_col",
1977
"validate_pathname_binary_tuple",
1978
# torch.utils.data.datapipes.utils.decoder
1981
"extension_extract_fn",
1986
# torch.utils.data.dataset
1988
# torch.utils.data.graph
1991
# torch.utils.data.graph_settings
1992
"apply_random_seed",
1994
"apply_shuffle_seed",
1995
"apply_shuffle_settings",
1996
"get_all_graph_pipes",
1997
# torch.utils.flop_counter
2001
"conv_backward_flop",
2004
"convert_num_with_suffix",
2009
"register_flop_formula",
2010
"sdpa_backward_flop",
2011
"sdpa_backward_flop_count",
2016
# torch.utils.hipify.hipify_python
2019
"extract_arguments",
2021
"file_specific_replacement",
2022
"find_bracket_group",
2023
"find_closure_group",
2024
"find_parentheses_group",
2025
"fix_static_global_kernels",
2026
"get_hip_file_path",
2029
"is_caffe2_gpu_file",
2035
"matched_files_iter",
2037
"preprocess_file_and_save_result",
2039
"processKernelLaunches",
2040
"replace_extern_shared",
2041
"replace_math_functions",
2044
"unserializable_hook",
2045
"warn_if_has_hooks",
2046
# torch.utils.jit.log_extract
2048
"load_graph_and_inputs",
2049
"make_tensor_from_type",
2053
# torch.utils.mkldnn
2055
# torch.utils.mobile_optimizer
2056
"generate_mobile_module_lints",
2057
# torch.utils.tensorboard.summary
2079
# torch.utils.throughput_benchmark
2083
# Classes that the sphinx.ext.coverage builder should not report as
# undocumented.  Grouped by the module each name comes from.
# NOTE(review): reconstructed from a corrupted source (stray line-number
# artifacts removed, closing bracket restored); some entries may have been
# lost in extraction — diff against upstream docs/source/conf.py to confirm.
coverage_ignore_classes = [
    "BenchmarkExecutionStats",
    "CompleteArgumentSpec",
    "ConcreteModuleType",
    "ConcreteModuleTypeBuilder",
    "DeepCopyMemoTable",
    "DeserializationStorageContext",
    "ExcludeDispatchKeyGuard",
    "GraphExecutorState",
    "MobileOptimizerType",
    "PyTorchFileReader",
    "PyTorchFileWriter",
    "ScriptClassFunction",
    "ScriptDictIterator",
    "ScriptDictKeyIterator",
    "ScriptListIterator",
    "ScriptModuleSerializer",
    "ScriptObjectProperty",
    "SerializationStorageContext",
    "ThroughputBenchmark",
    "ComplexDoubleStorage",
    "ComplexFloatStorage",
    "DeferredCudaCallError",
    # torch.cuda._sanitizer
    "CUDASanitizerDispatchMode",
    "CUDASanitizerErrors",
    "SynchronizationError",
    "UnsynchronizedAccessError",
    # torch.distributed.elastic.multiprocessing.errors
    # torch.distributions.constraints
    "half_open_interval",
    # torch.distributions.transforms
    "CorrCholeskyTransform",
    "CumulativeDistributionTransform",
    "IndependentTransform",
    "SoftplusTransform",
    "StickBreakingTransform",
    # torch.backends.cuda
    "cuFFTPlanCacheAttrContextProp",
    "cuFFTPlanCacheManager",
    # torch.distributed.algorithms.ddp_comm_hooks
    # torch.ao.nn.quantized.modules
    # torch.utils.backcompat
    # torch.ao.nn.intrinsic.modules.fused
    # torch.ao.nn.intrinsic.qat.modules.conv_fused
    # torch.ao.nn.intrinsic.qat.modules.linear_fused
    # torch.ao.nn.intrinsic.qat.modules.linear_relu
    # torch.ao.nn.intrinsic.quantized.dynamic.modules.linear_relu
    # torch.ao.nn.intrinsic.quantized.modules.bn_relu
    # torch.ao.nn.intrinsic.quantized.modules.conv_add
    # torch.ao.nn.intrinsic.quantized.modules.conv_relu
    # torch.ao.nn.intrinsic.quantized.modules.linear_relu
    # torch.ao.nn.qat.modules.conv
    # torch.ao.nn.qat.modules.embedding_ops
    # torch.ao.nn.qat.modules.linear
    # torch.ao.nn.quantizable.modules.activation
    "MultiheadAttention",
    # torch.ao.nn.quantizable.modules.rnn
    # torch.ao.nn.quantized.dynamic.modules.conv
    # torch.ao.nn.quantized.dynamic.modules.linear
    # torch.ao.nn.quantized.dynamic.modules.rnn
    # torch.ao.nn.quantized.modules.activation
    "MultiheadAttention",
    # torch.ao.nn.quantized.modules.batchnorm
    # torch.ao.nn.quantized.modules.conv
    # torch.ao.nn.quantized.modules.dropout
    # torch.ao.nn.quantized.modules.embedding_ops
    "EmbeddingPackedParams",
    # torch.ao.nn.quantized.modules.functional_modules
    "FXFloatFunctional",
    # torch.ao.nn.quantized.modules.linear
    "LinearPackedParams",
    # torch.ao.nn.quantized.modules.normalization
    # torch.ao.nn.quantized.modules.rnn
    # torch.ao.nn.quantized.modules.utils
    "WeightedQuantizedModule",
    # torch.ao.nn.quantized.reference.modules.conv
    # torch.ao.nn.quantized.reference.modules.linear
    # torch.ao.nn.quantized.reference.modules.rnn
    # torch.ao.nn.quantized.reference.modules.sparse
    # torch.ao.nn.quantized.reference.modules.utils
    "ReferenceQuantizedModule",
    # torch.ao.nn.sparse.quantized.dynamic.linear
    # torch.ao.nn.sparse.quantized.linear
    "LinearPackedParams",
    # torch.ao.nn.sparse.quantized.utils
    "LinearBlockSparsePattern",
    # torch.ao.ns.fx.graph_matcher
    "SubgraphTypeRelationship",
    # torch.ao.ns.fx.n_shadows_utils
    # torch.ao.ns.fx.ns_types
    "NSSingleResultValuesType",
    # torch.ao.ns.fx.qconfig_multi_mapping
    "QConfigMultiMapping",
    # torch.ao.pruning.scheduler.base_scheduler
    # torch.ao.pruning.scheduler.cubic_scheduler
    # torch.ao.pruning.scheduler.lambda_scheduler
    # torch.ao.pruning.sparsifier.base_sparsifier
    # torch.ao.pruning.sparsifier.nearly_diagonal_sparsifier
    "NearlyDiagonalSparsifier",
    # torch.ao.pruning.sparsifier.utils
    # torch.ao.pruning.sparsifier.weight_norm_sparsifier
    "WeightNormSparsifier",
    # torch.ao.quantization.backend_config.backend_config
    "BackendPatternConfig",
    # torch.ao.quantization.fake_quantize
    "FixedQParamsFakeQuantize",
    "FusedMovingAvgObsFakeQuantize",
    # torch.ao.quantization.fx.fuse_handler
    "DefaultFuseHandler",
    # torch.ao.quantization.fx.graph_module
    "ObservedGraphModule",
    "ObservedStandaloneGraphModule",
    # torch.ao.quantization.fx.quantize_handler
    "BatchNormQuantizeHandler",
    "BinaryOpQuantizeHandler",
    "CatQuantizeHandler",
    "ConvReluQuantizeHandler",
    "CopyNodeQuantizeHandler",
    "CustomModuleQuantizeHandler",
    "DefaultNodeQuantizeHandler",
    "EmbeddingQuantizeHandler",
    "FixedQParamsOpQuantizeHandler",
    "GeneralTensorShapeOpQuantizeHandler",
    "LinearReLUQuantizeHandler",
    "RNNDynamicQuantizeHandler",
    "StandaloneModuleQuantizeHandler",
    # torch.ao.quantization.fx.tracer
    "QuantizationTracer",
    "ScopeContextManager",
    # torch.ao.quantization.fx.utils
    "ObservedGraphModuleAttrs",
    # torch.ao.quantization.observer
    "FixedQParamsObserver",
    "HistogramObserver",
    "MovingAverageMinMaxObserver",
    "MovingAveragePerChannelMinMaxObserver",
    "PerChannelMinMaxObserver",
    "PlaceholderObserver",
    "RecordingObserver",
    "ReuseInputObserver",
    "UniformQuantizationObserverBase",
    "default_debug_observer",
    "default_placeholder_observer",
    "default_reuse_input_observer",
    # torch.ao.quantization.pt2e.duplicate_dq_pass
    # torch.ao.quantization.pt2e.port_metadata_pass
    "PortNodeMetaForQDQ",
    # torch.ao.quantization.qconfig
    # torch.ao.quantization.quant_type
    # torch.ao.quantization.quantizer.composable_quantizer
    "ComposableQuantizer",
    # torch.ao.quantization.quantizer.embedding_quantizer
    "EmbeddingQuantizer",
    # torch.ao.quantization.quantizer.quantizer
    "DerivedQuantizationSpec",
    "FixedQParamsQuantizationSpec",
    "QuantizationAnnotation",
    "QuantizationSpecBase",
    "SharedQuantizationSpec",
    # torch.ao.quantization.quantizer.x86_inductor_quantizer
    "X86InductorQuantizer",
    # torch.ao.quantization.quantizer.xnnpack_quantizer
    # torch.ao.quantization.quantizer.xnnpack_quantizer_utils
    "QuantizationConfig",
    # torch.ao.quantization.stubs
    # torch.ao.quantization.utils
    # torch.backends.cudnn.rnn
    # torch.amp.grad_scaler
    # torch.cuda.streams
    # torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook
    "PostLocalSGDState",
    # torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook
    # torch.distributed.algorithms.join
    # torch.distributed.algorithms.model_averaging.averagers
    "PeriodicModelAverager",
    # torch.distributed.algorithms.model_averaging.hierarchical_model_averager
    "HierarchicalModelAverager",
    # torch.distributed.argparse_util
    # torch.distributed.checkpoint.api
    "CheckpointException",
    # torch.distributed.checkpoint.default_planner
    "DefaultLoadPlanner",
    "DefaultSavePlanner",
    # torch.distributed.checkpoint.filesystem
    # torch.distributed.checkpoint.metadata
    "BytesStorageMetadata",
    "ChunkStorageMetadata",
    # torch.distributed.checkpoint.planner
    # torch.distributed.checkpoint.state_dict
    "DistributedStateDictOptions",
    # torch.distributed.checkpoint.storage
    # torch.distributed.collective_utils
    # torch.distributed.distributed_c10d
    "AllreduceCoalescedOptions",
    "ReduceScatterOptions",
    # torch.distributed.elastic.agent.server.api
    "SimpleElasticAgent",
    # torch.distributed.elastic.events.api
    # torch.distributed.elastic.metrics.api
    "ConsoleMetricHandler",
    "NullMetricHandler",
    # torch.distributed.elastic.multiprocessing.api
    "MultiprocessContext",
    "SubprocessContext",
    "SubprocessHandler",
    # torch.distributed.elastic.multiprocessing.tail_log
    # torch.distributed.elastic.rendezvous.api
    "RendezvousHandler",
    "RendezvousHandlerRegistry",
    "RendezvousParameters",
    # torch.distributed.elastic.rendezvous.dynamic_rendezvous
    "DynamicRendezvousHandler",
    "RendezvousSettings",
    # torch.distributed.elastic.rendezvous.etcd_rendezvous
    "EtcdRendezvousHandler",
    "EtcdRendezvousRetryImmediately",
    "EtcdRendezvousRetryableFailure",
    # torch.distributed.elastic.rendezvous.etcd_server
    # torch.distributed.elastic.rendezvous.static_tcp_rendezvous
    "StaticTCPRendezvous",
    # torch.distributed.elastic.timer.api
    # torch.distributed.elastic.timer.file_based_local_timer
    # torch.distributed.elastic.timer.local_timer
    "MultiprocessingRequestQueue",
    # torch.distributed.elastic.utils.api
    # torch.distributed.elastic.utils.data.cycling_iterator
    # torch.distributed.elastic.utils.data.elastic_distributed_sampler
    "ElasticDistributedSampler",
    # torch.distributed.fsdp.api
    # torch.distributed.fsdp.fully_sharded_data_parallel
    "FullyShardedDataParallel",
    "OptimStateKeyType",
    # torch.distributed.fsdp.sharded_grad_scaler
    "ShardedGradScaler",
    # torch.distributed.fsdp.wrap
    # torch.distributed.launcher.api
    # torch.distributed.optim.optimizer
    "DistributedOptimizer",
    # torch.distributed.optim.post_localSGD_optimizer
    "PostLocalSGDOptimizer",
    # torch.distributed.optim.zero_redundancy_optimizer
    "ZeroRedundancyOptimizer",
    # torch.distributed.pipeline.sync.batchnorm
    "DeferredBatchNorm",
    # torch.distributed.pipeline.sync.checkpoint
    # torch.distributed.pipeline.sync.copy
    # torch.distributed.pipeline.sync.dependency
    # torch.distributed.pipeline.sync.microbatch
    # torch.distributed.pipeline.sync.pipe
    # torch.distributed.pipeline.sync.pipeline
    # torch.distributed.pipeline.sync.skip.layout
    # torch.distributed.pipeline.sync.skip.namespace
    # torch.distributed.pipeline.sync.skip.portal
    # torch.distributed.pipeline.sync.skip.skippable
    # torch.distributed.pipeline.sync.skip.tracker
    "SkipTrackerThroughPotals",
    # torch.distributed.pipeline.sync.stream
    # torch.distributed.pipeline.sync.worker
    # torch.distributed.rpc.api
    # torch.distributed.rpc.backend_registry
    # torch.distributed.rpc.internal
    # torch.distributed.rpc.rref_proxy
    # torch.distributed.tensor.parallel.fsdp
    "DTensorExtensions",
    # torch.distributed.tensor.parallel.style
    # torch.distributions.logistic_normal
    # torch.distributions.one_hot_categorical
    "OneHotCategoricalStraightThrough",
    # torch.distributions.relaxed_categorical
    "ExpRelaxedCategorical",
    # torch.distributions.utils
    # torch.export.exported_program
    # torch.fx.experimental.accelerator_partitioner
    # torch.fx.experimental.const_fold
    "FoldedGraphModule",
    # torch.fx.experimental.graph_gradual_typechecker
    # torch.fx.experimental.meta_tracer
    "MetaDeviceAttribute",
    # torch.fx.experimental.migrate_gradual_types.constraint
    "ApplyBroadcasting",
    "DGreatestUpperBound",
    "TGreatestUpperBound",
    # torch.fx.experimental.migrate_gradual_types.constraint_generator
    "ConstraintGenerator",
    # torch.fx.experimental.normalize
    "NormalizeOperators",
    # torch.fx.experimental.optimization
    # torch.fx.experimental.partitioner_utils
    "PartitionerConfig",
    # torch.fx.experimental.proxy_tensor
    "DecompositionInterpreter",
    "PreDispatchTorchFunctionMode",
    "ProxySymDispatchMode",
    "ProxyTorchDispatchMode",
    # torch.fx.experimental.recording
    # torch.fx.experimental.refinement_types
    # torch.fx.experimental.rewriter
    # torch.fx.experimental.schema_type_annotation
    "AnnotateTypesWithSchema",
    # torch.fx.experimental.sym_node
    # torch.fx.experimental.symbolic_shapes
    "ConstraintViolationError",
    "DynamicDimConstraintPrinter",
    "GuardOnDataDependentSymNode",
    "LoggingShapeGuardPrinter",
    "RelaxedUnspecConstraint",
    "ShapeGuardPrinter",
    # torch.fx.experimental.unification.match
    # torch.fx.experimental.unification.multipledispatch.conflict
    # torch.fx.experimental.unification.multipledispatch.dispatcher
    "MDNotImplementedError",
    # torch.fx.experimental.unification.multipledispatch.variadic
    "VariadicSignatureMeta",
    "VariadicSignatureType",
    # torch.fx.experimental.unification.variable
    # torch.fx.experimental.validator
    "BisectValidationException",
    "PopulateValidator",
    "ValidationException",
    # torch.fx.immutable_collections
    # torch.fx.interpreter
    # torch.fx.operator_schemas
    # torch.fx.passes.backends.cudagraphs
    "CudaGraphsSupport",
    # torch.fx.passes.dialect.common.cse_pass
    # torch.fx.passes.fake_tensor_prop
    # torch.fx.passes.graph_drawer
    # torch.fx.passes.graph_manipulation
    # torch.fx.passes.infra.partitioner
    "CapabilityBasedPartitioner",
    # torch.fx.passes.infra.pass_base
    # torch.fx.passes.infra.pass_manager
    # torch.fx.passes.net_min_base
    "FxNetMinimizerBadModuleError",
    "FxNetMinimizerResultMismatchError",
    "FxNetMinimizerRunFuncError",
    # torch.fx.passes.operator_support
    "OperatorSupportBase",
    # torch.fx.passes.pass_manager
    # torch.fx.passes.shape_prop
    # torch.fx.passes.split_module
    # torch.fx.passes.split_utils
    # torch.fx.passes.splitter_base
    "FxNetAccNodesFinder",
    "FxNetSplitterInternalError",
    # torch.fx.passes.tests.test_pass_manager
    # torch.fx.passes.tools_common
    "FxNetAccFusionsFinder",
    # torch.fx.passes.utils.common
    # torch.fx.passes.utils.matcher_utils
    # torch.fx.passes.utils.source_matcher_utils
    "ScopeContextManager",
    # torch.fx.subgraph_rewriter
    # torch.jit.annotations
    # torch.jit.frontend
    "FrontendTypeError",
    "NotSupportedError",
    "UnsupportedNodeError",
    # torch.masked.maskedtensor.core
    # torch.multiprocessing.pool
    # torch.multiprocessing.queue
    "ConnectionWrapper",
    # torch.multiprocessing.reductions
    # torch.multiprocessing.spawn
    "ProcessExitedException",
    "ProcessRaisedException",
    "OrderedDictWrapper",
    # torch.nn.modules.activation
    "MultiheadAttention",
    # torch.nn.modules.adaptive
    "AdaptiveLogSoftmaxWithLoss",
    # torch.nn.modules.batchnorm
    # torch.nn.modules.channelshuffle
    # torch.nn.modules.container
    # torch.nn.modules.conv
    # torch.nn.modules.distance
    # torch.nn.modules.dropout
    "FeatureAlphaDropout",
    # torch.nn.modules.flatten
    # torch.nn.modules.fold
    # torch.nn.modules.linear
    "NonDynamicallyQuantizableLinear",
    # torch.nn.modules.loss
    "BCEWithLogitsLoss",
    "CosineEmbeddingLoss",
    "HingeEmbeddingLoss",
    "MarginRankingLoss",
    "MultiLabelMarginLoss",
    "MultiLabelSoftMarginLoss",
    "TripletMarginLoss",
    "TripletMarginWithDistanceLoss",
    # torch.nn.modules.module
    # torch.nn.modules.normalization
    "LocalResponseNorm",
    # torch.nn.modules.padding
    # torch.nn.modules.pixelshuffle
    # torch.nn.modules.pooling
    "AdaptiveAvgPool1d",
    "AdaptiveAvgPool2d",
    "AdaptiveAvgPool3d",
    "AdaptiveMaxPool1d",
    "AdaptiveMaxPool2d",
    "AdaptiveMaxPool3d",
    "FractionalMaxPool2d",
    "FractionalMaxPool3d",
    # torch.nn.modules.rnn
    # torch.nn.modules.sparse
    # torch.nn.modules.upsampling
    # torch.nn.parallel.data_parallel
    # torch.nn.parallel.distributed
    "DistributedDataParallel",
    # torch.nn.parameter
    "UninitializedTensorMixin",
    # torch.nn.utils.parametrize
    "ParametrizationList",
    # torch.nn.utils.prune
    "RandomUnstructured",
    # torch.nn.utils.rnn
    # torch.nn.utils.spectral_norm
    "SpectralNormLoadStateDictPreHook",
    "SpectralNormStateDictHook",
    # torch.nn.utils.weight_norm
    "OnnxExporterError",
    "OnnxExporterWarning",
    "SymbolicValueError",
    "UnsupportedOperatorError",
    # torch.onnx.verification
    "OnnxTestCaseRepro",
    # torch.optim.adadelta
    # torch.optim.adagrad
    # torch.optim.adamax
    # torch.optim.lr_scheduler
    "CosineAnnealingLR",
    "CosineAnnealingWarmRestarts",
    "ReduceLROnPlateau",
    # torch.optim.optimizer
    # torch.optim.rmsprop
    # torch.optim.sparse_adam
    # torch.optim.swa_utils
    "BaseTorchFunctionMode",
    "TorchFunctionMode",
    # torch.package.file_structure_representation
    # torch.package.glob_group
    # torch.package.importer
    # torch.package.package_exporter
    "PackagingErrorReason",
    # torch.package.package_importer
    # torch.profiler.profiler
    "ExecutionTraceObserver",
    # torch.return_types
    "linalg_cholesky_ex",
    "linalg_cholesky_ex_out",
    "linalg_inv_ex_out",
    "linalg_ldl_factor",
    "linalg_ldl_factor_ex",
    "linalg_ldl_factor_ex_out",
    "linalg_ldl_factor_out",
    "linalg_lu_factor_ex",
    "linalg_lu_factor_ex_out",
    "linalg_lu_factor_out",
    "linalg_slogdet_out",
    "linalg_solve_ex_out",
    "triangular_solve_out",
    # torch.serialization
    "SourceChangeWarning",
    # torch.sparse.semi_structured
    "SparseSemiStructuredTensor",
    # torch.torch_version
    # torch.utils.benchmark.examples.blas_compare_setup
    # torch.utils.benchmark.examples.compare
    # torch.utils.benchmark.examples.spectral_ops_fuzz_test
    # torch.utils.benchmark.op_fuzzers.binary
    # torch.utils.benchmark.op_fuzzers.sparse_binary
    "BinaryOpSparseFuzzer",
    # torch.utils.benchmark.op_fuzzers.sparse_unary
    "UnaryOpSparseFuzzer",
    # torch.utils.benchmark.op_fuzzers.spectral
    # torch.utils.benchmark.op_fuzzers.unary
    # torch.utils.benchmark.utils.common
    # torch.utils.benchmark.utils.compare
    # torch.utils.benchmark.utils.fuzzer
    # torch.utils.benchmark.utils.sparse_fuzzer
    "FuzzedSparseTensor",
    # torch.utils.benchmark.utils.timer
    # torch.utils.benchmark.utils.valgrind_wrapper.timer_interface
    # torch.utils.bundled_inputs
    # torch.utils.checkpoint
    "CheckpointFunction",
    "DefaultDeviceType",
    # torch.utils.collect_env
    # torch.utils.cpp_extension
    # torch.utils.data.dataloader
    # torch.utils.data.datapipes.dataframe.dataframe_wrapper
    # torch.utils.data.datapipes.dataframe.dataframes
    "CaptureDataFrameWithDataPipeOps",
    "CaptureVariableAssign",
    "DataFrameTracedOps",
    # torch.utils.data.datapipes.dataframe.datapipes
    "ConcatDataFramesPipe",
    "DataFramesAsTuplesPipe",
    "ExampleAggregateAsDataFrames",
    "FilterDataFramesPipe",
    "PerRowDataFramesPipe",
    "ShuffleDataFramesPipe",
    # torch.utils.data.datapipes.dataframe.structures
    # torch.utils.data.datapipes.datapipe
    # torch.utils.data.datapipes.iter.callable
    "CollatorIterDataPipe",
    "MapperIterDataPipe",
    # torch.utils.data.datapipes.iter.combinatorics
    "SamplerIterDataPipe",
    "ShufflerIterDataPipe",
    # torch.utils.data.datapipes.iter.combining
    "ConcaterIterDataPipe",
    "DemultiplexerIterDataPipe",
    "ForkerIterDataPipe",
    "MultiplexerIterDataPipe",
    "ZipperIterDataPipe",
    # torch.utils.data.datapipes.iter.filelister
    "FileListerIterDataPipe",
    # torch.utils.data.datapipes.iter.fileopener
    "FileOpenerIterDataPipe",
    # torch.utils.data.datapipes.iter.grouping
    "BatcherIterDataPipe",
    "GrouperIterDataPipe",
    "UnBatcherIterDataPipe",
    # torch.utils.data.datapipes.iter.routeddecoder
    "RoutedDecoderIterDataPipe",
    # torch.utils.data.datapipes.iter.selecting
    "FilterIterDataPipe",
    # torch.utils.data.datapipes.iter.sharding
    "SHARDING_PRIORITIES",
    "ShardingFilterIterDataPipe",
    # torch.utils.data.datapipes.iter.utils
    "IterableWrapperIterDataPipe",
    # torch.utils.data.datapipes.map.callable
    "MapperMapDataPipe",
    # torch.utils.data.datapipes.map.combinatorics
    "ShufflerIterDataPipe",
    # torch.utils.data.datapipes.map.combining
    "ConcaterMapDataPipe",
    "ZipperMapDataPipe",
    # torch.utils.data.datapipes.map.grouping
    "BatcherMapDataPipe",
    # torch.utils.data.datapipes.map.utils
    "SequenceWrapperMapDataPipe",
    # torch.utils.data.datapipes.utils.decoder
    # torch.utils.data.dataset
    # torch.utils.data.distributed
    "DistributedSampler",
    # torch.utils.dlpack
    # torch.utils.file_baton
    # torch.utils.flop_counter
    # torch.utils.hipify.hipify_python
    "GeneratedFileCleaner",
    # torch.utils.mkldnn
    # torch.utils.mobile_optimizer
    # torch.utils.show_pickle
    # torch.utils.tensorboard.writer
    # torch.utils.throughput_benchmark
    "WeakIdKeyDictionary",
    "WeakTensorKeyDictionary",
]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"

# General information about the project.
project = "PyTorch"
copyright = "2023, PyTorch Contributors"
author = "PyTorch Contributors"
torch_version = str(torch.__version__)

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# TODO: change to [:2] at v1.0
version = "main (" + torch_version + " )"
# The full version, including alpha/beta/rc tags.
# TODO: verify this works as expected
release = "main"

# Customized html_title here.
# Default is " ".join(project, release, "documentation") if not set
# NOTE(review): reconstructed from a corrupted source — the `master_doc`,
# `project`, `release` assignments and the RELEASE gate below were lost in
# extraction and restored from context; confirm against upstream conf.py.
if RELEASE:
    # Turn 1.11.0aHASH into 1.11
    # Note: the release candidates should no longer have the aHASH suffix, but in any
    # case we wish to leave only major.minor, even for rc builds.
    version = ".".join(torch_version.split(".")[:2])
    html_title = " ".join((project, version, "documentation"))
    release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): the `language = ...` assignment appears to have been lost in
# extraction — restore from upstream conf.py if needed.

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# Disable docstring inheritance
autodoc_inherit_docstrings = False

# Show type hints in the description
autodoc_typehints = "description"

# Add parameter types if the parameter is documented in the docstring
autodoc_typehints_description_target = "documented_params"
# Type aliases for common types
# Sphinx type aliases only works with Postponed Evaluation of Annotations
# (PEP 563) enabled (via `from __future__ import annotations`), which keeps the
# type annotations in string form instead of resolving them to actual types.
# However, PEP 563 does not work well with JIT, which uses the type information
# to generate the code. Therefore, the following dict does not have any effect
# until PEP 563 is supported by JIT and enabled in files.
# NOTE(review): closing brace restored after extraction corruption; some alias
# entries may have been lost — confirm against upstream conf.py.
autodoc_type_aliases = {
    "_size_1_t": "int or tuple[int]",
    "_size_2_t": "int or tuple[int, int]",
    "_size_3_t": "int or tuple[int, int, int]",
    "_size_4_t": "int or tuple[int, int, int, int]",
    "_size_5_t": "int or tuple[int, int, int, int, int]",
    "_size_6_t": "int or tuple[int, int, int, int, int, int]",
    "_size_any_opt_t": "int or None or tuple",
    "_size_2_opt_t": "int or None or 2-tuple",
    "_size_3_opt_t": "int or None or 3-tuple",
    "_ratio_2_t": "float or tuple[float, float]",
    "_ratio_3_t": "float or tuple[float, float, float]",
    "_ratio_any_t": "float or tuple",
    "_tensor_list_t": "Tensor or tuple[Tensor]",
}

# Enable overriding of function signatures in the first line of the docstring.
autodoc_docstring_signature = True
# -- katex javascript in header
#
# app.add_javascript("https://cdn.jsdelivr.net/npm/katex@0.10.0-beta/dist/katex.min.js")

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pytorch_sphinx_theme"
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "pytorch_project": "docs",
    "canonical_url": "https://pytorch.org/docs/stable/",
    "collapse_navigation": False,
    "display_version": True,
    "analytics_id": "GTM-T8XT4PS",
}
html_logo = "_static/img/pytorch-logo-dark-unstable.png"
# NOTE(review): upstream gates the release logo on RELEASE; the conditional
# was lost in extraction and is restored here — confirm against upstream.
if RELEASE:
    html_logo = "_static/img/pytorch-logo-dark.svg"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
from sphinx.ext.coverage import CoverageBuilder
3562
def coverage_post_process(app, exception):
    """Sphinx ``build-finished`` hook for the coverage builder.

    Checks that every public ``torch`` submodule has a corresponding
    ``automodule`` (or ``py:module``) entry in some .rst file, and appends
    any failures to the coverage tool's ``python.txt`` output file, which
    CI asserts stays empty.

    NOTE(review): reconstructed from a corrupted source; several statements
    (the early returns, the ``raise``, and the accumulator setup) were lost
    in extraction and restored from the surviving fragments — confirm
    against upstream docs/source/conf.py.
    """
    if exception is not None:
        return

    # Only run this test for the coverage build
    if not isinstance(app.builder, CoverageBuilder):
        return

    if not torch.distributed.is_available():
        raise RuntimeError(
            "The coverage tool cannot run with a version "
            "of PyTorch that was built with USE_DISTRIBUTED=0 "
            "as this module's API changes."
        )

    # These are all the modules that have "automodule" in an rst file
    # These modules are the ones for which coverage is checked
    # Here, we make sure that no module is missing from that list
    modules = app.env.domaindata["py"]["modules"]

    # We go through all the torch submodules and make sure they are
    # present in the documented-module list; internal ones are skipped.
    missing = set()

    def is_not_internal(modname):
        # A module is internal if any component of its dotted name starts
        # with an underscore.
        split_name = modname.split(".")
        for name in split_name:
            if name[0] == "_":
                return False
        return True

    # The walk function does not return the top module
    if "torch" not in modules:
        missing.add("torch")

    for _, modname, ispkg in pkgutil.walk_packages(
        path=torch.__path__, prefix=torch.__name__ + "."
    ):
        if is_not_internal(modname):
            if modname not in modules:
                missing.add(modname)

    output = []
    if missing:
        mods = ", ".join(missing)
        output.append(
            f"\nYou added the following module(s) to the PyTorch namespace '{mods}' "
            "but they have no corresponding entry in a doc .rst file. You should "
            "either make sure that the .rst file that contains the module's documentation "
            "properly contains either '.. automodule:: mod_name' (if you do not want "
            "the paragraph added by the automodule, you can simply use '.. py:module:: mod_name') "
            " or make the module private (by appending an '_' at the beginning of its name)."
        )

    # The output file is hard-coded by the coverage tool
    # Our CI is setup to fail if any line is added to this file
    output_file = path.join(app.outdir, "python.txt")

    if output:
        with open(output_file, "a") as f:
            for o in output:
                f.write(o)
def process_docstring(app, what_, name, obj, options, lines):
    """
    Custom process to transform docstring lines Remove "Ignore" blocks

    Args:
        app (sphinx.application.Sphinx): the Sphinx application object

        what (str):
            the type of the object which the docstring belongs to (one of
            "module", "class", "exception", "function", "method", "attribute")

        name (str): the fully qualified name of the object

        obj: the object itself

        options: the options given to the directive: an object with
            attributes inherited_members, undoc_members, show_inheritance
            and noindex that are true if the flag option of same name was
            given to the auto directive

        lines (List[str]): the lines of the docstring, see above

    References:
        https://www.sphinx-doc.org/en/1.5.1/_modules/sphinx/ext/autodoc.html
        https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
    """
    remove_directives = [
        # Remove all xdoctest directives
        re.compile(r"\s*>>>\s*#\s*x?doctest:\s*.*"),
        re.compile(r"\s*>>>\s*#\s*x?doc:\s*.*"),
    ]
    filtered_lines = [
        line for line in lines if not any(pat.match(line) for pat in remove_directives)
    ]
    # Modify the lines inplace
    lines[:] = filtered_lines

    # make sure there is a blank line at the end
    if lines and lines[-1].strip():
        lines.append("")
# Called automatically by Sphinx, making this `conf.py` an "extension".
def setup(app):
    """Register stylesheets and event hooks with the Sphinx application.

    NOTE(review): the ``def setup(app):`` line was lost in extraction and is
    restored here; the body matches the surviving fragments — confirm
    against upstream conf.py.
    """
    # NOTE: in Sphinx 1.8+ `html_css_files` is an official configuration value
    # and can be moved outside of this function (and the setup(app) function
    # can be deleted).
    html_css_files = [
        "https://cdn.jsdelivr.net/npm/katex@0.10.0-beta/dist/katex.min.css"
    ]

    # In Sphinx 1.8 it was renamed to `add_css_file`, 1.7 and prior it is
    # `add_stylesheet` (deprecated in 1.8).
    add_css = getattr(app, "add_css_file", app.add_stylesheet)
    for css_file in html_css_files:
        add_css(css_file)

    app.connect("build-finished", coverage_post_process)
    app.connect("autodoc-process-docstring", process_docstring)
# From PyTorch 1.5, we now use autogenerated files to document classes and
# functions. This breaks older references since
# https://pytorch.org/docs/stable/torch.html#torch.flip
# moved to
# https://pytorch.org/docs/stable/generated/torch.flip.html
# which breaks older links from blog posts, stack overflow answers and more.
# To mitigate that, we add an id="torch.flip" in an appropriated place
# in torch.html by overriding the visit_reference method of html writers.
# Someday this can be removed, once the old links fade away

from sphinx.writers import html, html5


def replace(Klass):
    # Monkey-patch Klass.visit_reference so references into autogenerated
    # "generated/" pages also emit an anchor carrying the old fragment id.
    # NOTE(review): the `def replace(Klass):` line was lost in extraction and
    # is restored here — confirm against upstream conf.py.
    old_call = Klass.visit_reference

    def visit_reference(self, node):
        if "refuri" in node and "generated" in node.get("refuri"):
            ref = node.get("refuri")
            ref_anchor = ref.split("#")
            if len(ref_anchor) > 1:
                # Only add the id if the node href and the text match,
                # i.e. the href is "torch.flip#torch.flip" and the content is
                # "torch.flip" or "flip" since that is a signal the node refers
                # to autogenerated content
                anchor = ref_anchor[1]
                txt = node.parent.astext()
                if txt == anchor or txt == anchor.split(".")[-1]:
                    self.body.append(f'<p id="{ref_anchor[1]}"/>')
        return old_call(self, node)

    Klass.visit_reference = visit_reference


replace(html.HTMLTranslator)
replace(html5.HTML5Translator)
# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = "PyTorchdoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# NOTE(review): the target filename and document class were reconstructed
# from upstream PyTorch conf.py — confirm against repo history.
latex_documents = [
    (
        master_doc,
        "pytorch.tex",
        "PyTorch Documentation",
        "Torch Contributors",
        "manual",
    )
]
3764
# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "PyTorch", "PyTorch Documentation", [author], 1)]
3771
# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "PyTorch",
        "PyTorch Documentation",
        author,
        "PyTorch",
        "One line description of project.",
        "Miscellaneous",
    )
]
3789
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3", None),
    "numpy": ("https://numpy.org/doc/stable", None),
}
3795
import sphinx.ext.doctest

# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
# See http://stackoverflow.com/a/41184353/3343043

from docutils import nodes
from sphinx import addnodes
from sphinx.util.docfields import TypedField

# Without this, doctest adds any example with a `>>>` as a test
doctest_test_doctest_blocks = ""
doctest_default_flags = sphinx.ext.doctest.doctest.ELLIPSIS
# NOTE(review): the body of this setup snippet was reconstructed from
# upstream PyTorch conf.py — confirm against repo history.
doctest_global_setup = """
import torch
try:
    import torchvision
except ImportError:
    torchvision = None
"""
3816
def patched_make_field(self, types, domain, items, **kw):
    """Build a docutils field node without cross-referencing :ivar: targets.

    Replacement for ``TypedField.make_field`` (see the StackOverflow link
    above): field argument names are rendered as plain strong text instead
    of xrefs, while builtin type names inside the type annotation are still
    linked into the Python intersphinx domain.
    """
    # `kw` catches `env=None` needed for newer sphinx while maintaining
    # backwards compatibility when passed along further down!

    # type: (List, unicode, Tuple) -> nodes.field
    def handle_item(fieldarg, content):
        par = nodes.paragraph()
        par += addnodes.literal_strong("", fieldarg)  # Patch: this line added
        # par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
        #                            addnodes.literal_strong))
        if fieldarg in types:
            par += nodes.Text(" (")
            # NOTE: using .pop() here to prevent a single type node to be
            # inserted twice into the doctree, which leads to
            # inconsistencies later when references are resolved
            fieldtype = types.pop(fieldarg)
            if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
                typename = fieldtype[0].astext()
                # Link bare builtin type names to the Python domain so that
                # e.g. "int" in an annotation resolves via intersphinx.
                builtin_types = ["int", "long", "float", "bool", "type"]
                for builtin_type in builtin_types:
                    pattern = rf"(?<![\w.]){builtin_type}(?![\w.])"
                    repl = f"python:{builtin_type}"
                    typename = re.sub(pattern, repl, typename)
                par.extend(
                    self.make_xrefs(
                        self.typerolename,
                        domain,
                        typename,
                        addnodes.literal_emphasis,
                        **kw,
                    )
                )
            else:
                par += fieldtype
            par += nodes.Text(")")
        par += nodes.Text(" -- ")
        par += content
        return par

    fieldname = nodes.field_name("", self.label)
    if len(items) == 1 and self.can_collapse:
        # Single collapsible entry: render it inline rather than as a list.
        fieldarg, content = items[0]
        bodynode = handle_item(fieldarg, content)
    else:
        bodynode = self.list_type()
        for fieldarg, content in items:
            bodynode += nodes.list_item("", handle_item(fieldarg, content))
    fieldbody = nodes.field_body("", bodynode)
    return nodes.field("", fieldname, fieldbody)


TypedField.make_field = patched_make_field
3869
# sphinx-copybutton: strip both ">>> " and "... " prompts when copying
# code examples (the pattern is a regex, hence the flag below).
copybutton_prompt_text = r">>> |\.\.\. "
copybutton_prompt_is_regexp = True