{
  "being_migrated": {
    "torch.nn.intrinsic": "torch.ao.nn.intrinsic",
    "torch.nn.intrinsic.modules": "torch.ao.nn.intrinsic.modules",
    "torch.nn.intrinsic.modules.fused": "torch.ao.nn.intrinsic.modules.fused",
    "torch.nn.intrinsic.qat": "torch.ao.nn.intrinsic.qat",
    "torch.nn.intrinsic.qat.modules": "torch.ao.nn.intrinsic.qat.modules",
    "torch.nn.intrinsic.qat.modules.conv_fused": "torch.ao.nn.intrinsic.qat.modules.conv_fused",
    "torch.nn.intrinsic.qat.modules.linear_fused": "torch.ao.nn.intrinsic.qat.modules.linear_fused",
    "torch.nn.intrinsic.qat.modules.linear_relu": "torch.ao.nn.intrinsic.qat.modules.linear_relu",
    "torch.nn.intrinsic.quantized": "torch.ao.nn.intrinsic.quantized",
    "torch.nn.intrinsic.quantized.modules": "torch.ao.nn.intrinsic.quantized.modules",
    "torch.nn.intrinsic.quantized.modules.bn_relu": "torch.ao.nn.intrinsic.quantized.modules.bn_relu",
    "torch.nn.intrinsic.quantized.modules.conv_relu": "torch.ao.nn.intrinsic.quantized.modules.conv_relu",
    "torch.nn.intrinsic.quantized.modules.linear_relu": "torch.ao.nn.intrinsic.quantized.modules.linear_relu",
    "torch.nn.intrinsic.quantized.dynamic": "torch.ao.nn.intrinsic.quantized.dynamic",
    "torch.nn.intrinsic.quantized.dynamic.modules": "torch.ao.nn.intrinsic.quantized.dynamic.modules",
    "torch.nn.intrinsic.quantized.dynamic.modules.linear_relu": "torch.ao.nn.intrinsic.quantized.dynamic.modules.linear_relu",
    "torch.nn.qat": "torch.ao.nn.qat",
    "torch.nn.qat.dynamic": "torch.ao.nn.qat.dynamic",
    "torch.nn.qat.dynamic.modules": "torch.ao.nn.qat.dynamic.modules",
    "torch.nn.qat.dynamic.modules.linear": "torch.ao.nn.qat.dynamic.modules.linear",
    "torch.nn.qat.modules": "torch.ao.nn.qat.modules",
    "torch.nn.qat.modules.conv": "torch.ao.nn.qat.modules.conv",
    "torch.nn.qat.modules.embedding_ops": "torch.ao.nn.qat.modules.embedding_ops",
    "torch.nn.qat.modules.linear": "torch.ao.nn.qat.modules.linear",
    "torch.nn.quantized.functional": "torch.ao.nn.quantized.functional",
    "torch.nn.quantized": "torch.ao.nn.quantized",
    "torch.nn.quantized.modules": "torch.ao.nn.quantized.modules",
    "torch.nn.quantized.dynamic": "torch.ao.nn.quantized.dynamic",
    "torch.nn.quantized.dynamic.modules": "torch.ao.nn.quantized.dynamic.modules",
    "torch.nn.quantized.dynamic.modules.rnn": "torch.ao.nn.quantized.dynamic.modules.rnn",
    "torch.nn.quantizable": "torch.ao.nn.quantizable",
    "torch.nn.quantizable.modules": "torch.ao.nn.quantizable.modules",
    "torch.nn.quantizable.modules.activation": "torch.ao.nn.quantizable.modules.activation",
    "torch.nn.quantizable.modules.rnn": "torch.ao.nn.quantizable.modules.rnn"
  },
  "torch.backends": [
    "contextmanager"
  ],
  "torch.cuda.comm": [
    "broadcast",
    "broadcast_coalesced",
    "reduce_add",
    "reduce_add_coalesced",
    "scatter",
    "gather"
  ],
  "torch.cuda.nccl": [
    "init_rank",
    "is_available",
    "unique_id",
    "version"
  ],
  "torch.distributed": [
    "AllToAllOptions",
    "AllreduceCoalescedOptions",
    "AllreduceOptions",
    "BarrierOptions",
    "BroadcastOptions",
    "BuiltinCommHookType",
    "Callable",
    "DebugLevel",
    "Dict",
    "Enum",
    "FileStore",
    "GatherOptions",
    "GradBucket",
    "HashStore",
    "Logger",
    "namedtuple",
    "Optional",
    "PrefixStore",
    "ProcessGroup",
    "ProcessGroupGloo",
    "ReduceOp",
    "ReduceOptions",
    "ReduceScatterOptions",
    "Reducer",
    "ScatterOptions",
    "Store",
    "TCPStore",
    "Tuple",
    "Union",
    "get_debug_level",
    "set_debug_level",
    "set_debug_level_from_env",
    "timedelta",
    "ProcessGroupMPI",
    "ProcessGroupNCCL"
  ],
  "torch.distributed.checkpoint.state_dict": [
    "Any",
    "Callable",
    "DDP",
    "DTensor",
    "Dict",
    "DictValueType",
    "FQNS_T",
    "FSDP",
    "FullOptimStateDictConfig",
    "FullStateDictConfig",
    "Iterable",
    "List",
    "ListDictValueType",
    "OptimStateDictConfig",
    "OptimizerStateType",
    "Optional",
    "PrimitiveType",
    "Set",
    "ShardedOptimStateDictConfig",
    "ShardedStateDictConfig",
    "ShardedTensor",
    "StateDictConfig",
    "StateDictType",
    "Tuple",
    "Union",
    "ValueType",
    "asdict",
    "cast",
    "chain",
    "dataclass",
    "field",
    "no_type_check"
  ],
  "torch.distributed.autograd": [
    "DistAutogradContext",
    "backward",
    "get_gradients"
  ],
  "torch.distributed.elastic.events": [
    "Dict",
    "Enum",
    "EventMetadataValue",
    "Optional"
  ],
  "torch.distributed.elastic.events.handlers": [
    "Dict",
    "Optional",
    "ScubaLogHandler",
    "ScubaRdzvLogHandler"
  ],
  "torch.distributed.elastic.metrics": [
    "Optional",
    "get_logger",
    "TorchElasticService"
  ],
  "torch.distributed.elastic.multiprocessing": [
    "Callable",
    "Dict",
    "Tuple",
    "Union",
    "get_logger"
  ],
  "torch.distributed.elastic.multiprocessing.redirects": [
    "contextmanager",
    "partial",
    "redirect_stderr",
    "redirect_stdout"
  ],
  "torch.distributed.elastic.rendezvous": [
    "RendezvousHandlerCreator"
  ],
  "torch.distributed.elastic.rendezvous.api": [
    "ABC",
    "Any",
    "Callable",
    "Dict",
    "Optional",
    "RendezvousHandlerCreator",
    "Store",
    "Tuple",
    "abstractmethod"
  ],
  "torch.distributed.elastic.rendezvous.dynamic_rendezvous": [
    "get_method_name"
  ],
  "torch.distributed.elastic.utils.api": [
    "Any",
    "List",
    "Template"
  ],
  "torch.distributed.elastic.utils.data.elastic_distributed_sampler": [
    "DistributedSampler"
  ],
  "torch.distributed.elastic.utils.logging": [
    "Optional",
    "get_log_level"
  ],
  "torch.distributed.elastic.utils.store": [
    "List",
    "timedelta"
  ],
  "torch.distributed.nn": [
    "Function",
    "ReduceOp",
    "group"
  ],
  "torch.distributed.nn.functional": [
    "Function",
    "ReduceOp",
    "group"
  ],
  "torch.distributed.nn.jit.instantiator": [
    "Optional",
    "get_remote_module_template"
  ],
  "torch.distributed.optim.utils": [
    "Type"
  ],
  "torch.distributed.pipeline.sync.pipe": [
    "Pipeline"
  ],
  "torch.distributed.pipeline.sync.skip.layout": [
    "SkipLayout",
    "inspect_skip_layout"
  ],
  "torch.distributed.pipeline.sync.skip.portal": [
    "Context",
    "Portal",
    "PortalBlue",
    "PortalCopy",
    "PortalOrange"
  ],
  "torch.distributed.pipeline.sync.skip.skippable": [
    "Skippable"
  ],
  "torch.distributed.pipeline.sync.skip.tracker": [
    "SkipTracker",
    "SkipTrackerThroughPotals",
    "ThreadLocal",
    "current_skip_tracker",
    "use_skip_tracker"
  ],
  "torch.distributed.remote_device": [
    "Optional",
    "Union"
  ],
  "torch.distributed.rendezvous": [
    "Dict",
    "FileStore",
    "Iterable",
    "Optional",
    "PrefixStore",
    "Store",
    "TCPStore",
    "Tuple",
    "Union",
    "cast",
    "timedelta",
    "urlparse",
    "urlunparse"
  ],
  "torch.distributed.rpc": [],
  "torch.fft": [
    "Tensor",
    "fft",
    "fft2",
    "fftfreq",
    "fftn",
    "fftshift",
    "hfft",
    "ifft",
    "ifft2",
    "ifftn",
    "ifftshift",
    "ihfft",
    "irfft",
    "irfft2",
    "irfftn",
    "rfft",
    "rfft2",
    "rfftfreq",
    "rfftn"
  ],
  "torch.functional": [
    "istft",
    "pca_lowrank",
    "svd_lowrank"
  ],
  "torch.futures": [
    "Future"
  ],
  "torch.fx": [
    "ProxyableClassMeta",
    "Tracer",
    "symbolic_trace",
    "wrap"
  ],
  "torch.fx.experimental.migrate_gradual_types.z3_types": [
    "D"
  ],
  "torch.fx.experimental.unification.core": [
    "Iterator",
    "assoc",
    "dispatch",
    "isvar",
    "partial",
    "unify",
    "walk"
  ],
  "torch.fx.experimental.unification.dispatch": [
    "dispatch",
    "partial"
  ],
  "torch.fx.experimental.unification.more": [
    "dispatch",
    "reify",
    "unify"
  ],
  "torch.fx.experimental.unification.unification_tools": [
    "first",
    "getter",
    "groupby"
  ],
  "torch.fx.experimental.unification.variable": [
    "contextmanager",
    "dispatch",
    "hashable",
    "isvar"
  ],
  "torch.fx.proxy": [
    "assert_fn"
  ],
  "torch.hub": [
    "HTTPError",
    "Path",
    "Request",
    "tqdm",
    "urlopen",
    "urlparse"
  ],
  "torch.jit": [
    "Attribute",
    "Final",
    "Iterator",
    "ONNXTracedModule",
    "RecursiveScriptClass",
    "RecursiveScriptModule",
    "ScriptModule",
    "ScriptWarning",
    "TopLevelTracedModule",
    "TracedModule",
    "TracerWarning",
    "TracingCheckError",
    "contextmanager",
    "export",
    "fork",
    "freeze",
    "fuser",
    "ignore",
    "interface",
    "is_scripting",
    "is_tracing",
    "jit_module_from_flatbuffer",
    "last_executed_optimized_graph",
    "load",
    "optimize_for_inference",
    "optimized_execution",
    "run_frozen_optimizations",
    "save",
    "save_jit_module_to_flatbuffer",
    "script",
    "script_method",
    "set_fusion_strategy",
    "set_module",
    "trace",
    "trace_module",
    "unused",
    "wait"
  ],
  "torch.jit.annotations": [
    "Any",
    "AnyType",
    "ComplexType",
    "Dict",
    "DictType",
    "EvalEnv",
    "FloatType",
    "IntType",
    "List",
    "ListType",
    "StringType",
    "TensorType",
    "Tuple",
    "TupleType",
    "get_enum_value_type",
    "is_dict",
    "is_function_or_method",
    "is_list",
    "is_optional",
    "is_tensor",
    "is_tuple",
    "is_union",
    "is_vararg"
  ],
  "torch.jit.frontend": [
    "Apply",
    "Assert",
    "Assign",
    "Attribute",
    "AugAssign",
    "BinOp",
    "Break",
    "ClassDef",
    "Const",
    "Continue",
    "Decl",
    "Def",
    "Delete",
    "DictComp",
    "DictLiteral",
    "Dots",
    "EmptyTypeAnnotation",
    "ExprStmt",
    "FalseLiteral",
    "For",
    "FunctionModifiers",
    "Ident",
    "If",
    "List",
    "ListComp",
    "ListLiteral",
    "NoneLiteral",
    "Param",
    "Pass",
    "Property",
    "Raise",
    "Return",
    "Select",
    "SliceExpr",
    "Starred",
    "Stmt",
    "StringLiteral",
    "Subscript",
    "TernaryIf",
    "TrueLiteral",
    "Tuple",
    "TupleLiteral",
    "UnaryOp",
    "Var",
    "While",
    "With",
    "WithItem",
    "dedent",
    "get_qualified_name",
    "get_source_lines_and_file",
    "is_static_fn",
    "make_source_context",
    "namedtuple",
    "parse_def",
    "should_drop",
    "monkeytype_trace"
  ],
  "torch.linalg": [
    "LinAlgError",
    "Tensor",
    "cholesky",
    "cholesky_ex",
    "cond",
    "cross",
    "det",
    "diagonal",
    "eig",
    "eigh",
    "eigvals",
    "eigvalsh",
    "householder_product",
    "inv",
    "inv_ex",
    "ldl_factor",
    "ldl_factor_ex",
    "ldl_solve",
    "lstsq",
    "lu",
    "lu_factor",
    "lu_factor_ex",
    "lu_solve",
    "matmul",
    "matrix_exp",
    "matrix_norm",
    "matrix_power",
    "matrix_rank",
    "multi_dot",
    "norm",
    "pinv",
    "qr",
    "slogdet",
    "solve",
    "solve_ex",
    "solve_triangular",
    "svd",
    "svdvals",
    "tensorinv",
    "tensorsolve",
    "vander",
    "vecdot",
    "vector_norm"
  ],
  "torch.multiprocessing": [
    "Array",
    "AuthenticationError",
    "Barrier",
    "BoundedSemaphore",
    "BufferTooShort",
    "Condition",
    "Event",
    "JoinableQueue",
    "Lock",
    "Manager",
    "Pipe",
    "Pool",
    "Process",
    "ProcessContext",
    "ProcessError",
    "ProcessExitedException",
    "ProcessRaisedException",
    "Queue",
    "RLock",
    "RawArray",
    "RawValue",
    "Semaphore",
    "SimpleQueue",
    "SpawnContext",
    "TimeoutError",
    "Value",
    "active_children",
    "allow_connection_pickling",
    "cpu_count",
    "current_process",
    "freeze_support",
    "get_all_start_methods",
    "get_context",
    "get_logger",
    "get_start_method",
    "init_reductions",
    "log_to_stderr",
    "set_executable",
    "set_forkserver_preload",
    "set_start_method",
    "spawn",
    "start_processes",
    "parent_process"
  ],
  "torch.multiprocessing.reductions": [
    "ForkingPickler",
    "Union",
    "check_serializing_named_tensor",
    "register_after_fork"
  ],
  "torch.multiprocessing.spawn": [
    "Optional"
  ],
  "torch.nested": [
    "nested_tensor",
    "to_padded_tensor"
  ],
  "torch.nn.common_types": [
    "Optional",
    "Tensor",
    "Tuple",
    "TypeVar",
    "Union"
  ],
  "torch.nn.functional": [
    "Callable",
    "DType",
    "List",
    "Optional",
    "Tensor",
    "Tuple",
    "Union",
    "adaptive_avg_pool1d",
    "avg_pool1d",
    "avg_pool2d",
    "avg_pool3d",
    "bilinear",
    "boolean_dispatch",
    "celu_",
    "channel_shuffle",
    "conv1d",
    "conv2d",
    "conv3d",
    "conv_tbc",
    "conv_transpose1d",
    "conv_transpose2d",
    "conv_transpose3d",
    "cosine_similarity",
    "elu_",
    "gelu",
    "handle_torch_function",
    "hardshrink",
    "hardtanh_",
    "has_torch_function",
    "has_torch_function_unary",
    "has_torch_function_variadic",
    "leaky_relu_",
    "linear",
    "logsigmoid",
    "native_channel_shuffle",
    "one_hot",
    "pairwise_distance",
    "pdist",
    "pixel_shuffle",
    "pixel_unshuffle",
    "prelu",
    "relu_",
    "rrelu_",
    "scaled_dot_product_attention",
    "selu_",
    "softplus",
    "softshrink",
    "threshold_"
  ],
  "torch.nn.init": [
    "Tensor"
  ],
  "torch.nn.intrinsic.modules": [
    "_FusedModule"
  ],
  "torch.nn.modules.linear": [
    "NonDynamicallyQuantizableLinear"
  ],
  "torch.nn.modules.rnn": [
    "apply_permutation"
  ],
  "torch.nn.parallel": [
    "DistributedDataParallelCPU"
  ],
  "torch.nn.parallel.comm": [
    "List"
  ],
  "torch.nn.parallel.parallel_apply": [
    "ExceptionWrapper",
    "autocast"
  ],
  "torch.nn.parallel.replicate": [
    "OrderedDict"
  ],
  "torch.nn.parallel.scatter_gather": [
    "is_namedtuple"
  ],
  "torch.nn.parameter": [
    "OrderedDict"
  ],
  "torch.nn.utils.rnn": [
    "bind",
    "PackedSequence_"
  ],
  "torch.nn.utils.convert_parameters": [
    "Iterable",
    "Optional"
  ],
  "torch.onnx": [
    "Dict",
    "OperatorExportTypes",
    "Optional",
    "TensorProtoDataType",
    "TrainingMode"
  ],
  "torch.overrides": [
    "BaseTorchFunctionMode",
    "TorchFunctionMode",
    "TorchFunctionModeMeta",
    "enable_torch_function_mode",
    "get_default_nowrap_functions",
    "has_torch_function"
  ],
  "torch.package.analyze.is_from_package": [
    "Any",
    "ModuleType",
    "is_mangled"
  ],
  "torch.package.find_file_dependencies": [
    "List",
    "Optional",
    "Tuple"
  ],
  "torch.package.glob_group": [
    "GlobPattern",
    "Iterable",
    "Union"
  ],
  "torch.profiler": [
    "DeviceType",
    "ProfilerActivity",
    "kineto_available",
    "record_function"
  ],
  "torch.quantization": [
    "ABC",
    "DeQuantStub",
    "FakeQuantize",
    "FakeQuantizeBase",
    "FixedQParamsFakeQuantize",
    "FusedMovingAvgObsFakeQuantize",
    "HistogramObserver",
    "MinMaxObserver",
    "MovingAverageMinMaxObserver",
    "MovingAveragePerChannelMinMaxObserver",
    "NoopObserver",
    "ObserverBase",
    "PerChannelMinMaxObserver",
    "PlaceholderObserver",
    "QConfig",
    "QConfigAny",
    "QConfigDynamic",
    "QuantStub",
    "QuantType",
    "QuantWrapper",
    "RecordingObserver",
    "_add_module_to_qconfig_obs_ctr",
    "add_quant_dequant",
    "_assert_valid_qconfig",
    "convert",
    "convert_dynamic_jit",
    "convert_jit",
    "default_fixed_qparams_range_0to1_fake_quant",
    "default_affine_fixed_qparams_fake_quant",
    "default_debug_observer",
    "default_dynamic_quant_observer",
    "default_fake_quant",
    "default_float_qparams_observer",
    "default_fused_act_fake_quant",
    "default_fused_per_channel_wt_fake_quant",
    "default_fused_wt_fake_quant",
    "default_histogram_fake_quant",
    "default_histogram_observer",
    "default_observer",
    "default_per_channel_weight_fake_quant",
    "default_per_channel_weight_observer",
    "default_placeholder_observer",
    "default_fixed_qparams_range_neg1to1_fake_quant",
    "default_symmetric_fixed_qparams_fake_quant",
    "default_weight_fake_quant",
    "default_weight_observer",
    "disable_fake_quant",
    "disable_observer",
    "enable_fake_quant",
    "enable_observer",
    "fuse_conv_bn",
    "fuse_conv_bn_jit",
    "fuse_conv_bn_relu",
    "fuse_linear_bn",
    "fuse_modules",
    "get_default_compare_output_module_list",
    "get_default_dynamic_quant_module_mappings",
    "get_default_float_to_quantized_operator_mappings",
    "get_default_qat_module_mappings",
    "get_default_qat_qconfig",
    "get_default_qconfig",
    "get_default_qconfig_propagation_list",
    "get_default_static_quant_module_mappings",
    "get_dynamic_quant_module_class",
    "get_fuser_method",
    "get_observer_state_dict",
    "get_quantized_operator",
    "get_static_quant_module_class",
    "load_observer_state_dict",
    "no_observer_set",
    "prepare",
    "prepare_dynamic_jit",
    "prepare_jit",
    "prepare_qat",
    "propagate_qconfig_",
    "qconfig_equals",
    "_get_quant_type_to_str",
    "quantize",
    "quantize_dynamic",
    "quantize_dynamic_jit",
    "quantize_jit",
    "quantize_qat",
    "script_qconfig",
    "script_qconfig_dict",
    "swap_module"
  ],
  "torch.quantization.fake_quantize": [
    "FakeQuantize",
    "FakeQuantizeBase",
    "FixedQParamsFakeQuantize",
    "FusedMovingAvgObsFakeQuantize",
    "default_fixed_qparams_range_0to1_fake_quant",
    "default_affine_fixed_qparams_fake_quant",
    "default_fake_quant",
    "default_fused_act_fake_quant",
    "default_fused_per_channel_wt_fake_quant",
    "default_fused_wt_fake_quant",
    "default_histogram_fake_quant",
    "default_per_channel_weight_fake_quant",
    "default_fixed_qparams_range_neg1to1_fake_quant",
    "default_symmetric_fixed_qparams_fake_quant",
    "default_weight_fake_quant",
    "disable_fake_quant",
    "disable_observer",
    "enable_fake_quant",
    "enable_observer"
  ],
  "torch.quantization.fuse_modules": [
    "fuse_conv_bn",
    "fuse_conv_bn_relu",
    "fuse_known_modules",
    "fuse_modules",
    "get_fuser_method"
  ],
  "torch.quantization.fuser_method_mappings": [
    "fuse_conv_bn",
    "fuse_conv_bn_relu",
    "fuse_linear_bn",
    "get_fuser_method"
  ],
  "torch.quantization.observer": [
    "ABC",
    "HistogramObserver",
    "MinMaxObserver",
    "MovingAverageMinMaxObserver",
    "MovingAveragePerChannelMinMaxObserver",
    "NoopObserver",
    "ObserverBase",
    "PerChannelMinMaxObserver",
    "PlaceholderObserver",
    "RecordingObserver",
    "default_debug_observer",
    "default_dynamic_quant_observer",
    "default_float_qparams_observer",
    "default_histogram_observer",
    "default_observer",
    "default_per_channel_weight_observer",
    "default_placeholder_observer",
    "default_weight_observer",
    "get_observer_state_dict",
    "load_observer_state_dict"
  ],
  "torch.quantization.qconfig": [
    "QConfig",
    "QConfigAny",
    "QConfigDynamic",
    "_add_module_to_qconfig_obs_ctr",
    "_assert_valid_qconfig",
    "get_default_qat_qconfig",
    "get_default_qconfig",
    "qconfig_equals"
  ],
  "torch.quantization.quant_type": [
    "QuantType",
    "_get_quant_type_to_str"
  ],
  "torch.quantization.quantization_mappings": [
    "get_default_compare_output_module_list",
    "get_default_dynamic_quant_module_mappings",
    "get_default_float_to_quantized_operator_mappings",
    "get_default_qat_module_mappings",
    "get_default_qconfig_propagation_list",
    "get_default_static_quant_module_mappings",
    "get_dynamic_quant_module_class",
    "get_quantized_operator",
    "get_static_quant_module_class",
    "no_observer_set"
  ],
  "torch.quantization.quantize": [
    "add_quant_dequant",
    "convert",
    "prepare",
    "prepare_qat",
    "propagate_qconfig_",
    "quantize",
    "quantize_dynamic",
    "quantize_qat",
    "swap_module"
  ],
  "torch.quantization.quantize_jit": [
    "convert_dynamic_jit",
    "convert_jit",
    "fuse_conv_bn_jit",
    "prepare_dynamic_jit",
    "prepare_jit",
    "quantize_dynamic_jit",
    "quantize_jit",
    "script_qconfig",
    "script_qconfig_dict"
  ],
  "torch.quantization.stubs": [
    "DeQuantStub",
    "QuantStub",
    "QuantWrapper"
  ],
  "torch.quasirandom": [
    "Optional"
  ],
  "torch.random": [
    "Generator"
  ],
  "torch.serialization": [
    "Any",
    "BinaryIO",
    "Dict",
    "IO",
    "Optional",
    "Storage",
    "Tuple",
    "Type",
    "Union",
    "cast",
    "closing",
    "contextmanager",
    "get_source_lines_and_file"
  ],
  "torch.sparse": [
    "BFloat16Tensor",
    "ByteTensor",
    "CharTensor",
    "DoubleTensor",
    "FloatTensor",
    "HalfTensor",
    "IntTensor",
    "LongTensor",
    "ShortTensor",
    "addmm",
    "log_softmax",
    "mm",
    "softmax"
  ],
  "torch.special": [
    "airy_ai",
    "bessel_j0",
    "bessel_j1",
    "bessel_y0",
    "bessel_y1",
    "chebyshev_polynomial_t",
    "chebyshev_polynomial_u",
    "chebyshev_polynomial_v",
    "chebyshev_polynomial_w",
    "digamma",
    "entr",
    "erf",
    "erfc",
    "erfcx",
    "erfinv",
    "exp2",
    "expit",
    "expm1",
    "gammainc",
    "gammaincc",
    "gammaln",
    "hermite_polynomial_h",
    "hermite_polynomial_he",
    "i0",
    "i0e",
    "i1",
    "i1e",
    "laguerre_polynomial_l",
    "legendre_polynomial_p",
    "log1p",
    "log_ndtr",
    "log_softmax",
    "logit",
    "logsumexp",
    "modified_bessel_i0",
    "modified_bessel_i1",
    "modified_bessel_k0",
    "modified_bessel_k1",
    "multigammaln",
    "ndtr",
    "ndtri",
    "polygamma",
    "psi",
    "round",
    "scaled_modified_bessel_k0",
    "scaled_modified_bessel_k1",
    "shifted_chebyshev_polynomial_t",
    "shifted_chebyshev_polynomial_u",
    "shifted_chebyshev_polynomial_v",
    "shifted_chebyshev_polynomial_w",
    "sinc",
    "softmax",
    "spherical_bessel_j0",
    "xlog1py",
    "xlogy",
    "zeta"
  ],
  "torch.storage": [
    "Any",
    "Storage",
    "Type",
    "TypeVar",
    "Union",
    "cast",
    "lru_cache"
  ],
  "torch.testing": [
    "FileCheck",
    "all_types",
    "all_types_and",
    "all_types_and_complex",
    "all_types_and_complex_and",
    "all_types_and_half",
    "assert_allclose",
    "assert_close",
    "complex_types",
    "double_types",
    "empty_types",
    "floating_and_complex_types",
    "floating_and_complex_types_and",
    "floating_types",
    "floating_types_and",
    "floating_types_and_half",
    "get_all_complex_dtypes",
    "get_all_device_types",
    "get_all_dtypes",
    "get_all_fp_dtypes",
    "get_all_int_dtypes",
    "get_all_math_dtypes",
    "integral_types",
    "integral_types_and",
    "make_non_contiguous",
    "make_tensor",
    "rand",
    "randn"
  ],
  "torch.types": [
    "Any",
    "Device",
    "List",
    "Number",
    "Sequence",
    "Tuple",
    "Union"
  ],
  "torch.utils.benchmark.utils.compare": [
    "Colorize",
    "Table",
    "optional_min"
  ],
  "torch.utils.benchmark.utils.cpp_jit": [
    "Any",
    "CallgrindModuleType",
    "List",
    "Optional",
    "TimeitModuleType"
  ],
  "torch.utils.benchmark.utils.fuzzer": [
    "dtype_size",
    "prod"
  ],
  "torch.utils.benchmark.utils.sparse_fuzzer": [
    "FuzzedTensor",
    "Number",
    "Optional",
    "Tuple",
    "Union"
  ],
  "torch.utils.benchmark.utils.timer": [
    "CPPTimer",
    "timer"
  ],
  "torch.utils.benchmark.utils.valgrind_wrapper.timer_interface": [
    "GlobalsBridge",
    "Serialization",
    "wrapper_singleton"
  ],
  "torch.utils.data": [
    "_DatasetKind",
    "argument_validation",
    "default_collate",
    "default_convert",
    "functional_datapipe",
    "get_worker_info",
    "guaranteed_datapipes_determinism",
    "non_deterministic",
    "runtime_validation",
    "runtime_validation_disabled"
  ],
  "torch.utils.data.dataloader": [
    "default_collate",
    "default_convert",
    "get_worker_info"
  ],
  "torch.utils.data.datapipes.dataframe": [
    "DFIterDataPipe"
  ],
  "torch.utils.dlpack": [
    "Any",
    "to_dlpack"
  ],
  "torch": [
    "BFloat16Storage",
    "BFloat16Tensor",
    "ComplexDoubleStorage",
    "ComplexFloatStorage",
    "DisableTorchFunction",
    "DisableTorchFunctionSubclass",
    "Generator",
    "HalfStorage",
    "HalfTensor",
    "QInt32Storage",
    "QInt8Storage",
    "QUInt2x4Storage",
    "QUInt4x2Storage",
    "QUInt8Storage",
    "Storage",
    "TypedStorage",
    "_adaptive_avg_pool2d",
    "_adaptive_avg_pool3d",
    "_add_batch_dim",
    "_add_relu",
    "_add_relu_",
    "_addmm_activation",
    "_aminmax",
    "_amp_foreach_non_finite_check_and_unscale_",
    "_amp_update_scale_",
    "_assert_async",
    "_batch_norm_impl_index",
    "_cast_Byte",
    "_cast_Char",
    "_cast_Double",
    "_cast_Float",
    "_cast_Half",
    "_cast_Int",
    "_cast_Long",
    "_cast_Short",
    "_choose_qparams_per_tensor",
    "_coalesce",
    "_compute_linear_combination",
    "_conj",
    "_conj_copy",
    "_conj_physical",
    "_convert_indices_from_coo_to_csr",
    "_convert_indices_from_csr_to_coo",
    "_convolution",
    "_convolution_mode",
    "_copy_from",
    "_copy_from_and_resize",
    "_ctc_loss",
    "_cudnn_ctc_loss",
    "_cudnn_init_dropout_state",
    "_cudnn_rnn",
    "_cudnn_rnn_flatten_weight",
    "_cufft_clear_plan_cache",
    "_cufft_get_plan_cache_max_size",
    "_cufft_get_plan_cache_size",
    "_cufft_set_plan_cache_max_size",
    "_cummax_helper",
    "_cummin_helper",
    "_debug_has_internal_overlap",
    "_det_lu_based_helper_backward_helper",
    "_dim_arange",
    "_dirichlet_grad",
    "_disable_functionalization",
    "_efficientzerotensor",
    "_embedding_bag",
    "_embedding_bag_forward_only",
    "_empty_affine_quantized",
    "_empty_per_channel_affine_quantized",
    "_enable_functionalization",
    "_euclidean_dist",
    "_fake_quantize_learnable_per_channel_affine",
    "_fake_quantize_learnable_per_tensor_affine",
    "_fake_quantize_per_tensor_affine_cachemask_tensor_qparams",
    "_fft_c2c",
    "_fft_c2r",
    "_fft_r2c",
    "_foreach_abs",
    "_foreach_abs_",
    "_foreach_acos",
    "_foreach_acos_",
    "_foreach_add",
    "_foreach_add_",
    "_foreach_addcdiv",
    "_foreach_addcdiv_",
    "_foreach_addcmul",
    "_foreach_addcmul_",
    "_foreach_asin",
    "_foreach_asin_",
    "_foreach_atan",
    "_foreach_atan_",
    "_foreach_ceil",
    "_foreach_ceil_",
    "_foreach_cos",
    "_foreach_cos_",
    "_foreach_cosh",
    "_foreach_cosh_",
    "_foreach_div",
    "_foreach_div_",
    "_foreach_erf",
    "_foreach_erf_",
    "_foreach_erfc",
    "_foreach_erfc_",
    "_foreach_exp",
    "_foreach_exp_",
    "_foreach_expm1",
    "_foreach_expm1_",
    "_foreach_floor",
    "_foreach_floor_",
    "_foreach_frac",
    "_foreach_frac_",
    "_foreach_lgamma",
    "_foreach_lgamma_",
    "_foreach_log",
    "_foreach_log10",
    "_foreach_log10_",
    "_foreach_log1p",
    "_foreach_log1p_",
    "_foreach_log2",
    "_foreach_log2_",
    "_foreach_log_",
    "_foreach_maximum",
    "_foreach_minimum",
    "_foreach_mul",
    "_foreach_mul_",
    "_foreach_neg",
    "_foreach_neg_",
    "_foreach_norm",
    "_foreach_reciprocal",
    "_foreach_reciprocal_",
    "_foreach_round",
    "_foreach_round_",
    "_foreach_sigmoid",
    "_foreach_sigmoid_",
    "_foreach_sign",
    "_foreach_sign_",
    "_foreach_sin",
    "_foreach_sin_",
    "_foreach_sinh",
    "_foreach_sinh_",
    "_foreach_sqrt",
    "_foreach_sqrt_",
    "_foreach_sub",
    "_foreach_sub_",
    "_foreach_tan",
    "_foreach_tan_",
    "_foreach_tanh",
    "_foreach_tanh_",
    "_foreach_trunc",
    "_foreach_trunc_",
    "_foreach_zero_",
    "_from_functional_tensor",
    "_fused_dropout",
    "_fused_moving_avg_obs_fq_helper",
    "_fw_primal_copy",
    "_grid_sampler_2d_cpu_fallback",
    "_has_compatible_shallow_copy_type",
    "_histogramdd_bin_edges",
    "_histogramdd_from_bin_cts",
    "_histogramdd_from_bin_tensors",
    "_index_put_impl_",
    "_indices_copy",
    "_is_functional_tensor",
    "_is_zerotensor",
    "_linalg_check_errors",
    "_linalg_qr_helper",
    "_linalg_svd",
    "_linalg_solve_ex",
    "_log_softmax",
    "_log_softmax_backward_data",
    "_logcumsumexp",
    "_lu_with_info",
    "_make_dual",
    "_make_dual_copy",
    "_make_per_channel_quantized_tensor",
    "_make_per_tensor_quantized_tensor",
    "_masked_scale",
    "_masked_softmax",
    "_mkldnn_reshape",
    "_mkldnn_transpose",
    "_mkldnn_transpose_",
    "_neg_view",
    "_neg_view_copy",
    "_nested_from_padded",
    "_nested_from_padded_and_nested_example",
    "_nnpack_available",
1270
    "_nnpack_spatial_convolution",
1271
    "_pack_padded_sequence",
1272
    "_pad_packed_sequence",
1273
    "_pin_memory",
1274
    "_remove_batch_dim",
1275
    "_reshape_alias_copy",
1276
    "_reshape_from_tensor",
1277
    "_rowwise_prune",
1278
    "_sample_dirichlet",
1279
    "_saturate_weight_to_fp16",
1280
    "_shape_as_tensor",
1281
    "_sobol_engine_draw",
1282
    "_sobol_engine_ff_",
1283
    "_sobol_engine_initialize_state_",
1284
    "_sobol_engine_scramble_",
1285
    "_softmax",
1286
    "_softmax_backward_data",
1287
    "_sparse_broadcast_to",
1288
    "_sparse_broadcast_to_copy",
1289
    "_sparse_coo_tensor_unsafe",
1290
    "_sparse_csr_prod",
1291
    "_sparse_csr_sum",
1292
    "_sparse_csr_tensor_unsafe",
1293
    "_sparse_log_softmax_backward_data",
1294
    "_sparse_softmax_backward_data",
1295
    "_sparse_sparse_matmul",
1296
    "_sparse_sum",
1297
    "_stack",
1298
    "_standard_gamma",
1299
    "_standard_gamma_grad",
1300
    "_sync",
1301
    "_test_serialization_subcmul",
1302
    "_to_cpu",
1303
    "_to_functional_tensor",
1304
    "_torch_cuda_cu_linker_symbol_op",
1305
    "_trilinear",
1306
    "_unique",
1307
    "_unique2",
1308
    "_unpack_dual",
1309
    "_use_cudnn_ctc_loss",
1310
    "_use_cudnn_rnn_flatten_weight",
1311
    "_validate_sparse_compressed_tensor_args",
1312
    "_validate_sparse_coo_tensor_args",
1313
    "_validate_sparse_csr_tensor_args",
1314
    "_values_copy",
1315
    "_weight_norm",
1316
    "_weight_norm_interface",
1317
    "autocast",
1318
    "broadcast_shapes",
1319
    "candidate",
1320
    "compiled_with_cxx11_abi",
1321
    "from_dlpack",
1322
    "lobpcg",
1323
    "lu",
1324
    "obj",
1325
    "segment_reduce",
1326
    "set_default_dtype",
1327
    "set_grad_enabled",
1328
    "set_printoptions",
1329
    "unique"
1330
  ],
1331
  "torch.ao.ns.fx.graph_matcher": [
    "Any",
    "Dict",
    "FakeQuantizeBase",
    "Graph",
    "GraphModule",
    "List",
    "NSNodeTargetType",
    "NSSubgraph",
    "Node",
    "ObserverBase",
    "Optional",
    "Set",
    "Tuple",
    "end_node_matches_reversed_fusion",
    "get_base_name_to_sets_of_related_ops",
    "get_reversed_fusions",
    "get_type_a_related_to_b",
    "get_unmatchable_types_map",
    "getattr_from_fqn"
  ],
  "torch.ao.ns.fx.graph_passes": [
    "Any",
    "Callable",
    "Dict",
    "Graph",
    "GraphModule",
    "List",
    "NSNodeTargetType",
    "NSSingleResultValuesType",
    "NSSubgraph",
    "Node",
    "NodeInputOrOutputType",
    "Optional",
    "Set",
    "Tuple",
    "Union",
    "get_arg_indices_of_inputs_to_log",
    "get_new_attr_name_with_prefix",
    "get_node_first_input_and_output_type",
    "get_node_input_qparams",
    "get_node_type_to_io_type_map",
    "get_normalized_nth_input",
    "get_number_of_non_param_args",
    "get_target_type_str",
    "getattr_from_fqn",
    "map_arg",
    "op_type_supports_shadowing",
    "return_first_non_observer_node"
  ],
  "torch.ao.ns.fx.mappings": [
    "Callable",
    "Dict",
    "List",
    "NSNodeTargetType",
    "Optional",
    "Set",
    "Tuple",
    "get_native_backend_config"
  ],
  "torch.ao.ns.fx.n_shadows_utils": [
    "Any",
    "Callable",
    "Dict",
    "Graph",
    "GraphModule",
    "List",
    "NSResultsType",
    "NSSingleResultValuesType",
    "Node",
    "Optional",
    "QConfigAny",
    "QConfigMapping",
    "Set",
    "Tuple",
    "get_normalized_nth_input",
    "get_target_type_str",
    "getattr_from_fqn",
    "tree_map"
  ],
  "torch.ao.ns.fx.ns_types": [
    "Any",
    "Callable",
    "Dict",
    "List",
    "NSNodeTargetType",
    "NSResultsType",
    "NSSingleResultType",
    "NamedTuple",
    "Node",
    "Union"
  ],
  "torch.ao.ns.fx.pattern_utils": [
    "Any",
    "Callable",
    "Dict",
    "FakeQuantizeBase",
    "GraphModule",
    "List",
    "NSFusionElType",
    "NSFusionType",
    "NSNodeTargetType",
    "Node",
    "ObserverBase",
    "Set",
    "Tuple",
    "Union",
    "get_native_backend_config",
    "getattr_from_fqn"
  ],
  "torch.ao.ns.fx.utils": [
    "Callable",
    "Dict",
    "FakeQuantizeBase",
    "GraphModule",
    "List",
    "NSNodeTargetType",
    "NSResultsType",
    "Node",
    "ObserverBase",
    "Optional",
    "Set",
    "Tuple",
    "Union",
    "getattr_from_fqn"
  ],
  "torch.ao.ns.fx.weight_utils": [
    "Callable",
    "Dict",
    "GraphModule",
    "List",
    "NSSingleResultType",
    "NSSingleResultValuesType",
    "Node",
    "Optional",
    "get_target_type_str",
    "getattr_from_fqn",
    "return_first_non_observer_node"
  ],
  "torch.ao.pruning": [
    "get_dynamic_sparse_quantized_mapping",
    "get_static_sparse_quantized_mapping"
  ],
  "torch.ao.quantization.fx.lstm_utils": [
    "Any",
    "BackendConfig",
    "Callable",
    "FakeQuantizeBase",
    "Optional",
    "QConfig",
    "QConfigMapping",
    "Tuple",
    "convert_to_reference_fx",
    "default_weight_fake_quant",
    "default_weight_observer",
    "prepare_fx"
  ],
  "torch.ao.quantization.fx.tracer": [
    "ScopeContextManager"
  ],
  "torch.ao.quantization.pt2e.prepare": [
    "Any",
    "Argument",
    "Dict",
    "EdgeOrNode",
    "FakeTensor",
    "GraphModule",
    "Node",
    "ObserverOrFakeQuantize",
    "PrepareCustomConfig",
    "QConfigAny",
    "QConfigMapping",
    "QuantizationAnnotation",
    "SharedQuantizationSpec",
    "Tuple",
    "Union"
  ],
  "torch.ao.quantization.pt2e.qat_utils": [
    "Any",
    "Callable",
    "DerivedQuantizationSpec",
    "Dict",
    "EdgeOrNode",
    "Graph",
    "GraphModule",
    "List",
    "Node",
    "QuantizationSpecBase",
    "SharedQuantizationSpec",
    "Tuple",
    "fold_bn_weights_into_conv_node",
    "get_aten_graph_module",
    "replace_pattern_with_filters"
  ],
  "torch.ao.quantization.quantize_fx": [
    "Any",
    "BackendConfig",
    "ConvertCustomConfig",
    "Dict",
    "FuseCustomConfig",
    "GraphModule",
    "ObservedGraphModule",
    "Optional",
    "PrepareCustomConfig",
    "QConfigMapping",
    "QuantizationTracer",
    "Scope",
    "ScopeContextManager",
    "Tuple",
    "Union",
    "convert",
    "fuse",
    "get_custom_module_class_keys",
    "get_skipped_module_name_and_classes",
    "get_tensorrt_backend_config",
    "prepare"
  ],
  "torch.ao.quantization.quantizer.utils": [
    "List",
    "Node",
    "QuantizationAnnotation"
  ],
  "torch.ao.quantization.quantizer.xnnpack_quantizer": [
    "OperatorConfig",
    "OperatorPatternType",
    "QuantizationConfig",
    "propagate_annotation"
  ],
  "torch.ao.quantization.quantizer.xnnpack_quantizer_utils": [
    "register_annotator"
  ],
  "torch.backends.xeon.run_cpu": [
    "ArgumentParser",
    "Dict",
    "List",
    "RawTextHelpFormatter",
    "Std",
    "expanduser",
    "start_processes"
  ],
  "torch.distributed.algorithms.ddp_comm_hooks.mixed_precision_hooks": [
    "Any",
    "Variable",
    "dataclass",
    "no_type_check"
  ],
  "torch.distributed.algorithms.model_averaging.hierarchical_model_averager": [
    "Dict",
    "Iterable",
    "OrderedDict",
    "Union"
  ],
  "torch.distributed.argparse_util": [
    "Action"
  ],
  "torch.distributed.collective_utils": [
    "Any",
    "Callable",
    "Generic",
    "List",
    "Optional",
    "Tuple",
    "TypeVar",
    "Union",
    "cast",
    "dataclass"
  ],
  "torch.distributed.elastic.rendezvous.c10d_rendezvous_backend": [
    "Any",
    "FileStore",
    "NodeState",
    "Optional",
    "RendezvousBackend",
    "RendezvousConnectionError",
    "RendezvousError",
    "RendezvousParameters",
    "RendezvousStateError",
    "Store",
    "TCPStore",
    "Token",
    "Tuple",
    "b64decode",
    "b64encode",
    "cast",
    "construct_and_record_rdzv_event",
    "parse_rendezvous_endpoint",
    "timedelta"
  ],
  "torch.distributed.elastic.rendezvous.etcd_rendezvous": [
    "EtcdStore",
    "Optional",
    "RendezvousClosedError",
    "RendezvousError",
    "RendezvousHandler",
    "RendezvousParameters",
    "RendezvousTimeoutError",
    "cas_delay",
    "parse_rendezvous_endpoint"
  ],
  "torch.distributed.elastic.rendezvous.etcd_rendezvous_backend": [
    "EtcdAlreadyExist",
    "EtcdClient",
    "EtcdCompareFailed",
    "EtcdException",
    "EtcdKeyNotFound",
    "EtcdResult",
    "EtcdStore",
    "Optional",
    "RendezvousBackend",
    "RendezvousConnectionError",
    "RendezvousParameters",
    "RendezvousStateError",
    "Store",
    "Token",
    "Tuple",
    "b64decode",
    "b64encode",
    "cast",
    "parse_rendezvous_endpoint"
  ],
  "torch.distributed.elastic.rendezvous.etcd_server": [
    "Optional",
    "TextIO",
    "Union"
  ],
  "torch.distributed.elastic.rendezvous.etcd_store": [
    "Optional",
    "Store",
    "b64decode",
    "b64encode"
  ],
  "torch.distributed.elastic.rendezvous.static_tcp_rendezvous": [
    "Optional",
    "PrefixStore",
    "RendezvousHandler",
    "RendezvousParameters",
    "Store",
    "TCPStore",
    "Tuple",
    "cast",
    "parse_rendezvous_endpoint"
  ],
  "torch.distributed.elastic.utils.distributed": [
    "closing",
    "get_logger"
  ],
  "torch.distributed.fsdp.sharded_grad_scaler": [
    "Any",
    "Dict",
    "GradScaler",
    "Iterable",
    "List",
    "OptState",
    "Optional",
    "ProcessGroup",
    "Sequence",
    "Tuple",
    "Union",
    "defaultdict",
    "overload"
  ],
  "torch.distributed.launch": [
    "get_args_parser",
    "run"
  ],
  "torch.distributed.pipeline.sync": [
    "NoChunk",
    "WithDevice"
  ],
  "torch.distributed.rpc.rref_proxy": [
    "Future",
    "partial",
    "rpc_async"
  ],
  "torch.distributed.run": [
    "ArgumentParser",
    "Callable",
    "LaunchConfig",
    "List",
    "Std",
    "Tuple",
    "Union",
    "check_env",
    "elastic_launch",
    "env",
    "get_logger",
    "macros",
    "record"
  ],
  "torch.fx.annotate": [
    "Proxy",
    "compatibility"
  ],
  "torch.fx.experimental.accelerator_partitioner": [
    "Deque",
    "Device",
    "Dict",
    "GraphModule",
    "List",
    "NamedTuple",
    "Node",
    "NodeLatency",
    "Partition",
    "PartitionMode",
    "PartitionerConfig",
    "Set",
    "Tuple",
    "deque",
    "get_extra_size_of",
    "get_latency_of_partitioned_graph",
    "get_partition_to_latency_mapping",
    "get_size_of_all_nodes",
    "map_arg",
    "split_module"
  ],
  "torch.fx.experimental.graph_gradual_typechecker": [
    "BatchNorm2d",
    "Callable",
    "Conv2d",
    "Dict",
    "Equality",
    "Node",
    "Target",
    "TensorType",
    "Var",
    "is_consistent",
    "is_more_precise",
    "reduce"
  ],
  "torch.fx.experimental.merge_matmul": [
    "Dict",
    "List",
    "Node",
    "Tuple",
    "legalize_graph",
    "symbolic_trace"
  ],
  "torch.fx.experimental.meta_tracer": [
    "Any",
    "Callable",
    "Dict",
    "Optional",
    "Union"
  ],
  "torch.fx.experimental.migrate_gradual_types.constraint": [
    "TensorType"
  ],
  "torch.fx.experimental.migrate_gradual_types.constraint_generator": [
    "ApplyBroadcasting",
    "BatchNorm2d",
    "BinConstraintD",
    "BinConstraintT",
    "CalcConv",
    "CalcMaxPool",
    "CalcProduct",
    "Callable",
    "CanReshape",
    "Conj",
    "Conv2d",
    "DGreatestUpperBound",
    "DVar",
    "Dict",
    "Disj",
    "F",
    "GetItem",
    "GetItemTensor",
    "IndexSelect",
    "Iterable",
    "Node",
    "T",
    "TGreatestUpperBound",
    "TVar",
    "Target",
    "TensorType",
    "Transpose",
    "gen_bvar",
    "gen_dvar",
    "gen_nat_constraints",
    "gen_tensor_dims",
    "gen_tvar"
  ],
  "torch.fx.experimental.migrate_gradual_types.constraint_transformation": [
    "ApplyBroadcasting",
    "BinConstraintD",
    "BinConstraintT",
    "CalcConv",
    "CalcMaxPool",
    "CalcProduct",
    "Callable",
    "CanReshape",
    "Conj",
    "Constraint",
    "DGreatestUpperBound",
    "DVar",
    "Dict",
    "Disj",
    "F",
    "GetItem",
    "GetItemTensor",
    "IndexSelect",
    "List",
    "Prod",
    "T",
    "TGreatestUpperBound",
    "TVar",
    "TensorType",
    "Transpose",
    "gen_dvar",
    "gen_nat_constraints",
    "gen_tensor_dims"
  ],
  "torch.fx.experimental.migrate_gradual_types.transform_to_z3": [
    "BVar",
    "BinConstraintD",
    "BinConstraintT",
    "Conj",
    "ConstraintGenerator",
    "D",
    "DVar",
    "Disj",
    "F",
    "Prod",
    "T",
    "TVar",
    "TensorType",
    "is_algebraic_expression",
    "is_bool_expr",
    "is_dim",
    "transform_constraint"
  ],
  "torch.fx.experimental.migrate_gradual_types.util": [
    "BVar",
    "BinConstraintD",
    "DVar",
    "TVar"
  ],
  "torch.fx.experimental.normalize": [
    "AnnotateTypesWithSchema",
    "Any",
    "Argument",
    "Callable",
    "Dict",
    "Node",
    "Optional",
    "Proxy",
    "Target",
    "Transformer",
    "Tuple",
    "create_type_hint",
    "map_aggregate",
    "normalize_function",
    "normalize_module"
  ],
  "torch.fx.experimental.optimization": [
    "Any",
    "Argument",
    "Dict",
    "Enum",
    "Iterable",
    "List",
    "Optional",
    "ShapeProp",
    "Target",
    "Tuple",
    "Type",
    "cast",
    "defaultdict",
    "fuse_conv_bn_eval"
  ],
  "torch.fx.experimental.partitioner_utils": [
    "Dict",
    "Enum",
    "List",
    "NamedTuple",
    "Node",
    "Set",
    "map_arg"
  ],
  "torch.fx.experimental.proxy_tensor": [
    "PreDispatchTorchFunctionMode",
    "ProxySymDispatchMode",
    "ProxyTorchDispatchMode",
    "decompose",
    "disable_autocast_cache",
    "disable_proxy_modes_tracing",
    "extract_val",
    "fake_signature",
    "fetch_sym_proxy",
    "fetch_object_proxy",
    "get_isolated_graphmodule",
    "get_proxy_slot",
    "get_torch_dispatch_modes",
    "has_proxy_slot",
    "is_sym_node",
    "maybe_disable_fake_tensor_mode",
    "maybe_handle_decomp",
    "proxy_call",
    "set_meta",
    "set_original_aten_op",
    "set_proxy_slot",
    "snapshot_fake",
    "thunkify",
    "track_tensor",
    "track_tensor_tree",
    "wrap_key",
    "wrapper_and_args_for_make_fx"
  ],
  "torch.fx.experimental.rewriter": [
    "Any",
    "Callable",
    "Dict",
    "FunctionType",
    "Graph",
    "Optional",
    "Tracer",
    "Union",
    "cast",
    "normalize_source_lines"
  ],
  "torch.fx.experimental.schema_type_annotation": [
    "Any",
    "Argument",
    "Dict",
    "Optional",
    "Target",
    "Transformer",
    "Tuple"
  ],
  "torch.fx.experimental.sym_dispatch_mode": [
    "sym_function_mode",
    "set_sym_function_mode"
  ],
  "torch.fx.experimental.sym_node": [
    "SymNode",
    "method_to_operator",
    "magic_methods",
    "to_node",
    "wrap_node",
    "is_channels_last_contiguous_2d",
    "is_channels_last_contiguous_3d",
    "is_channels_last_strides_2d",
    "is_channels_last_strides_3d",
    "is_non_overlapping_and_dense_indicator",
    "sympy_is_channels_last_contiguous_2d",
    "sympy_is_channels_last_contiguous_3d",
    "sympy_is_channels_last_strides_2d",
    "sympy_is_channels_last_strides_3d",
    "sympy_is_channels_last_strides_generic",
    "is_contiguous",
    "sympy_is_contiguous",
    "sympy_is_contiguous_generic",
    "sym_sqrt"
  ],
  "torch.fx.experimental.symbolic_shapes": [
    "Constraint",
    "ConstraintViolationError",
    "DimConstraints",
    "DimDynamic",
    "DynamicDimConstraintPrinter",
    "EqualityConstraint",
    "GuardOnDataDependentSymNode",
    "LoggingShapeGuardPrinter",
    "RelaxedUnspecConstraint",
    "RuntimeAssert",
    "ShapeGuardPrinter",
    "StrictMinMaxConstraint",
    "bind_symbols",
    "cast_symbool_to_symint_guardless",
    "constrain_range",
    "constrain_unify",
    "definitely_false",
    "definitely_true",
    "error",
    "eval_guards",
    "eval_is_non_overlapping_and_dense",
    "expect_true",
    "find_symbol_binding_fx_nodes",
    "free_unbacked_symbols",
    "fx_placeholder_targets",
    "fx_placeholder_vals",
    "guard_bool",
    "has_hint",
    "is_symbolic",
    "parallel_and",
    "parallel_or",
    "safe_expand",
    "uninteresting_files"
  ],
  "torch.fx.experimental.unification.match": [
    "first",
    "freeze",
    "groupby",
    "isvar",
    "reify",
    "unify"
  ],
  "torch.fx.experimental.unify_refinements": [
    "Refine",
    "TensorType",
    "Var",
    "unify"
  ],
  "torch.fx.experimental.validator": [
    "bisect"
  ],
  "torch.fx.passes.backends.cudagraphs": [
    "CapabilityBasedPartitioner",
    "FakeTensorProp",
    "OperatorSupport",
    "tree_map"
  ],
  "torch.fx.passes.dialect.common.cse_pass": [
    "Any",
    "Dict",
    "Graph",
    "GraphModule",
    "Node",
    "PassBase",
    "PassResult",
    "Tuple",
    "tree_flatten"
  ],
  "torch.fx.passes.infra.partitioner": [
    "Deque",
    "Dict",
    "GraphModule",
    "Iterable",
    "List",
    "Node",
    "OperatorSupportBase",
    "Optional",
    "Sequence",
    "Set",
    "copy",
    "deque",
    "fuse_by_partitions"
  ],
  "torch.fx.passes.tests.test_pass_manager": [
    "PassManager",
    "inplace_wrapper",
    "these_before_those_pass_constraint",
    "this_before_that_pass_constraint"
  ],
  "torch.fx.passes.utils.fuser_utils": [
    "Dict",
    "Graph",
    "GraphModule",
    "List",
    "Node",
    "NodeList",
    "NodeSet",
    "SimpleQueue",
    "Tuple",
    "compatibility",
    "legalize_graph",
    "lift_subgraph_as_module"
  ],
  "torch.fx.tensor_type": [
    "Var",
    "compatibility"
  ],
  "torch.jit.generate_bytecode": [
    "List"
  ],
  "torch.jit.mobile": [
    "validate_map_location"
  ],
  "torch.jit.quantized": [
    "List",
    "Optional",
    "PackedSequence",
    "Tensor",
    "Tuple"
  ],
  "torch.jit.unsupported_tensor_ops": [
    "Any",
    "Dict",
    "dedent"
  ],
  "torch.monitor": [
    "Aggregation",
    "Event",
    "EventHandlerHandle",
    "Stat",
    "data_value_t",
    "log_event",
    "register_event_handler",
    "unregister_event_handler"
  ],
  "torch.multiprocessing.pool": [
    "SimpleQueue"
  ],
  "torch.multiprocessing.queue": [
    "ForkingPickler"
  ],
  "torch.nn.quantized.dynamic.modules.conv": [
    "Conv1d",
    "Conv2d",
    "Conv3d",
    "ConvTranspose1d",
    "ConvTranspose2d",
    "ConvTranspose3d"
  ],
  "torch.nn.quantized.dynamic.modules.linear": [
    "Linear"
  ],
  "torch.nn.quantized.modules.activation": [
    "ELU",
    "Hardswish",
    "LeakyReLU",
    "MultiheadAttention",
    "PReLU",
    "ReLU6",
    "Sigmoid",
    "Softmax"
  ],
  "torch.nn.quantized.modules.batchnorm": [
    "BatchNorm2d",
    "BatchNorm3d"
  ],
  "torch.nn.quantized.modules.conv": [
    "Conv1d",
    "Conv2d",
    "Conv3d",
    "ConvTranspose1d",
    "ConvTranspose2d",
    "ConvTranspose3d"
  ],
  "torch.nn.quantized.modules.dropout": [
    "Dropout"
  ],
  "torch.nn.quantized.modules.embedding_ops": [
    "Embedding",
    "EmbeddingBag",
    "EmbeddingPackedParams"
  ],
  "torch.nn.quantized.modules.functional_modules": [
    "FXFloatFunctional",
    "FloatFunctional",
    "QFunctional"
  ],
  "torch.nn.quantized.modules.linear": [
    "Linear",
    "LinearPackedParams"
  ],
  "torch.nn.quantized.modules.normalization": [
    "GroupNorm",
    "InstanceNorm1d",
    "InstanceNorm2d",
    "InstanceNorm3d",
    "LayerNorm"
  ],
  "torch.nn.quantized.modules.rnn": [
    "LSTM"
  ],
  "torch.nn.quantized.modules.utils": [
    "WeightedQuantizedModule"
  ],
  "torch.nn.utils.prune": [
    "ABC",
    "Iterable",
    "Tuple",
    "abstractmethod"
  ],
  "torch.onnx.verification": [
    "Any",
    "Callable",
    "Collection",
    "Dict",
    "FrozenSet",
    "List",
    "Mapping",
    "Number",
    "Optional",
    "Sequence",
    "Set",
    "Tuple",
    "Union"
  ],
  "torch.quantization.fx": [
    "convert",
    "fuse",
    "prepare"
  ],
  "torch.quantization.fx.convert": [
    "convert"
  ],
  "torch.quantization.fx.fuse": [
    "fuse"
  ],
  "torch.quantization.fx.fusion_patterns": [
    "DefaultFuseHandler",
    "FuseHandler"
  ],
  "torch.quantization.fx.graph_module": [
    "FusedGraphModule",
    "GraphModule",
    "ObservedGraphModule",
    "ObservedStandaloneGraphModule",
    "QuantizedGraphModule"
  ],
  "torch.quantization.fx.match_utils": [
    "MatchAllNode"
  ],
  "torch.quantization.fx.pattern_utils": [
    "QuantizeHandler",
    "get_default_fusion_patterns",
    "get_default_output_activation_post_process_map",
    "get_default_quant_patterns"
  ],
  "torch.quantization.fx.prepare": [
    "prepare"
  ],
  "torch.quantization.fx.quantization_patterns": [
    "BatchNormQuantizeHandler",
    "BinaryOpQuantizeHandler",
    "CatQuantizeHandler",
    "ConvReluQuantizeHandler",
    "CopyNodeQuantizeHandler",
    "CustomModuleQuantizeHandler",
    "DefaultNodeQuantizeHandler",
    "EmbeddingQuantizeHandler",
    "FixedQParamsOpQuantizeHandler",
    "GeneralTensorShapeOpQuantizeHandler",
    "LinearReLUQuantizeHandler",
    "QuantizeHandler",
    "RNNDynamicQuantizeHandler",
    "StandaloneModuleQuantizeHandler"
  ],
  "torch.quantization.fx.quantization_types": [
    "Pattern",
    "QuantizerCls"
  ],
  "torch.quantization.fx.utils": [
    "all_node_args_have_no_tensors",
    "assert_and_get_unique_device",
    "create_getattr_from_value",
    "get_custom_module_class_keys",
    "get_linear_prepack_op_for_dtype",
    "get_new_attr_name_with_prefix",
    "get_non_observable_arg_indexes_and_types",
    "get_qconv_prepack_op",
    "graph_module_from_producer_nodes",
    "maybe_get_next_module"
  ],
  "torch.quantization.quantize_fx": [
    "ObservedGraphModule",
    "QuantizationTracer",
    "Scope",
    "ScopeContextManager",
    "convert_fx",
    "fuse_fx",
    "prepare_fx",
    "prepare_qat_fx"
  ],
  "torch.quantization.utils": [
    "activation_dtype",
    "activation_is_int8_quantized",
    "activation_is_statically_quantized",
    "calculate_qmin_qmax",
    "check_min_max_valid",
    "get_combined_dict",
    "get_qconfig_dtypes",
    "get_qparam_dict",
    "get_quant_type",
    "get_swapped_custom_module_class",
    "getattr_from_fqn",
    "is_per_channel",
    "is_per_tensor",
    "weight_dtype",
    "weight_is_quantized",
    "weight_is_statically_quantized"
  ],
  "torch.utils.benchmark": [
    "Number",
    "Optional",
    "Tuple",
    "Union",
    "timer"
  ],
  "torch.utils.benchmark.examples.op_benchmark": [
    "BinaryOpFuzzer",
    "Timer",
    "UnaryOpFuzzer"
  ],
  "torch.utils.benchmark.examples.spectral_ops_fuzz_test": [
    "ArgumentParser",
    "Iterable",
    "SpectralOpFuzzer",
    "namedtuple"
  ],
  "torch.utils.benchmark.op_fuzzers.binary": [
    "FuzzedParameter",
    "FuzzedTensor",
    "Fuzzer",
    "ParameterAlias"
  ],
  "torch.utils.benchmark.op_fuzzers.sparse_binary": [
    "FuzzedParameter",
    "FuzzedSparseTensor",
    "Fuzzer",
    "ParameterAlias"
  ],
  "torch.utils.benchmark.op_fuzzers.sparse_unary": [
    "FuzzedParameter",
    "FuzzedSparseTensor",
    "Fuzzer",
    "ParameterAlias"
  ],
  "torch.utils.benchmark.op_fuzzers.spectral": [
    "power_range"
  ],
  "torch.utils.benchmark.op_fuzzers.unary": [
    "FuzzedParameter",
    "FuzzedTensor",
    "Fuzzer",
    "ParameterAlias"
  ],
  "torch.utils.benchmark.utils.compile": [
    "bench_loop"
  ],
  "torch.utils.bundled_inputs": [
    "Any",
    "Callable",
    "Dict",
    "List",
    "ListType",
    "NamedTuple",
    "Optional",
    "Sequence",
    "Tuple",
    "TupleType",
    "TypeVar",
    "Union",
    "wrap_cpp_module"
  ],
  "torch.utils.collect_env": [
    "namedtuple"
  ],
  "torch.utils.data.datapipes.gen_pyi": [
    "Any",
    "Dict",
    "List",
    "Set",
    "Tuple",
    "Union",
    "defaultdict"
  ],
  "torch.utils.data.datapipes.utils.snapshot": [
    "IterDataPipe",
    "apply_random_seed"
  ],
  "torch.utils.flop_counter": [
    "addmm_flop",
    "baddbmm_flop",
    "bmm_flop",
    "conv_backward_flop",
    "conv_flop",
    "conv_flop_count",
    "convert_num_with_suffix",
    "convert_to_percent_str",
    "get_shape",
    "get_suffix_str",
    "mm_flop",
    "normalize_tuple",
    "sdpa_backward_flop",
    "sdpa_backward_flop_count",
    "sdpa_flop",
    "sdpa_flop_count",
    "shape_wrapper",
    "transpose_shape"
  ],
  "torch.utils.jit.log_extract": [
    "Any",
    "List",
    "Timer",
    "Tuple",
    "cast",
    "contextmanager"
  ],
  "torch.utils.mobile_optimizer": [
    "Enum",
    "List",
    "MobileOptimizerType",
    "Optional",
    "Set"
  ],
  "torch.utils.model_dump": [
    "main"
  ],
  "torch.utils.model_zoo": [
    "load_url",
    "tqdm"
  ],
  "torch.utils.tensorboard": [
    "RecordWriter"
  ],
  "torch.ao.quantization.experimental.APoT_tensor": [
    "APoTQuantizer"
  ],
  "torch.ao.quantization.experimental.fake_quantize": [
    "APoTObserver",
    "FakeQuantizeBase",
    "Tensor"
  ],
  "torch.ao.quantization.experimental.fake_quantize_function": [
    "dequantize_APoT",
    "quantize_APoT",
    "Tensor"
  ],
  "torch.ao.quantization.experimental.linear": [
    "APoTObserver",
    "quantize_APoT",
    "WeightedQuantizedModule"
  ],
  "torch.ao.quantization.experimental.observer": [
    "apot_to_float",
    "float_to_apot",
    "ObserverBase"
  ],
  "torch.ao.quantization.experimental.qconfig": [
    "APoTFakeQuantize",
    "default_symmetric_fake_quant",
    "default_weight_symmetric_fake_quant",
    "FakeQuantize",
    "MinMaxObserver",
    "QConfig"
  ],
  "torch.ao.quantization.experimental.quantizer": [
    "apot_to_float",
    "float_to_apot",
    "quant_dequant_util",
    "Tensor"
  ],
  "torch.ao.sparsity": [
    "BaseScheduler",
    "BaseSparsifier",
    "CubicSL",
    "FakeSparsity",
    "fqn_to_module",
    "get_arg_info_from_tensor_fqn",
    "get_dynamic_sparse_quantized_mapping",
    "get_static_sparse_quantized_mapping",
    "LambdaSL",
    "module_to_fqn",
    "NearlyDiagonalSparsifier",
    "WeightNormSparsifier"
  ],
  "torch.ao.sparsity.scheduler.base_scheduler": [
    "BaseScheduler"
  ],
  "torch.ao.sparsity.scheduler.cubic_scheduler": [
    "CubicSL"
  ],
  "torch.ao.sparsity.scheduler.lambda_scheduler": [
    "LambdaSL"
  ],
  "torch.ao.sparsity.sparsifier.base_sparsifier": [
    "BaseSparsifier"
  ],
  "torch.ao.sparsity.sparsifier.nearly_diagonal_sparsifier": [
    "NearlyDiagonalSparsifier"
  ],
  "torch.ao.sparsity.sparsifier.utils": [
    "FakeSparsity",
    "fqn_to_module",
2497
    "get_arg_info_from_tensor_fqn",
2498
    "module_to_fqn"
2499
  ],
2500
  "torch.ao.sparsity.sparsifier.weight_norm_sparsifier": [
2501
    "WeightNormSparsifier"
2502
  ],
2503
  "torch.csrc.jit.tensorexpr.codegen_external": [
2504
    "FileManager",
2505
    "parse_native_yaml"
2506
  ],
2507
  "torch.distributed.checkpoint.examples.async_checkpointing_example": [
2508
    "FSDP",
2509
    "init_device_mesh"
2510
  ],
2511
  "torch.distributed.checkpoint.examples.fsdp_checkpoint_example": [
2512
    "FSDP",
2513
    "load_sharded_optimizer_state_dict",
2514
    "StateDictType"
2515
  ],
2516
  "torch.distributed.checkpoint.examples.stateful_example": [
2517
    "FSDP",
2518
    "init_device_mesh"
2519
  ],
2520
  "torch.distributed.elastic.events.fb.scuba": [
2521
    "await_sync",
2522
    "cast",
2523
    "Dict",
2524
    "Enum",
2525
    "Event",
2526
    "EventMetadataValue",
2527
    "List",
2528
    "Optional",
2529
    "RdzvEvent",
2530
    "RuntimeEnvironment",
2531
    "TorchelasticRdzvLogEntry",
2532
    "TorchelasticStatusLogEntry",
2533
    "WhenceScribeLogged"
2534
  ],
2535
  "torch.distributed.elastic.metrics.fb.service_data_metrics": [
2536
    "MetricHandler",
2537
    "ServiceDataMetrics"
2538
  ],
2539
  "torch.distributed.elastic.metrics.static_init": [
2540
    "configure",
2541
    "get_logger",
2542
    "MetricsConfig",
2543
    "Optional",
2544
    "ServiceDataMetricsHandler",
2545
    "TorchElasticService"
2546
  ],
2547
  "torch.distributed.elastic.multiprocessing.errors.fb.error_handler_fb": [
2548
    "Any",
2549
    "Dict",
2550
    "ErrorHandler",
2551
    "format_exception",
2552
    "generate_python_trace",
2553
    "MastReplyFileErrorCode",
2554
    "Optional",
2555
    "RuntimeEnvironment",
2556
    "RuntimeEnvironmentScheduler",
2557
    "write_formatted_message"
2558
  ],
2559
  "torch.distributed.elastic.multiprocessing.errors.handlers": [
2560
    "ErrorHandlerFB"
2561
  ],
2562
  "torch.distributed.elastic.rendezvous.fb.mast_rendezvous": [
2563
    "create_c10d_store",
2564
    "DistNetworkError",
2565
    "DistStoreError",
2566
    "get_logger",
2567
    "List",
2568
    "Optional",
2569
    "RendezvousClosedError",
2570
    "RendezvousHandler",
2571
    "RendezvousParameters",
2572
    "RendezvousTimeoutError",
2573
    "Tuple"
2574
  ],
2575
  "torch.distributed.elastic.rendezvous.fb.zeus": [
2576
    "gethostname",
2577
    "get_logger",
2578
    "namedtuple",
2579
    "Optional",
2580
    "RendezvousClosedError",
2581
    "RendezvousHandler",
2582
    "RendezvousParameters",
2583
    "RendezvousTimeoutError"
2584
  ],
2585
  "torch.distributed.elastic.rendezvous.registry": [
2586
    "create_handler",
2587
    "RendezvousHandler",
2588
    "RendezvousParameters"
2589
  ],
2590
  "torch.distributed.logging_handlers": [
2591
    "C10D_CATEGORY",
2592
    "Dict",
2593
    "LogCategory",
2594
    "Optional",
2595
    "Sample",
2596
    "ScubaData",
2597
    "signpost",
2598
    "SignpostType"
2599
  ],
2600
  "torch.utils.benchmark.examples.sparse.op_benchmark": [
2601
    "BinaryOpSparseFuzzer",
2602
    "Timer",
2603
    "UnaryOpSparseFuzzer"
2604
  ],
2605
  "torch.version": [
2606
    "get_file_path"
2607
  ]
2608
}
2609
