# mypy: allow-untyped-defs
"""Adds docstrings to Tensor functions"""

import torch._C
from torch._C import _add_docstr as add_docstr
from torch._torch_docs import parse_kwargs, reproducibility_notes


def add_docstr_all(method, docstr):
    add_docstr(getattr(torch._C.TensorBase, method), docstr)


common_args = parse_kwargs(
    """
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.preserve_format``.
"""
)

new_common_args = parse_kwargs(
    """
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
        Default: if None, same :class:`torch.dtype` as this tensor.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if None, same :class:`torch.device` as this tensor.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
    layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
        Default: ``torch.strided``.
"""
)

add_docstr_all(
    "new_tensor",
    """
new_tensor(data, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a new Tensor with :attr:`data` as the tensor data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

.. warning::

    :func:`new_tensor` always copies :attr:`data`. If you have a Tensor
    ``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
    or :func:`torch.Tensor.detach`.
    If you have a numpy array and want to avoid a copy, use
    :func:`torch.from_numpy`.

.. warning::

    When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
    and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
    and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
    The equivalents using ``clone()`` and ``detach()`` are recommended.

Args:
    data (array_like): The returned Tensor copies :attr:`data`.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones((2,), dtype=torch.int8)
    >>> data = [[0, 1], [2, 3]]
    >>> tensor.new_tensor(data)
    tensor([[ 0,  1],
            [ 2,  3]], dtype=torch.int8)

""".format(**new_common_args),
)

add_docstr_all(
    "new_full",
    """
new_full(size, fill_value, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    fill_value (scalar): the number to fill the output tensor with.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones((2,), dtype=torch.float64)
    >>> tensor.new_full((3, 4), 3.141592)
    tensor([[ 3.1416,  3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416,  3.1416]], dtype=torch.float64)

""".format(**new_common_args),
)

add_docstr_all(
    "new_empty",
    """
new_empty(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with uninitialized data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones(())
    >>> tensor.new_empty((2, 3))
    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])

""".format(**new_common_args),
)

add_docstr_all(
    "new_empty_strided",
    """
new_empty_strided(size, stride, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` and strides :attr:`stride` filled with
uninitialized data. By default, the returned Tensor has the same
:class:`torch.dtype` and :class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones(())
    >>> tensor.new_empty_strided((2, 3), (3, 1))
    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])

""".format(**new_common_args),
)

add_docstr_all(
    "new_ones",
    """
new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with ``1``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.tensor((), dtype=torch.int32)
    >>> tensor.new_ones((2, 3))
    tensor([[ 1,  1,  1],
            [ 1,  1,  1]], dtype=torch.int32)

""".format(**new_common_args),
)

add_docstr_all(
    "new_zeros",
    """
new_zeros(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with ``0``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.tensor((), dtype=torch.float64)
    >>> tensor.new_zeros((2, 3))
    tensor([[ 0.,  0.,  0.],
            [ 0.,  0.,  0.]], dtype=torch.float64)

""".format(**new_common_args),
)

add_docstr_all(
    "abs",
    r"""
abs() -> Tensor

See :func:`torch.abs`
""",
)

add_docstr_all(
    "abs_",
    r"""
abs_() -> Tensor

In-place version of :meth:`~Tensor.abs`
""",
)

add_docstr_all(
    "absolute",
    r"""
absolute() -> Tensor

Alias for :func:`abs`
""",
)

add_docstr_all(
    "absolute_",
    r"""
absolute_() -> Tensor

In-place version of :meth:`~Tensor.absolute`
Alias for :func:`abs_`
""",
)

add_docstr_all(
    "acos",
    r"""
acos() -> Tensor

See :func:`torch.acos`
""",
)

add_docstr_all(
    "acos_",
    r"""
acos_() -> Tensor

In-place version of :meth:`~Tensor.acos`
""",
)

add_docstr_all(
    "arccos",
    r"""
arccos() -> Tensor

See :func:`torch.arccos`
""",
)

add_docstr_all(
    "arccos_",
    r"""
arccos_() -> Tensor

In-place version of :meth:`~Tensor.arccos`
""",
)

add_docstr_all(
    "acosh",
    r"""
acosh() -> Tensor

See :func:`torch.acosh`
""",
)

add_docstr_all(
    "acosh_",
    r"""
acosh_() -> Tensor

In-place version of :meth:`~Tensor.acosh`
""",
)

add_docstr_all(
    "arccosh",
    r"""
arccosh() -> Tensor

See :func:`torch.arccosh`
""",
)

add_docstr_all(
    "arccosh_",
    r"""
arccosh_() -> Tensor

In-place version of :meth:`~Tensor.arccosh`
""",
)

add_docstr_all(
    "add",
    r"""
add(other, *, alpha=1) -> Tensor

Add a scalar or tensor to :attr:`self` tensor. If both :attr:`alpha`
and :attr:`other` are specified, each element of :attr:`other` is scaled by
:attr:`alpha` before being used.

When :attr:`other` is a tensor, the shape of :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.

See :func:`torch.add`
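
A small deterministic example of the ``alpha`` scaling described above::

    >>> x = torch.ones(2)
    >>> x.add(torch.tensor([1., 2.]), alpha=2)
    tensor([3., 5.])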
""",
)

add_docstr_all(
    "add_",
    r"""
add_(other, *, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.add`
""",
)

add_docstr_all(
    "addbmm",
    r"""
addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addbmm`
""",
)

add_docstr_all(
    "addbmm_",
    r"""
addbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addbmm`
""",
)

add_docstr_all(
    "addcdiv",
    r"""
addcdiv(tensor1, tensor2, *, value=1) -> Tensor

See :func:`torch.addcdiv`
""",
)

add_docstr_all(
    "addcdiv_",
    r"""
addcdiv_(tensor1, tensor2, *, value=1) -> Tensor

In-place version of :meth:`~Tensor.addcdiv`
""",
)

add_docstr_all(
    "addcmul",
    r"""
addcmul(tensor1, tensor2, *, value=1) -> Tensor

See :func:`torch.addcmul`
""",
)

add_docstr_all(
    "addcmul_",
    r"""
addcmul_(tensor1, tensor2, *, value=1) -> Tensor

In-place version of :meth:`~Tensor.addcmul`
""",
)

add_docstr_all(
    "addmm",
    r"""
addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addmm`
""",
)

add_docstr_all(
    "addmm_",
    r"""
addmm_(mat1, mat2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addmm`
""",
)

add_docstr_all(
    "addmv",
    r"""
addmv(mat, vec, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addmv`
""",
)

add_docstr_all(
    "addmv_",
    r"""
addmv_(mat, vec, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addmv`
""",
)

add_docstr_all(
    "sspaddmm",
    r"""
sspaddmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.sspaddmm`
""",
)

add_docstr_all(
    "smm",
    r"""
smm(mat) -> Tensor

See :func:`torch.smm`
""",
)

add_docstr_all(
    "addr",
    r"""
addr(vec1, vec2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addr`
""",
)

add_docstr_all(
    "addr_",
    r"""
addr_(vec1, vec2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addr`
""",
)

add_docstr_all(
    "align_as",
    r"""
align_as(other) -> Tensor

Permutes the dimensions of the :attr:`self` tensor to match the dimension order
in the :attr:`other` tensor, adding size-one dims for any new names.

This operation is useful for explicit broadcasting by names (see examples).

All of the dims of :attr:`self` must be named in order to use this method.
The resulting tensor is a view on the original tensor.

All dimension names of :attr:`self` must be present in ``other.names``.
:attr:`other` may contain named dimensions that are not in ``self.names``;
the output tensor has a size-one dimension for each of those new names.

To align a tensor to a specific order, use :meth:`~Tensor.align_to`.

Examples::

    # Example 1: Applying a mask
    >>> mask = torch.randint(2, [127, 128], dtype=torch.bool).refine_names('W', 'H')
    >>> imgs = torch.randn(32, 128, 127, 3, names=('N', 'H', 'W', 'C'))
    >>> imgs.masked_fill_(mask.align_as(imgs), 0)


    # Example 2: Applying a per-channel-scale
    >>> def scale_channels(input, scale):
    >>>    scale = scale.refine_names('C')
    >>>    return input * scale.align_as(input)

    >>> num_channels = 3
    >>> scale = torch.randn(num_channels, names=('C',))
    >>> imgs = torch.rand(32, 128, 128, num_channels, names=('N', 'H', 'W', 'C'))
    >>> more_imgs = torch.rand(32, num_channels, 128, 128, names=('N', 'C', 'H', 'W'))
    >>> videos = torch.randn(3, num_channels, 128, 128, 128, names=('N', 'C', 'H', 'W', 'D'))

    # scale_channels is agnostic to the dimension order of the input
    >>> scale_channels(imgs, scale)
    >>> scale_channels(more_imgs, scale)
    >>> scale_channels(videos, scale)

.. warning::
    The named tensor API is experimental and subject to change.

""",
)

add_docstr_all(
    "all",
    r"""
all(dim=None, keepdim=False) -> Tensor

See :func:`torch.all`
""",
)

add_docstr_all(
    "allclose",
    r"""
allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool

See :func:`torch.allclose`
""",
)

add_docstr_all(
    "angle",
    r"""
angle() -> Tensor

See :func:`torch.angle`
""",
)

add_docstr_all(
    "any",
    r"""
any(dim=None, keepdim=False) -> Tensor

See :func:`torch.any`
""",
)

add_docstr_all(
    "apply_",
    r"""
apply_(callable) -> Tensor

Applies the function :attr:`callable` to each element in the tensor, replacing
each element with the value returned by :attr:`callable`.

.. note::

    This function only works with CPU tensors and should not be used in code
    sections that require high performance.
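
Example (a minimal sketch; the callable receives and returns plain Python numbers)::

    >>> t = torch.tensor([1., 2., 3.])
    >>> t.apply_(lambda v: v * 2)
    tensor([2., 4., 6.])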
""",
)

add_docstr_all(
    "asin",
    r"""
asin() -> Tensor

See :func:`torch.asin`
""",
)

add_docstr_all(
    "asin_",
    r"""
asin_() -> Tensor

In-place version of :meth:`~Tensor.asin`
""",
)

add_docstr_all(
    "arcsin",
    r"""
arcsin() -> Tensor

See :func:`torch.arcsin`
""",
)

add_docstr_all(
    "arcsin_",
    r"""
arcsin_() -> Tensor

In-place version of :meth:`~Tensor.arcsin`
""",
)

add_docstr_all(
    "asinh",
    r"""
asinh() -> Tensor

See :func:`torch.asinh`
""",
)

add_docstr_all(
    "asinh_",
    r"""
asinh_() -> Tensor

In-place version of :meth:`~Tensor.asinh`
""",
)

add_docstr_all(
    "arcsinh",
    r"""
arcsinh() -> Tensor

See :func:`torch.arcsinh`
""",
)

add_docstr_all(
    "arcsinh_",
    r"""
arcsinh_() -> Tensor

In-place version of :meth:`~Tensor.arcsinh`
""",
)

add_docstr_all(
    "as_strided",
    r"""
as_strided(size, stride, storage_offset=None) -> Tensor

See :func:`torch.as_strided`
""",
)

add_docstr_all(
    "as_strided_",
    r"""
as_strided_(size, stride, storage_offset=None) -> Tensor

In-place version of :meth:`~Tensor.as_strided`
""",
)

add_docstr_all(
    "atan",
    r"""
atan() -> Tensor

See :func:`torch.atan`
""",
)

add_docstr_all(
    "atan_",
    r"""
atan_() -> Tensor

In-place version of :meth:`~Tensor.atan`
""",
)

add_docstr_all(
    "arctan",
    r"""
arctan() -> Tensor

See :func:`torch.arctan`
""",
)

add_docstr_all(
    "arctan_",
    r"""
arctan_() -> Tensor

In-place version of :meth:`~Tensor.arctan`
""",
)

add_docstr_all(
    "atan2",
    r"""
atan2(other) -> Tensor

See :func:`torch.atan2`
""",
)

add_docstr_all(
    "atan2_",
    r"""
atan2_(other) -> Tensor

In-place version of :meth:`~Tensor.atan2`
""",
)

add_docstr_all(
    "arctan2",
    r"""
arctan2(other) -> Tensor

See :func:`torch.arctan2`
""",
)

add_docstr_all(
    "arctan2_",
    r"""
arctan2_(other) -> Tensor

In-place version of :meth:`~Tensor.arctan2`
""",
)

add_docstr_all(
    "atanh",
    r"""
atanh() -> Tensor

See :func:`torch.atanh`
""",
)

add_docstr_all(
    "atanh_",
    r"""
atanh_() -> Tensor

In-place version of :meth:`~Tensor.atanh`
""",
)

add_docstr_all(
    "arctanh",
    r"""
arctanh() -> Tensor

See :func:`torch.arctanh`
""",
)

add_docstr_all(
    "arctanh_",
    r"""
arctanh_() -> Tensor

In-place version of :meth:`~Tensor.arctanh`
""",
)

add_docstr_all(
    "baddbmm",
    r"""
baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.baddbmm`
""",
)

add_docstr_all(
    "baddbmm_",
    r"""
baddbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.baddbmm`
""",
)

add_docstr_all(
    "bernoulli",
    r"""
bernoulli(*, generator=None) -> Tensor

Returns a result tensor where each :math:`\texttt{result[i]}` is independently
sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have
floating point ``dtype``, and the result will have the same ``dtype``.

See :func:`torch.bernoulli`
""",
)

add_docstr_all(
    "bernoulli_",
    r"""
bernoulli_(p=0.5, *, generator=None) -> Tensor

Fills each location of :attr:`self` with an independent sample from
:math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral
``dtype``.

:attr:`p` should be either a scalar or a tensor containing probabilities to be
used for drawing the binary random number.

If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor
will be set to a value sampled from
:math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have
floating point ``dtype``.

See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli`
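
Example (sampled values are random and will vary from run to run)::

    >>> t = torch.zeros(3)
    >>> t.bernoulli_(0.8)
    tensor([1., 0., 1.])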
""",
)

add_docstr_all(
    "bincount",
    r"""
bincount(weights=None, minlength=0) -> Tensor

See :func:`torch.bincount`
""",
)

add_docstr_all(
    "bitwise_not",
    r"""
bitwise_not() -> Tensor

See :func:`torch.bitwise_not`
""",
)

add_docstr_all(
    "bitwise_not_",
    r"""
bitwise_not_() -> Tensor

In-place version of :meth:`~Tensor.bitwise_not`
""",
)

add_docstr_all(
    "bitwise_and",
    r"""
bitwise_and(other) -> Tensor

See :func:`torch.bitwise_and`
""",
)

add_docstr_all(
    "bitwise_and_",
    r"""
bitwise_and_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_and`
""",
)

add_docstr_all(
    "bitwise_or",
    r"""
bitwise_or(other) -> Tensor

See :func:`torch.bitwise_or`
""",
)

add_docstr_all(
    "bitwise_or_",
    r"""
bitwise_or_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_or`
""",
)

add_docstr_all(
    "bitwise_xor",
    r"""
bitwise_xor(other) -> Tensor

See :func:`torch.bitwise_xor`
""",
)

add_docstr_all(
    "bitwise_xor_",
    r"""
bitwise_xor_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_xor`
""",
)

add_docstr_all(
    "bitwise_left_shift",
    r"""
bitwise_left_shift(other) -> Tensor

See :func:`torch.bitwise_left_shift`
""",
)

add_docstr_all(
    "bitwise_left_shift_",
    r"""
bitwise_left_shift_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_left_shift`
""",
)

add_docstr_all(
    "bitwise_right_shift",
    r"""
bitwise_right_shift(other) -> Tensor

See :func:`torch.bitwise_right_shift`
""",
)

add_docstr_all(
    "bitwise_right_shift_",
    r"""
bitwise_right_shift_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_right_shift`
""",
)

add_docstr_all(
    "broadcast_to",
    r"""
broadcast_to(shape) -> Tensor

See :func:`torch.broadcast_to`.
""",
)

add_docstr_all(
    "logical_and",
    r"""
logical_and(other) -> Tensor

See :func:`torch.logical_and`
""",
)

add_docstr_all(
    "logical_and_",
    r"""
logical_and_(other) -> Tensor

In-place version of :meth:`~Tensor.logical_and`
""",
)

add_docstr_all(
    "logical_not",
    r"""
logical_not() -> Tensor

See :func:`torch.logical_not`
""",
)

add_docstr_all(
    "logical_not_",
    r"""
logical_not_() -> Tensor

In-place version of :meth:`~Tensor.logical_not`
""",
)

add_docstr_all(
    "logical_or",
    r"""
logical_or(other) -> Tensor

See :func:`torch.logical_or`
""",
)

add_docstr_all(
    "logical_or_",
    r"""
logical_or_(other) -> Tensor

In-place version of :meth:`~Tensor.logical_or`
""",
)

add_docstr_all(
    "logical_xor",
    r"""
logical_xor(other) -> Tensor

See :func:`torch.logical_xor`
""",
)

add_docstr_all(
    "logical_xor_",
    r"""
logical_xor_(other) -> Tensor

In-place version of :meth:`~Tensor.logical_xor`
""",
)

add_docstr_all(
    "bmm",
    r"""
bmm(batch2) -> Tensor

See :func:`torch.bmm`
""",
)

add_docstr_all(
    "cauchy_",
    r"""
cauchy_(median=0, sigma=1, *, generator=None) -> Tensor

Fills the tensor with numbers drawn from the Cauchy distribution:

.. math::

    f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}

.. note::
  Sigma (:math:`\sigma`) denotes the scale parameter of the Cauchy distribution.
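
Example (sampled values are random and will vary from run to run)::

    >>> torch.empty(3).cauchy_()
    tensor([ 0.2539, -1.7316,  8.1021])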
""",
)

add_docstr_all(
    "ceil",
    r"""
ceil() -> Tensor

See :func:`torch.ceil`
""",
)

add_docstr_all(
    "ceil_",
    r"""
ceil_() -> Tensor

In-place version of :meth:`~Tensor.ceil`
""",
)

add_docstr_all(
    "cholesky",
    r"""
cholesky(upper=False) -> Tensor

See :func:`torch.cholesky`
""",
)

add_docstr_all(
    "cholesky_solve",
    r"""
cholesky_solve(input2, upper=False) -> Tensor

See :func:`torch.cholesky_solve`
""",
)

add_docstr_all(
    "cholesky_inverse",
    r"""
cholesky_inverse(upper=False) -> Tensor

See :func:`torch.cholesky_inverse`
""",
)

add_docstr_all(
    "clamp",
    r"""
clamp(min=None, max=None) -> Tensor

See :func:`torch.clamp`
""",
)

add_docstr_all(
    "clamp_",
    r"""
clamp_(min=None, max=None) -> Tensor

In-place version of :meth:`~Tensor.clamp`
""",
)

add_docstr_all(
    "clip",
    r"""
clip(min=None, max=None) -> Tensor

Alias for :meth:`~Tensor.clamp`.
""",
)

add_docstr_all(
    "clip_",
    r"""
clip_(min=None, max=None) -> Tensor

Alias for :meth:`~Tensor.clamp_`.
""",
)

add_docstr_all(
    "clone",
    r"""
clone(*, memory_format=torch.preserve_format) -> Tensor

See :func:`torch.clone`
""".format(**common_args),
)

add_docstr_all(
    "coalesce",
    r"""
coalesce() -> Tensor

Returns a coalesced copy of :attr:`self` if :attr:`self` is an
:ref:`uncoalesced tensor <sparse-uncoalesced-coo-docs>`.

Returns :attr:`self` if :attr:`self` is a coalesced tensor.

.. warning::
  Throws an error if :attr:`self` is not a sparse COO tensor.
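
Example::

    >>> i = torch.tensor([[0, 0], [1, 1]])  # two entries at position (0, 1)
    >>> s = torch.sparse_coo_tensor(i, torch.tensor([1., 2.]), (2, 2))
    >>> s.is_coalesced()
    False
    >>> s.coalesce().values()  # duplicate entries are summed
    tensor([3.])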
""",
)

add_docstr_all(
    "contiguous",
    r"""
contiguous(memory_format=torch.contiguous_format) -> Tensor

Returns a tensor contiguous in memory containing the same data as the
:attr:`self` tensor. If the :attr:`self` tensor is already in the specified
memory format, this function returns the :attr:`self` tensor.

Args:
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.contiguous_format``.
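
Example::

    >>> x = torch.randn(2, 3).t()  # a transpose is a non-contiguous view
    >>> x.is_contiguous()
    False
    >>> x.contiguous().is_contiguous()
    True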
""",
)

add_docstr_all(
    "copy_",
    r"""
copy_(src, non_blocking=False) -> Tensor

Copies the elements from :attr:`src` into :attr:`self` tensor and returns
:attr:`self`.

The :attr:`src` tensor must be :ref:`broadcastable <broadcasting-semantics>`
with the :attr:`self` tensor. It may be of a different data type or reside on a
different device.

Args:
    src (Tensor): the source tensor to copy from
    non_blocking (bool): if ``True`` and this copy is between CPU and GPU,
        the copy may occur asynchronously with respect to the host. For other
        cases, this argument has no effect.
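
Example::

    >>> a = torch.zeros(2, 3)
    >>> a.copy_(torch.tensor([1., 2., 3.]))  # src is broadcast over the rows
    tensor([[1., 2., 3.],
            [1., 2., 3.]])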
""",
)

add_docstr_all(
    "conj",
    r"""
conj() -> Tensor

See :func:`torch.conj`
""",
)

add_docstr_all(
    "conj_physical",
    r"""
conj_physical() -> Tensor

See :func:`torch.conj_physical`
""",
)

add_docstr_all(
    "conj_physical_",
    r"""
conj_physical_() -> Tensor

In-place version of :meth:`~Tensor.conj_physical`
""",
)

add_docstr_all(
    "resolve_conj",
    r"""
resolve_conj() -> Tensor

See :func:`torch.resolve_conj`
""",
)

add_docstr_all(
    "resolve_neg",
    r"""
resolve_neg() -> Tensor

See :func:`torch.resolve_neg`
""",
)

add_docstr_all(
    "copysign",
    r"""
copysign(other) -> Tensor

See :func:`torch.copysign`
""",
)

add_docstr_all(
    "copysign_",
    r"""
copysign_(other) -> Tensor

In-place version of :meth:`~Tensor.copysign`
""",
)

add_docstr_all(
    "cos",
    r"""
cos() -> Tensor

See :func:`torch.cos`
""",
)

add_docstr_all(
    "cos_",
    r"""
cos_() -> Tensor

In-place version of :meth:`~Tensor.cos`
""",
)

add_docstr_all(
    "cosh",
    r"""
cosh() -> Tensor

See :func:`torch.cosh`
""",
)

add_docstr_all(
    "cosh_",
    r"""
cosh_() -> Tensor

In-place version of :meth:`~Tensor.cosh`
""",
)

add_docstr_all(
    "cpu",
    r"""
cpu(memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in CPU memory.

If this object is already in CPU memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    {memory_format}

""".format(**common_args),
)

add_docstr_all(
    "count_nonzero",
    r"""
count_nonzero(dim=None) -> Tensor

See :func:`torch.count_nonzero`
""",
)

add_docstr_all(
    "cov",
    r"""
cov(*, correction=1, fweights=None, aweights=None) -> Tensor

See :func:`torch.cov`
""",
)

add_docstr_all(
    "corrcoef",
    r"""
corrcoef() -> Tensor

See :func:`torch.corrcoef`
""",
)

add_docstr_all(
    "cross",
    r"""
cross(other, dim=None) -> Tensor

See :func:`torch.cross`
""",
)

add_docstr_all(
    "cuda",
    r"""
cuda(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in CUDA memory.

If this object is already in CUDA memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination GPU device.
        Defaults to the current CUDA device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "mtia",
    r"""
mtia(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in MTIA memory.

If this object is already in MTIA memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination MTIA device.
        Defaults to the current MTIA device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "ipu",
    r"""
ipu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in IPU memory.

If this object is already in IPU memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination IPU device.
        Defaults to the current IPU device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "xpu",
    r"""
xpu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in XPU memory.

If this object is already in XPU memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination XPU device.
        Defaults to the current XPU device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "logcumsumexp",
    r"""
logcumsumexp(dim) -> Tensor

See :func:`torch.logcumsumexp`
""",
)

add_docstr_all(
    "cummax",
    r"""
cummax(dim) -> (Tensor, Tensor)

See :func:`torch.cummax`
""",
)

add_docstr_all(
    "cummin",
    r"""
cummin(dim) -> (Tensor, Tensor)

See :func:`torch.cummin`
""",
)

add_docstr_all(
    "cumprod",
    r"""
cumprod(dim, dtype=None) -> Tensor

See :func:`torch.cumprod`
""",
)

add_docstr_all(
    "cumprod_",
    r"""
cumprod_(dim, dtype=None) -> Tensor

In-place version of :meth:`~Tensor.cumprod`
""",
)

add_docstr_all(
    "cumsum",
    r"""
cumsum(dim, dtype=None) -> Tensor

See :func:`torch.cumsum`
""",
)

add_docstr_all(
    "cumsum_",
    r"""
cumsum_(dim, dtype=None) -> Tensor

In-place version of :meth:`~Tensor.cumsum`
""",
)

add_docstr_all(
    "data_ptr",
    r"""
data_ptr() -> int

Returns the address of the first element of :attr:`self` tensor.
""",
)

add_docstr_all(
    "dequantize",
    r"""
dequantize() -> Tensor

Given a quantized Tensor, dequantize it and return the dequantized float Tensor.
""",
)

add_docstr_all(
    "dense_dim",
    r"""
dense_dim() -> int

Return the number of dense dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.

.. note::
  Returns ``len(self.shape)`` if :attr:`self` is not a sparse tensor.

See also :meth:`Tensor.sparse_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
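
Example::

    >>> t = torch.sparse_coo_tensor(torch.tensor([[0]]), torch.tensor([[1., 2.]]), (3, 2))
    >>> t.sparse_dim(), t.dense_dim()  # a hybrid tensor: one sparse, one dense dimension
    (1, 1)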
""",
)

add_docstr_all(
    "diag",
    r"""
diag(diagonal=0) -> Tensor

See :func:`torch.diag`
""",
)

add_docstr_all(
    "diag_embed",
    r"""
diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor

See :func:`torch.diag_embed`
""",
)

add_docstr_all(
    "diagflat",
    r"""
diagflat(offset=0) -> Tensor

See :func:`torch.diagflat`
""",
)

add_docstr_all(
    "diagonal",
    r"""
diagonal(offset=0, dim1=0, dim2=1) -> Tensor

See :func:`torch.diagonal`
""",
)

add_docstr_all(
    "diagonal_scatter",
    r"""
diagonal_scatter(src, offset=0, dim1=0, dim2=1) -> Tensor

See :func:`torch.diagonal_scatter`
""",
)

add_docstr_all(
    "as_strided_scatter",
    r"""
as_strided_scatter(src, size, stride, storage_offset=None) -> Tensor

See :func:`torch.as_strided_scatter`
""",
)

add_docstr_all(
    "fill_diagonal_",
    r"""
fill_diagonal_(fill_value, wrap=False) -> Tensor

Fill the main diagonal of a tensor that has at least 2 dimensions.
When dims > 2, all dimensions of the input must be of equal length.
This function modifies the input tensor in-place, and returns the input tensor.

Arguments:
    fill_value (Scalar): the fill value
    wrap (bool): whether the diagonal is 'wrapped' after N columns for tall matrices.

Example::

    >>> a = torch.zeros(3, 3)
    >>> a.fill_diagonal_(5)
    tensor([[5., 0., 0.],
            [0., 5., 0.],
            [0., 0., 5.]])
    >>> b = torch.zeros(7, 3)
    >>> b.fill_diagonal_(5)
    tensor([[5., 0., 0.],
            [0., 5., 0.],
            [0., 0., 5.],
            [0., 0., 0.],
            [0., 0., 0.],
            [0., 0., 0.],
            [0., 0., 0.]])
    >>> c = torch.zeros(7, 3)
    >>> c.fill_diagonal_(5, wrap=True)
    tensor([[5., 0., 0.],
            [0., 5., 0.],
            [0., 0., 5.],
            [0., 0., 0.],
            [5., 0., 0.],
            [0., 5., 0.],
            [0., 0., 5.]])

""",
)

add_docstr_all(
    "floor_divide",
    r"""
floor_divide(value) -> Tensor

See :func:`torch.floor_divide`
""",
)

add_docstr_all(
    "floor_divide_",
    r"""
floor_divide_(value) -> Tensor

In-place version of :meth:`~Tensor.floor_divide`
""",
)

add_docstr_all(
    "diff",
    r"""
diff(n=1, dim=-1, prepend=None, append=None) -> Tensor

See :func:`torch.diff`
""",
)

add_docstr_all(
    "digamma",
    r"""
digamma() -> Tensor

See :func:`torch.digamma`
""",
)

add_docstr_all(
    "digamma_",
    r"""
digamma_() -> Tensor

In-place version of :meth:`~Tensor.digamma`
""",
)

add_docstr_all(
    "dim",
    r"""
dim() -> int

Returns the number of dimensions of :attr:`self` tensor.
""",
)

add_docstr_all(
    "dist",
    r"""
dist(other, p=2) -> Tensor

See :func:`torch.dist`
""",
)

add_docstr_all(
    "div",
    r"""
div(value, *, rounding_mode=None) -> Tensor

See :func:`torch.div`
""",
)

add_docstr_all(
    "div_",
    r"""
div_(value, *, rounding_mode=None) -> Tensor

In-place version of :meth:`~Tensor.div`
""",
)

add_docstr_all(
    "divide",
    r"""
divide(value, *, rounding_mode=None) -> Tensor

See :func:`torch.divide`
""",
)

add_docstr_all(
    "divide_",
    r"""
divide_(value, *, rounding_mode=None) -> Tensor

In-place version of :meth:`~Tensor.divide`
""",
)

add_docstr_all(
    "dot",
    r"""
dot(other) -> Tensor

See :func:`torch.dot`
""",
)

add_docstr_all(
    "element_size",
    r"""
element_size() -> int

Returns the size in bytes of an individual element.

Example::

    >>> torch.tensor([]).element_size()
    4
    >>> torch.tensor([], dtype=torch.uint8).element_size()
    1

""",
)

add_docstr_all(
    "eq",
    r"""
eq(other) -> Tensor

See :func:`torch.eq`
""",
)

add_docstr_all(
    "eq_",
    r"""
eq_(other) -> Tensor

In-place version of :meth:`~Tensor.eq`
""",
)

add_docstr_all(
    "equal",
    r"""
equal(other) -> bool

See :func:`torch.equal`
""",
)

add_docstr_all(
    "erf",
    r"""
erf() -> Tensor

See :func:`torch.erf`
""",
)

add_docstr_all(
    "erf_",
    r"""
erf_() -> Tensor

In-place version of :meth:`~Tensor.erf`
""",
)

add_docstr_all(
    "erfc",
    r"""
erfc() -> Tensor

See :func:`torch.erfc`
""",
)

add_docstr_all(
    "erfc_",
    r"""
erfc_() -> Tensor

In-place version of :meth:`~Tensor.erfc`
""",
)

add_docstr_all(
    "erfinv",
    r"""
erfinv() -> Tensor

See :func:`torch.erfinv`
""",
)

add_docstr_all(
    "erfinv_",
    r"""
erfinv_() -> Tensor

In-place version of :meth:`~Tensor.erfinv`
""",
)

add_docstr_all(
    "exp",
    r"""
exp() -> Tensor

See :func:`torch.exp`
""",
)

add_docstr_all(
    "exp_",
    r"""
exp_() -> Tensor

In-place version of :meth:`~Tensor.exp`
""",
)

add_docstr_all(
    "exp2",
    r"""
exp2() -> Tensor

See :func:`torch.exp2`
""",
)

add_docstr_all(
    "exp2_",
    r"""
exp2_() -> Tensor

In-place version of :meth:`~Tensor.exp2`
""",
)

add_docstr_all(
    "expm1",
    r"""
expm1() -> Tensor

See :func:`torch.expm1`
""",
)

add_docstr_all(
    "expm1_",
    r"""
expm1_() -> Tensor

In-place version of :meth:`~Tensor.expm1`
""",
)

add_docstr_all(
    "exponential_",
    r"""
exponential_(lambd=1, *, generator=None) -> Tensor

Fills :attr:`self` tensor with elements drawn from the PDF (probability density function):

.. math::

    f(x) = \lambda e^{-\lambda x}, x > 0

.. note::
  In probability theory, the exponential distribution is supported on the
  interval [0, :math:`\infty`), i.e. :math:`x \ge 0`, implying that zero can be
  sampled from the exponential distribution. However,
  :func:`torch.Tensor.exponential_` does not sample zero, which means that its
  actual support is the interval (0, :math:`\infty`).

  Note that :func:`torch.distributions.exponential.Exponential` is supported on the interval [0, :math:`\infty`) and can sample zero.
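
Example (sampled values are random and will vary from run to run)::

    >>> torch.empty(3).exponential_(lambd=2.0)
    tensor([0.0321, 0.6542, 0.1271])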
""",
)

add_docstr_all(
    "fill_",
    r"""
fill_(value) -> Tensor

Fills :attr:`self` tensor with the specified value.
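
Example::

    >>> torch.empty(2, 2).fill_(3.5)
    tensor([[3.5000, 3.5000],
            [3.5000, 3.5000]])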
""",
)

add_docstr_all(
    "floor",
    r"""
floor() -> Tensor

See :func:`torch.floor`
""",
)

add_docstr_all(
    "flip",
    r"""
flip(dims) -> Tensor

See :func:`torch.flip`
""",
)

add_docstr_all(
    "fliplr",
    r"""
fliplr() -> Tensor

See :func:`torch.fliplr`
""",
)

add_docstr_all(
    "flipud",
    r"""
flipud() -> Tensor

See :func:`torch.flipud`
""",
)

add_docstr_all(
    "roll",
    r"""
roll(shifts, dims) -> Tensor

See :func:`torch.roll`
""",
)

add_docstr_all(
    "floor_",
    r"""
floor_() -> Tensor

In-place version of :meth:`~Tensor.floor`
""",
)

add_docstr_all(
    "fmod",
    r"""
fmod(divisor) -> Tensor

See :func:`torch.fmod`
""",
)

add_docstr_all(
    "fmod_",
    r"""
fmod_(divisor) -> Tensor

In-place version of :meth:`~Tensor.fmod`
""",
)

add_docstr_all(
    "frac",
    r"""
frac() -> Tensor

See :func:`torch.frac`
""",
)

add_docstr_all(
    "frac_",
    r"""
frac_() -> Tensor

In-place version of :meth:`~Tensor.frac`
""",
)

add_docstr_all(
    "frexp",
    r"""
frexp() -> (Tensor mantissa, Tensor exponent)

See :func:`torch.frexp`
""",
)

add_docstr_all(
    "flatten",
    r"""
flatten(start_dim=0, end_dim=-1) -> Tensor

See :func:`torch.flatten`
""",
)

add_docstr_all(
    "gather",
    r"""
gather(dim, index) -> Tensor

See :func:`torch.gather`
""",
)

add_docstr_all(
    "gcd",
    r"""
gcd(other) -> Tensor

See :func:`torch.gcd`
""",
)

add_docstr_all(
    "gcd_",
    r"""
gcd_(other) -> Tensor

In-place version of :meth:`~Tensor.gcd`
""",
)

add_docstr_all(
    "ge",
    r"""
ge(other) -> Tensor

See :func:`torch.ge`.
""",
)

add_docstr_all(
    "ge_",
    r"""
ge_(other) -> Tensor

In-place version of :meth:`~Tensor.ge`.
""",
)

add_docstr_all(
    "greater_equal",
    r"""
greater_equal(other) -> Tensor

See :func:`torch.greater_equal`.
""",
)

add_docstr_all(
    "greater_equal_",
    r"""
greater_equal_(other) -> Tensor

In-place version of :meth:`~Tensor.greater_equal`.
""",
)

add_docstr_all(
    "geometric_",
    r"""
geometric_(p, *, generator=None) -> Tensor

Fills :attr:`self` tensor with elements drawn from the geometric distribution:

.. math::

    P(X=k) = (1 - p)^{k - 1} p, k = 1, 2, ...

.. note::
  For :func:`torch.Tensor.geometric_`, the `k`-th trial is the first success,
  so samples are drawn in :math:`\{1, 2, \ldots\}`, whereas for
  :func:`torch.distributions.geometric.Geometric`, the :math:`(k+1)`-th trial
  is the first success, so samples are drawn in :math:`\{0, 1, \ldots\}`.
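
Example (sampled values are random and will vary from run to run)::

    >>> torch.empty(3).geometric_(0.5)
    tensor([1., 3., 1.])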
""",
)

add_docstr_all(
    "geqrf",
    r"""
geqrf() -> (Tensor, Tensor)

See :func:`torch.geqrf`
""",
)

add_docstr_all(
    "ger",
    r"""
ger(vec2) -> Tensor

See :func:`torch.ger`
""",
)

add_docstr_all(
    "inner",
    r"""
inner(other) -> Tensor

See :func:`torch.inner`.
""",
)

add_docstr_all(
    "outer",
    r"""
outer(vec2) -> Tensor

See :func:`torch.outer`.
""",
)

add_docstr_all(
    "hypot",
    r"""
hypot(other) -> Tensor

See :func:`torch.hypot`
""",
)

add_docstr_all(
    "hypot_",
    r"""
hypot_(other) -> Tensor

In-place version of :meth:`~Tensor.hypot`
""",
)

add_docstr_all(
    "i0",
    r"""
i0() -> Tensor

See :func:`torch.i0`
""",
)

add_docstr_all(
    "i0_",
    r"""
i0_() -> Tensor

In-place version of :meth:`~Tensor.i0`
""",
)

add_docstr_all(
    "igamma",
    r"""
igamma(other) -> Tensor

See :func:`torch.igamma`
""",
)

add_docstr_all(
    "igamma_",
    r"""
igamma_(other) -> Tensor

In-place version of :meth:`~Tensor.igamma`
""",
)

add_docstr_all(
    "igammac",
    r"""
igammac(other) -> Tensor

See :func:`torch.igammac`
""",
)

add_docstr_all(
    "igammac_",
    r"""
igammac_(other) -> Tensor

In-place version of :meth:`~Tensor.igammac`
""",
)

add_docstr_all(
    "indices",
    r"""
indices() -> Tensor

Return the indices tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.

.. warning::
  Throws an error if :attr:`self` is not a sparse COO tensor.

See also :meth:`Tensor.values`.

.. note::
  This method can only be called on a coalesced sparse tensor. See
  :meth:`Tensor.coalesce` for details.
""",
)

add_docstr_all(
    "get_device",
    r"""
get_device() -> int

For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides.
For CPU tensors, this function returns `-1`.

Example::

    >>> x = torch.randn(3, 4, 5, device='cuda:0')
    >>> x.get_device()
    0
    >>> x.cpu().get_device()
    -1
""",
)

add_docstr_all(
    "values",
    r"""
values() -> Tensor

Return the values tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.

.. warning::
  Throws an error if :attr:`self` is not a sparse COO tensor.

See also :meth:`Tensor.indices`.

.. note::
  This method can only be called on a coalesced sparse tensor. See
  :meth:`Tensor.coalesce` for details.
""",
)

add_docstr_all(
    "gt",
    r"""
gt(other) -> Tensor

See :func:`torch.gt`.
""",
)

add_docstr_all(
    "gt_",
    r"""
gt_(other) -> Tensor

In-place version of :meth:`~Tensor.gt`.
""",
)

add_docstr_all(
    "greater",
    r"""
greater(other) -> Tensor

See :func:`torch.greater`.
""",
)

add_docstr_all(
    "greater_",
    r"""
greater_(other) -> Tensor

In-place version of :meth:`~Tensor.greater`.
""",
)

add_docstr_all(
    "has_names",
    r"""
Is ``True`` if any of this tensor's dimensions are named. Otherwise, is ``False``.
""",
)

add_docstr_all(
    "hardshrink",
    r"""
hardshrink(lambd=0.5) -> Tensor

See :func:`torch.nn.functional.hardshrink`
""",
)

add_docstr_all(
    "heaviside",
    r"""
heaviside(values) -> Tensor

See :func:`torch.heaviside`
""",
)

add_docstr_all(
    "heaviside_",
    r"""
heaviside_(values) -> Tensor

In-place version of :meth:`~Tensor.heaviside`
""",
)

add_docstr_all(
    "histc",
    r"""
histc(bins=100, min=0, max=0) -> Tensor

See :func:`torch.histc`
""",
)

add_docstr_all(
    "histogram",
    r"""
histogram(bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor)

See :func:`torch.histogram`
""",
)

add_docstr_all(
    "index_add_",
    r"""
index_add_(dim, index, source, *, alpha=1) -> Tensor

Accumulate the elements of :attr:`alpha` times ``source`` into the :attr:`self`
tensor by adding to the indices in the order given in :attr:`index`. For example,
if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of
``source`` is subtracted from the ``j``\ th row of :attr:`self`.

The :attr:`dim`\ th dimension of ``source`` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.

For a 3-D tensor the output is given as::

    self[index[i], :, :] += alpha * src[i, :, :]  # if dim == 0
    self[:, index[i], :] += alpha * src[:, i, :]  # if dim == 1
    self[:, :, index[i]] += alpha * src[:, :, i]  # if dim == 2

Note:
    {forward_reproducibility_note}

Args:
    dim (int): dimension along which to index
    index (Tensor): indices of ``source`` to select from,
            should have dtype either `torch.int64` or `torch.int32`
    source (Tensor): the tensor containing values to add

Keyword args:
    alpha (Number): the scalar multiplier for ``source``

Example::

    >>> x = torch.ones(5, 3)
    >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    >>> index = torch.tensor([0, 4, 2])
    >>> x.index_add_(0, index, t)
    tensor([[  2.,   3.,   4.],
            [  1.,   1.,   1.],
            [  8.,   9.,  10.],
            [  1.,   1.,   1.],
            [  5.,   6.,   7.]])
    >>> x.index_add_(0, index, t, alpha=-1)
    tensor([[  1.,   1.,   1.],
            [  1.,   1.,   1.],
            [  1.,   1.,   1.],
            [  1.,   1.,   1.],
            [  1.,   1.,   1.]])
""".format(**reproducibility_notes),
)

add_docstr_all(
    "index_copy_",
    r"""
index_copy_(dim, index, tensor) -> Tensor

Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
the indices in the order given in :attr:`index`. For example, if ``dim == 0``
and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
``j``\ th row of :attr:`self`.

The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.

.. note::
    If :attr:`index` contains duplicate entries, multiple elements from
    :attr:`tensor` will be copied to the same index of :attr:`self`. The result
    is nondeterministic since it depends on which copy occurs last.

Args:
    dim (int): dimension along which to index
    index (LongTensor): indices of :attr:`tensor` to select from
    tensor (Tensor): the tensor containing values to copy

Example::

    >>> x = torch.zeros(5, 3)
    >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    >>> index = torch.tensor([0, 4, 2])
    >>> x.index_copy_(0, index, t)
    tensor([[ 1.,  2.,  3.],
            [ 0.,  0.,  0.],
            [ 7.,  8.,  9.],
            [ 0.,  0.,  0.],
            [ 4.,  5.,  6.]])
""",
)

add_docstr_all(
    "index_fill_",
    r"""
index_fill_(dim, index, value) -> Tensor

Fills the elements of the :attr:`self` tensor with value :attr:`value` by
selecting the indices in the order given in :attr:`index`.

Args:
    dim (int): dimension along which to index
    index (LongTensor): indices of :attr:`self` tensor to fill in
    value (float): the value to fill with

Example::

    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    >>> index = torch.tensor([0, 2])
    >>> x.index_fill_(1, index, -1)
    tensor([[-1.,  2., -1.],
            [-1.,  5., -1.],
            [-1.,  8., -1.]])
""",
)

add_docstr_all(
    "index_put_",
    r"""
index_put_(indices, values, accumulate=False) -> Tensor

Puts values from the tensor :attr:`values` into the tensor :attr:`self` using
the indices specified in :attr:`indices` (which is a tuple of Tensors). The
expression ``tensor.index_put_(indices, values)`` is equivalent to
``tensor[indices] = values``. Returns :attr:`self`.

If :attr:`accumulate` is ``True``, the elements in :attr:`values` are added to
:attr:`self`. If accumulate is ``False``, the behavior is undefined if indices
contain duplicate elements.

Args:
    indices (tuple of LongTensor): tensors used to index into `self`.
    values (Tensor): tensor of same dtype as `self`.
    accumulate (bool): whether to accumulate into self
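
Example::

    >>> x = torch.zeros(3)
    >>> x.index_put_((torch.tensor([0, 2]),), torch.tensor([1., 2.]))
    tensor([1., 0., 2.])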
""",
)

add_docstr_all(
    "index_put",
    r"""
index_put(indices, values, accumulate=False) -> Tensor

Out-of-place version of :meth:`~Tensor.index_put_`.
""",
)

add_docstr_all(
    "index_reduce_",
    r"""
index_reduce_(dim, index, source, reduce, *, include_self=True) -> Tensor

Accumulate the elements of ``source`` into the :attr:`self`
tensor by accumulating to the indices in the order given in :attr:`index`
using the reduction given by the ``reduce`` argument. For example, if ``dim == 0``,
``index[i] == j``, ``reduce == "prod"`` and ``include_self == True`` then the ``i``\ th
row of ``source`` is multiplied by the ``j``\ th row of :attr:`self`. If
:obj:`include_self=True`, the values in the :attr:`self` tensor are included
in the reduction; otherwise, rows in the :attr:`self` tensor that are accumulated
to are treated as if they were filled with the reduction identities.

The :attr:`dim`\ th dimension of ``source`` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.

For a 3-D tensor with :obj:`reduce="prod"` and :obj:`include_self=True` the
output is given as::

    self[index[i], :, :] *= src[i, :, :]  # if dim == 0
    self[:, index[i], :] *= src[:, i, :]  # if dim == 1
    self[:, :, index[i]] *= src[:, :, i]  # if dim == 2

Note:
    {forward_reproducibility_note}

.. note::

    This function only supports floating point tensors.

.. warning::

    This function is in beta and may change in the near future.

Args:
    dim (int): dimension along which to index
    index (Tensor): indices of ``source`` to select from,
        should have dtype either `torch.int64` or `torch.int32`
    source (FloatTensor): the tensor containing values to accumulate
    reduce (str): the reduction operation to apply
        (:obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)

Keyword args:
    include_self (bool): whether the elements from the ``self`` tensor are
        included in the reduction

Example::

    >>> x = torch.empty(5, 3).fill_(2)
    >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=torch.float)
    >>> index = torch.tensor([0, 4, 2, 0])
    >>> x.index_reduce_(0, index, t, 'prod')
    tensor([[20., 44., 72.],
            [ 2.,  2.,  2.],
            [14., 16., 18.],
            [ 2.,  2.,  2.],
            [ 8., 10., 12.]])
    >>> x = torch.empty(5, 3).fill_(2)
    >>> x.index_reduce_(0, index, t, 'prod', include_self=False)
    tensor([[10., 22., 36.],
            [ 2.,  2.,  2.],
            [ 7.,  8.,  9.],
            [ 2.,  2.,  2.],
            [ 4.,  5.,  6.]])
""".format(**reproducibility_notes),
)

add_docstr_all(
    "index_select",
    r"""
index_select(dim, index) -> Tensor

See :func:`torch.index_select`
""",
)

add_docstr_all(
    "sparse_mask",
    r"""
sparse_mask(mask) -> Tensor

Returns a new :ref:`sparse tensor <sparse-docs>` with values from a
strided tensor :attr:`self` filtered by the indices of the sparse
tensor :attr:`mask`. The values of :attr:`mask` sparse tensor are
ignored. :attr:`self` and :attr:`mask` tensors must have the same
shape.

.. note::

  The returned sparse tensor might contain duplicate values if :attr:`mask`
  is not coalesced. It is therefore advisable to pass ``mask.coalesce()``
  if such behavior is not desired.

.. note::

  The returned sparse tensor has the same indices as the sparse tensor
  :attr:`mask`, even when the corresponding values in :attr:`self` are
  zeros.

Args:
    mask (Tensor): a sparse tensor whose indices are used as a filter

Example::

    >>> nse = 5
    >>> dims = (5, 5, 2, 2)
    >>> I = torch.cat([torch.randint(0, dims[0], size=(nse,)),
    ...                torch.randint(0, dims[1], size=(nse,))], 0).reshape(2, nse)
    >>> V = torch.randn(nse, dims[2], dims[3])
    >>> S = torch.sparse_coo_tensor(I, V, dims).coalesce()
    >>> D = torch.randn(dims)
2629
    >>> D.sparse_mask(S)
2630
    tensor(indices=tensor([[0, 0, 0, 2],
2631
                           [0, 1, 4, 3]]),
2632
           values=tensor([[[ 1.6550,  0.2397],
2633
                           [-0.1611, -0.0779]],
2634

2635
                          [[ 0.2326, -1.0558],
2636
                           [ 1.4711,  1.9678]],
2637

2638
                          [[-0.5138, -0.0411],
2639
                           [ 1.9417,  0.5158]],
2640

2641
                          [[ 0.0793,  0.0036],
2642
                           [-0.2569, -0.1055]]]),
2643
           size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo)
2644
""",
2645
)
2646

2647
add_docstr_all(
2648
    "inverse",
2649
    r"""
2650
inverse() -> Tensor
2651

2652
See :func:`torch.inverse`
2653
""",
2654
)
2655

2656
add_docstr_all(
2657
    "isnan",
2658
    r"""
2659
isnan() -> Tensor
2660

2661
See :func:`torch.isnan`
2662
""",
2663
)
2664

2665
add_docstr_all(
2666
    "isinf",
2667
    r"""
2668
isinf() -> Tensor
2669

2670
See :func:`torch.isinf`
2671
""",
2672
)
2673

2674
add_docstr_all(
2675
    "isposinf",
2676
    r"""
2677
isposinf() -> Tensor
2678

2679
See :func:`torch.isposinf`
2680
""",
2681
)
2682

2683
add_docstr_all(
2684
    "isneginf",
2685
    r"""
2686
isneginf() -> Tensor
2687

2688
See :func:`torch.isneginf`
2689
""",
2690
)
2691

2692
add_docstr_all(
2693
    "isfinite",
2694
    r"""
2695
isfinite() -> Tensor
2696

2697
See :func:`torch.isfinite`
2698
""",
2699
)
2700

2701
add_docstr_all(
2702
    "isclose",
2703
    r"""
2704
isclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
2705

2706
See :func:`torch.isclose`
2707
""",
2708
)
2709

2710
add_docstr_all(
2711
    "isreal",
2712
    r"""
2713
isreal() -> Tensor
2714

2715
See :func:`torch.isreal`
2716
""",
2717
)
2718

2719
add_docstr_all(
2720
    "is_coalesced",
2721
    r"""
2722
is_coalesced() -> bool
2723

2724
Returns ``True`` if :attr:`self` is a :ref:`sparse COO tensor
2725
<sparse-coo-docs>` that is coalesced, ``False`` otherwise.
2726

2727
.. warning::
2728
  Throws an error if :attr:`self` is not a sparse COO tensor.
2729

2730
See :meth:`coalesce` and :ref:`uncoalesced tensors <sparse-uncoalesced-coo-docs>`.
2731
""",
2732
)
2733

2734
add_docstr_all(
2735
    "is_contiguous",
2736
    r"""
2737
is_contiguous(memory_format=torch.contiguous_format) -> bool
2738

2739
Returns True if :attr:`self` tensor is contiguous in memory in the order specified
2740
by memory format.
2741

2742
Args:
2743
    memory_format (:class:`torch.memory_format`, optional): Specifies memory allocation
2744
        order. Default: ``torch.contiguous_format``.
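
A short illustrative example (a transpose is an easy way to obtain a
non-contiguous view)::

    >>> x = torch.arange(6).reshape(2, 3)
    >>> x.is_contiguous()
    True
    >>> x.t().is_contiguous()
    False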
""",
)

add_docstr_all(
    "is_pinned",
    r"""
Returns True if this tensor resides in pinned memory.
""",
)

add_docstr_all(
    "is_floating_point",
    r"""
is_floating_point() -> bool

Returns True if the data type of :attr:`self` is a floating point data type.
""",
)

add_docstr_all(
    "is_complex",
    r"""
is_complex() -> bool

Returns True if the data type of :attr:`self` is a complex data type.
""",
)

add_docstr_all(
    "is_inference",
    r"""
is_inference() -> bool

See :func:`torch.is_inference`
""",
)

add_docstr_all(
    "is_conj",
    r"""
is_conj() -> bool

Returns True if the conjugate bit of :attr:`self` is set to true.
""",
)

add_docstr_all(
    "is_neg",
    r"""
is_neg() -> bool

Returns True if the negative bit of :attr:`self` is set to true.
""",
)

add_docstr_all(
    "is_signed",
    r"""
is_signed() -> bool

Returns True if the data type of :attr:`self` is a signed data type.
""",
)

add_docstr_all(
    "is_set_to",
    r"""
is_set_to(tensor) -> bool

Returns True if both tensors are pointing to the exact same memory (same
storage, offset, size and stride).
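
A minimal illustrative sketch::

    >>> a = torch.zeros(3)
    >>> b = torch.empty(0).set_(a)  # b now views exactly the same memory as a
    >>> b.is_set_to(a)
    True
    >>> torch.zeros(3).is_set_to(a)  # same values, but separate storage
    False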
""",
)

add_docstr_all(
    "item",
    r"""
item() -> number

Returns the value of this tensor as a standard Python number. This only works
for tensors with one element. For other cases, see :meth:`~Tensor.tolist`.

This operation is not differentiable.

Example::

    >>> x = torch.tensor([1.0])
    >>> x.item()
    1.0

""",
)

add_docstr_all(
    "kron",
    r"""
kron(other) -> Tensor

See :func:`torch.kron`
""",
)

add_docstr_all(
    "kthvalue",
    r"""
kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)

See :func:`torch.kthvalue`
""",
)

add_docstr_all(
    "ldexp",
    r"""
ldexp(other) -> Tensor

See :func:`torch.ldexp`
""",
)

add_docstr_all(
    "ldexp_",
    r"""
ldexp_(other) -> Tensor

In-place version of :meth:`~Tensor.ldexp`
""",
)

add_docstr_all(
    "lcm",
    r"""
lcm(other) -> Tensor

See :func:`torch.lcm`
""",
)

add_docstr_all(
    "lcm_",
    r"""
lcm_(other) -> Tensor

In-place version of :meth:`~Tensor.lcm`
""",
)

add_docstr_all(
    "le",
    r"""
le(other) -> Tensor

See :func:`torch.le`.
""",
)

add_docstr_all(
    "le_",
    r"""
le_(other) -> Tensor

In-place version of :meth:`~Tensor.le`.
""",
)

add_docstr_all(
    "less_equal",
    r"""
less_equal(other) -> Tensor

See :func:`torch.less_equal`.
""",
)

add_docstr_all(
    "less_equal_",
    r"""
less_equal_(other) -> Tensor

In-place version of :meth:`~Tensor.less_equal`.
""",
)

add_docstr_all(
    "lerp",
    r"""
lerp(end, weight) -> Tensor

See :func:`torch.lerp`
""",
)

add_docstr_all(
    "lerp_",
    r"""
lerp_(end, weight) -> Tensor

In-place version of :meth:`~Tensor.lerp`
""",
)

add_docstr_all(
    "lgamma",
    r"""
lgamma() -> Tensor

See :func:`torch.lgamma`
""",
)

add_docstr_all(
    "lgamma_",
    r"""
lgamma_() -> Tensor

In-place version of :meth:`~Tensor.lgamma`
""",
)

add_docstr_all(
    "log",
    r"""
log() -> Tensor

See :func:`torch.log`
""",
)

add_docstr_all(
    "log_",
    r"""
log_() -> Tensor

In-place version of :meth:`~Tensor.log`
""",
)

add_docstr_all(
    "log10",
    r"""
log10() -> Tensor

See :func:`torch.log10`
""",
)

add_docstr_all(
    "log10_",
    r"""
log10_() -> Tensor

In-place version of :meth:`~Tensor.log10`
""",
)

add_docstr_all(
    "log1p",
    r"""
log1p() -> Tensor

See :func:`torch.log1p`
""",
)

add_docstr_all(
    "log1p_",
    r"""
log1p_() -> Tensor

In-place version of :meth:`~Tensor.log1p`
""",
)

add_docstr_all(
    "log2",
    r"""
log2() -> Tensor

See :func:`torch.log2`
""",
)

add_docstr_all(
    "log2_",
    r"""
log2_() -> Tensor

In-place version of :meth:`~Tensor.log2`
""",
)

add_docstr_all(
    "logaddexp",
    r"""
logaddexp(other) -> Tensor

See :func:`torch.logaddexp`
""",
)

add_docstr_all(
    "logaddexp2",
    r"""
logaddexp2(other) -> Tensor

See :func:`torch.logaddexp2`
""",
)

add_docstr_all(
    "log_normal_",
    r"""
log_normal_(mean=1, std=2, *, generator=None)

Fills :attr:`self` tensor with numbers sampled from the log-normal distribution
parameterized by the given mean :math:`\mu` and standard deviation
:math:`\sigma`. Note that :attr:`mean` and :attr:`std` are the mean and
standard deviation of the underlying normal distribution, and not of the
returned distribution:

.. math::

    f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}
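
A minimal illustrative sketch (the samples are random, so only a
deterministic property of the distribution is checked)::

    >>> x = torch.empty(1000).log_normal_(mean=0, std=1)
    >>> bool((x > 0).all())  # log-normal samples are strictly positive
    True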
""",
)

add_docstr_all(
    "logsumexp",
    r"""
logsumexp(dim, keepdim=False) -> Tensor

See :func:`torch.logsumexp`
""",
)

add_docstr_all(
    "lt",
    r"""
lt(other) -> Tensor

See :func:`torch.lt`.
""",
)

add_docstr_all(
    "lt_",
    r"""
lt_(other) -> Tensor

In-place version of :meth:`~Tensor.lt`.
""",
)

add_docstr_all(
    "less",
    r"""
less(other) -> Tensor

See :func:`torch.less`.
""",
)

add_docstr_all(
    "less_",
    r"""
less_(other) -> Tensor

In-place version of :meth:`~Tensor.less`.
""",
)

add_docstr_all(
    "lu_solve",
    r"""
lu_solve(LU_data, LU_pivots) -> Tensor

See :func:`torch.lu_solve`
""",
)

add_docstr_all(
    "map_",
    r"""
map_(tensor, callable)

Applies :attr:`callable` to each element in :attr:`self` tensor and the given
:attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and
the given :attr:`tensor` must be :ref:`broadcastable <broadcasting-semantics>`.

The :attr:`callable` should have the signature::

    def callable(a, b) -> number
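
A minimal illustrative sketch (the callable is evaluated in Python, so this
only works for CPU tensors and is slow; prefer vectorized operations)::

    >>> a = torch.tensor([1., 2., 3.])
    >>> b = torch.tensor([10., 20., 30.])
    >>> a.map_(b, lambda x, y: x + y)
    tensor([11., 22., 33.])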
""",
)

add_docstr_all(
    "masked_scatter_",
    r"""
masked_scatter_(mask, source)

Copies elements from :attr:`source` into :attr:`self` tensor at positions where
the :attr:`mask` is True. Elements from :attr:`source` are copied into :attr:`self`
starting at position 0 of :attr:`source` and continuing in order one-by-one for each
occurrence of :attr:`mask` being True.
The shape of :attr:`mask` must be :ref:`broadcastable <broadcasting-semantics>`
with the shape of the underlying tensor. The :attr:`source` should have at least
as many elements as the number of ones in :attr:`mask`.

Args:
    mask (BoolTensor): the boolean mask
    source (Tensor): the tensor to copy from

.. note::

    The :attr:`mask` operates on the :attr:`self` tensor, not on the given
    :attr:`source` tensor.

Example::

    >>> self = torch.tensor([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
    >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=torch.bool)
    >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
    >>> self.masked_scatter_(mask, source)
    tensor([[0, 0, 0, 0, 1],
            [2, 3, 0, 4, 5]])

""",
)

add_docstr_all(
    "masked_fill_",
    r"""
masked_fill_(mask, value)

Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is
True. The shape of :attr:`mask` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.

Args:
    mask (BoolTensor): the boolean mask
    value (float): the value to fill in with
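
A minimal illustrative sketch::

    >>> x = torch.zeros(2, 3)
    >>> mask = torch.tensor([[True, False, True], [False, True, False]])
    >>> x.masked_fill_(mask, 1.5)
    tensor([[1.5000, 0.0000, 1.5000],
            [0.0000, 1.5000, 0.0000]])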
""",
)

add_docstr_all(
    "masked_select",
    r"""
masked_select(mask) -> Tensor

See :func:`torch.masked_select`
""",
)

add_docstr_all(
    "matrix_power",
    r"""
matrix_power(n) -> Tensor

.. note:: :meth:`~Tensor.matrix_power` is deprecated, use :func:`torch.linalg.matrix_power` instead.

Alias for :func:`torch.linalg.matrix_power`
""",
)

add_docstr_all(
    "matrix_exp",
    r"""
matrix_exp() -> Tensor

See :func:`torch.matrix_exp`
""",
)

add_docstr_all(
    "max",
    r"""
max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)

See :func:`torch.max`
""",
)

add_docstr_all(
    "amax",
    r"""
amax(dim=None, keepdim=False) -> Tensor

See :func:`torch.amax`
""",
)

add_docstr_all(
    "maximum",
    r"""
maximum(other) -> Tensor

See :func:`torch.maximum`
""",
)

add_docstr_all(
    "fmax",
    r"""
fmax(other) -> Tensor

See :func:`torch.fmax`
""",
)

add_docstr_all(
    "argmax",
    r"""
argmax(dim=None, keepdim=False) -> LongTensor

See :func:`torch.argmax`
""",
)

add_docstr_all(
    "argwhere",
    r"""
argwhere() -> Tensor

See :func:`torch.argwhere`
""",
)

add_docstr_all(
    "mean",
    r"""
mean(dim=None, keepdim=False, *, dtype=None) -> Tensor

See :func:`torch.mean`
""",
)

add_docstr_all(
    "nanmean",
    r"""
nanmean(dim=None, keepdim=False, *, dtype=None) -> Tensor

See :func:`torch.nanmean`
""",
)

add_docstr_all(
    "median",
    r"""
median(dim=None, keepdim=False) -> (Tensor, LongTensor)

See :func:`torch.median`
""",
)

add_docstr_all(
    "nanmedian",
    r"""
nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)

See :func:`torch.nanmedian`
""",
)

add_docstr_all(
    "min",
    r"""
min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)

See :func:`torch.min`
""",
)

add_docstr_all(
    "amin",
    r"""
amin(dim=None, keepdim=False) -> Tensor

See :func:`torch.amin`
""",
)

add_docstr_all(
    "minimum",
    r"""
minimum(other) -> Tensor

See :func:`torch.minimum`
""",
)

add_docstr_all(
    "aminmax",
    r"""
aminmax(*, dim=None, keepdim=False) -> (Tensor min, Tensor max)

See :func:`torch.aminmax`
""",
)

add_docstr_all(
    "fmin",
    r"""
fmin(other) -> Tensor

See :func:`torch.fmin`
""",
)

add_docstr_all(
    "argmin",
    r"""
argmin(dim=None, keepdim=False) -> LongTensor

See :func:`torch.argmin`
""",
)

add_docstr_all(
    "mm",
    r"""
mm(mat2) -> Tensor

See :func:`torch.mm`
""",
)

add_docstr_all(
    "mode",
    r"""
mode(dim=None, keepdim=False) -> (Tensor, LongTensor)

See :func:`torch.mode`
""",
)

add_docstr_all(
    "movedim",
    r"""
movedim(source, destination) -> Tensor

See :func:`torch.movedim`
""",
)

add_docstr_all(
    "moveaxis",
    r"""
moveaxis(source, destination) -> Tensor

See :func:`torch.moveaxis`
""",
)

add_docstr_all(
    "mul",
    r"""
mul(value) -> Tensor

See :func:`torch.mul`.
""",
)

add_docstr_all(
    "mul_",
    r"""
mul_(value) -> Tensor

In-place version of :meth:`~Tensor.mul`.
""",
)

add_docstr_all(
    "multiply",
    r"""
multiply(value) -> Tensor

See :func:`torch.multiply`.
""",
)

add_docstr_all(
    "multiply_",
    r"""
multiply_(value) -> Tensor

In-place version of :meth:`~Tensor.multiply`.
""",
)

add_docstr_all(
    "multinomial",
    r"""
multinomial(num_samples, replacement=False, *, generator=None) -> Tensor

See :func:`torch.multinomial`
""",
)

add_docstr_all(
    "mv",
    r"""
mv(vec) -> Tensor

See :func:`torch.mv`
""",
)

add_docstr_all(
    "mvlgamma",
    r"""
mvlgamma(p) -> Tensor

See :func:`torch.mvlgamma`
""",
)

add_docstr_all(
    "mvlgamma_",
    r"""
mvlgamma_(p) -> Tensor

In-place version of :meth:`~Tensor.mvlgamma`
""",
)

add_docstr_all(
    "narrow",
    r"""
narrow(dimension, start, length) -> Tensor

See :func:`torch.narrow`.
""",
)

add_docstr_all(
    "narrow_copy",
    r"""
narrow_copy(dimension, start, length) -> Tensor

See :func:`torch.narrow_copy`.
""",
)

add_docstr_all(
    "ndimension",
    r"""
ndimension() -> int

Alias for :meth:`~Tensor.dim()`
""",
)

add_docstr_all(
    "nan_to_num",
    r"""
nan_to_num(nan=0.0, posinf=None, neginf=None) -> Tensor

See :func:`torch.nan_to_num`.
""",
)

add_docstr_all(
    "nan_to_num_",
    r"""
nan_to_num_(nan=0.0, posinf=None, neginf=None) -> Tensor

In-place version of :meth:`~Tensor.nan_to_num`.
""",
)

add_docstr_all(
    "ne",
    r"""
ne(other) -> Tensor

See :func:`torch.ne`.
""",
)

add_docstr_all(
    "ne_",
    r"""
ne_(other) -> Tensor

In-place version of :meth:`~Tensor.ne`.
""",
)

add_docstr_all(
    "not_equal",
    r"""
not_equal(other) -> Tensor

See :func:`torch.not_equal`.
""",
)

add_docstr_all(
    "not_equal_",
    r"""
not_equal_(other) -> Tensor

In-place version of :meth:`~Tensor.not_equal`.
""",
)

add_docstr_all(
    "neg",
    r"""
neg() -> Tensor

See :func:`torch.neg`
""",
)

add_docstr_all(
    "negative",
    r"""
negative() -> Tensor

See :func:`torch.negative`
""",
)

add_docstr_all(
    "neg_",
    r"""
neg_() -> Tensor

In-place version of :meth:`~Tensor.neg`
""",
)

add_docstr_all(
    "negative_",
    r"""
negative_() -> Tensor

In-place version of :meth:`~Tensor.negative`
""",
)

add_docstr_all(
    "nelement",
    r"""
nelement() -> int

Alias for :meth:`~Tensor.numel`
""",
)

add_docstr_all(
    "nextafter",
    r"""
nextafter(other) -> Tensor

See :func:`torch.nextafter`
""",
)

add_docstr_all(
    "nextafter_",
    r"""
nextafter_(other) -> Tensor

In-place version of :meth:`~Tensor.nextafter`
""",
)

add_docstr_all(
    "nonzero",
    r"""
nonzero() -> LongTensor

See :func:`torch.nonzero`
""",
)

add_docstr_all(
    "nonzero_static",
    r"""
nonzero_static(input, *, size, fill_value=-1) -> Tensor

Returns a 2-D tensor where each row is the index for a non-zero value.
The returned Tensor has the same `torch.dtype` as `torch.nonzero()`.

Args:
    input (Tensor): the input tensor to count non-zero elements.

Keyword args:
    size (int): the number of non-zero elements expected to be included in the
        output tensor. The output tensor is padded with `fill_value` if `size`
        is larger than the total number of non-zero elements, and truncated if
        `size` is smaller. The size must be a non-negative integer.
    fill_value (int): the value to fill the output tensor with when `size` is larger
        than the total number of non-zero elements. Default is `-1` to represent
        invalid index.

Example::

    # Example 1: Padding
    >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
    >>> static_size = 4
    >>> torch.nonzero_static(input_tensor, size=static_size)
    tensor([[  0,   0],
            [  1,   0],
            [  1,   1],
            [ -1,  -1]], dtype=torch.int64)

    # Example 2: Truncating
    >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
    >>> static_size = 2
    >>> torch.nonzero_static(input_tensor, size=static_size)
    tensor([[  0,   0],
            [  1,   0]], dtype=torch.int64)

    # Example 3: 0 size
    >>> input_tensor = torch.tensor([10])
    >>> static_size = 0
    >>> torch.nonzero_static(input_tensor, size=static_size)
    tensor([], size=(0, 1), dtype=torch.int64)

    # Example 4: 0 rank input
    >>> input_tensor = torch.tensor(10)
    >>> static_size = 2
    >>> torch.nonzero_static(input_tensor, size=static_size)
    tensor([], size=(2, 0), dtype=torch.int64)
""",
)

add_docstr_all(
    "norm",
    r"""
norm(p=2, dim=None, keepdim=False) -> Tensor

See :func:`torch.norm`
""",
)

add_docstr_all(
    "normal_",
    r"""
normal_(mean=0, std=1, *, generator=None) -> Tensor

Fills :attr:`self` tensor with elements sampled from the normal distribution
parameterized by :attr:`mean` and :attr:`std`.
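
A minimal illustrative sketch (the samples are random, so only the shape is
shown)::

    >>> x = torch.empty(2, 3).normal_(mean=0, std=1)
    >>> x.shape
    torch.Size([2, 3])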
""",
)

add_docstr_all(
    "numel",
    r"""
numel() -> int

See :func:`torch.numel`
""",
)

add_docstr_all(
    "numpy",
    r"""
numpy(*, force=False) -> numpy.ndarray

Returns the tensor as a NumPy :class:`ndarray`.

If :attr:`force` is ``False`` (the default), the conversion
is performed only if the tensor is on the CPU, does not require grad,
does not have its conjugate bit set, and is a dtype and layout that
NumPy supports. The returned ndarray and the tensor will share their
storage, so changes to the tensor will be reflected in the ndarray
and vice versa.

If :attr:`force` is ``True`` this is equivalent to
calling ``t.detach().cpu().resolve_conj().resolve_neg().numpy()``.
If the tensor isn't on the CPU or the conjugate or negative bit is set,
the tensor won't share its storage with the returned ndarray.
Setting :attr:`force` to ``True`` can be a useful shorthand.

Args:
    force (bool): if ``True``, the ndarray may be a copy of the tensor
        instead of always sharing memory, defaults to ``False``.
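
A minimal illustrative sketch of the sharing behavior::

    >>> t = torch.ones(3)
    >>> a = t.numpy()
    >>> a[0] = 5.0  # mutating the ndarray is visible through the tensor
    >>> t
    tensor([5., 1., 1.])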
""",
)

add_docstr_all(
    "orgqr",
    r"""
orgqr(input2) -> Tensor

See :func:`torch.orgqr`
""",
)

add_docstr_all(
    "ormqr",
    r"""
ormqr(input2, input3, left=True, transpose=False) -> Tensor

See :func:`torch.ormqr`
""",
)

add_docstr_all(
    "permute",
    r"""
permute(*dims) -> Tensor

See :func:`torch.permute`
""",
)

add_docstr_all(
    "polygamma",
    r"""
polygamma(n) -> Tensor

See :func:`torch.polygamma`
""",
)

add_docstr_all(
    "polygamma_",
    r"""
polygamma_(n) -> Tensor

In-place version of :meth:`~Tensor.polygamma`
""",
)

add_docstr_all(
    "positive",
    r"""
positive() -> Tensor

See :func:`torch.positive`
""",
)

add_docstr_all(
    "pow",
    r"""
pow(exponent) -> Tensor

See :func:`torch.pow`
""",
)

add_docstr_all(
    "pow_",
    r"""
pow_(exponent) -> Tensor

In-place version of :meth:`~Tensor.pow`
""",
)

add_docstr_all(
    "float_power",
    r"""
float_power(exponent) -> Tensor

See :func:`torch.float_power`
""",
)

add_docstr_all(
    "float_power_",
    r"""
float_power_(exponent) -> Tensor

In-place version of :meth:`~Tensor.float_power`
""",
)

add_docstr_all(
    "prod",
    r"""
prod(dim=None, keepdim=False, dtype=None) -> Tensor

See :func:`torch.prod`
""",
)

add_docstr_all(
    "put_",
    r"""
put_(index, source, accumulate=False) -> Tensor

Copies the elements from :attr:`source` into the positions specified by
:attr:`index`. For the purpose of indexing, the :attr:`self` tensor is treated as if
it were a 1-D tensor.

:attr:`index` and :attr:`source` need to have the same number of elements, but not necessarily
the same shape.

If :attr:`accumulate` is ``True``, the elements in :attr:`source` are added to
:attr:`self`. If :attr:`accumulate` is ``False``, the behavior is undefined if
:attr:`index` contains duplicate elements.

Args:
    index (LongTensor): the indices into self
    source (Tensor): the tensor containing values to copy from
    accumulate (bool): whether to accumulate into self

Example::

    >>> src = torch.tensor([[4, 3, 5],
    ...                     [6, 7, 8]])
    >>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10]))
    tensor([[  4,   9,   5],
            [ 10,   7,   8]])
""",
)

add_docstr_all(
    "put",
    r"""
put(input, index, source, accumulate=False) -> Tensor

Out-of-place version of :meth:`torch.Tensor.put_`.
`input` corresponds to `self` in :meth:`torch.Tensor.put_`.
""",
)

add_docstr_all(
    "qr",
    r"""
qr(some=True) -> (Tensor, Tensor)

See :func:`torch.qr`
""",
)

add_docstr_all(
    "qscheme",
    r"""
qscheme() -> torch.qscheme

Returns the quantization scheme of a given QTensor.
""",
)

add_docstr_all(
    "quantile",
    r"""
quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor

See :func:`torch.quantile`
""",
)

add_docstr_all(
    "nanquantile",
    r"""
nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor

See :func:`torch.nanquantile`
""",
)

add_docstr_all(
    "q_scale",
    r"""
q_scale() -> float

Given a Tensor quantized by linear (affine) quantization,
returns the scale of the underlying quantizer.
""",
)

add_docstr_all(
    "q_zero_point",
    r"""
q_zero_point() -> int

Given a Tensor quantized by linear (affine) quantization,
returns the zero_point of the underlying quantizer.
""",
)

add_docstr_all(
    "q_per_channel_scales",
    r"""
q_per_channel_scales() -> Tensor

Given a Tensor quantized by linear (affine) per-channel quantization,
returns a Tensor of scales of the underlying quantizer. It has the number of
elements that matches the corresponding dimensions (from q_per_channel_axis) of
the tensor.
""",
)

add_docstr_all(
    "q_per_channel_zero_points",
    r"""
q_per_channel_zero_points() -> Tensor

Given a Tensor quantized by linear (affine) per-channel quantization,
returns a tensor of zero_points of the underlying quantizer. It has the number of
elements that matches the corresponding dimensions (from q_per_channel_axis) of
the tensor.
""",
)

add_docstr_all(
    "q_per_channel_axis",
    r"""
q_per_channel_axis() -> int

Given a Tensor quantized by linear (affine) per-channel quantization,
returns the index of dimension on which per-channel quantization is applied.
""",
)

add_docstr_all(
    "random_",
    r"""
random_(from=0, to=None, *, generator=None) -> Tensor

Fills :attr:`self` tensor with numbers sampled from the discrete uniform
distribution over ``[from, to - 1]``. If not specified, the values are usually
only bounded by :attr:`self` tensor's data type. However, for floating point
types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every
value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
will be uniform in ``[0, 2^53]``.
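
A minimal illustrative sketch (the samples are random, so only the bounds
are checked)::

    >>> x = torch.empty(100, dtype=torch.int64).random_(0, 10)
    >>> bool(((x >= 0) & (x <= 9)).all())  # samples lie in [0, 9]
    True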
""",
)

add_docstr_all(
    "rad2deg",
    r"""
rad2deg() -> Tensor

See :func:`torch.rad2deg`
""",
)

add_docstr_all(
    "rad2deg_",
    r"""
rad2deg_() -> Tensor

In-place version of :meth:`~Tensor.rad2deg`
""",
)

add_docstr_all(
    "deg2rad",
    r"""
deg2rad() -> Tensor

See :func:`torch.deg2rad`
""",
)

add_docstr_all(
    "deg2rad_",
    r"""
deg2rad_() -> Tensor

In-place version of :meth:`~Tensor.deg2rad`
""",
)

add_docstr_all(
    "ravel",
    r"""
ravel() -> Tensor

See :func:`torch.ravel`
""",
)

add_docstr_all(
    "reciprocal",
    r"""
reciprocal() -> Tensor

See :func:`torch.reciprocal`
""",
)

add_docstr_all(
    "reciprocal_",
    r"""
reciprocal_() -> Tensor

In-place version of :meth:`~Tensor.reciprocal`
""",
)

add_docstr_all(
    "record_stream",
    r"""
record_stream(stream)

Marks the tensor as having been used by this stream.  When the tensor
is deallocated, this ensures the tensor memory is not reused for another tensor
until all work queued on :attr:`stream` at the time of deallocation is
complete.

.. note::

    The caching allocator is aware of only the stream where a tensor was
    allocated. Because of this awareness, it already correctly manages the life
    cycle of tensors on only one stream. But if a tensor is used on a stream
    different from the stream of origin, the allocator might reuse the memory
    unexpectedly. Calling this method lets the allocator know which streams
    have used the tensor.

.. warning::

    This method is most suitable for use cases where you are providing a
    function that created a tensor on a side stream, and want users to be able
    to make use of the tensor without having to think carefully about stream
    safety when making use of them.  These safety guarantees come at some
    performance and predictability cost (analogous to the tradeoff between GC
    and manual memory management), so if you are in a situation where
    you manage the full lifetime of your tensors, you may consider instead
    manually managing CUDA events so that calling this method is not necessary.
    In particular, when you call this method, on later allocations the
    allocator will poll the recorded stream to see if all operations have
    completed yet; you can potentially race with side stream computation and
    non-deterministically reuse or fail to reuse memory for an allocation.

    You can safely use tensors allocated on side streams without
    :meth:`~Tensor.record_stream`; you must manually ensure that
    any non-creation stream uses of a tensor are synced back to the creation
    stream before you deallocate the tensor.  As the CUDA caching allocator
    guarantees that the memory will only be reused with the same creation stream,
    this is sufficient to ensure that writes to future reallocations of the
    memory will be delayed until non-creation stream uses are done.
    (Counterintuitively, you may observe that on the CPU side we have already
    reallocated the tensor, even though CUDA kernels on the old tensor are
    still in progress.  This is fine, because CUDA operations on the new
    tensor will appropriately wait for the old operations to complete, as they
    are all on the same stream.)

    Concretely, this looks like this::

        with torch.cuda.stream(s0):
            x = torch.zeros(N)

        s1.wait_stream(s0)
        with torch.cuda.stream(s1):
            y = some_comm_op(x)

        ... some compute on s0 ...

        # synchronize creation stream s0 to side stream s1
        # before deallocating x
        s0.wait_stream(s1)
        del x

    Note that some discretion is required when deciding when to perform
    ``s0.wait_stream(s1)``.  In particular, if we were to wait immediately
    after ``some_comm_op``, there wouldn't be any point in having the side
    stream; it would be equivalent to have run ``some_comm_op`` on ``s0``.
    Instead, the synchronization must be placed at some appropriate, later
    point in time where you expect the side stream ``s1`` to have finished
    work.  This location is typically identified via profiling, e.g., using
    Chrome traces produced by
    :meth:`torch.autograd.profiler.profile.export_chrome_trace`.  If you
    place the wait too early, work on ``s0`` will block until ``s1`` has finished,
    preventing further overlapping of communication and computation.  If you
    place the wait too late, you will use more memory than is strictly
    necessary (as you are keeping ``x`` live for longer).  For a concrete
    example of how this guidance can be applied in practice, see this post:
    `FSDP and CUDACachingAllocator
    <https://dev-discuss.pytorch.org/t/fsdp-cudacachingallocator-an-outsider-newb-perspective/1486>`_.
""",
)

add_docstr_all(
    "remainder",
    r"""
remainder(divisor) -> Tensor

See :func:`torch.remainder`
""",
)

add_docstr_all(
    "remainder_",
    r"""
remainder_(divisor) -> Tensor

In-place version of :meth:`~Tensor.remainder`
""",
)

add_docstr_all(
    "renorm",
    r"""
renorm(p, dim, maxnorm) -> Tensor

See :func:`torch.renorm`
""",
)

add_docstr_all(
    "renorm_",
    r"""
renorm_(p, dim, maxnorm) -> Tensor

In-place version of :meth:`~Tensor.renorm`
""",
)

add_docstr_all(
    "repeat",
    r"""
repeat(*repeats) -> Tensor

Repeats this tensor along the specified dimensions.

Unlike :meth:`~Tensor.expand`, this function copies the tensor's data.

.. warning::

    :meth:`~Tensor.repeat` behaves differently from
    `numpy.repeat <https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html>`_,
    but is more similar to
    `numpy.tile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html>`_.
    For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`.

Args:
    repeat (torch.Size, int..., tuple of int or list of int): The number of times to repeat this tensor along each dimension

Example::

    >>> x = torch.tensor([1, 2, 3])
    >>> x.repeat(4, 2)
    tensor([[ 1,  2,  3,  1,  2,  3],
            [ 1,  2,  3,  1,  2,  3],
            [ 1,  2,  3,  1,  2,  3],
            [ 1,  2,  3,  1,  2,  3]])
    >>> x.repeat(4, 2, 1).size()
    torch.Size([4, 2, 3])
""",
)

add_docstr_all(
    "repeat_interleave",
    r"""
repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor

See :func:`torch.repeat_interleave`.
""",
)

add_docstr_all(
    "requires_grad_",
    r"""
requires_grad_(requires_grad=True) -> Tensor

Change if autograd should record operations on this tensor: sets this tensor's
:attr:`requires_grad` attribute in-place. Returns this tensor.

:func:`requires_grad_`'s main use case is to tell autograd to begin recording
operations on a Tensor ``tensor``. If ``tensor`` has ``requires_grad=False``
(because it was obtained through a DataLoader, or required preprocessing or
initialization), ``tensor.requires_grad_()`` makes it so that autograd will
begin to record operations on ``tensor``.

Args:
    requires_grad (bool): If autograd should record operations on this tensor.
        Default: ``True``.

Example::

    >>> # Let's say we want to preprocess some saved weights and use
    >>> # the result as new weights.
    >>> saved_weights = [0.1, 0.2, 0.3, 0.25]
    >>> loaded_weights = torch.tensor(saved_weights)
    >>> weights = preprocess(loaded_weights)  # some function
    >>> weights
    tensor([-0.5503,  0.4926, -2.1158, -0.8303])

    >>> # Now, start to record operations done to weights
    >>> weights.requires_grad_()
    >>> out = weights.pow(2).sum()
    >>> out.backward()
    >>> weights.grad
    tensor([-1.1007,  0.9853, -4.2316, -1.6606])

""",
)

add_docstr_all(
    "reshape",
    r"""
reshape(*shape) -> Tensor

Returns a tensor with the same data and number of elements as :attr:`self`
but with the specified shape. This method returns a view if :attr:`shape` is
compatible with the current shape. See :meth:`torch.Tensor.view` on when it is
possible to return a view.

See :func:`torch.reshape`

Args:
    shape (tuple of ints or int...): the desired shape

""",
)

add_docstr_all(
    "reshape_as",
    r"""
reshape_as(other) -> Tensor

Returns this tensor with the same shape as :attr:`other`.
``self.reshape_as(other)`` is equivalent to ``self.reshape(other.size())``.
This method returns a view if ``other.size()`` is compatible with the current
shape. See :meth:`torch.Tensor.view` on when it is possible to return a view.

Please see :meth:`reshape` for more information about ``reshape``.

Args:
    other (:class:`torch.Tensor`): The result tensor has the same shape
        as :attr:`other`.
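
A minimal illustrative sketch::

    >>> a = torch.arange(6)
    >>> b = torch.empty(2, 3)
    >>> a.reshape_as(b)
    tensor([[0, 1, 2],
            [3, 4, 5]])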
""",
)

add_docstr_all(
    "resize_",
    r"""
resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor

Resizes :attr:`self` tensor to the specified size. If the number of elements is
larger than the current storage size, then the underlying storage is resized
to fit the new number of elements. If the number of elements is smaller, the
underlying storage is not changed. Existing elements are preserved but any new
memory is uninitialized.

.. warning::

    This is a low-level method. The storage is reinterpreted as C-contiguous,
    ignoring the current strides (unless the target size equals the current
    size, in which case the tensor is left unchanged). For most purposes, you
    will instead want to use :meth:`~Tensor.view()`, which checks for
    contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To
    change the size in-place with custom strides, see :meth:`~Tensor.set_()`.

.. note::

    If :func:`torch.use_deterministic_algorithms()` and
    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
    ``True``, new elements are initialized to prevent nondeterministic behavior
    from using the result as an input to an operation. Floating point and
    complex values are set to NaN, and integer values are set to the maximum
    value.

Args:
    sizes (torch.Size or int...): the desired size
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        Tensor. Default: ``torch.contiguous_format``. Note that memory format of
        :attr:`self` is going to be unaffected if ``self.size()`` matches ``sizes``.

Example::

    >>> x = torch.tensor([[1, 2], [3, 4], [5, 6]])
    >>> x.resize_(2, 2)
    tensor([[ 1,  2],
            [ 3,  4]])
""",
)

add_docstr_all(
    "resize_as_",
    r"""
resize_as_(tensor, memory_format=torch.contiguous_format) -> Tensor

Resizes the :attr:`self` tensor to be the same size as the specified
:attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``.

Args:
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        Tensor. Default: ``torch.contiguous_format``. Note that memory format of
        :attr:`self` is going to be unaffected if ``self.size()`` matches ``tensor.size()``.
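
A minimal illustrative sketch (only the resulting shape is shown, since any
newly allocated elements are uninitialized)::

    >>> x = torch.zeros(5)
    >>> y = torch.zeros(2, 3)
    >>> x.resize_as_(y).shape
    torch.Size([2, 3])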

4326
""",
4327
)
4328

4329
add_docstr_all(
4330
    "rot90",
4331
    r"""
4332
rot90(k, dims) -> Tensor
4333

4334
See :func:`torch.rot90`
4335
""",
4336
)
4337

4338
add_docstr_all(
4339
    "round",
4340
    r"""
4341
round(decimals=0) -> Tensor
4342

4343
See :func:`torch.round`
4344
""",
4345
)
4346

4347
add_docstr_all(
4348
    "round_",
4349
    r"""
4350
round_(decimals=0) -> Tensor
4351

4352
In-place version of :meth:`~Tensor.round`
4353
""",
4354
)
4355

4356
add_docstr_all(
4357
    "rsqrt",
4358
    r"""
4359
rsqrt() -> Tensor
4360

4361
See :func:`torch.rsqrt`
4362
""",
4363
)
4364

4365
add_docstr_all(
4366
    "rsqrt_",
4367
    r"""
4368
rsqrt_() -> Tensor
4369

4370
In-place version of :meth:`~Tensor.rsqrt`
4371
""",
4372
)
4373

4374
add_docstr_all(
4375
    "scatter_",
4376
    r"""
4377
scatter_(dim, index, src, *, reduce=None) -> Tensor
4378

4379
Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
4380
specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
4381
index is specified by its index in :attr:`src` for ``dimension != dim`` and by
4382
the corresponding value in :attr:`index` for ``dimension = dim``.
4383

4384
For a 3-D tensor, :attr:`self` is updated as::
4385

4386
    self[index[i][j][k]][j][k] = src[i][j][k]  # if dim == 0
4387
    self[i][index[i][j][k]][k] = src[i][j][k]  # if dim == 1
4388
    self[i][j][index[i][j][k]] = src[i][j][k]  # if dim == 2
4389

4390
This is the reverse operation of the manner described in :meth:`~Tensor.gather`.
4391

4392
:attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
4393
the same number of dimensions. It is also required that
4394
``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
4395
``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
4396
Note that ``index`` and ``src`` do not broadcast.
4397

4398
Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
4399
between ``0`` and ``self.size(dim) - 1`` inclusive.
4400

4401
.. warning::
4402

4403
    When indices are not unique, the behavior is non-deterministic (one of the
4404
    values from ``src`` will be picked arbitrarily) and the gradient will be
4405
    incorrect (it will be propagated to all locations in the source that
4406
    correspond to the same index)!
4407

4408
.. note::
4409

4410
    The backward pass is implemented only for ``src.shape == index.shape``.
4411

4412
Additionally accepts an optional :attr:`reduce` argument that allows
4413
specification of an optional reduction operation, which is applied to all
4414
values in the tensor :attr:`src` into :attr:`self` at the indices
4415
specified in the :attr:`index`. For each value in :attr:`src`, the reduction
4416
operation is applied to an index in :attr:`self` which is specified by
4417
its index in :attr:`src` for ``dimension != dim`` and by the corresponding
4418
value in :attr:`index` for ``dimension = dim``.
4419

4420
Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
4421
is updated as::
4422

4423
    self[index[i][j][k]][j][k] *= src[i][j][k]  # if dim == 0
4424
    self[i][index[i][j][k]][k] *= src[i][j][k]  # if dim == 1
4425
    self[i][j][index[i][j][k]] *= src[i][j][k]  # if dim == 2
4426

4427
Reducing with the addition operation is the same as using
4428
:meth:`~torch.Tensor.scatter_add_`.
4429

4430
.. warning::
4431
    The reduce argument with Tensor ``src`` is deprecated and will be removed in
4432
    a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_`
4433
    instead for more reduction options.
4434

4435
Args:
4436
    dim (int): the axis along which to index
4437
    index (LongTensor): the indices of elements to scatter, can be either empty
4438
        or of the same dimensionality as ``src``. When empty, the operation
4439
        returns ``self`` unchanged.
4440
    src (Tensor): the source element(s) to scatter.
4441

4442
Keyword args:
4443
    reduce (str, optional): reduction operation to apply, can be either
4444
        ``'add'`` or ``'multiply'``.
4445

4446
Example::
4447

4448
    >>> src = torch.arange(1, 11).reshape((2, 5))
4449
    >>> src
4450
    tensor([[ 1,  2,  3,  4,  5],
4451
            [ 6,  7,  8,  9, 10]])
4452
    >>> index = torch.tensor([[0, 1, 2, 0]])
4453
    >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
4454
    tensor([[1, 0, 0, 4, 0],
4455
            [0, 2, 0, 0, 0],
4456
            [0, 0, 3, 0, 0]])
4457
    >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
4458
    >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
4459
    tensor([[1, 2, 3, 0, 0],
4460
            [6, 7, 0, 0, 8],
4461
            [0, 0, 0, 0, 0]])
4462

4463
    >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
4464
    ...            1.23, reduce='multiply')
4465
    tensor([[2.0000, 2.0000, 2.4600, 2.0000],
4466
            [2.0000, 2.0000, 2.0000, 2.4600]])
4467
    >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
4468
    ...            1.23, reduce='add')
4469
    tensor([[2.0000, 2.0000, 3.2300, 2.0000],
4470
            [2.0000, 2.0000, 2.0000, 3.2300]])
4471

4472
.. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor:
4473
   :noindex:
4474

4475
Writes the value from :attr:`value` into :attr:`self` at the indices
4476
specified in the :attr:`index` tensor.  This operation is equivalent to the previous version,
4477
with the :attr:`src` tensor filled entirely with :attr:`value`.
4478

4479
Args:
4480
    dim (int): the axis along which to index
4481
    index (LongTensor): the indices of elements to scatter, can be either empty
4482
        or of the same dimensionality as ``src``. When empty, the operation
4483
        returns ``self`` unchanged.
4484
    value (Scalar): the value to scatter.
4485

4486
Keyword args:
4487
    reduce (str, optional): reduction operation to apply, can be either
4488
        ``'add'`` or ``'multiply'``.
4489

4490
Example::
4491

4492
    >>> index = torch.tensor([[0, 1]])
4493
    >>> value = 2
4494
    >>> torch.zeros(3, 5).scatter_(0, index, value)
4495
    tensor([[2., 0., 0., 0., 0.],
4496
            [0., 2., 0., 0., 0.],
4497
            [0., 0., 0., 0., 0.]])
4498
""",
4499
)
4500

4501
add_docstr_all(
4502
    "scatter_add_",
4503
    r"""
4504
scatter_add_(dim, index, src) -> Tensor
4505

4506
Adds all values from the tensor :attr:`src` into :attr:`self` at the indices
4507
specified in the :attr:`index` tensor in a similar fashion as
4508
:meth:`~torch.Tensor.scatter_`. For each value in :attr:`src`, it is added to
4509
an index in :attr:`self` which is specified by its index in :attr:`src`
4510
for ``dimension != dim`` and by the corresponding value in :attr:`index` for
4511
``dimension = dim``.
4512

4513
For a 3-D tensor, :attr:`self` is updated as::
4514

4515
    self[index[i][j][k]][j][k] += src[i][j][k]  # if dim == 0
4516
    self[i][index[i][j][k]][k] += src[i][j][k]  # if dim == 1
4517
    self[i][j][index[i][j][k]] += src[i][j][k]  # if dim == 2
4518

4519
:attr:`self`, :attr:`index` and :attr:`src` should have same number of
4520
dimensions. It is also required that ``index.size(d) <= src.size(d)`` for all
4521
dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions
4522
``d != dim``. Note that ``index`` and ``src`` do not broadcast.
4523

4524
Note:
4525
    {forward_reproducibility_note}
4526

4527
.. note::
4528

4529
    The backward pass is implemented only for ``src.shape == index.shape``.
4530

4531
Args:
4532
    dim (int): the axis along which to index
4533
    index (LongTensor): the indices of elements to scatter and add, can be
4534
        either empty or of the same dimensionality as ``src``. When empty, the
4535
        operation returns ``self`` unchanged.
4536
    src (Tensor): the source elements to scatter and add
4537

4538
Example::
4539

4540
    >>> src = torch.ones((2, 5))
4541
    >>> index = torch.tensor([[0, 1, 2, 0, 0]])
4542
    >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
4543
    tensor([[1., 0., 0., 1., 1.],
4544
            [0., 1., 0., 0., 0.],
4545
            [0., 0., 1., 0., 0.]])
4546
    >>> index = torch.tensor([[0, 1, 2, 0, 0], [0, 1, 2, 2, 2]])
4547
    >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
4548
    tensor([[2., 0., 0., 1., 1.],
4549
            [0., 2., 0., 0., 0.],
4550
            [0., 0., 2., 1., 1.]])
4551

4552
""".format(**reproducibility_notes),
4553
)
4554

4555
add_docstr_all(
4556
    "scatter_reduce_",
4557
    r"""
4558
scatter_reduce_(dim, index, src, reduce, *, include_self=True) -> Tensor
4559

4560
Reduces all values from the :attr:`src` tensor to the indices specified in
4561
the :attr:`index` tensor in the :attr:`self` tensor using the applied reduction
4562
defined via the :attr:`reduce` argument (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`,
4563
:obj:`"amax"`, :obj:`"amin"`). For each value in :attr:`src`, it is reduced to an
4564
index in :attr:`self` which is specified by its index in :attr:`src` for
4565
``dimension != dim`` and by the corresponding value in :attr:`index` for
4566
``dimension = dim``. If :obj:`include_self="True"`, the values in the :attr:`self`
4567
tensor are included in the reduction.
4568

:attr:`self`, :attr:`index` and :attr:`src` should all have
the same number of dimensions. It is also required that
``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
Note that ``index`` and ``src`` do not broadcast.

For a 3-D tensor with :obj:`reduce="sum"` and :obj:`include_self=True` the
output is given as::

    self[index[i][j][k]][j][k] += src[i][j][k]  # if dim == 0
    self[i][index[i][j][k]][k] += src[i][j][k]  # if dim == 1
    self[i][j][index[i][j][k]] += src[i][j][k]  # if dim == 2

Note:
    {forward_reproducibility_note}

.. note::

    The backward pass is implemented only for ``src.shape == index.shape``.

.. warning::

    This function is in beta and may change in the near future.

Args:
    dim (int): the axis along which to index
    index (LongTensor): the indices of elements to scatter and reduce.
    src (Tensor): the source elements to scatter and reduce
    reduce (str): the reduction operation to apply for non-unique indices
        (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
    include_self (bool): whether elements from the :attr:`self` tensor are
        included in the reduction

Example::

    >>> src = torch.tensor([1., 2., 3., 4., 5., 6.])
    >>> index = torch.tensor([0, 1, 0, 1, 2, 1])
    >>> input = torch.tensor([1., 2., 3., 4.])
    >>> input.scatter_reduce(0, index, src, reduce="sum")
    tensor([5., 14., 8., 4.])
    >>> input.scatter_reduce(0, index, src, reduce="sum", include_self=False)
    tensor([4., 12., 5., 4.])
    >>> input2 = torch.tensor([5., 4., 3., 2.])
    >>> input2.scatter_reduce(0, index, src, reduce="amax")
    tensor([5., 6., 5., 2.])
    >>> input2.scatter_reduce(0, index, src, reduce="amax", include_self=False)
    tensor([3., 6., 5., 2.])


""".format(**reproducibility_notes),
)

add_docstr_all(
    "select",
    r"""
select(dim, index) -> Tensor

See :func:`torch.select`
""",
)

add_docstr_all(
    "select_scatter",
    r"""
select_scatter(src, dim, index) -> Tensor

See :func:`torch.select_scatter`
""",
)

add_docstr_all(
    "slice_scatter",
    r"""
slice_scatter(src, dim=0, start=None, end=None, step=1) -> Tensor

See :func:`torch.slice_scatter`
""",
)

add_docstr_all(
    "set_",
    r"""
set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor

Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
:attr:`self` tensor will share the same storage and have the same size and
strides as :attr:`source`. Changes to elements in one tensor will be reflected
in the other.

If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
storage, offset, size, and stride.

Args:
    source (Tensor or Storage): the tensor or storage to use
    storage_offset (int, optional): the offset in the storage
    size (torch.Size, optional): the desired size. Defaults to the size of the source.
    stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
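
Example (a small sketch of the sharing behavior when :attr:`source` is a tensor)::

    >>> a = torch.zeros(2, 3)
    >>> b = torch.ones(6)
    >>> a.set_(b)  # a now shares b's storage and adopts b's size and strides
    tensor([1., 1., 1., 1., 1., 1.])
    >>> b[0] = 7.
    >>> a[0]  # the write through b is visible through a
    tensor(7.)
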
""",
)

add_docstr_all(
    "sigmoid",
    r"""
sigmoid() -> Tensor

See :func:`torch.sigmoid`
""",
)

add_docstr_all(
    "sigmoid_",
    r"""
sigmoid_() -> Tensor

In-place version of :meth:`~Tensor.sigmoid`
""",
)

add_docstr_all(
    "logit",
    r"""
logit() -> Tensor

See :func:`torch.logit`
""",
)

add_docstr_all(
    "logit_",
    r"""
logit_() -> Tensor

In-place version of :meth:`~Tensor.logit`
""",
)

add_docstr_all(
    "sign",
    r"""
sign() -> Tensor

See :func:`torch.sign`
""",
)

add_docstr_all(
    "sign_",
    r"""
sign_() -> Tensor

In-place version of :meth:`~Tensor.sign`
""",
)

add_docstr_all(
    "signbit",
    r"""
signbit() -> Tensor

See :func:`torch.signbit`
""",
)

add_docstr_all(
    "sgn",
    r"""
sgn() -> Tensor

See :func:`torch.sgn`
""",
)

add_docstr_all(
    "sgn_",
    r"""
sgn_() -> Tensor

In-place version of :meth:`~Tensor.sgn`
""",
)

add_docstr_all(
    "sin",
    r"""
sin() -> Tensor

See :func:`torch.sin`
""",
)

add_docstr_all(
    "sin_",
    r"""
sin_() -> Tensor

In-place version of :meth:`~Tensor.sin`
""",
)

add_docstr_all(
    "sinc",
    r"""
sinc() -> Tensor

See :func:`torch.sinc`
""",
)

add_docstr_all(
    "sinc_",
    r"""
sinc_() -> Tensor

In-place version of :meth:`~Tensor.sinc`
""",
)

add_docstr_all(
    "sinh",
    r"""
sinh() -> Tensor

See :func:`torch.sinh`
""",
)

add_docstr_all(
    "sinh_",
    r"""
sinh_() -> Tensor

In-place version of :meth:`~Tensor.sinh`
""",
)

add_docstr_all(
    "size",
    r"""
size(dim=None) -> torch.Size or int

Returns the size of the :attr:`self` tensor. If ``dim`` is not specified,
the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`.
If ``dim`` is specified, returns an int holding the size of that dimension.

Args:
  dim (int, optional): The dimension for which to retrieve the size.

Example::

    >>> t = torch.empty(3, 4, 5)
    >>> t.size()
    torch.Size([3, 4, 5])
    >>> t.size(dim=1)
    4

""",
)

add_docstr_all(
    "shape",
    r"""
shape() -> torch.Size

Returns the size of the :attr:`self` tensor. Alias for :attr:`size`.

See also :meth:`Tensor.size`.

Example::

    >>> t = torch.empty(3, 4, 5)
    >>> t.size()
    torch.Size([3, 4, 5])
    >>> t.shape
    torch.Size([3, 4, 5])

""",
)

add_docstr_all(
    "sort",
    r"""
sort(dim=-1, descending=False) -> (Tensor, LongTensor)

See :func:`torch.sort`
""",
)

add_docstr_all(
    "msort",
    r"""
msort() -> Tensor

See :func:`torch.msort`
""",
)

add_docstr_all(
    "argsort",
    r"""
argsort(dim=-1, descending=False) -> LongTensor

See :func:`torch.argsort`
""",
)

add_docstr_all(
    "sparse_dim",
    r"""
sparse_dim() -> int

Return the number of sparse dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.

.. note::
  Returns ``0`` if :attr:`self` is not a sparse tensor.

See also :meth:`Tensor.dense_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
""",
)

add_docstr_all(
    "sparse_resize_",
    r"""
sparse_resize_(size, sparse_dim, dense_dim) -> Tensor

Resizes :attr:`self` :ref:`sparse tensor <sparse-docs>` to the desired
size and the number of sparse and dense dimensions.

.. note::
  If the number of specified elements in :attr:`self` is zero, then
  :attr:`size`, :attr:`sparse_dim`, and :attr:`dense_dim` can be any
  size and positive integers such that ``len(size) == sparse_dim +
  dense_dim``.

  If :attr:`self` specifies one or more elements, however, then each
  dimension in :attr:`size` must not be smaller than the corresponding
  dimension of :attr:`self`, :attr:`sparse_dim` must equal the number
  of sparse dimensions in :attr:`self`, and :attr:`dense_dim` must
  equal the number of dense dimensions in :attr:`self`.

.. warning::
  Throws an error if :attr:`self` is not a sparse tensor.

Args:
    size (torch.Size): the desired size. If :attr:`self` is a non-empty
      sparse tensor, the desired size cannot be smaller than the
      original size.
    sparse_dim (int): the number of sparse dimensions
    dense_dim (int): the number of dense dimensions
""",
)

add_docstr_all(
    "sparse_resize_and_clear_",
    r"""
sparse_resize_and_clear_(size, sparse_dim, dense_dim) -> Tensor

Removes all specified elements from a :ref:`sparse tensor
<sparse-docs>` :attr:`self` and resizes :attr:`self` to the desired
size and the number of sparse and dense dimensions.

.. warning::
  Throws an error if :attr:`self` is not a sparse tensor.

Args:
    size (torch.Size): the desired size.
    sparse_dim (int): the number of sparse dimensions
    dense_dim (int): the number of dense dimensions
""",
)

add_docstr_all(
    "sqrt",
    r"""
sqrt() -> Tensor

See :func:`torch.sqrt`
""",
)

add_docstr_all(
    "sqrt_",
    r"""
sqrt_() -> Tensor

In-place version of :meth:`~Tensor.sqrt`
""",
)

add_docstr_all(
    "square",
    r"""
square() -> Tensor

See :func:`torch.square`
""",
)

add_docstr_all(
    "square_",
    r"""
square_() -> Tensor

In-place version of :meth:`~Tensor.square`
""",
)

add_docstr_all(
    "squeeze",
    r"""
squeeze(dim=None) -> Tensor

See :func:`torch.squeeze`
""",
)

add_docstr_all(
    "squeeze_",
    r"""
squeeze_(dim=None) -> Tensor

In-place version of :meth:`~Tensor.squeeze`
""",
)

add_docstr_all(
    "std",
    r"""
std(dim=None, *, correction=1, keepdim=False) -> Tensor

See :func:`torch.std`
""",
)

add_docstr_all(
    "storage_offset",
    r"""
storage_offset() -> int

Returns :attr:`self` tensor's offset in the underlying storage in terms of
number of storage elements (not bytes).

Example::

    >>> x = torch.tensor([1, 2, 3, 4, 5])
    >>> x.storage_offset()
    0
    >>> x[3:].storage_offset()
    3

""",
)

add_docstr_all(
    "untyped_storage",
    r"""
untyped_storage() -> torch.UntypedStorage

Returns the underlying :class:`UntypedStorage`.
""",
)

add_docstr_all(
    "stride",
    r"""
stride(dim=None) -> tuple or int

Returns the stride of :attr:`self` tensor.

Stride is the jump necessary to go from one element to the next one in the
specified dimension :attr:`dim`. A tuple of all strides is returned when no
argument is passed in. Otherwise, an integer value is returned as the stride in
the particular dimension :attr:`dim`.

Args:
    dim (int, optional): the desired dimension in which stride is required

Example::

    >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
    >>> x.stride()
    (5, 1)
    >>> x.stride(0)
    5
    >>> x.stride(-1)
    1

""",
)

add_docstr_all(
    "sub",
    r"""
sub(other, *, alpha=1) -> Tensor

See :func:`torch.sub`.
""",
)

add_docstr_all(
    "sub_",
    r"""
sub_(other, *, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.sub`
""",
)

add_docstr_all(
    "subtract",
    r"""
subtract(other, *, alpha=1) -> Tensor

See :func:`torch.subtract`.
""",
)

add_docstr_all(
    "subtract_",
    r"""
subtract_(other, *, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.subtract`.
""",
)

add_docstr_all(
    "sum",
    r"""
sum(dim=None, keepdim=False, dtype=None) -> Tensor

See :func:`torch.sum`
""",
)

add_docstr_all(
    "nansum",
    r"""
nansum(dim=None, keepdim=False, dtype=None) -> Tensor

See :func:`torch.nansum`
""",
)

add_docstr_all(
    "svd",
    r"""
svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor)

See :func:`torch.svd`
""",
)

add_docstr_all(
    "swapdims",
    r"""
swapdims(dim0, dim1) -> Tensor

See :func:`torch.swapdims`
""",
)

add_docstr_all(
    "swapdims_",
    r"""
swapdims_(dim0, dim1) -> Tensor

In-place version of :meth:`~Tensor.swapdims`
""",
)

add_docstr_all(
    "swapaxes",
    r"""
swapaxes(axis0, axis1) -> Tensor

See :func:`torch.swapaxes`
""",
)

add_docstr_all(
    "swapaxes_",
    r"""
swapaxes_(axis0, axis1) -> Tensor

In-place version of :meth:`~Tensor.swapaxes`
""",
)

add_docstr_all(
    "t",
    r"""
t() -> Tensor

See :func:`torch.t`
""",
)

add_docstr_all(
    "t_",
    r"""
t_() -> Tensor

In-place version of :meth:`~Tensor.t`
""",
)

add_docstr_all(
    "tile",
    r"""
tile(dims) -> Tensor

See :func:`torch.tile`
""",
)

add_docstr_all(
    "to",
    r"""
to(*args, **kwargs) -> Tensor

Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
inferred from the arguments of ``self.to(*args, **kwargs)``.

.. note::

    If the ``self`` Tensor already
    has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
    Otherwise, the returned tensor is a copy of ``self`` with the desired
    :class:`torch.dtype` and :class:`torch.device`.

Here are the ways to call ``to``:

.. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
   :noindex:

    Returns a Tensor with the specified :attr:`dtype`

    Args:
        {memory_format}

.. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
   :noindex:

    Returns a Tensor with the specified :attr:`device` and (optional)
    :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
    When :attr:`non_blocking` is ``True``, tries to convert asynchronously with
    respect to the host if possible, e.g., converting a CPU Tensor with pinned
    memory to a CUDA Tensor.
    When :attr:`copy` is set, a new Tensor is created even when the Tensor
    already matches the desired conversion.

    Args:
        {memory_format}

.. method:: to(other, non_blocking=False, copy=False) -> Tensor
   :noindex:

    Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
    the Tensor :attr:`other`. When :attr:`non_blocking` is ``True``, tries to
    convert asynchronously with respect to the host if possible, e.g., converting
    a CPU Tensor with pinned memory to a CUDA Tensor.
    When :attr:`copy` is set, a new Tensor is created even when the Tensor
    already matches the desired conversion.

Example::

    >>> tensor = torch.randn(2, 2)  # Initially dtype=float32, device=cpu
    >>> tensor.to(torch.float64)
    tensor([[-0.5044,  0.0005],
            [ 0.3310, -0.0584]], dtype=torch.float64)

    >>> cuda0 = torch.device('cuda:0')
    >>> tensor.to(cuda0)
    tensor([[-0.5044,  0.0005],
            [ 0.3310, -0.0584]], device='cuda:0')

    >>> tensor.to(cuda0, dtype=torch.float64)
    tensor([[-0.5044,  0.0005],
            [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')

    >>> other = torch.randn((), dtype=torch.float64, device=cuda0)
    >>> tensor.to(other, non_blocking=True)
    tensor([[-0.5044,  0.0005],
            [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
""".format(**common_args),
)

add_docstr_all(
    "byte",
    r"""
byte(memory_format=torch.preserve_format) -> Tensor

``self.byte()`` is equivalent to ``self.to(torch.uint8)``. See :func:`to`.

Args:
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "bool",
    r"""
bool(memory_format=torch.preserve_format) -> Tensor

``self.bool()`` is equivalent to ``self.to(torch.bool)``. See :func:`to`.

Args:
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "char",
    r"""
char(memory_format=torch.preserve_format) -> Tensor

``self.char()`` is equivalent to ``self.to(torch.int8)``. See :func:`to`.

Args:
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "bfloat16",
    r"""
bfloat16(memory_format=torch.preserve_format) -> Tensor

``self.bfloat16()`` is equivalent to ``self.to(torch.bfloat16)``. See :func:`to`.

Args:
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "double",
    r"""
double(memory_format=torch.preserve_format) -> Tensor

``self.double()`` is equivalent to ``self.to(torch.float64)``. See :func:`to`.

Args:
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "float",
    r"""
float(memory_format=torch.preserve_format) -> Tensor

``self.float()`` is equivalent to ``self.to(torch.float32)``. See :func:`to`.

Args:
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "cdouble",
    r"""
cdouble(memory_format=torch.preserve_format) -> Tensor

``self.cdouble()`` is equivalent to ``self.to(torch.complex128)``. See :func:`to`.

Args:
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "cfloat",
    r"""
cfloat(memory_format=torch.preserve_format) -> Tensor

``self.cfloat()`` is equivalent to ``self.to(torch.complex64)``. See :func:`to`.

Args:
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "chalf",
    r"""
chalf(memory_format=torch.preserve_format) -> Tensor

``self.chalf()`` is equivalent to ``self.to(torch.complex32)``. See :func:`to`.

Args:
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "half",
    r"""
half(memory_format=torch.preserve_format) -> Tensor

``self.half()`` is equivalent to ``self.to(torch.float16)``. See :func:`to`.

Args:
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "int",
    r"""
int(memory_format=torch.preserve_format) -> Tensor

``self.int()`` is equivalent to ``self.to(torch.int32)``. See :func:`to`.

Args:
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "int_repr",
    r"""
int_repr() -> Tensor

Given a quantized Tensor,
``self.int_repr()`` returns a CPU Tensor with ``uint8`` as its data type that
stores the underlying ``uint8`` values of the given Tensor.
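
A small illustrative sketch (the integer values follow from
``round(x / scale) + zero_point``)::

    >>> x = torch.quantize_per_tensor(
    ...         torch.tensor([1.0, 2.0]), scale=0.1, zero_point=0, dtype=torch.quint8)
    >>> x.int_repr()
    tensor([10, 20], dtype=torch.uint8)
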
""",
)


add_docstr_all(
    "long",
    r"""
long(memory_format=torch.preserve_format) -> Tensor

``self.long()`` is equivalent to ``self.to(torch.int64)``. See :func:`to`.

Args:
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "short",
    r"""
short(memory_format=torch.preserve_format) -> Tensor

``self.short()`` is equivalent to ``self.to(torch.int16)``. See :func:`to`.

Args:
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "take",
    r"""
take(indices) -> Tensor

See :func:`torch.take`
""",
)

add_docstr_all(
    "take_along_dim",
    r"""
take_along_dim(indices, dim) -> Tensor

See :func:`torch.take_along_dim`
""",
)

add_docstr_all(
    "tan",
    r"""
tan() -> Tensor

See :func:`torch.tan`
""",
)

add_docstr_all(
    "tan_",
    r"""
tan_() -> Tensor

In-place version of :meth:`~Tensor.tan`
""",
)

add_docstr_all(
    "tanh",
    r"""
tanh() -> Tensor

See :func:`torch.tanh`
""",
)

add_docstr_all(
    "softmax",
    r"""
softmax(dim) -> Tensor

Alias for :func:`torch.nn.functional.softmax`.
""",
)

add_docstr_all(
    "tanh_",
    r"""
tanh_() -> Tensor

In-place version of :meth:`~Tensor.tanh`
""",
)

add_docstr_all(
    "tolist",
    r"""
tolist() -> list or number

Returns the tensor as a (nested) list. For scalars, a standard
Python number is returned, just like with :meth:`~Tensor.item`.
Tensors are automatically moved to the CPU first if necessary.

This operation is not differentiable.

Examples::

    >>> a = torch.randn(2, 2)
    >>> a.tolist()
    [[0.012766935862600803, 0.5415473580360413],
     [-0.08909505605697632, 0.7729271650314331]]
    >>> a[0,0].tolist()
    0.012766935862600803
""",
)

add_docstr_all(
    "topk",
    r"""
topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor)

See :func:`torch.topk`
""",
)

add_docstr_all(
    "to_dense",
    r"""
to_dense(dtype=None, *, masked_grad=True) -> Tensor

Creates a strided copy of :attr:`self` if :attr:`self` is not a strided tensor, otherwise returns :attr:`self`.

Keyword args:
    {dtype}
    masked_grad (bool, optional): If set to ``True`` (default) and
      :attr:`self` has a sparse layout then the backward of
      :meth:`to_dense` returns ``grad.sparse_mask(self)``.

Example::

    >>> s = torch.sparse_coo_tensor(
    ...        torch.tensor([[1, 1],
    ...                      [0, 2]]),
    ...        torch.tensor([9, 10]),
    ...        size=(3, 3))
    >>> s.to_dense()
    tensor([[ 0,  0,  0],
            [ 9,  0, 10],
            [ 0,  0,  0]])
""",
)

add_docstr_all(
    "to_sparse",
    r"""
to_sparse(sparseDims) -> Tensor

Returns a sparse copy of the tensor.  PyTorch supports sparse tensors in
:ref:`coordinate format <sparse-coo-docs>`.

Args:
    sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor

Example::

    >>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
    >>> d
    tensor([[ 0,  0,  0],
            [ 9,  0, 10],
            [ 0,  0,  0]])
    >>> d.to_sparse()
    tensor(indices=tensor([[1, 1],
                           [0, 2]]),
           values=tensor([ 9, 10]),
           size=(3, 3), nnz=2, layout=torch.sparse_coo)
    >>> d.to_sparse(1)
    tensor(indices=tensor([[1]]),
           values=tensor([[ 9,  0, 10]]),
           size=(3, 3), nnz=1, layout=torch.sparse_coo)

.. method:: to_sparse(*, layout=None, blocksize=None, dense_dim=None) -> Tensor
   :noindex:

Returns a sparse tensor with the specified layout and blocksize.  If
:attr:`self` is strided, the number of dense dimensions can be
specified, and a hybrid sparse tensor will be created, with
`dense_dim` dense dimensions and `self.dim() - 2 - dense_dim` batch
dimensions.

.. note:: If the :attr:`self` layout and blocksize parameters match
          with the specified layout and blocksize, return
          :attr:`self`. Otherwise, return a sparse tensor copy of
          :attr:`self`.

Args:

    layout (:class:`torch.layout`, optional): The desired sparse
      layout. One of ``torch.sparse_coo``, ``torch.sparse_csr``,
      ``torch.sparse_csc``, ``torch.sparse_bsr``, or
      ``torch.sparse_bsc``. Default: if ``None``,
      ``torch.sparse_coo``.

    blocksize (list, tuple, :class:`torch.Size`, optional): Block size
      of the resulting BSR or BSC tensor. For other layouts,
      specifying the block size that is not ``None`` will result in a
      RuntimeError exception.  A block size must be a tuple of length
      two such that its items evenly divide the two sparse dimensions.

    dense_dim (int, optional): Number of dense dimensions of the
      resulting CSR, CSC, BSR or BSC tensor.  This argument should be
      used only if :attr:`self` is a strided tensor, and must be a
      value between 0 and dimension of :attr:`self` tensor minus two.

Example::

    >>> x = torch.tensor([[1, 0], [0, 0], [2, 3]])
    >>> x.to_sparse(layout=torch.sparse_coo)
    tensor(indices=tensor([[0, 2, 2],
                           [0, 0, 1]]),
           values=tensor([1, 2, 3]),
           size=(3, 2), nnz=3, layout=torch.sparse_coo)
    >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(1, 2))
    tensor(crow_indices=tensor([0, 1, 1, 2]),
           col_indices=tensor([0, 0]),
           values=tensor([[[1, 0]],
                          [[2, 3]]]), size=(3, 2), nnz=2, layout=torch.sparse_bsr)
    >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 1))
    RuntimeError: Tensor size(-2) 3 needs to be divisible by blocksize[0] 2
    >>> x.to_sparse(layout=torch.sparse_csr, blocksize=(3, 1))
    RuntimeError: to_sparse for Strided to SparseCsr conversion does not use specified blocksize

    >>> x = torch.tensor([[[1], [0]], [[0], [0]], [[2], [3]]])
    >>> x.to_sparse(layout=torch.sparse_csr, dense_dim=1)
    tensor(crow_indices=tensor([0, 1, 1, 3]),
           col_indices=tensor([0, 0, 1]),
           values=tensor([[1],
                          [2],
                          [3]]), size=(3, 2, 1), nnz=3, layout=torch.sparse_csr)

""",
)

add_docstr_all(
    "to_sparse_csr",
    r"""
to_sparse_csr(dense_dim=None) -> Tensor

Convert a tensor to compressed row storage format (CSR).  Except for
strided tensors, only 2D tensors are supported.  If :attr:`self` is
strided, then the number of dense dimensions can be specified, and a
hybrid CSR tensor will be created, with `dense_dim` dense dimensions
and `self.dim() - 2 - dense_dim` batch dimensions.

Args:

    dense_dim (int, optional): Number of dense dimensions of the
      resulting CSR tensor.  This argument should be used only if
      :attr:`self` is a strided tensor, and must be a value between 0
      and dimension of :attr:`self` tensor minus two.

Example::

    >>> dense = torch.randn(5, 5)
    >>> sparse = dense.to_sparse_csr()
    >>> sparse._nnz()
    25

    >>> dense = torch.zeros(3, 3, 1, 1)
    >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
    >>> dense.to_sparse_csr(dense_dim=2)
    tensor(crow_indices=tensor([0, 1, 2, 3]),
           col_indices=tensor([0, 2, 1]),
           values=tensor([[[1.]],

                          [[1.]],

                          [[1.]]]), size=(3, 3, 1, 1), nnz=3,
           layout=torch.sparse_csr)

""",
)

add_docstr_all(
    "to_sparse_csc",
    r"""
to_sparse_csc(dense_dim=None) -> Tensor

Convert a tensor to compressed column storage (CSC) format.  Except
for strided tensors, only 2D tensors are supported.  If :attr:`self`
is strided, then the number of dense dimensions can be specified,
and a hybrid CSC tensor will be created, with `dense_dim` dense
dimensions and `self.dim() - 2 - dense_dim` batch dimensions.

Args:

    dense_dim (int, optional): Number of dense dimensions of the
      resulting CSC tensor.  This argument should be used only if
      :attr:`self` is a strided tensor, and must be a value between 0
      and dimension of :attr:`self` tensor minus two.

Example::

    >>> dense = torch.randn(5, 5)
    >>> sparse = dense.to_sparse_csc()
    >>> sparse._nnz()
    25

    >>> dense = torch.zeros(3, 3, 1, 1)
    >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
    >>> dense.to_sparse_csc(dense_dim=2)
    tensor(ccol_indices=tensor([0, 1, 2, 3]),
           row_indices=tensor([0, 2, 1]),
           values=tensor([[[1.]],

                          [[1.]],

                          [[1.]]]), size=(3, 3, 1, 1), nnz=3,
           layout=torch.sparse_csc)

""",
)

add_docstr_all(
    "to_sparse_bsr",
    r"""
to_sparse_bsr(blocksize, dense_dim=None) -> Tensor

Convert a tensor to a block sparse row (BSR) storage format of given
blocksize.  If :attr:`self` is strided, then the number of dense
dimensions can be specified, and a hybrid BSR tensor will be
created, with `dense_dim` dense dimensions and `self.dim() - 2 -
dense_dim` batch dimensions.

Args:

    blocksize (list, tuple, :class:`torch.Size`, optional): Block size
      of the resulting BSR tensor. A block size must be a tuple of
      length two such that its items evenly divide the two sparse
      dimensions.

    dense_dim (int, optional): Number of dense dimensions of the
      resulting BSR tensor.  This argument should be used only if
      :attr:`self` is a strided tensor, and must be a value between 0
      and dimension of :attr:`self` tensor minus two.

Example::

    >>> dense = torch.randn(10, 10)
    >>> sparse = dense.to_sparse_csr()
    >>> sparse_bsr = sparse.to_sparse_bsr((5, 5))
    >>> sparse_bsr.col_indices()
    tensor([0, 1, 0, 1])

    >>> dense = torch.zeros(4, 3, 1)
    >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
    >>> dense.to_sparse_bsr((2, 1), 1)
    tensor(crow_indices=tensor([0, 2, 3]),
           col_indices=tensor([0, 2, 1]),
           values=tensor([[[[1.]],

                           [[1.]]],


                          [[[1.]],

                           [[1.]]],


                          [[[1.]],

                           [[1.]]]]), size=(4, 3, 1), nnz=3,
           layout=torch.sparse_bsr)

""",
)

add_docstr_all(
    "to_sparse_bsc",
    r"""
to_sparse_bsc(blocksize, dense_dim=None) -> Tensor

Convert a tensor to a block sparse column (BSC) storage format of
given blocksize.  If :attr:`self` is strided, then the number of
dense dimensions can be specified, and a hybrid BSC tensor will be
created, with `dense_dim` dense dimensions and `self.dim() - 2 -
dense_dim` batch dimensions.

Args:

    blocksize (list, tuple, :class:`torch.Size`, optional): Block size
      of the resulting BSC tensor. A block size must be a tuple of
      length two such that its items evenly divide the two sparse
      dimensions.

    dense_dim (int, optional): Number of dense dimensions of the
      resulting BSC tensor.  This argument should be used only if
      :attr:`self` is a strided tensor, and must be a value between 0
      and dimension of :attr:`self` tensor minus two.

Example::

    >>> dense = torch.randn(10, 10)
    >>> sparse = dense.to_sparse_csr()
    >>> sparse_bsc = sparse.to_sparse_bsc((5, 5))
    >>> sparse_bsc.row_indices()
    tensor([0, 1, 0, 1])

    >>> dense = torch.zeros(4, 3, 1)
    >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
    >>> dense.to_sparse_bsc((2, 1), 1)
    tensor(ccol_indices=tensor([0, 1, 2, 3]),
           row_indices=tensor([0, 1, 0]),
           values=tensor([[[[1.]],

                           [[1.]]],


                          [[[1.]],

                           [[1.]]],


                          [[[1.]],

                           [[1.]]]]), size=(4, 3, 1), nnz=3,
           layout=torch.sparse_bsc)

""",
)

add_docstr_all(
    "to_mkldnn",
    r"""
to_mkldnn() -> Tensor

Returns a copy of the tensor in ``torch.mkldnn`` layout.
""",
)

add_docstr_all(
    "trace",
    r"""
trace() -> Tensor

See :func:`torch.trace`
""",
)

add_docstr_all(
    "transpose",
    r"""
transpose(dim0, dim1) -> Tensor

See :func:`torch.transpose`
""",
)

add_docstr_all(
    "transpose_",
    r"""
transpose_(dim0, dim1) -> Tensor

In-place version of :meth:`~Tensor.transpose`
""",
)

add_docstr_all(
    "triangular_solve",
    r"""
triangular_solve(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)

See :func:`torch.triangular_solve`
""",
)

add_docstr_all(
    "tril",
    r"""
tril(diagonal=0) -> Tensor

See :func:`torch.tril`
""",
)

add_docstr_all(
    "tril_",
    r"""
tril_(diagonal=0) -> Tensor

In-place version of :meth:`~Tensor.tril`
""",
)

add_docstr_all(
    "triu",
    r"""
triu(diagonal=0) -> Tensor

See :func:`torch.triu`
""",
)

add_docstr_all(
    "triu_",
    r"""
triu_(diagonal=0) -> Tensor

In-place version of :meth:`~Tensor.triu`
""",
)

add_docstr_all(
    "true_divide",
    r"""
true_divide(value) -> Tensor

See :func:`torch.true_divide`
""",
)

add_docstr_all(
    "true_divide_",
    r"""
true_divide_(value) -> Tensor

In-place version of :meth:`~Tensor.true_divide`
""",
)

add_docstr_all(
    "trunc",
    r"""
trunc() -> Tensor

See :func:`torch.trunc`
""",
)

add_docstr_all(
    "fix",
    r"""
fix() -> Tensor

See :func:`torch.fix`.
""",
)

add_docstr_all(
    "trunc_",
    r"""
trunc_() -> Tensor

In-place version of :meth:`~Tensor.trunc`
""",
)

add_docstr_all(
    "fix_",
    r"""
fix_() -> Tensor

In-place version of :meth:`~Tensor.fix`
""",
)

add_docstr_all(
    "type",
    r"""
type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor

Returns the type if `dtype` is not provided, else casts this object to
the specified type.

If this is already of the correct type, no copy is performed and the
original object is returned.

Args:
    dtype (dtype or string): The desired type
    non_blocking (bool): If ``True``, and the source is in pinned memory
        and destination is on the GPU or vice versa, the copy is performed
        asynchronously with respect to the host. Otherwise, the argument
        has no effect.
    **kwargs: For compatibility, may contain the key ``async`` in place of
        the ``non_blocking`` argument. The ``async`` arg is deprecated.
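
A short illustrative sketch::

    >>> t = torch.ones(2)
    >>> t.type()  # no dtype given: returns the type string
    'torch.FloatTensor'
    >>> t.type(torch.int32)
    tensor([1, 1], dtype=torch.int32)
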
""",
)

add_docstr_all(
    "type_as",
    r"""
type_as(tensor) -> Tensor

Returns this tensor cast to the type of the given tensor.

This is a no-op if the tensor is already of the correct type. This is
equivalent to ``self.type(tensor.type())``

Args:
    tensor (Tensor): the tensor which has the desired type
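
Example::

    >>> a = torch.ones(2, dtype=torch.float64)
    >>> b = torch.zeros(3, dtype=torch.int32)
    >>> b.type_as(a)  # b is cast to a's type
    tensor([0., 0., 0.], dtype=torch.float64)
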
""",
)

add_docstr_all(
    "unfold",
    r"""
unfold(dimension, size, step) -> Tensor

Returns a view of the original tensor which contains all slices of size :attr:`size` from
:attr:`self` tensor in the dimension :attr:`dimension`.

Step between two slices is given by :attr:`step`.

If `sizedim` is the size of dimension :attr:`dimension` for :attr:`self`, the size of
dimension :attr:`dimension` in the returned tensor will be
`(sizedim - size) / step + 1`.

An additional dimension of size :attr:`size` is appended in the returned tensor.

Args:
    dimension (int): dimension in which unfolding happens
    size (int): the size of each slice that is unfolded
    step (int): the step between each slice

Example::

    >>> x = torch.arange(1., 8)
    >>> x
    tensor([ 1.,  2.,  3.,  4.,  5.,  6.,  7.])
    >>> x.unfold(0, 2, 1)
    tensor([[ 1.,  2.],
            [ 2.,  3.],
            [ 3.,  4.],
            [ 4.,  5.],
            [ 5.,  6.],
            [ 6.,  7.]])
    >>> x.unfold(0, 2, 2)
    tensor([[ 1.,  2.],
            [ 3.,  4.],
            [ 5.,  6.]])
""",
)

add_docstr_all(
    "uniform_",
    r"""
uniform_(from=0, to=1, *, generator=None) -> Tensor

Fills :attr:`self` tensor with numbers sampled from the continuous uniform
distribution:

.. math::
    f(x) = \dfrac{1}{\text{to} - \text{from}}
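
Example (the sampled values are random; those shown are illustrative)::

    >>> t = torch.empty(3)
    >>> t.uniform_(0, 1)
    tensor([0.5444, 0.7610, 0.1794])
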
""",
)

add_docstr_all(
    "unsqueeze",
    r"""
unsqueeze(dim) -> Tensor

See :func:`torch.unsqueeze`
""",
)

add_docstr_all(
    "unsqueeze_",
    r"""
unsqueeze_(dim) -> Tensor

In-place version of :meth:`~Tensor.unsqueeze`
""",
)

add_docstr_all(
    "var",
    r"""
var(dim=None, *, correction=1, keepdim=False) -> Tensor

See :func:`torch.var`
""",
)

add_docstr_all(
    "vdot",
    r"""
vdot(other) -> Tensor

See :func:`torch.vdot`
""",
)

add_docstr_all(
    "view",
    r"""
view(*shape) -> Tensor

Returns a new tensor with the same data as the :attr:`self` tensor but of a
different :attr:`shape`.

The returned tensor shares the same data and must have the same number
of elements, but may have a different size. For a tensor to be viewed, the new
view size must be compatible with its original size and stride, i.e., each new
view dimension must either be a subspace of an original dimension, or only span
across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,

.. math::

  \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]

Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
:meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
returns a view if the shapes are compatible, and copies (equivalent to calling
:meth:`contiguous`) otherwise.

Args:
    shape (torch.Size or int...): the desired size

Example::

    >>> x = torch.randn(4, 4)
    >>> x.size()
    torch.Size([4, 4])
    >>> y = x.view(16)
    >>> y.size()
    torch.Size([16])
    >>> z = x.view(-1, 8)  # the size -1 is inferred from other dimensions
    >>> z.size()
    torch.Size([2, 8])

    >>> a = torch.randn(1, 2, 3, 4)
    >>> a.size()
    torch.Size([1, 2, 3, 4])
    >>> b = a.transpose(1, 2)  # Swaps 2nd and 3rd dimension
    >>> b.size()
    torch.Size([1, 3, 2, 4])
    >>> c = a.view(1, 3, 2, 4)  # Does not change tensor layout in memory
    >>> c.size()
    torch.Size([1, 3, 2, 4])
    >>> torch.equal(b, c)
    False


.. method:: view(dtype) -> Tensor
   :noindex:

Returns a new tensor with the same data as the :attr:`self` tensor but of a
different :attr:`dtype`.

If the element size of :attr:`dtype` is different than that of ``self.dtype``,
then the size of the last dimension of the output will be scaled
proportionally.  For instance, if :attr:`dtype` element size is twice that of
``self.dtype``, then each pair of elements in the last dimension of
:attr:`self` will be combined, and the size of the last dimension of the output
will be half that of :attr:`self`. If :attr:`dtype` element size is half that
of ``self.dtype``, then each element in the last dimension of :attr:`self` will
be split in two, and the size of the last dimension of the output will be
double that of :attr:`self`. For this to be possible, the following conditions
must be true:

    * ``self.dim()`` must be greater than 0.
    * ``self.stride(-1)`` must be 1.

Additionally, if the element size of :attr:`dtype` is greater than that of
``self.dtype``, the following conditions must be true as well:

    * ``self.size(-1)`` must be divisible by the ratio between the element
      sizes of the dtypes.
    * ``self.storage_offset()`` must be divisible by the ratio between the
      element sizes of the dtypes.
    * The strides of all dimensions, except the last dimension, must be
      divisible by the ratio between the element sizes of the dtypes.

If any of the above conditions are not met, an error is thrown.

.. warning::

    This overload is not supported by TorchScript, and using it in a TorchScript
    program will cause undefined behavior.


Args:
    dtype (:class:`torch.dtype`): the desired dtype

Example::

    >>> x = torch.randn(4, 4)
    >>> x
    tensor([[ 0.9482, -0.0310,  1.4999, -0.5316],
            [-0.1520,  0.7472,  0.5617, -0.8649],
            [-2.4724, -0.0334, -0.2976, -0.8499],
            [-0.2109,  1.9913, -0.9607, -0.6123]])
    >>> x.dtype
    torch.float32

    >>> y = x.view(torch.int32)
    >>> y
    tensor([[ 1064483442, -1124191867,  1069546515, -1089989247],
            [-1105482831,  1061112040,  1057999968, -1084397505],
            [-1071760287, -1123489973, -1097310419, -1084649136],
            [-1101533110,  1073668768, -1082790149, -1088634448]],
        dtype=torch.int32)
    >>> y[0, 0] = 1000000000
    >>> x
    tensor([[ 0.0047, -0.0310,  1.4999, -0.5316],
            [-0.1520,  0.7472,  0.5617, -0.8649],
            [-2.4724, -0.0334, -0.2976, -0.8499],
            [-0.2109,  1.9913, -0.9607, -0.6123]])

    >>> x.view(torch.cfloat)
    tensor([[ 0.0047-0.0310j,  1.4999-0.5316j],
            [-0.1520+0.7472j,  0.5617-0.8649j],
            [-2.4724-0.0334j, -0.2976-0.8499j],
            [-0.2109+1.9913j, -0.9607-0.6123j]])
    >>> x.view(torch.cfloat).size()
    torch.Size([4, 2])

    >>> x.view(torch.uint8)
    tensor([[  0, 202, 154,  59, 182, 243, 253, 188, 185, 252, 191,  63, 240,  22,
               8, 191],
            [227, 165,  27, 190, 128,  72,  63,  63, 146, 203,  15,  63,  22, 106,
              93, 191],
            [205,  59,  30, 192, 112, 206,   8, 189,   7,  95, 152, 190,  12, 147,
              89, 191],
            [ 43, 246,  87, 190, 235, 226, 254,  63, 111, 240, 117, 191, 177, 191,
              28, 191]], dtype=torch.uint8)
    >>> x.view(torch.uint8).size()
    torch.Size([4, 16])
""",
)

add_docstr_all(
    "view_as",
    r"""
view_as(other) -> Tensor

View this tensor as the same size as :attr:`other`.
``self.view_as(other)`` is equivalent to ``self.view(other.size())``.

Please see :meth:`~Tensor.view` for more information about ``view``.

Args:
    other (:class:`torch.Tensor`): The result tensor has the same size
        as :attr:`other`.
""",
)

add_docstr_all(
    "expand",
    r"""
expand(*sizes) -> Tensor

Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
to a larger size.

Passing -1 as the size for a dimension means not changing the size of
that dimension.

Tensor can be also expanded to a larger number of dimensions, and the
new ones will be appended at the front. For the new dimensions, the
size cannot be set to -1.

Expanding a tensor does not allocate new memory, but only creates a
new view on the existing tensor where a dimension of size one is
expanded to a larger size by setting the ``stride`` to 0. Any dimension
of size 1 can be expanded to an arbitrary value without allocating new
memory.

Args:
    *sizes (torch.Size or int...): the desired expanded size

.. warning::

    More than one element of an expanded tensor may refer to a single
    memory location. As a result, in-place operations (especially ones that
    are vectorized) may result in incorrect behavior. If you need to write
    to the tensors, please clone them first.

Example::

    >>> x = torch.tensor([[1], [2], [3]])
    >>> x.size()
    torch.Size([3, 1])
    >>> x.expand(3, 4)
    tensor([[ 1,  1,  1,  1],
            [ 2,  2,  2,  2],
            [ 3,  3,  3,  3]])
    >>> x.expand(-1, 4)   # -1 means not changing the size of that dimension
    tensor([[ 1,  1,  1,  1],
            [ 2,  2,  2,  2],
            [ 3,  3,  3,  3]])
""",
)

add_docstr_all(
    "expand_as",
    r"""
expand_as(other) -> Tensor

Expand this tensor to the same size as :attr:`other`.
``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.

Please see :meth:`~Tensor.expand` for more information about ``expand``.

Args:
    other (:class:`torch.Tensor`): The result tensor has the same size
        as :attr:`other`.
""",
)

add_docstr_all(
    "sum_to_size",
    r"""
sum_to_size(*size) -> Tensor

Sum ``this`` tensor to :attr:`size`.
:attr:`size` must be broadcastable to ``this`` tensor size.

Args:
    size (int...): a sequence of integers defining the shape of the output tensor.
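
Example::

    >>> x = torch.ones(2, 3)
    >>> x.sum_to_size(1, 3)  # sums the leading dimension down to size 1
    tensor([[2., 2., 2.]])
    >>> x.sum_to_size(2, 1)
    tensor([[3.],
            [3.]])
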
""",
)


add_docstr_all(
    "zero_",
    r"""
zero_() -> Tensor

Fills :attr:`self` tensor with zeros.
""",
)

add_docstr_all(
    "matmul",
    r"""
matmul(tensor2) -> Tensor

See :func:`torch.matmul`
""",
)

add_docstr_all(
    "chunk",
    r"""
chunk(chunks, dim=0) -> List of Tensors

See :func:`torch.chunk`
""",
)

add_docstr_all(
    "unsafe_chunk",
    r"""
unsafe_chunk(chunks, dim=0) -> List of Tensors

See :func:`torch.unsafe_chunk`
""",
)

add_docstr_all(
    "unsafe_split",
    r"""
unsafe_split(split_size, dim=0) -> List of Tensors

See :func:`torch.unsafe_split`
""",
)

add_docstr_all(
    "tensor_split",
    r"""
tensor_split(indices_or_sections, dim=0) -> List of Tensors

See :func:`torch.tensor_split`
""",
)

add_docstr_all(
    "hsplit",
    r"""
hsplit(split_size_or_sections) -> List of Tensors

See :func:`torch.hsplit`
""",
)

add_docstr_all(
    "vsplit",
    r"""
vsplit(split_size_or_sections) -> List of Tensors

See :func:`torch.vsplit`
""",
)

add_docstr_all(
    "dsplit",
    r"""
dsplit(split_size_or_sections) -> List of Tensors

See :func:`torch.dsplit`
""",
)

add_docstr_all(
    "stft",
    r"""
stft(n_fft, hop_length=None, win_length=None, window=None,
 center=True, pad_mode='reflect', normalized=False, onesided=None, return_complex=None) -> Tensor

See :func:`torch.stft`
""",
)

add_docstr_all(
    "istft",
    r"""
istft(n_fft, hop_length=None, win_length=None, window=None,
 center=True, normalized=False, onesided=True, length=None) -> Tensor

See :func:`torch.istft`
""",
)

add_docstr_all(
    "det",
    r"""
det() -> Tensor

See :func:`torch.det`
""",
)

add_docstr_all(
    "where",
    r"""
where(condition, y) -> Tensor

``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``.
See :func:`torch.where`
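
For instance::

    >>> x = torch.tensor([-1.0, 2.0, -3.0])
    >>> x.where(x > 0, torch.zeros_like(x))
    tensor([0., 2., 0.])
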
""",
)

add_docstr_all(
    "logdet",
    r"""
logdet() -> Tensor

See :func:`torch.logdet`
""",
)

add_docstr_all(
    "slogdet",
    r"""
slogdet() -> (Tensor, Tensor)

See :func:`torch.slogdet`
""",
)

add_docstr_all(
    "unbind",
    r"""
unbind(dim=0) -> seq

See :func:`torch.unbind`
""",
)

add_docstr_all(
    "pin_memory",
    r"""
pin_memory() -> Tensor

Copies the tensor to pinned memory, if it's not already pinned.
""",
)

add_docstr_all(
    "pinverse",
    r"""
pinverse() -> Tensor

See :func:`torch.pinverse`
""",
)

add_docstr_all(
    "index_add",
    r"""
index_add(dim, index, source, *, alpha=1) -> Tensor

Out-of-place version of :meth:`torch.Tensor.index_add_`.
""",
)

add_docstr_all(
    "index_copy",
    r"""
index_copy(dim, index, tensor2) -> Tensor

Out-of-place version of :meth:`torch.Tensor.index_copy_`.
""",
)

add_docstr_all(
    "index_fill",
    r"""
index_fill(dim, index, value) -> Tensor

Out-of-place version of :meth:`torch.Tensor.index_fill_`.
""",
)

add_docstr_all(
    "scatter",
    r"""
scatter(dim, index, src) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_`
""",
)

add_docstr_all(
    "scatter_add",
    r"""
scatter_add(dim, index, src) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_add_`
""",
)

add_docstr_all(
    "scatter_reduce",
    r"""
scatter_reduce(dim, index, src, reduce, *, include_self=True) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
""",
)

add_docstr_all(
    "masked_scatter",
    r"""
masked_scatter(mask, tensor) -> Tensor

Out-of-place version of :meth:`torch.Tensor.masked_scatter_`

.. note::

    The inputs :attr:`self` and :attr:`mask`
    :ref:`broadcast <broadcasting-semantics>`.

Example::

    >>> self = torch.tensor([0, 0, 0, 0, 0])
    >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=torch.bool)
    >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
    >>> self.masked_scatter(mask, source)
    tensor([[0, 0, 0, 0, 1],
            [2, 3, 0, 4, 5]])

""",
)

add_docstr_all(
    "xlogy",
    r"""
xlogy(other) -> Tensor

See :func:`torch.xlogy`
""",
)

add_docstr_all(
    "xlogy_",
    r"""
xlogy_(other) -> Tensor

In-place version of :meth:`~Tensor.xlogy`
""",
)

add_docstr_all(
    "masked_fill",
    r"""
masked_fill(mask, value) -> Tensor

Out-of-place version of :meth:`torch.Tensor.masked_fill_`
""",
)

add_docstr_all(
    "grad",
    r"""
This attribute is ``None`` by default and becomes a Tensor the first time a call to
:func:`backward` computes gradients for ``self``.
The attribute will then contain the gradients computed and future calls to
:func:`backward` will accumulate (add) gradients into it.
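
Example::

    >>> x = torch.ones(2, requires_grad=True)
    >>> x.grad is None
    True
    >>> (x * x).sum().backward()
    >>> x.grad
    tensor([2., 2.])
    >>> (x * x).sum().backward()  # a second backward accumulates into .grad
    >>> x.grad
    tensor([4., 4.])
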
""",
)

add_docstr_all(
    "retain_grad",
    r"""
retain_grad() -> None

Enables this Tensor to have its :attr:`grad` populated during
:func:`backward`. This is a no-op for leaf tensors.
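
Example::

    >>> x = torch.ones(1, requires_grad=True)
    >>> y = x * 2
    >>> y.retain_grad()
    >>> y.sum().backward()
    >>> y.grad  # populated for this non-leaf because of retain_grad()
    tensor([1.])
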
""",
)

add_docstr_all(
    "retains_grad",
    r"""
Is ``True`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
populated during :func:`backward`, ``False`` otherwise.
""",
)

add_docstr_all(
    "requires_grad",
    r"""
Is ``True`` if gradients need to be computed for this Tensor, ``False`` otherwise.

.. note::

    The fact that gradients need to be computed for a Tensor does not mean that the :attr:`grad`
    attribute will be populated; see :attr:`is_leaf` for more details.

""",
)

add_docstr_all(
    "is_leaf",
    r"""
All Tensors that have :attr:`requires_grad` which is ``False`` will be leaf Tensors by convention.

For Tensors that have :attr:`requires_grad` which is ``True``, they will be leaf Tensors if they were
created by the user. This means that they are not the result of an operation and so
:attr:`grad_fn` is None.

Only leaf Tensors will have their :attr:`grad` populated during a call to :func:`backward`.
To get :attr:`grad` populated for non-leaf Tensors, you can use :func:`retain_grad`.

Example::

    >>> a = torch.rand(10, requires_grad=True)
    >>> a.is_leaf
    True
    >>> b = torch.rand(10, requires_grad=True).cuda()
    >>> b.is_leaf
    False
    # b was created by the operation that cast a cpu Tensor into a cuda Tensor
    >>> c = torch.rand(10, requires_grad=True) + 2
    >>> c.is_leaf
    False
    # c was created by the addition operation
    >>> d = torch.rand(10).cuda()
    >>> d.is_leaf
    True
    # d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
    >>> e = torch.rand(10).cuda().requires_grad_()
    >>> e.is_leaf
    True
    # e requires gradients and has no operations creating it
    >>> f = torch.rand(10, requires_grad=True, device="cuda")
    >>> f.is_leaf
    True
    # f requires grad, has no operation creating it


""",
)

add_docstr_all(
    "names",
    r"""
Stores names for each of this tensor's dimensions.

``names[idx]`` corresponds to the name of tensor dimension ``idx``.
Names are either a string if the dimension is named or ``None`` if the
dimension is unnamed.

Dimension names may contain alphanumeric characters and underscores. Furthermore,
a dimension name must be a valid Python variable name (i.e., it does not start
with an underscore).

Tensors may not have two named dimensions with the same name.
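
Example::

    >>> t = torch.zeros(2, 3, names=('N', 'C'))
    >>> t.names
    ('N', 'C')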
6681

6682
.. warning::
6683
    The named tensor API is experimental and subject to change.
6684

6685
""",
6686
)
6687

6688
add_docstr_all(
6689
    "is_cuda",
6690
    r"""
6691
Is ``True`` if the Tensor is stored on the GPU, ``False`` otherwise.
6692
""",
6693
)
6694

6695
add_docstr_all(
6696
    "is_cpu",
6697
    r"""
6698
Is ``True`` if the Tensor is stored on the CPU, ``False`` otherwise.
6699
""",
6700
)
6701

6702
add_docstr_all(
6703
    "is_xla",
6704
    r"""
6705
Is ``True`` if the Tensor is stored on an XLA device, ``False`` otherwise.
6706
""",
6707
)
6708

6709
add_docstr_all(
6710
    "is_ipu",
6711
    r"""
6712
Is ``True`` if the Tensor is stored on the IPU, ``False`` otherwise.
6713
""",
6714
)
6715

6716
add_docstr_all(
6717
    "is_xpu",
6718
    r"""
6719
Is ``True`` if the Tensor is stored on the XPU, ``False`` otherwise.
6720
""",
6721
)
6722

6723
add_docstr_all(
6724
    "is_quantized",
6725
    r"""
6726
Is ``True`` if the Tensor is quantized, ``False`` otherwise.
6727
""",
6728
)
6729

6730
add_docstr_all(
6731
    "is_meta",
6732
    r"""
6733
Is ``True`` if the Tensor is a meta tensor, ``False`` otherwise.  Meta tensors
6734
are like normal tensors, but they carry no data.
""",
)

add_docstr_all(
    "is_mps",
    r"""
Is ``True`` if the Tensor is stored on the MPS device, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_sparse",
    r"""
Is ``True`` if the Tensor uses sparse COO storage layout, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_sparse_csr",
    r"""
Is ``True`` if the Tensor uses sparse CSR storage layout, ``False`` otherwise.
""",
)

add_docstr_all(
    "device",
    r"""
Is the :class:`torch.device` where this Tensor is.
""",
)

add_docstr_all(
    "ndim",
    r"""
Alias for :meth:`~Tensor.dim()`
""",
)

add_docstr_all(
    "itemsize",
    r"""
Alias for :meth:`~Tensor.element_size()`
""",
)

add_docstr_all(
    "nbytes",
    r"""
Returns the number of bytes consumed by the "view" of elements of the Tensor
if the Tensor does not use sparse storage layout.
Defined to be :meth:`~Tensor.numel()` * :meth:`~Tensor.element_size()`.
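
For instance, a minimal sketch of that arithmetic (a ``float32`` element
occupies 4 bytes, so a 3x4 tensor consumes ``12 * 4 = 48`` bytes)::

    >>> t = torch.zeros(3, 4, dtype=torch.float32)
    >>> t.nbytes
    48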
""",
)

add_docstr_all(
    "T",
    r"""
Returns a view of this tensor with its dimensions reversed.

If ``n`` is the number of dimensions in ``x``,
``x.T`` is equivalent to ``x.permute(n-1, n-2, ..., 0)``.

.. warning::
    The use of :func:`Tensor.T` on tensors of dimension other than 2 to reverse their shape
    is deprecated and will throw an error in a future release. Consider :attr:`~.Tensor.mT`
    to transpose batches of matrices or ``x.permute(*torch.arange(x.ndim - 1, -1, -1))`` to reverse
    the dimensions of a tensor.
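
A minimal illustration on a 2-D tensor (for higher-dimensional tensors, prefer
:attr:`~.Tensor.mT` or :meth:`~Tensor.permute` as noted above)::

    >>> x = torch.arange(6).reshape(2, 3)
    >>> x.T
    tensor([[0, 3],
            [1, 4],
            [2, 5]])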
""",
)

add_docstr_all(
    "H",
    r"""
Returns a view of a matrix (2-D tensor) conjugated and transposed.

``x.H`` is equivalent to ``x.transpose(0, 1).conj()`` for complex matrices and
``x.transpose(0, 1)`` for real matrices.

.. seealso::

        :attr:`~.Tensor.mH`: An attribute that also works on batches of matrices.
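
A small sketch of the equivalence above (the complex values are arbitrary)::

    >>> x = torch.tensor([[1 + 1j, 2 - 1j]])
    >>> torch.equal(x.H, x.transpose(0, 1).conj())
    True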
""",
)

add_docstr_all(
    "mT",
    r"""
Returns a view of this tensor with the last two dimensions transposed.

``x.mT`` is equivalent to ``x.transpose(-2, -1)``.
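
For instance, on a batch of four 2x3 matrices::

    >>> x = torch.zeros(4, 2, 3)
    >>> x.mT.shape
    torch.Size([4, 3, 2])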
""",
)

add_docstr_all(
    "mH",
    r"""
Accessing this property is equivalent to calling :func:`adjoint`.
""",
)

add_docstr_all(
    "adjoint",
    r"""
adjoint() -> Tensor

Alias for :func:`adjoint`
""",
)

add_docstr_all(
    "real",
    r"""
Returns a new tensor containing real values of the :attr:`self` tensor for a complex-valued input tensor.
The returned tensor and :attr:`self` share the same underlying storage.

Returns :attr:`self` if :attr:`self` is a real-valued tensor.

Example::

    >>> x = torch.randn(4, dtype=torch.cfloat)
    >>> x
    tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
    >>> x.real
    tensor([ 0.3100, -0.5445, -1.6492, -0.0638])

""",
)

add_docstr_all(
    "imag",
    r"""
Returns a new tensor containing imaginary values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.

.. warning::
    :func:`imag` is only supported for tensors with complex dtypes.

Example::

    >>> x = torch.randn(4, dtype=torch.cfloat)
    >>> x
    tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
    >>> x.imag
    tensor([ 0.3553, -0.7896, -0.0633, -0.8119])

""",
)

add_docstr_all(
    "as_subclass",
    r"""
as_subclass(cls) -> Tensor

Makes a ``cls`` instance with the same data pointer as ``self``. Changes
in the output mirror changes in ``self``, and the output stays attached
to the autograd graph. ``cls`` must be a subclass of ``Tensor``.
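
A minimal sketch (``MyTensor`` is a hypothetical subclass defined only for
this illustration)::

    >>> class MyTensor(torch.Tensor):  # hypothetical subclass for illustration
    ...     pass
    >>> x = torch.ones(2)
    >>> y = x.as_subclass(MyTensor)
    >>> type(y).__name__
    'MyTensor'
    >>> y[0] = 5.0
    >>> x  # y shares x's data pointer, so the write is visible here
    tensor([5., 1.])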
""",
)

add_docstr_all(
    "crow_indices",
    r"""
crow_indices() -> IntTensor

Returns the tensor containing the compressed row indices of the :attr:`self`
tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
The ``crow_indices`` tensor is strictly of shape (:attr:`self`.size(0) + 1)
and of type ``int32`` or ``int64``. When using MKL routines such as sparse
matrix multiplication, it is necessary to use ``int32`` indexing in order
to avoid downcasting and potentially losing information.

Example::

    >>> csr = torch.eye(5, 5).to_sparse_csr()
    >>> csr.crow_indices()
    tensor([0, 1, 2, 3, 4, 5], dtype=torch.int32)

""",
)

add_docstr_all(
    "col_indices",
    r"""
col_indices() -> IntTensor

Returns the tensor containing the column indices of the :attr:`self`
tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
The ``col_indices`` tensor is strictly of shape (:attr:`self`.nnz())
and of type ``int32`` or ``int64``.  When using MKL routines such as sparse
matrix multiplication, it is necessary to use ``int32`` indexing in order
to avoid downcasting and potentially losing information.

Example::

    >>> csr = torch.eye(5, 5).to_sparse_csr()
    >>> csr.col_indices()
    tensor([0, 1, 2, 3, 4], dtype=torch.int32)

""",
)

add_docstr_all(
    "to_padded_tensor",
    r"""
to_padded_tensor(padding, output_size=None) -> Tensor

See :func:`to_padded_tensor`
""",
)