"""Adds docstrings to Tensor functions"""

import torch._C
from torch._C import _add_docstr as add_docstr
from torch._torch_docs import parse_kwargs, reproducibility_notes


def add_docstr_all(method, docstr):
    add_docstr(getattr(torch._C.TensorBase, method), docstr)


common_args = parse_kwargs(
    """
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.preserve_format``.
"""
)
new_common_args = parse_kwargs(
    """
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
        Default: if None, same :class:`torch.dtype` as this tensor.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if None, same :class:`torch.device` as this tensor.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
    layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
        Default: ``torch.strided``.
"""
)
new_tensor(data, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor

Returns a new Tensor with :attr:`data` as the tensor data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

:func:`new_tensor` always copies :attr:`data`. If you have a Tensor
``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
or :func:`torch.Tensor.detach`.
If you have a numpy array and want to avoid a copy, use
:func:`torch.from_numpy`.

When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
The equivalents using ``clone()`` and ``detach()`` are recommended.
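For illustration, a minimal sketch of the copy semantics, contrasted with the
sharing behavior of :func:`torch.from_numpy` (values are illustrative)::

    >>> import numpy as np
    >>> a = np.array([1.0, 2.0])
    >>> t = torch.zeros(2).new_tensor(a)  # always copies; keeps self's float32 dtype
    >>> a[0] = 9.0                        # later changes to `a` do not affect t
    >>> t
    tensor([1., 2.])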
Args:
    data (array_like): The returned Tensor copies :attr:`data`.

Example::

    >>> tensor = torch.ones((2,), dtype=torch.int8)
    >>> data = [[0, 1], [2, 3]]
    >>> tensor.new_tensor(data)
    tensor([[ 0,  1],
            [ 2,  3]], dtype=torch.int8)

""".format(**new_common_args),
new_full(size, fill_value, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor

Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    fill_value (scalar): the number to fill the output tensor with.

Example::

    >>> tensor = torch.ones((2,), dtype=torch.float64)
    >>> tensor.new_full((3, 4), 3.141592)
    tensor([[ 3.1416,  3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416,  3.1416]], dtype=torch.float64)

""".format(**new_common_args),
new_empty(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor

Returns a Tensor of size :attr:`size` filled with uninitialized data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Example::

    >>> tensor = torch.ones(())
    >>> tensor.new_empty((2, 3))
    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])

""".format(**new_common_args),
new_empty_strided(size, stride, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor

Returns a Tensor of size :attr:`size` and strides :attr:`stride` filled with
uninitialized data. By default, the returned Tensor has the same
:class:`torch.dtype` and :class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Example::

    >>> tensor = torch.ones(())
    >>> tensor.new_empty_strided((2, 3), (3, 1))
    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])

""".format(**new_common_args),
new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor

Returns a Tensor of size :attr:`size` filled with ``1``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Example::

    >>> tensor = torch.tensor((), dtype=torch.int32)
    >>> tensor.new_ones((2, 3))
    tensor([[ 1,  1,  1],
            [ 1,  1,  1]], dtype=torch.int32)

""".format(**new_common_args),
new_zeros(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor

Returns a Tensor of size :attr:`size` filled with ``0``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Example::

    >>> tensor = torch.tensor((), dtype=torch.float64)
    >>> tensor.new_zeros((2, 3))
    tensor([[ 0.,  0.,  0.],
            [ 0.,  0.,  0.]], dtype=torch.float64)

""".format(**new_common_args),
In-place version of :meth:`~Tensor.abs`

In-place version of :meth:`~Tensor.absolute`
Alias for :func:`abs_`

See :func:`torch.acos`

In-place version of :meth:`~Tensor.acos`

See :func:`torch.arccos`

In-place version of :meth:`~Tensor.arccos`

See :func:`torch.acosh`

In-place version of :meth:`~Tensor.acosh`

See :func:`torch.arccosh`

In-place version of :meth:`~Tensor.arccosh`

add(other, *, alpha=1) -> Tensor

Add a scalar or tensor to :attr:`self` tensor. If both :attr:`alpha`
and :attr:`other` are specified, each element of :attr:`other` is scaled by
:attr:`alpha` before being used.

When :attr:`other` is a tensor, the shape of :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.
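For illustration, a small deterministic sketch of the ``alpha`` scaling::

    >>> a = torch.tensor([1., 2.])
    >>> a.add(torch.tensor([10., 20.]), alpha=2)  # a + alpha * other
    tensor([21., 42.])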
add_(other, *, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.add`

addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addbmm`

addbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addbmm`

addcdiv(tensor1, tensor2, *, value=1) -> Tensor

See :func:`torch.addcdiv`

addcdiv_(tensor1, tensor2, *, value=1) -> Tensor

In-place version of :meth:`~Tensor.addcdiv`

addcmul(tensor1, tensor2, *, value=1) -> Tensor

See :func:`torch.addcmul`

addcmul_(tensor1, tensor2, *, value=1) -> Tensor

In-place version of :meth:`~Tensor.addcmul`

addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addmm`

addmm_(mat1, mat2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addmm`

addmv(mat, vec, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addmv`

addmv_(mat, vec, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addmv`

sspaddmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.sspaddmm`

addr(vec1, vec2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addr`

addr_(vec1, vec2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addr`
align_as(other) -> Tensor

Permutes the dimensions of the :attr:`self` tensor to match the dimension order
in the :attr:`other` tensor, adding size-one dims for any new names.

This operation is useful for explicit broadcasting by names (see examples).

All of the dims of :attr:`self` must be named in order to use this method.
The resulting tensor is a view on the original tensor.

All dimension names of :attr:`self` must be present in ``other.names``.
:attr:`other` may contain named dimensions that are not in ``self.names``;
the output tensor has a size-one dimension for each of those new names.

To align a tensor to a specific order, use :meth:`~Tensor.align_to`.

Example::

    # Example 1: Applying a mask
    >>> mask = torch.randint(2, [127, 128], dtype=torch.bool).refine_names('W', 'H')
    >>> imgs = torch.randn(32, 128, 127, 3, names=('N', 'H', 'W', 'C'))
    >>> imgs.masked_fill_(mask.align_as(imgs), 0)

    # Example 2: Applying a per-channel-scale
    >>> def scale_channels(input, scale):
    >>>     scale = scale.refine_names('C')
    >>>     return input * scale.align_as(input)

    >>> num_channels = 3
    >>> scale = torch.randn(num_channels, names=('C',))
    >>> imgs = torch.rand(32, 128, 128, num_channels, names=('N', 'H', 'W', 'C'))
    >>> more_imgs = torch.rand(32, num_channels, 128, 128, names=('N', 'C', 'H', 'W'))
    >>> videos = torch.randn(3, num_channels, 128, 128, 128, names=('N', 'C', 'H', 'W', 'D'))

    # scale_channels is agnostic to the dimension order of the input
    >>> scale_channels(imgs, scale)
    >>> scale_channels(more_imgs, scale)
    >>> scale_channels(videos, scale)

.. warning::
    The named tensor API is experimental and subject to change.
all(dim=None, keepdim=False) -> Tensor

allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor

See :func:`torch.allclose`

See :func:`torch.angle`

any(dim=None, keepdim=False) -> Tensor

apply_(callable) -> Tensor

Applies the function :attr:`callable` to each element in the tensor, replacing
each element with the value returned by :attr:`callable`.

.. note::

    This function only works with CPU tensors and should not be used in code
    sections that require high performance.
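A tiny sketch (CPU tensor assumed; ``apply_`` modifies and returns :attr:`self`)::

    >>> t = torch.tensor([1., 2., 3.])
    >>> t.apply_(lambda v: v * 2)
    tensor([2., 4., 6.])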
See :func:`torch.asin`

In-place version of :meth:`~Tensor.asin`

See :func:`torch.arcsin`

In-place version of :meth:`~Tensor.arcsin`

See :func:`torch.asinh`

In-place version of :meth:`~Tensor.asinh`

See :func:`torch.arcsinh`

In-place version of :meth:`~Tensor.arcsinh`

as_strided(size, stride, storage_offset=None) -> Tensor

See :func:`torch.as_strided`

as_strided_(size, stride, storage_offset=None) -> Tensor

In-place version of :meth:`~Tensor.as_strided`

See :func:`torch.atan`

In-place version of :meth:`~Tensor.atan`

See :func:`torch.arctan`

In-place version of :meth:`~Tensor.arctan`

atan2(other) -> Tensor

See :func:`torch.atan2`

atan2_(other) -> Tensor

In-place version of :meth:`~Tensor.atan2`

arctan2(other) -> Tensor

See :func:`torch.arctan2`

arctan2_(other) -> Tensor

In-place version of :meth:`~Tensor.arctan2`

See :func:`torch.atanh`

atanh_() -> Tensor

In-place version of :meth:`~Tensor.atanh`

See :func:`torch.arctanh`

arctanh_() -> Tensor

In-place version of :meth:`~Tensor.arctanh`
baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.baddbmm`

baddbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.baddbmm`

bernoulli(*, generator=None) -> Tensor

Returns a result tensor where each :math:`\texttt{result[i]}` is independently
sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have
floating point ``dtype``, and the result will have the same ``dtype``.

See :func:`torch.bernoulli`

bernoulli_(p=0.5, *, generator=None) -> Tensor

Fills each location of :attr:`self` with an independent sample from
:math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral
``dtype``.

:attr:`p` should either be a scalar or tensor containing probabilities to be
used for drawing the binary random number.

If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor
will be set to a value sampled from
:math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have
floating point ``dtype``.

See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli`
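A small sketch using edge-case probabilities so the output is deterministic
(illustrative)::

    >>> t = torch.zeros(3)
    >>> t.bernoulli_(1.0)   # p=1: every draw succeeds
    tensor([1., 1., 1.])
    >>> t.bernoulli_(torch.tensor([0., 0., 0.]))  # per-element probabilities
    tensor([0., 0., 0.])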
bincount(weights=None, minlength=0) -> Tensor

See :func:`torch.bincount`

bitwise_not() -> Tensor

See :func:`torch.bitwise_not`

bitwise_not_() -> Tensor

In-place version of :meth:`~Tensor.bitwise_not`

bitwise_and() -> Tensor

See :func:`torch.bitwise_and`

bitwise_and_() -> Tensor

In-place version of :meth:`~Tensor.bitwise_and`

bitwise_or() -> Tensor

See :func:`torch.bitwise_or`

bitwise_or_() -> Tensor

In-place version of :meth:`~Tensor.bitwise_or`

bitwise_xor() -> Tensor

See :func:`torch.bitwise_xor`

bitwise_xor_() -> Tensor

In-place version of :meth:`~Tensor.bitwise_xor`
"bitwise_left_shift",

bitwise_left_shift(other) -> Tensor

See :func:`torch.bitwise_left_shift`

"bitwise_left_shift_",

bitwise_left_shift_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_left_shift`

"bitwise_right_shift",

bitwise_right_shift(other) -> Tensor

See :func:`torch.bitwise_right_shift`

"bitwise_right_shift_",

bitwise_right_shift_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_right_shift`

broadcast_to(shape) -> Tensor

See :func:`torch.broadcast_to`.

logical_and() -> Tensor

See :func:`torch.logical_and`

logical_and_() -> Tensor

In-place version of :meth:`~Tensor.logical_and`

logical_not() -> Tensor

See :func:`torch.logical_not`

logical_not_() -> Tensor

In-place version of :meth:`~Tensor.logical_not`

logical_or() -> Tensor

See :func:`torch.logical_or`

logical_or_() -> Tensor

In-place version of :meth:`~Tensor.logical_or`

logical_xor() -> Tensor

See :func:`torch.logical_xor`

logical_xor_() -> Tensor

In-place version of :meth:`~Tensor.logical_xor`
bmm(batch2) -> Tensor

See :func:`torch.bmm`

cauchy_(median=0, sigma=1, *, generator=None) -> Tensor

Fills the tensor with numbers drawn from the Cauchy distribution:

.. math::

    f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}

Sigma (:math:`\sigma`) denotes the scale parameter of the Cauchy distribution.

See :func:`torch.ceil`

In-place version of :meth:`~Tensor.ceil`
cholesky(upper=False) -> Tensor

See :func:`torch.cholesky`

cholesky_solve(input2, upper=False) -> Tensor

See :func:`torch.cholesky_solve`

cholesky_inverse(upper=False) -> Tensor

See :func:`torch.cholesky_inverse`

clamp(min=None, max=None) -> Tensor

See :func:`torch.clamp`

clamp_(min=None, max=None) -> Tensor

In-place version of :meth:`~Tensor.clamp`

clip(min=None, max=None) -> Tensor

Alias for :meth:`~Tensor.clamp`.

clip_(min=None, max=None) -> Tensor

Alias for :meth:`~Tensor.clamp_`.

clone(*, memory_format=torch.preserve_format) -> Tensor

See :func:`torch.clone`
""".format(**common_args),
Returns a coalesced copy of :attr:`self` if :attr:`self` is an
:ref:`uncoalesced tensor <sparse-uncoalesced-coo-docs>`.

Returns :attr:`self` if :attr:`self` is a coalesced tensor.

Throws an error if :attr:`self` is not a sparse COO tensor.

contiguous(memory_format=torch.contiguous_format) -> Tensor

Returns a contiguous in memory tensor containing the same data as :attr:`self` tensor. If
:attr:`self` tensor is already in the specified memory format, this function returns the
:attr:`self` tensor.

Args:
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.contiguous_format``.
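A short sketch of when a copy is made (illustrative)::

    >>> t = torch.arange(6).reshape(2, 3).t()  # transpose is a non-contiguous view
    >>> t.is_contiguous()
    False
    >>> t.contiguous().is_contiguous()
    True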
copy_(src, non_blocking=False) -> Tensor

Copies the elements from :attr:`src` into :attr:`self` tensor and returns
:attr:`self`.

The :attr:`src` tensor must be :ref:`broadcastable <broadcasting-semantics>`
with the :attr:`self` tensor. It may be of a different data type or reside on a
different device.

Args:
    src (Tensor): the source tensor to copy from
    non_blocking (bool): if ``True`` and this copy is between CPU and GPU,
        the copy may occur asynchronously with respect to the host. For other
        cases, this argument has no effect.
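A minimal sketch of the broadcasting and casting behavior (illustrative)::

    >>> a = torch.zeros(2, 3)
    >>> a.copy_(torch.tensor([1, 2, 3]))  # broadcast over dim 0, int cast to float
    tensor([[1., 2., 3.],
            [1., 2., 3.]])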
See :func:`torch.conj`

conj_physical() -> Tensor

See :func:`torch.conj_physical`

conj_physical_() -> Tensor

In-place version of :meth:`~Tensor.conj_physical`

resolve_conj() -> Tensor

See :func:`torch.resolve_conj`

resolve_neg() -> Tensor

See :func:`torch.resolve_neg`

copysign(other) -> Tensor

See :func:`torch.copysign`

copysign_(other) -> Tensor

In-place version of :meth:`~Tensor.copysign`

See :func:`torch.cos`

In-place version of :meth:`~Tensor.cos`

See :func:`torch.cosh`

In-place version of :meth:`~Tensor.cosh`

cpu(memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in CPU memory.

If this object is already in CPU memory and on the correct device,
then no copy is performed and the original object is returned.

""".format(**common_args),
count_nonzero(dim=None) -> Tensor

See :func:`torch.count_nonzero`

cov(*, correction=1, fweights=None, aweights=None) -> Tensor

See :func:`torch.cov`

See :func:`torch.corrcoef`

cross(other, dim=None) -> Tensor

See :func:`torch.cross`

cuda(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in CUDA memory.

If this object is already in CUDA memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination GPU device.
        Defaults to the current CUDA device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.

""".format(**common_args),
mtia(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in MTIA memory.

If this object is already in MTIA memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination MTIA device.
        Defaults to the current MTIA device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.

""".format(**common_args),

ipu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in IPU memory.

If this object is already in IPU memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination IPU device.
        Defaults to the current IPU device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.

""".format(**common_args),

xpu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in XPU memory.

If this object is already in XPU memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination XPU device.
        Defaults to the current XPU device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.

""".format(**common_args),
logcumsumexp(dim) -> Tensor

See :func:`torch.logcumsumexp`

cummax(dim) -> (Tensor, Tensor)

See :func:`torch.cummax`

cummin(dim) -> (Tensor, Tensor)

See :func:`torch.cummin`

cumprod(dim, dtype=None) -> Tensor

See :func:`torch.cumprod`

cumprod_(dim, dtype=None) -> Tensor

In-place version of :meth:`~Tensor.cumprod`

cumsum(dim, dtype=None) -> Tensor

See :func:`torch.cumsum`

cumsum_(dim, dtype=None) -> Tensor

In-place version of :meth:`~Tensor.cumsum`

Returns the address of the first element of :attr:`self` tensor.

dequantize() -> Tensor

Given a quantized Tensor, dequantize it and return the dequantized float Tensor.

Return the number of dense dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.

Returns ``len(self.shape)`` if :attr:`self` is not a sparse tensor.

See also :meth:`Tensor.sparse_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.

diag(diagonal=0) -> Tensor

See :func:`torch.diag`

diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor

See :func:`torch.diag_embed`

diagflat(offset=0) -> Tensor

See :func:`torch.diagflat`

diagonal(offset=0, dim1=0, dim2=1) -> Tensor

See :func:`torch.diagonal`

diagonal_scatter(src, offset=0, dim1=0, dim2=1) -> Tensor

See :func:`torch.diagonal_scatter`

"as_strided_scatter",

as_strided_scatter(src, size, stride, storage_offset=None) -> Tensor

See :func:`torch.as_strided_scatter`

fill_diagonal_(fill_value, wrap=False) -> Tensor

Fill the main diagonal of a tensor that has at least 2 dimensions.
When ``dims > 2``, all dimensions of the input must be of equal length.
This function modifies the input tensor in-place, and returns the input tensor.

Args:
    fill_value (Scalar): the fill value
    wrap (bool): whether the diagonal is 'wrapped' after N columns for tall matrices.
Example::

    >>> a = torch.zeros(3, 3)
    >>> a.fill_diagonal_(5)
    tensor([[5., 0., 0.],
            [0., 5., 0.],
            [0., 0., 5.]])
    >>> b = torch.zeros(7, 3)
    >>> b.fill_diagonal_(5)
    tensor([[5., 0., 0.],
            [0., 5., 0.],
            [0., 0., 5.],
            [0., 0., 0.],
            [0., 0., 0.],
            [0., 0., 0.],
            [0., 0., 0.]])
    >>> c = torch.zeros(7, 3)
    >>> c.fill_diagonal_(5, wrap=True)
    tensor([[5., 0., 0.],
            [0., 5., 0.],
            [0., 0., 5.],
            [0., 0., 0.],
            [5., 0., 0.],
            [0., 5., 0.],
            [0., 0., 5.]])
floor_divide(value) -> Tensor

See :func:`torch.floor_divide`

floor_divide_(value) -> Tensor

In-place version of :meth:`~Tensor.floor_divide`

diff(n=1, dim=-1, prepend=None, append=None) -> Tensor

See :func:`torch.diff`

See :func:`torch.digamma`

In-place version of :meth:`~Tensor.digamma`

Returns the number of dimensions of :attr:`self` tensor.

dist(other, p=2) -> Tensor

See :func:`torch.dist`

div(value, *, rounding_mode=None) -> Tensor

See :func:`torch.div`

div_(value, *, rounding_mode=None) -> Tensor

In-place version of :meth:`~Tensor.div`

divide(value, *, rounding_mode=None) -> Tensor

See :func:`torch.divide`

divide_(value, *, rounding_mode=None) -> Tensor

In-place version of :meth:`~Tensor.divide`

See :func:`torch.dot`

element_size() -> int

Returns the size in bytes of an individual element.

Example::

    >>> torch.tensor([]).element_size()
    4
    >>> torch.tensor([], dtype=torch.uint8).element_size()
    1
In-place version of :meth:`~Tensor.eq`

See :func:`torch.equal`

See :func:`torch.erf`

In-place version of :meth:`~Tensor.erf`

See :func:`torch.erfc`

In-place version of :meth:`~Tensor.erfc`

See :func:`torch.erfinv`

In-place version of :meth:`~Tensor.erfinv`

See :func:`torch.exp`

In-place version of :meth:`~Tensor.exp`

See :func:`torch.exp2`

In-place version of :meth:`~Tensor.exp2`

See :func:`torch.expm1`

In-place version of :meth:`~Tensor.expm1`

exponential_(lambd=1, *, generator=None) -> Tensor

Fills :attr:`self` tensor with elements drawn from the PDF (probability density function):

.. math::

    f(x) = \lambda e^{-\lambda x}, \quad x > 0

In probability theory, the exponential distribution is supported on the interval
:math:`[0, \infty)` (i.e., :math:`x \ge 0`), implying that zero can be sampled from the
exponential distribution. However, :func:`torch.Tensor.exponential_` does not sample zero,
which means that its actual support is the interval :math:`(0, \infty)`.

Note that :func:`torch.distributions.exponential.Exponential` is supported on the interval
:math:`[0, \infty)` and can sample zero.
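A quick empirical check of the strictly positive support (illustrative)::

    >>> s = torch.empty(1000).exponential_(lambd=1.0)
    >>> bool((s > 0).all())  # zero is never sampled
    True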
fill_(value) -> Tensor

Fills :attr:`self` tensor with the specified value.

See :func:`torch.floor`

See :func:`torch.flip`

See :func:`torch.fliplr`

See :func:`torch.flipud`

roll(shifts, dims) -> Tensor

See :func:`torch.roll`

In-place version of :meth:`~Tensor.floor`

fmod(divisor) -> Tensor

See :func:`torch.fmod`

fmod_(divisor) -> Tensor

In-place version of :meth:`~Tensor.fmod`

See :func:`torch.frac`

In-place version of :meth:`~Tensor.frac`

frexp() -> (Tensor mantissa, Tensor exponent)

See :func:`torch.frexp`

flatten(start_dim=0, end_dim=-1) -> Tensor

See :func:`torch.flatten`

gather(dim, index) -> Tensor

See :func:`torch.gather`

See :func:`torch.gcd`

gcd_(other) -> Tensor

In-place version of :meth:`~Tensor.gcd`

See :func:`torch.ge`.

In-place version of :meth:`~Tensor.ge`.

greater_equal(other) -> Tensor

See :func:`torch.greater_equal`.

greater_equal_(other) -> Tensor

In-place version of :meth:`~Tensor.greater_equal`.

geometric_(p, *, generator=None) -> Tensor

Fills :attr:`self` tensor with elements drawn from the geometric distribution:

.. math::

    P(X=k) = (1 - p)^{k - 1} p, \quad k = 1, 2, \ldots

For :func:`torch.Tensor.geometric_`, the `k`-th trial is the first success, so samples
are drawn in :math:`\{1, 2, \ldots\}`, whereas for
:func:`torch.distributions.geometric.Geometric`, the :math:`(k+1)`-th trial is the first
success, so samples are drawn in :math:`\{0, 1, \ldots\}`.
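A quick empirical check of the support (illustrative)::

    >>> g = torch.empty(1000).geometric_(p=0.5)
    >>> bool((g >= 1).all())  # samples live in {1, 2, ...}
    True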
geqrf() -> (Tensor, Tensor)

See :func:`torch.geqrf`

See :func:`torch.ger`

inner(other) -> Tensor

See :func:`torch.inner`.

outer(vec2) -> Tensor

See :func:`torch.outer`.

hypot(other) -> Tensor

See :func:`torch.hypot`

hypot_(other) -> Tensor

In-place version of :meth:`~Tensor.hypot`

In-place version of :meth:`~Tensor.i0`

igamma(other) -> Tensor

See :func:`torch.igamma`

igamma_(other) -> Tensor

In-place version of :meth:`~Tensor.igamma`

igammac(other) -> Tensor

See :func:`torch.igammac`

igammac_(other) -> Tensor

In-place version of :meth:`~Tensor.igammac`

Return the indices tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.

Throws an error if :attr:`self` is not a sparse COO tensor.

See also :meth:`Tensor.values`.

.. note::
  This method can only be called on a coalesced sparse tensor. See
  :meth:`Tensor.coalesce` for details.

get_device() -> Device ordinal (Integer)

For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides.
For CPU tensors, this function returns `-1`.

Example::

    >>> x = torch.randn(3, 4, 5, device='cuda:0')
    >>> x.get_device()
    0
    >>> x.cpu().get_device()
    -1

Return the values tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.

Throws an error if :attr:`self` is not a sparse COO tensor.

See also :meth:`Tensor.indices`.

.. note::
  This method can only be called on a coalesced sparse tensor. See
  :meth:`Tensor.coalesce` for details.

See :func:`torch.gt`.

In-place version of :meth:`~Tensor.gt`.

greater(other) -> Tensor

See :func:`torch.greater`.

greater_(other) -> Tensor

In-place version of :meth:`~Tensor.greater`.

Is ``True`` if any of this tensor's dimensions are named. Otherwise, is ``False``.

hardshrink(lambd=0.5) -> Tensor

See :func:`torch.nn.functional.hardshrink`

heaviside(values) -> Tensor

See :func:`torch.heaviside`

heaviside_(values) -> Tensor

In-place version of :meth:`~Tensor.heaviside`

histc(bins=100, min=0, max=0) -> Tensor

See :func:`torch.histc`

histogram(bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor)

See :func:`torch.histogram`
index_add_(dim, index, source, *, alpha=1) -> Tensor

Accumulate the elements of :attr:`alpha` times ``source`` into the :attr:`self`
tensor by adding to the indices in the order given in :attr:`index`. For example,
if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of
``source`` is subtracted from the ``j``\ th row of :attr:`self`.

The :attr:`dim`\ th dimension of ``source`` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.

For a 3-D tensor the output is given as::

    self[index[i], :, :] += alpha * src[i, :, :]  # if dim == 0
    self[:, index[i], :] += alpha * src[:, i, :]  # if dim == 1
    self[:, :, index[i]] += alpha * src[:, :, i]  # if dim == 2

Note:
    {forward_reproducibility_note}

Args:
    dim (int): dimension along which to index
    index (Tensor): indices of ``source`` to select from,
        should have dtype either `torch.int64` or `torch.int32`
    source (Tensor): the tensor containing values to add

Keyword args:
    alpha (Number): the scalar multiplier for ``source``

Example::

    >>> x = torch.ones(5, 3)
    >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    >>> index = torch.tensor([0, 4, 2])
    >>> x.index_add_(0, index, t)
    tensor([[ 2.,  3.,  4.],
            [ 1.,  1.,  1.],
            [ 8.,  9., 10.],
            [ 1.,  1.,  1.],
            [ 5.,  6.,  7.]])
    >>> x.index_add_(0, index, t, alpha=-1)
    tensor([[ 1.,  1.,  1.],
            [ 1.,  1.,  1.],
            [ 1.,  1.,  1.],
            [ 1.,  1.,  1.],
            [ 1.,  1.,  1.]])

""".format(**reproducibility_notes),
index_copy_(dim, index, tensor) -> Tensor

Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
the indices in the order given in :attr:`index`. For example, if ``dim == 0``
and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
``j``\ th row of :attr:`self`.

The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.

.. note::
    If :attr:`index` contains duplicate entries, multiple elements from
    :attr:`tensor` will be copied to the same index of :attr:`self`. The result
    is nondeterministic since it depends on which copy occurs last.

Args:
    dim (int): dimension along which to index
    index (LongTensor): indices of :attr:`tensor` to select from
    tensor (Tensor): the tensor containing values to copy

Example::

    >>> x = torch.zeros(5, 3)
    >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    >>> index = torch.tensor([0, 4, 2])
    >>> x.index_copy_(0, index, t)
    tensor([[ 1.,  2.,  3.],
            [ 0.,  0.,  0.],
            [ 7.,  8.,  9.],
            [ 0.,  0.,  0.],
            [ 4.,  5.,  6.]])
index_fill_(dim, index, value) -> Tensor

Fills the elements of the :attr:`self` tensor with value :attr:`value` by
selecting the indices in the order given in :attr:`index`.

Args:
    dim (int): dimension along which to index
    index (LongTensor): indices of :attr:`self` tensor to fill in
    value (float): the value to fill with

Example::

    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    >>> index = torch.tensor([0, 2])
    >>> x.index_fill_(1, index, -1)
    tensor([[-1.,  2., -1.],
            [-1.,  5., -1.],
            [-1.,  8., -1.]])
index_put_(indices, values, accumulate=False) -> Tensor

Puts values from the tensor :attr:`values` into the tensor :attr:`self` using
the indices specified in :attr:`indices` (which is a tuple of Tensors). The
expression ``tensor.index_put_(indices, values)`` is equivalent to
``tensor[indices] = values``. Returns :attr:`self`.

If :attr:`accumulate` is ``True``, the elements in :attr:`values` are added to
:attr:`self`. If accumulate is ``False``, the behavior is undefined if indices
contain duplicate elements.

Args:
    indices (tuple of LongTensor): tensors used to index into `self`.
    values (Tensor): tensor of same dtype as `self`.
    accumulate (bool): whether to accumulate into self
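A minimal sketch of tuple-of-tensors indexing (illustrative)::

    >>> t = torch.zeros(2, 3)
    >>> idx = (torch.tensor([0, 1]), torch.tensor([2, 0]))
    >>> t.index_put_(idx, torch.tensor([1., 2.]))
    tensor([[0., 0., 1.],
            [2., 0., 0.]])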
index_put(indices, values, accumulate=False) -> Tensor

Out-of-place version of :meth:`~Tensor.index_put_`.

index_reduce_(dim, index, source, reduce, *, include_self=True) -> Tensor

Accumulate the elements of ``source`` into the :attr:`self`
tensor by accumulating to the indices in the order given in :attr:`index`
using the reduction given by the ``reduce`` argument. For example, if ``dim == 0``,
``index[i] == j``, ``reduce == prod`` and ``include_self == True`` then the ``i``\ th
row of ``source`` is multiplied by the ``j``\ th row of :attr:`self`. If
:obj:`include_self=True`, the values in the :attr:`self` tensor are included
in the reduction; otherwise, rows in the :attr:`self` tensor that are accumulated
to are treated as if they were filled with the reduction identities.

The :attr:`dim`\ th dimension of ``source`` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.

For a 3-D tensor with :obj:`reduce="prod"` and :obj:`include_self=True` the
output is given as::

    self[index[i], :, :] *= src[i, :, :]  # if dim == 0
    self[:, index[i], :] *= src[:, i, :]  # if dim == 1
    self[:, :, index[i]] *= src[:, :, i]  # if dim == 2

Note:
    {forward_reproducibility_note}

.. note::

    This function only supports floating point tensors.

.. warning::

    This function is in beta and may change in the near future.

Args:
    dim (int): dimension along which to index
    index (Tensor): indices of ``source`` to select from,
        should have dtype either `torch.int64` or `torch.int32`
    source (FloatTensor): the tensor containing values to accumulate
    reduce (str): the reduction operation to apply
        (:obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)

Keyword args:
    include_self (bool): whether the elements from the ``self`` tensor are
        included in the reduction

Example::

    >>> x = torch.empty(5, 3).fill_(2)
    >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=torch.float)
    >>> index = torch.tensor([0, 4, 2, 0])
    >>> x.index_reduce_(0, index, t, 'prod')
    tensor([[20., 44., 72.],
            [ 2.,  2.,  2.],
            [14., 16., 18.],
            [ 2.,  2.,  2.],
            [ 8., 10., 12.]])
    >>> x = torch.empty(5, 3).fill_(2)
    >>> x.index_reduce_(0, index, t, 'prod', include_self=False)
    tensor([[10., 22., 36.],
            [ 2.,  2.,  2.],
            [ 7.,  8.,  9.],
            [ 2.,  2.,  2.],
            [ 4.,  5.,  6.]])

""".format(**reproducibility_notes),
index_select(dim, index) -> Tensor

See :func:`torch.index_select`

sparse_mask(mask) -> Tensor

Returns a new :ref:`sparse tensor <sparse-docs>` with values from a
strided tensor :attr:`self` filtered by the indices of the sparse
tensor :attr:`mask`. The values of :attr:`mask` sparse tensor are
ignored. :attr:`self` and :attr:`mask` tensors must have the same
shape.

.. note::

    The returned sparse tensor might contain duplicate values if :attr:`mask`
    is not coalesced. It is therefore advisable to pass ``mask.coalesce()``
    if such behavior is not desired.

.. note::

    The returned sparse tensor has the same indices as the sparse tensor
    :attr:`mask`, even when the corresponding values in :attr:`self` are
    zeros.

Args:
    mask (Tensor): a sparse tensor whose indices are used as a filter

Example::

    >>> nse = 5
    >>> dims = (5, 5, 2, 2)
    >>> I = torch.cat([torch.randint(0, dims[0], size=(nse,)),
    ...                torch.randint(0, dims[1], size=(nse,))], 0).reshape(2, nse)
    >>> V = torch.randn(nse, dims[2], dims[3])
    >>> S = torch.sparse_coo_tensor(I, V, dims).coalesce()
    >>> D = torch.randn(dims)
    >>> D.sparse_mask(S)
    tensor(indices=tensor([[0, 0, 0, 2],
           values=tensor([[[ 1.6550,  0.2397],
                           [-0.1611, -0.0779]],

                          [[ 0.2326, -1.0558],

                          [[-0.5138, -0.0411],

                           [-0.2569, -0.1055]]]),
           size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo)
See :func:`torch.inverse`

See :func:`torch.isnan`

See :func:`torch.isinf`

See :func:`torch.isposinf`

See :func:`torch.isneginf`

See :func:`torch.isfinite`

isclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor

See :func:`torch.isclose`

See :func:`torch.isreal`

is_coalesced() -> bool

Returns ``True`` if :attr:`self` is a :ref:`sparse COO tensor
<sparse-coo-docs>` that is coalesced, ``False`` otherwise.

Throws an error if :attr:`self` is not a sparse COO tensor.

See :meth:`coalesce` and :ref:`uncoalesced tensors <sparse-uncoalesced-coo-docs>`.

is_contiguous(memory_format=torch.contiguous_format) -> bool

Returns True if :attr:`self` tensor is contiguous in memory in the order specified
by memory format.

Args:
    memory_format (:class:`torch.memory_format`, optional): Specifies memory allocation
        order. Default: ``torch.contiguous_format``.

Returns true if this tensor resides in pinned memory.
"is_floating_point",

is_floating_point() -> bool

Returns True if the data type of :attr:`self` is a floating point data type.

Returns True if the data type of :attr:`self` is a complex data type.

is_inference() -> bool

See :func:`torch.is_inference`

Returns True if the conjugate bit of :attr:`self` is set to true.

Returns True if the negative bit of :attr:`self` is set to true.

Returns True if the data type of :attr:`self` is a signed data type.

is_set_to(tensor) -> bool

Returns True if both tensors are pointing to the exact same memory (same
storage, offset, size and stride).

Returns the value of this tensor as a standard Python number. This only works
for tensors with one element. For other cases, see :meth:`~Tensor.tolist`.

This operation is not differentiable.

Example::

    >>> x = torch.tensor([1.0])
    >>> x.item()
    1.0
kron(other) -> Tensor

See :func:`torch.kron`

kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)

See :func:`torch.kthvalue`

ldexp(other) -> Tensor

See :func:`torch.ldexp`

ldexp_(other) -> Tensor

In-place version of :meth:`~Tensor.ldexp`

See :func:`torch.lcm`

lcm_(other) -> Tensor

In-place version of :meth:`~Tensor.lcm`

See :func:`torch.le`.

In-place version of :meth:`~Tensor.le`.

less_equal(other) -> Tensor

See :func:`torch.less_equal`.

less_equal_(other) -> Tensor

In-place version of :meth:`~Tensor.less_equal`.

lerp(end, weight) -> Tensor

See :func:`torch.lerp`

lerp_(end, weight) -> Tensor

In-place version of :meth:`~Tensor.lerp`

See :func:`torch.lgamma`

In-place version of :meth:`~Tensor.lgamma`

See :func:`torch.log`

In-place version of :meth:`~Tensor.log`

See :func:`torch.log10`

In-place version of :meth:`~Tensor.log10`

See :func:`torch.log1p`

In-place version of :meth:`~Tensor.log1p`

See :func:`torch.log2`

In-place version of :meth:`~Tensor.log2`

logaddexp(other) -> Tensor

See :func:`torch.logaddexp`

logaddexp2(other) -> Tensor

See :func:`torch.logaddexp2`

log_normal_(mean=1, std=2, *, generator=None)

Fills :attr:`self` tensor with numbers sampled from the log-normal distribution
parameterized by the given mean :math:`\mu` and standard deviation
:math:`\sigma`. Note that :attr:`mean` and :attr:`std` are the mean and
standard deviation of the underlying normal distribution, and not of the
returned distribution:

.. math::

    f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}
logsumexp(dim, keepdim=False) -> Tensor

See :func:`torch.logsumexp`

See :func:`torch.lt`.

In-place version of :meth:`~Tensor.lt`.

See :func:`torch.less`.

less_(other) -> Tensor

In-place version of :meth:`~Tensor.less`.

lu_solve(LU_data, LU_pivots) -> Tensor

See :func:`torch.lu_solve`

map_(tensor, callable)

Applies :attr:`callable` for each element in :attr:`self` tensor and the given
:attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and
the given :attr:`tensor` must be :ref:`broadcastable <broadcasting-semantics>`.

The :attr:`callable` should have the signature::

    def callable(a, b) -> number
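A small sketch (CPU tensors; ``map_`` stores results in and returns :attr:`self`)::

    >>> a = torch.tensor([1., 2., 3.])
    >>> b = torch.tensor([10., 20., 30.])
    >>> a.map_(b, lambda x, y: x + y)
    tensor([11., 22., 33.])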
masked_scatter_(mask, source)

Copies elements from :attr:`source` into :attr:`self` tensor at positions where
the :attr:`mask` is True. Elements from :attr:`source` are copied into :attr:`self`
starting at position 0 of :attr:`source` and continuing in order one-by-one for each
occurrence of :attr:`mask` being True.
The shape of :attr:`mask` must be :ref:`broadcastable <broadcasting-semantics>`
with the shape of the underlying tensor. The :attr:`source` should have at least
as many elements as the number of ones in :attr:`mask`.

Args:
    mask (BoolTensor): the boolean mask
    source (Tensor): the tensor to copy from

.. note::

    The :attr:`mask` operates on the :attr:`self` tensor, not on the given
    :attr:`source` tensor.

Example::

    >>> self = torch.tensor([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
    >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=torch.bool)
    >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
    >>> self.masked_scatter_(mask, source)
    tensor([[0, 0, 0, 0, 1],
            [2, 3, 0, 4, 5]])
masked_fill_(mask, value)

Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is
True. The shape of :attr:`mask` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.

Args:
    mask (BoolTensor): the boolean mask
    value (float): the value to fill in with

masked_select(mask) -> Tensor

See :func:`torch.masked_select`

matrix_power(n) -> Tensor

.. note:: :meth:`~Tensor.matrix_power` is deprecated, use :func:`torch.linalg.matrix_power` instead.

Alias for :func:`torch.linalg.matrix_power`

matrix_exp() -> Tensor

See :func:`torch.matrix_exp`

max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)

See :func:`torch.max`

amax(dim=None, keepdim=False) -> Tensor

See :func:`torch.amax`

maximum(other) -> Tensor

See :func:`torch.maximum`

fmax(other) -> Tensor

See :func:`torch.fmax`

argmax(dim=None, keepdim=False) -> LongTensor

See :func:`torch.argmax`

See :func:`torch.argwhere`

mean(dim=None, keepdim=False, *, dtype=None) -> Tensor

See :func:`torch.mean`

nanmean(dim=None, keepdim=False, *, dtype=None) -> Tensor

See :func:`torch.nanmean`

median(dim=None, keepdim=False) -> (Tensor, LongTensor)

See :func:`torch.median`

nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)

See :func:`torch.nanmedian`

min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)

See :func:`torch.min`

amin(dim=None, keepdim=False) -> Tensor

See :func:`torch.amin`

minimum(other) -> Tensor

See :func:`torch.minimum`
aminmax(*, dim=None, keepdim=False) -> (Tensor min, Tensor max)

See :func:`torch.aminmax`

fmin(other) -> Tensor

See :func:`torch.fmin`

argmin(dim=None, keepdim=False) -> LongTensor

See :func:`torch.argmin`

mode(dim=None, keepdim=False) -> (Tensor, LongTensor)

See :func:`torch.mode`

movedim(source, destination) -> Tensor

See :func:`torch.movedim`

moveaxis(source, destination) -> Tensor

See :func:`torch.moveaxis`

See :func:`torch.mul`.

mul_(value) -> Tensor

In-place version of :meth:`~Tensor.mul`.

multiply(value) -> Tensor

See :func:`torch.multiply`.

multiply_(value) -> Tensor

In-place version of :meth:`~Tensor.multiply`.

multinomial(num_samples, replacement=False, *, generator=None) -> Tensor

See :func:`torch.multinomial`

mvlgamma(p) -> Tensor

See :func:`torch.mvlgamma`

mvlgamma_(p) -> Tensor

In-place version of :meth:`~Tensor.mvlgamma`

narrow(dimension, start, length) -> Tensor

See :func:`torch.narrow`.

narrow_copy(dimension, start, length) -> Tensor

See :func:`torch.narrow_copy`.

Alias for :meth:`~Tensor.dim`
nan_to_num(nan=0.0, posinf=None, neginf=None) -> Tensor

See :func:`torch.nan_to_num`.

nan_to_num_(nan=0.0, posinf=None, neginf=None) -> Tensor

In-place version of :meth:`~Tensor.nan_to_num`.

See :func:`torch.ne`.

In-place version of :meth:`~Tensor.ne`.

not_equal(other) -> Tensor

See :func:`torch.not_equal`.

not_equal_(other) -> Tensor

In-place version of :meth:`~Tensor.not_equal`.

See :func:`torch.neg`

See :func:`torch.negative`

In-place version of :meth:`~Tensor.neg`

negative_() -> Tensor

In-place version of :meth:`~Tensor.negative`

Alias for :meth:`~Tensor.numel`

nextafter(other) -> Tensor

See :func:`torch.nextafter`

nextafter_(other) -> Tensor

In-place version of :meth:`~Tensor.nextafter`
nonzero() -> LongTensor

See :func:`torch.nonzero`

nonzero_static(input, *, size, fill_value=-1) -> Tensor

Returns a 2-D tensor where each row is the index for a non-zero value.
The returned Tensor has the same `torch.dtype` as `torch.nonzero()`.

Args:
    input (Tensor): the input tensor to count non-zero elements.

Keyword args:
    size (int): the number of non-zero elements expected to be in the output
        tensor. The output is padded with `fill_value` if `size` is larger
        than the total number of non-zero elements, and truncated if `size`
        is smaller. The size must be a non-negative integer.
    fill_value (int): the value to fill the output tensor with when `size` is larger
        than the total number of non-zero elements. Default is `-1` to represent
        an invalid index.

Example::

    # Example 1: Padding
    >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
    >>> static_size = 4
    >>> torch.nonzero_static(input_tensor, size=static_size)
    tensor([[ 0,  0],
            [ 1,  0],
            [ 1,  1],
            [-1, -1]], dtype=torch.int64)

    # Example 2: Truncating
    >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
    >>> static_size = 2
    >>> torch.nonzero_static(input_tensor, size=static_size)
    tensor([[ 0,  0],
            [ 1,  0]], dtype=torch.int64)

    # Example 3: Size 0 output
    >>> input_tensor = torch.tensor([10])
    >>> static_size = 0
    >>> torch.nonzero_static(input_tensor, size=static_size)
    tensor([], size=(0, 1), dtype=torch.int64)

    # Example 4: 0 rank input
    >>> input_tensor = torch.tensor(10)
    >>> static_size = 2
    >>> torch.nonzero_static(input_tensor, size=static_size)
    tensor([], size=(2, 0), dtype=torch.int64)
norm(p=2, dim=None, keepdim=False) -> Tensor

See :func:`torch.norm`

normal_(mean=0, std=1, *, generator=None) -> Tensor

Fills :attr:`self` tensor with elements sampled from the normal distribution
parameterized by :attr:`mean` and :attr:`std`.

See :func:`torch.numel`

numpy(*, force=False) -> numpy.ndarray

Returns the tensor as a NumPy :class:`ndarray`.

If :attr:`force` is ``False`` (the default), the conversion
is performed only if the tensor is on the CPU, does not require grad,
does not have its conjugate bit set, and is a dtype and layout that
NumPy supports. The returned ndarray and the tensor will share their
storage, so changes to the tensor will be reflected in the ndarray
and vice versa.

If :attr:`force` is ``True`` this is equivalent to
calling ``t.detach().cpu().resolve_conj().resolve_neg().numpy()``.
If the tensor isn't on the CPU or the conjugate or negative bit is set,
the tensor won't share its storage with the returned ndarray.
Setting :attr:`force` to ``True`` can be a useful shorthand.

Args:
    force (bool): if ``True``, the ndarray may be a copy of the tensor
        instead of always sharing memory, defaults to ``False``.
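A small sketch of the storage-sharing behavior on CPU (illustrative)::

    >>> t = torch.ones(2)
    >>> a = t.numpy()   # shares storage with t
    >>> t.add_(1)
    tensor([2., 2.])
    >>> a
    array([2., 2.], dtype=float32)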
orgqr(input2) -> Tensor

See :func:`torch.orgqr`

ormqr(input2, input3, left=True, transpose=False) -> Tensor

See :func:`torch.ormqr`

permute(*dims) -> Tensor

See :func:`torch.permute`

polygamma(n) -> Tensor

See :func:`torch.polygamma`

polygamma_(n) -> Tensor

In-place version of :meth:`~Tensor.polygamma`

See :func:`torch.positive`

pow(exponent) -> Tensor

See :func:`torch.pow`

pow_(exponent) -> Tensor

In-place version of :meth:`~Tensor.pow`

float_power(exponent) -> Tensor

See :func:`torch.float_power`

float_power_(exponent) -> Tensor

In-place version of :meth:`~Tensor.float_power`

prod(dim=None, keepdim=False, dtype=None) -> Tensor

See :func:`torch.prod`

put_(index, source, accumulate=False) -> Tensor

Copies the elements from :attr:`source` into the positions specified by
:attr:`index`. For the purpose of indexing, the :attr:`self` tensor is treated as if
it were a 1-D tensor.

:attr:`index` and :attr:`source` need to have the same number of elements, but not necessarily
the same shape.

If :attr:`accumulate` is ``True``, the elements in :attr:`source` are added to
:attr:`self`. If accumulate is ``False``, the behavior is undefined if :attr:`index`
contains duplicate elements.

Args:
    index (LongTensor): the indices into self
    source (Tensor): the tensor containing values to copy from
    accumulate (bool): whether to accumulate into self

Example::

    >>> src = torch.tensor([[4, 3, 5],
    ...                     [6, 7, 8]])
    >>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10]))
    tensor([[ 4,  9,  5],
            [10,  7,  8]])
put(input, index, source, accumulate=False) -> Tensor

Out-of-place version of :meth:`torch.Tensor.put_`.
`input` corresponds to `self` in :meth:`torch.Tensor.put_`.

qr(some=True) -> (Tensor, Tensor)

qscheme() -> torch.qscheme

Returns the quantization scheme of a given QTensor.

quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor

See :func:`torch.quantile`

nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor

See :func:`torch.nanquantile`

Given a Tensor quantized by linear (affine) quantization,
returns the scale of the underlying quantizer.

q_zero_point() -> int

Given a Tensor quantized by linear (affine) quantization,
returns the zero_point of the underlying quantizer.

"q_per_channel_scales",

q_per_channel_scales() -> Tensor

Given a Tensor quantized by linear (affine) per-channel quantization,
returns a Tensor of scales of the underlying quantizer. It has the number of
elements that matches the corresponding dimensions (from q_per_channel_axis) of
the tensor.

"q_per_channel_zero_points",

q_per_channel_zero_points() -> Tensor

Given a Tensor quantized by linear (affine) per-channel quantization,
returns a tensor of zero_points of the underlying quantizer. It has the number of
elements that matches the corresponding dimensions (from q_per_channel_axis) of
the tensor.

"q_per_channel_axis",

q_per_channel_axis() -> int

Given a Tensor quantized by linear (affine) per-channel quantization,
returns the index of dimension on which per-channel quantization is applied.

random_(from=0, to=None, *, generator=None) -> Tensor

Fills :attr:`self` tensor with numbers sampled from the discrete uniform
distribution over ``[from, to - 1]``. If not specified, the values are usually
only bounded by :attr:`self` tensor's data type. However, for floating point
types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every
value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
will be uniform in ``[0, 2^53]``.
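A quick bound check on the sampled range (illustrative)::

    >>> t = torch.empty(4, dtype=torch.int64).random_(0, 5)
    >>> bool(((t >= 0) & (t < 5)).all())  # draws lie in [0, 5)
    True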
See :func:`torch.rad2deg`

In-place version of :meth:`~Tensor.rad2deg`

See :func:`torch.deg2rad`

In-place version of :meth:`~Tensor.deg2rad`

See :func:`torch.ravel`

reciprocal() -> Tensor

See :func:`torch.reciprocal`

reciprocal_() -> Tensor

In-place version of :meth:`~Tensor.reciprocal`

record_stream(stream)

Marks the tensor as having been used by this stream. When the tensor
is deallocated, ensure the tensor memory is not reused for another tensor
until all work queued on :attr:`stream` at the time of deallocation is
complete.

.. note::

    The caching allocator is aware of only the stream where a tensor was
    allocated. Due to the awareness, it already correctly manages the life
    cycle of tensors on only one stream. But if a tensor is used on a stream
    different from the stream of origin, the allocator might reuse the memory
    unexpectedly. Calling this method lets the allocator know which streams
    have used the tensor.

.. warning::

    This method is most suitable for use cases where you are providing a
    function that created a tensor on a side stream, and want users to be able
    to make use of the tensor without having to think carefully about stream
    safety when making use of them. These safety guarantees come at some
    performance and predictability cost (analogous to the tradeoff between GC
    and manual memory management), so if you are in a situation where
    you manage the full lifetime of your tensors, you may consider instead
    manually managing CUDA events so that calling this method is not necessary.
    In particular, when you call this method, on later allocations the
    allocator will poll the recorded stream to see if all operations have
    completed yet; you can potentially race with side stream computation and
    non-deterministically reuse or fail to reuse memory for an allocation.

    You can safely use tensors allocated on side streams without
    :meth:`~Tensor.record_stream`; you must manually ensure that
    any non-creation stream uses of a tensor are synced back to the creation
    stream before you deallocate the tensor. As the CUDA caching allocator
    guarantees that the memory will only be reused with the same creation stream,
    this is sufficient to ensure that writes to future reallocations of the
    memory will be delayed until non-creation stream uses are done.
    (Counterintuitively, you may observe that on the CPU side we have already
    reallocated the tensor, even though CUDA kernels on the old tensor are
    still in progress. This is fine, because CUDA operations on the new
    tensor will appropriately wait for the old operations to complete, as they
    are all on the same stream.)
    Concretely, this looks like this::

        with torch.cuda.stream(s0):
            x = torch.zeros(N, device="cuda")

        with torch.cuda.stream(s1):
            y = some_comm_op(x)

        ... some compute on s0 ...

        # synchronize creation stream s0 to side stream s1
        # before deallocating x
        s0.wait_stream(s1)
        del x

    Note that some discretion is required when deciding when to perform
    ``s0.wait_stream(s1)``. In particular, if we were to wait immediately
    after ``some_comm_op``, there wouldn't be any point in having the side
    stream; it would be equivalent to have run ``some_comm_op`` on ``s0``.
    Instead, the synchronization must be placed at some appropriate, later
    point in time where you expect the side stream ``s1`` to have finished
    work. This location is typically identified via profiling, e.g., using
    Chrome traces produced by
    :meth:`torch.autograd.profiler.profile.export_chrome_trace`. If you
    place the wait too early, work on ``s0`` will block until ``s1`` has finished,
    preventing further overlapping of communication and computation. If you
    place the wait too late, you will use more memory than is strictly
    necessary (as you are keeping ``x`` live for longer). For a concrete
    example of how this guidance can be applied in practice, see this post:
    `FSDP and CUDACachingAllocator
    <https://dev-discuss.pytorch.org/t/fsdp-cudacachingallocator-an-outsider-newb-perspective/1486>`_.
remainder(divisor) -> Tensor

See :func:`torch.remainder`

remainder_(divisor) -> Tensor

In-place version of :meth:`~Tensor.remainder`

renorm(p, dim, maxnorm) -> Tensor

See :func:`torch.renorm`

renorm_(p, dim, maxnorm) -> Tensor

In-place version of :meth:`~Tensor.renorm`

repeat(*repeats) -> Tensor

Repeats this tensor along the specified dimensions.

Unlike :meth:`~Tensor.expand`, this function copies the tensor's data.

.. warning::

    :meth:`~Tensor.repeat` behaves differently from
    `numpy.repeat <https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html>`_,
    but is more similar to
    `numpy.tile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html>`_.
    For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`.

Args:
    repeat (torch.Size, int..., tuple of int or list of int): The number of times to repeat this tensor along each dimension

Example::

    >>> x = torch.tensor([1, 2, 3])
    >>> x.repeat(4, 2)
    tensor([[ 1,  2,  3,  1,  2,  3],
            [ 1,  2,  3,  1,  2,  3],
            [ 1,  2,  3,  1,  2,  3],
            [ 1,  2,  3,  1,  2,  3]])
    >>> x.repeat(4, 2, 1).size()
    torch.Size([4, 2, 3])
"repeat_interleave",

repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor

See :func:`torch.repeat_interleave`.

requires_grad_(requires_grad=True) -> Tensor

Change if autograd should record operations on this tensor: sets this tensor's
:attr:`requires_grad` attribute in-place. Returns this tensor.

:func:`requires_grad_`'s main use case is to tell autograd to begin recording
operations on a Tensor ``tensor``. If ``tensor`` has ``requires_grad=False``
(because it was obtained through a DataLoader, or required preprocessing or
initialization), ``tensor.requires_grad_()`` makes it so that autograd will
begin to record operations on ``tensor``.

Args:
    requires_grad (bool): If autograd should record operations on this tensor.
        Default: ``True``.

Example::

    >>> # Let's say we want to preprocess some saved weights and use
    >>> # the result as new weights.
    >>> saved_weights = [0.1, 0.2, 0.3, 0.25]
    >>> loaded_weights = torch.tensor(saved_weights)
    >>> weights = preprocess(loaded_weights)  # some function
    >>> weights
    tensor([-0.5503,  0.4926, -2.1158, -0.8303])

    >>> # Now, start to record operations done to weights
    >>> weights.requires_grad_()
    >>> out = weights.pow(2).sum()
    >>> out.backward()
    >>> weights.grad
    tensor([-1.1007,  0.9853, -4.2316, -1.6606])
4236
reshape(*shape) -> Tensor

Returns a tensor with the same data and number of elements as :attr:`self`
but with the specified shape. This method returns a view if :attr:`shape` is
compatible with the current shape. See :meth:`torch.Tensor.view` on when it is
possible to return a view.

See :func:`torch.reshape`

shape (tuple of ints or int...): the desired shape

reshape_as(other) -> Tensor

Returns this tensor as the same shape as :attr:`other`.
``self.reshape_as(other)`` is equivalent to ``self.reshape(other.sizes())``.
This method returns a view if ``other.sizes()`` is compatible with the current
shape. See :meth:`torch.Tensor.view` on when it is possible to return a view.

Please see :meth:`reshape` for more information about ``reshape``.

other (:class:`torch.Tensor`): The result tensor has the same shape
    as :attr:`other`.
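As a supplementary sketch of the view-versus-copy behavior described above::

    import torch

    x = torch.arange(6)
    y = x.reshape(2, 3)   # contiguous input, so this is a view
    y[0, 0] = 100
    x[0]                  # tensor(100) -- storage is shared

    t = x.reshape(2, 3).t()
    z = t.reshape(6)      # t is non-contiguous, so the data is copied
    z[0] = -1
    x[0]                  # still tensor(100) -- no longer shared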
resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor

Resizes :attr:`self` tensor to the specified size. If the number of elements is
larger than the current storage size, then the underlying storage is resized
to fit the new number of elements. If the number of elements is smaller, the
underlying storage is not changed. Existing elements are preserved but any new
memory is uninitialized.

This is a low-level method. The storage is reinterpreted as C-contiguous,
ignoring the current strides (unless the target size equals the current
size, in which case the tensor is left unchanged). For most purposes, you
will instead want to use :meth:`~Tensor.view()`, which checks for
contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To
change the size in-place with custom strides, see :meth:`~Tensor.set_()`.

If :func:`torch.use_deterministic_algorithms()` and
:attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
``True``, new elements are initialized to prevent nondeterministic behavior
from using the result as an input to an operation. Floating point and
complex values are set to NaN, and integer values are set to the maximum
value.

sizes (torch.Size or int...): the desired size
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
    Tensor. Default: ``torch.contiguous_format``. Note that memory format of
    :attr:`self` is going to be unaffected if ``self.size()`` matches ``sizes``.

>>> x = torch.tensor([[1, 2], [3, 4], [5, 6]])
>>> x.resize_(2, 2)
tensor([[1, 2],
        [3, 4]])
resize_as_(tensor, memory_format=torch.contiguous_format) -> Tensor

Resizes the :attr:`self` tensor to be the same size as the specified
:attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``.

memory_format (:class:`torch.memory_format`, optional): the desired memory format of
    Tensor. Default: ``torch.contiguous_format``. Note that memory format of
    :attr:`self` is going to be unaffected if ``self.size()`` matches ``tensor.size()``.

rot90(k, dims) -> Tensor

See :func:`torch.rot90`

round(decimals=0) -> Tensor

See :func:`torch.round`

round_(decimals=0) -> Tensor

In-place version of :meth:`~Tensor.round`

rsqrt() -> Tensor

See :func:`torch.rsqrt`

rsqrt_() -> Tensor

In-place version of :meth:`~Tensor.rsqrt`
scatter_(dim, index, src, *, reduce=None) -> Tensor

Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
index is specified by its index in :attr:`src` for ``dimension != dim`` and by
the corresponding value in :attr:`index` for ``dimension = dim``.

For a 3-D tensor, :attr:`self` is updated as::

    self[index[i][j][k]][j][k] = src[i][j][k]  # if dim == 0
    self[i][index[i][j][k]][k] = src[i][j][k]  # if dim == 1
    self[i][j][index[i][j][k]] = src[i][j][k]  # if dim == 2

This is the reverse operation of the manner described in :meth:`~Tensor.gather`.

:attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
the same number of dimensions. It is also required that
``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
Note that ``index`` and ``src`` do not broadcast.

Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
between ``0`` and ``self.size(dim) - 1`` inclusive.

When indices are not unique, the behavior is non-deterministic (one of the
values from ``src`` will be picked arbitrarily) and the gradient will be
incorrect (it will be propagated to all locations in the source that
correspond to the same index)!

The backward pass is implemented only for ``src.shape == index.shape``.

Additionally accepts an optional :attr:`reduce` argument that allows
specification of an optional reduction operation, which is applied to all
values in the tensor :attr:`src` as they are scattered into :attr:`self` at
the indices specified in :attr:`index`. For each value in :attr:`src`, the
reduction operation is applied to an index in :attr:`self` which is specified
by its index in :attr:`src` for ``dimension != dim`` and by the corresponding
value in :attr:`index` for ``dimension = dim``.

Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
is updated as::

    self[index[i][j][k]][j][k] *= src[i][j][k]  # if dim == 0
    self[i][index[i][j][k]][k] *= src[i][j][k]  # if dim == 1
    self[i][j][index[i][j][k]] *= src[i][j][k]  # if dim == 2

Reducing with the addition operation is the same as using
:meth:`~torch.Tensor.scatter_add_`.

The reduce argument with Tensor ``src`` is deprecated and will be removed in
a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_`
instead for more reduction options.

dim (int): the axis along which to index
index (LongTensor): the indices of elements to scatter, can be either empty
    or of the same dimensionality as ``src``. When empty, the operation
    returns ``self`` unchanged.
src (Tensor): the source element(s) to scatter.
reduce (str, optional): reduction operation to apply, can be either
    ``'add'`` or ``'multiply'``.

>>> src = torch.arange(1, 11).reshape((2, 5))
>>> src
tensor([[ 1,  2,  3,  4,  5],
        [ 6,  7,  8,  9, 10]])
>>> index = torch.tensor([[0, 1, 2, 0]])
>>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
tensor([[1, 0, 0, 4, 0],
        [0, 2, 0, 0, 0],
        [0, 0, 3, 0, 0]])
>>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
>>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
tensor([[1, 2, 3, 0, 0],
        [6, 7, 0, 0, 8],
        [0, 0, 0, 0, 0]])

>>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
...            1.23, reduce='multiply')
tensor([[2.0000, 2.0000, 2.4600, 2.0000],
        [2.0000, 2.0000, 2.0000, 2.4600]])
>>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
...            1.23, reduce='add')
tensor([[2.0000, 2.0000, 3.2300, 2.0000],
        [2.0000, 2.0000, 2.0000, 3.2300]])

.. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor:

Writes the value from :attr:`value` into :attr:`self` at the indices
specified in the :attr:`index` tensor. This operation is equivalent to the previous version,
with the :attr:`src` tensor filled entirely with :attr:`value`.

dim (int): the axis along which to index
index (LongTensor): the indices of elements to scatter, can be either empty
    or of the same dimensionality as ``src``. When empty, the operation
    returns ``self`` unchanged.
value (Scalar): the value to scatter.
reduce (str, optional): reduction operation to apply, can be either
    ``'add'`` or ``'multiply'``.

>>> index = torch.tensor([[0, 1]])
>>> value = 2
>>> torch.zeros(3, 5).scatter_(0, index, value)
tensor([[2., 0., 0., 0., 0.],
        [0., 2., 0., 0., 0.],
        [0., 0., 0., 0., 0.]])
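One common application of the value overload is one-hot encoding; the
following is an illustrative sketch rather than part of the official examples::

    import torch

    labels = torch.tensor([0, 2, 1])
    one_hot = torch.zeros(3, 3).scatter_(1, labels.unsqueeze(1), 1.0)
    # tensor([[1., 0., 0.],
    #         [0., 0., 1.],
    #         [0., 1., 0.]])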
scatter_add_(dim, index, src) -> Tensor

Adds all values from the tensor :attr:`src` into :attr:`self` at the indices
specified in the :attr:`index` tensor in a similar fashion as
:meth:`~torch.Tensor.scatter_`. For each value in :attr:`src`, it is added to
an index in :attr:`self` which is specified by its index in :attr:`src`
for ``dimension != dim`` and by the corresponding value in :attr:`index` for
``dimension = dim``.

For a 3-D tensor, :attr:`self` is updated as::

    self[index[i][j][k]][j][k] += src[i][j][k]  # if dim == 0
    self[i][index[i][j][k]][k] += src[i][j][k]  # if dim == 1
    self[i][j][index[i][j][k]] += src[i][j][k]  # if dim == 2

:attr:`self`, :attr:`index` and :attr:`src` should have same number of
dimensions. It is also required that ``index.size(d) <= src.size(d)`` for all
dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions
``d != dim``. Note that ``index`` and ``src`` do not broadcast.

{forward_reproducibility_note}

The backward pass is implemented only for ``src.shape == index.shape``.

dim (int): the axis along which to index
index (LongTensor): the indices of elements to scatter and add, can be
    either empty or of the same dimensionality as ``src``. When empty, the
    operation returns ``self`` unchanged.
src (Tensor): the source elements to scatter and add

>>> src = torch.ones((2, 5))
>>> index = torch.tensor([[0, 1, 2, 0, 0]])
>>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
tensor([[1., 0., 0., 1., 1.],
        [0., 1., 0., 0., 0.],
        [0., 0., 1., 0., 0.]])
>>> index = torch.tensor([[0, 1, 2, 0, 0], [0, 1, 2, 2, 2]])
>>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
tensor([[2., 0., 0., 1., 1.],
        [0., 2., 0., 0., 0.],
        [0., 0., 2., 1., 1.]])

""".format(**reproducibility_notes),
scatter_reduce_(dim, index, src, reduce, *, include_self=True) -> Tensor

Reduces all values from the :attr:`src` tensor to the indices specified in
the :attr:`index` tensor in the :attr:`self` tensor using the applied reduction
defined via the :attr:`reduce` argument (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`,
:obj:`"amax"`, :obj:`"amin"`). For each value in :attr:`src`, it is reduced to an
index in :attr:`self` which is specified by its index in :attr:`src` for
``dimension != dim`` and by the corresponding value in :attr:`index` for
``dimension = dim``. If :obj:`include_self=True`, the values in the :attr:`self`
tensor are included in the reduction.

:attr:`self`, :attr:`index` and :attr:`src` should all have
the same number of dimensions. It is also required that
``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
Note that ``index`` and ``src`` do not broadcast.

For a 3-D tensor with :obj:`reduce="sum"` and :obj:`include_self=True` the
output is given as::

    self[index[i][j][k]][j][k] += src[i][j][k]  # if dim == 0
    self[i][index[i][j][k]][k] += src[i][j][k]  # if dim == 1
    self[i][j][index[i][j][k]] += src[i][j][k]  # if dim == 2

{forward_reproducibility_note}

The backward pass is implemented only for ``src.shape == index.shape``.

This function is in beta and may change in the near future.

dim (int): the axis along which to index
index (LongTensor): the indices of elements to scatter and reduce.
src (Tensor): the source elements to scatter and reduce
reduce (str): the reduction operation to apply for non-unique indices
    (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
include_self (bool): whether elements from the :attr:`self` tensor are
    included in the reduction

>>> src = torch.tensor([1., 2., 3., 4., 5., 6.])
>>> index = torch.tensor([0, 1, 0, 1, 2, 1])
>>> input = torch.tensor([1., 2., 3., 4.])
>>> input.scatter_reduce(0, index, src, reduce="sum")
tensor([5., 14., 8., 4.])
>>> input.scatter_reduce(0, index, src, reduce="sum", include_self=False)
tensor([4., 12., 5., 4.])
>>> input2 = torch.tensor([5., 4., 3., 2.])
>>> input2.scatter_reduce(0, index, src, reduce="amax")
tensor([5., 6., 5., 2.])
>>> input2.scatter_reduce(0, index, src, reduce="amax", include_self=False)
tensor([3., 6., 5., 2.])

""".format(**reproducibility_notes),
select(dim, index) -> Tensor

See :func:`torch.select`

select_scatter(src, dim, index) -> Tensor

See :func:`torch.select_scatter`

slice_scatter(src, dim=0, start=None, end=None, step=1) -> Tensor

See :func:`torch.slice_scatter`

set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor

Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
:attr:`self` tensor will share the same storage and have the same size and
strides as :attr:`source`. Changes to elements in one tensor will be reflected
in the other.

If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
storage, offset, size, and stride.

source (Tensor or Storage): the tensor or storage to use
storage_offset (int, optional): the offset in the storage
size (torch.Size, optional): the desired size. Defaults to the size of the source.
stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
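A minimal sketch of the storage sharing that :meth:`set_` establishes
(illustrative, not from the original examples)::

    import torch

    a = torch.zeros(4)
    b = torch.tensor([1., 2., 3., 4.])
    a.set_(b)   # a now shares b's storage, size, and strides
    b[0] = 9.
    a[0]        # tensor(9.) -- both tensors see the same memory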
sigmoid() -> Tensor

See :func:`torch.sigmoid`

sigmoid_() -> Tensor

In-place version of :meth:`~Tensor.sigmoid`

logit() -> Tensor

See :func:`torch.logit`

logit_() -> Tensor

In-place version of :meth:`~Tensor.logit`

sign() -> Tensor

See :func:`torch.sign`

sign_() -> Tensor

In-place version of :meth:`~Tensor.sign`

signbit() -> Tensor

See :func:`torch.signbit`

sgn() -> Tensor

See :func:`torch.sgn`

sgn_() -> Tensor

In-place version of :meth:`~Tensor.sgn`

sin() -> Tensor

See :func:`torch.sin`

sin_() -> Tensor

In-place version of :meth:`~Tensor.sin`

sinc() -> Tensor

See :func:`torch.sinc`

sinc_() -> Tensor

In-place version of :meth:`~Tensor.sinc`

sinh() -> Tensor

See :func:`torch.sinh`

sinh_() -> Tensor

In-place version of :meth:`~Tensor.sinh`
size(dim=None) -> torch.Size or int

Returns the size of the :attr:`self` tensor. If ``dim`` is not specified,
the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`.
If ``dim`` is specified, returns an int holding the size of that dimension.

dim (int, optional): The dimension for which to retrieve the size.

>>> t = torch.empty(3, 4, 5)
>>> t.size()
torch.Size([3, 4, 5])

shape() -> torch.Size

Returns the size of the :attr:`self` tensor. Alias for :attr:`size`.

See also :meth:`Tensor.size`.

>>> t = torch.empty(3, 4, 5)
>>> t.size()
torch.Size([3, 4, 5])
>>> t.shape
torch.Size([3, 4, 5])
sort(dim=-1, descending=False) -> (Tensor, LongTensor)

See :func:`torch.sort`

msort() -> Tensor

See :func:`torch.msort`

argsort(dim=-1, descending=False) -> LongTensor

See :func:`torch.argsort`

sparse_dim() -> int

Return the number of sparse dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.

Returns ``0`` if :attr:`self` is not a sparse tensor.

See also :meth:`Tensor.dense_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
sparse_resize_(size, sparse_dim, dense_dim) -> Tensor

Resizes :attr:`self` :ref:`sparse tensor <sparse-docs>` to the desired
size and the number of sparse and dense dimensions.

If the number of specified elements in :attr:`self` is zero, then
:attr:`size`, :attr:`sparse_dim`, and :attr:`dense_dim` can be any
size and positive integers such that ``len(size) == sparse_dim +
dense_dim``.

If :attr:`self` specifies one or more elements, however, then each
dimension in :attr:`size` must not be smaller than the corresponding
dimension of :attr:`self`, :attr:`sparse_dim` must equal the number
of sparse dimensions in :attr:`self`, and :attr:`dense_dim` must
equal the number of dense dimensions in :attr:`self`.

Throws an error if :attr:`self` is not a sparse tensor.

size (torch.Size): the desired size. If :attr:`self` is non-empty
    sparse tensor, the desired size cannot be smaller than the
    original size.
sparse_dim (int): the number of sparse dimensions
dense_dim (int): the number of dense dimensions

"sparse_resize_and_clear_",

sparse_resize_and_clear_(size, sparse_dim, dense_dim) -> Tensor

Removes all specified elements from a :ref:`sparse tensor
<sparse-docs>` :attr:`self` and resizes :attr:`self` to the desired
size and the number of sparse and dense dimensions.

Throws an error if :attr:`self` is not a sparse tensor.

size (torch.Size): the desired size.
sparse_dim (int): the number of sparse dimensions
dense_dim (int): the number of dense dimensions
sqrt() -> Tensor

See :func:`torch.sqrt`

sqrt_() -> Tensor

In-place version of :meth:`~Tensor.sqrt`

square() -> Tensor

See :func:`torch.square`

square_() -> Tensor

In-place version of :meth:`~Tensor.square`

squeeze(dim=None) -> Tensor

See :func:`torch.squeeze`

squeeze_(dim=None) -> Tensor

In-place version of :meth:`~Tensor.squeeze`

std(dim=None, *, correction=1, keepdim=False) -> Tensor

See :func:`torch.std`
storage_offset() -> int

Returns :attr:`self` tensor's offset in the underlying storage in terms of
number of storage elements (not bytes).

>>> x = torch.tensor([1, 2, 3, 4, 5])
>>> x.storage_offset()
0
>>> x[3:].storage_offset()
3

untyped_storage() -> torch.UntypedStorage

Returns the underlying :class:`UntypedStorage`.
stride(dim) -> tuple or int

Returns the stride of :attr:`self` tensor.

Stride is the jump necessary to go from one element to the next one in the
specified dimension :attr:`dim`. A tuple of all strides is returned when no
argument is passed in. Otherwise, an integer value is returned as the stride in
the particular dimension :attr:`dim`.

dim (int, optional): the desired dimension in which stride is required

>>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
>>> x.stride()
(5, 1)
>>> x.stride(0)
5
>>> x.stride(-1)
1
sub(other, *, alpha=1) -> Tensor

See :func:`torch.sub`.

sub_(other, *, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.sub`

subtract(other, *, alpha=1) -> Tensor

See :func:`torch.subtract`.

subtract_(other, *, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.subtract`.

sum(dim=None, keepdim=False, dtype=None) -> Tensor

See :func:`torch.sum`

nansum(dim=None, keepdim=False, dtype=None) -> Tensor

See :func:`torch.nansum`

svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor)

See :func:`torch.svd`

swapdims(dim0, dim1) -> Tensor

See :func:`torch.swapdims`

swapdims_(dim0, dim1) -> Tensor

In-place version of :meth:`~Tensor.swapdims`

swapaxes(axis0, axis1) -> Tensor

See :func:`torch.swapaxes`

swapaxes_(axis0, axis1) -> Tensor

In-place version of :meth:`~Tensor.swapaxes`

t() -> Tensor

See :func:`torch.t`

t_() -> Tensor

In-place version of :meth:`~Tensor.t`

tile(dims) -> Tensor

See :func:`torch.tile`
to(*args, **kwargs) -> Tensor

Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
inferred from the arguments of ``self.to(*args, **kwargs)``.

If the ``self`` Tensor already
has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
Otherwise, the returned tensor is a copy of ``self`` with the desired
:class:`torch.dtype` and :class:`torch.device`.

Here are the ways to call ``to``:

.. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor

Returns a Tensor with the specified :attr:`dtype`

.. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor

Returns a Tensor with the specified :attr:`device` and (optional)
:attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
When :attr:`non_blocking`, tries to convert asynchronously with respect to
the host if possible, e.g., converting a CPU Tensor with pinned memory to a
CUDA Tensor.
When :attr:`copy` is set, a new Tensor is created even when the Tensor
already matches the desired conversion.

.. method:: to(other, non_blocking=False, copy=False) -> Tensor

Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert
asynchronously with respect to the host if possible, e.g., converting a CPU
Tensor with pinned memory to a CUDA Tensor.
When :attr:`copy` is set, a new Tensor is created even when the Tensor
already matches the desired conversion.

>>> tensor = torch.randn(2, 2)  # Initially dtype=float32, device=cpu
>>> tensor.to(torch.float64)
tensor([[-0.5044,  0.0005],
        [ 0.3310, -0.0584]], dtype=torch.float64)

>>> cuda0 = torch.device('cuda:0')
>>> tensor.to(cuda0)
tensor([[-0.5044,  0.0005],
        [ 0.3310, -0.0584]], device='cuda:0')

>>> tensor.to(cuda0, dtype=torch.float64)
tensor([[-0.5044,  0.0005],
        [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')

>>> other = torch.randn((), dtype=torch.float64, device=cuda0)
>>> tensor.to(other, non_blocking=True)
tensor([[-0.5044,  0.0005],
        [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')

""".format(**common_args),
byte(memory_format=torch.preserve_format) -> Tensor

``self.byte()`` is equivalent to ``self.to(torch.uint8)``. See :func:`to`.

""".format(**common_args),

bool(memory_format=torch.preserve_format) -> Tensor

``self.bool()`` is equivalent to ``self.to(torch.bool)``. See :func:`to`.

""".format(**common_args),

char(memory_format=torch.preserve_format) -> Tensor

``self.char()`` is equivalent to ``self.to(torch.int8)``. See :func:`to`.

""".format(**common_args),

bfloat16(memory_format=torch.preserve_format) -> Tensor

``self.bfloat16()`` is equivalent to ``self.to(torch.bfloat16)``. See :func:`to`.

""".format(**common_args),

double(memory_format=torch.preserve_format) -> Tensor

``self.double()`` is equivalent to ``self.to(torch.float64)``. See :func:`to`.

""".format(**common_args),

float(memory_format=torch.preserve_format) -> Tensor

``self.float()`` is equivalent to ``self.to(torch.float32)``. See :func:`to`.

""".format(**common_args),

cdouble(memory_format=torch.preserve_format) -> Tensor

``self.cdouble()`` is equivalent to ``self.to(torch.complex128)``. See :func:`to`.

""".format(**common_args),

cfloat(memory_format=torch.preserve_format) -> Tensor

``self.cfloat()`` is equivalent to ``self.to(torch.complex64)``. See :func:`to`.

""".format(**common_args),

chalf(memory_format=torch.preserve_format) -> Tensor

``self.chalf()`` is equivalent to ``self.to(torch.complex32)``. See :func:`to`.

""".format(**common_args),

half(memory_format=torch.preserve_format) -> Tensor

``self.half()`` is equivalent to ``self.to(torch.float16)``. See :func:`to`.

""".format(**common_args),

int(memory_format=torch.preserve_format) -> Tensor

``self.int()`` is equivalent to ``self.to(torch.int32)``. See :func:`to`.

""".format(**common_args),

int_repr() -> Tensor

Given a quantized Tensor,
``self.int_repr()`` returns a CPU Tensor with uint8_t as data type that stores the
underlying uint8_t values of the given Tensor.

long(memory_format=torch.preserve_format) -> Tensor

``self.long()`` is equivalent to ``self.to(torch.int64)``. See :func:`to`.

""".format(**common_args),

short(memory_format=torch.preserve_format) -> Tensor

``self.short()`` is equivalent to ``self.to(torch.int16)``. See :func:`to`.

""".format(**common_args),
take(indices) -> Tensor

See :func:`torch.take`

take_along_dim(indices, dim) -> Tensor

See :func:`torch.take_along_dim`

tan() -> Tensor

See :func:`torch.tan`

tan_() -> Tensor

In-place version of :meth:`~Tensor.tan`

tanh() -> Tensor

See :func:`torch.tanh`

softmax(dim) -> Tensor

Alias for :func:`torch.nn.functional.softmax`.

tanh_() -> Tensor

In-place version of :meth:`~Tensor.tanh`
tolist() -> list or number

Returns the tensor as a (nested) list. For scalars, a standard
Python number is returned, just like with :meth:`~Tensor.item`.
Tensors are automatically moved to the CPU first if necessary.

This operation is not differentiable.

>>> a = torch.randn(2, 2)
>>> a.tolist()
[[0.012766935862600803, 0.5415473580360413],
 [-0.08909505605697632, 0.7729271650314331]]
>>> a[0, 0].tolist()
0.012766935862600803
topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor)

See :func:`torch.topk`

to_dense(dtype=None, *, masked_grad=True) -> Tensor

Creates a strided copy of :attr:`self` if :attr:`self` is not a strided tensor, otherwise returns :attr:`self`.

masked_grad (bool, optional): If set to ``True`` (default) and
    :attr:`self` has a sparse layout then the backward of
    :meth:`to_dense` returns ``grad.sparse_mask(self)``.

>>> s = torch.sparse_coo_tensor(
...        torch.tensor([[1, 1],
...                      [0, 2]]),
...        torch.tensor([9, 10]),
...        size=(3, 3))
>>> s.to_dense()
tensor([[ 0,  0,  0],
        [ 9,  0, 10],
        [ 0,  0,  0]])
to_sparse(sparseDims) -> Tensor

Returns a sparse copy of the tensor. PyTorch supports sparse tensors in
:ref:`coordinate format <sparse-coo-docs>`.

sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor

>>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
>>> d.to_sparse()
tensor(indices=tensor([[1, 1],
                       [0, 2]]),
       values=tensor([ 9, 10]),
       size=(3, 3), nnz=2, layout=torch.sparse_coo)
>>> d.to_sparse(1)
tensor(indices=tensor([[1]]),
       values=tensor([[ 9,  0, 10]]),
       size=(3, 3), nnz=1, layout=torch.sparse_coo)
.. method:: to_sparse(*, layout=None, blocksize=None, dense_dim=None) -> Tensor

Returns a sparse tensor with the specified layout and blocksize. If
the :attr:`self` is strided, the number of dense dimensions can be
specified, and a hybrid sparse tensor will be created, with
`dense_dim` dense dimensions and `self.dim() - 2 - dense_dim` batch
dimensions.

.. note:: If the :attr:`self` layout and blocksize parameters match
          with the specified layout and blocksize, return
          :attr:`self`. Otherwise, return a sparse tensor copy of
          :attr:`self`.

layout (:class:`torch.layout`, optional): The desired sparse
    layout. One of ``torch.sparse_coo``, ``torch.sparse_csr``,
    ``torch.sparse_csc``, ``torch.sparse_bsr``, or
    ``torch.sparse_bsc``. Default: if ``None``,
    ``torch.sparse_coo``.

blocksize (list, tuple, :class:`torch.Size`, optional): Block size
    of the resulting BSR or BSC tensor. For other layouts,
    specifying the block size that is not ``None`` will result in a
    RuntimeError exception. A block size must be a tuple of length
    two such that its items evenly divide the two sparse dimensions.

dense_dim (int, optional): Number of dense dimensions of the
    resulting CSR, CSC, BSR or BSC tensor. This argument should be
    used only if :attr:`self` is a strided tensor, and must be a
    value between 0 and dimension of :attr:`self` tensor minus two.

>>> x = torch.tensor([[1, 0], [0, 0], [2, 3]])
>>> x.to_sparse(layout=torch.sparse_coo)
tensor(indices=tensor([[0, 2, 2],
                       [0, 0, 1]]),
       values=tensor([1, 2, 3]),
       size=(3, 2), nnz=3, layout=torch.sparse_coo)
>>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(1, 2))
tensor(crow_indices=tensor([0, 1, 1, 2]),
       col_indices=tensor([0, 0]),
       values=tensor([[[1, 0]],
                      [[2, 3]]]), size=(3, 2), nnz=2, layout=torch.sparse_bsr)
>>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 1))
RuntimeError: Tensor size(-2) 3 needs to be divisible by blocksize[0] 2
>>> x.to_sparse(layout=torch.sparse_csr, blocksize=(3, 1))
RuntimeError: to_sparse for Strided to SparseCsr conversion does not use specified blocksize

>>> x = torch.tensor([[[1], [0]], [[0], [0]], [[2], [3]]])
>>> x.to_sparse(layout=torch.sparse_csr, dense_dim=1)
tensor(crow_indices=tensor([0, 1, 1, 3]),
       col_indices=tensor([0, 0, 1]),
       values=tensor([[1],
                      [2],
                      [3]]), size=(3, 2, 1), nnz=3, layout=torch.sparse_csr)
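As a sanity-check sketch (not part of the original examples), the layout
conversions round-trip through :meth:`to_dense`::

    import torch

    x = torch.tensor([[1, 0], [0, 0], [2, 3]])
    csr = x.to_sparse(layout=torch.sparse_csr)
    torch.equal(csr.to_dense(), x)  # True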
to_sparse_csr(dense_dim=None) -> Tensor

Convert a tensor to compressed row storage format (CSR). Except for
strided tensors, only works with 2D tensors. If the :attr:`self` is
strided, then the number of dense dimensions can be specified, and a
hybrid CSR tensor will be created, with `dense_dim` dense dimensions
and `self.dim() - 2 - dense_dim` batch dimensions.

dense_dim (int, optional): Number of dense dimensions of the
    resulting CSR tensor. This argument should be used only if
    :attr:`self` is a strided tensor, and must be a value between 0
    and dimension of :attr:`self` tensor minus two.

>>> dense = torch.randn(5, 5)
>>> sparse = dense.to_sparse_csr()

>>> dense = torch.zeros(3, 3, 1, 1)
>>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
>>> dense.to_sparse_csr(dense_dim=2)
tensor(crow_indices=tensor([0, 1, 2, 3]),
       col_indices=tensor([0, 2, 1]),
       values=tensor([[[1.]],

                      [[1.]],

                      [[1.]]]), size=(3, 3, 1, 1), nnz=3,
       layout=torch.sparse_csr)
to_sparse_csc() -> Tensor

Convert a tensor to compressed column storage (CSC) format. Except
for strided tensors, only works with 2D tensors. If the :attr:`self`
is strided, then the number of dense dimensions can be specified,
and a hybrid CSC tensor will be created, with `dense_dim` dense
dimensions and `self.dim() - 2 - dense_dim` batch dimensions.

dense_dim (int, optional): Number of dense dimensions of the
    resulting CSC tensor. This argument should be used only if
    :attr:`self` is a strided tensor, and must be a value between 0
    and dimension of :attr:`self` tensor minus two.

>>> dense = torch.randn(5, 5)
>>> sparse = dense.to_sparse_csc()

>>> dense = torch.zeros(3, 3, 1, 1)
>>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
>>> dense.to_sparse_csc(dense_dim=2)
tensor(ccol_indices=tensor([0, 1, 2, 3]),
       row_indices=tensor([0, 2, 1]),
       values=tensor([[[1.]],

                      [[1.]],

                      [[1.]]]), size=(3, 3, 1, 1), nnz=3,
       layout=torch.sparse_csc)
to_sparse_bsr(blocksize, dense_dim) -> Tensor

Convert a tensor to a block sparse row (BSR) storage format of given
blocksize. If the :attr:`self` is strided, then the number of dense
dimensions can be specified, and a hybrid BSR tensor will be
created, with `dense_dim` dense dimensions and `self.dim() - 2 -
dense_dim` batch dimensions.

blocksize (list, tuple, :class:`torch.Size`, optional): Block size
    of the resulting BSR tensor. A block size must be a tuple of
    length two such that its items evenly divide the two sparse
    dimensions.

dense_dim (int, optional): Number of dense dimensions of the
    resulting BSR tensor. This argument should be used only if
    :attr:`self` is a strided tensor, and must be a value between 0
    and dimension of :attr:`self` tensor minus two.

>>> dense = torch.randn(10, 10)
>>> sparse = dense.to_sparse_csr()
>>> sparse_bsr = sparse.to_sparse_bsr((5, 5))
>>> sparse_bsr.col_indices()
tensor([0, 1, 0, 1])

>>> dense = torch.zeros(4, 3, 1)
>>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
>>> dense.to_sparse_bsr((2, 1), 1)
tensor(crow_indices=tensor([0, 2, 3]),
       col_indices=tensor([0, 2, 1]),
       values=tensor([[[[1.]],
                       [[1.]]],
                      [[[1.]],
                       [[1.]]],
                      [[[1.]],
                       [[1.]]]]), size=(4, 3, 1), nnz=3,
       layout=torch.sparse_bsr)
to_sparse_bsc(blocksize, dense_dim) -> Tensor

Convert a tensor to a block sparse column (BSC) storage format of
given blocksize. If the :attr:`self` is strided, then the number of
dense dimensions can be specified, and a hybrid BSC tensor will be
created, with `dense_dim` dense dimensions and `self.dim() - 2 -
dense_dim` batch dimensions.

blocksize (list, tuple, :class:`torch.Size`, optional): Block size
    of the resulting BSC tensor. A block size must be a tuple of
    length two such that its items evenly divide the two sparse
    dimensions.

dense_dim (int, optional): Number of dense dimensions of the
    resulting BSC tensor. This argument should be used only if
    :attr:`self` is a strided tensor, and must be a value between 0
    and dimension of :attr:`self` tensor minus two.

>>> dense = torch.randn(10, 10)
>>> sparse = dense.to_sparse_csr()
>>> sparse_bsc = sparse.to_sparse_bsc((5, 5))
>>> sparse_bsc.row_indices()
tensor([0, 1, 0, 1])

>>> dense = torch.zeros(4, 3, 1)
>>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
>>> dense.to_sparse_bsc((2, 1), 1)
tensor(ccol_indices=tensor([0, 1, 2, 3]),
       row_indices=tensor([0, 1, 0]),
       values=tensor([[[[1.]],
                       [[1.]]],
                      [[[1.]],
                       [[1.]]],
                      [[[1.]],
                       [[1.]]]]), size=(4, 3, 1), nnz=3,
       layout=torch.sparse_bsc)
to_mkldnn() -> Tensor

Returns a copy of the tensor in ``torch.mkldnn`` layout.

trace() -> Tensor

See :func:`torch.trace`

transpose(dim0, dim1) -> Tensor

See :func:`torch.transpose`

transpose_(dim0, dim1) -> Tensor

In-place version of :meth:`~Tensor.transpose`
triangular_solve(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)

See :func:`torch.triangular_solve`

tril(diagonal=0) -> Tensor

See :func:`torch.tril`

tril_(diagonal=0) -> Tensor

In-place version of :meth:`~Tensor.tril`

triu(diagonal=0) -> Tensor

See :func:`torch.triu`

triu_(diagonal=0) -> Tensor

In-place version of :meth:`~Tensor.triu`

true_divide(value) -> Tensor

See :func:`torch.true_divide`

true_divide_(value) -> Tensor

In-place version of :meth:`~Tensor.true_divide`

trunc() -> Tensor

See :func:`torch.trunc`

fix() -> Tensor

See :func:`torch.fix`.

trunc_() -> Tensor

In-place version of :meth:`~Tensor.trunc`

fix_() -> Tensor

In-place version of :meth:`~Tensor.fix`
type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor

Returns the type if `dtype` is not provided, else casts this object to
the specified type.

If this is already of the correct type, no copy is performed and the
original object is returned.

dtype (dtype or string): The desired type
non_blocking (bool): If ``True``, and the source is in pinned memory
    and destination is on the GPU or vice versa, the copy is performed
    asynchronously with respect to the host. Otherwise, the argument
    has no effect.
**kwargs: For compatibility, may contain the key ``async`` in place of
    the ``non_blocking`` argument. The ``async`` arg is deprecated.

type_as(tensor) -> Tensor

Returns this tensor cast to the type of the given tensor.

This is a no-op if the tensor is already of the correct type. This is
equivalent to ``self.type(tensor.type())``.

tensor (Tensor): the tensor which has the desired type
unfold(dimension, size, step) -> Tensor

Returns a view of the original tensor which contains all slices of size :attr:`size` from
:attr:`self` tensor in the dimension :attr:`dimension`.

Step between two slices is given by :attr:`step`.

If `sizedim` is the size of dimension :attr:`dimension` for :attr:`self`, the size of
dimension :attr:`dimension` in the returned tensor will be
`(sizedim - size) / step + 1`.

An additional dimension of size :attr:`size` is appended in the returned tensor.

dimension (int): dimension in which unfolding happens
size (int): the size of each slice that is unfolded
step (int): the step between each slice

>>> x = torch.arange(1., 8)
>>> x
tensor([ 1.,  2.,  3.,  4.,  5.,  6.,  7.])
>>> x.unfold(0, 2, 1)
tensor([[ 1.,  2.],
        [ 2.,  3.],
        [ 3.,  4.],
        [ 4.,  5.],
        [ 5.,  6.],
        [ 6.,  7.]])
>>> x.unfold(0, 2, 2)
tensor([[ 1.,  2.],
        [ 3.,  4.],
        [ 5.,  6.]])
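A quick sketch of the output-size arithmetic stated above (illustrative)::

    import torch

    x = torch.arange(1., 8)   # sizedim = 7 along dim 0
    w = x.unfold(0, 2, 2)     # (7 - 2) // 2 + 1 = 3 slices, each of size 2
    w.shape                   # torch.Size([3, 2])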
uniform_(from=0, to=1, *, generator=None) -> Tensor

Fills :attr:`self` tensor with numbers sampled from the continuous uniform
distribution:

.. math::
    f(x) = \dfrac{1}{\text{to} - \text{from}}
unsqueeze(dim) -> Tensor

See :func:`torch.unsqueeze`

unsqueeze_(dim) -> Tensor

In-place version of :meth:`~Tensor.unsqueeze`

var(dim=None, *, correction=1, keepdim=False) -> Tensor

See :func:`torch.var`

vdot(other) -> Tensor

See :func:`torch.vdot`
view(*shape) -> Tensor

Returns a new tensor with the same data as the :attr:`self` tensor but of a
different :attr:`shape`.

The returned tensor shares the same data and must have the same number
of elements, but may have a different size. For a tensor to be viewed, the new
view size must be compatible with its original size and stride, i.e., each new
view dimension must either be a subspace of an original dimension, or only span
across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,

.. math::

    \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]

Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
:meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
returns a view if the shapes are compatible, and copies (equivalent to calling
:meth:`contiguous`) otherwise.

shape (torch.Size or int...): the desired size

>>> x = torch.randn(4, 4)
>>> x.size()
torch.Size([4, 4])
>>> y = x.view(16)
>>> y.size()
torch.Size([16])
>>> z = x.view(-1, 8)  # the size -1 is inferred from other dimensions
>>> z.size()
torch.Size([2, 8])

>>> a = torch.randn(1, 2, 3, 4)
>>> a.size()
torch.Size([1, 2, 3, 4])
>>> b = a.transpose(1, 2)  # Swaps 2nd and 3rd dimension
>>> b.size()
torch.Size([1, 3, 2, 4])
>>> c = a.view(1, 3, 2, 4)  # Does not change tensor layout in memory
>>> c.size()
torch.Size([1, 3, 2, 4])
>>> torch.equal(b, c)
False
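A small sketch of the recommended fallback to :meth:`reshape` when the
contiguity-like condition fails (illustrative only)::

    import torch

    a = torch.randn(2, 3)
    t = a.t()                # non-contiguous view; flattening violates the condition
    try:
        t.view(6)            # raises RuntimeError
    except RuntimeError:
        flat = t.reshape(6)  # reshape copies instead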
.. method:: view(dtype) -> Tensor

Returns a new tensor with the same data as the :attr:`self` tensor but of a
different :attr:`dtype`.

If the element size of :attr:`dtype` is different than that of ``self.dtype``,
then the size of the last dimension of the output will be scaled
proportionally. For instance, if :attr:`dtype` element size is twice that of
``self.dtype``, then each pair of elements in the last dimension of
:attr:`self` will be combined, and the size of the last dimension of the output
will be half that of :attr:`self`. If :attr:`dtype` element size is half that
of ``self.dtype``, then each element in the last dimension of :attr:`self` will
be split in two, and the size of the last dimension of the output will be
double that of :attr:`self`. For this to be possible, the following conditions
must be true:

* ``self.dim()`` must be greater than 0.
* ``self.stride(-1)`` must be 1.

Additionally, if the element size of :attr:`dtype` is greater than that of
``self.dtype``, the following conditions must be true as well:

* ``self.size(-1)`` must be divisible by the ratio between the element
  sizes of the dtypes.
* ``self.storage_offset()`` must be divisible by the ratio between the
  element sizes of the dtypes.
* The strides of all dimensions, except the last dimension, must be
  divisible by the ratio between the element sizes of the dtypes.

If any of the above conditions are not met, an error is thrown.

This overload is not supported by TorchScript, and using it in a Torchscript
program will cause undefined behavior.

dtype (:class:`torch.dtype`): the desired dtype

>>> x = torch.randn(4, 4)
>>> x
tensor([[ 0.9482, -0.0310,  1.4999, -0.5316],
        [-0.1520,  0.7472,  0.5617, -0.8649],
        [-2.4724, -0.0334, -0.2976, -0.8499],
        [-0.2109,  1.9913, -0.9607, -0.6123]])
>>> x.dtype
torch.float32

>>> y = x.view(torch.int32)
>>> y
tensor([[ 1064483442, -1124191867,  1069546515, -1089989247],
        [-1105482831,  1061112040,  1057999968, -1084397505],
        [-1071760287, -1123489973, -1097310419, -1084649136],
        [-1101533110,  1073668768, -1082790149, -1088634448]],
    dtype=torch.int32)
>>> y[0, 0] = 1000000000
>>> x
tensor([[ 0.0047, -0.0310,  1.4999, -0.5316],
        [-0.1520,  0.7472,  0.5617, -0.8649],
        [-2.4724, -0.0334, -0.2976, -0.8499],
        [-0.2109,  1.9913, -0.9607, -0.6123]])

>>> x.view(torch.cfloat)
tensor([[ 0.0047-0.0310j,  1.4999-0.5316j],
        [-0.1520+0.7472j,  0.5617-0.8649j],
        [-2.4724-0.0334j, -0.2976-0.8499j],
        [-0.2109+1.9913j, -0.9607-0.6123j]])
>>> x.view(torch.cfloat).size()
torch.Size([4, 2])

>>> x.view(torch.uint8)
tensor([[  0, 202, 154,  59, 182, 243, 253, 188, 185, 252, 191,  63, 240,  22,
           8, 191],
        [227, 165,  27, 190, 128,  72,  63,  63, 146, 203,  15,  63,  22, 106,
          93, 191],
        [205,  59,  30, 192, 112, 206,   8, 189,   7,  95, 152, 190,  12, 147,
          89, 191],
        [ 43, 246,  87, 190, 235, 226, 254,  63, 111, 240, 117, 191, 177, 191,
          28, 191]], dtype=torch.uint8)
>>> x.view(torch.uint8).size()
torch.Size([4, 16])
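The last-dimension scaling can also be checked directly; a brief sketch
(illustrative only)::

    import torch

    x = torch.zeros(4, 4, dtype=torch.float32)  # 4-byte elements
    x.view(torch.int16).shape      # torch.Size([4, 8]) -- half-size dtype doubles it
    x.view(torch.complex64).shape  # torch.Size([4, 2]) -- double-size dtype halves it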
view_as(other) -> Tensor

View this tensor as the same size as :attr:`other`.
``self.view_as(other)`` is equivalent to ``self.view(other.size())``.

Please see :meth:`~Tensor.view` for more information about ``view``.

other (:class:`torch.Tensor`): The result tensor has the same size
    as :attr:`other`.
expand(*sizes) -> Tensor

Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
to a larger size.

Passing -1 as the size for a dimension means not changing the size of
that dimension.

Tensor can be also expanded to a larger number of dimensions, and the
new ones will be appended at the front. For the new dimensions, the
size cannot be set to -1.

Expanding a tensor does not allocate new memory, but only creates a
new view on the existing tensor where a dimension of size one is
expanded to a larger size by setting the ``stride`` to 0. Any dimension
of size 1 can be expanded to an arbitrary value without allocating new
memory.

*sizes (torch.Size or int...): the desired expanded size

More than one element of an expanded tensor may refer to a single
memory location. As a result, in-place operations (especially ones that
are vectorized) may result in incorrect behavior. If you need to write
to the tensors, please clone them first.

>>> x = torch.tensor([[1], [2], [3]])
>>> x.size()
torch.Size([3, 1])
>>> x.expand(3, 4)
tensor([[ 1,  1,  1,  1],
        [ 2,  2,  2,  2],
        [ 3,  3,  3,  3]])
>>> x.expand(-1, 4)  # -1 means not changing the size of that dimension
tensor([[ 1,  1,  1,  1],
        [ 2,  2,  2,  2],
        [ 3,  3,  3,  3]])
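The stride-0 trick described above can be observed directly; a short sketch
(illustrative only)::

    import torch

    x = torch.tensor([[1], [2], [3]])
    e = x.expand(3, 4)
    e.stride()  # (1, 0) -- the expanded dimension has stride 0, so no new memory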
expand_as(other) -> Tensor

Expand this tensor to the same size as :attr:`other`.
``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.

Please see :meth:`~Tensor.expand` for more information about ``expand``.

other (:class:`torch.Tensor`): The result tensor has the same size
    as :attr:`other`.

sum_to_size(*size) -> Tensor

Sum ``this`` tensor to :attr:`size`.
:attr:`size` must be broadcastable to ``this`` tensor size.

size (int...): a sequence of integers defining the shape of the output tensor.
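A brief sketch of how broadcastable dimensions are summed away (illustrative
only)::

    import torch

    x = torch.ones(2, 3)
    x.sum_to_size(1, 3)  # tensor([[2., 2., 2.]]) -- dim 0 collapses to size 1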
zero_() -> Tensor

Fills :attr:`self` tensor with zeros.

matmul(tensor2) -> Tensor

See :func:`torch.matmul`

chunk(chunks, dim=0) -> List of Tensors

See :func:`torch.chunk`

unsafe_chunk(chunks, dim=0) -> List of Tensors

See :func:`torch.unsafe_chunk`

unsafe_split(split_size, dim=0) -> List of Tensors

See :func:`torch.unsafe_split`

tensor_split(indices_or_sections, dim=0) -> List of Tensors

See :func:`torch.tensor_split`

hsplit(split_size_or_sections) -> List of Tensors

See :func:`torch.hsplit`

vsplit(split_size_or_sections) -> List of Tensors

See :func:`torch.vsplit`

dsplit(split_size_or_sections) -> List of Tensors

See :func:`torch.dsplit`

stft(n_fft, hop_length=None, win_length=None, window=None, center=True,
     pad_mode='reflect', normalized=False, onesided=None, return_complex=None) -> Tensor

See :func:`torch.stft`

istft(n_fft, hop_length=None, win_length=None, window=None,
      center=True, normalized=False, onesided=True, length=None) -> Tensor

See :func:`torch.istft`

det() -> Tensor

See :func:`torch.det`

where(condition, y) -> Tensor

``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``.
See :func:`torch.where`

logdet() -> Tensor

See :func:`torch.logdet`

slogdet() -> (Tensor, Tensor)

See :func:`torch.slogdet`

unbind(dim=0) -> seq

See :func:`torch.unbind`

pin_memory() -> Tensor

Copies the tensor to pinned memory, if it's not already pinned.

pinverse() -> Tensor

See :func:`torch.pinverse`
index_add(dim, index, source, *, alpha=1) -> Tensor

Out-of-place version of :meth:`torch.Tensor.index_add_`.

index_copy(dim, index, tensor2) -> Tensor

Out-of-place version of :meth:`torch.Tensor.index_copy_`.

index_fill(dim, index, value) -> Tensor

Out-of-place version of :meth:`torch.Tensor.index_fill_`.

scatter(dim, index, src) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_`

scatter_add(dim, index, src) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_add_`

scatter_reduce(dim, index, src, reduce, *, include_self=True) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`

masked_scatter(mask, tensor) -> Tensor

Out-of-place version of :meth:`torch.Tensor.masked_scatter_`

The inputs :attr:`self` and :attr:`mask`
:ref:`broadcast <broadcasting-semantics>`.

>>> self = torch.tensor([0, 0, 0, 0, 0])
>>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=torch.bool)
>>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
>>> self.masked_scatter(mask, source)
tensor([[0, 0, 0, 0, 1],
        [2, 3, 0, 4, 5]])
xlogy(other) -> Tensor

See :func:`torch.xlogy`

xlogy_(other) -> Tensor

In-place version of :meth:`~Tensor.xlogy`

masked_fill(mask, value) -> Tensor

Out-of-place version of :meth:`torch.Tensor.masked_fill_`
"grad",

This attribute is ``None`` by default and becomes a Tensor the first time a call to
:func:`backward` computes gradients for ``self``.
The attribute will then contain the gradients computed and future calls to
:func:`backward` will accumulate (add) gradients into it.

retain_grad() -> None

Enables this Tensor to have its :attr:`grad` populated during
:func:`backward`. This is a no-op for leaf tensors.

"retains_grad",

Is ``True`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
populated during :func:`backward`, ``False`` otherwise.

"requires_grad",

Is ``True`` if gradients need to be computed for this Tensor, ``False`` otherwise.

The fact that gradients need to be computed for a Tensor does not mean that the :attr:`grad`
attribute will be populated, see :attr:`is_leaf` for more details.

"is_leaf",

All Tensors that have :attr:`requires_grad` which is ``False`` will be leaf Tensors by convention.

For Tensors that have :attr:`requires_grad` which is ``True``, they will be leaf Tensors if they were
created by the user. This means that they are not the result of an operation and so
:attr:`grad_fn` is None.

Only leaf Tensors will have their :attr:`grad` populated during a call to :func:`backward`.
To get :attr:`grad` populated for non-leaf Tensors, you can use :func:`retain_grad`.

>>> a = torch.rand(10, requires_grad=True)
>>> a.is_leaf
True
>>> b = torch.rand(10, requires_grad=True).cuda()
>>> b.is_leaf
False
# b was created by the operation that cast a cpu Tensor into a cuda Tensor
>>> c = torch.rand(10, requires_grad=True) + 2
>>> c.is_leaf
False
# c was created by the addition operation
>>> d = torch.rand(10).cuda()
>>> d.is_leaf
True
# d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
>>> e = torch.rand(10).cuda().requires_grad_()
>>> e.is_leaf
True
# e requires gradients and has no operations creating it
>>> f = torch.rand(10, requires_grad=True, device="cuda")
>>> f.is_leaf
True
# f requires grad, has no operation creating it
"names",

Stores names for each of this tensor's dimensions.

``names[idx]`` corresponds to the name of tensor dimension ``idx``.
Names are either a string if the dimension is named or ``None`` if the
dimension is unnamed.

Dimension names may contain characters or underscore. Furthermore, a dimension
name must be a valid Python variable name (i.e., does not start with underscore).

Tensors may not have two named dimensions with the same name.

The named tensor API is experimental and subject to change.
"is_cuda",

Is ``True`` if the Tensor is stored on the GPU, ``False`` otherwise.

"is_cpu",

Is ``True`` if the Tensor is stored on the CPU, ``False`` otherwise.

"is_xla",

Is ``True`` if the Tensor is stored on an XLA device, ``False`` otherwise.

"is_ipu",

Is ``True`` if the Tensor is stored on the IPU, ``False`` otherwise.

"is_xpu",

Is ``True`` if the Tensor is stored on the XPU, ``False`` otherwise.

"is_quantized",

Is ``True`` if the Tensor is quantized, ``False`` otherwise.

"is_meta",

Is ``True`` if the Tensor is a meta tensor, ``False`` otherwise. Meta tensors
are like normal tensors, but they carry no data.

"is_mps",

Is ``True`` if the Tensor is stored on the MPS device, ``False`` otherwise.

"is_sparse",

Is ``True`` if the Tensor uses sparse COO storage layout, ``False`` otherwise.

"is_sparse_csr",

Is ``True`` if the Tensor uses sparse CSR storage layout, ``False`` otherwise.

"device",

Is the :class:`torch.device` where this Tensor is.
"ndim",

Alias for :meth:`~Tensor.dim()`

"itemsize",

Alias for :meth:`~Tensor.element_size()`

"nbytes",

Returns the number of bytes consumed by the "view" of elements of the Tensor
if the Tensor does not use sparse storage layout.
Defined to be :meth:`~Tensor.numel()` * :meth:`~Tensor.element_size()`

"T",

Returns a view of this tensor with its dimensions reversed.

If ``n`` is the number of dimensions in ``x``,
``x.T`` is equivalent to ``x.permute(n-1, n-2, ..., 0)``.

The use of :func:`Tensor.T` on tensors of dimension other than 2 to reverse their shape
is deprecated and it will throw an error in a future release. Consider :attr:`~.Tensor.mT`
to transpose batches of matrices or `x.permute(*torch.arange(x.ndim - 1, -1, -1))` to reverse
the dimensions of a tensor.

"H",

Returns a view of a matrix (2-D tensor) conjugated and transposed.

``x.H`` is equivalent to ``x.transpose(0, 1).conj()`` for complex matrices and
``x.transpose(0, 1)`` for real matrices.

See also :attr:`~.Tensor.mH`: an attribute that also works on batches of matrices.

"mT",

Returns a view of this tensor with the last two dimensions transposed.

``x.mT`` is equivalent to ``x.transpose(-2, -1)``.

"mH",

Accessing this property is equivalent to calling :func:`adjoint`.

adjoint() -> Tensor

Alias for :func:`adjoint`
"real",

Returns a new tensor containing real values of the :attr:`self` tensor for a complex-valued input tensor.
The returned tensor and :attr:`self` share the same underlying storage.

Returns :attr:`self` if :attr:`self` is a real-valued tensor.

>>> x = torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
>>> x.real
tensor([ 0.3100, -0.5445, -1.6492, -0.0638])

"imag",

Returns a new tensor containing imaginary values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.

:func:`imag` is only supported for tensors with complex dtypes.

>>> x = torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
>>> x.imag
tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
as_subclass(cls) -> Tensor

Makes a ``cls`` instance with the same data pointer as ``self``. Changes
in the output mirror changes in ``self``, and the output stays attached
to the autograd graph. ``cls`` must be a subclass of ``Tensor``.

crow_indices() -> IntTensor

Returns the tensor containing the compressed row indices of the :attr:`self`
tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
The ``crow_indices`` tensor is strictly of shape (:attr:`self`.size(0) + 1)
and of type ``int32`` or ``int64``. When using MKL routines such as sparse
matrix multiplication, it is necessary to use ``int32`` indexing in order
to avoid downcasting and potentially losing information.

>>> csr = torch.eye(5, 5).to_sparse_csr()
>>> csr.crow_indices()
tensor([0, 1, 2, 3, 4, 5], dtype=torch.int32)

col_indices() -> IntTensor

Returns the tensor containing the column indices of the :attr:`self`
tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
The ``col_indices`` tensor is strictly of shape (:attr:`self`.nnz())
and of type ``int32`` or ``int64``. When using MKL routines such as sparse
matrix multiplication, it is necessary to use ``int32`` indexing in order
to avoid downcasting and potentially losing information.

>>> csr = torch.eye(5, 5).to_sparse_csr()
>>> csr.col_indices()
tensor([0, 1, 2, 3, 4], dtype=torch.int32)

to_padded_tensor(padding, output_size=None) -> Tensor

See :func:`torch.nested.to_padded_tensor`