6
from torch._C import _from_dlpack
7
from torch._C import _to_dlpack as to_dlpack
10
class DLDeviceType(enum.IntEnum):
24
# Attach the public docstring to the C-implemented `_to_dlpack` binding.
# (`_add_docstr` is how torch documents functions defined in the C extension.)
torch._C._add_docstr(to_dlpack, r"""to_dlpack(tensor) -> PyCapsule

Returns an opaque object (a "DLPack capsule") representing the tensor.

.. note::
  ``to_dlpack`` is a legacy DLPack interface. The capsule it returns
  cannot be used for anything in Python other than use it as input to
  ``from_dlpack``. The more idiomatic use of DLPack is to call
  ``from_dlpack`` directly on the tensor object - this works when that
  object has a ``__dlpack__`` method, which PyTorch and most other
  libraries indeed have now.

.. warning::
  Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``.
  Behavior when a capsule is consumed multiple times is undefined.

Args:
    tensor: a tensor to be exported

The DLPack capsule shares the tensor's memory.
""")
49
def from_dlpack(ext_tensor: Any) -> 'torch.Tensor':
    """from_dlpack(ext_tensor) -> Tensor

    Converts a tensor from an external library into a ``torch.Tensor``.

    The returned PyTorch tensor will share the memory with the input tensor
    (which may have come from another library). Note that in-place operations
    will therefore also affect the data of the input tensor. This may lead to
    unexpected issues (e.g., other libraries may have read-only flags or
    immutable data structures), so the user should only do this if they know
    for sure that this is fine.

    Args:
        ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule):
            The tensor or DLPack capsule to convert.

            If ``ext_tensor`` is a tensor (or ndarray) object, it must support
            the ``__dlpack__`` protocol (i.e., have a ``ext_tensor.__dlpack__``
            method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is
            an opaque ``PyCapsule`` instance, typically produced by a
            ``to_dlpack`` function or method.

    Examples::

        >>> import torch.utils.dlpack
        >>> t = torch.arange(4)

        # Convert a tensor directly (supported in PyTorch >= 1.10)
        >>> t2 = torch.from_dlpack(t)
        >>> t2[:2] = -1  # show that memory is shared
        >>> t2
        tensor([-1, -1,  2,  3])
        >>> t
        tensor([-1, -1,  2,  3])

        # The old-style DLPack usage, with an intermediate capsule object
        >>> capsule = torch.utils.dlpack.to_dlpack(t)
        >>> capsule
        <capsule object "dltensor" at ...>
        >>> t3 = torch.from_dlpack(capsule)
        >>> t3
        tensor([-1, -1,  2,  3])
        >>> t3[0] = -9  # now we're sharing memory between 3 tensors
        >>> t3
        tensor([-9, -1,  2,  3])
        >>> t2
        tensor([-9, -1,  2,  3])
        >>> t
        tensor([-9, -1,  2,  3])
    """
    if hasattr(ext_tensor, '__dlpack__'):
        device = ext_tensor.__dlpack_device__()
        # If the producer lives on a CUDA or ROCm device, we must hand it the
        # consumer's current stream so it can synchronize before exporting.
        if device[0] in (DLDeviceType.kDLGPU, DLDeviceType.kDLROCM):
            stream = torch.cuda.current_stream(f'cuda:{device[1]}')
            # `cuda_stream` is the raw pointer to the stream; it is a public
            # (though undocumented) attribute. The array API standard requires
            # that the CUDA default *legacy* stream be passed as the value 1:
            # https://data-apis.org/array-api/latest/API_specification/array_object.html?dlpack-self-stream-none#dlpack-self-stream-none
            is_cuda = device[0] == DLDeviceType.kDLGPU
            # PyTorch does not use per-thread default streams by default, so a
            # null stream pointer on CUDA maps to the legacy-default value 1.
            # (ROCm has no such convention — pass the raw pointer unchanged.)
            stream_ptr = 1 if is_cuda and stream.cuda_stream == 0 else stream.cuda_stream
            dlpack = ext_tensor.__dlpack__(stream=stream_ptr)
        else:
            # Non-CUDA/ROCm devices (e.g. CPU) take no stream argument.
            dlpack = ext_tensor.__dlpack__()
    else:
        # Legacy path: ``ext_tensor`` is assumed to already be a DLPack
        # capsule (e.g. produced by ``to_dlpack``); pass it straight through.
        dlpack = ext_tensor
    return _from_dlpack(dlpack)