pytorch / storage.py (1228 lines, 45.8 KB)
import io

import torch
from ._utils import _type, _cuda, _hpu
from torch.types import Storage
from typing import cast, Any, Dict as _Dict, Optional as _Optional, TypeVar, Type, Union
import copy
import collections
from functools import lru_cache
import warnings
import threading
import functools
try:
    import numpy as np
    HAS_NUMPY = True
except ModuleNotFoundError:
    np = None  # type: ignore[assignment]

_share_memory_lock = threading.Lock()
_share_memory_map: _Dict[int, threading.RLock] = {}

T = TypeVar('T', bound='Union[_StorageBase, TypedStorage]')
23
class _StorageBase:
24
    _cdata: Any
25
    is_sparse: bool = False
26
    is_sparse_csr: bool = False
27
    device: torch.device
28

29
    def __init__(self, *args, **kwargs): ...  # noqa: E704
30
    def __len__(self) -> int: ...  # type: ignore[empty-body] # noqa: E704
31
    def __getitem__(self, idx): ...  # noqa: E704
32
    def __setitem__(self, *args, **kwargs): ...  # noqa: E704
33
    def copy_(self, source: T, non_blocking: _Optional[bool] = None) -> T: ...  # type: ignore[empty-body] # noqa: E704
34
    def new(self) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
35
    def nbytes(self) -> int: ...  # type: ignore[empty-body] # noqa: E704
36

37
    def size(self) -> int:
38
        return self.nbytes()
39

40
    def type(self, dtype: _Optional[str] = None, non_blocking: bool = False) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
41
    def cuda(self, device=None, non_blocking=False, **kwargs) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
42
    def hpu(self, device=None, non_blocking=False, **kwargs) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
43
    def element_size(self) -> int: ...  # type: ignore[empty-body, type-var] # noqa: E704
44

45
    def get_device(self) -> int:
46
        return self.device.index
47

48
    def data_ptr(self) -> int: ...  # type: ignore[empty-body] # noqa: E704
49

50
    def resizable(self) -> bool: ...  # type: ignore[empty-body] # noqa: E704
51

52
    # Defined in torch/csrc/generic/StorageSharing.cpp
53
    def _share_filename_cpu_(self, *args, **kwargs): ...  # noqa: E704
54
    def _share_fd_cpu_(self, *args, **kwargs): ...  # noqa: E704
55
    @classmethod
56
    def _new_using_filename_cpu(cls: Type[T], size: int) -> T: ...  # type: ignore[empty-body] # noqa: E704
57
    @classmethod
58
    def _new_using_fd_cpu(cls: Type[T], size: int) -> T: ...  # type: ignore[empty-body] # noqa: E704
59
    @classmethod
60
    def from_buffer(cls: Type[T], *args, **kwargs) -> T: ...  # type: ignore[empty-body] # noqa: E704
61
    @classmethod
62
    def _new_shared_filename_cpu(cls: Type[T], manager, obj, size, *, device=None, dtype=None) -> T: ...  # type: ignore[empty-body] # noqa: E704
63
    @classmethod
64
    def _release_ipc_counter_cuda(cls: Type[T], *args, **kwargs) -> T: ...  # type: ignore[empty-body] # noqa: E704
65
    @classmethod
66
    def _new_with_weak_ptr(cls: Type[T], *args, **kwargs) -> T: ...  # type: ignore[empty-body] # noqa: E704
67
    def _shared_decref(self) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
68
    def _write_file(self, *args, **kwargs): ...  # noqa: E704
69
    def resize_(self, size: int): ...  # noqa: E704
70
    def _weak_ref(self, *args, **kwargs) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
71
    def _set_from_file(self, *args, **kwargs): ...  # noqa: E704
72
    def _set_cdata(self, *args, **kwargs): ...  # noqa: E704
73
    def _share_cuda_(self, *args, **kwargs): ...  # noqa: E704
74
    def is_shared(self) -> bool: ...  # type: ignore[empty-body] # noqa: E704
75
    @classmethod
76
    def _new_shared_cuda(cls: Type[T], *args, **kwargs) -> T: ...  # type: ignore[empty-body] # noqa: E704
77
    def _shared_incref(self, *args, **kwargs): ...  # noqa: E704
78
    @classmethod
79
    def _free_weak_ref(cls, *args, **kwargs): ...  # noqa: E704
80
    @property
81
    def is_cuda(self): ...  # noqa: E704
82
    @property
83
    def is_hpu(self): ...  # noqa: E704
84
    @classmethod
85
    def from_file(cls, filename, shared, nbytes) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
86
    @classmethod
87
    def _expired(cls, *args, **kwargs) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
88
    def _byteswap(self, *args, **kwargs): ...  # noqa: E704
89
    def _get_filename(self, *args, **kwargs) -> _Optional[str]: ...  # type: ignore[empty-body, misc] # noqa: E704
90

91
    def __str__(self):
92
        info_str = (
93
            f'[{torch.typename(self)}(device={self.device}) '
94
            f'of size {len(self)}]')
95
        if self.device.type == 'meta':
96
            return '...\n' + info_str
97
        else:
98
            data_str = ' ' + '\n '.join(str(self[i]) for i in range(self.size()))
99
            return data_str + '\n' + info_str
100

101
    def __repr__(self):
102
        return str(self)
103

104
    def __iter__(self):
105
        return iter(self[i] for i in range(self.size()))
106

107
    def __copy__(self):
108
        return self.clone()
109

110
    def __deepcopy__(self, memo):
111
        memo = memo.setdefault('torch', {})
112
        if self._cdata in memo:
113
            return memo[self._cdata]
114
        new_storage = self.clone()
115
        memo[self._cdata] = new_storage
116
        return new_storage
117

118
    def __reduce__(self):
119
        b = io.BytesIO()
120
        torch.save(self, b, _use_new_zipfile_serialization=False)
121
        return (_load_from_bytes, (b.getvalue(),))
122

123
    def __sizeof__(self):
124
        return super().__sizeof__() + self.size()
125

126
    def clone(self):
127
        """Return a copy of this storage."""
128
        return type(self)(self.nbytes(), device=self.device).copy_(self)
129

130
    def tolist(self):
131
        """Return a list containing the elements of this storage."""
132
        return list(self)
133

134
    def cpu(self):
135
        """Return a CPU copy of this storage if it's not already on the CPU."""
136
        if self.device.type != 'cpu':
137
            return torch.UntypedStorage(self.size()).copy_(self, False)
138
        else:
139
            return self
140

141
    def mps(self):
142
        """Return a MPS copy of this storage if it's not already on the MPS."""
143
        if self.device.type != 'mps':
144
            return torch.UntypedStorage(self.size(), device="mps").copy_(self, False)
145
        else:
146
            return self
147

148
    def _to(self, dtype):
149
        if not isinstance(dtype, torch.dtype):
150
            raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}")
151
        storage = torch.tensor([], dtype=torch.uint8, device=self.device).set_(cast(Storage, self)).to(dtype)._typed_storage()
152
        if storage.data_ptr() == self.data_ptr():
153
            storage = storage.clone()
154
        return storage
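    # Added note (not in the original source): a minimal sketch of how the
    # dtype-cast helpers below behave. _to() views the raw bytes as uint8,
    # converts the values to the requested dtype, and returns a TypedStorage
    # that never aliases the original buffer (it clones if the data_ptr is
    # unchanged). Assuming a small CPU storage:
    #
    #   >>> s = torch.UntypedStorage(4)   # 4 uninitialized bytes
    #   >>> f = s.float()                 # TypedStorage, dtype=torch.float32
    #   >>> f.dtype, len(f)
    #   (torch.float32, 4)                # one element per source byte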
155

156
    def double(self):
157
        """Casts this storage to double type."""
158
        return self._to(torch.double)
159

160
    def float(self):
161
        """Casts this storage to float type."""
162
        return self._to(torch.float)
163

164
    def half(self):
165
        """Casts this storage to half type."""
166
        return self._to(torch.half)
167

168
    def long(self):
169
        """Casts this storage to long type."""
170
        return self._to(torch.long)
171

172
    def int(self):
173
        """Casts this storage to int type."""
174
        return self._to(torch.int)
175

176
    def short(self):
177
        """Casts this storage to short type."""
178
        return self._to(torch.short)
179

180
    def char(self):
181
        """Casts this storage to char type."""
182
        return self._to(torch.int8)
183

184
    def byte(self):
185
        """Casts this storage to byte type."""
186
        return self._to(torch.uint8)
187

188
    def bool(self):
189
        """Casts this storage to bool type."""
190
        return self._to(torch.bool)
191

192
    def bfloat16(self):
193
        """Casts this storage to bfloat16 type."""
194
        return self._to(torch.bfloat16)
195

196
    def complex_double(self):
197
        """Casts this storage to complex double type."""
198
        return self._to(torch.cdouble)
199

200
    def complex_float(self):
201
        """Casts this storage to complex float type."""
202
        return self._to(torch.cfloat)
203

204
    def float8_e5m2(self):
205
        """Casts this storage to float8_e5m2 type"""
206
        return self._to(torch.float8_e5m2)
207

208
    def float8_e4m3fn(self):
209
        """Casts this storage to float8_e4m3fn type"""
210
        return self._to(torch.float8_e4m3fn)
211

212
    def float8_e5m2fnuz(self):
213
        """Casts this storage to float8_e5m2fnuz type"""
214
        return self._to(torch.float8_e5m2fnuz)
215

216
    def float8_e4m3fnuz(self):
217
        """Casts this storage to float8_e4m3fnuz type"""
218
        return self._to(torch.float8_e4m3fnuz)
219

220
    def is_pinned(self, device: Union[str, torch.device] = 'cuda'):
221
        r"""Determine whether the CPU storage is already pinned on device.
222

223
        Args:
224
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``.
225

226
        Returns:
227
            A boolean variable.
228
        """
229
        return torch.tensor([], dtype=torch.uint8, device=self.device).set_(
230
            cast(Storage, self)).is_pinned(device)
231

232
    def pin_memory(self, device: Union[str, torch.device] = 'cuda'):
233
        r"""Copy the CPU storage to pinned memory, if it's not already pinned.
234

235
        Args:
236
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``.
237

238
        Returns:
239
            A pinned CPU storage.
240
        """
241
        if self.device.type != 'cpu':
242
            raise TypeError(f"cannot pin '{self.type()}' only CPU memory can be pinned")
243

244
        pinned_tensor = torch.tensor([], dtype=torch.uint8, device=self.device).set_(
245
            cast(Storage, self)).pin_memory(device)
246
        return pinned_tensor.untyped_storage()
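    # Added note (not in the original source): a minimal usage sketch,
    # assuming CUDA is available. Pinning copies a CPU storage into
    # page-locked host memory so later host-to-device copies can be
    # asynchronous; non-CPU storages raise the TypeError above.
    #
    #   >>> s = torch.UntypedStorage(16)   # pageable CPU storage
    #   >>> s.is_pinned()
    #   False
    #   >>> p = s.pin_memory()
    #   >>> p.is_pinned()
    #   True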
247

248
    def share_memory_(self):
249
        """See :meth:`torch.UntypedStorage.share_memory_`"""
250
        from torch.multiprocessing import get_sharing_strategy
251
        if self.device.type in ["cuda", torch._C._get_privateuse1_backend_name()]:
252
            pass  # CUDA or PrivateUse1 doesn't use POSIX shared memory
253
        elif get_sharing_strategy() == 'file_system':
254
            self._share_filename_cpu_()
255
        else:
256
            self._share_fd_cpu_()
257
        return self
258

259
    @classmethod
260
    def _new_shared(cls, size, *, device='cpu'):
261
        """Create a new storage in shared memory with the same data type."""
262
        from torch.multiprocessing import get_sharing_strategy
263
        device = torch.device(device)
264
        if device.type in ["cuda", torch._C._get_privateuse1_backend_name(), "hpu"]:
265
            return cls(size, device=device)
266
        elif get_sharing_strategy() == 'file_system':
267
            return cls._new_using_filename_cpu(size)
268
        else:
269
            return cls._new_using_fd_cpu(size)
270

271
    def untyped(self):
272
        return self
273

274
    def byteswap(self, dtype):
275
        """Swap bytes in underlying data."""
276
        elem_size = torch._utils._element_size(dtype)
277
        # for complex types, don't swap first and second numbers
278
        if dtype.is_complex:
279
            elem_size = max(int(elem_size / 2), 1)
280
        self._byteswap(elem_size)
281

282

283
def _share_memory_lock_protected(fn):
284
    @functools.wraps(fn)
285
    def wrapper(self, *args, **kwargs):
286
        to_free = None
287
        to_wait = None
288
        with _share_memory_lock:
289
            key = self._cdata
290
            if key in _share_memory_map:
291
                to_wait = _share_memory_map[key]
292
            else:
293
                _share_memory_map[key] = threading.RLock()
294
                _share_memory_map[key].acquire()
295
                to_free = key
296

297
        # If we're already in the process of sharing the storage, wait
298
        # for it to be done.
299
        if to_wait is not None:
300
            with to_wait:
301
                pass
302

303
        try:
304
            return fn(self, *args, **kwargs)
305
        finally:
306
            # If we acquired the storage lock here and we're done working on it
307
            # we can now release it and free the entry.
308
            if to_free is not None:
309
                # Ensure that the cdata from the storage didn't change and only
310
                # the data_ptr did.
311
                assert self._cdata == to_free
312
                with _share_memory_lock:
313
                    _share_memory_map[to_free].release()
314
                    del _share_memory_map[to_free]
315
    return wrapper
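# Added note (not in the original source): the decorator above serializes
# concurrent sharing calls on the *same* storage object. The first thread to
# arrive registers an RLock keyed by the storage's _cdata and holds it while
# the wrapped call runs; any other thread that finds the key simply waits on
# that lock and then proceeds, by which point the storage is already shared.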
316

317
class UntypedStorage(torch._C.StorageBase, _StorageBase):
318
    def __getitem__(self, *args, **kwargs):
319
        if self.device.type == 'meta':
320
            raise NotImplementedError("Not available for 'meta' device type")
321
        return super().__getitem__(*args, **kwargs)
322

323
    @property
324
    def is_cuda(self):
325
        return self.device.type == 'cuda'
326

327
    @property
328
    def is_hpu(self):
329
        return self.device.type == 'hpu'
330

331
    @property
332
    def filename(self) -> _Optional[str]:
333
        """Returns the file name associated with this storage if the storage was memory mapped from a file.
334
           or ``None`` if the storage was not created by memory mapping a file."""
335
        return self._get_filename()
336

337
    @_share_memory_lock_protected
338
    def share_memory_(self, *args, **kwargs):
339
        """
340
        Moves the storage to shared memory.
341

342
        This is a no-op for storages already in shared memory and for CUDA
343
        storages, which do not need to be moved for sharing across processes.
344
        Storages in shared memory cannot be resized.
345

346
        Note that, to mitigate issues like `this <https://github.com/pytorch/pytorch/issues/95606>`_,
        it is thread safe to call this function from multiple threads on the same object.
        It is NOT thread safe, though, to call any other function on ``self`` without proper
        synchronization. Please see :doc:`/notes/multiprocessing` for more details.
350

351
        .. note::
352
            When all references to a storage in shared memory are deleted, the associated shared memory
353
            object will also be deleted. PyTorch has a special cleanup process to ensure that this happens
354
            even if the current process exits unexpectedly.
355

356
            It is worth noting the difference between :meth:`share_memory_` and :meth:`from_file` with ``shared = True``
357

358
            #. ``share_memory_`` uses `shm_open(3) <https://man7.org/linux/man-pages/man3/shm_open.3.html>`_ to create a
359
               POSIX shared memory object while :meth:`from_file` uses
360
               `open(2) <https://man7.org/linux/man-pages/man2/open.2.html>`_ to open the filename passed by the user.
361
            #. Both use an `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_ with ``MAP_SHARED``
362
               to map the file/object into the current virtual address space.
363
            #. ``share_memory_`` will call ``shm_unlink(3)`` on the object after mapping it to make sure the shared memory
364
               object is freed when no process has the object open. ``torch.from_file(shared=True)`` does not unlink the
365
               file. This file is persistent and will remain until it is deleted by the user.
366

367
        Returns:
368
            ``self``
369
        """
370
        return super().share_memory_(*args, **kwargs)
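    # Added note (not in the original source): a minimal sketch for a CPU
    # storage under the default sharing strategy. share_memory_() is in-place
    # and returns ``self``, so the same object can then be handed to child
    # processes via torch.multiprocessing.
    #
    #   >>> s = torch.UntypedStorage(1024)
    #   >>> s.is_shared()
    #   False
    #   >>> _ = s.share_memory_()
    #   >>> s.is_shared()
    #   True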
371

372
    @_share_memory_lock_protected
373
    def _share_fd_cpu_(self, *args, **kwargs):
374
        return super()._share_fd_cpu_(*args, **kwargs)
375

376
    @_share_memory_lock_protected
377
    def _share_filename_cpu_(self, *args, **kwargs):
378
        return super()._share_filename_cpu_(*args, **kwargs)
379

380
def _load_from_bytes(b):
381
    return torch.load(io.BytesIO(b))
382

383

384
_StorageBase.type = _type  # type: ignore[assignment]
385
_StorageBase.cuda = _cuda  # type: ignore[assignment]
386
_StorageBase.hpu = _hpu  # type: ignore[assignment]
387

388

389
@lru_cache(maxsize=None)
390
def _dtype_to_storage_type_map():
391
    # NOTE: We should no longer add dtypes to this map. This map
392
    # is only used for BC/FC with older PyTorch versions. Going forward,
393
    # new dtypes of TypedStorage should not translate to a legacy
394
    # <type>Storage class. Instead, new dtypes of TypedStorage should
395
    # be serialized as an UntypedStorage paired with a torch.dtype
396
    return {
397
        torch.double: 'DoubleStorage',
398
        torch.float: 'FloatStorage',
399
        torch.half: 'HalfStorage',
400
        torch.long: 'LongStorage',
401
        torch.int: 'IntStorage',
402
        torch.int16: 'ShortStorage',
403
        torch.int8: 'CharStorage',
404
        torch.uint8: 'ByteStorage',
405
        torch.bool: 'BoolStorage',
406
        torch.bfloat16: 'BFloat16Storage',
407
        torch.cdouble: 'ComplexDoubleStorage',
408
        torch.cfloat: 'ComplexFloatStorage',
409
        torch.qint8: 'QInt8Storage',
410
        torch.qint32: 'QInt32Storage',
411
        torch.quint8: 'QUInt8Storage',
412
        torch.quint4x2: 'QUInt4x2Storage',
413
        torch.quint2x4: 'QUInt2x4Storage',
414
    }
415

416
@lru_cache(maxsize=None)
417
def _storage_type_to_dtype_map():
418
    dtype_map = {
419
        val: key for key, val in _dtype_to_storage_type_map().items()}
420
    return dtype_map
421

422
def _get_storage_from_sequence(sequence, dtype, device):
423
    if dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
424
        interpret_dtypes = {
425
            torch.quint8: torch.uint8,
426
            torch.quint4x2: torch.uint8,
427
            torch.quint2x4: torch.uint8,
428
            torch.qint32: torch.int32,
429
            torch.qint8: torch.int8
430
        }
431
        tmp_tensor = torch.tensor(
432
            sequence,
433
            dtype=interpret_dtypes[dtype],
434
            device=device)
435

436
    else:
437
        tmp_tensor = torch.tensor(
438
            sequence,
439
            dtype=dtype,
440
            device=device)
441

442
    return tmp_tensor._typed_storage()._untyped_storage
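# Added note (not in the original source): for quantized dtypes there is no
# direct torch.tensor constructor, so the sequence is materialized with the
# matching integer dtype and only the raw bytes are kept. A hypothetical call:
#
#   >>> us = _get_storage_from_sequence([1, 2, 3], torch.qint8, 'cpu')
#   >>> us.nbytes()   # stored as int8 bytes -> 3 bytes
#   3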
443

444
def _isint(x):
445
    if HAS_NUMPY:
446
        return isinstance(x, (int, np.integer))
447
    else:
448
        return isinstance(x, int)
449

450
_always_warn_typed_storage_removal = False
451

452
def _get_always_warn_typed_storage_removal():
453
    return _always_warn_typed_storage_removal
454

455
def _set_always_warn_typed_storage_removal(always_warn):
456
    global _always_warn_typed_storage_removal
457
    assert isinstance(always_warn, bool)
458
    _always_warn_typed_storage_removal = always_warn
459

460
def _warn_typed_storage_removal(stacklevel=2):
461
    global _always_warn_typed_storage_removal
462

463
    def is_first_time():
464
        if not hasattr(_warn_typed_storage_removal, 'has_warned'):
465
            return True
466
        else:
467
            return not _warn_typed_storage_removal.__dict__['has_warned']
468

469
    if _get_always_warn_typed_storage_removal() or is_first_time():
470
        message = (
471
            "TypedStorage is deprecated. It will be removed in the future and "
472
            "UntypedStorage will be the only storage class. This should only matter "
473
            "to you if you are using storages directly.  To access UntypedStorage "
474
            "directly, use tensor.untyped_storage() instead of tensor.storage()"
475
        )
476
        warnings.warn(message, UserWarning, stacklevel=stacklevel + 1)
477
        _warn_typed_storage_removal.__dict__['has_warned'] = True
478

479
def _reset_warn_typed_storage_removal():
480
    _warn_typed_storage_removal.__dict__['has_warned'] = False
481

482
def _get_device_from_module(module: str):
483
    last_part = module.rsplit(".", 1)[-1]
484
    if last_part in ["cuda", torch._C._get_privateuse1_backend_name(), "hpu"]:
485
        return last_part
486
    else:
487
        return "cpu"
488

489
class TypedStorage:
490
    is_sparse = False
491

492
    dtype: torch.dtype
493

494
    @property
495
    def _dtype(self):
496
        return self.dtype
497

498
    @property
499
    def filename(self) -> _Optional[str]:
500
        """Returns the file name associated with this storage if the storage was memory mapped from a file.
501
           or ``None`` if the storage was not created by memory mapping a file."""
502
        return self._untyped_storage.filename
503

504
    def fill_(self, value):
505
        _warn_typed_storage_removal()
506
        self._setitem(slice(0, self._size()), value)
507
        return self
508

509
    def __new__(cls, *args, wrap_storage=None, dtype=None, device=None, _internal=False):
510
        if not _internal:
511
            _warn_typed_storage_removal()
512

513
        if cls == torch.storage._LegacyStorage:
514
            raise RuntimeError("Only child classes of _LegacyStorage can be instantiated")
515

516
        if cls == TypedStorage:
517
            return super().__new__(cls)
518

519
        else:
520
            arg_error_msg = (
521
                f'{cls}.__new__ received an invalid combination '
522
                f'of arguments. Expected one of:\n'
523
                ' * no arguments\n'
524
                ' * (int size)\n'
525
                ' * (Sequence data)\n'
526
                ' * (*, UntypedStorage wrap_storage)')
527

528
            if device is not None:
529
                raise RuntimeError(
530
                    arg_error_msg +
531
                    "\nKeyword argument 'device' cannot be specified")
532

533
            if dtype is not None:
534
                raise RuntimeError(
535
                    arg_error_msg +
536
                    "\nKeyword argument 'dtype' cannot be specified")
537

538
            if wrap_storage is None:
539
                if len(args) > 1:
540
                    raise RuntimeError(
541
                        arg_error_msg +
542
                        "\nToo many positional arguments")
543

544
                if len(args) == 1 and not _isint(args[0]) and not isinstance(args[0], collections.abc.Sequence):
545
                    raise TypeError(
546
                        arg_error_msg +
547
                        f"\nArgument type not recognized: {type(args[0])}")
548

549
                return TypedStorage(
550
                    *args,
551
                    dtype=cls._dtype,
552
                    device=_get_device_from_module(cls.__module__),
553
                    _internal=True)
554

555
            else:
556
                if len(args) != 0:
557
                    raise RuntimeError(
558
                        arg_error_msg +
559
                        "\nNo positional arguments should be given when using "
560
                        "'wrap_storage'")
561

562
                if not isinstance(wrap_storage, torch.UntypedStorage):
563
                    raise TypeError(
564
                        arg_error_msg +
565
                        f"\nArgument 'wrap_storage' must be UntypedStorage, but got {type(wrap_storage)}")
566

567
                cls_device = _get_device_from_module(cls.__module__)
568

569
                if wrap_storage.device.type != cls_device:
570
                    raise RuntimeError(
571
                        arg_error_msg +
572
                        f"\nDevice of 'wrap_storage' must be {cls_device}"
573
                        f", but got {wrap_storage.device.type}")
574

575
                return TypedStorage(
576
                    *args,
577
                    wrap_storage=wrap_storage,
578
                    dtype=cls.dtype,
579
                    _internal=True)
580

581
    def __init__(self, *args, device=None, dtype=None, wrap_storage=None, _internal=False):
582
        if not _internal:
583
            _warn_typed_storage_removal()
584
        arg_error_msg = (
585
            'TypedStorage.__init__ received an invalid combination '
586
            'of arguments. Expected one of:\n'
587
            ' * (*, torch.device device, torch.dtype dtype)\n'
588
            ' * (int size, *, torch.device device, torch.dtype dtype)\n'
589
            ' * (Sequence data, *, torch.device device, torch.dtype dtype)\n'
590
            ' * (*, UntypedStorage wrap_storage, torch.dtype dtype)')
591

592
        if wrap_storage is not None:
593
            if len(args) != 0:
594
                raise RuntimeError(
595
                    arg_error_msg +
596
                    "\nNo positional arguments should be given when using "
597
                    "'wrap_storage'")
598

599
            if dtype is None:
600
                raise RuntimeError(
601
                    arg_error_msg +
602
                    "\nArgument 'dtype' must be specified")
603

604
            if not isinstance(dtype, torch.dtype):
605
                raise TypeError(
606
                    arg_error_msg +
607
                    f"\nArgument 'dtype' must be torch.dtype, not {type(dtype)}")
608

609
            if device is not None:
610
                raise RuntimeError(
611
                    arg_error_msg +
612
                    "\nArgument 'device' should not be specified when 'wrap_storage' is given")
613

614
            self.dtype = dtype
615

616
            if not isinstance(wrap_storage, torch.UntypedStorage):
617
                raise TypeError(
618
                    arg_error_msg +
619
                    f"\nArgument 'wrap_storage' must be UntypedStorage, but got {type(wrap_storage)}")
620

621
            self._untyped_storage = wrap_storage
622

623
        else:
624
            self.dtype = torch.get_default_dtype() if dtype is None else dtype
625
            device = torch.device('cpu' if device is None else device)
626

627
            if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
628
                if device.type == 'cuda':
629
                    raise RuntimeError("Cannot create CUDA storage with quantized dtype")
630

631
            if len(args) == 0:
632
                self._untyped_storage = torch.UntypedStorage(device=device)
633

634
            elif len(args) == 1:
635
                if _isint(args[0]):
636
                    self._untyped_storage = torch.UntypedStorage(int(args[0]) * self._element_size(), device=device)
637
                elif isinstance(args[0], collections.abc.Sequence):
638
                    self._untyped_storage = _get_storage_from_sequence(args[0], self.dtype, device)
639
                else:
640
                    raise TypeError(
641
                        arg_error_msg +
642
                        f"\nArgument type not recognized: {type(args[0])}")
643

644
            else:
645
                raise RuntimeError(
646
                    arg_error_msg +
647
                    "\nToo many positional arguments")
648

649
    @property
650
    def is_cuda(self):
651
        _warn_typed_storage_removal()
652
        return self._untyped_storage.device.type == 'cuda'
653

654
    @property
655
    def is_hpu(self):
656
        _warn_typed_storage_removal()
657
        return self._untyped_storage.device.type == 'hpu'
658

659
    def untyped(self):
660
        """Return the internal :class:`torch.UntypedStorage`."""
661
        _warn_typed_storage_removal()
662
        return self._untyped_storage
663

664
    def _new_wrapped_storage(self, untyped_storage):
665
        assert type(untyped_storage) == torch.UntypedStorage
666

667
        if type(self) == TypedStorage:
668
            return TypedStorage(
669
                wrap_storage=untyped_storage,
670
                dtype=self.dtype,
671
                _internal=True)
672
        else:
673
            return type(self)(wrap_storage=untyped_storage)
674

675
    def __len__(self):
676
        _warn_typed_storage_removal()
677
        return self._size()
678

679
    def _maybe_wrap_index(self, idx, is_stop=False):
680
        if idx is None:
681
            if is_stop:
682
                return self._size()
683
            else:
684
                return 0
685

686
        else:
687
            if type(idx) != int:
688
                raise TypeError(
689
                    f"can't index a {type(self)} with {type(idx)}")
690
            if is_stop:
691
                if (idx > self._size()) or (idx < -self._size()):
692
                    raise IndexError(
693
                        f'index {idx} out of range for storage of size {self.size()}')
694
                if idx > 0:
695
                    return idx
696
                else:
697
                    return idx % self._size()
698
            else:
699
                if (idx >= self._size()) or (idx < -self._size()):
700
                    raise IndexError(
701
                        f'index {idx} out of range for storage of size {self.size()}')
702
                return idx % self._size()
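    # Added note (not in the original source): for a storage of size 5 the
    # wrapping above behaves as follows:
    #
    #   idx=2,    is_stop=False -> 2
    #   idx=-1,   is_stop=False -> 4            (negative indices wrap via idx % size)
    #   idx=5,    is_stop=False -> IndexError   (start index must be < size)
    #   idx=5,    is_stop=True  -> 5            (a stop index may equal size)
    #   idx=None, is_stop=True  -> 5            (open-ended slice stop)
    #   idx=None, is_stop=False -> 0            (open-ended slice start)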
703

704
    def __setitem__(self, idx, value):
705
        _warn_typed_storage_removal()
706
        return self._setitem(idx, value)
707

708
    def _setitem(self, idx, value):
709
        if not isinstance(idx, (int, slice)):
710
            raise RuntimeError(f"can't index a {type(self)} with {type(idx)}")
711
        if torch.is_storage(value):
712
            raise RuntimeError(f'cannot set item with value type {type(value)}')
713
        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
714
            interpret_dtypes = {
715
                torch.quint8: torch.uint8,
716
                torch.quint4x2: torch.uint8,
717
                torch.quint2x4: torch.uint8,
718
                torch.qint32: torch.int32,
719
                torch.qint8: torch.int8
720
            }
721
            tmp_dtype = interpret_dtypes[self.dtype]
722
            tmp_tensor = torch.tensor([], dtype=tmp_dtype, device=self._untyped_storage.device)
723
            tmp_tensor.set_(TypedStorage(
724
                wrap_storage=self._untyped_storage,
725
                dtype=tmp_dtype,
726
                _internal=True))
727
        else:
728
            tmp_tensor = torch.tensor([], dtype=self.dtype, device=self._untyped_storage.device).set_(self)
729

730
        tmp_tensor[idx] = value
731

732
    def __getitem__(self, idx):
733
        _warn_typed_storage_removal()
734
        return self._getitem(idx)
735

736
    def _getitem(self, idx):
737
        if self._untyped_storage.device.type == 'meta':
738
            raise NotImplementedError("Not available for 'meta' device type")
739

740
        # NOTE: Before TypedStorage existed, indexing with a slice used to be
741
        # possible for <type>Storage objects. However, it would return
742
        # a storage view, which would be a hassle to implement in TypedStorage,
743
        # so it was disabled
744
        if isinstance(idx, slice):
745
            raise RuntimeError('slices are only supported in UntypedStorage.__getitem__')
746
        elif not isinstance(idx, int):
747
            raise RuntimeError(f"can't index a {type(self)} with {type(idx)}")
748

749
        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
750
            interpret_dtypes = {
751
                torch.quint8: torch.uint8,
752
                torch.quint4x2: torch.uint8,
753
                torch.quint2x4: torch.uint8,
754
                torch.qint32: torch.int32,
755
                torch.qint8: torch.int8
756
            }
757
            return TypedStorage(
758
                wrap_storage=self._untyped_storage,
759
                dtype=interpret_dtypes[self.dtype],
760
                _internal=True)._getitem(idx)
761

762
        idx_wrapped = self._maybe_wrap_index(idx)
763
        from torch._subclasses.fake_tensor import unset_fake_temporarily
764

765
        with unset_fake_temporarily():
766
            tmp_tensor = torch.tensor([], dtype=self.dtype, device=self._untyped_storage.device).set_(self)
767
            return tmp_tensor[idx_wrapped].item()
768

769
    def copy_(self, source: T, non_blocking: _Optional[bool] = None):
770
        _warn_typed_storage_removal()
771
        if isinstance(source, TypedStorage):
772
            self._untyped_storage.copy_(source._untyped_storage, non_blocking)  # type: ignore[arg-type]
773
        else:
774
            self._untyped_storage.copy_(source, non_blocking)  # type: ignore[arg-type]
775
        return self
776

777
    def nbytes(self):
778
        _warn_typed_storage_removal()
779
        return self._nbytes()
780

781
    # For internal use only, to avoid deprecation warning
782
    def _nbytes(self):
783
        return self._untyped_storage.nbytes()
784

785
    def type(self, dtype: _Optional[str] = None, non_blocking: bool = False) -> Union[T, str]:
786
        _warn_typed_storage_removal()
787
        if dtype is None:
788
            legacy_class = self._get_legacy_storage_class()
789

790
            if legacy_class is not None:
791
                return legacy_class.__module__ + '.' + legacy_class.__name__
792

793
            return '.'.join([self.__module__, type(self).__name__])
794

795
        else:
796
            return self._untyped_storage.type(dtype, non_blocking)
797

798
    def cuda(self, device=None, non_blocking=False, **kwargs) -> T:  # type: ignore[misc, type-var]
799
        _warn_typed_storage_removal()
800
        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
801
            raise RuntimeError("Cannot create CUDA storage with quantized dtype")
802
        cuda_storage: torch.UntypedStorage = self._untyped_storage.cuda(device, non_blocking, **kwargs)
803
        return self._new_wrapped_storage(cuda_storage)
804

805
    def hpu(self, device=None, non_blocking=False, **kwargs) -> T:  # type: ignore[misc, type-var]
806
        _warn_typed_storage_removal()
807
        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
808
            raise RuntimeError("Cannot create HPU storage with quantized dtype")
809
        hpu_storage: torch.UntypedStorage = self._untyped_storage.hpu(device, non_blocking, **kwargs)
810
        return self._new_wrapped_storage(hpu_storage)
811

812
    def element_size(self):
813
        _warn_typed_storage_removal()
814
        return self._element_size()
815

816
    # For internal use only, to avoid deprecation warning
817
    def _element_size(self):
818
        return torch._utils._element_size(self.dtype)
819

820
    def get_device(self) -> int:
821
        _warn_typed_storage_removal()
822
        return self._untyped_storage.get_device()
823

824
    def __str__(self):
825
        _warn_typed_storage_removal()
826
        info_str = (
827
            f'[{torch.typename(self)}(dtype={self.dtype}, '
828
            f'device={self.device}) of size {len(self)}]')
829
        if self.device.type == 'meta':
830
            return '...\n' + info_str
831
        else:
832
            data_str = ' ' + '\n '.join(str(self[i]) for i in range(self.size()))
833
            return data_str + '\n' + info_str
834

835
    def __repr__(self):
836
        _warn_typed_storage_removal()
837
        return str(self)
838

839
    def __iter__(self):
840
        _warn_typed_storage_removal()
841
        return iter(self[i] for i in range(self.size()))
842

843
    def __copy__(self):
844
        _warn_typed_storage_removal()
845
        return self._new_wrapped_storage(copy.copy(self._untyped_storage))
846

847
    def __deepcopy__(self, memo):
848
        _warn_typed_storage_removal()
849
        return self._deepcopy(memo)
850

851
    # For internal use only, to avoid deprecation warning
852
    def _deepcopy(self, memo):
853
        return self._new_wrapped_storage(copy.deepcopy(self._untyped_storage, memo))
854

855
    def __sizeof__(self):
856
        _warn_typed_storage_removal()
857
        return super().__sizeof__() + self.nbytes()
858

859
    def clone(self):
860
        """Return a copy of this storage."""
861
        _warn_typed_storage_removal()
862
        return self._new_wrapped_storage(self._untyped_storage.clone())
863

864
    def tolist(self):
865
        """Return a list containing the elements of this storage."""
866
        _warn_typed_storage_removal()
867
        return list(self)
868

869
    def cpu(self):
870
        """Return a CPU copy of this storage if it's not already on the CPU."""
871
        _warn_typed_storage_removal()
872
        return self._new_wrapped_storage(self._untyped_storage.cpu())
873

874
    def is_pinned(self, device: Union[str, torch.device] = 'cuda'):
875
        r"""Determine whether the CPU TypedStorage is already pinned on device.
876

877
        Args:
878
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``
879

880
        Returns:
881
            A boolean variable.
882
        """
883
        _warn_typed_storage_removal()
884
        return self._untyped_storage.is_pinned(device)
885

886
    def pin_memory(self, device: Union[str, torch.device] = 'cuda'):
887
        r"""Copy the CPU TypedStorage to pinned memory, if it's not already pinned.
888

889
        Args:
890
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``.
891

892
        Returns:
893
            A pinned CPU storage.
894
        """
895
        _warn_typed_storage_removal()
896
        return self._new_wrapped_storage(self._untyped_storage.pin_memory(device=device))
897

898
    def share_memory_(self):
899
        """See :meth:`torch.UntypedStorage.share_memory_`"""
900
        _warn_typed_storage_removal()
901
        return self._share_memory_()
902

903
    # For internal use only, to avoid deprecation warning
904
    def _share_memory_(self):
905
        self._untyped_storage.share_memory_()
906
        return self
907

908
    def _new_shared(self, size, *, device=None):
909
        """Create a new storage in shared memory with the same data type."""
910
        if device is None:
911
            device = 'cpu'
912
        device = torch.device(device)
913
        untyped_storage = torch.UntypedStorage._new_shared(size * self._element_size(), device=device)
914
        return TypedStorage(
915
            wrap_storage=untyped_storage,
916
            dtype=self.dtype,
917
            _internal=True)
918

919
    @property
920
    def _cdata(self):
921
        return self._untyped_storage._cdata
922

923
    @property
924
    def device(self):
925
        _warn_typed_storage_removal()
926
        return self._untyped_storage.device
927

928
    def size(self):
929
        _warn_typed_storage_removal()
930
        return self._size()
931

932
    # For internal use only, to avoid deprecation warning
933
    def _size(self):
934
        # NB: don't indirect through __len__, as that requires
935
        # an int to be returned
936
        return self._untyped_storage.nbytes() // self._element_size()
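    # Added note (not in the original source): the element count is always
    # derived from the untyped byte count, e.g. for float32 (4 bytes/element):
    #
    #   >>> ts = TypedStorage(3, dtype=torch.float32, _internal=True)
    #   >>> ts._nbytes(), ts._element_size(), ts._size()
    #   (12, 4, 3)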
937

938
    def pickle_storage_type(self):
939
        _warn_typed_storage_removal()
940
        return self._pickle_storage_type()
941

942
    # For internal use only, to avoid deprecation warning
943
    def _pickle_storage_type(self):
944
        try:
945
            return _dtype_to_storage_type_map()[self.dtype]
946
        except KeyError as e:
947
            raise KeyError(f'dtype {self.dtype} is not recognized') from e
948

949
    def __reduce__(self):
950
        b = io.BytesIO()
951
        torch.save(self, b, _use_new_zipfile_serialization=False)
952
        return (_load_from_bytes, (b.getvalue(),))
953

954
    def data_ptr(self):
955
        _warn_typed_storage_removal()
956
        return self._data_ptr()
957

958
    # For internal use only, to avoid deprecation warning
959
    def _data_ptr(self):
960
        return self._untyped_storage.data_ptr()
961

962
    def resizable(self):
963
        _warn_typed_storage_removal()
964
        return self._untyped_storage.resizable()
965

966
    def resize_(self, size):
967
        _warn_typed_storage_removal()
968
        self._resize_(size)
969

970
    # For internal use only, to avoid deprecation warning
971
    def _resize_(self, size):
972
        self._untyped_storage.resize_(size * self._element_size())
973

974
    @classmethod
975
    def _free_weak_ref(cls, *args, **kwargs):
976
        return UntypedStorage._free_weak_ref(*args, **kwargs)
977

978
    def _weak_ref(self, *args, **kwargs):
979
        return self._untyped_storage._weak_ref(*args, **kwargs)
980

981
    @classmethod
982
    def from_buffer(cls, *args, **kwargs):
983
        _warn_typed_storage_removal()
984
        return cls._from_buffer(*args, **kwargs)
985

986
    @classmethod
987
    def _from_buffer(cls, *args, dtype=None, device=None, **kwargs):
988
        if cls == TypedStorage:
989
            dtype = torch.get_default_dtype() if dtype is None else dtype
990
            device = torch.device('cpu' if device is None else device)
991
            if device.type != 'cpu':
992
                raise RuntimeError(f'TypedStorage.from_buffer: Not available for device {device.type}')
993
            untyped_storage: torch.UntypedStorage = torch.UntypedStorage.from_buffer(*args, dtype=dtype, **kwargs)
994

995
        else:
996
            if dtype is not None or len(args) == 5:
997
                raise RuntimeError(
998
                    "from_buffer: 'dtype' can only be specified in "
999
                    "UntypedStorage.from_buffer and TypedStorage.from_buffer")
1000
            if device is not None:
1001
                raise RuntimeError(
1002
                    "from_buffer: 'device' can only be specified in "
1003
                    "UntypedStorage.from_buffer and TypedStorage.from_buffer")
1004

1005
            dtype = cls._dtype
1006
            untyped_storage = torch.UntypedStorage.from_buffer(*args, dtype=dtype, **kwargs)
1007

1008
        return TypedStorage(
1009
            wrap_storage=untyped_storage,
1010
            dtype=dtype,
1011
            _internal=True)
1012

1013
    def _to(self, dtype):
1014
        if not isinstance(dtype, torch.dtype):
1015
            raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}")
1016
        storage = torch.tensor([], dtype=self.dtype, device=self.device).set_(self).to(dtype)._typed_storage()
1017
        if storage.data_ptr() == self.data_ptr():
1018
            storage = storage.clone()
1019
        return storage
1020

1021
    def double(self):
1022
        """Casts this storage to double type."""
1023
        _warn_typed_storage_removal()
1024
        return self._to(torch.double)
1025

1026
    def float(self):
1027
        """Casts this storage to float type."""
1028
        _warn_typed_storage_removal()
1029
        return self._to(torch.float)
1030

1031
    def half(self):
1032
        """Casts this storage to half type."""
1033
        _warn_typed_storage_removal()
1034
        return self._to(torch.half)
1035

1036
    def long(self):
1037
        """Casts this storage to long type."""
1038
        _warn_typed_storage_removal()
1039
        return self._to(torch.long)
1040

1041
    def int(self):
1042
        """Casts this storage to int type."""
1043
        _warn_typed_storage_removal()
1044
        return self._to(torch.int)
1045

1046
    def short(self):
1047
        """Casts this storage to short type."""
1048
        _warn_typed_storage_removal()
1049
        return self._to(torch.short)
1050

1051
    def char(self):
1052
        """Casts this storage to char type."""
1053
        _warn_typed_storage_removal()
1054
        return self._to(torch.int8)
1055

1056
    def byte(self):
1057
        """Casts this storage to byte type."""
1058
        _warn_typed_storage_removal()
1059
        return self._to(torch.uint8)
1060

1061
    def bool(self):
1062
        """Casts this storage to bool type."""
1063
        _warn_typed_storage_removal()
1064
        return self._to(torch.bool)
1065

1066
    def bfloat16(self):
1067
        """Casts this storage to bfloat16 type."""
1068
        _warn_typed_storage_removal()
1069
        return self._to(torch.bfloat16)
1070

1071
    def complex_double(self):
1072
        """Casts this storage to complex double type."""
1073
        _warn_typed_storage_removal()
1074
        return self._to(torch.cdouble)
1075

1076
    def complex_float(self):
1077
        """Casts this storage to complex float type."""
1078
        _warn_typed_storage_removal()
1079
        return self._to(torch.cfloat)
1080

1081
    def float8_e5m2(self):
1082
        """Casts this storage to float8_e5m2 type"""
1083
        _warn_typed_storage_removal()
1084
        return self._to(torch.float8_e5m2)
1085

1086
    def float8_e4m3fn(self):
1087
        """Casts this storage to float8_e4m3fn type"""
1088
        _warn_typed_storage_removal()
1089
        return self._to(torch.float8_e4m3fn)
1090

1091
    def float8_e5m2fnuz(self):
1092
        """Casts this storage to float8_e5m2fnuz type"""
1093
        _warn_typed_storage_removal()
1094
        return self._to(torch.float8_e5m2fnuz)
1095

1096
    def float8_e4m3fnuz(self):
1097
        """Casts this storage to float8_e4m3fnuz type"""
1098
        _warn_typed_storage_removal()
1099
        return self._to(torch.float8_e4m3fnuz)
1100

1101
    @classmethod
1102
    def from_file(cls, filename, shared, size):
1103
        """from_file(filename, shared=False, size=0) -> Storage
1104

1105
        Creates a CPU storage backed by a memory-mapped file.
1106

1107
        If ``shared`` is ``True``, then memory is shared between all processes.
1108
        All changes are written to the file. If ``shared`` is ``False``, then the changes on
1109
        the storage do not affect the file.
1110

1111
        ``size`` is the number of elements in the storage. If ``shared`` is ``False``,
1112
        then the file must contain at least ``size * sizeof(Type)`` bytes
1113
        (``Type`` is the type of storage). If ``shared`` is ``True`` the file will be created if needed.
1114

1115
        Args:
1116
            filename (str): file name to map
1117
            shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the
1118
                            underlying `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_)
1119
            size (int): number of elements in the storage
1120
        """
1121
        _warn_typed_storage_removal()
1122
        if cls == TypedStorage:
1123
            raise RuntimeError('from_file can only be called on derived classes')
1124
        untyped_storage: UntypedStorage = UntypedStorage.from_file(
1125
            filename,
1126
            shared,
1127
            size * torch._utils._element_size(cls.dtype))
1128
        storage = cls(wrap_storage=untyped_storage)
1129
        return storage
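    # Added note (not in the original source): a hedged sketch using one of
    # the legacy per-dtype subclasses, since from_file rejects TypedStorage
    # itself; the path below is hypothetical.
    #
    #   >>> s = torch.FloatStorage.from_file('/tmp/example.bin', shared=True, size=10)
    #   >>> s[0] = 1.0   # with shared=True, writes go through to the mapped file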
1130

1131
    @classmethod
1132
    def _expired(cls, *args, **kwargs):
1133
        return UntypedStorage._expired(*args, **kwargs)
1134

1135
    def _write_file(self, *args, **kwargs):
1136
        return self._untyped_storage._write_file(*args, **kwargs)
1137

1138
    def _set_from_file(self, *args, **kwargs):
1139
        return self._untyped_storage._set_from_file(*args, **kwargs)
1140

1141
    def _set_cdata(self, *args, **kwargs):
1142
        return self._untyped_storage._set_cdata(*args, **kwargs)
1143

1144
    def _share_cuda_(self, *args, **kwargs):
1145
        return self._untyped_storage._share_cuda_(*args, **kwargs)
1146

1147
    def is_shared(self):
1148
        _warn_typed_storage_removal()
1149
        return self._is_shared()
1150

1151
    # For internal use only, to avoid deprecation warning
1152
    def _is_shared(self):
1153
        return self._untyped_storage.is_shared()
1154

1155
    @classmethod
1156
    def _new_shared_cuda(cls, *args, **kwargs):
1157
        return torch.UntypedStorage._new_shared_cuda(*args, **kwargs)
1158

1159
    def _share_filename_cpu_(self, *args, **kwargs):
1160
        manager_handle, storage_handle, size = self._untyped_storage._share_filename_cpu_(*args, **kwargs)
1161
        return manager_handle, storage_handle, size // self._element_size()
1162

1163
    def _shared_decref(self):
1164
        self._untyped_storage._shared_decref()
1165
        return self
1166

1167
    @classmethod
1168
    def _release_ipc_counter(cls, *args, device=None, **kwargs):
1169
        return torch.UntypedStorage._release_ipc_counter_cuda(*args, **kwargs)
1170

1171
    def _shared_incref(self, *args, **kwargs):
1172
        return self._untyped_storage._shared_incref(*args, **kwargs)
1173

1174
    def _share_fd_cpu_(self, *args, **kwargs):
1175
        fd, size = self._untyped_storage._share_fd_cpu_(*args, **kwargs)
1176
        return fd, size // self._element_size()
1177

1178
    def _get_legacy_storage_class(self):
1179
        if self.dtype not in _dtype_to_storage_type_map():
1180
            return None
1181

1182
        storage_name = _dtype_to_storage_type_map()[self.dtype]
1183

1184
        if self.device.type not in ['cpu', 'cuda', "hpu", torch._C._get_privateuse1_backend_name()]:
1185
            return None
1186

1187
        module = torch if self.device.type == 'cpu' else getattr(torch, self.device.type)
1188

1189
        try:
1190
            return getattr(module, storage_name)
1191
        except AttributeError:
1192
            return None
1193

1194
TypedStorage.type.__doc__ = _type.__doc__
1195
TypedStorage.cuda.__doc__ = _cuda.__doc__
1196
TypedStorage.hpu.__doc__ = _hpu.__doc__
1197

1198
class _LegacyStorageMeta(type):
1199
    dtype: torch.dtype
1200

1201
    def __instancecheck__(cls, instance):
1202
        if type(instance) == TypedStorage:
1203
            cls_device = _get_device_from_module(cls.__module__)
1204
            return (cls_device == instance.device.type) and (cls.dtype == instance.dtype)
1205
        return False
1206

1207
class _LegacyStorage(TypedStorage, metaclass=_LegacyStorageMeta):
1208
    @classmethod
1209
    def _new_shared(cls, size):
1210
        """Create a new storage in shared memory with the same data type."""
1211
        untyped_storage = torch.UntypedStorage._new_shared(size * cls()._element_size())
1212
        return cls(wrap_storage=untyped_storage)
1213

1214
    @classmethod
1215
    def _release_ipc_counter(cls, *args, **kwargs):
1216
        return torch.UntypedStorage._release_ipc_counter_cuda(*args, **kwargs)
1217

1218
    @classmethod
1219
    def _new_shared_filename(cls, manager, obj, size):
1220
        bytes_size = size * torch._utils._element_size(cls.dtype)
1221
        return cls(wrap_storage=torch.UntypedStorage._new_shared_filename_cpu(manager, obj, bytes_size))
1222

1223
def _get_dtype_from_pickle_storage_type(pickle_storage_type: str):
1224
    try:
1225
        return _storage_type_to_dtype_map()[pickle_storage_type]
1226
    except KeyError as e:
1227
        raise KeyError(
1228
            f'pickle storage type "{pickle_storage_type}" is not recognized') from e
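
# Added note (not in the original source): the two lru_cache'd maps defined
# earlier are inverses of each other for the legacy dtypes, e.g.:
#
#   >>> _dtype_to_storage_type_map()[torch.float]
#   'FloatStorage'
#   >>> _get_dtype_from_pickle_storage_type('FloatStorage')
#   torch.float32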
1229
