import sys
from itertools import product

import numpy as np

import torch
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import (
    dtypes,
    instantiate_device_type_tests,
)
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
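

# Tests for exchanging data between torch and NumPy: torch.from_numpy,
# Tensor.numpy, the __array__ protocol, and mixed tensor/ndarray arithmetic
# and comparisons.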
class TestNumPyInterop(TestCase):
    def test_numpy_non_writeable(self, device):
        # The array contents are arbitrary; it only needs to be marked
        # read-only before the conversion.
        arr = np.zeros(5)
        arr.flags["WRITEABLE"] = False
        # from_numpy cannot honor the read-only flag, so it should warn.
        self.assertWarns(UserWarning, lambda: torch.from_numpy(arr))

    def test_numpy_unresizable(self, device) -> None:
        # An ndarray whose memory is shared with a tensor cannot be resized.
        x = np.zeros((2, 2))
        y = torch.from_numpy(x)
        with self.assertRaises(ValueError):
            x.resize((5, 5))

        # The same holds in the other direction: once a tensor's storage is
        # exported to NumPy, neither the tensor nor the array can be resized.
        z = torch.randn(5, 5)
        w = z.numpy()
        with self.assertRaises(RuntimeError):
            z.resize_(10, 10)
        with self.assertRaises(ValueError):
            w.resize((10, 10))
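
    # Tensor.numpy() should round-trip values for 1-D and 2-D tensors across
    # dtypes, including non-contiguous layouts and nonzero storage offsets,
    # and the returned array should share memory with the tensor.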
    def test_to_numpy(self, device) -> None:
        def get_castable_tensor(shape, dtype):
            if dtype.is_floating_point:
                dtype_info = torch.finfo(dtype)
                # clamp to a finite range; sampling over the full double range
                # would overflow to inf
                low = max(dtype_info.min, -1e10)
                high = min(dtype_info.max, 1e10)
                t = torch.empty(shape, dtype=torch.float64).uniform_(low, high)
            else:
                # clamp to a range representable by every integer dtype
                low = max(torch.iinfo(dtype).min, int(-1e10))
                high = min(torch.iinfo(dtype).max, int(1e10))
                t = torch.empty(shape, dtype=torch.int64).random_(low, high)
            return t.to(dtype)

        dtypes_to_test = [
            torch.uint8,
            torch.int8,
            torch.short,
            torch.int,
            torch.long,
            torch.half,
            torch.float,
            torch.double,
        ]

        for dtp in dtypes_to_test:
            # 1D
            sz = 10
            x = get_castable_tensor(sz, dtp)
            y = x.numpy()
            for i in range(sz):
                self.assertEqual(x[i], y[i])

            # 1D with a nonzero storage offset
            xm = get_castable_tensor(sz * 2, dtp)
            x = xm.narrow(0, sz - 1, sz)
            self.assertTrue(x.storage_offset() > 0)
            y = x.numpy()
            for i in range(sz):
                self.assertEqual(x[i], y[i])

            sz1 = 3
            sz2 = 5

            def check2d(x, y):
                for i in range(sz1):
                    for j in range(sz2):
                        self.assertEqual(x[i][j], y[i][j])

            # empty
            x = torch.tensor([]).to(dtp)
            y = x.numpy()
            self.assertEqual(y.size, 0)

            # contiguous 2D
            x = get_castable_tensor((sz1, sz2), dtp)
            y = x.numpy()
            check2d(x, y)
            self.assertTrue(y.flags["C_CONTIGUOUS"])

            # 2D with a nonzero storage offset
            xm = get_castable_tensor((sz1 * 2, sz2), dtp)
            x = xm.narrow(0, sz1 - 1, sz1)
            y = x.numpy()
            self.assertTrue(x.storage_offset() > 0)
            check2d(x, y)
            self.assertTrue(y.flags["C_CONTIGUOUS"])

            # non-contiguous 2D
            x = get_castable_tensor((sz2, sz1), dtp).t()
            y = x.numpy()
            check2d(x, y)
            self.assertFalse(y.flags["C_CONTIGUOUS"])

            # non-contiguous 2D with a nonzero storage offset
            xm = get_castable_tensor((sz2 * 2, sz1), dtp)
            x = xm.narrow(0, sz2 - 1, sz2).t()
            y = x.numpy()
            self.assertTrue(x.storage_offset() > 0)
            check2d(x, y)

            # non-contiguous 2D with holes
            xm = get_castable_tensor((sz2 * 2, sz1 * 2), dtp)
            x = xm.narrow(0, sz2 - 1, sz2).narrow(1, sz1 - 1, sz1).t()
            y = x.numpy()
            self.assertTrue(x.storage_offset() > 0)
            check2d(x, y)

            if dtp != torch.half:
                # the returned array is writeable and shares memory, so writes
                # through the array are visible on the tensor side
                x = get_castable_tensor((3, 4), dtp)
                y = x.numpy()
                self.assertTrue(y.flags.writeable)
                y[0][1] = 3
                self.assertTrue(x[0][1] == 3)
                y = x.t().numpy()
                self.assertTrue(y.flags.writeable)
                y[1][0] = 3
                self.assertTrue(x[0][1] == 3)

    def test_to_numpy_bool(self, device) -> None:
        x = torch.tensor([True, False], dtype=torch.bool)
        self.assertEqual(x.dtype, torch.bool)

        y = x.numpy()
        self.assertEqual(y.dtype, np.bool_)
        for i in range(len(x)):
            self.assertEqual(x[i], y[i])

        x = torch.tensor([True], dtype=torch.bool)
        self.assertEqual(x.dtype, torch.bool)

        y = x.numpy()
        self.assertEqual(y.dtype, np.bool_)
        self.assertEqual(x[0], y[0])
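
    # Tensor.numpy(force=True) should detach, resolve conjugation, and copy to
    # CPU when needed; without force, those same cases are expected to raise.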
    @skipIfTorchDynamo("conj bit not implemented in TensorVariable yet")
    def test_to_numpy_force_argument(self, device) -> None:
        for force in [False, True]:
            for requires_grad in [False, True]:
                for sparse in [False, True]:
                    for conj in [False, True]:
                        data = [[1 + 2j, -2 + 3j], [-1 - 2j, 3 - 2j]]
                        x = torch.tensor(
                            data, requires_grad=requires_grad, device=device
                        )
                        y = x
                        if sparse:
                            if requires_grad:
                                continue
                            x = x.to_sparse()
                        if conj:
                            x = x.conj()
                            y = x.resolve_conj()
                        expect_error = (
                            requires_grad or sparse or conj or not device == "cpu"
                        )
                        error_msg = r"Use (t|T)ensor\..*(\.numpy\(\))?"
                        if not force and expect_error:
                            self.assertRaisesRegex(
                                (RuntimeError, TypeError), error_msg, lambda: x.numpy()
                            )
                            self.assertRaisesRegex(
                                (RuntimeError, TypeError),
                                error_msg,
                                lambda: x.numpy(force=False),
                            )
                        elif force and sparse:
                            self.assertRaisesRegex(
                                TypeError, error_msg, lambda: x.numpy(force=True)
                            )
                        else:
                            self.assertEqual(x.numpy(force=force), y)
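
    # torch.from_numpy should round-trip values for every supported dtype,
    # preserve shape, strides, and storage offset, and reject unsupported
    # dtypes and invalid strides.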
    def test_from_numpy(self, device) -> None:
        dtypes_to_test = [
            np.double,
            np.float64,
            np.float16,
            np.complex64,
            np.complex128,
            np.int64,
            np.int32,
            np.int16,
            np.int8,
            np.uint8,
            np.longlong,
            np.bool_,
        ]
        complex_dtypes = [np.complex64, np.complex128]

        for dtype in dtypes_to_test:
            array = np.array([1, 2, 3, 4], dtype=dtype)
            tensor_from_array = torch.from_numpy(array)
            for i in range(len(array)):
                self.assertEqual(tensor_from_array[i], array[i])
            # the remainder ufunc is not supported for complex dtypes
            if dtype not in complex_dtypes:
                array2 = array % 2
                tensor_from_array2 = torch.from_numpy(array2)
                for i in range(len(array2)):
                    self.assertEqual(tensor_from_array2[i], array2[i])

        # unsupported dtype
        array = np.array(["foo", "bar"], dtype=np.dtype(np.str_))
        with self.assertRaises(TypeError):
            tensor_from_array = torch.from_numpy(array)

        # check storage offset
        x = np.linspace(1, 125, 125)
        x.shape = (5, 5, 5)
        x = x[1]
        expected = torch.arange(1, 126, dtype=torch.float64).view(5, 5, 5)[1]
        self.assertEqual(torch.from_numpy(x), expected)

        # check noncontiguous
        x = np.linspace(1, 25, 25)
        x.shape = (5, 5)
        expected = torch.arange(1, 26, dtype=torch.float64).view(5, 5).t()
        self.assertEqual(torch.from_numpy(x.T), expected)

        # check noncontiguous with holes
        x = np.linspace(1, 125, 125)
        x.shape = (5, 5, 5)
        x = x[:, 1]
        expected = torch.arange(1, 126, dtype=torch.float64).view(5, 5, 5)[:, 1]
        self.assertEqual(torch.from_numpy(x), expected)

        # check zero dimensional
        x = np.zeros((0, 2))
        self.assertEqual(torch.from_numpy(x).shape, (0, 2))
        x = np.zeros((2, 0))
        self.assertEqual(torch.from_numpy(x).shape, (2, 0))

        # check that ill-sized strides raise an exception
        x = np.array([3.0, 5.0, 8.0])
        x.strides = (3,)
        self.assertRaises(ValueError, lambda: torch.from_numpy(x))

    @skipIfTorchDynamo("No need to test invalid dtypes that should fail by design.")
    def test_from_numpy_no_leak_on_invalid_dtype(self):
        # from_numpy raising on an unsupported dtype must not leak a reference
        # to the source array
        x = np.array("value".encode("ascii"))
        for _ in range(1000):
            try:
                torch.from_numpy(x)
            except TypeError:
                pass
        self.assertTrue(sys.getrefcount(x) == 2)

    def test_from_list_of_ndarray_warning(self, device):
        warning_msg = (
            r"Creating a tensor from a list of numpy.ndarrays is extremely slow"
        )
        with self.assertWarnsOnceRegex(UserWarning, warning_msg):
            torch.tensor([np.array([0]), np.array([1])], device=device)
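
    # Ragged sequences of ndarrays cannot be stacked into a single tensor and
    # should fail with a descriptive error.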
    def test_ctor_with_invalid_numpy_array_sequence(self, device):
        # invalid list of arrays (mismatched lengths)
        with self.assertRaisesRegex(ValueError, "expected sequence of length"):
            torch.tensor(
                [np.random.random(size=(3, 3)), np.random.random(size=(3, 0))],
                device=device,
            )

        # invalid list of lists of arrays
        with self.assertRaisesRegex(ValueError, "expected sequence of length"):
            torch.tensor(
                [[np.random.random(size=(3, 3)), np.random.random(size=(3, 2))]],
                device=device,
            )

        with self.assertRaisesRegex(ValueError, "expected sequence of length"):
            torch.tensor(
                [
                    [np.random.random(size=(3, 3)), np.random.random(size=(3, 3))],
                    [np.random.random(size=(3, 3)), np.random.random(size=(3, 2))],
                ],
                device=device,
            )

        # a 0-d array cannot be iterated over, so this fails with "not a sequence"
        with self.assertRaisesRegex(TypeError, "not a sequence"):
            torch.tensor(
                [[np.random.random(size=(3)), np.random.random()]], device=device
            )

        # mixed list of lists and arrays with mismatched lengths
        with self.assertRaisesRegex(ValueError, "expected sequence of length"):
            torch.tensor([[1, 2, 3], np.random.random(size=(2,))], device=device)

    def test_ctor_with_numpy_scalar_ctor(self, device) -> None:
        # NumPy scalars should round-trip through torch.tensor(...).item();
        # this is a representative subset of NumPy scalar types.
        for dtype in [np.float64, np.float16, np.int64, np.int32, np.uint8, np.bool_]:
            self.assertEqual(dtype(42), torch.tensor(dtype(42)).item())

    def test_numpy_index(self, device):
        # indexing a tensor with a NumPy integer scalar behaves like a Python int
        i = np.array([0, 1, 2], dtype=np.int32)
        x = torch.randn(5, 5)
        for idx in i:
            self.assertFalse(isinstance(idx, int))
            self.assertEqual(x[idx], x[int(idx)])

    def test_numpy_index_multi(self, device):
        for dim_sz in [2, 8, 16, 32]:
            i = np.zeros((dim_sz, dim_sz, dim_sz), dtype=np.int32)
            i[: dim_sz // 2, :, :] = 1
            x = torch.randn(dim_sz, dim_sz, dim_sz)
            self.assertTrue(x[i == 1].numel() == np.sum(i))
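
    # Exercises the __array__ / __array_wrap__ protocol: np.asarray on tensors,
    # NumPy ufuncs applied to tensors, and boolean-returning ufuncs.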
    def test_numpy_array_interface(self, device):
        types = [
            torch.DoubleTensor,
            torch.FloatTensor,
            torch.HalfTensor,
            torch.LongTensor,
            torch.IntTensor,
            torch.ShortTensor,
            torch.ByteTensor,
        ]
        dtypes = [
            np.float64,
            np.float32,
            np.float16,
            np.int64,
            np.int32,
            np.int16,
            np.uint8,
        ]
        for tp, dtype in zip(types, dtypes):
            # unsigned dtypes cannot hold negative values
            if np.dtype(dtype).kind == "u":
                x = torch.tensor([1, 2, 3, 4]).type(tp)
                array = np.array([1, 2, 3, 4], dtype=dtype)
            else:
                x = torch.tensor([1, -2, 3, -4]).type(tp)
                array = np.array([1, -2, 3, -4], dtype=dtype)

            # test __array__ without a dtype argument
            asarray = np.asarray(x)
            self.assertIsInstance(asarray, np.ndarray)
            self.assertEqual(asarray.dtype, dtype)
            for i in range(len(x)):
                self.assertEqual(asarray[i], x[i])

            # test __array_wrap__, same dtype
            abs_x = np.abs(x)
            abs_array = np.abs(array)
            self.assertIsInstance(abs_x, tp)
            for i in range(len(x)):
                self.assertEqual(abs_x[i], abs_array[i])

        # test __array__ with a dtype argument
        for dtype in dtypes:
            x = torch.IntTensor([1, -2, 3, -4])
            asarray = np.asarray(x, dtype=dtype)
            self.assertEqual(asarray.dtype, dtype)
            if np.dtype(dtype).kind == "u":
                # negative values wrap around for unsigned dtypes
                wrapped_x = np.array([1, -2, 3, -4], dtype=dtype)
                for i in range(len(x)):
                    self.assertEqual(asarray[i], wrapped_x[i])
            else:
                for i in range(len(x)):
                    self.assertEqual(asarray[i], x[i])

        # test some math functions with float types
        float_types = [torch.DoubleTensor, torch.FloatTensor]
        float_dtypes = [np.float64, np.float32]
        for tp, dtype in zip(float_types, float_dtypes):
            x = torch.tensor([1, 2, 3, 4]).type(tp)
            array = np.array([1, 2, 3, 4], dtype=dtype)
            for func in ["sin", "sqrt", "ceil"]:
                ufunc = getattr(np, func)
                res_x = ufunc(x)
                res_array = ufunc(array)
                self.assertIsInstance(res_x, tp)
                for i in range(len(x)):
                    self.assertEqual(res_x[i], res_array[i])

        # test functions with a boolean return value
        for tp, dtype in zip(types, dtypes):
            x = torch.tensor([1, 2, 3, 4]).type(tp)
            array = np.array([1, 2, 3, 4], dtype=dtype)
            geq2_x = np.greater_equal(x, 2)
            geq2_array = np.greater_equal(array, 2).astype("uint8")
            self.assertIsInstance(geq2_x, torch.ByteTensor)
            for i in range(len(x)):
                self.assertEqual(geq2_x[i], geq2_array[i])
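
    # Multiplying a tensor by a NumPy scalar must produce a tensor (not an
    # ndarray), keep the tensor dtype, and preserve requires_grad.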
    def test_multiplication_numpy_scalar(self, device) -> None:
        for np_dtype in [
            np.float32,
            np.float64,
            np.int32,
            np.int64,
            np.int16,
            np.uint8,
        ]:
            for t_dtype in [torch.float, torch.double]:
                np_sc = np_dtype(2.0)
                t = torch.ones(2, requires_grad=True, dtype=t_dtype)
                r1 = t * np_sc
                self.assertIsInstance(r1, torch.Tensor)
                self.assertTrue(r1.dtype == t_dtype)
                self.assertTrue(r1.requires_grad)
                r2 = np_sc * t
                self.assertIsInstance(r2, torch.Tensor)
                self.assertTrue(r2.dtype == t_dtype)
                self.assertTrue(r2.requires_grad)

    def test_parse_numpy_int_overflow(self, device):
        # a NumPy integer that overflows the parsed C type should raise
        self.assertRaisesRegex(
            RuntimeError,
            "(Overflow|an integer is required)",
            lambda: torch.mean(torch.randn(1, 1), np.uint64(-1)),
        )

    def test_parse_numpy_int(self, device):
        for nptype in [np.int16, np.int8, np.uint8, np.int32, np.int64]:
            scalar = 3
            np_arr = np.array([scalar], dtype=nptype)
            np_val = np_arr[0]

            # NumPy integral scalars act like Python ints where an int is expected
            self.assertEqual(torch.ones(5).diag(scalar), torch.ones(5).diag(np_val))
            self.assertEqual(
                torch.ones([2, 2, 2, 2]).mean(scalar),
                torch.ones([2, 2, 2, 2]).mean(np_val),
            )

            # NumPy integral scalars work as a size argument
            self.assertEqual(torch.Storage(np_val).size(), scalar)

            # and as an assigned value
            tensor = torch.tensor([2], dtype=torch.int)
            tensor[0] = np_val
            self.assertEqual(tensor[0], np_val)

            # mixed tensor / NumPy-scalar arithmetic keeps the tensor dtype
            t = torch.from_numpy(np_arr)
            self.assertEqual((t + np_val).dtype, t.dtype)
            self.assertEqual((np_val + t).dtype, t.dtype)
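
    # Tensors constructed from NumPy arrays should have real, accessible
    # storage regardless of the requested dtype.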
    def test_has_storage_numpy(self, device):
        for dtype in [np.float32, np.float64, np.int64, np.int32, np.int16, np.uint8]:
            arr = np.array([1], dtype=dtype)
            self.assertIsNotNone(
                torch.tensor(arr, device=device, dtype=torch.float32).storage()
            )
            self.assertIsNotNone(
                torch.tensor(arr, device=device, dtype=torch.double).storage()
            )
            self.assertIsNotNone(
                torch.tensor(arr, device=device, dtype=torch.int).storage()
            )
            self.assertIsNotNone(
                torch.tensor(arr, device=device, dtype=torch.long).storage()
            )
            self.assertIsNotNone(
                torch.tensor(arr, device=device, dtype=torch.uint8).storage()
            )
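
    # Comparing tensor scalars against the corresponding NumPy scalars should
    # agree element-wise; bfloat16 has no NumPy equivalent and must raise.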
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
    def test_numpy_scalar_cmp(self, device, dtype):
        if dtype.is_complex:
            tensors = (
                torch.tensor(complex(1, 3), dtype=dtype, device=device),
                torch.tensor([complex(1, 3), 0, 2j], dtype=dtype, device=device),
                torch.tensor(
                    [[complex(3, 1), 0], [-1j, 5]], dtype=dtype, device=device
                ),
            )
        else:
            tensors = (
                torch.tensor(3, dtype=dtype, device=device),
                torch.tensor([1, 0, -3], dtype=dtype, device=device),
                torch.tensor([[3, 0, -1], [3, 5, 4]], dtype=dtype, device=device),
            )

        for tensor in tensors:
            if dtype == torch.bfloat16:
                # NumPy has no bfloat16 dtype, so the conversion must fail
                with self.assertRaises(TypeError):
                    np_array = tensor.cpu().numpy()
                continue

            np_array = tensor.cpu().numpy()
            for t, a in product(
                (tensor.flatten()[0], tensor.flatten()[0].item()),
                (np_array.flatten()[0], np_array.flatten()[0].item()),
            ):
                self.assertEqual(t, a)
                if (
                    dtype == torch.complex64
                    and torch.is_tensor(t)
                    and type(a) == np.complex64
                ):
                    # the imaginary part is dropped when comparing a complex64
                    # tensor scalar with np.complex64, so equality fails here
                    self.assertFalse(t == a)
                else:
                    self.assertTrue(t == a)
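
    # __eq__ between a tensor and an ndarray should broadcast element-wise and
    # be symmetric, for equal, partially equal, and fully unequal operands.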
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool))
    def test___eq__(self, device, dtype):
        a = make_tensor((5, 7), dtype=dtype, device=device, low=-9, high=9)
        b = a.clone().detach()
        b_np = b.numpy()

        # all elements equal
        res_check = torch.ones_like(a, dtype=torch.bool)
        self.assertEqual(a == b_np, res_check)
        self.assertEqual(b_np == a, res_check)

        # one element unequal
        if dtype == torch.bool:
            b[1][3] = not b[1][3]
        else:
            b[1][3] += 1
        res_check[1][3] = False
        self.assertEqual(a == b_np, res_check)
        self.assertEqual(b_np == a, res_check)

        # random elements unequal
        rand = torch.randint(0, 2, a.shape, dtype=torch.bool)
        res_check = rand.logical_not()

        b.copy_(a)
        if dtype == torch.bool:
            b[rand] = b[rand].logical_not()
        else:
            b[rand] += 1
        self.assertEqual(a == b_np, res_check)
        self.assertEqual(b_np == a, res_check)

        # all elements unequal
        if dtype == torch.bool:
            b.copy_(a.logical_not())
        else:
            b.copy_(a + 1)
        res_check.fill_(False)
        self.assertEqual(a == b_np, res_check)
        self.assertEqual(b_np == a, res_check)
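
    # Binary ops between a 0-dim tensor and an empty tensor built from a NumPy
    # array should broadcast to the empty shape.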
    def test_empty_tensors_interop(self, device):
        x = torch.rand((), dtype=torch.float16)
        y = torch.tensor(np.random.rand(0), dtype=torch.float16)

        # the result of each op against the empty tensor keeps its empty shape
        self.assertEqual(torch.true_divide(x, y).shape, y.shape)
        self.assertEqual(torch.mul(x, y).shape, y.shape)
        self.assertEqual(torch.div(x, y, rounding_mode="floor").shape, y.shape)


instantiate_device_type_tests(TestNumPyInterop, globals())

if __name__ == "__main__":
    run_tests()