# Owner(s): ["module: dynamo"]

import functools
import inspect
from unittest import expectedFailure as xfail, skipIf as skip

import numpy as _np
from pytest import raises as assert_raises

import torch
import torch._numpy as w
import torch._numpy._ufuncs as _ufuncs
import torch._numpy._util as _util
from torch._numpy.testing import assert_allclose, assert_equal
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    TestCase,
)


# These functions receive one array_like arg and return one array_like result
one_arg_funcs = [
    w.asarray,
    w.empty_like,
    w.ones_like,
    w.zeros_like,
    functools.partial(w.full_like, fill_value=42),
    w.corrcoef,
    w.squeeze,
    w.argmax,
    # w.bincount,  # XXX: input dtypes
    w.prod,
    w.sum,
    w.real,
    w.imag,
    w.angle,
    w.real_if_close,
    w.isreal,
    w.iscomplex,
    w.isneginf,
    w.isposinf,
    w.i0,
    w.copy,
    w.array,
    w.round,
    w.around,
    w.flip,
    w.vstack,
    w.hstack,
    w.dstack,
    w.column_stack,
    w.row_stack,
    w.flatnonzero,
]

ufunc_names = _ufuncs._unary
ufunc_names.remove("invert")  # torch: bitwise_not_cpu not implemented for 'Float'
ufunc_names.remove("bitwise_not")

one_arg_funcs += [getattr(_ufuncs, name) for name in ufunc_names]


@instantiate_parametrized_tests
class TestOneArr(TestCase):
    """Base for smoke tests of one-arg functions: (array_like) -> (array_like)

    Accepts array_likes, torch.Tensors, w.ndarrays; returns an ndarray
    """

    @parametrize("func", one_arg_funcs)
    def test_asarray_tensor(self, func):
        t = torch.Tensor([[1.0, 2, 3], [4, 5, 6]])
        ta = func(t)

        assert isinstance(ta, w.ndarray)

    @parametrize("func", one_arg_funcs)
    def test_asarray_list(self, func):
        lst = [[1.0, 2, 3], [4, 5, 6]]
        la = func(lst)

        assert isinstance(la, w.ndarray)

    @parametrize("func", one_arg_funcs)
    def test_asarray_array(self, func):
        a = w.asarray([[1.0, 2, 3], [4, 5, 6]])
        la = func(a)

        assert isinstance(la, w.ndarray)


one_arg_axis_funcs = [
    w.argmax,
    w.argmin,
    w.prod,
    w.sum,
    w.all,
    w.any,
    w.mean,
    w.argsort,
    w.std,
    w.var,
    w.flip,
]


@instantiate_parametrized_tests
class TestOneArrAndAxis(TestCase):
    @parametrize("func", one_arg_axis_funcs)
    @parametrize("axis", [0, 1, -1, None])
    def test_andaxis_tensor(self, func, axis):
        t = torch.Tensor([[1.0, 2, 3], [4, 5, 6]])
        ta = func(t, axis=axis)
        assert isinstance(ta, w.ndarray)

    @parametrize("func", one_arg_axis_funcs)
    @parametrize("axis", [0, 1, -1, None])
    def test_andaxis_list(self, func, axis):
        t = [[1.0, 2, 3], [4, 5, 6]]
        ta = func(t, axis=axis)
        assert isinstance(ta, w.ndarray)

    @parametrize("func", one_arg_axis_funcs)
    @parametrize("axis", [0, 1, -1, None])
    def test_andaxis_array(self, func, axis):
        t = w.asarray([[1.0, 2, 3], [4, 5, 6]])
        ta = func(t, axis=axis)
        assert isinstance(ta, w.ndarray)


@instantiate_parametrized_tests
class TestOneArrAndAxesTuple(TestCase):
    @parametrize("func", [w.transpose])
    @parametrize("axes", [(0, 2, 1), (1, 2, 0), None])
    def test_andtuple_tensor(self, func, axes):
        t = torch.ones((1, 2, 3))
        ta = func(t, axes=axes)
        assert isinstance(ta, w.ndarray)

        # an np.transpose-specific test
        if axes is None:
            newshape = (3, 2, 1)
        else:
            newshape = tuple(t.shape[axes[i]] for i in range(w.ndim(t)))
        assert ta.shape == newshape

    @parametrize("func", [w.transpose])
    @parametrize("axes", [(0, 2, 1), (1, 2, 0), None])
    def test_andtuple_list(self, func, axes):
        t = [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]  # shape = (1, 2, 3)
        ta = func(t, axes=axes)
        assert isinstance(ta, w.ndarray)

    @parametrize("func", [w.transpose])
    @parametrize("axes", [(0, 2, 1), (1, 2, 0), None])
    def test_andtuple_array(self, func, axes):
        t = w.asarray([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]])
        ta = func(t, axes=axes)
        assert isinstance(ta, w.ndarray)

        if axes is None:
            newshape = (3, 2, 1)
        else:
            newshape = tuple(t.shape[axes[i]] for i in range(t.ndim))
        assert ta.shape == newshape


arr_shape_funcs = [
    w.reshape,
    w.empty_like,
    w.ones_like,
    functools.partial(w.full_like, fill_value=42),
    w.broadcast_to,
]


@instantiate_parametrized_tests
class TestOneArrAndShape(TestCase):
    """Smoke test of functions (array_like, shape_like) -> array_like"""

    def setUp(self):
        self.shape = (2, 3)
        self.shape_arg_name = {
            w.reshape: "newshape",
        }  # reshape expects `newshape`

    @parametrize("func", arr_shape_funcs)
    def test_andshape_tensor(self, func):
        t = torch.Tensor([[1, 2, 3], [4, 5, 6]])

        shape_dict = {self.shape_arg_name.get(func, "shape"): self.shape}
        ta = func(t, **shape_dict)
        assert isinstance(ta, w.ndarray)
        assert ta.shape == self.shape

    @parametrize("func", arr_shape_funcs)
    def test_andshape_list(self, func):
        t = [[1, 2, 3], [4, 5, 6]]

        shape_dict = {self.shape_arg_name.get(func, "shape"): self.shape}
        ta = func(t, **shape_dict)
        assert isinstance(ta, w.ndarray)
        assert ta.shape == self.shape

    @parametrize("func", arr_shape_funcs)
    def test_andshape_array(self, func):
        t = w.asarray([[1, 2, 3], [4, 5, 6]])

        shape_dict = {self.shape_arg_name.get(func, "shape"): self.shape}
        ta = func(t, **shape_dict)
        assert isinstance(ta, w.ndarray)
        assert ta.shape == self.shape


one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]
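
# A minimal, hedged illustration of the contract exercised by the class below
# (mirroring NumPy semantics): these helpers return plain Python objects, not
# wrapped ndarrays.
#
#   w.shape([[1, 2, 3], [4, 5, 6]])  # -> (2, 3), a plain tuple
#   w.ndim([[1, 2, 3], [4, 5, 6]])   # -> 2, a plain int
#   w.size([[1, 2, 3], [4, 5, 6]])   # -> 6, a plain int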


@instantiate_parametrized_tests
class TestOneArrToScalar(TestCase):
    """Smoke test of functions (array_like) -> scalar or python object."""

    @parametrize("func, np_func", one_arg_scalar_funcs)
    def test_toscalar_tensor(self, func, np_func):
        t = torch.Tensor([[1, 2, 3], [4, 5, 6]])
        ta = func(t)
        tn = np_func(_np.asarray(t))

        assert not isinstance(ta, w.ndarray)
        assert ta == tn

    @parametrize("func, np_func", one_arg_scalar_funcs)
    def test_toscalar_list(self, func, np_func):
        t = [[1, 2, 3], [4, 5, 6]]
        ta = func(t)
        tn = np_func(t)

        assert not isinstance(ta, w.ndarray)
        assert ta == tn

    @parametrize("func, np_func", one_arg_scalar_funcs)
    def test_toscalar_array(self, func, np_func):
        t = w.asarray([[1, 2, 3], [4, 5, 6]])
        ta = func(t)
        tn = np_func(t)

        assert not isinstance(ta, w.ndarray)
        assert ta == tn


shape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]


@instantiate_parametrized_tests
class TestShapeLikeToArray(TestCase):
    """Smoke test (shape_like) -> array."""

    shape = (3, 4)

    @parametrize("func", shape_funcs)
    def test_shape(self, func):
        a = func(self.shape)

        assert isinstance(a, w.ndarray)
        assert a.shape == self.shape


seq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]


@instantiate_parametrized_tests
class TestSequenceOfArrays(TestCase):
    """Smoke test (sequence of arrays) -> (sequence of arrays)."""

    @parametrize("func", seq_funcs)
    def test_single_tensor(self, func):
        t = torch.Tensor([[1, 2, 3], [4, 5, 6]])
        ta = func(t)

        # for a single argument, broadcast_arrays returns a tuple, while
        # atleast_?d return an array
        unpack = {w.broadcast_arrays: True}.get(func, False)
        res = ta[0] if unpack else ta

        assert isinstance(res, w.ndarray)

    @parametrize("func", seq_funcs)
    def test_single_list(self, func):
        lst = [[1, 2, 3], [4, 5, 6]]
        la = func(lst)

        unpack = {w.broadcast_arrays: True}.get(func, False)
        res = la[0] if unpack else la

        assert isinstance(res, w.ndarray)

    @parametrize("func", seq_funcs)
    def test_single_array(self, func):
        a = w.asarray([[1, 2, 3], [4, 5, 6]])
        la = func(a)

        unpack = {w.broadcast_arrays: True}.get(func, False)
        res = la[0] if unpack else la

        assert isinstance(res, w.ndarray)

    @parametrize("func", seq_funcs)
    def test_several(self, func):
        arys = (
            torch.Tensor([[1, 2, 3], [4, 5, 6]]),
            w.asarray([[1, 2, 3], [4, 5, 6]]),
            [[1, 2, 3], [4, 5, 6]],
        )

        result = func(*arys)
        assert isinstance(result, (tuple, list))
        assert len(result) == len(arys)
        assert all(isinstance(_, w.ndarray) for _ in result)


seq_to_single_funcs = [
    w.concatenate,
    w.stack,
    w.vstack,
    w.hstack,
    w.dstack,
    w.column_stack,
    w.row_stack,
]


@instantiate_parametrized_tests
class TestSequenceOfArraysToSingle(TestCase):
    """Smoke test (sequence of arrays) -> (array)."""

    @parametrize("func", seq_to_single_funcs)
    def test_several(self, func):
        arys = (
            torch.Tensor([[1, 2, 3], [4, 5, 6]]),
            w.asarray([[1, 2, 3], [4, 5, 6]]),
            [[1, 2, 3], [4, 5, 6]],
        )

        result = func(arys)
        assert isinstance(result, w.ndarray)


single_to_seq_funcs = (
    w.nonzero,
    # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545
    # w.tril_indices_from,
    # w.triu_indices_from,
    w.where,
)


@instantiate_parametrized_tests
class TestArrayToSequence(TestCase):
    """Smoke test array -> (tuple of arrays)."""

    @parametrize("func", single_to_seq_funcs)
    def test_asarray_tensor(self, func):
        t = torch.Tensor([[1, 2, 3], [4, 5, 6]])
        ta = func(t)

        assert isinstance(ta, tuple)
        assert all(isinstance(x, w.ndarray) for x in ta)

    @parametrize("func", single_to_seq_funcs)
    def test_asarray_list(self, func):
        lst = [[1, 2, 3], [4, 5, 6]]
        la = func(lst)

        assert isinstance(la, tuple)
        assert all(isinstance(x, w.ndarray) for x in la)

    @parametrize("func", single_to_seq_funcs)
    def test_asarray_array(self, func):
        a = w.asarray([[1, 2, 3], [4, 5, 6]])
        la = func(a)

        assert isinstance(la, tuple)
        assert all(isinstance(x, w.ndarray) for x in la)


funcs_and_args = [
    (w.linspace, (0, 10, 11)),
    (w.logspace, (1, 2, 5)),
    (w.logspace, (1, 2, 5, 11)),
    (w.geomspace, (1, 1000, 5, 11)),
    (w.eye, (5, 6)),
    (w.identity, (3,)),
    (w.arange, (5,)),
    (w.arange, (5, 8)),
    (w.arange, (5, 8, 0.5)),
    (w.tri, (3, 3, -1)),
]
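
# A hedged sketch of what a few of the parametrized cases above should
# produce, going by NumPy's documented semantics for the same constructors:
#
#   w.linspace(0, 10, 11)  # 11 evenly spaced points: 0., 1., ..., 10.
#   w.arange(5, 8, 0.5)    # [5. , 5.5, 6. , 6.5, 7. , 7.5]
#   w.tri(3, 3, -1)        # ones strictly below the main diagonal, zeros elsewhere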


@instantiate_parametrized_tests
class TestPythonArgsToArray(TestCase):
    """Smoke test (sequence of scalars) -> (array)."""

    @parametrize("func, args", funcs_and_args)
    def test_argstoarray_simple(self, func, args):
        a = func(*args)
        assert isinstance(a, w.ndarray)


class TestNormalizations(TestCase):
    """Smoke test generic problems with normalizations."""

    def test_unknown_args(self):
        # Check that unknown args to decorated functions fail
        a = w.arange(7) % 2 == 0

        # unknown positional args
        with assert_raises(TypeError):
            w.nonzero(a, "kaboom")

        # unknown kwarg
        with assert_raises(TypeError):
            w.nonzero(a, oops="ouch")

    def test_too_few_args_positional(self):
        with assert_raises(TypeError):
            w.nonzero()

    def test_unknown_args_with_defaults(self):
        # check a function with five arguments and four defaults: this should work
        w.eye(3)

        # with no arguments, the single required argument is missing: this should fail
        with assert_raises(TypeError):
            w.eye()


class TestCopyTo(TestCase):
    def test_copyto_basic(self):
        dst = w.empty(4)
        src = w.arange(4)
        w.copyto(dst, src)
        assert (dst == src).all()

    def test_copytobcast(self):
        dst = w.empty((4, 2))
        src = w.arange(4)

        # cannot broadcast => error out
        with assert_raises(RuntimeError):
            w.copyto(dst, src)

        # broadcast src against dst
        dst = w.empty((2, 4))
        w.copyto(dst, src)
        assert (dst == src).all()

    def test_copyto_typecast(self):
        dst = w.empty(4, dtype=int)
        src = w.arange(4, dtype=float)

        with assert_raises(TypeError):
            w.copyto(dst, src, casting="no")

        # force the type cast
        w.copyto(dst, src, casting="unsafe")
        assert (dst == src).all()


class TestDivmod(TestCase):
    def test_divmod_out(self):
        x1 = w.arange(8, 15)
        x2 = w.arange(4, 11)

        out = (w.empty_like(x1), w.empty_like(x1))

        quot, rem = w.divmod(x1, x2, out=out)

        assert_equal(quot, x1 // x2)
        assert_equal(rem, x1 % x2)

        assert quot is out[0]
        assert rem is out[1]

    def test_divmod_out_list(self):
        x1 = [4, 5, 6]
        x2 = [2, 1, 2]

        out = (w.empty_like(x1), w.empty_like(x1))

        quot, rem = w.divmod(x1, x2, out=out)

        assert quot is out[0]
        assert rem is out[1]

    @xfail  # ("out1, out2 not implemented")
    def test_divmod_pos_only(self):
        x1 = [4, 5, 6]
        x2 = [2, 1, 2]

        out1, out2 = w.empty_like(x1), w.empty_like(x1)

        quot, rem = w.divmod(x1, x2, out1, out2)

        assert quot is out1
        assert rem is out2

    def test_divmod_no_out(self):
        # check that the out= machinery handles no out at all
        x1 = w.array([4, 5, 6])
        x2 = w.array([2, 1, 2])
        quot, rem = w.divmod(x1, x2)

        assert_equal(quot, x1 // x2)
        assert_equal(rem, x1 % x2)

    def test_divmod_out_both_pos_and_kw(self):
        o = w.empty(1)
        with assert_raises(TypeError):
            w.divmod(1, 2, o, o, out=(o, o))


class TestSmokeNotImpl(TestCase):
    def test_nimpl_basic(self):
        # smoke test that the "NotImplemented" annotation is picked up
        with assert_raises(NotImplementedError):
            w.empty(3, like="ooops")


@instantiate_parametrized_tests
class TestDefaultDtype(TestCase):
    def test_defaultdtype_defaults(self):
        # by default, both floats and ints are 64 bit
        x = w.empty(3)
        z = x + 1j * x

        assert x.dtype.torch_dtype == torch.float64
        assert z.dtype.torch_dtype == torch.complex128

        assert w.arange(3).dtype.torch_dtype == torch.int64

    @parametrize("dt", ["pytorch", "float32", torch.float32])
    def test_set_default_float(self, dt):
        try:
            w.set_default_dtype(fp_dtype=dt)

            x = w.empty(3)
            z = x + 1j * x

            assert x.dtype.torch_dtype == torch.float32
            assert z.dtype.torch_dtype == torch.complex64

        finally:
            # restore the default
            w.set_default_dtype(fp_dtype="numpy")


@skip(_np.__version__ <= "1.23", reason="from_dlpack is new in NumPy 1.23")
class TestExport(TestCase):
    def test_exported_objects(self):
        exported_fns = (
            x
            for x in dir(w)
            if inspect.isfunction(getattr(w, x))
            and not x.startswith("_")
            and x != "set_default_dtype"
        )
        diff = set(exported_fns).difference(set(dir(_np)))
        assert len(diff) == 0, str(diff)


class TestCtorNested(TestCase):
    def test_arrays_in_lists(self):
        lst = [[1, 2], [3, w.array(4)]]
        assert_equal(w.asarray(lst), [[1, 2], [3, 4]])


class TestMisc(TestCase):
    def test_ndarrays_to_tensors(self):
        out = _util.ndarrays_to_tensors(((w.asarray(42), 7), 3))
        assert len(out) == 2
        assert isinstance(out[0], tuple) and len(out[0]) == 2
        assert isinstance(out[0][0], torch.Tensor)

    @skip(not TEST_CUDA, reason="requires cuda")
    def test_f16_on_cuda(self):
        # make sure operations with float16 tensors give the same results on CUDA and on CPU
        t = torch.arange(5, dtype=torch.float16)
        assert_allclose(w.vdot(t.cuda(), t.cuda()), w.vdot(t, t))
        assert_allclose(w.inner(t.cuda(), t.cuda()), w.inner(t, t))
        assert_allclose(w.matmul(t.cuda(), t.cuda()), w.matmul(t, t))
        assert_allclose(w.einsum("i,i", t.cuda(), t.cuda()), w.einsum("i,i", t, t))

        assert_allclose(w.mean(t.cuda()), w.mean(t))

        assert_allclose(w.cov(t.cuda(), t.cuda()), w.cov(t, t).tensor.cuda())
        assert_allclose(w.corrcoef(t.cuda()), w.corrcoef(t).tensor.cuda())


if __name__ == "__main__":
    run_tests()