Lines Matching full:coalesce

296             self.assertEqual(x.coalesce()._nnz(), nnz if x.is_coalesced() else nnz // 2)
314 # Make sure that coalesce handles duplicate indices correctly
318 self.assertEqual(x.coalesce()._nnz(), 9)
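
For context, a minimal sketch of the duplicate-index behaviour these assertions rely on, assuming the usual COO semantics where coalesce() merges entries that share an index by summing their values (the tensors below are illustrative, not taken from the test):

    import torch

    i = torch.tensor([[0, 0, 1],
                      [0, 0, 2]])
    v = torch.tensor([1., 2., 3.])
    x = torch.sparse_coo_tensor(i, v, (2, 3))   # index (0, 0) appears twice
    xc = x.coalesce()
    assert xc._nnz() == 2                       # the duplicate entries are merged
    assert xc.to_dense()[0, 0] == 3.            # and their values summed: 1. + 2.
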
327 tc = t.coalesce()
373 # Test coalesce doesn't create autograd graph cycles (gh-52253)
388 S = S.coalesce()
390 S2 = S.coalesce()
552 a_coalesced = a.coalesce()
562 a_coalesced = a.coalesce()
565 self.assertEqual(a.coalesce(), a.coalesce().to_dense().to_sparse())
571 a_coalesced = a.coalesce()
643 x = x.coalesce()
724 x = x.coalesce()
817 x = x.coalesce()
1009 x_coalesced = x.coalesce()
1232 t_sparse = t.to_sparse().coalesce() if coalesced else t.to_sparse()
1546 bias = bias.coalesce()
1770 y = x.coalesce()
1799 D = S.coalesce().to_dense().detach().requires_grad_(True)
1919 y = x1.coalesce()
1920 z = x1.coalesce()
1925 # check that coalesce is out of place if the original tensor is not
1929 # check that coalesce is in-place if the original tensor is
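
The two comments above describe the aliasing behaviour being verified; a rough sketch of the same idea, assuming coalesce() hands back its input unchanged when is_coalesced() is already True (names and values are illustrative):

    import torch

    i = torch.tensor([[0, 1, 1]])
    v = torch.tensor([3., 4., 5.])
    x1 = torch.sparse_coo_tensor(i, v, (3,))            # uncoalesced: index 1 is duplicated
    y = x1.coalesce()
    z = x1.coalesce()
    y._values().add_(1)                                 # mutating y's values ...
    assert not torch.equal(y._values(), z._values())    # ... leaves z alone: out of place

    x2 = x1.coalesce()                                   # already-coalesced input
    y = x2.coalesce()
    z = x2.coalesce()
    y._values().add_(1)
    assert torch.equal(y._values(), z._values())         # shared storage: effectively in-place
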
2012 x = self.sparse_tensor(i, v, torch.Size([5, 4]), dtype=dtype, device=device).coalesce()
2025 self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
2026 # check no side effects for the coalesce flag.
2028 self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())
2035 x = self.sparse_tensor(i, v, torch.Size([5, 4, 0])).coalesce()
2042 self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
2043 # check no side effects for the coalesce flag.
2045 self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())
2063 # check coalesce
2069 self.assertEqual(res_all_sparse.coalesce(), res_dense_sparse.coalesce())
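
A small illustration of why these comparisons coalesce both sides first: an uncoalesced COO representation is not unique, so results are brought into canonical form before being compared (the tensors below are illustrative):

    import torch

    # two different COO representations of the same 1-D tensor [3., 0.]
    a = torch.sparse_coo_tensor(torch.tensor([[0, 0]]), torch.tensor([1., 2.]), (2,))
    b = torch.sparse_coo_tensor(torch.tensor([[0]]), torch.tensor([3.]), (2,))
    # raw indices and values differ, but the coalesced canonical forms agree
    torch.testing.assert_close(a.coalesce().to_dense(), b.coalesce().to_dense())
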
2081 # TODO: This is also testing that, if coalesce is a no-op,
2084 x = self.sparse_tensor(i, v, torch.Size([5, 4, 2])).coalesce()
2097 self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
2098 # check no side effects for the coalesce flag
2100 self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())
2107 x = self.sparse_tensor(i, v, torch.Size([5, 4, 2, 0])).coalesce()
2114 self.assertEqual(res_dense_lhs.coalesce(), expected.coalesce())
2115 # check no side effects for the coalesce flag
2117 self.assertEqual(res_sparse_lhs.coalesce(), expected.coalesce())
2287 ).coalesce()
2297 ).coalesce()
2360 sparse_tensor.coalesce().log1p_()
2362 self.assertEqual(expected_output, sparse_tensor.coalesce().log1p_().to_dense())
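
A minimal sketch of the pattern used above, assuming the in-place unary op is applied to the coalesced tensor and the result is densified for comparison (values and shapes are illustrative; log1p(0) == 0, so unspecified entries stay zero):

    import torch

    s = torch.sparse_coo_tensor(torch.tensor([[0, 2]]), torch.tensor([1., 2.]), (3,))
    sc = s.coalesce()
    sc.log1p_()                                  # in-place log1p on the coalesced tensor
    torch.testing.assert_close(sc.to_dense(), torch.tensor([1., 0., 2.]).log1p())
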
2383 ).coalesce()
2393 ).coalesce()
2448 ).coalesce()
2458 ).coalesce()
2507 # test coalesce on integral dtype tensor
2509 op(sparse_tensor.clone().coalesce()).to_dense()
2511 self.assertEqual(expected_output, op(sparse_tensor.clone().coalesce()).to_dense())
2528 ).coalesce()
2538 ).coalesce()
2675 out = x.new(indices, values).coalesce()
2676 x_c = x.coalesce()
3145 self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
3146 self.assertEqual(list(t.coalesce().values().size()), [1, 3])
3153 self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
3154 self.assertEqual(list(t.coalesce().values().size()), [1, 3])
3161 self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
3162 self.assertEqual(list(t.coalesce().values().size()), [1, 3])
3169 self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
3170 self.assertEqual(list(t.coalesce().values().size()), [1, 3])
3177 self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
3178 self.assertEqual(list(t.coalesce().values().size()), [1, 3])
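
The five repeated checks above all expect the same post-coalesce shapes; one construction consistent with them, assuming a hybrid COO tensor with 2 sparse dims and a length-3 dense value whose duplicate entries collapse to a single index (the concrete numbers are illustrative):

    import torch

    i = torch.tensor([[0, 0, 0],
                      [1, 1, 1]])                # three copies of the index (0, 1)
    v = torch.ones(3, 3)                         # each entry carries a length-3 dense value
    t = torch.sparse_coo_tensor(i, v, (2, 2, 3))
    tc = t.coalesce()
    assert list(tc.indices().size()) == [2, 1]   # 2 sparse dims, 1 unique index left
    assert list(tc.values().size()) == [1, 3]    # one summed value row of dense size 3
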
3276 sparse = sparse.coalesce()
3317 sparse = sparse.coalesce()
3414 x = x.coalesce()
3617 return a.grad.sparse_mask(a_s.coalesce()), b.grad.sparse_mask(b_s.coalesce())
3627 g = c2.sparse_mask(c.coalesce())
3699 out.coalesce().values().sum().backward()
3743 …ertEqual(s_res, torch.sparse_coo_tensor(s_res._indices(), s_res._values(), s_res.shape).coalesce())
3768 # This case always coalesces inputs, and that could lead to loss of precision,
3795 def check_empty(sparse_shape, nnz, dense_shape, coalesce):
3800 … x = self._gen_sparse(sparse_dim, nnz_val, empty_sparse_shape, dtype, device, coalesce)[0]
3864 def check_empty(sparse_shape, nnz, dense_shape, coalesce):
3869 … s = self._gen_sparse(sparse_dim, nnz_val, empty_sparse_shape, dtype, device, coalesce)[0]
3894 # coalesce.to_dense != to_dense
4086 # to_dense uses coalesce which isn't implemented for bool
4127 sample.input = sample.input.to_sparse().coalesce()
5030 torch.Tensor.coalesce,
5049 coalesce=({torch.sparse_coo},
5050 … "coalesce expected sparse coordinate tensor layout but got (Sparse(Csr|Csc|Bsr|Bsc)|Strided)"),
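
The entry above maps coalesce to the error raised for non-COO layouts; a sketch of triggering it, assuming the message for a CSR input matches the quoted pattern:

    import torch

    csr = torch.eye(3).to_sparse_csr()
    try:
        csr.coalesce()
    except RuntimeError as e:
        # expected to match the pattern above, e.g.
        # "coalesce expected sparse coordinate tensor layout but got SparseCsr"
        print(e)
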
5308 x = x.coalesce()