
Searched full:compressed_indices (Results 1 – 25 of 30) sorted by relevance


/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/
SparseCsrTensor.cpp
128 static void _validate_sparse_compressed_tensor_args_worker(const Tensor& compressed_indices, const … in _validate_sparse_compressed_tensor_args_worker() argument
153 TORCH_CHECK(compressed_indices.layout() == kStrided, in _validate_sparse_compressed_tensor_args_worker()
154 …ed ", compressed_indices_name, " to be a strided tensor but got ", compressed_indices.layout(), " … in _validate_sparse_compressed_tensor_args_worker()
157 const auto batch_ndim = compressed_indices.dim() - 1; in _validate_sparse_compressed_tensor_args_worker()
176 … compressed_indices_name, " must have dimensionality >= 1 but got ", compressed_indices.dim()); in _validate_sparse_compressed_tensor_args_worker()
180 compressed_indices.dim() == plain_indices.dim(), in _validate_sparse_compressed_tensor_args_worker()
182 compressed_indices.dim(), " and ", plain_indices.dim(), ", respectively"); in _validate_sparse_compressed_tensor_args_worker()
195 TORCH_CHECK(compressed_indices.stride(-1) == 1, in _validate_sparse_compressed_tensor_args_worker()
216 …DimVector compressed_indices_batchsize = DimVector(compressed_indices.sizes().slice(0, batch_ndim)… in _validate_sparse_compressed_tensor_args_worker()
242 compressed_indices.size(-1) == compressed_dim_size + 1, in _validate_sparse_compressed_tensor_args_worker()
[all …]
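
The checks quoted above live in _validate_sparse_compressed_tensor_args_worker and are reachable from Python through torch._validate_sparse_compressed_tensor_args (see test_sparse.py and tensor_new.cpp further down). A minimal sketch of a well-formed CSR triple passing validation and a malformed compressed_indices length failing it; the positional argument order (compressed_indices, plain_indices, values, size, layout) is inferred from the snippets in this listing, so treat it as an assumption.

import torch

# Well-formed CSR components for a 3x3 matrix with 4 non-zeros:
# compressed_indices has length nrows + 1, starts at 0 and ends at nnz.
compressed_indices = torch.tensor([0, 2, 3, 4])
plain_indices = torch.tensor([0, 2, 1, 2])
values = torch.tensor([1., 2., 3., 4.])
size = (3, 3)

# Assumed argument order, mirroring the parser signature quoted from tensor_new.cpp.
torch._validate_sparse_compressed_tensor_args(
    compressed_indices, plain_indices, values, size, torch.sparse_csr)

# A compressed_indices of the wrong length (!= compressed_dim_size + 1) should
# trip the size check seen at line 242 above.
try:
    torch._validate_sparse_compressed_tensor_args(
        torch.tensor([0, 2, 4]), plain_indices, values, size, torch.sparse_csr)
except RuntimeError as err:
    print("rejected:", err)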
ValidateCompressedIndicesCommon.h
39 // use `cidx/idx` to refer to `compressed_indices/plain_indices` respectively.
81 // 0 <= compressed_indices[..., 1:] - compressed_indices[..., :-1] <= plain_dim.
117 // plain_indices[..., compressed_indices[..., i - 1]:compressed_indices[..., i]]
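
The comments above spell out the invariants the validation kernels enforce on cidx (= compressed_indices). A small tensorized restatement of those invariants; the helper name is illustrative, not the kernel defined in ValidateCompressedIndicesCommon.h.

import torch

def check_cidx_invariants(cidx: torch.Tensor, plain_dim: int, nnz: int) -> None:
    # Hypothetical helper restating the invariants quoted above.
    assert (cidx[..., 0] == 0).all(), "cidx[..., 0] must be 0"
    assert (cidx[..., -1] == nnz).all(), "cidx[..., -1] must equal nnz"
    slice_widths = cidx[..., 1:] - cidx[..., :-1]
    assert ((slice_widths >= 0) & (slice_widths <= plain_dim)).all(), \
        "0 <= cidx[..., 1:] - cidx[..., :-1] <= plain_dim must hold"

# crow_indices of a 3x3 CSR matrix with 4 non-zeros.
check_cidx_invariants(torch.tensor([0, 2, 3, 4]), plain_dim=3, nnz=4)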
SparseCsrTensorMath.cpp
226 auto [compressed_indices, plain_indices] = getCompressedPlainIndices(sparse); in intersection_binary_op_with_wrapped_scalar()
228 compressed_indices.clone(), in intersection_binary_op_with_wrapped_scalar()
298 auto compressed_indices = AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(input.layout(), in get_result_tensor_for_unary_op() local
308 compressed_indices.clone(), in get_result_tensor_for_unary_op()
351 auto [compressed_indices, plain_indices] = at::sparse_csr::getCompressedPlainIndices(mask); in sparse_mask_sparse_compressed()
354 compressed_indices, in sparse_mask_sparse_compressed()
631 // its compressed_indices tensor will contain junk values in addmm_out_sparse_compressed_cpu()
SparseBlasImpl.cpp
178 auto [compressed_indices, plain_indices] = at::sparse_csr::getCompressedPlainIndices(compressed); in _compressed_row_strided_mm_out()
198 compressed_indices, in _compressed_row_strided_mm_out()
200 compressed_indices.scalar_type() == kInt).select(0, 0); in _compressed_row_strided_mm_out()
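
at::sparse_csr::getCompressedPlainIndices is the C++ helper that picks the right index accessors for a given compressed layout. A Python-side sketch of the same dispatch; the function name here is illustrative, while crow_indices/col_indices/ccol_indices/row_indices are the public accessors used in the Python results below.

import torch

def get_compressed_plain_indices(t: torch.Tensor):
    # Row-compressed layouts expose crow/col indices,
    # column-compressed layouts expose ccol/row indices.
    if t.layout in (torch.sparse_csr, torch.sparse_bsr):
        return t.crow_indices(), t.col_indices()
    if t.layout in (torch.sparse_csc, torch.sparse_bsc):
        return t.ccol_indices(), t.row_indices()
    raise ValueError(f"expected a sparse compressed layout, got {t.layout}")

compressed_indices, plain_indices = get_compressed_plain_indices(torch.eye(3).to_sparse_csr())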
/aosp_15_r20/external/pytorch/benchmarks/operator_benchmark/pt/
qembedding_bag_lookups_test.py
156 self.compressed_indices = None
161 self.compressed_indices,
172 "compressed_indices": self.compressed_indices,
186 compressed_indices: Optional[torch.Tensor],
196 compressed_indices_mapping=compressed_indices,
268 self.compressed_indices = None
273 self.compressed_indices,
284 "compressed_indices": self.compressed_indices,
298 compressed_indices: Optional[torch.Tensor],
308 compressed_indices_mapping=self.compressed_indices,
/aosp_15_r20/external/pytorch/test/
test_sparse_csr.py
249 … for (compressed_indices, plain_indices, values), kwargs in self.generate_simple_inputs(
263 compressed_indices_expect = compressed_indices
268 compressed_indices = compressed_indices.tolist()
276 compressed_indices, plain_indices, values, requires_grad=requires_grad)
279 compressed_indices, plain_indices, values, size,
284 compressed_indices, plain_indices, values,
288 compressed_indices, plain_indices, values, size,
350 compressed_indices, plain_indices = s.crow_indices(), s.col_indices()
352 compressed_indices, plain_indices = s.ccol_indices(), s.row_indices()
355 compressed_indices_shape=compressed_indices.shape,
[all …]
test_sparse.py
4279 compressed_indices = torch.empty((*batchsize, nof_compressed_indices), device='meta', dtype=index_…
4284 compressed_indices,
4341 compressed_indices = torch.empty((*batchsize, nof_compressed_indices), device='meta', dtype=index_…
4344 compressed_indices,
4529 compressed_indices = torch.tensor([0, 0, 1])
4536 …return torch.sparse_compressed_tensor(compressed_indices, invalid_plain_indices, values, shape, la…
4538 …return torch.sparse_compressed_tensor(compressed_indices, invalid_plain_indices, values, shape, la…
4911 compressed_indices, plain_indices = r.crow_indices(), r.col_indices()
4913 compressed_indices, plain_indices = r.ccol_indices(), r.row_indices()
4914 … torch._validate_sparse_compressed_tensor_args(compressed_indices, plain_indices, r.values(),
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
TensorConversions.cpp
182 Tensor& compressed_indices, in reshape_2d_sparse_compressed_members_to_nd_batched() argument
191 compressed_indices = compressed_to_batched_compressed_indices( in reshape_2d_sparse_compressed_members_to_nd_batched()
192 compressed_indices, n_batch, /*out_int32*/ false); in reshape_2d_sparse_compressed_members_to_nd_batched()
202 compressed_indices = compressed_indices.reshape(batchsize_infer_last); in reshape_2d_sparse_compressed_members_to_nd_batched()
301 auto [compressed_indices, plain_indices] = at::sparse_csr::getCompressedPlainIndices(self); in _to_copy()
314 compressed_indices, in _to_copy()
315 compressed_indices.scalar_type(), in _to_copy()
648 auto [compressed_indices, plain_indices] = in sparse_compressed_to_dense()
657 compressed_indices.unsqueeze_(0); in sparse_compressed_to_dense()
664 compressed_indices = compressed_indices.flatten(0, batch_ndim - 1); in sparse_compressed_to_dense()
[all …]
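
sparse_compressed_to_dense above flattens any batch dimensions of compressed_indices before scattering values back into a strided tensor. Plain usage of the public conversion API as a round-trip sanity check (nothing assumed beyond to_sparse_csr/to_dense):

import torch

dense = torch.tensor([[0., 1., 0.],
                      [2., 0., 3.]])
csr = dense.to_sparse_csr()
print(csr.crow_indices())   # tensor([0, 1, 3])
print(csr.col_indices())    # tensor([1, 0, 2])
assert torch.equal(csr.to_dense(), dense)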
TensorFactories.cpp
1291 auto compressed_indices = at::empty(compressed_indices_size, options); in zeros_sparse_compressed_symint() local
1292 compressed_indices.zero_(); in zeros_sparse_compressed_symint()
1296 return at::_sparse_compressed_tensor_unsafe(compressed_indices, in zeros_sparse_compressed_symint()
1406 auto [compressed_indices, plain_indices] = at::sparse_csr::getCompressedPlainIndices(res); in zeros_like()
1407 compressed_indices.zero_(); in zeros_like()
/aosp_15_r20/external/pytorch/aten/src/ATen/
SparseCsrTensorImpl.cpp
173 auto [compressed_indices, plain_indices] = in resize_as_sparse_compressed_tensor_()
176 if (crow_indices_.sizes() != compressed_indices.sizes()) { in resize_as_sparse_compressed_tensor_()
177 crow_indices_.resize_as_(compressed_indices); in resize_as_sparse_compressed_tensor_()
184 crow_indices_.copy_(compressed_indices); in resize_as_sparse_compressed_tensor_()
SparseCsrTensorUtils.h
367 auto [compressed_indices, plain_indices] = in only_sparse_compressed_binary_op_trivial_cases()
371 compressed_indices, in only_sparse_compressed_binary_op_trivial_cases()
399 auto [compressed_indices, plain_indices] = in to_type()
402 compressed_indices, in to_type()
SparseCsrTensorImpl.h
56 const Tensor& compressed_indices() const { in compressed_indices() function
200 dest_sparse_impl->crow_indices_ = src_sparse_impl->compressed_indices(); in copy_tensor_metadata()
/aosp_15_r20/external/pytorch/torch/multiprocessing/
reductions.py
468 compressed_indices = rebuild_compressed_indices_func(
474 compressed_indices, plain_indices, values, shape, layout=layout
495 compressed_indices = sparse.crow_indices()
498 compressed_indices = sparse.ccol_indices()
505 ) = reduce_tensor(compressed_indices)
/aosp_15_r20/external/pytorch/docs/source/
sparse.rst
667 We say that an indices tensor ``compressed_indices`` uses CSR
670 - ``compressed_indices`` is a contiguous strided 32 or 64 bit
672 - ``compressed_indices`` shape is ``(*batchsize,
675 - ``compressed_indices[..., 0] == 0`` where ``...`` denotes batch
677 - ``compressed_indices[..., compressed_dim_size] == nse`` where
679 - ``0 <= compressed_indices[..., i] - compressed_indices[..., i -
1094 >>> compressed_indices = torch.tensor([0, 2, 4])
1097 …>>> csr = torch.sparse_compressed_tensor(compressed_indices, plain_indices, values, layout=torch.s…
1103 …>>> csc = torch.sparse_compressed_tensor(compressed_indices, plain_indices, values, layout=torch.s…
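
The doc example above (its plain_indices and values lines are elided in this listing) shows that the same (compressed_indices, plain_indices, values) triple can be read as either CSR or CSC depending on the layout argument. A self-contained sketch with made-up plain_indices and values, not the elided ones from sparse.rst:

import torch

compressed_indices = torch.tensor([0, 2, 4])
plain_indices = torch.tensor([0, 1, 0, 1])
values = torch.tensor([1., 2., 3., 4.])

# Size is inferred from the indices; only the layout changes the interpretation.
csr = torch.sparse_compressed_tensor(compressed_indices, plain_indices, values,
                                     layout=torch.sparse_csr)
csc = torch.sparse_compressed_tensor(compressed_indices, plain_indices, values,
                                     layout=torch.sparse_csc)
print(csr.to_dense())   # [[1., 2.], [3., 4.]]  (rows are compressed)
print(csc.to_dense())   # [[1., 3.], [2., 4.]]  (columns are compressed)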
/aosp_15_r20/external/pytorch/torch/csrc/utils/
tensor_new.cpp
968 Tensor compressed_indices = internal_new_from_data( in sparse_compressed_tensor_ctor_worker() local
998 compressed_indices, in sparse_compressed_tensor_ctor_worker()
1024 Tensor compressed_indices = internal_new_from_data( in sparse_compressed_tensor_ctor_worker() local
1054 compressed_indices, in sparse_compressed_tensor_ctor_worker()
1309 …"_validate_sparse_compressed_tensor(PyObject* compressed_indices, PyObject* plain_indices, PyObjec… in _validate_sparse_compressed_tensor_args()
1323 Tensor compressed_indices = internal_new_from_data( in _validate_sparse_compressed_tensor_args() local
1340 compressed_indices, in _validate_sparse_compressed_tensor_args()
1394 Tensor compressed_indices = internal_new_from_data( in _validate_sparse_compressed_tensor_args_template() local
1412 compressed_indices, plain_indices, values, r.intlist(3), required_layout); in _validate_sparse_compressed_tensor_args_template()
/aosp_15_r20/external/pytorch/torch/
_utils.py
266 compressed_indices, plain_indices = (
271 compressed_indices, plain_indices = (
276 compressed_indices, plain_indices, t.values(), t.size(), t.layout
314 compressed_indices, plain_indices, values, size = data
316 compressed_indices,
_tensor_str.py
513 compressed_indices = compressed_indices_method(self).detach()
518 compressed_indices, indent + len(compressed_indices_prefix)
520 if compressed_indices.numel() == 0 or is_meta:
522 tuple(compressed_indices.shape)
_tensor.py
381 compressed_indices, plain_indices = (
386 compressed_indices, plain_indices = (
393 compressed_indices,
/aosp_15_r20/external/pytorch/torch/sparse/
__init__.py
625 compressed_indices=obj.crow_indices(),
631 compressed_indices=obj.ccol_indices(),
659 d["compressed_indices"],
/aosp_15_r20/external/pytorch/torch/autograd/
gradcheck.py
131 compressed_indices = (
142 batch_numel = compressed_indices.numel() // compressed_indices.shape[-1]
1678 compressed_indices, plain_indices = x.crow_indices(), x.col_indices()
1680 compressed_indices, plain_indices = x.ccol_indices(), x.row_indices()
1690 compressed_indices,
/aosp_15_r20/external/pytorch/torch/testing/_internal/
common_utils.py
3316 compressed_indices = self._make_crow_indices(n_compressed_dims, n_plain_dims, nnz, device=device, …
3319 count = compressed_indices[i + 1] - compressed_indices[i]
3320 plain_indices[compressed_indices[i]:compressed_indices[i + 1]], _ = torch.sort(
3325 return values, compressed_indices, plain_indices
3339 compressed_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1)
3341 return torch.sparse_compressed_tensor(compressed_indices, plain_indices,
3419 …(compressed_indices, plain_indices, values), dict(size=expected_size_from_shape_inference, device=…
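
The test helpers above stack per-sample index tensors into batched components, i.e. compressed_indices of shape (*batch_shape, compressed_dim + 1) with every batch member sharing the same nnz. A minimal hand-built example of that batched layout (the sizes are illustrative):

import torch

# Two 2x2 CSR matrices in one batched tensor; both batch members have nnz == 2.
ci = torch.stack([torch.tensor([0, 1, 2]),    # [[1., 0.], [0., 2.]]
                  torch.tensor([0, 2, 2])])   # [[3., 4.], [0., 0.]]
pi = torch.stack([torch.tensor([0, 1]),
                  torch.tensor([0, 1])])
vals = torch.stack([torch.tensor([1., 2.]),
                    torch.tensor([3., 4.])])
batched = torch.sparse_compressed_tensor(ci, pi, vals, (2, 2, 2),
                                         layout=torch.sparse_csr)
print(batched.to_dense())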
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkl/
SparseBlasImpl.cpp
632 const auto compressed_indices = std::get<0>(at::sparse_csr::getCompressedPlainIndices(t)); in triangular_solve_out_sparse_csr()
633 … const auto diag_indices = at::arange(n, compressed_indices.options()).unsqueeze(0).expand({2, n}); in triangular_solve_out_sparse_csr()
/aosp_15_r20/external/pytorch/torch/csrc/autograd/
python_torch_functions_manual.cpp
195 …({"sparse_compressed_tensor(PyObject* compressed_indices, PyObject* plain_indices, PyObject* value…
196 …"sparse_compressed_tensor(PyObject* compressed_indices, PyObject* plain_indices, PyObject* values,…
input_buffer.cpp
49 impl->compressed_indices().storage().data_ptr(), stream); in record_stream_any_impl()
/aosp_15_r20/external/pytorch/torch/_dynamo/
utils.py
822 compressed_indices = x.crow_indices()
825 compressed_indices = x.ccol_indices()
828 torch_clone(compressed_indices),
