/aosp_15_r20/external/pytorch/test/
H A D | test_view_ops.py | 400 a_split_dim0 = a.tensor_split(7, 0) 403 a_split_dim1 = a.tensor_split(7, 1) 1839 result1 = torch.tensor_split(a, sections, dim) 1840 result2 = torch.tensor_split( 1883 result_1 = torch.tensor_split(a, indices, dim) 1884 result_2 = torch.tensor_split( 1910 r"tensor_split expected at least a 1-dimensional tensor, " 1920 r"tensor_split expected at least a 1-dimensional tensor, " 1945 torch.tensor_split(a, sections_or_indices, dim) 1947 torch.tensor_split(a, torch.tensor(sections_or_indices), dim) [all …]
|
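The test_view_ops.py snippets above exercise both calling conventions of torch.tensor_split (an integer section count and a list of split indices) as well as the 0-dimensional-input error. A minimal sketch of those behaviours, with illustrative shapes not taken from the test:

```python
import torch

a = torch.arange(12).reshape(3, 4)

# An integer is a section count; sections need not divide the dim evenly.
parts = torch.tensor_split(a, 2, 0)
print([p.shape for p in parts])   # [torch.Size([2, 4]), torch.Size([1, 4])]

# A list of integers gives explicit split points along the chosen dim.
parts = torch.tensor_split(a, [1, 3], 1)
print([p.shape for p in parts])   # [torch.Size([3, 1]), torch.Size([3, 2]), torch.Size([3, 1])]

# 0-dimensional inputs are rejected, matching the error string checked above.
try:
    torch.tensor_split(torch.tensor(1.0), 2)
except RuntimeError as e:
    print(e)  # tensor_split expected at least a 1-dimensional tensor, ...
```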
/aosp_15_r20/external/pytorch/torch/distributed/pipelining/ |
H A D | microbatch.py | 190 chunk_tensors = torch.tensor_split( 426 # Infer size of individual chunks by running `tensor_split` again 430 meta_chunks = torch.tensor_split(
|
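microbatch.py uses tensor_split to cut a full minibatch into pipeline chunks and runs the same split again on meta tensors to infer per-chunk sizes. A simplified sketch of that chunking idea (split_batch is a hypothetical helper for illustration, not the pipelining API):

```python
import torch

def split_batch(batch: torch.Tensor, n_chunks: int, batch_dim: int = 0):
    # tensor_split tolerates a batch size that does not divide evenly:
    # the leading chunks are one element larger than the trailing ones.
    return torch.tensor_split(batch, n_chunks, dim=batch_dim)

x = torch.randn(10, 16)
print([c.shape[0] for c in split_batch(x, 4)])   # [3, 3, 2, 2]

# The same call on a meta tensor yields the chunk shapes without touching data,
# mirroring the "infer size of individual chunks" step noted in the snippet.
meta_chunks = split_batch(torch.empty(10, 16, device="meta"), 4)
print([c.shape for c in meta_chunks])
```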
/aosp_15_r20/external/pytorch/aten/src/ATen/native/ |
H A D | TensorShape.cpp | 179 #include <ATen/ops/tensor_split.h> 960 …TORCH_CHECK(self.dim() > 0, "tensor_split expected at least a 1-dimensional tensor, but got a tens… in tensor_split_sections_symint() 981 …TORCH_CHECK(self.dim() > 0, "tensor_split expected at least a 1-dimensional tensor, but got a tens… in _tensor_split_indices() 995 std::vector<Tensor> tensor_split(const Tensor& self, IntArrayRef indices, int64_t dim) { in tensor_split() function 1003 std::vector<Tensor> tensor_split(const Tensor& self, const Tensor& tensor_indices_or_sections, int6… in tensor_split() function 1004 …TORCH_CHECK(self.dim() > 0, "tensor_split expected at least a 1-dimensional tensor, but got a tens… in tensor_split() 1007 "tensor_split expected tensor_indices_or_sections to be on cpu, but it's on ", split_device); in tensor_split() 1010 … "tensor_split expected tensor_indices_or_sections to have dtype of long, but got ", split_dtype); in tensor_split() 1013 …"tensor_split expected tensor_indices_or_sections to be a zero-dimensional or one-dimensional tens… in tensor_split() 1017 return self.tensor_split(sections, dim); in tensor_split() [all …]
|
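The TORCH_CHECKs visible in TensorShape.cpp above are observable from Python: when indices_or_sections is a tensor it must live on the CPU, have long dtype, and be zero- or one-dimensional; a 0-d tensor acts as a section count and a 1-d tensor as an index list. A short sketch:

```python
import torch

a = torch.arange(10)

# 0-d long tensor -> interpreted as a section count.
print(len(torch.tensor_split(a, torch.tensor(3))))                        # 3

# 1-d long tensor -> interpreted as split indices.
print([t.numel() for t in torch.tensor_split(a, torch.tensor([2, 7]))])   # [2, 5, 3]

# Wrong dtype trips the "dtype of long" check shown above.
try:
    torch.tensor_split(a, torch.tensor([2.0, 7.0]))
except RuntimeError as e:
    print(e)
```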
H A D | NonSymbolicBC.h | 25 TORCH_API std::vector<Tensor> tensor_split(const Tensor& self, IntArrayRef indices, int64_t dim);
|
/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/static/ |
H A D | native_ops.cpp | 860 REGISTER_NATIVE_OPERATOR_FUNCTOR(aten::tensor_split, aten_tensor_split, [](Node* n) -> SROperator { in __anon75e5f0515402() 862 … "aten::tensor_split.indices(Tensor(a -> *) self, int[] indices, int dim=0) -> Tensor(a)[]"))) { in __anon75e5f0515402() 867 pnode->Output(0) = at::native::tensor_split(a, b, c); in __anon75e5f0515402() 872 … "aten::tensor_split.sections(Tensor(a -> *) self, int sections, int dim=0) -> Tensor(a)[]"))) { in __anon75e5f0515402() 882 …"aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_secti… in __anon75e5f0515402() 887 pnode->Output(0) = at::native::tensor_split(a, b, c); in __anon75e5f0515402()
|
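The three schemas registered for the static runtime above correspond to the op's three overloads, which can also be reached directly through torch.ops.aten (torch.tensor_split dispatches to the appropriate one); illustrative calls:

```python
import torch

x = torch.arange(8)

# .sections: integer section count
print(len(torch.ops.aten.tensor_split.sections(x, 4)))                        # 4

# .indices: int[] split points
print([t.numel() for t in torch.ops.aten.tensor_split.indices(x, [2, 5])])    # [2, 3, 3]

# .tensor_indices_or_sections: a CPU long tensor, 0-d (sections) or 1-d (indices)
pieces = torch.ops.aten.tensor_split.tensor_indices_or_sections(x, torch.tensor([2, 5]))
print(len(pieces))                                                            # 3
```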
/aosp_15_r20/external/pytorch/test/mobile/model_test/ |
H A D | tensor_ops.py | 206 torch.tensor_split(x, 1), 207 torch.tensor_split(x, [0, 1]),
|
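The two mobile-coverage calls above hit two edge cases: a section count of 1 returns the whole tensor as a single chunk, and an index of 0 produces an empty leading chunk. For example:

```python
import torch

x = torch.arange(4)

# sections=1: a single chunk covering the whole tensor.
print([t.tolist() for t in torch.tensor_split(x, 1)])        # [[0, 1, 2, 3]]

# indices=[0, 1]: the split at 0 yields an empty leading chunk.
print([t.tolist() for t in torch.tensor_split(x, [0, 1])])   # [[], [0], [1, 2, 3]]
```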
H A D | coverage.yaml | 588 - aten::tensor_split.indices 589 - aten::tensor_split.sections 986 aten::tensor_split.indices: 4
|
H A D | model_ops.yaml | 360 aten::tensor_split.indices: 4
|
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/ |
H A D | BatchRulesDecompositions.cpp | 294 m.impl("tensor_split.indices", native::tensor_split_indices_symint); in TORCH_LIBRARY_IMPL() 295 m.impl("tensor_split.sections", native::tensor_split_sections_symint); in TORCH_LIBRARY_IMPL()
|
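The two registrations above route tensor_split through its decompositions under functorch's batching key, so it composes with vmap; a small sketch (equal-sized chunks chosen so each output stacks cleanly across the batch):

```python
import torch

x = torch.randn(4, 10)

# Per-example function splits a length-10 vector into two halves;
# under vmap each output piece carries the batch dimension of size 4.
outs = torch.vmap(lambda t: torch.tensor_split(t, 2, dim=0))(x)
print([o.shape for o in outs])   # [torch.Size([4, 5]), torch.Size([4, 5])]
```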
/aosp_15_r20/external/pytorch/torch/_refs/ |
H A D | __init__.py | 291 "tensor_split", 3956 torch.squeeze(s, dim) for s in torch.tensor_split(t, t.shape[dim], dim) 4106 def tensor_split( function 4113 … msg = "tensor_split: received a rank zero tensor, but expected a tensor of rank one or greater!" 4120 f"tensor_split: if indices_or_sections is a tensor it must be on the CPU, " 4125 msg = "tensor_split: if indices_or_sections is a tensor it must have long dtype, " 4140 msg = f"tensor_split: number of sections must be greater than 0, but was {sections}" 4164 … msg = "tensor_split: non-scalar indices_or_sections tensors must have only one dimension, " 4206 return tensor_split(a, split_size, dim) 4218 return tensor_split(a, split_sizes, dim) [all …]
|
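Line 3956 in the _refs snippet decomposes an unbind-style operation into tensor_split followed by squeeze; a standalone sketch of that pattern (the comparison against torch.unbind is my own sanity check, not part of the ref):

```python
import torch

t = torch.arange(6).reshape(2, 3)
dim = 0

# Split into shape[dim] one-slice pieces, then squeeze the split dim away.
slices = tuple(torch.squeeze(s, dim) for s in torch.tensor_split(t, t.shape[dim], dim))
print([s.shape for s in slices])                                              # [torch.Size([3]), torch.Size([3])]
print(all(torch.equal(a, b) for a, b in zip(slices, torch.unbind(t, dim))))   # True
```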
/aosp_15_r20/external/pytorch/torch/_functorch/ |
H A D | vmap.py | 360 t.tensor_split(split_idxs, dim=in_dim) 421 # >>> t.tensor_split([1, 2, 3, 4, 5, 6])
|
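The chunking path in vmap.py shown above splits each batched input with tensor_split when a chunk size is requested; a sketch checking that a chunked run of torch.vmap (via its existing chunk_size keyword) matches the unchunked one:

```python
import torch

def per_example(x):
    return x.sin().sum()

x = torch.randn(7, 3)

full = torch.vmap(per_example)(x)
chunked = torch.vmap(per_example, chunk_size=2)(x)
print(torch.allclose(full, chunked))   # True
```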
/aosp_15_r20/external/pytorch/aten/src/ATen/core/ |
H A D | NamedRegistrations.cpp | 476 m.impl("tensor_split.indices", CppFunction::makeFallthrough()); in TORCH_LIBRARY_IMPL() 477 m.impl("tensor_split.sections", CppFunction::makeFallthrough()); in TORCH_LIBRARY_IMPL() 478 m.impl("tensor_split.tensor_indices_or_sections", CppFunction::makeFallthrough()); in TORCH_LIBRARY_IMPL()
|
/aosp_15_r20/external/pytorch/torch/testing/_internal/ |
H A D | custom_op_db.py | 316 return [xi.clone() for xi in torch.tensor_split(x, splits, dim)] 349 return [xi.clone() for xi in torch.tensor_split(x, splits, dim)], len(splits)
|
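The custom-op snippet above clones every piece so the outputs own their memory instead of aliasing the input; a standalone sketch of that pattern (split_copy is a hypothetical name, not the op registered in custom_op_db.py):

```python
import torch

def split_copy(x: torch.Tensor, splits, dim: int = 0):
    # clone() detaches each piece from x's storage, so the results are not views.
    return [xi.clone() for xi in torch.tensor_split(x, splits, dim)]

x = torch.zeros(6)
pieces = split_copy(x, [2, 4])
pieces[0].fill_(1.0)
print(x)   # still all zeros: the copies do not alias x
```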
/aosp_15_r20/external/pytorch/aten/src/ATen/ |
H A D | LegacyBatchingRegistrations.cpp | 222 auto result = at::tensor_split(self_physical.tensor(), sections, dim_physical); in tensor_split_sections_batching_rule() 230 auto result = at::tensor_split(self_physical.tensor(), indices, dim_physical); in tensor_split_indices_batching_rule() 1103 m.impl("tensor_split.sections", tensor_split_sections_batching_rule); in TORCH_LIBRARY_IMPL() 1104 m.impl("tensor_split.indices", tensor_split_indices_batching_rule); in TORCH_LIBRARY_IMPL()
|
/aosp_15_r20/external/executorch/test/end2end/ |
H A D | test_end2end.py | 206 split = torch.ops.aten.tensor_split.sections(x, 3) 653 … with error: Missing out variants: {'aten::select', 'aten::_shape_as_tensor', 'aten::tensor_split'}
|
/aosp_15_r20/external/pytorch/test/functorch/ |
H A D | test_ops.py | 351 xfail("tensor_split"), # data_ptr composite compliance 395 # 'tensor_split' not composite compliant, see vjp_fail 555 xfail("tensor_split"), 1288 xfail("tensor_split"), # data_ptr composite compliance 1509 xfail("tensor_split"),
|
H A D | test_vmap_registrations.py | 216 "aten::tensor_split.tensor_indices_or_sections",
|
/aosp_15_r20/external/pytorch/functorch/op_analysis/ |
H A D | public_api | 208 tensor_split
|
H A D | annotated_ops | 70 tensor_split, view/reshape
|
/aosp_15_r20/external/pytorch/docs/source/ |
H A D | tensor_view.rst | 80 - :meth:`~torch.Tensor.tensor_split`
|
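tensor_view.rst lists tensor_split among the view ops: each returned piece shares storage with the input, so in-place writes through a piece are visible in the original. For example:

```python
import torch

x = torch.zeros(6)
first, rest = torch.tensor_split(x, [2])

first.fill_(1.0)           # writing through the view...
print(x)                   # tensor([1., 1., 0., 0., 0., 0.])
print(first._base is x)    # True: the piece is a view of x
```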
H A D | torch.rst | 133 tensor_split
|
/aosp_15_r20/external/pytorch/test/export/ |
H A D | test_pass_infra.py | 20 return torch.ops.aten.tensor_split.sections(y, 2)
|
/aosp_15_r20/external/pytorch/torch/_decomp/ |
H A D | decompositions.py | 1473 @aten.tensor_split.tensor_indices_or_sections.py_impl( 1486 lambda: "tensor_split expected tensor_indices_or_sections to be a zero-dimensional " 1492 return self.tensor_split(sections, dim) 1496 # can't: tensor_split works with negative values in indices: 1498 # >>> torch.tensor_split(torch.randn(10), torch.tensor([-5, 5])) 1504 return self.tensor_split(indices, dim)
|
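The decomposition above dispatches on the rank of tensor_indices_or_sections (0-d falls back to the sections overload, 1-d to the indices overload), and its comment notes that negative indices are honoured; a sketch of that negative-index behaviour using the same example as the comment:

```python
import torch

x = torch.arange(10)

# Negative entries count from the end, exactly like slicing: on a length-10
# tensor, [-5, 5] splits at index 5 twice, producing an empty middle chunk.
pieces = torch.tensor_split(x, torch.tensor([-5, 5]))
print([p.tolist() for p in pieces])   # [[0, 1, 2, 3, 4], [], [5, 6, 7, 8, 9]]
```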
/aosp_15_r20/external/pytorch/torch/onnx/ |
H A D | symbolic_opset13.py | 137 @_onnx_symbolic("aten::tensor_split") 139 def tensor_split( function
|
/aosp_15_r20/external/executorch/backends/apple/mps/test/ |
H A D | test_mps_models.py | 154 split = torch.ops.aten.tensor_split.sections(x, 3)
|