
Searched refs: split_sizes (Results 1 – 25 of 49), sorted by relevance


/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
TensorShape.cu
188 at::IntArrayRef split_sizes, in get_split_base_addrs() argument
195 split_base_addrs.reserve(split_sizes.size()); in get_split_base_addrs()
196 for (const auto& split_size : split_sizes) { in get_split_base_addrs()
215 at::IntArrayRef split_sizes, in get_split_chunk_sizes() argument
220 split_chunk_sizes.reserve(split_sizes.size()); in get_split_chunk_sizes()
221 for (const auto& split_size : split_sizes) { in get_split_chunk_sizes()
620 at::IntArrayRef split_sizes, in split_with_sizes_copy_out_cuda_contiguous_no_cast() argument
625 detail::get_split_base_addrs(self, split_sizes, dim); in split_with_sizes_copy_out_cuda_contiguous_no_cast()
629 detail::get_split_chunk_sizes(self, split_sizes, dim); in split_with_sizes_copy_out_cuda_contiguous_no_cast()
667 for (size_t split_idx = 0; split_idx < split_sizes.size(); ++split_idx) { in split_with_sizes_copy_out_cuda_contiguous_no_cast()
[all …]
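
The CUDA path above implements split_with_sizes_copy by precomputing, for each requested chunk, its base address and byte length in the contiguous source. A minimal sketch of the same bookkeeping in Python (the sizes [3, 5, 2] are illustrative):

    import torch

    x = torch.arange(10)  # int64, so element_size() == 8
    chunks = torch.split_with_sizes(x, [3, 5, 2], dim=0)
    assert [c.numel() for c in chunks] == [3, 5, 2]

    # Byte offset of each chunk into the contiguous source buffer,
    # mirroring what get_split_base_addrs() computes.
    elem = x.element_size()
    base_addrs, start = [], 0
    for s in [3, 5, 2]:
        base_addrs.append(start * elem)
        start += s
    assert base_addrs == [0, 24, 64]
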
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/
Split.cpp
23 const std::vector<int64_t>& split_sizes, in add_split_with_sizes_default_node() argument
34 VK_CHECK_COND(out_list->size() == split_sizes.size()); in add_split_with_sizes_default_node()
36 for (int split_idx = 0; split_idx < split_sizes.size(); split_idx++) { in add_split_with_sizes_default_node()
37 int64_t split_size = split_sizes[split_idx]; in add_split_with_sizes_default_node()
104 std::vector<int64_t> split_sizes = *(graph.get_int_list(split_sizes_ref)); in add_split_with_sizes_default_node() local
106 add_split_with_sizes_default_node(graph, in, split_sizes, dim, out); in add_split_with_sizes_default_node()
127 std::vector<int64_t> split_sizes(size / split_size, split_size); in add_split_tensor_node() local
129 add_split_with_sizes_default_node(graph, in, split_sizes, dim, out); in add_split_tensor_node()
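
add_split_tensor_node lowers an even split (a single split_size) to split_with_sizes by repeating that size size / split_size times. A small sketch of the equivalence, assuming the dimension divides evenly as that construction implies:

    import torch

    x = torch.randn(6, 4)
    split_size, dim = 2, 0
    sizes = [split_size] * (x.size(dim) // split_size)  # split_sizes(size / split_size, split_size)
    a = torch.split(x, split_size, dim=dim)
    b = torch.split_with_sizes(x, sizes, dim=dim)
    assert all(torch.equal(u, v) for u, v in zip(a, b))
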
/aosp_15_r20/external/executorch/kernels/portable/cpu/
op_split_with_sizes_copy.cpp
26 exec_aten::ArrayRef<int64_t> split_sizes, in split_with_sizes_copy_out() argument
38 check_split_with_sizes_copy_args(in, split_sizes, dim, out), in split_with_sizes_copy_out()
59 for (size_t i = 0; i < split_sizes.size(); i++) { in split_with_sizes_copy_out()
60 target_out_sizes[dim] = static_cast<Tensor::SizesType>(split_sizes[i]); in split_with_sizes_copy_out()
87 size_t chunk_step = split_sizes[i] * trailing_dims; in split_with_sizes_copy_out()
90 target_out_sizes[dim] = static_cast<Tensor::SizesType>(split_sizes[i]); in split_with_sizes_copy_out()
/aosp_15_r20/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
split_op.cc
147 std::vector<int64_t> split_sizes; in Compile() local
148 OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntVector(1, &split_sizes)); in Compile()
151 int64_t slice_size = split_sizes[i]; in Compile()
181 split_sizes[neg_one_dim] = in Compile()
193 int slice_size = split_sizes[i]; in Compile()
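
Line 181 shows the tf2xla kernel filling in a single -1 entry of split_sizes from whatever remains of the dimension. A plain-Python sketch of that inference (the helper name is mine, not the kernel's):

    def infer_split_sizes(split_sizes, dim_size):
        """Replace one -1 entry with the remainder of dim_size."""
        sizes = list(split_sizes)
        neg = [i for i, s in enumerate(sizes) if s == -1]
        if neg:
            (neg_one_dim,) = neg  # at most one -1 is allowed
            sizes[neg_one_dim] = dim_size - sum(s for s in sizes if s != -1)
        assert sum(sizes) == dim_size
        return sizes

    assert infer_split_sizes([1, -1, 2], 10) == [1, 7, 2]
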
/aosp_15_r20/external/pytorch/aten/src/ATen/native/nested/
NestedTensorUtils.cpp
115 c10::IntArrayRef split_sizes, in split_with_sizes_nested() argument
124 auto num_splits = split_sizes.size(); in split_with_sizes_nested()
131 for (const auto split_size : split_sizes) { in split_with_sizes_nested()
138 " (input tensor's size at dimension ", dim, "), but got split_sizes=", split_sizes); in split_with_sizes_nested()
151 auto split_size = split_sizes[split_idx]; in split_with_sizes_nested()
NestedTensorMath.cpp
276 std::vector<int64_t> split_sizes; in NestedTensor_to_padded_tensor_generic() local
277 split_sizes.reserve(sizes_num_rows); in NestedTensor_to_padded_tensor_generic()
280 split_sizes.push_back( in NestedTensor_to_padded_tensor_generic()
284 for (const auto split_size : split_sizes) { in NestedTensor_to_padded_tensor_generic()
296 buffers.reserve(split_sizes.size()); in NestedTensor_to_padded_tensor_generic()
299 for (const auto split_size : split_sizes) { in NestedTensor_to_padded_tensor_generic()
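
NestedTensor_to_padded_tensor_generic splits the flat buffer by per-row lengths and pads each piece to a common shape. A minimal sketch of the user-visible behavior via the public torch.nested API (assuming a current PyTorch build):

    import torch

    nt = torch.nested.nested_tensor([torch.ones(2), torch.ones(3)])
    padded = torch.nested.to_padded_tensor(nt, 0.0)  # pad rows to the longest length
    assert padded.shape == (2, 3)
    assert padded[0].tolist() == [1.0, 1.0, 0.0]
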
/aosp_15_r20/external/executorch/kernels/test/
op_split_with_sizes_copy_test.cpp
24 exec_aten::ArrayRef<int64_t> split_sizes, in op_split_with_sizes_copy_out() argument
28 context_, self, split_sizes, dim, out); in op_split_with_sizes_copy_out()
43 exec_aten::ArrayRef<int64_t> split_sizes = exec_aten::ArrayRef<int64_t>( in test_tensor_shape_dynamism() local
103 op_split_with_sizes_copy_out(self, split_sizes, dim, out); in test_tensor_shape_dynamism()
/aosp_15_r20/external/pytorch/torch/csrc/distributed/c10d/
Utils.hpp
499 const std::vector<int64_t>& split_sizes, in checkSplitSizes() argument
502 if (split_sizes.empty()) { in checkSplitSizes()
508 split_sizes.size() == static_cast<size_t>(group_size), in checkSplitSizes()
510 const auto sum = c10::sum_integers(split_sizes); in checkSplitSizes()
519 const std::vector<int64_t>& split_sizes, in computeLengthsAndOffsets() argument
530 if (split_sizes.empty()) { in computeLengthsAndOffsets()
535 size_t length = row_size * (equal_splits ? split_size : split_sizes[i]); in computeLengthsAndOffsets()
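
checkSplitSizes requires one entry per rank and a total matching the tensor, and computeLengthsAndOffsets then flattens split_sizes into element counts plus prefix-sum offsets for the collective. A rough Python equivalent (helper name is mine):

    def lengths_and_offsets(split_sizes, row_size):
        """Per-rank element counts and prefix-sum offsets for an uneven alltoall."""
        lengths = [s * row_size for s in split_sizes]
        offsets, acc = [], 0
        for n in lengths:
            offsets.append(acc)
            acc += n
        return lengths, offsets

    assert lengths_and_offsets([2, 1, 3], row_size=4) == ([8, 4, 12], [0, 8, 12])
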
/aosp_15_r20/external/pytorch/aten/src/ATen/
FunctionalInverses.cpp
254 …Mode inverse_return_mode, int64_t mutated_view_idx, c10::SymIntArrayRef split_sizes, int64_t dim) { in split_with_sizes_inverse() argument
259 start += split_sizes[i]; in split_with_sizes_inverse()
261 auto end = start + split_sizes[mutated_view_idx]; in split_with_sizes_inverse()
450 std::vector<c10::SymInt> split_sizes(chunks, split_size); in chunk_inverse() local
451 split_sizes[chunks - 1] = split_size - (split_size * chunks - dim_size); in chunk_inverse()
452 …it_with_sizes_inverse(base, mutated_view, inverse_return_mode, mutated_view_idx, split_sizes, dim); in chunk_inverse()
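
chunk_inverse reconstructs the split_sizes that chunk() produced: every chunk gets the ceil-divided size except the last, which absorbs the shortfall (line 451). Worked through for a concrete case:

    dim_size, chunks = 10, 3
    split_size = -(-dim_size // chunks)  # ceil(10 / 3) == 4
    split_sizes = [split_size] * chunks
    split_sizes[chunks - 1] = split_size - (split_size * chunks - dim_size)  # 4 - (12 - 10) == 2
    assert split_sizes == [4, 4, 2] and sum(split_sizes) == dim_size
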
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
LegacyBatchingRegistrations.cpp
249 std::vector<Tensor> split_with_sizes_batching_rule(const Tensor& self, SymIntArrayRef split_sizes, … in split_with_sizes_batching_rule() argument
252 return split_with_sizes_symint(self, split_sizes, dim); in split_with_sizes_batching_rule()
256 auto result = split_with_sizes_symint(self_physical.tensor(), split_sizes, dim_physical); in split_with_sizes_batching_rule()
261 … split_with_sizes_copy_batching_rule(const Tensor& self, SymIntArrayRef split_sizes, int64_t dim) { in split_with_sizes_copy_batching_rule() argument
264 return split_with_sizes_copy_symint(self, split_sizes, dim); in split_with_sizes_copy_batching_rule()
268 auto result = split_with_sizes_copy_symint(self_physical.tensor(), split_sizes, dim_physical); in split_with_sizes_copy_batching_rule()
/aosp_15_r20/external/executorch/backends/arm/test/ops/
test_split.py
37 def forward(self, x: torch.Tensor, split_sizes: list[int], dim: int):
38 return x.split_with_sizes(split_sizes=split_sizes, dim=dim)
/aosp_15_r20/external/pytorch/test/distributed/
test_functional_api.py
523 split_sizes = [(i + 1) * (rank + 1) for i in range(self.world_size)]
525 x, output_split_sizes=split_sizes, input_split_sizes=split_sizes, group=mesh
528 for idx, tensor in enumerate(torch.split(x, split_sizes)):
541 split_sizes = [(i + 1) * (rank + 1) for i in range(self.world_size)]
543 x, output_split_sizes=split_sizes, input_split_sizes=split_sizes, group=mesh
546 for idx, tensor in enumerate(torch.split(x, split_sizes)):
test_c10d_spawn.py
240 split_sizes = [(i + 1) * (self.rank + 1) for i in range(self.world_size)]
242 y, x, output_split_sizes=split_sizes, input_split_sizes=split_sizes
245 for idx, tensor in enumerate(torch.split(x, split_sizes)):
/aosp_15_r20/external/pytorch/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/
embedding_bag.py
422 split_sizes = torch.sum(
426 split_sizes = torch.cat(
433 return torch.div(result, split_sizes.unsqueeze(1))
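
For mean pooling, the sharded embedding bag divides the per-bag sums by the number of rows that went into each bag, broadcasting over the embedding dimension (line 433). A dense toy version of that final division (values are illustrative, not from the shard logic):

    import torch

    sums = torch.tensor([[2.0, 4.0], [9.0, 3.0]])  # per-bag summed embeddings
    split_sizes = torch.tensor([2, 3])             # rows contributing to each bag
    mean = torch.div(sums, split_sizes.unsqueeze(1))
    assert torch.equal(mean, torch.tensor([[1.0, 2.0], [3.0, 1.0]]))
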
/aosp_15_r20/external/executorch/backends/apple/mps/operators/
shape_ops.py
243 split_sizes = eval_shape(cast(torch.SymInt, node.args[1]))
256 split_sizes=split_sizes,
/aosp_15_r20/external/executorch/kernels/portable/cpu/util/
copy_ops_util.cpp
418 exec_aten::ArrayRef<int64_t> split_sizes, in check_split_with_sizes_copy_args() argument
425 split_sizes.size() == out.size(), in check_split_with_sizes_copy_args()
429 for (int i = 0; i < split_sizes.size(); i++) { in check_split_with_sizes_copy_args()
431 split_sizes[i] >= 0, "All split sizes must be non negative."); in check_split_with_sizes_copy_args()
432 sum += split_sizes[i]; in check_split_with_sizes_copy_args()
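
check_split_with_sizes_copy_args enforces one output per split size, non-negative sizes, and (via the accumulated sum) full coverage of the split dimension. The same checks as a standalone sketch (function name is mine):

    def check_split_with_sizes_args(split_sizes, num_outputs, dim_size):
        assert len(split_sizes) == num_outputs, "need one output tensor per split size"
        assert all(s >= 0 for s in split_sizes), "All split sizes must be non negative."
        assert sum(split_sizes) == dim_size, "split sizes must cover the whole dimension"

    check_split_with_sizes_args([3, 0, 2], num_outputs=3, dim_size=5)
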
copy_ops_util.h
141 exec_aten::ArrayRef<int64_t> split_sizes,
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
TensorShape.cpp
951 std::vector<c10::SymInt> split_sizes(chunks, split_size); in chunk() local
952 split_sizes[chunks - 1] = split_size - (split_size * chunks - dim_size); in chunk()
953 return self.split_with_sizes_symint(split_sizes, dim); in chunk()
1042 std::vector<int64_t> split_sizes(chunks, split_size); in unsafe_chunk() local
1043 split_sizes[chunks - 1] = split_size - (split_size * chunks - dim_size); in unsafe_chunk()
1044 return self.unsafe_split_with_sizes(split_sizes, dim); in unsafe_chunk()
2621 std::vector<Tensor> split_with_sizes(const Tensor& self, IntArrayRef split_sizes, int64_t dim) { in split_with_sizes() argument
2624 const int64_t num_splits = split_sizes.size(); in split_with_sizes()
2630 auto length = split_sizes[i]; in split_with_sizes()
2633 "entries, but got split_sizes=", split_sizes); in split_with_sizes()
[all …]
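
As lines 951–953 show, chunk() is just split_with_sizes with a computed size vector. A quick equivalence check:

    import torch

    x = torch.arange(10)
    sizes = [4, 4, 2]  # what chunk(3) computes for a length-10 dim
    assert all(
        torch.equal(a, b)
        for a, b in zip(torch.chunk(x, 3), torch.split_with_sizes(x, sizes))
    )
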
/aosp_15_r20/external/pytorch/torch/_inductor/fx_passes/
post_grad.py
620 split_sizes = get_arg_value(split_node, 1, "split_sizes")
622 if get_item_args != set(range(len(split_sizes))):
630 if cat_items_args_order != list(range(len(split_sizes))):
937 split_sizes = get_arg_value(split_node, 1, "split_sizes")
940 if len(cat_inputs) != len(split_sizes):
943 for cat_input, split_size in zip(cat_inputs, split_sizes):
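
The pass removes a split whose pieces are concatenated back in their original order, since that round trip is the identity. Demonstrated directly:

    import torch

    x = torch.randn(4, 6)
    split_sizes, dim = [1, 2, 3], 1
    pieces = torch.split(x, split_sizes, dim=dim)
    assert torch.equal(torch.cat(pieces, dim=dim), x)  # split + ordered cat == no-op
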
/aosp_15_r20/external/pytorch/torch/onnx/
symbolic_opset13.py
73 split_sizes = [
83 "Add", start, split_sizes[i]
119 def split_with_sizes(g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None): argument
120 return split(g, self, split_sizes, dim, _outputs)
132 g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None argument
134 return split_with_sizes(g, self, split_sizes, dim, _outputs)
symbolic_opset11.py
610 split_sizes = [
619 "Add", start, split_sizes[i]
638 def split_with_sizes(g: jit_utils.GraphContext, self, split_sizes, dim, _outputs=None): argument
639 return split(g, self, split_sizes, dim, _outputs)
/aosp_15_r20/external/pytorch/torch/_decomp/
decompositions.py
1398 self: Tensor, split_sizes: List[int], dim: int = 0
1402 for i in range(len(split_sizes)):
1404 split_sizes[i],
1409 sum(split_sizes) == self.shape[dim],
1412 num_splits = len(split_sizes)
1417 length = split_sizes[i]
1429 split_sizes: List[int],
1433 splits = split_with_sizes(self, split_sizes, dim=dim)
1450 input: Tensor, split_sizes: List[int], dim: int = 0
1452 return aten.split_with_sizes.default(input, split_sizes, dim)
[all …]
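
The decomposition materializes each piece with narrow(), advancing a running start by each requested length. A minimal reference version (name is mine), checked against the built-in op:

    import torch

    def split_with_sizes_ref(x, split_sizes, dim=0):
        assert sum(split_sizes) == x.shape[dim]
        out, start = [], 0
        for length in split_sizes:
            out.append(x.narrow(dim, start, length))
            start += length
        return out

    x = torch.arange(12).reshape(3, 4)
    got = split_with_sizes_ref(x, [1, 3], dim=1)
    want = torch.split_with_sizes(x, [1, 3], dim=1)
    assert all(torch.equal(a, b) for a, b in zip(got, want))
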
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
split_v_op.cc
199 absl::Span<const Tlen> split_sizes) { in SplitHasAlignedOutputsInFirstDimension() argument
204 for (const Tlen split_size : split_sizes) { in SplitHasAlignedOutputsInFirstDimension()
/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/static/
native_ops.cpp
733 const auto& split_sizes = p_node->Input(1).toIntList(); in __anon75e5f0514602() local
736 at::native::split_with_sizes(self, split_sizes.vec(), dim); in __anon75e5f0514602()
757 const auto& split_sizes = p_node->Input(1).toIntList(); in __anon75e5f0514902() local
760 at::native::split_with_sizes(self, split_sizes.vec(), dim); in __anon75e5f0514902()
/aosp_15_r20/external/tensorflow/tensorflow/python/distribute/
cross_device_ops.py
765 split_sizes = [split_size] * (num_splits - 1) + [split_size_last]
766 grad_packs = array_ops.split(concat_grads, split_sizes)
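
Gradient packing splits the concatenated gradients into num_splits nearly equal pieces, folding the remainder into the last. A sketch of how split_size_last plausibly falls out (the surrounding code is truncated here, so the arithmetic is an assumption):

    def pack_split_sizes(total, num_splits):
        # assumed derivation of split_size / split_size_last
        split_size = total // num_splits
        split_size_last = total - split_size * (num_splits - 1)
        return [split_size] * (num_splits - 1) + [split_size_last]

    sizes = pack_split_sizes(10, 3)
    assert sizes == [3, 3, 4] and sum(sizes) == 10
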
