/aosp_15_r20/external/executorch/kernels/test/
  op_split_copy_test.cpp
    31   int64_t split_size,  in op_split_copy_tensor_out()
    290  for (int64_t split_size = 3; split_size < 6; ++split_size) {  in TEST_F() local
    319  for (int64_t split_size = 0; split_size < 3; ++split_size) {  in TEST_F() local
    388  constexpr int64_t split_size = 2;  in TEST_F() local
    413  constexpr int64_t split_size = 2;  in TEST_F() local
    438  constexpr int64_t split_size = 2;  in TEST_F() local
    486  constexpr int64_t split_size = 2;  in TEST_F() local
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
  split_v_op.cc
    142  const Tlen& split_size = (*split_sizes_vec)[i];  in ComputeEasyCases() local
    204  for (const Tlen split_size : split_sizes) {  in SplitHasAlignedOutputsInFirstDimension() local
    337  auto make_sizes = [&](Eigen::DenseIndex split_size) {  in Compute()
    340  auto reshape_result = [&](Tensor* result, Tlen split_size) {  in Compute()
    350  auto make_sizes = [&](Eigen::DenseIndex split_size) {  in Compute()
    354  auto reshape_result = [&](Tensor* result, Tlen split_size) {  in Compute()
  split_op.cc
    237  auto make_sizes = [&](Eigen::DenseIndex split_size) {  in Compute()
    240  auto reshape_result = [&](Tensor* result, Eigen::DenseIndex split_size) {  in Compute()
    250  auto make_sizes = [&](Eigen::DenseIndex split_size) {  in Compute()
    254  auto reshape_result = [&](Tensor* result, Eigen::DenseIndex split_size) {  in Compute()
  sparse_split_op_gpu.cu.cc
    49   inline __device__ Index GetSliceIndex(const Index index, const Index split_size,  in GetSliceIndex()
    62   const Index split_size,  in GetDimensionInSlice()
    74   inline Index GetSliceShape(const Index slice_index, const Index split_size,  in GetSliceShape()
  concat_lib_gpu_impl.cu.cc
    38   GpuDeviceArrayStruct<const T*> input_ptr_data, int split_size,  in concat_fixed_kernel()
    141  bool fixed_size, int split_size,  in ConcatGPUImpl()
  ragged_tensor_from_variant_op.cc
    174  int split_size = 1;  in NestedStackRaggedTensors() local
  ragged_tensor_to_variant_op.cc
    125  SPLIT_TYPE split_size = -1;  in UnbatchRaggedZerothDim() local
/aosp_15_r20/external/ComputeLibrary/src/graph/nodes/
  SplitLayerNode.cpp
    60   int split_size = input_descriptor.shape[tmp_axis] / num_splits;  in compute_output_descriptor() local
    68   int split_size = _size_splits[idx];  in compute_output_descriptor() local
    119  …int split_size = (_size_splits.empty()) ? (input_descriptor.shape[tmp_axis] / _num_splits) : _size…  in configure_output() local
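The SplitLayerNode matches above show two ways the per-output extent is derived: evenly from the number of splits (line 60) or from an explicit _size_splits list (line 68). A minimal standalone sketch of that choice, assuming an even split must divide the axis exactly; this is illustrative, not the ComputeLibrary API:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Returns the extent of each output along the split axis.
std::vector<int64_t> split_sizes_along_axis(int64_t axis_extent,
                                            int64_t num_splits,
                                            const std::vector<int64_t>& size_splits) {
  if (size_splits.empty()) {
    // Even split: assumed to require exact divisibility of the axis.
    assert(num_splits > 0 && axis_extent % num_splits == 0);
    return std::vector<int64_t>(num_splits, axis_extent / num_splits);
  }
  // Explicit sizes: they must cover the axis exactly.
  int64_t total = 0;
  for (int64_t s : size_splits) total += s;
  assert(total == axis_extent);
  return size_splits;
}
```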
/aosp_15_r20/external/pytorch/aten/src/ATen/native/nested/
  NestedTensorUtils.cpp
    85   int64_t split_size = last_dim_size / chunks;  in chunk_nested_tensor() local
    131  for (const auto split_size : split_sizes) {  in split_with_sizes_nested() local
    151  auto split_size = split_sizes[split_idx];  in split_with_sizes_nested() local
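chunk_nested_tensor() at line 85 derives split_size by plain integer division of the last, regular dimension, which suggests the chunk count is expected to divide that dimension evenly. A minimal sketch of that sizing with the divisibility requirement made explicit as an assumption; illustrative only, not the ATen code:

```cpp
#include <cstdint>
#include <stdexcept>
#include <utility>
#include <vector>

// Computes [start, end) column ranges for chunking a regular last dimension.
std::vector<std::pair<int64_t, int64_t>> chunk_last_dim(int64_t last_dim_size,
                                                        int64_t chunks) {
  if (chunks <= 0 || last_dim_size % chunks != 0) {
    throw std::invalid_argument("chunks must evenly divide the last dimension");
  }
  const int64_t split_size = last_dim_size / chunks;
  std::vector<std::pair<int64_t, int64_t>> ranges;
  for (int64_t i = 0; i < chunks; ++i) {
    ranges.emplace_back(i * split_size, (i + 1) * split_size);
  }
  return ranges;
}
```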
  NestedTensorMath.cpp
    284  for (const auto split_size : split_sizes) {  in NestedTensor_to_padded_tensor_generic() local
    299  for (const auto split_size : split_sizes) {  in NestedTensor_to_padded_tensor_generic() local
/aosp_15_r20/external/tensorflow/tensorflow/core/util/sparse/
  sparse_tensor.h
    222  static inline int GetSliceIndex(const int dim, const int split_size,  in GetSliceIndex()
    236  static inline int GetDimensionInSlice(const int dim, const int split_size,  in GetDimensionInSlice()
    250  static inline int GetSliceShape(const int slice_index, const int split_size,  in GetSliceShape()
    516  const int split_size = split_dim_size / num_split;  in Split() local
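GetSliceIndex(), GetDimensionInSlice(), and GetSliceShape() above take split_size together with a residual argument, and Split() at line 516 computes split_size by floor division. One plausible reading of that arithmetic, sketched under the assumption that the first `residual` slices each carry one extra element; these are hypothetical helpers, not the tensorflow::sparse implementation:

```cpp
// Maps a coordinate along the split dimension to its slice, position within the
// slice, and the slice's extent, when the remainder goes to the earliest slices.
int GetSliceIndexSketch(int dim, int split_size, int residual) {
  const int big = split_size + 1;            // size of the first `residual` slices
  if (dim < residual * big) return dim / big;
  return residual + (dim - residual * big) / split_size;
}

int GetDimensionInSliceSketch(int dim, int split_size, int residual) {
  const int big = split_size + 1;
  if (dim < residual * big) return dim % big;
  return (dim - residual * big) % split_size;
}

int GetSliceShapeSketch(int slice_index, int split_size, int residual) {
  return slice_index < residual ? split_size + 1 : split_size;
}
```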
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/
  Split.cpp
    37   int64_t split_size = split_sizes[split_idx];  in add_split_with_sizes_default_node() local
    121  int64_t split_size = graph.extract_scalar<int64_t>(split_size_ref);  in add_split_tensor_node() local
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
  TensorShape.cpp
    944   auto split_size = (dim_size + chunks - 1) / chunks;  in chunk() local
    972   auto split_size = (num_splits_one_extra > split_idx) ? (min_split_size + 1) : min_split_size;  in tensor_split_sections_symint() local
    1038  int64_t split_size = (dim_size + chunks - 1) / chunks;  in unsafe_chunk() local
    2572  std::vector<Tensor> split(const Tensor& self, int64_t split_size, int64_t dim) {  in split()
    2588  std::vector<Tensor> unsafe_split(const Tensor& self, int64_t split_size, int64_t dim) {  in unsafe_split()
    2599  std::vector<Tensor> hsplit(const Tensor& self, int64_t split_size) {  in hsplit()
    2607  std::vector<Tensor> vsplit(const Tensor& self, int64_t split_size) {  in vsplit()
    2614  std::vector<Tensor> dsplit(const Tensor& self, int64_t split_size) {  in dsplit()
    4049  void split_copy_Tensor_out(const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList…  in split_copy_Tensor_out()
  TensorShape.h
    41    inline int64_t get_num_splits(const Tensor& self, int64_t split_size, int64_t dim) {  in get_num_splits()
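The chunk()/unsafe_chunk() matches at lines 944 and 1038 pick split_size by rounding up, while get_num_splits() in TensorShape.h goes the other direction, from a split_size to a piece count. A minimal sketch of both directions of that arithmetic; standalone helpers, not the ATen code, and edge cases such as size-0 dimensions are glossed over:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

// chunk(): each piece is ceil(dim_size / chunks) long, so only the final piece
// can come up short.
int64_t chunk_split_size(int64_t dim_size, int64_t chunks) {
  assert(chunks > 0);
  return (dim_size + chunks - 1) / chunks;
}

// split(): for a fixed split_size, the number of output pieces is the matching
// ceiling division, with at least one piece.
int64_t num_splits_for(int64_t dim_size, int64_t split_size) {
  assert(split_size > 0);
  return std::max<int64_t>((dim_size + split_size - 1) / split_size, 1);
}
```

For example, chunk_split_size(10, 3) == 4 and num_splits_for(10, 4) == 3: a length-10 dimension chunked into 3 yields pieces of 4, 4, and 2.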
/aosp_15_r20/external/executorch/kernels/portable/cpu/util/
  copy_ops_util.cpp
    445  int64_t split_size,  in get_split_with_sizes_copy_out_target_size()
    612  int64_t split_size,  in check_split_copy_args()
/aosp_15_r20/external/tflite-support/tensorflow_lite_support/custom_ops/kernel/sentencepiece/
  sentencepiece_detokenizer_op.cc
    64   const int split_size = input_splits_flat(i + 1) - input_splits_flat(i);  in Compute() local
  sentencepiece_detokenizer_tflite.cc
    72   const int split_size = input_splits_data[i + 1] - input_splits_data[i];  in Eval() local
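Both detokenizer kernels compute split_size as the difference of adjacent entries in a row-splits array, the usual ragged encoding where the splits array has one more entry than there are rows and splits[i+1] - splits[i] is row i's length. A small standalone illustration of that pattern; hypothetical code, not the tflite-support kernel:

```cpp
#include <cstddef>
#include <vector>

// Recovers per-row lengths from a row-splits array of length num_rows + 1.
std::vector<int> row_lengths_from_splits(const std::vector<int>& splits) {
  std::vector<int> lengths;
  for (std::size_t i = 0; i + 1 < splits.size(); ++i) {
    lengths.push_back(splits[i + 1] - splits[i]);  // split_size for row i
  }
  return lengths;
}

// e.g. splits = {0, 3, 3, 7}  ->  lengths = {3, 0, 4}
```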
/aosp_15_r20/external/pytorch/torch/distributed/_shard/sharding_spec/
  _internals.py
    176  def get_chunked_dim_size(dim_size, split_size, idx):  argument
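A minimal sketch of what a helper like get_chunked_dim_size(dim_size, split_size, idx) plausibly computes: the extent of chunk idx when a dimension of dim_size is cut into pieces of at most split_size, with the tail chunk shorter and out-of-range chunks empty. This is an assumption about the helper's contract, not its source:

```cpp
#include <algorithm>
#include <cstdint>

// Extent of chunk `idx` of a dimension of length dim_size split into pieces of
// at most split_size (assumed contract; clamped to zero past the end).
int64_t chunked_dim_size(int64_t dim_size, int64_t split_size, int64_t idx) {
  const int64_t begin = split_size * idx;
  const int64_t end = std::min(dim_size, split_size * (idx + 1));
  return std::max<int64_t>(end - begin, 0);
}

// chunked_dim_size(10, 4, 0) == 4, chunked_dim_size(10, 4, 2) == 2,
// chunked_dim_size(10, 4, 3) == 0
```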
/aosp_15_r20/external/executorch/kernels/portable/cpu/
  op_split_copy.cpp
    35   int64_t split_size,  in split_copy_Tensor_out()
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/
  all_to_all_decomposer.cc
    63   int64_t split_size =  in ExpandInstruction() local
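The all-to-all decomposer computes a split_size at line 63 while expanding the instruction; the array form of all-to-all cuts its split dimension into split_count equal slices, one per participant. A sketch of that slicing arithmetic under the even-divisibility requirement; illustrative only, not the XLA pass:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

struct Slice { int64_t start; int64_t limit; };  // [start, limit) along the split dim

// One slice per participant; the split dimension must divide evenly.
std::vector<Slice> all_to_all_slices(int64_t dim_size, int64_t split_count) {
  assert(split_count > 0 && dim_size % split_count == 0);
  const int64_t split_size = dim_size / split_count;
  std::vector<Slice> slices;
  for (int64_t i = 0; i < split_count; ++i) {
    slices.push_back({i * split_size, (i + 1) * split_size});
  }
  return slices;
}
```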
/aosp_15_r20/external/pytorch/aten/src/ATen/
  FunctionalInverses.cpp
    234  …InverseReturnMode inverse_return_mode, int64_t mutated_view_idx, c10::SymInt split_size, int64_t d…  in split_Tensor_inverse()
    449  auto split_size = (dim_size + chunks - 1) / chunks;  in chunk_inverse() local
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
  TensorShape.cu
    196  for (const auto& split_size : split_sizes) {  in get_split_base_addrs() local
    221  for (const auto& split_size : split_sizes) {  in get_split_chunk_sizes() local
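get_split_base_addrs() and get_split_chunk_sizes() both iterate split_sizes, which suggests a single pass turning per-split extents into per-output offsets and byte counts. A hypothetical flat-buffer version of that bookkeeping, offered as an assumption about intent; the real CUDA code works with device pointers and strides:

```cpp
#include <cstdint>
#include <vector>

struct SplitSpan { int64_t offset_bytes; int64_t size_bytes; };

// Walks split_sizes once, producing each output's byte offset and length.
std::vector<SplitSpan> split_spans(const std::vector<int64_t>& split_sizes,
                                   int64_t bytes_per_unit) {
  std::vector<SplitSpan> spans;
  int64_t offset = 0;
  for (int64_t split_size : split_sizes) {
    spans.push_back({offset, split_size * bytes_per_unit});
    offset += split_size * bytes_per_unit;
  }
  return spans;
}
```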
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cudnn/
  Conv_v7.cpp
    646   int64_t split_size = std::max<int64_t>(max_worksize / max_inner_size, 1L);  in split_batch_dim_to_32bit_out() local
    1055  int64_t split_size =  in raw_cudnn_convolution_backward_weight_out_v7() local
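Line 646 picks split_size as max(max_worksize / max_inner_size, 1): how many batch items fit under a per-call element budget (the surrounding function splits the batch so indexing stays within 32 bits), with a floor of one item. A minimal sketch of iterating the batch in pieces of that size; illustrative, not the cuDNN binding:

```cpp
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// Splits [0, batch) into pieces of at most split_size items, where split_size is
// the largest item count whose total element count stays within max_elements.
std::vector<std::pair<int64_t, int64_t>> batch_pieces(int64_t batch,
                                                      int64_t max_elements,
                                                      int64_t elements_per_item) {
  const int64_t split_size = std::max<int64_t>(max_elements / elements_per_item, 1);
  std::vector<std::pair<int64_t, int64_t>> pieces;  // [start, end) over the batch dim
  for (int64_t start = 0; start < batch; start += split_size) {
    pieces.emplace_back(start, std::min(batch, start + split_size));
  }
  return pieces;
}
```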
/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/
  register_prim_ops_fulljit.cpp
    146  int64_t split_size = (regular_shape[dim] + chunks - 1) / chunks;  in __anon1bf66a480e02() local
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
  LegacyBatchingRegistrations.cpp
    237  std::vector<Tensor> split_batching_rule(const Tensor& self, int64_t split_size, int64_t dim) {  in split_batching_rule()