/aosp_15_r20/external/pytorch/torch/distributed/nn/functional.py

   158  def all_to_all(output_tensor_list, input_tensor_list, group=group.WORLD):
   171      return _AlltoAll.apply(group, output_tensor_list, *input_tensor_list)
   305  def forward(ctx, op, group, tensor, *input_tensor_list):
   309      input_tensor_list = tuple(t.contiguous() for t in input_tensor_list)
   310      dist.reduce_scatter(tensor, list(input_tensor_list), op=op, group=group)
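
These hits are the autograd-aware functional collectives: all_to_all dispatches to the _AlltoAll autograd Function (line 171), and the reduce-scatter forward makes each input contiguous before calling the c10d op (lines 309-310). A minimal usage sketch of the wrapper as it appears in this tree; the process-group setup and tensor shapes are illustrative assumptions:

    import torch
    import torch.distributed as dist
    from torch.distributed.nn.functional import all_to_all

    # Assumes dist.init_process_group(...) has already run on every rank.
    def exchange_with_grad(chunks):
        # One input chunk per peer rank; outputs are allocated to match.
        outputs = [torch.empty_like(t) for t in chunks]
        # Unlike dist.all_to_all, this wrapper returns the outputs and
        # participates in autograd, so gradients flow back through it.
        return all_to_all(outputs, chunks)
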
/aosp_15_r20/external/pytorch/torch/testing/_internal/distributed/multi_threaded_pg.py

    76  _, input_tensor_list = data[src_rank]
    77  output_tensor_list[src_rank].copy_(input_tensor_list[dest_rank])
   330  def alltoall(self, output_tensor_list, input_tensor_list, opts=AllToAllOptions()):
   332      res = coll.join(self._rank, (output_tensor_list, input_tensor_list))
   399  def allgather_into_tensor_coalesced(self, output_tensor_list, input_tensor_list, opts=AllgatherOpt…
   401      for o_t, i_t in zip(output_tensor_list, input_tensor_list):
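
The threaded test backend emulates all-to-all by direct copies between ranks' tensor lists: line 77 copies rank dest_rank's slot out of the source rank's inputs. A single-process sketch of that exchange rule with plain tensors (no process group; `world` and the fill values are hypothetical):

    import torch

    # Rule implemented at line 77: rank d's output slot s receives rank s's
    # input slot d, i.e. outputs[d][s] = inputs[s][d].
    world = 4
    inputs = [[torch.full((2,), float(i * 10 + j)) for j in range(world)]
              for i in range(world)]
    outputs = [[torch.empty(2) for _ in range(world)] for _ in range(world)]

    for dest_rank in range(world):
        for src_rank in range(world):
            # Mirrors: output_tensor_list[src_rank].copy_(input_tensor_list[dest_rank])
            outputs[dest_rank][src_rank].copy_(inputs[src_rank][dest_rank])

    assert torch.equal(outputs[1][2], inputs[2][1])  # rank 1 got rank 2's slot 1
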
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xrt/kernels/xrt_state_ops.h

    74  const xrt::XLATupleNode& tuple_node, const OpInputList& input_tensor_list,  (in ParseTupleNode())
    85      tuple_node.tuples(i), input_tensor_list, input_vector,  (in ParseTupleNode())
   108      TensorShapeUtils::IsScalar(input_tensor_list[input_index].shape()));  (in ParseTupleNode())
   109      int64_t key = input_tensor_list[input_index].scalar<int64_t>()();  (in ParseTupleNode())
   126      const OpInputList& input_tensor_list,  (in ParseTupleTree())
   134      TF_RETURN_IF_ERROR(ParseTupleNode(tuple_tree_root, input_tensor_list,  (in ParseTupleTree())
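
Here ParseTupleNode recurses over an XLATupleNode: interior nodes recurse into child tuples (line 85), while leaves index into input_tensor_list and require the indexed input to be a scalar int64 allocation key (lines 108-109). A conceptual Python sketch of the same recursive walk; all names are hypothetical illustrations, not the XRT API:

    # Conceptual sketch (hypothetical names, not the XRT API): walk a
    # tuple-tree whose leaves index into input_tensor_list, where each
    # indexed input must be a scalar int64 key.
    def parse_tuple_node(node, input_tensor_list):
        if "tuples" in node:                 # interior node: recurse into children
            return tuple(parse_tuple_node(child, input_tensor_list)
                         for child in node["tuples"])
        index = node["input_index"]          # leaf: points at one op input
        key = input_tensor_list[index]       # must be a scalar int64 key
        assert isinstance(key, int)
        return key

    tree = {"tuples": [{"input_index": 0},
                       {"tuples": [{"input_index": 1}, {"input_index": 2}]}]}
    print(parse_tuple_node(tree, [101, 102, 103]))  # (101, (102, 103))
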
/aosp_15_r20/external/pytorch/torch/distributed/distributed_c10d.py

  3481  output_tensor_lists, input_tensor_list, group=None, async_op=False
  3531  _check_tensor_list(input_tensor_list, "input_tensor_list")
  3532  _ensure_all_tensors_same_dtype(input_tensor_list)
  3545  input_tensor_list = [
  3546      t if not t.is_complex() else torch.view_as_real(t) for t in input_tensor_list
  3550  work = group.allgather_coalesced(output_tensor_lists, input_tensor_list)
  4007  def all_to_all(output_tensor_list, input_tensor_list, group=None, async_op=False):
  4103  _check_tensor_list(input_tensor_list, "input_tensor_list")
  4104  _ensure_all_tensors_same_dtype(output_tensor_list, input_tensor_list)
  4106  input_tensor_list = [  [all …]
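
The public c10d entry points validate the tensor list, enforce a single dtype (lines 3531-3532, 4103-4104), and reinterpret complex tensors as real pairs via torch.view_as_real before invoking the backend (lines 3545-3546). A hedged usage sketch showing that callers can therefore pass complex tensors to dist.all_to_all directly; it assumes an initialized process group whose backend supports all_to_all (e.g. NCCL or MPI):

    import torch
    import torch.distributed as dist

    # Assumes dist.init_process_group(...) with an all_to_all-capable backend.
    def all_to_all_complex(input_tensor_list):
        # c10d itself applies torch.view_as_real() to complex inputs before
        # the collective, so no manual conversion is needed here.
        output_tensor_list = [torch.empty_like(t) for t in input_tensor_list]
        dist.all_to_all(output_tensor_list, input_tensor_list)
        return output_tensor_list

    # Hypothetical call on a 4-rank job: one complex chunk per peer rank.
    # chunks = [torch.randn(8, dtype=torch.cfloat) for _ in range(4)]
    # received = all_to_all_complex(chunks)
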
/aosp_15_r20/external/pytorch/test/distributed/test_multi_threaded_pg.py

   206  input_tensor_list = [
   210  output_tensor_list = [torch.empty_like(tensor) for tensor in input_tensor_list]
   211  dist.all_to_all(output_tensor_list, input_tensor_list)
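
The test allocates one output per input with torch.empty_like and runs the collective in place. A sketch of the check such a test typically performs; the list elided at line 206 is an assumption here, chosen so each payload encodes its sender and addressee:

    import torch
    import torch.distributed as dist

    # Assumed test body: rank r's dst-th input carries r * world_size + dst,
    # so after the exchange every slot is verifiable.
    def run_all_to_all(rank, world_size):
        input_tensor_list = [
            torch.full((2, 2), float(rank * world_size + dst))
            for dst in range(world_size)
        ]
        output_tensor_list = [torch.empty_like(t) for t in input_tensor_list]
        dist.all_to_all(output_tensor_list, input_tensor_list)
        # Slot src now holds what rank src addressed to this rank.
        for src in range(world_size):
            expected = torch.full((2, 2), float(src * world_size + rank))
            assert torch.equal(output_tensor_list[src], expected)
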
/aosp_15_r20/external/pytorch/test/distributed/test_c10d_common.py

  1561  def allgather(self, output_tensor_lists, input_tensor_list, opts=None):
  1563      output_tensor_lists, input_tensor_list
  1602  for output_tensor, input_tensor_list in zip(
  1605      output_tensor.copy_(input_tensor_list[self.rank()])
  1751  input_tensor_list = [torch.ones(2, 2) for _ in range(self.world_size)]
  1752  dist.reduce_scatter(output_tensor, input_tensor_list)
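
Lines 1751-1752 show the reduce_scatter calling convention: each rank contributes world_size inputs and receives a single reduced output. A minimal sketch of the expected result, assuming an initialized process group and the default SUM reduction:

    import torch
    import torch.distributed as dist

    # Rank r receives the elementwise sum of every rank's r-th input.
    def reduce_scatter_ones(world_size):
        input_tensor_list = [torch.ones(2, 2) for _ in range(world_size)]
        output_tensor = torch.empty(2, 2)
        dist.reduce_scatter(output_tensor, input_tensor_list)
        # With all-ones inputs on every rank, each element sums to world_size.
        assert torch.equal(output_tensor, torch.full((2, 2), float(world_size)))
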
/aosp_15_r20/external/pytorch/torch/distributed/_shard/sharded_tensor/reshard.py

   234  input_tensor_list = [tensor.contiguous() for tensor in input_tensor_tuple]
   237      input_tensor_list,
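
Resharding makes every shard contiguous before handing the list to the collective at line 237, since slicing or transposing a sharded tensor can yield non-contiguous views. A small sketch of why that guard is needed; `shards` is a hypothetical input for illustration:

    import torch

    # Transposed views are non-contiguous; .contiguous() copies each one
    # into a dense layout that collective backends can consume.
    shards = list(torch.randn(4, 6).t().chunk(2))
    assert not shards[0].is_contiguous()
    input_tensor_list = [t.contiguous() for t in shards]
    assert all(t.is_contiguous() for t in input_tensor_list)
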
/aosp_15_r20/external/federated-compute/fcp/aggregation/core/BUILD

    30  "input_tensor_list.cc",
    42  "input_tensor_list.h",