/aosp_15_r20/external/pytorch/test/inductor/ |
H A D | test_auto_functionalize.py | 46 out.copy_(x) 87 o.copy_(all_gather_output) 407 … copy_: "f32[s0][1]cpu" = torch.ops.aten.copy_.default(arg2_1, arg2_1); arg2_1 = copy_ = None 408 … copy__1: "f32[s0][1]cpu" = torch.ops.aten.copy_.default(arg5_1, arg5_1); arg5_1 = copy__1 = None 419 copy_: "f32[3][1]cpu" = torch.ops.aten.copy_.default(arg1_1, arg1_1); arg1_1 = copy_ = None 420 … copy__1: "f32[3][1]cpu" = torch.ops.aten.copy_.default(arg4_1, arg4_1); arg4_1 = copy__1 = None 511 copy_: "f32[3][1]cpu" = torch.ops.aten.copy_.default(arg1_1, arg1_1); arg1_1 = copy_ = None 512 … copy__1: "f32[3][1]cpu" = torch.ops.aten.copy_.default(arg4_1, arg4_1); arg4_1 = copy__1 = None 571 …copy_: "f32[s0][1]cpu" = torch.ops.aten.copy_.default(arg1_1, getitem_2); arg1_1 = getitem_2 = co… 572 …copy__1: "f32[s0][1]cpu" = torch.ops.aten.copy_.default(arg2_1, getitem_1); arg2_1 = getitem_1 = … [all …]
|
H A D | s429861_repro.py | 4442 copy_: "f32[50][1]cuda:0" = torch.ops.aten.copy_.default(arg1_1, getitem_7031) 4444 copy__1: "f32[23][1]cuda:0" = torch.ops.aten.copy_.default(arg2_1, getitem_7032) 4446 copy__2: "f32[38][1]cuda:0" = torch.ops.aten.copy_.default(arg3_1, getitem_7033) 4448 copy__3: "f32[5][1]cuda:0" = torch.ops.aten.copy_.default(arg4_1, getitem_7034) 4450 copy__4: "f32[100][1]cuda:0" = torch.ops.aten.copy_.default(arg5_1, getitem_7035) 4452 copy__5: "f32[50][1]cuda:0" = torch.ops.aten.copy_.default(arg6_1, getitem_7036) 4454 copy__6: "f32[77][1]cuda:0" = torch.ops.aten.copy_.default(arg7_1, getitem_7037) 4456 copy__7: "f32[100][1]cuda:0" = torch.ops.aten.copy_.default(arg8_1, getitem_7038) 4458 copy__8: "f32[100][1]cuda:0" = torch.ops.aten.copy_.default(arg9_1, getitem_7039) 4460 copy__9: "f32[96][1]cuda:0" = torch.ops.aten.copy_.default(arg10_1, getitem_7040) [all …]
|
H A D | test_inplacing_pass.py | 39 result.copy_(x.sin()) 44 out_sin.copy_(x.sin()) 45 out_cos.copy_(x.cos()) 206 copy_ = torch.ops.aten.copy_.default(arg0_1, getitem_1) 229 copy_ = torch.ops.aten.copy_.default(arg0_1, getitem_1) 253 copy_ = torch.ops.aten.copy_.default(arg0_1, getitem_1)
|
H A D | test_distributed_patterns.py | 30 … # torch.ops.fsdp.set_ doesn't work well in eager mode, so use the slow copy_ path instead. 37 mod.unsharded_weight.copy_(all_gather(mod.sharded_weight)) 62 … # torch.ops.fsdp.set_ doesn't work well in eager mode, so use the slow copy_ path instead. 69 mod.unsharded_weight.copy_(all_gather(mod.sharded_weight)) 199 out.copy_(y.cos()) 225 w.copy_(x + 1) 249 w.copy_(x) 269 w.copy_(x + 1) 287 w.copy_(x)
|
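The test_distributed_patterns.py matches above work around `torch.ops.fsdp.set_` by all-gathering into a preallocated unsharded buffer with `copy_`. A minimal sketch of that "slow copy_ path", with `all_gather` replaced by a local stand-in (the real test uses a collective):

```python
import torch

world_size = 4
sharded_weight = torch.randn(2, 8)                  # this rank's shard
unsharded_weight = torch.empty(2 * world_size, 8)   # preallocated full parameter

def all_gather(shard):
    # local stand-in for the collective: pretend every rank holds this shard
    return torch.cat([shard] * world_size, dim=0)

# the slow path from the comments above: write the gathered weight into the
# existing buffer instead of swapping storages with fsdp.set_
unsharded_weight.copy_(all_gather(sharded_weight))
```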
/aosp_15_r20/external/pytorch/test/ |
H A D | test_functionalization.py | 70 inpt.copy_(inpt_new) 187 y.copy_(x) 298 copy_ = torch.ops.aten.copy_.default(arg0_1, view_copy_1); arg0_1 = view_copy_1 = copy_ = None 319 copy_ = torch.ops.aten.copy_.default(arg0_1, view_1); arg0_1 = view_1 = copy_ = None 513 copy_ = torch.ops.aten.copy_.default(arg0_1, add); arg0_1 = copy_ = None 532 copy_ = torch.ops.aten.copy_.default(arg0_1, add); arg0_1 = copy_ = None 561 copy_ = torch.ops.aten.copy_.default(arg0_1, getitem_5); arg0_1 = getitem_5 = copy_ = None 585 copy_ = torch.ops.aten.copy_.default(arg0_1, as_strided_scatter); arg0_1 = copy_ = None 605 copy_ = torch.ops.aten.copy_.default(arg0_1, as_strided_scatter); arg0_1 = copy_ = None 739 copy_ = torch.ops.aten.copy_.default(arg0_1, diagonal_scatter); arg0_1 = copy_ = None [all …]
|
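The expected graphs in test_functionalization.py show the core functionalization rewrite: a mutation through a view becomes an out-of-place `*_scatter` op, and a single trailing `copy_` writes the result back into the input. A hedged sketch of the before/after (simplified; the real pass also emits `view_copy` ops):

```python
import torch

def eager(x):                   # x: 1-D float tensor
    x[0].add_(1)                # mutates one element of the input in place
    return x

def functionalized(x):
    updated = torch.select_scatter(x, x[0] + 1, dim=0, index=0)
    x.copy_(updated)            # copy_ epilogue, as in the traced graphs above
    return updated
```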
/aosp_15_r20/external/google-cloud-java/java-aiplatform/proto-google-cloud-aiplatform-v1beta1/src/main/java/com/google/cloud/aiplatform/v1beta1/ |
H A D | ModelSourceInfo.java | 335 private boolean copy_ = false; field in ModelSourceInfo 351 return copy_; in getCopy() 374 if (copy_ != false) { in writeTo() 375 output.writeBool(2, copy_); in writeTo() 392 if (copy_ != false) { in getSerializedSize() 393 size += com.google.protobuf.CodedOutputStream.computeBoolSize(2, copy_); in getSerializedSize() 568 copy_ = false; in clear() 609 result.copy_ = copy_; in buildPartial0() 699 copy_ = input.readBool(); in mergeFrom() 821 private boolean copy_; field in ModelSourceInfo.Builder [all …]
|
/aosp_15_r20/external/google-cloud-java/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/ |
H A D | ModelSourceInfo.java | 333 private boolean copy_ = false; field in ModelSourceInfo 349 return copy_; in getCopy() 372 if (copy_ != false) { in writeTo() 373 output.writeBool(2, copy_); in writeTo() 390 if (copy_ != false) { in getSerializedSize() 391 size += com.google.protobuf.CodedOutputStream.computeBoolSize(2, copy_); in getSerializedSize() 566 copy_ = false; in clear() 607 result.copy_ = copy_; in buildPartial0() 696 copy_ = input.readBool(); in mergeFrom() 812 private boolean copy_; field in ModelSourceInfo.Builder [all …]
|
/aosp_15_r20/external/pytorch/torch/_inductor/fx_passes/ |
H A D | reinplace.py | 76 tmp.copy_(src) 145 slice2.copy_(src) 159 graph_call_function(graph, aten.copy_.default, tmp, src) 196 user.target is aten.copy_.default and user.args[0] is inp for user in node.users 232 tmp.copy_(src) 382 be inplaced if the above condition is true and there's a copy_ in the 391 # maps argument to the first copy_ node that mutates it. 399 if node.target == aten.copy_.default and node.args[0].op in ( 437 # Ignore uses after the copy_ epilogue node, where the input 446 # mutated_arg.copy_(other) [all …]
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/ |
H A D | Copy.cpp | 88 _AT_DISPATCH_CP_TYPES(self.scalar_type(), "copy_", [&] { in copy_same_type_transpose_() 130 // (e.g. XLA) may be supported by overriding copy_ and _copy_from. 221 // cpu_tensor.copy_(xla_tensor) => xla_tensor._copy_from(cpu_tensor) in copy_impl() 222 // xla_tensor.copy_(cpu_tensor) => cpu_tensor._copy_from(xla_tensor) in copy_impl() 247 return vulkan::ops::copy_(self, src); in copy_impl() 317 r.copy_(src, non_blocking); in copy_meta() 323 …// copy() is the "functional" form of copy_(). It exists so we can properly functionalize copy_(),… in copy() 329 // that copy_() will fully overwrite all data with that of src in copy() 335 r.copy_(src, non_blocking); in copy() 353 Tensor& copy_(Tensor& self, const Tensor& src, bool non_blocking) { in copy_() function
|
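copy_impl is also where cross-dtype and cross-device copies are resolved, including the XLA `_copy_from` redirection mentioned in the comments above. A small illustration of the user-visible behavior: `copy_` fully overwrites `self` and casts as needed.

```python
import torch

dst = torch.zeros(3, dtype=torch.float32)
src = torch.arange(3, dtype=torch.int64)

dst.copy_(src)   # int64 -> float32 cast happens inside copy_
# dst is now tensor([0., 1., 2.]); non_blocking=True matters only for
# pinned-host <-> CUDA transfers and is a no-op here
```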
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/ |
H A D | ScanKernels.cpp | 38 values.copy_(*values_); in cummax_helper_cuda() 41 indices.copy_(*indices_); in cummax_helper_cuda() 55 values.copy_(*values_); in cummin_helper_cuda() 58 indices.copy_(*indices_); in cummin_helper_cuda() 81 result.copy_(*result_); in _logcumsumexp_out_cuda() 100 result.copy_(*result_); in cumsum_cuda_kernel() 108 result.copy_(*result_); in cumprod_cuda_kernel()
|
H A D | Copy.cu | 145 AT_DISPATCH_QINT_TYPES(dtype, "copy_", [&] { in direct_copy_kernel_cuda() 151 TORCH_CHECK(dtype == iter.dtype(1), "copy_() does not support casting " in direct_copy_kernel_cuda() 153 AT_DISPATCH_BIT_TYPES(dtype, "copy_", [&] { in direct_copy_kernel_cuda() 158 dtype, "copy_", AT_WRAP([&] { in direct_copy_kernel_cuda() 328 dst_contig.copy_(src_contig, non_blocking); in copy_kernel_cuda() 333 dst.copy_(dst_contig, non_blocking); in copy_kernel_cuda() 354 TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()"); in copy_kernel_cuda()
|
H A D | Sort.cpp | 34 t.copy_(rangeview); in fillSliceWithIndex() 78 values.copy_(self); in sort_cuda_kernel() 93 self_.copy_(self); in sort_cuda_kernel() 115 values.copy_(*values_tmp); in sort_cuda_kernel() 118 indices.copy_(*indices_tmp); in sort_cuda_kernel()
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/ |
H A D | AdaptiveMaxPoolKernel.cpp | 77 output_.copy_(output); in cpu_adaptive_max_pool2d() 80 indices_.copy_(indices); in cpu_adaptive_max_pool2d() 194 output_.copy_(output); in cpu_adaptive_max_pool2d_channels_last() 197 indices_.copy_(indices); in cpu_adaptive_max_pool2d_channels_last() 334 output_.copy_(output); in cpu_adaptive_max_pool2d_channels_last() 337 indices_.copy_(indices); in cpu_adaptive_max_pool2d_channels_last() 383 grad_input_.copy_(grad_input); in cpu_adaptive_max_pool2d_backward() 432 grad_input_.copy_(grad_input); in cpu_adaptive_max_pool2d_backward_channels_last() 551 output_.copy_(output); in cpu_adaptive_max_pool3d() 554 indices_.copy_(indices); in cpu_adaptive_max_pool3d() [all …]
|
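The `values.copy_(*values_)`, `dst.copy_(dst_contig)`, and `output_.copy_(output)` calls in ScanKernels.cpp, Copy.cu, Sort.cpp, and AdaptiveMaxPoolKernel.cpp are the same idiom: run the kernel into a temporary buffer (for example when the caller's output has a layout the kernel cannot write directly), then `copy_` the result back. A rough Python sketch of the idea; names are illustrative:

```python
import torch

def run_into(output: torch.Tensor, compute) -> None:
    if output.is_contiguous():
        compute(output)                 # fast path: write directly into the output
    else:
        tmp = torch.empty_like(output, memory_format=torch.contiguous_format)
        compute(tmp)                    # kernel only handles contiguous memory
        output.copy_(tmp)               # copy back into the caller's tensor

# usage: fill a transposed (non-contiguous) view
out = torch.empty(4, 4).t()
run_into(out, lambda buf: buf.fill_(1.0))
```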
/aosp_15_r20/external/pytorch/test/profiler/ |
H A D | test_profiler_tree.py | 292 aten::copy_ 298 aten::copy_ 309 aten::copy_ 351 aten::copy_ 363 aten::copy_""", 403 aten::copy_ 411 aten::copy_ 432 aten::copy_ 512 aten::copy_ 520 aten::copy_ [all …]
|
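These expectations just assert where `aten::copy_` events land in the profiler tree. To see such an event yourself, a minimal profiling run (the tabular output here differs from the tree format the test checks):

```python
import torch
from torch.profiler import profile

with profile() as prof:
    a = torch.zeros(1024)
    a.copy_(torch.ones(1024))     # shows up as an aten::copy_ event

print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=5))
```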
/aosp_15_r20/external/pytorch/torch/optim/ |
H A D | rprop.py | 266 sign.copy_(torch.where(sign.gt(0), etaplus, sign)) 267 sign.copy_(torch.where(sign.lt(0), etaminus, sign)) 268 sign.copy_(torch.where(sign.eq(0), 1, sign)) 281 grad.copy_(torch.where(sign.eq(etaminus), 0, grad)) 287 prev.copy_(grad) 368 sign.copy_(torch.where(sign.gt(0), etaplus, sign)) 369 sign.copy_(torch.where(sign.lt(0), etaminus, sign)) 370 sign.copy_(torch.where(sign.eq(0), 1, sign)) 386 grouped_grads[i].copy_(
|
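The three chained `sign.copy_(torch.where(...))` lines implement Rprop's per-element step-size adjustment: grow the step where the gradient kept its sign, shrink it where the sign flipped, and leave it unchanged otherwise. A standalone sketch of just that fragment (tensor values are made up):

```python
import torch

etaplus, etaminus = 1.2, 0.5
grad = torch.tensor([0.3, -0.1, 0.0])
prev = torch.tensor([0.2, 0.4, -0.5])                # gradient from the previous step

sign = grad.mul(prev).sign()
sign.copy_(torch.where(sign.gt(0), etaplus, sign))   # same sign -> scale step by etaplus
sign.copy_(torch.where(sign.lt(0), etaminus, sign))  # sign flip -> scale step by etaminus
sign.copy_(torch.where(sign.eq(0), 1, sign))         # zero product -> keep the step size
# `sign` now holds the per-element factor applied to the step sizes
```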
/aosp_15_r20/external/pytorch/torch/ao/quantization/ |
H A D | observer.py | 539 self.min_val.copy_(min_val) 540 self.max_val.copy_(max_val) 555 self.min_val.copy_(torch.tensor(float("inf"))) 556 self.max_val.copy_(torch.tensor(float("-inf"))) 653 self.min_val.copy_(min_val) 654 self.max_val.copy_(max_val) 761 self.min_val.copy_(min_val) 762 self.max_val.copy_(max_val) 811 self.min_val.copy_(val) 813 self.max_val.copy_(val) [all …]
|
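The observers refresh their `min_val`/`max_val` registered buffers in place with `copy_`, so existing references to the buffers (for example in a state_dict) stay valid. A minimal sketch of the pattern, using a hypothetical class name rather than the real observer:

```python
import torch

class MinMaxTracker(torch.nn.Module):     # hypothetical, not the real observer class
    def __init__(self):
        super().__init__()
        self.register_buffer("min_val", torch.tensor(float("inf")))
        self.register_buffer("max_val", torch.tensor(float("-inf")))

    def forward(self, x):
        cur_min, cur_max = torch.aminmax(x)
        self.min_val.copy_(torch.min(cur_min, self.min_val))   # in-place buffer update
        self.max_val.copy_(torch.max(cur_max, self.max_val))
        return x
```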
/aosp_15_r20/external/pytorch/torch/csrc/distributed/c10d/ |
H A D | reducer.cpp | 394 "torch::distributed::reducer::copy_", in mark_variable_ready_dense() 396 bucket_view.copy_(div_result); in mark_variable_ready_dense() 400 "torch::distributed::reducer::copy_", in mark_variable_ready_dense() 402 bucket_view.copy_(grad); in mark_variable_ready_dense() 741 // essential. The H2D copy_ is stream ordered, while the host's in all_reduce_local_used_map() 743 // cuda/privateuseone-stream work pushes the copy_ far into the future, and in all_reduce_local_used_map() 746 // before the stream executes the copy_, copy_ will read those zeros in all_reduce_local_used_map() 770 local_used_map_tmp.copy_(local_used_map_); in all_reduce_local_used_map() 771 local_used_map_dev_.copy_(local_used_map_tmp, true); in all_reduce_local_used_map() 782 local_used_map_tmp.copy_(local_used_map_); in all_reduce_local_used_map() [all …]
|
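The reducer.cpp comments around lines 741-782 explain why `local_used_map_` is staged through a pinned host buffer before the non-blocking H2D `copy_`: the async copy is stream ordered, and copying straight from the live map could let later host writes (zeroing) land before the stream executes the transfer. A hedged Python sketch of that staging pattern; names are illustrative:

```python
import torch

if torch.cuda.is_available():
    local_used_map = torch.zeros(8, dtype=torch.int32)        # pageable host tensor
    staging = torch.empty_like(local_used_map).pin_memory()   # pinned host buffer
    local_used_map_dev = torch.empty_like(local_used_map, device="cuda")

    staging.copy_(local_used_map)                          # synchronous snapshot on the host
    local_used_map_dev.copy_(staging, non_blocking=True)   # stream-ordered async H2D copy
```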
/aosp_15_r20/external/pytorch/torch/ao/quantization/fx/_model_report/ |
H A D | model_report_observer.py | 101 self.epoch_activation_min.copy_(epoch_min_val) 102 self.epoch_activation_max.copy_(epoch_max_val) 147 self.min_val.copy_(min_val) 148 self.max_val.copy_(max_val) 247 self.percentile_batches_tracked.copy_(new_number_of_batches) 248 self.average_percentile_ratio.copy_(new_ratios) 249 self.constant_channels.copy_(new_constant_count)
|
/aosp_15_r20/external/pytorch/torch/csrc/autograd/ |
H A D | VariableTypeManual.cpp | 190 Tensor& copy_( in copy_() function 210 at::redispatch::copy_( in copy_() 222 new_fw_grad = self_fw_grad.copy_(src_fw_grad); in copy_() 362 "copy_", in TORCH_LIBRARY_IMPL() 363 torch::dispatch(DispatchKey::Autograd, TORCH_FN(VariableType::copy_))); in TORCH_LIBRARY_IMPL() 384 static Tensor& copy_( in copy_() function 391 at::redispatch::copy_( in copy_() 536 "copy_", in TORCH_LIBRARY_IMPL() 538 DispatchKey::ADInplaceOrView, TORCH_FN(ADInplaceOrView::copy_))); in TORCH_LIBRARY_IMPL()
|
H A D | autograd_meta.cpp | 29 // foo.copy_(bar) 33 // view.copy_(bar) 37 // foo.copy_(bar) 67 // view.copy_(bar) 73 // base.copy_(bar) 242 new_fw_grad_value.copy_(new_grad); in set_fw_grad() 262 res.copy_(new_grad); in set_fw_grad()
|
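These comments, together with the `copy_` override in VariableTypeManual.cpp above, describe how `copy_` participates in forward-mode AD: copying from a dual tensor also propagates its tangent via `set_fw_grad`. A small check of that behavior, which I believe follows from the code above:

```python
import torch
import torch.autograd.forward_ad as fwAD

with fwAD.dual_level():
    src = fwAD.make_dual(torch.randn(3), torch.ones(3))   # primal + tangent
    dst = torch.zeros(3)
    dst.copy_(src)                          # the "foo.copy_(bar)" case from the comments
    primal, tangent = fwAD.unpack_dual(dst)
    # tangent should now be the copied forward grad (ones)
```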
/aosp_15_r20/external/executorch/examples/models/llama/source_transformation/ |
H A D | quantized_kv_cache.py | 125 narrowed_k.copy_(quantized_k_val) 126 narrowed_k_scales.copy_(k_scales) 127 narrowed_k_zp.copy_(k_zero_points) 135 narrowed_v.copy_(quantized_v_val) 136 narrowed_v_scales.copy_(v_scales) 137 narrowed_v_zp.copy_(v_zero_points)
|
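The cache update writes new quantized keys/values into preallocated buffers through `narrow` views, so nothing is reallocated per decoding step. A simplified sketch of the narrow-then-copy_ pattern (shapes and names are illustrative, not the real quantized cache layout):

```python
import torch

cache = torch.zeros(1, 16, 4)            # (batch, max_seq_len, head_dim)
new_k = torch.randn(1, 2, 4)             # two freshly computed positions
pos = 5                                  # insertion offset

narrowed = cache.narrow(1, pos, new_k.size(1))   # view of cache[:, pos:pos+2, :]
narrowed.copy_(new_k)                            # in-place write through the view
```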
/aosp_15_r20/external/pytorch/torch/_functorch/_aot_autograd/ |
H A D | traced_function_transforms.py | 383 # and performing copy_() calls at the end of the function if `keep_input_mutations` is set. 466 … # Add node meta for copy_ for partitioner that this node should be in backward graph. 468 before.copy_(after) 489 … # (2) For keep_input_mutations, we support tracing a call to copy_() directly on mutated inputs. 504 …# It would be much better to add copy_() calls into the graph for the two tiny slices, instead of … 571 … # Even if we marked the input as having a data mutation (thus needing a copy_()), 576 … # Optimization: if the copy_() is a no-op then don't include it in the graph. 578 # param.copy_(param), where param is a zero-storage-size tensor, 586 # Since keep_input_mutations is set, we need to faithfully apply a copy_() 601 inpt_old.copy_(inpt_new) [all …]
|
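The comments describe the `keep_input_mutations` contract: the traced graph computes mutated values out of place and then faithfully applies a `copy_()` back into each mutated input, skipping the copy when it would be a no-op. A hedged sketch of what such a traced function looks like:

```python
import torch

def traced_fn(inpt):
    updated = inpt.mul(2)     # functional replacement for `inpt.mul_(2)`
    inpt.copy_(updated)       # copy_ epilogue so the caller still observes the mutation
    return updated
```

If `updated` were just `inpt` itself (a no-op mutation), the pass drops the `copy_` entirely, per the optimization noted in the snippet.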
/aosp_15_r20/external/pytorch/torch/testing/_internal/distributed/ |
H A D | multi_threaded_pg.py | 77 output_tensor_list[src_rank].copy_(input_tensor_list[dest_rank]) 92 output_buffer[output_indexes[src_rank]:output_indexes[src_rank + 1]].copy_( 139 data[src_rank][i].copy_(res.to(data[src_rank][i].device)) 153 dest_tensor.copy_(src_tensor) 172 dest_tensor.copy_(src_in_tensors[rank]) 189 dest_tensor.copy_(src_in_tensor_list[0]) 210 dest_tensor_on_rank_i[0].copy_(to_scatter[i].to(dst_tensor_device)) 230 out_tensor_list[j].copy_(in_tensor_list[j])
|
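The multi-threaded process group emulates collectives inside one process, so every collective reduces to `copy_` calls between the per-rank tensors. A toy sketch of the all-gather case (devices and synchronization omitted):

```python
import torch

def toy_all_gather(output_lists, input_tensors):
    # output_lists[dest_rank][src_rank] receives input_tensors[src_rank]
    for outputs in output_lists:
        for src_rank, out in enumerate(outputs):
            out.copy_(input_tensors[src_rank])

world = 2
inputs = [torch.full((3,), float(r)) for r in range(world)]
outputs = [[torch.empty(3) for _ in range(world)] for _ in range(world)]
toy_all_gather(outputs, inputs)
```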
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/ |
H A D | concat_opt.cpp | 262 // %22 = aten::copy_(%21, %2) // copy %2 264 // %24 = aten::copy_(%23, %3) // copy %3 351 auto copy = graph_->create(aten::copy_, {slice->output(), cat_inp}); in expandCat() 434 // %22 = aten::copy_(%21, %2) 436 // %24 = aten::copy_(%23, %3) 440 // %32 = aten::copy_(%31, %20) // src of copy is aten::empty 443 // %34 = aten::copy_(%33, %4) 449 // %22 = aten::copy_(%21, %2) 451 // %24 = aten::copy_(%23, %3) 455 // %34 = aten::copy_(%33, %4)
|
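The IR comments show `expandCat` rewriting `aten::cat` into a preallocated output plus one `aten::copy_` per input. The eager-mode equivalent of that rewrite:

```python
import torch

a, b = torch.randn(2, 4), torch.randn(3, 4)

out = torch.empty(5, 4)        # the %21/%23 slices above are views of a tensor like this
out[:2].copy_(a)               # %22 = aten::copy_(%21, %2)
out[2:].copy_(b)               # %24 = aten::copy_(%23, %3)
# `out` now equals torch.cat([a, b], dim=0)
```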
/aosp_15_r20/external/pytorch/torch/fx/passes/ |
H A D | reinplace.py | 65 # copy_() doesn't read from its first argument; it writes to it, overwriting previous data. 68 if node.target is torch.ops.aten.copy_.default: 308 there is a node that looks like "a.copy_(...)", 310 which will later be overwritten by the copy_() call. 412 a_slice.copy_(b) 430 slice.copy_(mutated_slice) 538 # TODO: later, add the optimization for handling `copy_()` calls in the graph. 567 # and instead copy_() the slice directly into the larger tensor. 575 # slice.copy_(mutated_slice) 582 … 'call_function', torch.ops.aten.copy_.default, (slice_node, mutated_slice_node,), {})
|
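The pass comments spell out the slice optimization: instead of keeping a full `slice_scatter` (which rewrites every element of the base tensor), copy only the mutated slice back through a view. A small demonstration of the two equivalent forms:

```python
import torch

a = torch.zeros(8)
b = torch.ones(3)

# functional form: materializes a whole new tensor the size of `a`
out = torch.slice_scatter(a, b, dim=0, start=2, end=5)

# reinplaced form: write only the three mutated elements through a view
a_slice = a[2:5]
a_slice.copy_(b)               # slice.copy_(mutated_slice) from the comments above
# `a` now equals `out`
```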