/aosp_15_r20/external/pytorch/aten/src/ATen/native/mps/operations/
  Copy.mm
    103  src = src_.expand_as(dst).contiguous();
    197  Tensor src = (src_.dtype() != dst_.dtype() ? src_.to(dst_.dtype()) : src_).expand_as(dst_);
    254  src = src_.expand_as(dst_).contiguous();
    340  return copy_from_mps_(dst, needs_broadcasting ? src.expand_as(dst) : src, non_blocking);
    343  return copy_to_mps_(dst, needs_broadcasting ? src.expand_as(dst) : src, non_blocking);
    347  return copy_kernel_mps(dst, needs_broadcasting ? src.expand_as(dst) : src, non_blocking);

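The MPS copy paths above all stage a broadcast before copying: when the shapes differ, the source is expanded to the destination's shape and materialized with contiguous() before the byte-level copy. A minimal Python sketch of that idiom (tensor names here are illustrative, not taken from Copy.mm):

```python
import torch

dst = torch.empty(4, 3)
src = torch.arange(3.0)           # shape (3,), broadcastable to (4, 3)

expanded = src.expand_as(dst)     # zero-copy view; the new dim gets stride 0
staged = expanded.contiguous()    # materialize the broadcast for the copy kernel

dst.copy_(src)                    # the public API that exercises this path
assert torch.equal(dst, staged)
```
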
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/
  Expand.cpp
    70  Tensor expand_as(const at::Tensor& self, const at::Tensor& other) {  [in expand_as()]
    78  m.impl(TORCH_SELECTIVE_NAME("aten::expand_as"), TORCH_FN(expand_as));  [in TORCH_LIBRARY_IMPL()]

/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/functional/
  normalization.h
    21  auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input);  [in normalize()]
    24  auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input);  [in normalize()]

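The normalize() lines compute a per-slice norm with keepdim=true, clamp it away from zero, and use expand_as to broadcast the denominator back to the input's shape. A rough Python equivalent, assuming the torch.nn.functional.normalize defaults (p=2, dim=1, eps=1e-12):

```python
import torch
import torch.nn.functional as F

input = torch.randn(5, 8)
p, dim, eps = 2.0, 1, 1e-12

# keepdim=True leaves a size-1 dim that expand_as stretches back over input
denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)
out = input / denom

assert torch.allclose(out, F.normalize(input, p=p, dim=dim, eps=eps))
```
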
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/
  ValidateCompressedIndicesCommon.h
    269  .add_owned_output(dummy.expand_as(idx))  [in _validate_compressed_sparse_indices_kernel()]
    303  .add_owned_output(dummy.expand_as(cidx_curr))  [in _validate_compressed_sparse_indices_kernel()]

/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
  Copy.cu
    311  src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous();  [in copy_kernel_cuda()]
    315  src_contig = iter.tensor(1).expand_as(dst).contiguous();  [in copy_kernel_cuda()]

/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
  BatchRulesReduceOps.cpp
    255  a_has_bdim ? a : a.expand_as(flagpole),  [in expand_bdims()]
    256  b_has_bdim ? b : b.expand_as(flagpole));  [in expand_bdims()]

  BatchRulesLoss.cpp
    137  reduction == Reduction::None ? grad : grad.expand_as(input), cur_level);  [in binary_cross_entropy_backward_plumbing()]

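In expand_bdims, an operand that lacks a batch dimension is expanded to the shape of a reference ("flagpole") tensor that carries one, so both operands agree before the batched op runs. A standalone sketch of that shape alignment, with illustrative names rather than the actual functorch internals:

```python
import torch

B = 8                             # the vmap batch size
flagpole = torch.randn(B, 5, 3)   # batched operand (has the batch dim)
b = torch.randn(5, 3)             # unbatched operand

b_aligned = b.expand_as(flagpole) # broadcast view: (B, 5, 3), stride 0 on dim 0
out = flagpole + b_aligned        # both sides now share the batch dim
assert out.shape == (B, 5, 3)
```
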
/aosp_15_r20/external/pytorch/torch/onnx/
  symbolic_opset7.py
    26  "expand_as",

  symbolic_opset11.py
    408  "ScatterElements", self, index, opset9.expand_as(g, src, index), axis_i=dim
    429  index = opset9.nonzero(g, opset9.expand_as(g, mask, self))
    435  index = opset9.nonzero(g, opset9.expand_as(g, mask, self))

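The opset11 lines turn a broadcastable mask into explicit coordinates: the mask is expanded to self's shape and nonzero() lists the positions, which the ONNX scatter ops then consume. A small eager-mode sketch of the same trick (tensor names are illustrative):

```python
import torch

self_t = torch.randn(3, 4)
mask = torch.tensor([[True], [False], [True]])  # broadcastable to (3, 4)

index = mask.expand_as(self_t).nonzero()        # (num_true, 2) coordinates
filled = self_t.index_put(tuple(index.t()), torch.tensor(0.0))

# equivalent to the single fused op the symbolic is decomposing
assert torch.equal(filled, self_t.masked_fill(mask, 0.0))
```
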
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
  SobolEngineOpsUtils.h
    39  inter = at::pow(2, inter).expand_as(bmat);  [in cdot_pow2()]

  MathBitFallThroughLists.h
    11  m.impl("expand_as", torch::CppFunction::makeFallthrough()); \

  SparseTensorUtils.cpp
    123  at::zeros({1}, t._values().options()).expand_as(t._values()),  [in zeros_like_with_indices()]

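The SparseTensorUtils.cpp line avoids allocating a full buffer of zeros: a one-element zeros tensor is expanded to the values' shape, yielding a stride-0 broadcast view backed by a single element. A sketch of the effect (the storage-size assertion assumes float32 and a recent PyTorch with Tensor.untyped_storage):

```python
import torch

values = torch.randn(1000, 16)
zeros_view = torch.zeros(1, dtype=values.dtype).expand_as(values)

assert zeros_view.shape == values.shape
assert zeros_view.stride() == (0, 0)               # broadcast view, no copy
assert zeros_view.untyped_storage().nbytes() == 4  # one float32 element backs it all
```
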
/aosp_15_r20/external/pytorch/torch/distributions/
  dirichlet.py
    15  total = concentration.sum(-1, True).expand_as(concentration)

  kl.py
    249  t[(q.probs == 0).expand_as(t)] = inf
    250  t[(p.probs == 0).expand_as(t)] = 0

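In dirichlet.py, the keepdim=True sum leaves a size-1 last dimension that expand_as stretches back over the concentration vector, so the parameters can be normalized elementwise (as in the Dirichlet mean). A small sketch:

```python
import torch

concentration = torch.tensor([[1.0, 2.0, 3.0],
                              [4.0, 1.0, 1.0]])
total = concentration.sum(-1, True).expand_as(concentration)  # row sums, broadcast back
mean = concentration / total                                  # each row now sums to 1

assert torch.allclose(mean.sum(-1), torch.ones(2))
```
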
/aosp_15_r20/external/pytorch/test/quantization/core/experimental/
  quantization_util.py
    63  correct = pred.eq(target.view(1, -1).expand_as(pred))

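This is the standard top-k accuracy idiom: target is reshaped to (1, batch) and expanded to pred's (k, batch) layout, so eq() compares every top-k guess against the label. A sketch with illustrative shapes:

```python
import torch

output = torch.randn(8, 10)          # (batch, classes) logits
target = torch.randint(0, 10, (8,))
k = 5

_, pred = output.topk(k, dim=1, largest=True, sorted=True)
pred = pred.t()                      # (k, batch)
correct = pred.eq(target.view(1, -1).expand_as(pred))

top1_hits = correct[:1].flatten().sum().item()
topk_hits = correct[:k].flatten().sum().item()
```
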
/aosp_15_r20/external/pytorch/torch/_inductor/fx_passes/
  fuse_attention.py
    376  attn_mask = (attn_mask == 0).view((bs, 1, 1, k_len)).expand_as(scores)
    441  attn_mask = (attn_mask == 0).view((bs, 1, 1, k_len)).expand_as(scores)

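These lines are part of the attention pattern the fusion pass matches: a (bs, k_len) padding mask is reshaped to (bs, 1, 1, k_len) so expand_as can broadcast it over every head and query position of the score tensor. A sketch of the pattern with illustrative sizes:

```python
import torch

bs, n_head, q_len, k_len = 2, 4, 5, 7
scores = torch.randn(bs, n_head, q_len, k_len)
attn_mask = torch.ones(bs, k_len, dtype=torch.long)
attn_mask[:, -2:] = 0                # last two key positions are padding

bool_mask = (attn_mask == 0).view((bs, 1, 1, k_len)).expand_as(scores)
scores = scores.masked_fill(bool_mask, float("-inf"))
attn = scores.softmax(dim=-1)        # padding positions get zero weight
```
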
/aosp_15_r20/external/pytorch/torch/masked/maskedtensor/
  binary.py
    156  result_mask = result_mask.expand_as(result_data)

/aosp_15_r20/external/pytorch/functorch/op_analysis/
  public_api
    178  expand_as

/aosp_15_r20/external/pytorch/docs/source/
  tensor_view.rst
    56  - :meth:`~torch.Tensor.expand_as`

  masked.rst
    296  Tensor.expand_as

/aosp_15_r20/external/pytorch/test/mobile/model_test/
  coverage.yaml
    219  - aten::expand_as
    810  aten::expand_as: 3

/aosp_15_r20/external/pytorch/test/
  test_matmul_cuda.py
    494  self.assertEqual(difference, torch.tensor(4.0, device=device).expand_as(out_fp32))
    518  self.assertEqual(outb_fp32, torch.tensor(-3.0, device=device).expand_as(outb_fp32))

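The test lines build an "expected" tensor by expanding a 0-dim scalar to the output's shape, a stride-0 view that avoids allocating a full constant buffer. A minimal sketch of the idiom:

```python
import torch

out = torch.full((3, 3), 4.0)
expected = torch.tensor(4.0).expand_as(out)  # stride-0 constant view, no allocation
assert torch.equal(out, expected)
```
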
/aosp_15_r20/external/pytorch/tools/autograd/
  derivatives.yaml
    838   …source: "maybe_multiply(source.dim() > 0 ? grad.index_select(dim, index).expand_as(source) : grad.…
    851   …source: "source.dim() > 0 ? grad.index_select(dim, index).expand_as(source) : grad.index_select(di…
    1359  …result: (prod_backward(at::ones({}, result.options()).expand_as(result), self_p.to(result.scalar_t…
    1363  …result: (prod_backward(at::ones({}, result.options()).expand_as(result), self_p.to(result.scalar_t…
    2093  … lambd).logical_or(self_p < -lambd), grad_out_t, at::zeros({}, result.options()).expand_as(result))
    2434  …l).logical_and(self_p < max_val), grad_output_t, at::zeros({}, result.options()).expand_as(result))
    2583  …mbd).logical_or(self_p < -lambd), grad_output_t, at::zeros({}, result.options()).expand_as(result))

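The first two derivatives.yaml entries (truncated above) define the source gradient of index_add-style ops as grad.index_select(dim, index), expanded back to source's shape when needed: each source row's gradient is the output-gradient row it was added into. A small autograd check of that formula (tensor names are illustrative):

```python
import torch

dim = 0
base = torch.zeros(5, 3)
index = torch.tensor([0, 2, 2])
source = torch.randn(3, 3, requires_grad=True)

out = base.index_add(dim, index, source)
grad_out = torch.arange(15.0).reshape(5, 3)
out.backward(grad_out)

# grad wrt source gathers the output-grad rows each source row fed into
expected = grad_out.index_select(dim, index)
assert torch.equal(source.grad, expected)
```
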
/aosp_15_r20/external/pytorch/test/inductor/
  test_fused_attention.py
    741  attn_mask = (attn_mask == 0).view((bs, 1, 1, k_len)).expand_as(scores)
    834  attn_mask = (attn_mask == 0).view((bs, 1, 1, k_len)).expand_as(scores)

/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/
  device_type_analysis.cpp
    234  {"aten::expand_as(Tensor self, Tensor other) -> Tensor",  [in buildRuleRegistry()]