
Searched full:cdist (Results 1 – 25 of 46) sorted by relevance


/aosp_15_r20/external/pytorch/aten/src/ATen/native/
Distance.cpp
82 …TORCH_CHECK(at::isFloatingType(x1.scalar_type()), "cdist only supports floating-point dtypes, X1 g… in cdist_impl()
84 …TORCH_CHECK(at::isFloatingType(x2.scalar_type()), "cdist only supports floating-point dtypes, X2 g… in cdist_impl()
86 TORCH_CHECK(p >= 0, "cdist only supports non-negative p values"); in cdist_impl()
102 // See Note [cdist relies on cdist_impl redispatching] in cdist_impl()
105 …TORCH_CHECK(device1 == kCPU || device1 == kCUDA || device1 == kXPU, "cdist only supports CPU, XPU … in cdist_impl()
106 …TORCH_CHECK(device2 == kCPU || device2 == kCUDA || device2 == kXPU, "cdist only supports CPU, XPU … in cdist_impl()
138 // See Note [cdist relies on cdist_impl redispatching] in cdist_impl()
150 Tensor cdist(const Tensor& x1, const Tensor& x2, const double p, std::optional<int64_t> compute_mod… in cdist() function
151 TORCH_CHECK(x1.dim() >= 2, "cdist only supports at least 2D tensors, X1 got: ", x1.dim(), "D"); in cdist()
152 TORCH_CHECK(x2.dim() >= 2, "cdist only supports at least 2D tensors, X2 got: ", x2.dim(), "D"); in cdist()
[all …]
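For context, a minimal sketch of the input contract these TORCH_CHECKs enforce; the shapes and failing calls below are illustrative assumptions, not taken from the file:

    import torch

    x1 = torch.randn(3, 5)          # (P, M): 3 rows of dimension 5
    x2 = torch.randn(4, 5)          # (R, M): 4 rows of dimension 5
    d = torch.cdist(x1, x2, p=2.0)  # (P, R) pairwise Euclidean distances
    print(d.shape)                  # torch.Size([3, 4])

    # Each of these should trip one of the checks above:
    # torch.cdist(x1.int(), x2)    -> floating-point dtypes only
    # torch.cdist(x1[0], x2)       -> at least 2D tensors required
    # torch.cdist(x1, x2, p=-1.0)  -> non-negative p values only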
/aosp_15_r20/external/executorch/kernels/portable/cpu/
op_cdist_forward.cpp
27 void cdist(const Tensor& x1, const Tensor& x2, Tensor& out, double p) { in cdist() function
102 void cdist(const Tensor& x1, const Tensor& x2, Tensor& out, double p) { in cdist() function
104 cdist<CTYPE, L0<CTYPE>>(x1, x2, out, p); in cdist()
106 cdist<CTYPE, L1<CTYPE>>(x1, x2, out, p); in cdist()
108 cdist<CTYPE, L2<CTYPE>>(x1, x2, out, p); in cdist()
110 cdist<CTYPE, Linf<CTYPE>>(x1, x2, out, p); in cdist()
112 cdist<CTYPE, Lp<CTYPE>>(x1, x2, out, p); in cdist()
166 out_type, ctx, name, CTYPE, [&] { cdist<CTYPE>(x1, x2, out, p); }); in _cdist_forward_out()
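The dispatch above specializes the kernel per norm (L0, L1, L2, Linf, general Lp). A rough Python reference of what each specialization computes, assuming the functors match torch.cdist semantics for each p:

    import torch

    def cdist_ref(x1, x2, p):
        # Broadcast to (..., P, R, M) coordinate-wise absolute differences.
        diff = (x1.unsqueeze(-2) - x2.unsqueeze(-3)).abs()
        if p == 0:                   # L0: count of differing coordinates
            return (diff != 0).sum(-1).to(x1.dtype)
        if p == float("inf"):        # Linf: largest coordinate difference
            return diff.amax(-1)
        return diff.pow(p).sum(-1).pow(1.0 / p)  # L1, L2, general Lp

    x1, x2 = torch.randn(3, 5), torch.randn(4, 5)
    for p in (0.0, 1.0, 2.0, float("inf")):
        assert torch.allclose(cdist_ref(x1, x2, p), torch.cdist(x1, x2, p=p))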
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mps/operations/
ReduceOps.mm
295 bool cdist = false,
309 IntArrayRef input_shape = cdist ? input_broadcasted_shape.value() : input_t.sizes();
338 if (cdist) {
351 …string tensor_key = cdist ? getTensorsStringKey({input_tensor, other_tensor}) : getTensorsStringKe…
358 if (cdist) {
362 MPSGraphTensor* inputTensor = cdist
404 if (cdist) {
418 if (cdist) {
1032 impl_func_norm_mps(self, self, opt_p, dim, keepdim, std::nullopt, result, /*cdist=*/false);
1042 impl_func_norm_mps(self, self, opt_p, dim, keepdim, dtype, result, /*cdist=*/false);
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
BatchRulesBinaryOps.cpp
229 const Tensor& cdist, std::optional<int64_t> cdist_bdim) { in cdist_backward_batch_rule() argument
233 // We need to make sure that x1 has batch dim if cdist has one in cdist_backward_batch_rule()
237 auto bs = cdist.size(*cdist_bdim); in cdist_backward_batch_rule()
259 auto out = at::_cdist_backward(grad_, x1_, x2_, p, cdist); in cdist_backward_batch_rule()
390 // but cdist can't work with scalars; it needs at least 2D tensors. in TORCH_LIBRARY_IMPL()
BatchRulesDecompositions.cpp
83 OP_DECOMPOSE(cdist); in TORCH_LIBRARY_IMPL()
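Together these give cdist vmap support: the forward decomposes (OP_DECOMPOSE) and the backward gets the explicit cdist_backward_batch_rule above. A small usage sketch, with shapes assumed:

    import torch

    x1 = torch.randn(8, 3, 5)  # a batch of 8 (P, M) tensors
    x2 = torch.randn(8, 4, 5)
    d = torch.vmap(lambda a, b: torch.cdist(a, b, p=2.0))(x1, x2)
    # cdist also batches natively, so the two paths should agree:
    assert torch.allclose(d, torch.cdist(x1, x2, p=2.0))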
/aosp_15_r20/external/pytorch/torch/jit/
_builtins.py
108 (torch._VF.cdist, "aten::cdist"), # type: ignore[attr-defined]
131 "cdist",
/aosp_15_r20/external/pytorch/torch/
functional.py
29 "cdist",
1431 def cdist(x1, x2, p=2.0, compute_mode="use_mm_for_euclid_dist_if_necessary"): function
1452 This function is equivalent to `scipy.spatial.distance.cdist(input,'minkowski', p=p)`
1454 `scipy.spatial.distance.cdist(input, 'hamming') * M`. When :math:`p = \infty`, the closest
1455 scipy function is `scipy.spatial.distance.cdist(xn, lambda x, y: np.abs(x - y).max())`.
1468 >>> torch.cdist(a, b, p=2)
1475 cdist, (x1, x2), x1, x2, p=p, compute_mode=compute_mode
1478 return _VF.cdist(x1, x2, p, None) # type: ignore[attr-defined]
1480 return _VF.cdist(x1, x2, p, 1) # type: ignore[attr-defined]
1482 return _VF.cdist(x1, x2, p, 2) # type: ignore[attr-defined]
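The three branches above map the documented compute_mode strings onto the internal flag passed to _VF.cdist (None, 1, and 2 respectively). A quick sketch of the user-facing effect; the tolerance is an assumption, since the mm-based Euclidean path is a different kernel with slightly different rounding:

    import torch

    x = torch.randn(30, 10)  # more than 25 rows, so the mm path is eligible
    y = torch.randn(40, 10)
    d_mm     = torch.cdist(x, y, compute_mode="use_mm_for_euclid_dist")
    d_direct = torch.cdist(x, y, compute_mode="donot_use_mm_for_euclid_dist")
    assert torch.allclose(d_mm, d_direct, atol=1e-5)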
_meta_registrations.py
3293 lambda: f"cdist only supports at least 2D tensors, X1 got: {x1.dim()}D",
3297 lambda: f"cdist only supports at least 2D tensors, X2 got: {x2.dim()}D",
3305 lambda: f"cdist only supports floating-point dtypes, X1 got: {x1.dtype}",
3309 lambda: f"cdist only supports floating-point dtypes, X2 got: {x2.dtype}",
3311 torch._check(p >= 0, lambda: "cdist only supports non-negative p values")
3327 def meta_cdist_backward(grad, x1, x2, p, cdist): argument
/aosp_15_r20/external/pytorch/torch/ao/pruning/_experimental/pruner/
FPGM_pruner.py
43 self.dist_fn = lambda x: torch.cdist(x, x, p=1)
45 self.dist_fn = lambda x: torch.cdist(x, x, p=2)
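What the two lambdas compute: an all-pairs distance matrix over flattened filters. A sketch of how FPGM-style scoring might then use it; the scoring step is my assumption, not shown in the snippet:

    import torch

    filters = torch.randn(6, 27)  # e.g. 6 conv filters, flattened
    dist = torch.cdist(filters, filters, p=2)  # (6, 6), zero diagonal
    scores = dist.sum(dim=1)      # total distance to all other filters
    print(scores.argmin())        # filter closest to the geometric median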
/aosp_15_r20/external/python/cpython2/Demo/tkinter/guido/
solitaire.py
598 cdist = 999999999
603 if dist < cdist:
605 cdist = dist
/aosp_15_r20/external/libopus/dnn/torch/osce/stndrd/evaluation/
run_nomad.py
36 from scipy.spatial.distance import cdist
85 dist = np.diag(cdist(ref_embeddings, deg_embeddings)) # wasteful
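The "# wasteful" note is because np.diag(cdist(a, b)) materializes the full N x N distance matrix just to read its diagonal; the paired distances can be computed directly in O(N):

    import numpy as np
    from scipy.spatial.distance import cdist

    ref = np.random.randn(100, 16)
    deg = np.random.randn(100, 16)
    paired_full = np.diag(cdist(ref, deg))        # builds all 100*100 distances
    paired = np.linalg.norm(ref - deg, axis=1)    # only the 100 needed ones
    assert np.allclose(paired_full, paired)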
/aosp_15_r20/external/pytorch/aten/src/ATen/
NamedTensorUtils.cpp
465 auto& result = self_batch.unifyFromRightInplace(other_batch, "cdist"); in compute_cdist_outnames()
467 // cdist treats self and other like batches of M x D and N x D tensors, respectively. in compute_cdist_outnames()
474 result.checkUnique("cdist"); in compute_cdist_outnames()
autocast_mode.cpp
285 KERNEL_MPS(cdist, fp32) in TORCH_LIBRARY_IMPL()
366 KERNEL_CPU(cdist, fp32) in TORCH_LIBRARY_IMPL()
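These registrations put cdist on the fp32 autocast list for MPS and CPU (the amp docs below list it too): under autocast, lower-precision inputs are cast up and the result comes back float32. A CPU sketch, assuming the usual autocast casting behavior:

    import torch

    x = torch.randn(3, 5, dtype=torch.bfloat16)
    y = torch.randn(4, 5, dtype=torch.bfloat16)
    with torch.autocast("cpu"):
        d = torch.cdist(x, y)
    print(d.dtype)  # torch.float32, per the fp32 registration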
/aosp_15_r20/external/pytorch/docs/source/
amp.rst
185 ``cdist``,
413 ``cdist``,
torch.rst
528 cdist
conf.py
653 "cdist",
1490 "cdist",
/aosp_15_r20/external/executorch/kernels/portable/cpu/util/
distance_util.cpp
44 p >= 0, "cdist only supports non-negative p values"); in check_cdist_args()
/aosp_15_r20/external/pytorch/test/
test_torch.py
2359 # FIXME: find test suite for pdist and cdist
2390 self.assertEqual(torch.empty(0, 4, device=device), torch.cdist(x, y))
2394 self.assertEqual(torch.empty(2, 0, device=device), torch.cdist(x, y))
2398 self.assertEqual(torch.zeros(2, 3, device=device), torch.cdist(x, y))
2402 self.assertEqual(torch.empty(2, 0, device=device), torch.cdist(x, y))
2421 actual = torch.cdist(x, y, p=2, compute_mode=cm)
2425 actual = torch.cdist(x, y, p=p)
2439 actual = torch.cdist(x, y, p=2, compute_mode=cm)
2443 actual = torch.cdist(x, y, p=p)
2458 z1 = torch.cdist(x1, y1, p=2, compute_mode=cm).mean()
[all …]
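The empty-tensor cases here pin down cdist's shape rule: for inputs (B×)P×M and (B×)R×M the output is (B×)P×R, with an empty P or R propagating to the output. A sketch; the shapes are illustrative, since the tests' setup lines are elided above:

    import torch

    x = torch.randn(2, 0, 4)        # P = 0
    y = torch.randn(2, 3, 4)
    print(torch.cdist(x, y).shape)  # torch.Size([2, 0, 3])

    x = torch.randn(5, 7)
    y = torch.randn(0, 7)           # R = 0
    print(torch.cdist(x, y).shape)  # torch.Size([5, 0])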
test_mps.py
93 'cdist': [torch.float32],
502 'cdist': [torch.float32],
608 'cdist': [torch.float32],
1725 actual = torch.cdist(x, y, p=2, compute_mode=cm)
1733 actual = torch.cdist(x, y, p=2, compute_mode=cm)
1741 actual = torch.cdist(x, y, p=2, compute_mode=cm)
1749 actual = torch.cdist(x, y, p=2, compute_mode=cm)
1757 actual = torch.cdist(x, y, p=2, compute_mode=cm)
1767 actual = torch.cdist(x, y, p=2, compute_mode=cm)
1775 actual = torch.cdist(x, y, p=2, compute_mode=cm)
[all …]
/aosp_15_r20/external/pytorch/functorch/op_analysis/
public_api
406 cdist
annotated_ops
195 cdist, reduction
/aosp_15_r20/external/pytorch/test/ao/sparsity/
H A Dtest_structured_sparsifier.py953 # compute the distance matrix using torch.cdist
1000 expected_dist_matrix_conv1 = torch.cdist(
/aosp_15_r20/external/pytorch/torch/csrc/inductor/aoti_torch/generated/
c_shim_cpu.h
19 …rad, AtenTensorHandle x1, AtenTensorHandle x2, double p, AtenTensorHandle cdist, AtenTensorHandle*…
c_shim_cuda.h
19 …rad, AtenTensorHandle x1, AtenTensorHandle x2, double p, AtenTensorHandle cdist, AtenTensorHandle*…
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/
autocast.cpp
437 case aten::cdist: in handleBlock()
