/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/ |
eigh_expander.cc |
    388 // frobenius_norm = np.sqrt(np.sum(np.square(tl) + np.square(tr) +
    392 // off_diag_norm = np.sqrt(frobenius_norm - diag_norm) * np.sqrt(
    393 // frobenius_norm + diag_norm)
    394 // while off_diag_norm > 1e-6 * frobenius_norm:
    422 // frobenius_norm = np.sqrt(np.sum(np.square(tl) + np.square(tr) +
    426 // off_diag_norm = np.sqrt(frobenius_norm - diag_norm) * np.sqrt(
    427 // frobenius_norm + diag_norm)
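The comments excerpted above describe the Jacobi eigensolver's stopping rule. A minimal numpy sketch of the quantity it checks (off_diag_norm, using the comment's own names): the factored form sqrt(f - d) * sqrt(f + d) equals sqrt(f**2 - d**2), which is the Frobenius norm of the off-diagonal part, without forming the squares explicitly.

    import numpy as np

    def off_diag_norm(a):
        # Frobenius norm of the whole matrix and of its diagonal.
        frobenius_norm = np.linalg.norm(a)
        diag_norm = np.linalg.norm(np.diag(a))
        # sqrt(f - d) * sqrt(f + d) == sqrt(f**2 - d**2): the norm of the
        # off-diagonal entries, factored to avoid squaring f and d directly.
        return np.sqrt(frobenius_norm - diag_norm) * np.sqrt(frobenius_norm + diag_norm)

    a = np.random.randn(4, 4)
    assert np.isclose(off_diag_norm(a), np.linalg.norm(a - np.diag(np.diag(a))))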
|
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/client/lib/ |
svd.cc |
    803 // frobenius_norm = np.linalg.norm(D)
    806 // frobenius_norm - diag_norm) * np.sqrt(frobenius_norm + diag_norm)
    807 // while off_diag_norm > 1e-6 * frobenius_norm and iter < max_iter:
    816 // frobenius_norm = np.linalg.norm(D)
    819 // frobenius_norm - diag_norm) * np.sqrt(frobenius_norm + diag_norm)
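The svd.cc comments use the same criterion but add an iteration cap. A hedged Python sketch of that loop shape, with one_sided_rotation standing in (hypothetically) for the real per-sweep Jacobi update:

    import numpy as np

    def jacobi_sweeps(d, one_sided_rotation, tol=1e-6, max_iter=100):
        # Sweep until the off-diagonal mass is tiny relative to the
        # Frobenius norm, or until max_iter is reached.
        it = 0
        frobenius_norm = np.linalg.norm(d)
        diag_norm = np.linalg.norm(np.diag(d))
        off_diag_norm = np.sqrt(frobenius_norm - diag_norm) * np.sqrt(frobenius_norm + diag_norm)
        while off_diag_norm > tol * frobenius_norm and it < max_iter:
            d = one_sided_rotation(d)  # hypothetical per-sweep update
            frobenius_norm = np.linalg.norm(d)
            diag_norm = np.linalg.norm(np.diag(d))
            off_diag_norm = np.sqrt(frobenius_norm - diag_norm) * np.sqrt(frobenius_norm + diag_norm)
            it += 1
        return d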
|
/aosp_15_r20/external/pytorch/torch/jit/ |
_builtins.py | 113 (torch._VF.frobenius_norm, "aten::frobenius_norm"),
|
/aosp_15_r20/external/pytorch/functorch/op_analysis/ |
public_api | 239 frobenius_norm
|
annotated_ops | 307 frobenius_norm, reduction
|
/aosp_15_r20/external/pytorch/test/mobile/model_test/ |
coverage.yaml |
    250 - aten::frobenius_norm.dim
    251 - aten::frobenius_norm.out
|
model_ops.yaml | 157 aten::frobenius_norm.dim: 1
|
/aosp_15_r20/external/pytorch/test/functorch/ |
test_vmap_registrations.py | 83 "aten::frobenius_norm",
|
/aosp_15_r20/external/pytorch/torch/onnx/ |
symbolic_opset13.py |
    50 @_onnx_symbolic("aten::frobenius_norm")
    52 def frobenius_norm(g: jit_utils.GraphContext, self, dim=None, keepdim=False): function
|
symbolic_opset9.py |
    107 "frobenius_norm",
    5538 return frobenius_norm(g, self, dim, keepdim)
    5544 return frobenius_norm(g, self, dim, keepdim)
    5592 @_onnx_symbolic("aten::frobenius_norm")
    5594 def frobenius_norm(g: jit_utils.GraphContext, self, dim=None, keepdim=False): function
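Both opset files export aten::frobenius_norm; opset 13 needs its own symbolic because ONNX ReduceSum's axes moved from an attribute to an input in opset 13. Whatever the exact graph-building calls, the lowering amounts to Sqrt(ReduceSum(Mul(x, x))); a sketch of that equivalence in eager PyTorch:

    import torch

    x = torch.randn(3, 4)
    # Sqrt(ReduceSum(Mul(x, x))), as the exported graph computes it.
    exported_style = torch.sqrt((x * x).sum(dim=(0, 1)))
    assert torch.allclose(exported_style, torch.linalg.matrix_norm(x, ord="fro"))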
|
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/ |
BatchRulesDecompositions.cpp | 214 OP_DECOMPOSE2(frobenius_norm, dim); in TORCH_LIBRARY_IMPL()
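functorch gives frobenius_norm.dim no batching rule of its own; OP_DECOMPOSE2 re-dispatches it to ops that already have batch rules, which is why vmap works over it. A small sketch:

    import torch

    mats = torch.randn(8, 3, 3)
    # vmap sees the decomposition, not the composite op itself.
    per_matrix = torch.vmap(lambda m: torch.frobenius_norm(m, [0, 1]))(mats)
    assert torch.allclose(per_matrix, torch.linalg.matrix_norm(mats, ord="fro"))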
|
/aosp_15_r20/external/pytorch/torch/ |
functional.py |
    1834 return _VF.frobenius_norm(input, dim=(), keepdim=keepdim)
    1858 return _VF.frobenius_norm(input, _dim, keepdim=keepdim) # type: ignore[arg-type]
    1860 … return _VF.frobenius_norm(input, _dim, keepdim=keepdim, out=out) # type: ignore[arg-type]
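These are the call sites through which torch.norm reaches the op: functional.py routes p="fro" to _VF.frobenius_norm, with and without dim/out. A quick check of that path:

    import torch

    x = torch.randn(4, 5)
    # p="fro" is the branch that ends in _VF.frobenius_norm above.
    assert torch.allclose(torch.norm(x, p="fro"), torch.sqrt((x ** 2).sum()))
    assert torch.allclose(torch.norm(x, p="fro", dim=1), torch.sqrt((x ** 2).sum(dim=1)))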
|
overrides.py | 643 torch.frobenius_norm: lambda input, dim=None, keepdim=False, out=None: -1,
|
/aosp_15_r20/external/pytorch/aten/src/ATen/ |
autocast_mode.cpp | 262 KERNEL_MPS2(frobenius_norm, dim, fp32) in TORCH_LIBRARY_IMPL()
|
autocast_mode.h | 832 _(frobenius_norm, dim) \
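frobenius_norm.dim appears in autocast's fp32 op lists (the MPS kernel registration above, plus the shared macro list), so under autocast the reduction is promoted to float32 rather than run in the low-precision dtype. A hedged sketch; which backends actually apply the policy depends on which kernel lists include the op:

    import torch

    x = torch.randn(4, 4, dtype=torch.bfloat16)
    with torch.autocast("cpu", dtype=torch.bfloat16):
        y = torch.frobenius_norm(x, [0, 1])
    # Under an fp32 autocast policy the input is cast up and the result
    # comes back as float32; without it the result would stay bfloat16.
    print(y.dtype)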
|
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/ |
autocast.cpp | 418 case aten::frobenius_norm: in handleBlock()
|
/aosp_15_r20/external/pytorch/docs/source/ |
conf.py |
    1384 "frobenius_norm",
    1532 "frobenius_norm",
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/ |
LinearAlgebra.cpp |
    3060 Tensor frobenius_norm(const Tensor& self, IntArrayRef dim, bool keepdim) { in frobenius_norm() function
    3064 "at::frobenius_norm is deprecated and it is just left for JIT compatibility. ", in frobenius_norm()
    3084 "at::frobenius_norm is deprecated and it is just left for JIT compatibility. ", in frobenius_norm_out()
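As the warning says, the ATen function survives only for JIT compatibility. Over a dim list it is just an L2 reduction, so torch.linalg.vector_norm (or matrix_norm for the 2-D case) computes the same value; a sketch of the equivalence:

    import torch

    x = torch.randn(3, 3)
    old = torch.frobenius_norm(x, [0, 1])  # deprecated entry point
    new = torch.linalg.vector_norm(x, ord=2, dim=(0, 1))
    assert torch.allclose(old, new)
    assert torch.allclose(old, torch.linalg.matrix_norm(x, ord="fro"))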
|
native_functions.yaml |
    6755 - func: frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
    6759 - func: frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tenso…
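Reading the schema: int[1] dim means a bare int is promoted to a one-element list, keepdim defaults to False, and the .out overload writes into a caller-provided Tensor(a!). For example:

    import torch

    x = torch.randn(2, 3)
    torch.frobenius_norm(x, [0, 1], False).shape  # torch.Size([]) - reduce all dims
    torch.frobenius_norm(x, 1, True).shape        # torch.Size([2, 1]) - int[1] accepts a bare int
    out = torch.empty(2)
    torch.frobenius_norm(x, [1], False, out=out)  # the .out overload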
|
/aosp_15_r20/external/pytorch/torch/_dynamo/ |
trace_rules.py | 1836 "torch.frobenius_norm",
|
/aosp_15_r20/external/pytorch/test/cpp/lazy/ |
test_lazy_ops.cpp |
    2002 torch::Tensor b = torch::frobenius_norm(a, {dim}, /*keepdim=*/false); in TEST_F()
    2006 torch::frobenius_norm(lazy_a, {dim}, /*keepdim=*/false); in TEST_F()
    2016 torch::Tensor b = torch::frobenius_norm(a, dims, /*keepdim=*/false); in TEST_F()
    2020 torch::frobenius_norm(lazy_a, dims, /*keepdim=*/false); in TEST_F()
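The lazy test runs the same reduction eagerly and through the lazy backend and compares the results. A hedged Python analogue of that parity pattern (the real test uses the C++ torch::lazy fixtures; the "lazy" device below assumes a lazy backend is initialized):

    import torch

    a = torch.randn(4, 5)
    eager = torch.frobenius_norm(a, [1], False)
    # With a lazy backend initialized, the traced result should match eager:
    # lazy_b = torch.frobenius_norm(a.to("lazy"), [1], False)
    # assert torch.allclose(eager, lazy_b.cpu())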
|