/aosp_15_r20/external/pytorch/aten/src/ATen/native/

UpSampleBicubic2d.cpp
    17     #include <ATen/ops/upsample_bicubic2d.h>
    25     TORCH_META_FUNC(upsample_bicubic2d) (    [in TORCH_META_FUNC() argument]
    279    Tensor upsample_bicubic2d(    [in upsample_bicubic2d() function]
    287    return at::upsample_bicubic2d(input, osize, align_corners, scale_h, scale_w);    [in upsample_bicubic2d()]

native_functions.yaml
    12504  - func: upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? sc…
    12506    autogen: upsample_bicubic2d.vec_out
    12610  - func: upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scale…
    12617  - func: upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=…
    12619    structured_delegate: upsample_bicubic2d.out

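For reference, the three overloads declared above can be exercised directly through torch.ops.aten. A minimal sketch, not taken from the indexed sources, assuming a 4D NCHW float tensor:

    import torch

    x = torch.randn(1, 3, 8, 8)

    # "default" overload: explicit [H, W] output size, optional per-axis scales
    y = torch.ops.aten.upsample_bicubic2d.default(x, [16, 16], False)

    # "vec" overload: either output_size or scale_factors may be None
    z = torch.ops.aten.upsample_bicubic2d.vec(x, None, False, [2.0, 2.0])

    # "out" overload: structured kernel writing into a preallocated tensor
    out = torch.empty(1, 3, 16, 16)
    torch.ops.aten.upsample_bicubic2d.out(x, [16, 16], False, out=out)
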
/aosp_15_r20/external/pytorch/test/expect/

HasDecompTest.test_aten_core_operators.expect
    514    aten::upsample_bicubic2d
    515    aten::upsample_bicubic2d.out

/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/

BatchRulesDecompositions.cpp
    340    m.impl("upsample_bicubic2d.vec", native::upsample_bicubic2d);    [in TORCH_LIBRARY_IMPL()]

BatchRulesModules.cpp
    384    EXISTING_BDIM(upsample_bicubic2d);    [in TORCH_LIBRARY_IMPL()]

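The batch rules above are what let vmap add an extra leading batch dimension over this op. A minimal sketch, assuming torch.func.vmap and a 5D input whose dim 0 is the vmapped dimension (shapes are illustrative):

    import torch
    import torch.nn.functional as F
    from torch.func import vmap

    # dim 0 is vmapped; each slice is a regular NCHW batch
    batched = torch.randn(5, 2, 3, 16, 16)

    up = vmap(lambda t: F.interpolate(t, size=(32, 32), mode="bicubic", align_corners=False))
    out = up(batched)  # expected shape: (5, 2, 3, 32, 32)
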
/aosp_15_r20/external/pytorch/test/mobile/model_test/

coverage.yaml
    619    - aten::upsample_bicubic2d.vec
    1001   aten::upsample_bicubic2d.vec: 1

model_ops.yaml
    378    aten::upsample_bicubic2d.vec: 2

/aosp_15_r20/external/pytorch/test/export/

testing.py
    99     aten.upsample_bicubic2d.vec,

/aosp_15_r20/external/pytorch/torch/_decomp/

decompositions.py
    4438   @register_decomposition([aten.upsample_bicubic2d.default, aten.upsample_bicubic2d.out])
    4439   @aten.upsample_bicubic2d.default.py_impl(DispatchKey.Autograd)
    4525   @register_decomposition(aten.upsample_bicubic2d.vec)
    4526   @aten.upsample_bicubic2d.vec.py_impl(DispatchKey.CompositeImplicitAutograd)
    4527   @aten.upsample_bicubic2d.vec.py_impl(DispatchKey.Autograd)

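These registrations populate torch._decomp. A hedged sketch of pulling the decomposition table and tracing through it with make_fx (the example function f and the input shape are illustrative, not from the listing):

    import torch
    from torch._decomp import get_decompositions
    from torch.fx.experimental.proxy_tensor import make_fx

    decomps = get_decompositions([torch.ops.aten.upsample_bicubic2d])

    def f(x):
        return torch.ops.aten.upsample_bicubic2d.default(x, [16, 16], False)

    # the traced graph should express the bicubic resize in terms of simpler aten ops
    gm = make_fx(f, decomposition_table=decomps)(torch.randn(1, 3, 8, 8))
    print(gm.graph)
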
/aosp_15_r20/external/pytorch/functorch/op_analysis/

annotated_ops
    480    upsample_bicubic2d, misc

/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/functional/

upsampling.h
    240    return torch::upsample_bicubic2d(    [in interpolate()]

/aosp_15_r20/external/pytorch/test/

test_decomp.py
    266    (torch.float64, torch.ops.aten.upsample_bicubic2d.vec): (1e-5, 5e-4),
    267    (torch.float64, torch.ops.aten.upsample_bicubic2d.default): (1e-5, 5e-4),

test_nn.py
    9690   …# Partially passes. NotImplementedError: aten::upsample_bicubic2d.out https://github.com/pytorch/p…
    9797   …@expectedFailureMPS # NotImplementedError: aten::upsample_bicubic2d.out https://github.com/pytorc…

/aosp_15_r20/external/pytorch/torch/onnx/

symbolic_opset11.py
    359    "aten::upsample_bicubic2d",
    360    decorate=[symbolic_helper._apply_params("upsample_bicubic2d", 4, "cubic")],

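The opset 11 symbolic registered above handles bicubic resizes during ONNX export. A minimal sketch (module name and output path are illustrative):

    import torch
    import torch.nn.functional as F

    class Bicubic(torch.nn.Module):
        def forward(self, x):
            return F.interpolate(x, scale_factor=2.0, mode="bicubic", align_corners=False)

    torch.onnx.export(
        Bicubic(),
        (torch.randn(1, 3, 16, 16),),
        "bicubic.onnx",          # illustrative output path
        opset_version=11,        # Resize with cubic interpolation needs opset >= 11
    )
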
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/quantization/

helper.cpp
    158    "upsample_bicubic2d",

/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/

register_prim_ops_fulljit.cpp
    558    return at::upsample_bicubic2d(    [in interpolate()]

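This full-JIT prim op backs interpolate inside TorchScript. A small hedged sketch of a scripted function that should reach it when mode is "bicubic" (sizes are arbitrary):

    import torch
    import torch.nn.functional as F

    @torch.jit.script
    def upscale(x: torch.Tensor) -> torch.Tensor:
        # scripted interpolate with a cubic kernel routes to at::upsample_bicubic2d
        return F.interpolate(x, size=[32, 32], mode="bicubic", align_corners=False)

    print(upscale(torch.randn(1, 3, 16, 16)).shape)  # torch.Size([1, 3, 32, 32])
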
/aosp_15_r20/external/pytorch/aten/src/ATen/

autocast_mode.h
    867    _(upsample_bicubic2d) \

/aosp_15_r20/external/pytorch/benchmarks/dynamo/microbenchmarks/operator_inp_logs/timm_train/

crossvit_9_240_training.txt
    202    Operator: aten.upsample_bicubic2d.vec

/aosp_15_r20/external/pytorch/torch/_dynamo/

trace_rules.py
    1073   "torch._C._nn.upsample_bicubic2d",

/aosp_15_r20/external/pytorch/test/inductor/

test_torchinductor.py
    5858   aten.upsample_bicubic2d(a, (128, 128), True),
    5859   aten.upsample_bicubic2d(a, (128, 256), False),
    5876   return aten.upsample_bicubic2d(x, (256, 256), False)

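The inductor test calls the op directly; a comparable sketch that compiles a function mixing align_corners settings (input shape and output sizes are illustrative):

    import torch

    aten = torch.ops.aten

    @torch.compile
    def fn(x):
        return (
            aten.upsample_bicubic2d.default(x, [128, 128], True),
            aten.upsample_bicubic2d.default(x, [128, 256], False),
        )

    a = torch.randn(2, 3, 64, 64)
    up_a, up_b = fn(a)
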
/aosp_15_r20/external/pytorch/tools/autograd/

derivatives.yaml
    2207   - name: upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=…

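Since derivatives.yaml defines a backward for this schema, the op is differentiable; a quick gradcheck sketch in double precision (sizes are arbitrary):

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 2, 5, 5, dtype=torch.float64, requires_grad=True)
    torch.autograd.gradcheck(
        lambda t: F.interpolate(t, size=(7, 7), mode="bicubic", align_corners=True),
        (x,),
    )
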
/aosp_15_r20/external/pytorch/torch/nn/

functional.py
    4594   return torch._C._nn.upsample_bicubic2d(

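This Python wrapper is the usual entry point. A minimal usage sketch (sizes and scale factors are illustrative), assuming the default non-antialiased path that reaches upsample_bicubic2d:

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 3, 224, 224)

    # explicit output size
    y = F.interpolate(x, size=(240, 240), mode="bicubic", align_corners=False)

    # or a scale factor; both forms end up in torch._C._nn.upsample_bicubic2d
    z = F.interpolate(x, scale_factor=0.5, mode="bicubic", align_corners=False)
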