/aosp_15_r20/external/pytorch/test/inductor/
test_pad_mm.py
    14: from torch._inductor.utils import fresh_inductor_cache, is_big_gpu, run_and_get_code
    52: res2, (code,) = run_and_get_code(compiled_fn, a)
    87: res2, (code,) = run_and_get_code(compiled_fn, a, b)
    114: res2, (code,) = run_and_get_code(compiled_fn, a, b)
    144: res2, (code,) = run_and_get_code(compiled_fn, a, b)
    172: res2, (code,) = run_and_get_code(compiled_fn, a, b)
    210: res2, (code,) = run_and_get_code(compiled_fn, a, b)
    239: res2, (code,) = run_and_get_code(compiled_fn, a, b)
    269: res2, (code,) = run_and_get_code(compiled_fn, a, b)
    298: res2, (code,) = run_and_get_code(compiled_fn, a, b, c)
    [all …]
test_move_constructors_to_cuda.py
    8: from torch._inductor.utils import run_and_get_code
    26: out_compiled, code = run_and_get_code(torch.compile(func), *args)
    80: out, code = run_and_get_code(foo, inp)
    89: out, code = run_and_get_code(foo, inp)
test_benchmark_fusion.py
    9: from torch._inductor.utils import fresh_inductor_cache, is_big_gpu, run_and_get_code
    145: _, out_code = run_and_get_code(foo_c, m, inp)
    163: _, out_code2 = run_and_get_code(foo_c, m, inp)
    226: res, code = run_and_get_code(foo_c, m, inp)
    234: res2, code2 = run_and_get_code(foo_c, m, inp)
test_b2b_gemm.py
    8: from torch._inductor.utils import run_and_get_code
    43: res, (code,) = run_and_get_code(f_opt, A, B, C)
    69: res, (code,) = run_and_get_code(f_opt, A, B, C)
    94: res, (code,) = run_and_get_code(f_opt, A, B, C)
    119: res, (code,) = run_and_get_code(f_opt, A, B, C)
    139: res, (code,) = run_and_get_code(f_opt, A, B, C)
    158: res, (code,) = run_and_get_code(f_opt, A, B, C)
test_pattern_matcher.py
    28: from torch._inductor.utils import run_and_get_code
    57: actual, codes = run_and_get_code(torch.compile(fn), *args)
    114: test, (code,) = run_and_get_code(torch.compile(fn, mode="max-autotune"), *args)
    191: test, (code,) = run_and_get_code(torch.compile(fn), *args)
    487: test, (code,) = run_and_get_code(torch.compile(fn), *args)
    552: test, (code,) = run_and_get_code(torch.compile(fn), *args)
    583: test, (code,) = run_and_get_code(torch.compile(fn), *args)
    610: test, (code,) = run_and_get_code(torch.compile(fn), *args)
    828: result, (code,) = run_and_get_code(torch.compile(fn, fullgraph=True))
    1109: result, (code) = run_and_get_code(fn, torch.randn(8, 8), torch.randn(8, 8))
    [all …]
test_inductor_freezing.py
    16: from torch._inductor.utils import override_lowering, run_and_get_code
    251: out_compiled, code = run_and_get_code(foo, mod, inp)
    317: out, code = run_and_get_code(foo, mod, inp)
    336: out, code = run_and_get_code(foo, mod2, inp)
    464: out_optimized_for_infernece, code = run_and_get_code(foo, mod, x)
    509: out_optimized_for_infernece, code = run_and_get_code(foo, mod, x)
    543: out_optimized_for_infernece, _ = run_and_get_code(
    578: out_optimized_for_infernece, _ = run_and_get_code(
    604: out_optimized_for_infernece, _ = run_and_get_code(
    640: out_optimized_for_infernece, _ = run_and_get_code(
test_multi_kernel.py
    13: from torch._inductor.utils import run_and_get_code
    82: act, wrapper_code = run_and_get_code(compiled_fn, x, -1)
    209: _, (wrapper_code, _) = run_and_get_code(
test_triton_kernels.py
    16: from torch._inductor.utils import run_and_get_code
    373: from torch._inductor.utils import run_and_get_code
    393: test, codes = run_and_get_code(
    412: from torch._inductor.utils import run_and_get_code
    435: test, (code,) = run_and_get_code(torch.compile(call_triton_add), t1, t2)
    441: from torch._inductor.utils import run_and_get_code
    483: test, (code,) = run_and_get_code(torch.compile(call_triton), t)
    954: compiled_out, (code,) = run_and_get_code(torch.compile(f), x)
    1177: compiled_out, sources = run_and_get_code(
    1209: compiled_out, sources = run_and_get_code(
    [all …]
test_torchinductor_strided_blocks.py
    13: from torch._inductor.utils import run_and_get_code
    61: result, code = run_and_get_code(compiled, *args)
test_cuda_repro.py
    19: run_and_get_code,
    725: out, code = run_and_get_code(compiled_sr, input, 0, index, src, "sum")
    1136: out, code = run_and_get_code(cat, inps)
    1218: out, code = run_and_get_code(inner_reduce, a)
    1227: out, code = run_and_get_code(outer_reduce, a)
    1314: actual, code = run_and_get_code(cfn, a, b)
test_decompose_mem_bound_mm.py
    9: from torch._inductor.utils import run_and_get_code
    341: out, code = run_and_get_code(foo, input1, input2)
test_perf.py
    13: from torch._inductor.utils import run_and_get_code
    976: compiled_out, (code,) = run_and_get_code(
    1007: compiled_out, (code,) = run_and_get_code(
    1040: compiled_out, (code,) = run_and_get_code(
test_padding.py
    16: from torch._inductor.utils import ceildiv, run_and_get_code
    477: _, wrapper_codes = run_and_get_code(
test_fused_attention.py
    12: from torch._inductor.utils import run_and_get_code
    79: result2, source_code = run_and_get_code(
    265: _, (source_code,) = run_and_get_code(dot_prod_attention, *args)
test_flex_attention.py
    17: from torch._inductor.utils import run_and_get_code
    917: _, code = run_and_get_code(func, q, k, v, _identity)
    938: _, code = run_and_get_code(func, q, k, v, _identity)
test_max_autotune.py
    25: from torch._inductor.utils import fresh_inductor_cache, run_and_get_code
    356: out, code = run_and_get_code(foo, conv1x1, input_tensor)
test_torchinductor_dynamic_shapes.py
    20: from torch._inductor.utils import run_and_get_code
    175: actual, source_codes = run_and_get_code(fn_c, x)
test_flex_decoding.py
    13: from torch._inductor.utils import run_and_get_code
    948: _, code = run_and_get_code(func, q, k, v, _identity)
test_torchinductor.py
    50: run_and_get_code,
    233: return run_and_get_code(run_with_backward)
    656: result, source_codes = run_and_get_code(func, *args, **kwargs)
    1092: _, code = run_and_get_code(fn, x, y)
    1754: _, code = run_and_get_code(cfn, a, b, 0)
    3227: _, code = run_and_get_code(foo, grouped_conv, input_tensor)
    3242: _, code = run_and_get_code(foo, conv_layer, input_tensor)
    5170: out, source_codes = run_and_get_code(foo_opt, inps[0], inps[1], fn)
    5184: out, source_codes = run_and_get_code(foo_opt, inps[0], inps[1], fn)
    5191: out, source_codes = run_and_get_code(foo_opt, inps[0], inps[1], fn)
    [all …]
test_mkldnn_pattern_matcher.py
    15: from torch._inductor.utils import run_and_get_code
    243: actual, (source_code,) = run_and_get_code(
/aosp_15_r20/external/pytorch/test/distributed/_composable/fsdp/
test_fully_shard_compile.py
    17: from torch._inductor.utils import is_fallback_op, run_and_get_code
    523: _, triton_codes = run_and_get_code(
    711: _, triton_codes = run_and_get_code(
/aosp_15_r20/external/pytorch/torch/_inductor/
utils.py
    1288: def run_and_get_code(fn, *args, **kwargs):  (function definition)
    1308: return run_and_get_code(run_with_backward)
    1360: _, source_codes = run_and_get_code(fn, *args, **kwargs)
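For context, run_and_get_code (defined in utils.py above) executes a function and also captures the Inductor-generated source code, which is how the tests listed in this index inspect compiled output. Below is a minimal usage sketch; the add function, tensor shapes, and the final substring check are illustrative assumptions rather than lines from any listed file, and running it requires a working Inductor backend.

    import torch
    from torch._inductor.utils import run_and_get_code

    def add(a, b):
        return a + b

    compiled = torch.compile(add)
    a = torch.randn(8, 8)
    b = torch.randn(8, 8)

    # Returns the call's result plus a list of generated source strings,
    # one per compiled graph; tests typically unpack a single entry.
    result, (code,) = run_and_get_code(compiled, a, b)

    torch.testing.assert_close(result, add(a, b))
    assert "def call" in code  # illustrative check on the generated wrapper code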
/aosp_15_r20/external/pytorch/test/distributed/
test_functional_api.py
    14: from torch._inductor.utils import run_and_get_code
    710: res, codes = run_and_get_code(run_with_backward)
/aosp_15_r20/external/pytorch/test/dynamo/
test_misc.py
    53: from torch._inductor.utils import run_and_get_code
    9636: actual, source_code = run_and_get_code(pow_opt, x)