# mypy: allow-untyped-defs
import _collections_abc
import _weakrefset
import abc
import builtins
import collections
import contextlib
import copy
import copyreg
import dataclasses
import enum
import functools
import importlib
import inspect
import linecache
import logging
import multiprocessing
import operator
import os
import posixpath
import random
import re
import selectors
import signal
import sys
import tempfile
import threading
import tokenize
import traceback
import types
import typing
import unittest
import weakref
from collections import defaultdict
from pathlib import Path
from typing import Any, Callable, cast, Dict, List, Optional, Set, Type, Union

import torch
import torch._inductor.test_operators
import torch.distributed
import torch.utils._content_store
from torch.utils import _config_module

from .resume_execution import TORCH_DYNAMO_RESUME_IN_PREFIX
from .utils import getfile, hashable, NP_SUPPORTED_MODULES, unwrap_if_wrapper
from .variables import (
    BuiltinVariable,
    FunctionalCallVariable,
    FunctorchHigherOrderVariable,
    NestedUserFunctionVariable,
    PolyfilledFunctionVariable,
    SkipFunctionVariable,
    TorchInGraphFunctionVariable,
    UserFunctionVariable,
    UserMethodVariable,
)


np: Optional[types.ModuleType] = None
try:
    import numpy as np
except ModuleNotFoundError:
    pass


if typing.TYPE_CHECKING:
    from .variables.base import VariableTracker

70"""
71A note on skip/inline rules:
72
73Dynamo consults this file to determine whether function should be inlined or skipped.
74
75A skip applies at the frame boundary, meaning dynamo either triggers a graph break
76at the beginning of the frame or attempts to trace/inline the whole frame. When skipping
77a frame, recursively called frames are still traced by dynamo unless also skipped.
78
79Skipfiles (skipped at the file level instead of function level) still apply on a
80frame-by-frame boundary as dynamo traces, but apply to all functions in that file.
81
82@skip is a helper decorator that can be applied to your function to cause it to be
83included here.
84
85Dynamo skip/inline rules & priorities are defined as follows:
86* Inline is the default behavior and will be used unless explicitly skipped.
87* Dynamo has two SKIPLIST: BUILTIN_SKIPLIST and THIRDPARTY_SKIPLIST.
88    * BUILTIN_SKIPLIST contains builtin python modules, such as abc, collections, etc.
89    * THIRDPARTY_SKIPLIST contains common third party libraries, such as numpy, pandas, etc.
90* Functions in these two SKIPLISTs are always skipped, except:
91    * They have explicitly defined rule in `manual_torch_name_rule_map`;
92    * The corresponding python module has been put into MOD_INLINELIST.
93* PyTorch(torch) is in the BUILTIN_SKIPLIST by default, but there are many cases
94    where we want inline the functions under torch namespace.
95    We should specify inline for the functions in `manual_torch_name_rule_map` or
96    put the corresponding python module into MOD_INLINELIST to make dynamo inline them.
97* If you call functions under skipped modules/files, Dynamo will wrap these functions
98    as SkipFunctionVariable. There are a few functions(e.g, collections.OrderedDict) that
99    we have special handling at SkipFunctionVariable.call_function.
100
101Overall: *_INLINELIST has precedence over *_SKIPLIST has precedence over DEFAULT (inline)
102
103To figure out what the behavior is, check the following list in order:
104* `manual_torch_name_rule_map` (Inline if YES)
105* MOD_INLINELIST (Inline if YES)
106* BUILTIN_SKIPLIST & THIRDPARTY_SKIPLIST (Skip if YES)
107* Inline by default
108
109In general, if you want to force inline a function or module, please consider adding
110the function's python module to MOD_INLINELIST first.
111Use the `manual_torch_name_rule_map` only when there are other functions under the same module that
112you don't want to inline them.
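
For example, the effective rule for an object can be queried with `check`, which is
defined later in this file (a minimal sketch; `my_helper` is a hypothetical user
function, and `check(fn)` is assumed to return True when the frame would be skipped):

    import abc

    import torch._dynamo.trace_rules as trace_rules

    def my_helper(x):
        return x + 1

    # A plain user function matches none of the lists above, so the
    # default applies and it is inlined (i.e., not skipped).
    trace_rules.check(my_helper)  # -> False

    # abc is in BUILTIN_SKIPLIST, so functions defined in it are skipped.
    trace_rules.check(abc.abstractmethod)  # -> True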
113"""
114
115"""
116Map of function objects to their tracing rules (Dynamo variables).
117* TorchInGraphFunctionVariable: The functions should be put into the FX graph or can be constant folded. E.g.,
118  - torch.add: should be put into the FX graph.
119  - torch.is_floating_point: constant folded.
120* SkipFunctionVariable: The objects should be skipped from tracing.
121* UserFunctionVariable: The functions should be inlined.
122
123For developers: If you add/remove a torch level API, it may trigger failures from
124test/dynamo/test_trace_rules.py:test_torch_name_rule_map_updated. To fix the failures:
125If you are adding a new torch level API or Dynamo implementation:
126* Add the name with the corresponding tracing rule to this map
127  if you are adding a new in graph function or Dynamo implementation for an existing function.
128* Remove the object name from test/dynamo/test_trace_rules.ignored_c_binding_in_graph_function_names if it's there.
129
130If you are removing an existing torch level API:
131* Remove the entry represented the API from this map or test/dynamo/test_trace_rules.ignored_c_binding_in_graph_function_names
132  depends on where it is.
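
Two key formats appear in this map (inferred from the entries below):
* a qualified dotted name, with "#" separating an object from one of its methods, e.g.:

      "torch.distributed.tensor._api.DTensor#from_local": TorchInGraphFunctionVariable,

* a file path plus "#" plus a function name, matching a function by the file that
  defines it, e.g.:

      "torch/testing/_internal/common_fsdp.py#forward": UserFunctionVariable,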

"""
manual_torch_name_rule_map = {
    "torch.onnx.is_in_onnx_export": TorchInGraphFunctionVariable,
    "torch.onnx.operators.shape_as_tensor": TorchInGraphFunctionVariable,
    "torch.overrides.is_tensor_like": TorchInGraphFunctionVariable,
    "torch.jit.is_scripting": TorchInGraphFunctionVariable,
    "torch.jit.is_tracing": TorchInGraphFunctionVariable,
    "torch.jit.annotate": TorchInGraphFunctionVariable,
    "torch.distributed.is_available": TorchInGraphFunctionVariable,
    "torch.distributed.is_initialized": TorchInGraphFunctionVariable,
    "torch.distributed.get_rank": TorchInGraphFunctionVariable,
    "torch.distributed.get_world_size": TorchInGraphFunctionVariable,
    "torch.distributed.tensor._api.DTensor#from_local": TorchInGraphFunctionVariable,
    "torch.distributed.distributed_c10d._get_group_size_by_name": TorchInGraphFunctionVariable,
    "torch.distributed.distributed_c10d._resolve_group_name_by_ranks_and_tag": TorchInGraphFunctionVariable,
    "torch.distributed.distributed_c10d._get_group_tag": TorchInGraphFunctionVariable,
    "torch.distributed.distributed_c10d.get_process_group_ranks": TorchInGraphFunctionVariable,
    "torch._utils.is_compiling": TorchInGraphFunctionVariable,
    "torch.fx._symbolic_trace.is_fx_tracing": TorchInGraphFunctionVariable,
    "torch._dynamo.external_utils.is_compiling": TorchInGraphFunctionVariable,
    "torch.compiler.is_compiling": TorchInGraphFunctionVariable,
    "torch.compiler.is_dynamo_compiling": TorchInGraphFunctionVariable,
    "torch.autograd._profiler_enabled": SkipFunctionVariable,
    "torch._C._to_dlpack": SkipFunctionVariable,
    "torch.to_dlpack": SkipFunctionVariable,
    # We graph break on RNG state setters or getters like
    # `torch.get_rng_state` or `torch.set_rng_state`. These functions
    # are not aten operations and therefore they are completely ignored
    # by the AOT dispatcher. As a result, the AOT graph does not have
    # these setter or getter functions, producing an incorrect graph
    # when it comes to rng states.
    "torch.default_generator#get_state": SkipFunctionVariable,
    "torch._C.Generator#get_state": SkipFunctionVariable,
    "torch.get_rng_state": SkipFunctionVariable,
    "torch.cuda.get_rng_state": SkipFunctionVariable,
    "torch.default_generator#set_state": SkipFunctionVariable,
    "torch._C.Generator#set_state": SkipFunctionVariable,
    "torch.set_rng_state": SkipFunctionVariable,
    "torch.cuda.set_rng_state": SkipFunctionVariable,
    # https://github.com/pytorch/pytorch/issues/107187
    "torch.manual_seed": SkipFunctionVariable,
    # https://github.com/pytorch/pytorch/issues/93501
    "torch.nn.utils.rnn.pack_padded_sequence": SkipFunctionVariable,
    "torch.nn.Parameter": TorchInGraphFunctionVariable,
    "torch.nn.Buffer": TorchInGraphFunctionVariable,
    "torch._nested_tensor_from_mask": SkipFunctionVariable,
    "torch._nested_from_padded": SkipFunctionVariable,
    "torch.nested.nested_tensor_from_jagged": UserFunctionVariable,
    # symbol operators implemented in Python
    "torch.sym_not": TorchInGraphFunctionVariable,
    "torch.sym_float": TorchInGraphFunctionVariable,
    "torch.sym_int": TorchInGraphFunctionVariable,
    "torch.sym_max": TorchInGraphFunctionVariable,
    "torch.sym_min": TorchInGraphFunctionVariable,
    "torch.sym_sqrt": TorchInGraphFunctionVariable,
    "torch.sym_ite": TorchInGraphFunctionVariable,
    "torch.Tensor#_make_wrapper_subclass": SkipFunctionVariable,
    "torch.Tensor#__init__": SkipFunctionVariable,
    "torch.cuda.set_device": SkipFunctionVariable,
    "torch.cuda.current_device": SkipFunctionVariable,
    "torch._C.autocast_decrement_nesting": SkipFunctionVariable,
    "torch._C.autocast_increment_nesting": SkipFunctionVariable,
    "torch.autograd.grad": SkipFunctionVariable,
    "torch.autograd.backward": SkipFunctionVariable,
    "torch._C.clear_autocast_cache": SkipFunctionVariable,
    "torch.distributions.constraints.is_dependent": SkipFunctionVariable,
    "torch.jit.isinstance": SkipFunctionVariable,
    "torch._C.set_anomaly_enabled": SkipFunctionVariable,
    "torch._C.set_autocast_cache_enabled": SkipFunctionVariable,
    "torch._C.set_autocast_cpu_dtype": SkipFunctionVariable,
    "torch._C.set_autocast_cpu_enabled": SkipFunctionVariable,
    "torch._C.set_autocast_enabled": SkipFunctionVariable,
    "torch._C.set_autocast_gpu_dtype": SkipFunctionVariable,
    "torch._C.set_autocast_ipu_dtype": SkipFunctionVariable,
    "torch._C.set_autocast_ipu_enabled": SkipFunctionVariable,
    "torch._C.set_autocast_xla_dtype": SkipFunctionVariable,
    "torch._C.set_autocast_xla_enabled": SkipFunctionVariable,
    "torch.resize_as_": SkipFunctionVariable,
    "torch.resize_as_sparse_": SkipFunctionVariable,
    "torch.get_default_device": TorchInGraphFunctionVariable,
    # functorch/vmap
    "torch._functorch.vmap._check_int_or_none": UserFunctionVariable,
    "torch._functorch.vmap._check_out_dims_is_int_or_int_pytree": UserFunctionVariable,
    "torch._functorch.vmap._check_randomness_arg": UserFunctionVariable,
    "torch._functorch.vmap._chunked_vmap": UserFunctionVariable,
    "torch._functorch.vmap._concat_chunked_outputs": UserFunctionVariable,
    "torch._functorch.vmap._create_batched_inputs": UserFunctionVariable,
    "torch._functorch.vmap._flat_vmap": UserFunctionVariable,
    "torch._functorch.vmap._flatten_chunks_output": UserFunctionVariable,
    "torch._functorch.vmap._get_chunked_inputs": UserFunctionVariable,
    "torch._functorch.vmap._get_name": UserFunctionVariable,
    "torch._functorch.vmap._maybe_remove_batch_dim": UserFunctionVariable,
    "torch._functorch.vmap._num_outputs": UserFunctionVariable,
    "torch._functorch.vmap._process_batched_inputs": UserFunctionVariable,
    "torch._functorch.vmap._unwrap_batched": UserFunctionVariable,
    "torch._functorch.vmap._validate_and_get_batch_size": UserFunctionVariable,
    "torch._functorch.vmap.doesnt_support_saved_tensors_hooks": UserFunctionVariable,
    "torch._functorch.vmap.get_chunk_sizes": UserFunctionVariable,
    # lazy_load_decompositions uses a lock that is not yet supported in Dynamo
    # "torch._functorch.vmap.lazy_load_decompositions": UserFunctionVariable,
235    "torch._functorch.vmap.restore_vmap": UserFunctionVariable,
236    "torch._functorch.apis.vmap": UserFunctionVariable,
237    "torch._functorch.vmap.unwrap_batched": UserFunctionVariable,
238    "torch._functorch.vmap.vmap_impl": FunctorchHigherOrderVariable,
239    "torch._functorch.vmap.wrap_batched": UserFunctionVariable,
240    # functorch/grad
241    "torch._functorch.eager_transforms.grad_impl": FunctorchHigherOrderVariable,
242    "torch._functorch.apis.grad_and_value": UserFunctionVariable,
243    "torch._functorch.eager_transforms._as_tuple": UserFunctionVariable,
244    "torch._functorch.eager_transforms._check_unique_non_empty": UserFunctionVariable,
245    "torch._functorch.eager_transforms._create_differentiable": UserFunctionVariable,
246    "torch._functorch.eager_transforms._slice_argnums": UserFunctionVariable,
247    "torch._functorch.eager_transforms._undo_create_differentiable": UserFunctionVariable,
248    "torch._functorch.eager_transforms._validate_and_wrap_argnum": UserFunctionVariable,
249    "torch._functorch.eager_transforms._validate_and_wrap_argnums": UserFunctionVariable,
250    "torch._functorch.eager_transforms._wrap_all_tensors": UserFunctionVariable,
251    "torch._functorch.eager_transforms._wrap_tensor_for_grad": UserFunctionVariable,
252    # functorch/jacrev
253    "torch._functorch.eager_transforms.jacrev": FunctorchHigherOrderVariable,
254    "torch._functorch.eager_transforms.error_if_complex": UserFunctionVariable,
255    "torch._functorch.eager_transforms._chunked_standard_basis_for_": UserFunctionVariable,
256    "torch._functorch.eager_transforms._safe_zero_index": UserFunctionVariable,
257    # functorch/vjp
258    "torch._functorch.eager_transforms.vjp": FunctorchHigherOrderVariable,
259    "torch._functorch.eager_transforms._vjp_with_argnums": UserFunctionVariable,
260    "torch._functorch.eager_transforms.assert_non_empty_tensor_output": UserFunctionVariable,
261    # functorch/jvp
262    "torch._functorch.eager_transforms._jvp_with_argnums": UserFunctionVariable,
263    "torch._functorch.eager_transforms.jvp": FunctorchHigherOrderVariable,
264    "torch._functorch.eager_transforms._replace_args": UserFunctionVariable,
265    "torch._functorch.eager_transforms.safe_unpack_dual": UserFunctionVariable,
266    "torch._functorch.eager_transforms.assert_non_empty_list_of_tensors": UserFunctionVariable,
267    "torch._functorch.eager_transforms.assert_output_is_tensor_or_tensors": UserFunctionVariable,
268    "torch.autograd.forward_ad.enter_dual_level": UserFunctionVariable,
269    "torch.autograd.forward_ad.exit_dual_level": UserFunctionVariable,
270    "torch.autograd.forward_ad.make_dual": UserFunctionVariable,
271    "torch.autograd.forward_ad.unpack_dual": UserFunctionVariable,
272    # functorch/linearize
273    "torch._functorch.eager_transforms.linearize": FunctorchHigherOrderVariable,
274    # functorch/jacfwd
275    "torch._functorch.eager_transforms.jacfwd": FunctorchHigherOrderVariable,
276    "torch._functorch.eager_transforms._construct_standard_basis_for": UserFunctionVariable,
277    "torch._functorch.eager_transforms.safe_unflatten": UserFunctionVariable,
278    # functorch/hessian
279    "torch._functorch.eager_transforms.hessian": FunctorchHigherOrderVariable,
280    # functional_call
281    "torch._functorch.functional_call.functional_call": FunctionalCallVariable,
282    "torch.nn.utils.stateless._groupby_tensor": TorchInGraphFunctionVariable,
283    # functorch/deprecated
284    "torch._functorch.deprecated.jvp": UserFunctionVariable,
285    "torch._functorch.deprecated.hessian": UserFunctionVariable,
286    "torch._functorch.deprecated.jacfwd": UserFunctionVariable,
287    "torch._functorch.deprecated.jacrev": UserFunctionVariable,
288    "torch._functorch.deprecated.grad": UserFunctionVariable,
289    "torch._functorch.deprecated.grad_and_value": UserFunctionVariable,
290    "torch._functorch.deprecated.vjp": UserFunctionVariable,
291    # everything else
292    "torch._constrain_as_size": UserFunctionVariable,
293    "torch._tensor._convert": UserFunctionVariable,
294    "torch.jit._unwrap_optional": UserFunctionVariable,
295    "torch.backends.mha.get_fastpath_enabled": UserFunctionVariable,
296    "torch._C._functorch._add_batch_dim": TorchInGraphFunctionVariable,
297    "torch._C._functorch._remove_batch_dim": TorchInGraphFunctionVariable,
298    "torch._C._functorch._wrap_for_grad": TorchInGraphFunctionVariable,
299    "torch._C._functorch._unwrap_for_grad": TorchInGraphFunctionVariable,
300    "torch._C._functorch.maybe_current_level": TorchInGraphFunctionVariable,
301    "torch._C._functorch.is_batchedtensor": TorchInGraphFunctionVariable,
302    "torch._dynamo.mark_static": UserFunctionVariable,
303    "torch.fx.experimental.symbolic_shapes.guard_size_oblivious": TorchInGraphFunctionVariable,
304    "torch.cuda._get_device_properties": TorchInGraphFunctionVariable,
305    "torch.utils.hooks.BackwardHook": TorchInGraphFunctionVariable,
306    "torch.sparse_bsc_tensor": SkipFunctionVariable,
307    "torch.sparse_bsr_tensor": SkipFunctionVariable,
308    "torch.sparse_csc_tensor": SkipFunctionVariable,
309    "torch.sparse_csr_tensor": SkipFunctionVariable,
310    "torch.sparse_compressed_tensor": SkipFunctionVariable,
311    "torch._C._autograd._unsafe_set_version_counter": TorchInGraphFunctionVariable,
312    # avoid skipping user defined modules in distributed unit tests
313    "torch/testing/_internal/common_fsdp.py#forward": UserFunctionVariable,
314    f"torch/testing/_internal/common_fsdp.py#{TORCH_DYNAMO_RESUME_IN_PREFIX}": UserFunctionVariable,
315    "torch/testing/_internal/distributed/_tensor/common_dtensor.py#forward": UserFunctionVariable,
316    f"torch/testing/_internal/distributed/_tensor/common_dtensor.py#{TORCH_DYNAMO_RESUME_IN_PREFIX}": UserFunctionVariable,
317    "torch/testing/_internal/common_distributed.py#forward": UserFunctionVariable,
318    f"torch/testing/_internal/common_distributed.py#{TORCH_DYNAMO_RESUME_IN_PREFIX}": UserFunctionVariable,
319}
320
321
# In-graph functions (including constant-foldable functions) that are C bindings
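# For example (a minimal sketch, not executed here; assumes Dynamo constant
# folds math.* calls with constant arguments, per the comment above):
#
#     import math
#
#     @torch.compile(backend="eager")
#     def f(x):
#         return x + math.floor(1.5)  # folded to x + 1 in the captured graph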
torch_c_binding_in_graph_functions = dict.fromkeys(
    [
325        "math.acos",
326        "math.acosh",
327        "math.asin",
328        "math.asinh",
329        "math.atan",
330        "math.atan2",
331        "math.atanh",
332        "math.ceil",
333        "math.comb",
334        "math.copysign",
335        "math.cos",
336        "math.cosh",
337        "math.degrees",
338        "math.dist",
339        "math.erf",
340        "math.erfc",
341        "math.exp",
342        "math.expm1",
343        "math.fabs",
344        "math.factorial",
345        "math.floor",
346        "math.fmod",
347        "math.frexp",
348        "math.fsum",
349        "math.gamma",
350        "math.gcd",
351        "math.hypot",
352        "math.isclose",
353        "math.isfinite",
354        "math.isinf",
355        "math.isnan",
356        "math.isqrt",
357        "math.ldexp",
358        "math.lgamma",
359        "math.log",
360        "math.log10",
361        "math.log1p",
362        "math.log2",
363        "math.modf",
364        "math.nextafter",
365        "math.perm",
366        "math.pow",
367        "math.prod",
368        "math.radians",
369        "math.remainder",
370        "math.sin",
371        "math.sinh",
372        "math.tan",
373        "math.tanh",
374        "math.trunc",
375        "math.ulp",
376        "torch._adaptive_avg_pool2d",
377        "torch._adaptive_avg_pool3d",
378        "torch._add_batch_dim",
379        "torch._add_relu_",
380        "torch._add_relu",
381        "torch._addmm_activation",
382        "torch._aminmax",
383        "torch._amp_foreach_non_finite_check_and_unscale_",
384        "torch._amp_update_scale_",
385        "torch._assert_async",
386        "torch._assert_tensor_metadata",
387        "torch._batch_norm_impl_index",
388        "torch._C._activate_gpu_trace",
389        "torch._C._add_cached_tensor",
390        "torch._C._add_docstr",
391        "torch._C._are_functorch_transforms_active",
392        "torch._C._autograd_init",
393        "torch._C._awaitable_nowait",
394        "torch._C._awaitable_wait",
395        "torch._C._awaitable",
396        "torch._C._backport_for_mobile_from_buffer_to_buffer",
397        "torch._C._backport_for_mobile_from_buffer",
398        "torch._C._backport_for_mobile_to_buffer",
399        "torch._C._backport_for_mobile",
400        "torch._C._broadcast_coalesced",
401        "torch._C._broadcast_out",
402        "torch._C._broadcast",
403        "torch._C._c10d_init",
404        "torch._C._calculate_package_version_based_on_upgraders",
405        "torch._C._can_use_flash_attention",
406        "torch._C._can_use_mem_efficient_attention",
407        "torch._C._can_use_cudnn_attention",
408        "torch._C._check_onnx_proto",
409        "torch._C._check_sparse_tensor_invariants",
410        "torch._C._collect_all",
411        "torch._C._commit_update",
412        "torch._C._compile_graph_to_code_table",
413        "torch._C._construct_CUDA_Tensor_From_Storage_And_Metadata",
414        "torch._C._construct_storage_from_data_pointer",
415        "torch._C._conv_determine_backend_memory_format",
416        "torch._C._cpu._is_avx2_supported",
417        "torch._C._cpu._is_avx512_supported",
418        "torch._C._cpu._is_avx512_vnni_supported",
419        "torch._C._cpu._is_avx512_bf16_supported",
420        "torch._C._cpu._is_amx_tile_supported",
421        "torch._C._cpu._init_amx",
422        "torch._C._crash_if_aten_asan",
423        "torch._C._crash_if_csrc_asan",
424        "torch._C._crash_if_csrc_ubsan",
425        "torch._C._crash_if_debug_asserts_fail",
426        "torch._C._crash_if_vptr_ubsan",
427        "torch._C._create_function_from_graph",
428        "torch._C._create_function_from_trace_with_dict",
429        "torch._C._create_function_from_trace",
430        "torch._C._create_graph_by_tracing",
431        "torch._C._create_module_with_type",
432        "torch._C._create_object_with_type",
433        "torch._C._cuda_attach_out_of_memory_observer",
434        "torch._C._cuda_beginAllocateCurrentStreamToPool",
435        "torch._C._cuda_canDeviceAccessPeer",
436        "torch._C._cuda_changeCurrentAllocator",
437        "torch._C._cuda_checkPoolLiveAllocations",
438        "torch._C._cuda_clearCublasWorkspaces",
439        "torch._C._cuda_cudaCachingAllocator_raw_alloc",
440        "torch._C._cuda_cudaCachingAllocator_raw_delete",
441        "torch._C._cuda_cudaCachingAllocator_set_allocator_settings",
442        "torch._C._cuda_cudaHostAllocator",
443        "torch._C._cuda_customAllocator",
444        "torch._C._cuda_emptyCache",
445        "torch._C._cuda_endAllocateCurrentStreamToPool",
446        "torch._C._cuda_exchangeDevice",
447        "torch._C._cuda_get_conv_benchmark_empty_cache",
448        "torch._C._cuda_get_cudnn_benchmark_limit",
449        "torch._C._cuda_get_sync_debug_mode",
450        "torch._C._cuda_getAllocator",
451        "torch._C._cuda_getAllocatorBackend",
452        "torch._C._cuda_getArchFlags",
453        "torch._C._cuda_getCheckpointState",
454        "torch._C._cuda_getCompiledVersion",
455        "torch._C._cuda_getCurrentBlasHandle",
456        "torch._C._cuda_getCurrentRawStream",
457        "torch._C._cuda_getCurrentStream",
458        "torch._C._cuda_getDefaultStream",
459        "torch._C._cuda_getDevice",
460        "torch._C._cuda_getDeviceCount",
461        "torch._C._cuda_hasPrimaryContext",
462        "torch._C._cuda_init",
463        "torch._C._cuda_ipc_collect",
464        "torch._C._cuda_isCurrentStreamCapturing",
465        "torch._C._cuda_isHistoryEnabled",
466        "torch._C._cuda_isInBadFork",
467        "torch._C._cuda_jiterator_compile_and_launch_kernel",
468        "torch._C._cuda_lock_mutex",
469        "torch._C._cuda_maybeExchangeDevice",
470        "torch._C._cuda_memorySnapshot",
471        "torch._C._cuda_memoryStats",
472        "torch._C._cuda_record_memory_history_legacy",
473        "torch._C._cuda_record_memory_history",
474        "torch._C._cuda_releasePool",
475        "torch._C._cuda_resetAccumulatedMemoryStats",
476        "torch._C._cuda_resetPeakMemoryStats",
477        "torch._C._cuda_set_cudnn_benchmark_limit",
478        "torch._C._cuda_set_sync_debug_mode",
479        "torch._C._cuda_setCheckpointPoolState",
480        "torch._C._cuda_setDevice",
481        "torch._C._cuda_setMemoryFraction",
482        "torch._C._cuda_setStream",
483        "torch._C._cuda_sleep",
484        "torch._C._cuda_synchronize",
485        "torch._C._cuda_unlock_mutex",
486        "torch._C._cudnn_set_conv_benchmark_empty_cache",
487        "torch._C._cudnn.getCompileVersion",
488        "torch._C._cudnn.getRuntimeVersion",
489        "torch._C._cudnn.getVersionInt",
490        "torch._C._current_autograd_node",
491        "torch._C._current_graph_task_execution_order",
492        "torch._C._current_graph_task_id",
493        "torch._C._cxx_flags",
494        "torch._C._debug_get_fusion_group_inlining",
495        "torch._C._debug_only_are_vmap_fallback_warnings_enabled",
496        "torch._C._debug_only_display_vmap_fallback_warnings",
497        "torch._C._debug_set_autodiff_subgraph_inlining",
498        "torch._C._debug_set_fusion_group_inlining",
499        "torch._C._demangle",
500        "torch._C._disabled_torch_dispatch_impl",
501        "torch._C._disabled_torch_function_impl",
502        "torch._C._dispatch_call_boxed",
503        "torch._C._dispatch_check_all_invariants",
504        "torch._C._dispatch_check_invariants",
505        "torch._C._dispatch_dump_table",
506        "torch._C._dispatch_dump",
507        "torch._C._dispatch_find_dangling_impls",
508        "torch._C._dispatch_find_schema_or_throw",
509        "torch._C._dispatch_get_all_op_names",
510        "torch._C._dispatch_get_backend_keyset_from_autograd",
511        "torch._C._dispatch_get_registrations_for_dispatch_key",
512        "torch._C._dispatch_has_backend_fallback",
513        "torch._C._dispatch_has_computed_kernel_for_dispatch_key",
514        "torch._C._dispatch_has_kernel_for_any_dispatch_key",
515        "torch._C._dispatch_has_kernel_for_dispatch_key",
516        "torch._C._dispatch_has_kernel",
517        "torch._C._dispatch_is_alias_key",
518        "torch._C._dispatch_is_included_in_alias",
519        "torch._C._dispatch_is_main_interpreter",
520        "torch._C._dispatch_isTensorSubclassLike",
521        "torch._C._dispatch_key_for_device",
522        "torch._C._dispatch_key_name",
523        "torch._C._dispatch_key_parse",
524        "torch._C._dispatch_key_set",
525        "torch._C._dispatch_keys",
526        "torch._C._dispatch_keyset_full_after",
527        "torch._C._dispatch_keyset_full",
528        "torch._C._dispatch_keyset_to_string",
529        "torch._C._dispatch_library",
530        "torch._C._dispatch_num_backends",
531        "torch._C._dispatch_print_registrations_for_dispatch_key",
532        "torch._C._dispatch_pystub",
533        "torch._C._dispatch_set_report_error_callback",
534        "torch._C._dispatch_tls_is_dispatch_key_excluded",
535        "torch._C._dispatch_tls_is_dispatch_key_included",
536        "torch._C._dispatch_tls_local_exclude_set",
537        "torch._C._dispatch_tls_local_include_set",
538        "torch._C._dispatch_tls_set_dispatch_key_excluded",
539        "torch._C._dispatch_tls_set_dispatch_key_included",
540        "torch._C._dist_autograd_init",
541        "torch._C._dump_local_tls_set",
542        "torch._C._dump_upgraders_map",
543        "torch._C._enable_mobile_interface_call_export",
544        "torch._C._enter_dual_level",
545        "torch._C._error_if_any_worker_fails",
546        "torch._C._exit_dual_level",
547        "torch._C._export_operator_list",
548        "torch._C._export_opnames",
549        "torch._C._faulty_agent_init",
550        "torch._C._fft.fft_fft",
551        "torch._C._fft.fft_fft2",
552        "torch._C._fft.fft_fftfreq",
553        "torch._C._fft.fft_fftn",
554        "torch._C._fft.fft_fftshift",
555        "torch._C._fft.fft_hfft",
556        "torch._C._fft.fft_hfft2",
557        "torch._C._fft.fft_hfftn",
558        "torch._C._fft.fft_ifft",
559        "torch._C._fft.fft_ifft2",
560        "torch._C._fft.fft_ifftn",
561        "torch._C._fft.fft_ifftshift",
562        "torch._C._fft.fft_ihfft",
563        "torch._C._fft.fft_ihfft2",
564        "torch._C._fft.fft_ihfftn",
565        "torch._C._fft.fft_irfft",
566        "torch._C._fft.fft_irfft2",
567        "torch._C._fft.fft_irfftn",
568        "torch._C._fft.fft_rfft",
569        "torch._C._fft.fft_rfft2",
570        "torch._C._fft.fft_rfftfreq",
571        "torch._C._fft.fft_rfftn",
572        "torch._C._free_And_Remove_DeleterFn",
573        "torch._C._freeze_module",
574        "torch._C._from_dlpack",
575        "torch._C._functionality_to_backend_keys",
576        "torch._C._functionalization_reapply_views_tls",
577        "torch._C._fuse_to_static_module",
578        "torch._C._gather_out",
579        "torch._C._gather",
580        "torch._C._generate_upgraders_graph",
581        "torch._C._get_autograd_fallback_mode",
582        "torch._C._get_backcompat_broadcast_warn",
583        "torch._C._get_backcompat_keepdim_warn",
584        "torch._C._get_blas_preferred_backend",
585        "torch._C._get_caught_jit_exception_class_name",
586        "torch._C._get_caught_jit_exception_original_msg",
587        "torch._C._get_constant_bool_symnode",
588        "torch._C._get_cpp_backtrace",
589        "torch._C._get_cpu_capability",
590        "torch._C._get_cublas_allow_bf16_reduced_precision_reduction",
591        "torch._C._get_cublas_allow_fp16_reduced_precision_reduction",
592        "torch._C._get_cublas_allow_tf32",
593        "torch._C._get_cudnn_allow_tf32",
594        "torch._C._get_cudnn_benchmark",
595        "torch._C._get_cudnn_deterministic",
596        "torch._C._get_cudnn_enabled",
597        "torch._C._get_custom_class_python_wrapper",
598        "torch._C._get_default_device",
599        "torch._C._get_deterministic_algorithms_warn_only",
600        "torch._C._get_deterministic_algorithms",
601        "torch._C._get_deterministic_fill_uninitialized_memory",
602        "torch._C._get_dispatch_mode",
603        "torch._C._get_dispatch_stack_at",
604        "torch._C._get_file_format",
605        "torch._C._get_flash_sdp_enabled",
606        "torch._C._get_float32_matmul_precision",
607        "torch._C._get_function_stack_at",
608        "torch._C._get_graph_executor_optimize",
609        "torch._C._get_linalg_preferred_backend",
610        "torch._C._get_math_sdp_enabled",
611        "torch._C._get_math_sdp_allow_fp16_bf16_reduction",
612        "torch._C._get_max_operator_version",
613        "torch._C._get_mem_efficient_sdp_enabled",
614        "torch._C._get_mkldnn_enabled",
615        "torch._C._get_cudnn_sdp_enabled",
616        "torch._C._set_sdp_use_cudnn",
617        "torch._C._get_mobile_model_contained_types_from_buffer",
618        "torch._C._get_mobile_model_contained_types",
619        "torch._C._get_model_bytecode_version_from_buffer",
620        "torch._C._get_model_bytecode_version",
621        "torch._C._get_model_extra_files_from_buffer",
622        "torch._C._get_model_extra_files",
623        "torch._C._get_model_ops_and_info_from_buffer",
624        "torch._C._get_model_ops_and_info",
625        "torch._C._get_module_info_from_flatbuffer",
626        "torch._C._get_nnpack_enabled",
627        "torch._C._get_obj_in_tls",
628        "torch._C._get_operation_overload",
629        "torch._C._get_operator_version_map",
630        "torch._C._get_privateuse1_backend_name",
631        "torch._C._get_qengine",
632        "torch._C._get_schema",
633        "torch._C._get_nested_int",
634        "torch._C._get_tensor_metadata",
635        "torch._C._get_tracing_state",
636        "torch._C._get_upgrader_ranges",
637        "torch._C._get_upgraders_entry_map",
638        "torch._C._get_upgraders_map_size",
639        "torch._C._get_value_trace",
640        "torch._C._get_version_calculator_flag",
641        "torch._C._get_warnAlways",
642        "torch._C._graph_pool_handle",
643        "torch._C._group_tensors_by_device_and_dtype",
644        "torch._C._hack_do_not_use_clone_module_with_class",
645        "torch._C._has_distributed",
646        "torch._C._has_Standard_Deleter",
647        "torch._C._has_storage",
648        "torch._C._has_tensorexpr_cpp_tests",
649        "torch._C._run_tensorexpr_cpp_tests",
650        "torch._C._has_torch_function_unary",
651        "torch._C._has_torch_function_variadic",
652        "torch._C._has_torch_function",
653        "torch._C._import_ir_module_from_package",
654        "torch._C._increment_version",
655        "torch._C._infer_size",
656        "torch._C._init_names",
657        "torch._C._initExtension",
658        "torch._C._is_alias_of",
659        "torch._C._is_any_autocast_enabled",
660        "torch._C._is_cached_tensor",
661        "torch._C._is_flash_attention_available",
662        "torch._C._is_fwd_grad_enabled",
663        "torch._C._is_key_in_tls",
664        "torch._C._is_multithreading_enabled",
665        "torch._C._is_torch_function_enabled",
666        "torch._C._is_torch_function_mode_enabled",
667        "torch._C._is_tracing",
668        "torch._C._is_view_replay_enabled",
669        "torch._C._is_xnnpack_enabled",
670        "torch._C._itt.is_available",
671        "torch._C._itt.mark",
672        "torch._C._itt.rangePop",
673        "torch._C._itt.rangePush",
674        "torch._C._ivalue_debug_python_object",
675        "torch._C._ivalue_tags_match",
676        "torch._C._jit_assert_is_instance",
677        "torch._C._jit_can_fuse_on_cpu_legacy",
678        "torch._C._jit_can_fuse_on_cpu",
679        "torch._C._jit_can_fuse_on_gpu",
680        "torch._C._jit_cat_wo_conditionals",
681        "torch._C._jit_check_alias_annotation",
682        "torch._C._jit_clear_class_registry",
683        "torch._C._jit_debug_fuser_num_cached_kernel_specs",
684        "torch._C._jit_debug_module_iterators",
685        "torch._C._jit_decay_packed_param_input_types",
686        "torch._C._jit_decomposition_graph_for_node",
687        "torch._C._jit_differentiate",
688        "torch._C._jit_erase_non_input_shape_information",
689        "torch._C._jit_flatten",
690        "torch._C._jit_fuser_get_fused_kernel_code",
691        "torch._C._jit_get_all_schemas",
692        "torch._C._jit_get_custom_class_schemas",
693        "torch._C._jit_get_emit_hooks",
694        "torch._C._jit_get_inline_everything_mode",
695        "torch._C._jit_get_logging_option",
696        "torch._C._jit_get_num_profiled_runs",
697        "torch._C._jit_get_operation",
698        "torch._C._jit_get_schemas_for_operator",
699        "torch._C._jit_get_te_cuda_pointwise_block_count",
700        "torch._C._jit_get_te_cuda_pointwise_block_size",
701        "torch._C._jit_get_te_cuda_pointwise_loop_levels",
702        "torch._C._jit_get_te_generate_block_code",
703        "torch._C._jit_get_te_must_use_llvm_cpu",
704        "torch._C._jit_get_tracer_state_warn",
705        "torch._C._jit_has_cpp_tests",
706        "torch._C._jit_init",
707        "torch._C._jit_interpret_graph",
708        "torch._C._jit_is_onnx_log_enabled",
709        "torch._C._jit_is_script_object",
710        "torch._C._jit_llga_enabled",
711        "torch._C._jit_nvfuser_can_be_enabled",
712        "torch._C._jit_nvfuser_clear_comparison_callback",
713        "torch._C._jit_nvfuser_enabled",
714        "torch._C._jit_nvfuser_horizontal_mode",
715        "torch._C._jit_nvfuser_set_comparison_callback",
716        "torch._C._jit_nvfuser_single_node_mode",
717        "torch._C._jit_object_is_non_holding",
718        "torch._C._jit_onnx_convert_pattern_from_subblock",
719        "torch._C._jit_onnx_create_full_scope_name",
720        "torch._C._jit_onnx_list_model_parameters",
721        "torch._C._jit_onnx_log",
722        "torch._C._jit_opt_conditionals",
723        "torch._C._jit_override_can_fuse_on_cpu_legacy",
724        "torch._C._jit_override_can_fuse_on_cpu",
725        "torch._C._jit_override_can_fuse_on_gpu",
726        "torch._C._jit_pass_autocast",
727        "torch._C._jit_pass_batch_mm",
728        "torch._C._jit_pass_canonicalize_graph_fuser_ops",
729        "torch._C._jit_pass_canonicalize",
730        "torch._C._jit_pass_complete_shape_analysis",
731        "torch._C._jit_pass_concat_frozen_linear",
732        "torch._C._jit_pass_constant_loop_unrolling",
733        "torch._C._jit_pass_constant_pooling",
734        "torch._C._jit_pass_constant_propagation_immutable_types",
735        "torch._C._jit_pass_constant_propagation",
736        "torch._C._jit_pass_convert_frozen_ops_to_mkldnn",
737        "torch._C._jit_pass_create_autodiff_subgraphs",
738        "torch._C._jit_pass_create_functional_graphs",
739        "torch._C._jit_pass_cse",
740        "torch._C._jit_pass_custom_pattern_based_rewrite_graph",
741        "torch._C._jit_pass_custom_pattern_based_rewrite",
742        "torch._C._jit_pass_dbr_quant_remove_redundant_aliases",
743        "torch._C._jit_pass_dce_allow_deleting_nodes_with_side_effects",
744        "torch._C._jit_pass_dce",
745        "torch._C._jit_pass_decompose_ops",
746        "torch._C._jit_pass_dedup_module_uses",
747        "torch._C._jit_pass_erase_number_types",
748        "torch._C._jit_pass_erase_shape_information",
749        "torch._C._jit_pass_filter_non_tensor_arguments",
750        "torch._C._jit_pass_fixup_onnx_controlflow_node",
751        "torch._C._jit_pass_fold_convbn",
752        "torch._C._jit_pass_fold_frozen_conv_add_or_sub",
753        "torch._C._jit_pass_fold_frozen_conv_bn",
754        "torch._C._jit_pass_fold_frozen_conv_mul_or_div",
755        "torch._C._jit_pass_fold_frozen_linear_bn",
756        "torch._C._jit_pass_fold_prepacking_ops",
757        "torch._C._jit_pass_functional_to_inplace_activation",
758        "torch._C._jit_pass_fuse_add_relu",
759        "torch._C._jit_pass_fuse_addmm",
760        "torch._C._jit_pass_fuse_clamp_w_prepacked_linear_conv",
761        "torch._C._jit_pass_fuse_frozen_conv_add_relu",
762        "torch._C._jit_pass_fuse_linear",
763        "torch._C._jit_pass_fuse_quantized_add_relu",
764        "torch._C._jit_pass_fuse_tensorexprs",
765        "torch._C._jit_pass_fuse",
766        "torch._C._jit_pass_inline_fork_wait",
767        "torch._C._jit_pass_inline_functional_graphs",
768        "torch._C._jit_pass_inline",
769        "torch._C._jit_pass_inplace_to_functional_activation",
770        "torch._C._jit_pass_insert_observer_method_for_ondevice_ptq",
771        "torch._C._jit_pass_insert_observers",
772        "torch._C._jit_pass_insert_prepack_unpack",
773        "torch._C._jit_pass_insert_prepacked_ops",
774        "torch._C._jit_pass_insert_quant_dequant_for_ondevice_ptq",
775        "torch._C._jit_pass_insert_quant_dequant",
776        "torch._C._jit_pass_integer_value_refinement",
777        "torch._C._jit_pass_lint",
778        "torch._C._jit_pass_loop_unrolling",
779        "torch._C._jit_pass_lower_all_tuples",
780        "torch._C._jit_pass_lower_graph",
781        "torch._C._jit_pass_metal_fold_prepacking_ops",
782        "torch._C._jit_pass_metal_fuse_clamp_w_prepacked_conv",
783        "torch._C._jit_pass_metal_insert_prepacked_ops",
784        "torch._C._jit_pass_metal_optimize_for_mobile",
785        "torch._C._jit_pass_onnx_assign_output_shape",
786        "torch._C._jit_pass_onnx_assign_scoped_names_for_node_and_value",
787        "torch._C._jit_pass_onnx_autograd_function_process",
788        "torch._C._jit_pass_onnx_block",
789        "torch._C._jit_pass_onnx_cast_all_constant_to_floating",
790        "torch._C._jit_pass_onnx_clear_scope_records",
791        "torch._C._jit_pass_onnx_constant_fold",
792        "torch._C._jit_pass_onnx_deduplicate_initializers",
793        "torch._C._jit_pass_onnx_eliminate_unused_items",
794        "torch._C._jit_pass_onnx_eval_peephole",
795        "torch._C._jit_pass_onnx_function_extraction",
796        "torch._C._jit_pass_onnx_function_substitution",
797        "torch._C._jit_pass_onnx_graph_shape_type_inference",
798        "torch._C._jit_pass_onnx_lint",
799        "torch._C._jit_pass_onnx_node_shape_type_inference",
800        "torch._C._jit_pass_onnx_peephole",
801        "torch._C._jit_pass_onnx_preprocess_caffe2",
802        "torch._C._jit_pass_onnx_preprocess",
803        "torch._C._jit_pass_onnx_quantization_insert_permutes",
804        "torch._C._jit_pass_onnx_remove_inplace_ops_for_onnx",
805        "torch._C._jit_pass_onnx_remove_print",
806        "torch._C._jit_pass_onnx_scalar_type_analysis",
807        "torch._C._jit_pass_onnx_set_dynamic_input_shape",
808        "torch._C._jit_pass_onnx_track_scope_attributes",
809        "torch._C._jit_pass_onnx_unpack_quantized_weights",
810        "torch._C._jit_pass_onnx",
811        "torch._C._jit_pass_optimize_for_inference",
812        "torch._C._jit_pass_optimize_for_mobile",
813        "torch._C._jit_pass_optimize_frozen_graph",
814        "torch._C._jit_pass_pattern_based_rewrite",
815        "torch._C._jit_pass_peephole_list_idioms",
816        "torch._C._jit_pass_peephole",
817        "torch._C._jit_pass_prepare_division_for_onnx",
818        "torch._C._jit_pass_propagate_device",
819        "torch._C._jit_pass_propagate_dtype",
820        "torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute",
821        "torch._C._jit_pass_propagate_shapes_on_graph",
822        "torch._C._jit_pass_quant_finalize_for_ondevice_ptq",
823        "torch._C._jit_pass_quant_finalize",
824        "torch._C._jit_pass_quant_fusion",
825        "torch._C._jit_pass_refine_integer_values",
826        "torch._C._jit_pass_refine_tuple_types",
827        "torch._C._jit_pass_remove_dropout",
828        "torch._C._jit_pass_remove_expands",
829        "torch._C._jit_pass_remove_inplace_ops",
830        "torch._C._jit_pass_remove_mutation",
831        "torch._C._jit_pass_replace_old_ops_with_upgraders",
832        "torch._C._jit_pass_replicate_dequantize",
833        "torch._C._jit_pass_run_decompositions",
834        "torch._C._jit_pass_specialize_autogradzero",
835        "torch._C._jit_pass_swap_functional_linear",
836        "torch._C._jit_pass_transform_conv1d_to_conv2d",
837        "torch._C._jit_pass_transpose_frozen_linear",
838        "torch._C._jit_pass_vulkan_fold_prepacking_ops",
839        "torch._C._jit_pass_vulkan_fuse_clamp_w_prepacked_conv",
840        "torch._C._jit_pass_vulkan_insert_prepacked_ops",
841        "torch._C._jit_pass_vulkan_optimize_for_mobile",
842        "torch._C._jit_register_decomposition_for_schema",
843        "torch._C._jit_register_shape_compute_graph_for_node",
844        "torch._C._jit_resolve_packet",
845        "torch._C._jit_run_cpp_tests",
846        "torch._C._jit_script_class_compile",
847        "torch._C._jit_script_compile_overload",
848        "torch._C._jit_script_compile",
849        "torch._C._jit_script_interface_compile",
850        "torch._C._jit_set_autocast_mode",
851        "torch._C._jit_set_bailout_depth",
852        "torch._C._jit_set_emit_hooks",
853        "torch._C._jit_set_fusion_strategy",
854        "torch._C._jit_set_inline_everything_mode",
855        "torch._C._jit_set_llga_enabled",
856        "torch._C._jit_set_logging_option",
857        "torch._C._jit_set_logging_stream",
858        "torch._C._jit_set_num_profiled_runs",
859        "torch._C._jit_set_nvfuser_enabled",
860        "torch._C._jit_set_nvfuser_guard_mode",
861        "torch._C._jit_set_nvfuser_horizontal_mode",
862        "torch._C._jit_set_nvfuser_single_node_mode",
863        "torch._C._jit_set_nvfuser_skip_node_kind",
864        "torch._C._jit_set_onnx_log_enabled",
865        "torch._C._jit_set_onnx_log_output_stream",
866        "torch._C._jit_set_profiling_executor",
867        "torch._C._jit_set_profiling_mode",
868        "torch._C._jit_set_symbolic_shapes_test_mode",
869        "torch._C._jit_set_te_cuda_pointwise_block_count",
870        "torch._C._jit_set_te_cuda_pointwise_block_size",
871        "torch._C._jit_set_te_cuda_pointwise_loop_levels",
872        "torch._C._jit_set_te_generate_block_code",
873        "torch._C._jit_set_te_must_use_llvm_cpu",
874        "torch._C._jit_set_texpr_dynamic_shape_enabled",
875        "torch._C._jit_set_texpr_fuser_enabled",
876        "torch._C._jit_set_texpr_reductions_enabled",
877        "torch._C._jit_set_tracer_state_warn",
878        "torch._C._jit_set_utf8_decoding_ignore",
879        "torch._C._jit_shape_compute_graph_for_node",
880        "torch._C._jit_symbolic_shapes_test_mode_enabled",
881        "torch._C._jit_texpr_dynamic_shape_enabled",
882        "torch._C._jit_texpr_fallback_allowed",
883        "torch._C._jit_texpr_fuser_enabled",
884        "torch._C._jit_texpr_reductions_enabled",
885        "torch._C._jit_texpr_set_fallback_allowed",
886        "torch._C._jit_to_backend_selective",
887        "torch._C._jit_to_backend",
888        "torch._C._jit_to_static_module",
889        "torch._C._jit_trace_graph",
890        "torch._C._jit_trace_module",
891        "torch._C._jit_tree_views.FalseLiteral",
892        "torch._C._jit_tree_views.NoneLiteral",
893        "torch._C._jit_tree_views.TrueLiteral",
894        "torch._C._jit_try_infer_type",
895        "torch._C._jit_unflatten",
896        "torch._C._last_executed_optimized_graph",
897        "torch._C._len_torch_dispatch_stack",
898        "torch._C._len_torch_function_stack",
899        "torch._C._linalg._linalg_eigvals",
900        "torch._C._linalg.linalg_cholesky_ex",
901        "torch._C._linalg.linalg_cholesky",
902        "torch._C._linalg.linalg_cond",
903        "torch._C._linalg.linalg_cross",
904        "torch._C._linalg.linalg_det",
905        "torch._C._linalg.linalg_diagonal",
906        "torch._C._linalg.linalg_eig",
907        "torch._C._linalg.linalg_eigh",
908        "torch._C._linalg.linalg_eigvals",
909        "torch._C._linalg.linalg_eigvalsh",
910        "torch._C._linalg.linalg_householder_product",
911        "torch._C._linalg.linalg_inv_ex",
912        "torch._C._linalg.linalg_inv",
913        "torch._C._linalg.linalg_ldl_factor_ex",
914        "torch._C._linalg.linalg_ldl_factor",
915        "torch._C._linalg.linalg_ldl_solve",
916        "torch._C._linalg.linalg_lstsq",
917        "torch._C._linalg.linalg_lu_factor_ex",
918        "torch._C._linalg.linalg_lu_factor",
919        "torch._C._linalg.linalg_lu_solve",
920        "torch._C._linalg.linalg_lu",
921        "torch._C._linalg.linalg_matmul",
922        "torch._C._linalg.linalg_matrix_exp",
923        "torch._C._linalg.linalg_matrix_norm",
924        "torch._C._linalg.linalg_matrix_power",
925        "torch._C._linalg.linalg_matrix_rank",
926        "torch._C._linalg.linalg_multi_dot",
927        "torch._C._linalg.linalg_norm",
928        "torch._C._linalg.linalg_pinv",
929        "torch._C._linalg.linalg_qr",
930        "torch._C._linalg.linalg_slogdet",
931        "torch._C._linalg.linalg_solve_ex",
932        "torch._C._linalg.linalg_solve_triangular",
933        "torch._C._linalg.linalg_solve",
934        "torch._C._linalg.linalg_svd",
935        "torch._C._linalg.linalg_svdvals",
936        "torch._C._linalg.linalg_tensorinv",
937        "torch._C._linalg.linalg_tensorsolve",
938        "torch._C._linalg.linalg_vander",
939        "torch._C._linalg.linalg_vecdot",
940        "torch._C._linalg.linalg_vector_norm",
941        "torch._C._llvm_enabled",
942        "torch._C._load_for_lite_interpreter_from_buffer",
943        "torch._C._load_for_lite_interpreter",
944        "torch._C._load_jit_module_from_bytes",
945        "torch._C._load_jit_module_from_file",
946        "torch._C._load_mobile_module_from_bytes",
947        "torch._C._load_mobile_module_from_file",
948        "torch._C._log_api_usage_metadata",
949        "torch._C._log_api_usage_once",
950        "torch._C._logging_set_logger",
951        "torch._C._meta_in_tls_dispatch_include",
952        "torch._C._mps_acquireEvent",
953        "torch._C._mps_currentAllocatedMemory",
954        "torch._C._mps_deviceSynchronize",
955        "torch._C._mps_driverAllocatedMemory",
956        "torch._C._mps_recommendedMaxMemory",
957        "torch._C._mps_elapsedTimeOfEvents",
958        "torch._C._mps_emptyCache",
959        "torch._C._mps_get_default_generator",
960        "torch._C._mps_is_available",
961        "torch._C._mps_is_in_bad_fork",
962        "torch._C._mps_is_on_macos_13_or_newer",
963        "torch._C._mps_profilerStartTrace",
964        "torch._C._mps_profilerStopTrace",
965        "torch._C._mps_queryEvent",
966        "torch._C._mps_recordEvent",
967        "torch._C._mps_releaseEvent",
968        "torch._C._mps_setMemoryFraction",
969        "torch._C._mps_synchronizeEvent",
970        "torch._C._mps_waitForEvent",
971        "torch._C._multiprocessing_init",
972        "torch._C._nccl_all_gather",
973        "torch._C._nccl_all_reduce",
974        "torch._C._nccl_broadcast",
975        "torch._C._nccl_init_rank",
976        "torch._C._nccl_reduce_scatter",
977        "torch._C._nccl_reduce",
978        "torch._C._nccl_unique_id",
979        "torch._C._nccl_version_suffix",
980        "torch._C._nccl_version",
981        "torch._C._nested.nested_tensor",
982        "torch._C._nested.nested_to_padded_tensor",
983        "torch._C._new_symbolic_shape_symbol",
984        "torch._C._nn_module_to_mobile",
985        "torch._C._nn._conv_depthwise2d",
986        "torch._C._nn._pad_circular",
987        "torch._C._nn._pad_enum",
988        "torch._C._nn._parse_to",
989        "torch._C._nn._test_ambiguous_defaults",
990        "torch._C._nn._test_optional_filled_intlist",
991        "torch._C._nn._test_optional_floatlist",
992        "torch._C._nn._test_optional_intlist",
993        "torch._C._nn._test_string_default",
994        "torch._C._nn._test_warn_in_autograd",
995        "torch._C._nn._upsample_bicubic2d_aa",
996        "torch._C._nn._upsample_bilinear2d_aa",
997        "torch._C._nn._upsample_nearest_exact1d",
998        "torch._C._nn._upsample_nearest_exact2d",
999        "torch._C._nn._upsample_nearest_exact3d",
1000        "torch._C._nn.adaptive_avg_pool2d",
1001        "torch._C._nn.adaptive_avg_pool3d",
1002        "torch._C._nn.adaptive_max_pool2d",
1003        "torch._C._nn.adaptive_max_pool3d",
1004        "torch._C._nn.avg_pool2d",
1005        "torch._C._nn.avg_pool3d",
1006        "torch._C._nn.binary_cross_entropy",
1007        "torch._C._nn.col2im",
1008        "torch._C._nn.conv_depthwise3d",
1009        "torch._C._nn.cross_entropy_loss",
1010        "torch._C._nn.elu_",
1011        "torch._C._nn.elu",
1012        "torch._C._nn.flatten_dense_tensors",
1013        "torch._C._nn.fractional_max_pool2d",
1014        "torch._C._nn.fractional_max_pool3d",
1015        "torch._C._nn.gelu_",
1016        "torch._C._nn.gelu",
1017        "torch._C._nn.glu",
1018        "torch._C._nn.hardsigmoid_",
1019        "torch._C._nn.hardsigmoid",
1020        "torch._C._nn.hardswish_",
1021        "torch._C._nn.hardswish",
1022        "torch._C._nn.hardtanh_",
1023        "torch._C._nn.hardtanh",
1024        "torch._C._nn.huber_loss",
1025        "torch._C._nn.im2col",
1026        "torch._C._nn.l1_loss",
1027        "torch._C._nn.leaky_relu_",
1028        "torch._C._nn.leaky_relu",
1029        "torch._C._nn.linear",
1030        "torch._C._nn.log_sigmoid",
1031        "torch._C._nn.max_pool2d_with_indices",
1032        "torch._C._nn.max_pool3d_with_indices",
1033        "torch._C._nn.max_unpool2d",
1034        "torch._C._nn.max_unpool3d",
1035        "torch._C._nn.mish_",
1036        "torch._C._nn.mish",
1037        "torch._C._nn.mkldnn_linear",
1038        "torch._C._nn.mkldnn_reorder_conv2d_weight",
1039        "torch._C._nn.mkldnn_reorder_conv3d_weight",
1040        "torch._C._nn.mse_loss",
1041        "torch._C._nn.multi_margin_loss",
1042        "torch._C._nn.multilabel_margin_loss",
1043        "torch._C._nn.nll_loss_nd",
1044        "torch._C._nn.nll_loss",
1045        "torch._C._nn.nll_loss2d",
1046        "torch._C._nn.one_hot",
1047        "torch._C._nn.pad_sequence",
1048        "torch._C._nn.pad",
1049        "torch._C._nn.reflection_pad1d",
1050        "torch._C._nn.reflection_pad2d",
1051        "torch._C._nn.reflection_pad3d",
1052        "torch._C._nn.relu6_",
1053        "torch._C._nn.relu6",
1054        "torch._C._nn.replication_pad1d",
1055        "torch._C._nn.replication_pad2d",
1056        "torch._C._nn.replication_pad3d",
1057        "torch._C._nn.rrelu_with_noise_",
1058        "torch._C._nn.rrelu_with_noise",
1059        "torch._C._nn.scaled_dot_product_attention",
1060        "torch._C._nn.silu_",
1061        "torch._C._nn.silu",
1062        "torch._C._nn.slow_conv_dilated2d",
1063        "torch._C._nn.slow_conv_dilated3d",
1064        "torch._C._nn.slow_conv_transpose2d",
1065        "torch._C._nn.slow_conv_transpose3d",
1066        "torch._C._nn.slow_conv3d",
1067        "torch._C._nn.smooth_l1_loss",
1068        "torch._C._nn.soft_margin_loss",
1069        "torch._C._nn.softplus",
1070        "torch._C._nn.softshrink",
1071        "torch._C._nn.thnn_conv2d",
1072        "torch._C._nn.unflatten_dense_tensors",
1073        "torch._C._nn.upsample_bicubic2d",
1074        "torch._C._nn.upsample_bilinear2d",
1075        "torch._C._nn.upsample_linear1d",
1076        "torch._C._nn.upsample_nearest1d",
1077        "torch._C._nn.upsample_nearest2d",
1078        "torch._C._nn.upsample_nearest3d",
1079        "torch._C._nn.upsample_trilinear3d",
1080        "torch._C._non_sym_sizes",
1081        "torch._C._overlaps",
1082        "torch._C._parallel_info",
1083        "torch._C._parse_dispatch_key",
1084        "torch._C._parse_source_def",
1085        "torch._C._pop_torch_dispatch_stack",
1086        "torch._C._pop_torch_function_stack",
1087        "torch._C._propagate_and_assign_input_shapes",
1088        "torch._C._propagate_shapes",
1089        "torch._C._propagate_xla_data",
1090        "torch._C._push_on_torch_dispatch_stack",
1091        "torch._C._push_on_torch_function_stack",
1092        "torch._C._quantize_ondevice_ptq_dynamic",
1093        "torch._C._register_py_class_for_device",
1094        "torch._C._remove_cached_tensor",
1095        "torch._C._remove_worker_pids",
1096        "torch._C._rename_privateuse1_backend",
1097        "torch._C._replace_",
1098        "torch._C._replace_overloaded_method_decl",
1099        "torch._C._resolve_type_from_object",
1100        "torch._C._resolve_type",
1101        "torch._C._rocm_is_backward_pass",
1102        "torch._C._rpc_init",
1103        "torch._C._run_emit_module_hook",
1104        "torch._C._save_jit_module_to_bytes",
1105        "torch._C._save_jit_module",
1106        "torch._C._save_mobile_module_to_bytes",
1107        "torch._C._save_mobile_module",
1108        "torch._C._save_parameters",
1109        "torch._C._scatter_out",
1110        "torch._C._scatter",
1111        "torch._C._select_conv_backend",
1112        "torch._C._select_batch_norm_backend",
1113        "torch._C._set_autograd_fallback_mode",
1114        "torch._C._set_backcompat_broadcast_warn",
1115        "torch._C._set_backcompat_keepdim_warn",
1116        "torch._C._set_blas_preferred_backend",
1117        "torch._C._set_cached_tensors_enabled",
1118        "torch._C._set_check_sparse_tensor_invariants",
1119        "torch._C._set_conj",
1120        "torch._C._set_cublas_allow_bf16_reduced_precision_reduction",
1121        "torch._C._set_cublas_allow_fp16_reduced_precision_reduction",
1122        "torch._C._set_cublas_allow_tf32",
1123        "torch._C._set_cudnn_allow_tf32",
1124        "torch._C._set_cudnn_benchmark",
1125        "torch._C._set_cudnn_deterministic",
1126        "torch._C._set_cudnn_enabled",
1127        "torch._C._set_default_dtype",
1128        "torch._C._set_default_mobile_cpu_allocator",
1129        "torch._C._set_default_tensor_type",
1130        "torch._C._set_deterministic_algorithms",
1131        "torch._C._set_deterministic_fill_uninitialized_memory",
1132        "torch._C._set_dispatch_mode",
1133        "torch._C._set_float32_matmul_precision",
1134        "torch._C._set_fwd_grad_enabled",
1135        "torch._C._set_grad_enabled",
1136        "torch._C._set_graph_executor_optimize",
1137        "torch._C._set_linalg_preferred_backend",
1138        "torch._C._set_meta_in_tls_dispatch_include",
1139        "torch._C._set_mkldnn_enabled",
1140        "torch._C._set_multithreading_enabled",
1141        "torch._C._set_neg",
1142        "torch._C._set_nnpack_enabled",
1143        "torch._C._set_print_stack_traces_on_fatal_signal",
1144        "torch._C._set_qengine",
1145        "torch._C._set_sdp_use_flash",
1146        "torch._C._set_sdp_use_math",
1147        "torch._C._set_math_sdp_allow_fp16_bf16_reduction",
1148        "torch._C._set_sdp_use_mem_efficient",
1149        "torch._C._set_should_use_format_with_string_table",
1150        "torch._C._set_storage_access_error_msg",
1151        "torch._C._set_tensor_metadata",
1152        "torch._C._set_tracing_state",
1153        "torch._C._set_value_trace",
1154        "torch._C._set_view_replay_enabled",
1155        "torch._C._set_warnAlways",
1156        "torch._C._set_worker_pids",
1157        "torch._C._set_worker_signal_handlers",
1158        "torch._C._should_allow_numbers_as_tensors",
1159        "torch._C._show_config",
1160        "torch._C._sparse._sparse_addmm",
1161        "torch._C._sparse._sparse_log_softmax",
1162        "torch._C._sparse._sparse_mm_reduce_impl",
1163        "torch._C._sparse._sparse_mm",
1164        "torch._C._sparse._sparse_softmax",
1165        "torch._C._sparse._spdiags",
1166        "torch._C._sparse.sparse_sampled_addmm",
1167        "torch._C._special.special_airy_ai",
1168        "torch._C._special.special_bessel_j0",
1169        "torch._C._special.special_bessel_j1",
1170        "torch._C._special.special_bessel_y0",
1171        "torch._C._special.special_bessel_y1",
1172        "torch._C._special.special_chebyshev_polynomial_t",
1173        "torch._C._special.special_chebyshev_polynomial_u",
1174        "torch._C._special.special_chebyshev_polynomial_v",
1175        "torch._C._special.special_chebyshev_polynomial_w",
1176        "torch._C._special.special_digamma",
1177        "torch._C._special.special_entr",
1178        "torch._C._special.special_erf",
1179        "torch._C._special.special_erfc",
1180        "torch._C._special.special_erfcx",
1181        "torch._C._special.special_erfinv",
1182        "torch._C._special.special_exp2",
1183        "torch._C._special.special_expit",
1184        "torch._C._special.special_expm1",
1185        "torch._C._special.special_gammainc",
1186        "torch._C._special.special_gammaincc",
1187        "torch._C._special.special_gammaln",
1188        "torch._C._special.special_hermite_polynomial_h",
1189        "torch._C._special.special_hermite_polynomial_he",
1190        "torch._C._special.special_i0",
1191        "torch._C._special.special_i0e",
1192        "torch._C._special.special_i1",
1193        "torch._C._special.special_i1e",
1194        "torch._C._special.special_laguerre_polynomial_l",
1195        "torch._C._special.special_legendre_polynomial_p",
1196        "torch._C._special.special_log_ndtr",
1197        "torch._C._special.special_log_softmax",
1198        "torch._C._special.special_log1p",
1199        "torch._C._special.special_logit",
1200        "torch._C._special.special_logsumexp",
1201        "torch._C._special.special_modified_bessel_i0",
1202        "torch._C._special.special_modified_bessel_i1",
1203        "torch._C._special.special_modified_bessel_k0",
1204        "torch._C._special.special_modified_bessel_k1",
1205        "torch._C._special.special_multigammaln",
1206        "torch._C._special.special_ndtr",
1207        "torch._C._special.special_ndtri",
1208        "torch._C._special.special_polygamma",
1209        "torch._C._special.special_psi",
1210        "torch._C._special.special_round",
1211        "torch._C._special.special_scaled_modified_bessel_k0",
1212        "torch._C._special.special_scaled_modified_bessel_k1",
1213        "torch._C._special.special_shifted_chebyshev_polynomial_t",
1214        "torch._C._special.special_shifted_chebyshev_polynomial_u",
1215        "torch._C._special.special_shifted_chebyshev_polynomial_v",
1216        "torch._C._special.special_shifted_chebyshev_polynomial_w",
1217        "torch._C._special.special_sinc",
1218        "torch._C._special.special_softmax",
1219        "torch._C._special.special_spherical_bessel_j0",
1220        "torch._C._special.special_xlog1py",
1221        "torch._C._special.special_xlogy",
1222        "torch._C._special.special_zeta",
1223        "torch._C._stash_obj_in_tls",
1224        "torch._C._storage_id",
1225        "torch._C._storage_Use_Count",
1226        "torch._C._supported_qengines",
1227        "torch._C._te.abs",
1228        "torch._C._te.acos",
1229        "torch._C._te.annotate_input_shapes",
1230        "torch._C._te.asin",
1231        "torch._C._te.atan",
1232        "torch._C._te.atan2",
1233        "torch._C._te.ceil",
1234        "torch._C._te.Compute",
1235        "torch._C._te.Compute2",
1236        "torch._C._te.construct_codegen",
1237        "torch._C._te.cos",
1238        "torch._C._te.cosh",
1239        "torch._C._te.erf",
1240        "torch._C._te.erfc",
1241        "torch._C._te.exp",
1242        "torch._C._te.expm1",
1243        "torch._C._te.fixup_missing_shape_info",
1244        "torch._C._te.floor",
1245        "torch._C._te.fmod",
1246        "torch._C._te.frac",
1247        "torch._C._te.ifThenElse",
1248        "torch._C._te.is_graph_compilable",
1249        "torch._C._te.isnan",
1250        "torch._C._te.lgamma",
1251        "torch._C._te.log",
1252        "torch._C._te.log10",
1253        "torch._C._te.log1p",
1254        "torch._C._te.log2",
1255        "torch._C._te.lower",
1256        "torch._C._te.make_shapes_symbolic",
1257        "torch._C._te.pow",
1258        "torch._C._te.Reduce",
1259        "torch._C._te.remainder",
1260        "torch._C._te.remove_graph_output",
1261        "torch._C._te.remove_unused_self_argument",
1262        "torch._C._te.replace_list_output_with_tuple",
1263        "torch._C._te.round",
1264        "torch._C._te.rsqrt",
1265        "torch._C._te.sigmoid",
1266        "torch._C._te.simplify",
1267        "torch._C._te.sin",
1268        "torch._C._te.sinh",
1269        "torch._C._te.sqrt",
1270        "torch._C._te.tan",
1271        "torch._C._te.tanh",
1272        "torch._C._te.trim_graph",
1273        "torch._C._te.trunc",
1274        "torch._C._tensor_impl_raw_handle",
1275        "torch._C._test_only_add_entry_to_op_version_map",
1276        "torch._C._test_only_populate_upgraders",
1277        "torch._C._test_only_remove_entry_to_op_version_map",
1278        "torch._C._test_only_remove_upgraders",
1279        "torch._C._to_functionality_key",
1280        "torch._C._tracer_set_force_outplace",
1281        "torch._C._tracer_set_get_unique_name_fn",
1282        "torch._C._tracer_warn_use_python",
1283        "torch._C._unset_default_mobile_cpu_allocator",
1284        "torch._C._unset_dispatch_mode",
1285        "torch._C._valgrind_supported_platform",
1286        "torch._C._valgrind_toggle_and_dump_stats",
1287        "torch._C._valgrind_toggle",
1288        "torch._C._verbose.mkl_set_verbose",
1289        "torch._C._verbose.mkldnn_set_verbose",
1290        "torch._C._vmapmode_decrement_nesting",
1291        "torch._C._vmapmode_increment_nesting",
1292        "torch._C._warn_deprecation",
1293        "torch._C._warn",
1294        "torch._C._will_engine_execute_node",
1295        "torch._C._wrap_tensor_impl",
1296        "torch._C.fork",
1297        "torch._C.get_autocast_cpu_dtype",
1298        "torch._C.get_autocast_dtype",
1299        "torch._C.get_autocast_gpu_dtype",
1300        "torch._C.get_autocast_ipu_dtype",
1301        "torch._C.get_autocast_xla_dtype",
1302        "torch._C.get_default_dtype",
1303        "torch._C.get_num_interop_threads",
1304        "torch._C.get_num_threads",
1305        "torch._C.import_ir_module_from_buffer",
1306        "torch._C.import_ir_module",
1307        "torch._C.init_num_threads",
1308        "torch._C.is_anomaly_check_nan_enabled",
1309        "torch._C.is_anomaly_enabled",
1310        "torch._C.is_autocast_cache_enabled",
1311        "torch._C.is_autocast_cpu_enabled",
1312        "torch._C.is_autocast_enabled",
1313        "torch._C.is_autocast_ipu_enabled",
1314        "torch._C.is_autocast_xla_enabled",
1315        "torch._C.is_grad_enabled",
1316        "torch._C.is_inference_mode_enabled",
1317        "torch._C.merge_type_from_type_comment",
1318        "torch._C.parse_ir",
1319        "torch._C.parse_schema",
1320        "torch._C.parse_type_comment",
1321        "torch._C.read_vitals",
1322        "torch._C.set_vital",
1323        "torch._C.unify_type_list",
1324        "torch._C.vitals_enabled",
1325        "torch._C.wait",
1326        "torch._cast_Byte",
1327        "torch._cast_Char",
1328        "torch._cast_Double",
1329        "torch._cast_Float",
1330        "torch._cast_Half",
1331        "torch._cast_Int",
1332        "torch._cast_Long",
1333        "torch._cast_Short",
1334        "torch._choose_qparams_per_tensor",
1335        "torch._chunk_cat",
1336        "torch._coalesce",
1337        "torch._compute_linear_combination",
1338        "torch._conj_copy",
1339        "torch._conj_physical",
1340        "torch._conj",
1341        "torch._convert_indices_from_coo_to_csr",
1342        "torch._convert_indices_from_csr_to_coo",
1343        "torch._convert_weight_to_int4pack",
1344        "torch._convolution_mode",
1345        "torch._convolution",
1346        "torch._copy_from_and_resize",
1347        "torch._copy_from",
1348        "torch._cslt_compress",
1349        "torch._cslt_sparse_mm",
1350        "torch._ctc_loss",
1351        "torch._cudnn_ctc_loss",
1352        "torch._cudnn_init_dropout_state",
1353        "torch._cudnn_rnn_flatten_weight",
1354        "torch._cudnn_rnn",
1355        "torch._cufft_clear_plan_cache",
1356        "torch._cufft_get_plan_cache_max_size",
1357        "torch._cufft_get_plan_cache_size",
1358        "torch._cufft_set_plan_cache_max_size",
1359        "torch._cummax_helper",
1360        "torch._cummin_helper",
1361        "torch._debug_has_internal_overlap",
1362        "torch._dim_arange",
1363        "torch._dirichlet_grad",
1364        "torch._disable_functionalization",
1365        "torch._efficientzerotensor",
1366        "torch._embedding_bag_forward_only",
1367        "torch._embedding_bag",
1368        "torch._empty_affine_quantized",
1369        "torch._empty_per_channel_affine_quantized",
1370        "torch._enable_functionalization",
1371        "torch._euclidean_dist",
1372        "torch._fake_quantize_learnable_per_channel_affine",
1373        "torch._fake_quantize_learnable_per_tensor_affine",
1374        "torch._fake_quantize_per_tensor_affine_cachemask_tensor_qparams",
1375        "torch._fft_c2c",
1376        "torch._fft_c2r",
1377        "torch._fft_r2c",
1378        "torch._fill_mem_eff_dropout_mask_",
1379        "torch._foobar",
1380        "torch._foreach_abs_",
1381        "torch._foreach_abs",
1382        "torch._foreach_acos_",
1383        "torch._foreach_acos",
1384        "torch._foreach_add_",
1385        "torch._foreach_add",
1386        "torch._foreach_addcdiv_",
1387        "torch._foreach_addcdiv",
1388        "torch._foreach_addcmul_",
1389        "torch._foreach_addcmul",
1390        "torch._foreach_asin_",
1391        "torch._foreach_asin",
1392        "torch._foreach_atan_",
1393        "torch._foreach_atan",
1394        "torch._foreach_ceil_",
1395        "torch._foreach_ceil",
1396        "torch._foreach_clamp_max_",
1397        "torch._foreach_clamp_max",
1398        "torch._foreach_clamp_min_",
1399        "torch._foreach_clamp_min",
1400        "torch._foreach_copy_",
1401        "torch._foreach_cos_",
1402        "torch._foreach_cos",
1403        "torch._foreach_cosh_",
1404        "torch._foreach_cosh",
1405        "torch._foreach_div_",
1406        "torch._foreach_div",
1407        "torch._foreach_erf_",
1408        "torch._foreach_erf",
1409        "torch._foreach_erfc_",
1410        "torch._foreach_erfc",
1411        "torch._foreach_exp_",
1412        "torch._foreach_exp",
1413        "torch._foreach_expm1_",
1414        "torch._foreach_expm1",
1415        "torch._foreach_floor_",
1416        "torch._foreach_floor",
1417        "torch._foreach_frac_",
1418        "torch._foreach_frac",
1419        "torch._foreach_lerp_",
1420        "torch._foreach_lerp",
1421        "torch._foreach_lgamma_",
1422        "torch._foreach_lgamma",
1423        "torch._foreach_log_",
1424        "torch._foreach_log",
1425        "torch._foreach_log10_",
1426        "torch._foreach_log10",
1427        "torch._foreach_log1p_",
1428        "torch._foreach_log1p",
1429        "torch._foreach_log2_",
1430        "torch._foreach_log2",
1431        "torch._foreach_maximum_",
1432        "torch._foreach_maximum",
1433        "torch._foreach_minimum_",
1434        "torch._foreach_minimum",
1435        "torch._foreach_mul_",
1436        "torch._foreach_mul",
1437        "torch._foreach_neg_",
1438        "torch._foreach_neg",
1439        "torch._foreach_norm",
1440        "torch._foreach_pow_",
1441        "torch._foreach_pow",
1442        "torch._foreach_reciprocal_",
1443        "torch._foreach_reciprocal",
1444        "torch._foreach_round_",
1445        "torch._foreach_round",
1446        "torch._foreach_sigmoid_",
1447        "torch._foreach_sigmoid",
1448        "torch._foreach_sign_",
1449        "torch._foreach_sign",
1450        "torch._foreach_sin_",
1451        "torch._foreach_sin",
1452        "torch._foreach_sinh_",
1453        "torch._foreach_sinh",
1454        "torch._foreach_sqrt_",
1455        "torch._foreach_sqrt",
1456        "torch._foreach_sub_",
1457        "torch._foreach_sub",
1458        "torch._foreach_tan_",
1459        "torch._foreach_tan",
1460        "torch._foreach_tanh_",
1461        "torch._foreach_tanh",
1462        "torch._foreach_trunc_",
1463        "torch._foreach_trunc",
1464        "torch._foreach_zero_",
1465        "torch._freeze_functional_tensor",
1466        "torch._from_functional_tensor",
1467        "torch._functional_assert_async",
1468        "torch._functional_sym_constrain_range_for_size",
1469        "torch._functional_sym_constrain_range",
1470        "torch._functionalize_are_all_mutations_hidden_from_autograd",
1471        "torch._functionalize_commit_update",
1472        "torch._functionalize_enable_reapply_views",
1473        "torch._functionalize_has_data_mutation",
1474        "torch._functionalize_has_metadata_mutation",
1475        "torch._functionalize_is_multi_output_view",
1476        "torch._functionalize_mark_mutation_hidden_from_autograd",
1477        "torch._functionalize_replace",
1478        "torch._functionalize_sync",
1479        "torch._functionalize_was_storage_changed",
1480        "torch._fused_adam_",
1481        "torch._fused_adamw_",
1482        "torch._fused_dropout",
1483        "torch._fused_moving_avg_obs_fq_helper",
1484        "torch._fused_sdp_choice",
1485        "torch._fw_primal_copy",
1486        "torch._grid_sampler_2d_cpu_fallback",
1487        "torch._has_compatible_shallow_copy_type",
1488        "torch._histogramdd_bin_edges",
1489        "torch._histogramdd_from_bin_cts",
1490        "torch._histogramdd_from_bin_tensors",
1491        "torch._index_put_impl_",
1492        "torch._indices_copy",
1493        "torch._int_mm",
1494        "torch._is_all_true",
1495        "torch._is_any_true",
1496        "torch._is_functional_tensor",
1497        "torch._is_zerotensor",
1498        "torch._linalg_check_errors",
1499        "torch._linalg_det",
1500        "torch._linalg_eigh",
1501        "torch._linalg_eigvals",
1502        "torch._linalg_slogdet",
1503        "torch._linalg_solve_ex",
1504        "torch._linalg_svd",
1505        "torch._log_softmax_backward_data",
1506        "torch._log_softmax",
1507        "torch._logcumsumexp",
1508        "torch._lstm_mps",
1509        "torch._lu_with_info",
1510        "torch._make_dep_token",
1511        "torch._make_dual_copy",
1512        "torch._make_dual",
1513        "torch._make_per_channel_quantized_tensor",
1514        "torch._make_per_tensor_quantized_tensor",
1515        "torch._masked_scale",
1516        "torch._masked_softmax",
1517        "torch._mirror_autograd_meta_to",
1518        "torch._mixed_dtypes_linear",
1519        "torch._mkldnn_reshape",
1520        "torch._mkldnn_transpose_",
1521        "torch._mkldnn_transpose",
1522        "torch._mps_convolution_transpose",
1523        "torch._mps_convolution",
1524        "torch._native_batch_norm_legit_no_training",
1525        "torch._native_batch_norm_legit",
1526        "torch._native_multi_head_attention",
1527        "torch._neg_view_copy",
1528        "torch._neg_view",
1529        "torch._nested_from_padded_and_nested_example",
1530        "torch._nested_tensor_from_mask_left_aligned",
1531        "torch._nested_tensor_from_tensor_list",
1532        "torch._nested_tensor_softmax_with_shape",
1533        "torch._nested_view_from_buffer_copy",
1534        "torch._nested_view_from_buffer",
1535        "torch._nnpack_available",
1536        "torch._nnpack_spatial_convolution",
1537        "torch._pack_padded_sequence",
1538        "torch._pad_packed_sequence",
1539        "torch._pin_memory",
1540        "torch._prelu_kernel",
1541        "torch._propagate_xla_data",
1542        "torch._remove_batch_dim",
1543        "torch._reshape_alias_copy",
1544        "torch._reshape_from_tensor",
1545        "torch._resize_output_",
1546        "torch._rowwise_prune",
1547        "torch._sample_dirichlet",
1548        "torch._saturate_weight_to_fp16",
1549        "torch._scaled_dot_product_attention_math",
1550        "torch._scaled_dot_product_efficient_attention",
1551        "torch._scaled_dot_product_flash_attention",
1552        "torch._scaled_dot_product_flash_attention_for_cpu",
1553        "torch._scaled_dot_product_cudnn_attention",
1554        "torch._scaled_mm",
1555        "torch._shape_as_tensor",
1556        "torch._sobol_engine_draw",
1557        "torch._sobol_engine_ff_",
1558        "torch._sobol_engine_initialize_state_",
1559        "torch._sobol_engine_scramble_",
1560        "torch._softmax_backward_data",
1561        "torch._softmax",
1562        "torch._sparse_broadcast_to_copy",
1563        "torch._sparse_broadcast_to",
1564        "torch._sparse_csr_prod",
1565        "torch._sparse_csr_sum",
1566        "torch._sparse_log_softmax_backward_data",
1567        "torch._sparse_semi_structured_addmm",
1568        "torch._sparse_semi_structured_linear",
1569        "torch._sparse_semi_structured_mm",
1570        "torch._sparse_softmax_backward_data",
1571        "torch._sparse_sparse_matmul",
1572        "torch._sparse_sum",
1573        "torch._stack",
1574        "torch._standard_gamma_grad",
1575        "torch._standard_gamma",
1576        "torch._test_autograd_multiple_dispatch_view_copy",
1577        "torch._test_autograd_multiple_dispatch_view",
1578        "torch._test_autograd_multiple_dispatch",
1579        "torch._test_check_tensor",
1580        "torch._test_functorch_fallback",
1581        "torch._test_serialization_subcmul",
1582        "torch._to_cpu",
1583        "torch._to_functional_tensor",
1584        "torch._to_sparse_semi_structured",
1585        "torch._transform_bias_rescale_qkv",
1586        "torch._transformer_encoder_layer_fwd",
1587        "torch._trilinear",
1588        "torch._triton_multi_head_attention",
1589        "torch._triton_scaled_dot_attention",
1590        "torch._unique",
1591        "torch._unique2",
1592        "torch._unpack_dual",
1593        "torch._unsafe_index_put",
1594        "torch._unsafe_index",
1595        "torch._unsafe_masked_index_put_accumulate",
1596        "torch._unsafe_masked_index",
1597        "torch._use_cudnn_ctc_loss",
1598        "torch._use_cudnn_rnn_flatten_weight",
1599        "torch._values_copy",
1600        "torch._weight_int4pack_mm",
1601        "torch._weight_int8pack_mm",
1602        "torch._weight_norm_interface",
1603        "torch._weight_norm",
1604        "torch.abs_",
1605        "torch.abs",
1606        "torch.absolute",
1607        "torch.acos_",
1608        "torch.acos",
1609        "torch.acosh_",
1610        "torch.acosh",
1611        "torch.adaptive_avg_pool1d",
1612        "torch.adaptive_max_pool1d",
1613        "torch.add",
1614        "torch.addbmm",
1615        "torch.addcdiv",
1616        "torch.addcmul",
1617        "torch.addmm",
1618        "torch.addmv_",
1619        "torch.addmv",
1620        "torch.addr",
1621        "torch.adjoint",
1622        "torch.affine_grid_generator",
1623        "torch.alias_copy",
1624        "torch.all",
1625        "torch.allclose",
1626        "torch.alpha_dropout_",
1627        "torch.alpha_dropout",
1628        "torch.amax",
1629        "torch.amin",
1630        "torch.aminmax",
1631        "torch.angle",
1632        "torch.any",
1633        "torch.arange",
1634        "torch.arccos_",
1635        "torch.arccos",
1636        "torch.arccosh_",
1637        "torch.arccosh",
1638        "torch.arcsin_",
1639        "torch.arcsin",
1640        "torch.arcsinh_",
1641        "torch.arcsinh",
1642        "torch.arctan_",
1643        "torch.arctan",
1644        "torch.arctan2",
1645        "torch.arctanh_",
1646        "torch.arctanh",
1647        "torch.argmax",
1648        "torch.argmin",
1649        "torch.argsort",
1650        "torch.argwhere",
1651        "torch.as_strided_",
1652        "torch.as_strided_copy",
1653        "torch.as_strided_scatter",
1654        "torch.as_strided",
1655        "torch.as_tensor",
1656        "torch.asarray",
1657        "torch.asin_",
1658        "torch.asin",
1659        "torch.asinh_",
1660        "torch.asinh",
1661        "torch.atan_",
1662        "torch.atan",
1663        "torch.atan2",
1664        "torch.atanh_",
1665        "torch.atanh",
1666        "torch.avg_pool1d",
1667        "torch.baddbmm",
1668        "torch.bartlett_window",
1669        "torch.batch_norm_backward_elemt",
1670        "torch.batch_norm_backward_reduce",
1671        "torch.batch_norm_elemt",
1672        "torch.batch_norm_gather_stats_with_counts",
1673        "torch.batch_norm_gather_stats",
1674        "torch.batch_norm_stats",
1675        "torch.batch_norm_update_stats",
1676        "torch.batch_norm",
1677        "torch.bernoulli",
1678        "torch.bilinear",
1679        "torch.binary_cross_entropy_with_logits",
1680        "torch.bincount",
1681        "torch.binomial",
1682        "torch.bitwise_and",
1683        "torch.bitwise_left_shift",
1684        "torch.bitwise_not",
1685        "torch.bitwise_or",
1686        "torch.bitwise_right_shift",
1687        "torch.bitwise_xor",
1688        "torch.blackman_window",
1689        "torch.bmm",
1690        "torch.broadcast_to",
1691        "torch.bucketize",
1692        "torch.can_cast",
1693        "torch.cat",
1694        "torch.ccol_indices_copy",
1695        "torch.ceil_",
1696        "torch.ceil",
1697        "torch.celu_",
1698        "torch.celu",
1699        "torch.channel_shuffle",
1700        "torch.cholesky_inverse",
1701        "torch.cholesky_solve",
1702        "torch.cholesky",
1703        "torch.choose_qparams_optimized",
1704        "torch.chunk",
1705        "torch.clamp_",
1706        "torch.clamp_max_",
1707        "torch.clamp_max",
1708        "torch.clamp_min_",
1709        "torch.clamp_min",
1710        "torch.clamp",
1711        "torch.clip_",
1712        "torch.clip",
1713        "torch.clone",
1714        "torch.col_indices_copy",
1715        "torch.column_stack",
1716        "torch.combinations",
1717        "torch.complex",
1718        "torch.concat",
1719        "torch.concatenate",
1720        "torch.conj_physical_",
1721        "torch.conj_physical",
1722        "torch.conj",
1723        "torch.constant_pad_nd",
1724        "torch.conv_tbc",
1725        "torch.conv_transpose1d",
1726        "torch.conv_transpose2d",
1727        "torch.conv_transpose3d",
1728        "torch.conv1d",
1729        "torch.conv2d",
1730        "torch.conv3d",
1731        "torch.convolution",
1732        "torch.copysign",
1733        "torch.corrcoef",
1734        "torch.cos_",
1735        "torch.cos",
1736        "torch.cosh_",
1737        "torch.cosh",
1738        "torch.cosine_embedding_loss",
1739        "torch.cosine_similarity",
1740        "torch.count_nonzero",
1741        "torch.cov",
1742        "torch.cross",
1743        "torch.crow_indices_copy",
1744        "torch.ctc_loss",
1745        "torch.cudnn_affine_grid_generator",
1746        "torch.cudnn_batch_norm",
1747        "torch.cudnn_convolution_add_relu",
1748        "torch.cudnn_convolution_relu",
1749        "torch.cudnn_convolution_transpose",
1750        "torch.cudnn_convolution",
1751        "torch.cudnn_grid_sampler",
1752        "torch.cudnn_is_acceptable",
1753        "torch.cummax",
1754        "torch.cummin",
1755        "torch.cumprod",
1756        "torch.cumsum",
1757        "torch.cumulative_trapezoid",
1758        "torch.deg2rad_",
1759        "torch.deg2rad",
1760        "torch.dequantize",
1761        "torch.det",
1762        "torch.detach_",
1763        "torch.detach_copy",
1764        "torch.detach",
1765        "torch.diag_embed",
1766        "torch.diag",
1767        "torch.diagflat",
1768        "torch.diagonal_copy",
1769        "torch.diagonal_scatter",
1770        "torch.diagonal",
1771        "torch.diff",
1772        "torch.digamma",
1773        "torch.dist",
1774        "torch.div",
1775        "torch.divide",
1776        "torch.dot",
1777        "torch.dropout_",
1778        "torch.dropout",
1779        "torch.dsmm",
1780        "torch.dsplit",
1781        "torch.dstack",
1782        "torch.embedding_bag",
1783        "torch.embedding_renorm_",
1784        "torch.embedding",
1785        "torch.empty_like",
1786        "torch.empty_permuted",
1787        "torch.empty_quantized",
1788        "torch.empty_strided",
1789        "torch.empty",
1790        "torch.eq",
1791        "torch.equal",
1792        "torch.erf_",
1793        "torch.erf",
1794        "torch.erfc_",
1795        "torch.erfc",
1796        "torch.erfinv",
1797        "torch.exp_",
1798        "torch.exp",
1799        "torch.exp2_",
1800        "torch.exp2",
1801        "torch.expand_copy",
1802        "torch.expm1_",
1803        "torch.expm1",
1804        "torch.eye",
1805        "torch.fake_quantize_per_channel_affine",
1806        "torch.fake_quantize_per_tensor_affine",
1807        "torch.fbgemm_linear_fp16_weight_fp32_activation",
1808        "torch.fbgemm_linear_fp16_weight",
1809        "torch.fbgemm_linear_int8_weight_fp32_activation",
1810        "torch.fbgemm_linear_int8_weight",
1811        "torch.fbgemm_linear_quantize_weight",
1812        "torch.fbgemm_pack_gemm_matrix_fp16",
1813        "torch.fbgemm_pack_quantized_matrix",
1814        "torch.feature_alpha_dropout_",
1815        "torch.feature_alpha_dropout",
1816        "torch.feature_dropout_",
1817        "torch.feature_dropout",
1818        "torch.fill_",
1819        "torch.fill",
1820        "torch.fix_",
1821        "torch.fix",
1822        "torch.flatten",
1823        "torch.flip",
1824        "torch.fliplr",
1825        "torch.flipud",
1826        "torch.float_power",
1827        "torch.floor_",
1828        "torch.floor_divide",
1829        "torch.floor",
1830        "torch.fmax",
1831        "torch.fmin",
1832        "torch.fmod",
1833        "torch.frac_",
1834        "torch.frac",
1835        "torch.frexp",
1836        "torch.frobenius_norm",
1837        "torch.from_file",
1838        "torch.from_numpy",
1839        "torch.frombuffer",
1840        "torch.full_like",
1841        "torch.full",
1842        "torch.fused_moving_avg_obs_fake_quant",
1843        "torch.gather",
1844        "torch.gcd_",
1845        "torch.gcd",
1846        "torch.ge",
1847        "torch.geqrf",
1848        "torch.ger",
1849        "torch.get_device",
1850        "torch.gradient",
1851        "torch.greater_equal",
1852        "torch.greater",
1853        "torch.grid_sampler_2d",
1854        "torch.grid_sampler_3d",
1855        "torch.grid_sampler",
1856        "torch.group_norm",
1857        "torch.gru_cell",
1858        "torch.gru",
1859        "torch.gt",
1860        "torch.hamming_window",
1861        "torch.hann_window",
1862        "torch.hardshrink",
1863        "torch.heaviside",
1864        "torch.hinge_embedding_loss",
1865        "torch.histc",
1866        "torch.histogram",
1867        "torch.histogramdd",
1868        "torch.hsmm",
1869        "torch.hsplit",
1870        "torch.hspmm",
1871        "torch.hstack",
1872        "torch.hypot",
1873        "torch.i0_",
1874        "torch.i0",
1875        "torch.igamma",
1876        "torch.igammac",
1877        "torch.imag",
1878        "torch.index_add",
1879        "torch.index_copy",
1880        "torch.index_fill",
1881        "torch.index_put_",
1882        "torch.index_put",
1883        "torch.index_reduce",
1884        "torch.index_select",
1885        "torch.indices_copy",
1886        "torch.inner",
1887        "torch.instance_norm",
1888        "torch.int_repr",
1889        "torch.inverse",
1890        "torch.is_complex",
1891        "torch.is_conj",
1892        "torch.is_distributed",
1893        "torch.is_floating_point",
1894        "torch.is_inference",
1895        "torch.is_neg",
1896        "torch.is_nonzero",
1897        "torch.is_same_size",
1898        "torch.is_signed",
1899        "torch.is_vulkan_available",
1900        "torch.isclose",
1901        "torch.isfinite",
1902        "torch.isin",
1903        "torch.isinf",
1904        "torch.isnan",
1905        "torch.isneginf",
1906        "torch.isposinf",
1907        "torch.isreal",
1908        "torch.istft",
1909        "torch.kaiser_window",
1910        "torch.kl_div",
1911        "torch.kron",
1912        "torch.kthvalue",
1913        "torch.layer_norm",
1914        "torch.lcm_",
1915        "torch.lcm",
1916        "torch.ldexp_",
1917        "torch.ldexp",
1918        "torch.le",
1919        "torch.lerp",
1920        "torch.less_equal",
1921        "torch.less",
1922        "torch.lgamma",
1923        "torch.linspace",
1924        "torch.log_",
1925        "torch.log_softmax",
1926        "torch.log",
1927        "torch.log10_",
1928        "torch.log10",
1929        "torch.log1p_",
1930        "torch.log1p",
1931        "torch.log2_",
1932        "torch.log2",
1933        "torch.logaddexp",
1934        "torch.logaddexp2",
1935        "torch.logcumsumexp",
1936        "torch.logdet",
1937        "torch.logical_and",
1938        "torch.logical_not",
1939        "torch.logical_or",
1940        "torch.logical_xor",
1941        "torch.logit_",
1942        "torch.logit",
1943        "torch.logspace",
1944        "torch.logsumexp",
1945        "torch.lstm_cell",
1946        "torch.lstm",
1947        "torch.lt",
1948        "torch.lu_solve",
1949        "torch.lu_unpack",
1950        "torch.margin_ranking_loss",
1951        "torch.masked_fill",
1952        "torch.masked_scatter",
1953        "torch.masked_select",
1954        "torch.matmul",
1955        "torch.matrix_exp",
1956        "torch.matrix_power",
1957        "torch.max_pool1d_with_indices",
1958        "torch.max_pool1d",
1959        "torch.max_pool2d",
1960        "torch.max_pool3d",
1961        "torch.max",
1962        "torch.maximum",
1963        "torch.mean",
1964        "torch.median",
1965        "torch.min",
1966        "torch.minimum",
1967        "torch.miopen_batch_norm",
1968        "torch.miopen_convolution_add_relu",
1969        "torch.miopen_convolution_relu",
1970        "torch.miopen_convolution_transpose",
1971        "torch.miopen_convolution",
1972        "torch.miopen_depthwise_convolution",
1973        "torch.miopen_rnn",
1974        "torch.mkldnn_adaptive_avg_pool2d",
1975        "torch.mkldnn_convolution",
1976        "torch.mkldnn_linear_backward_weights",
1977        "torch.mkldnn_max_pool2d",
1978        "torch.mkldnn_max_pool3d",
1979        "torch.mkldnn_rnn_layer",
1980        "torch.mm",
1981        "torch.mode",
1982        "torch.moveaxis",
1983        "torch.movedim",
1984        "torch.msort",
1985        "torch.mul",
1986        "torch.multinomial",
1987        "torch.multiply",
1988        "torch.mv",
1989        "torch.mvlgamma",
1990        "torch.nan_to_num_",
1991        "torch.nan_to_num",
1992        "torch.nanmean",
1993        "torch.nanmedian",
1994        "torch.nanquantile",
1995        "torch.nansum",
1996        "torch.narrow_copy",
1997        "torch.narrow",
1998        "torch.native_batch_norm",
1999        "torch.native_channel_shuffle",
2000        "torch.native_dropout",
2001        "torch.native_group_norm",
2002        "torch.native_layer_norm",
2003        "torch.native_norm",
2004        "torch.ne",
2005        "torch.neg_",
2006        "torch.neg",
2007        "torch.negative_",
2008        "torch.negative",
2009        "torch.nextafter",
2010        "torch.nonzero_static",
2011        "torch.nonzero",
2012        "torch.norm_except_dim",
2013        "torch.normal",
2014        "torch.not_equal",
2015        "torch.nuclear_norm",
2016        "torch.numel",
2017        "torch.ones_like",
2018        "torch.ones",
2019        "torch.orgqr",
2020        "torch.ormqr",
2021        "torch.outer",
2022        "torch.pairwise_distance",
2023        "torch.pdist",
2024        "torch.permute_copy",
2025        "torch.permute",
2026        "torch.pinverse",
2027        "torch.pixel_shuffle",
2028        "torch.pixel_unshuffle",
2029        "torch.poisson_nll_loss",
2030        "torch.poisson",
2031        "torch.polar",
2032        "torch.polygamma",
2033        "torch.positive",
2034        "torch.pow",
2035        "torch.prelu",
2036        "torch._print",
2037        "torch.prod",
2038        "torch.promote_types",
2039        "torch.put",
2040        "torch.q_per_channel_axis",
2041        "torch.q_per_channel_scales",
2042        "torch.q_per_channel_zero_points",
2043        "torch.q_scale",
2044        "torch.q_zero_point",
2045        "torch.qr",
2046        "torch.quantile",
2047        "torch.quantize_per_channel",
2048        "torch.quantize_per_tensor_dynamic",
2049        "torch.quantize_per_tensor",
2050        "torch.quantized_batch_norm",
2051        "torch.quantized_gru_cell",
2052        "torch.quantized_lstm_cell",
2053        "torch.quantized_max_pool1d",
2054        "torch.quantized_max_pool2d",
2055        "torch.quantized_max_pool3d",
2056        "torch.quantized_rnn_relu_cell",
2057        "torch.quantized_rnn_tanh_cell",
2058        "torch.rad2deg_",
2059        "torch.rad2deg",
2060        "torch.rand_like",
2061        "torch.rand",
2062        "torch.randint_like",
2063        "torch.randint",
2064        "torch.randn_like",
2065        "torch.randn",
2066        "torch.randperm",
2067        "torch.range",
2068        "torch.ravel",
2069        "torch.real",
2070        "torch.reciprocal_",
2071        "torch.reciprocal",
2072        "torch.relu_",
2073        "torch.relu",
2074        "torch.remainder",
2075        "torch.renorm",
2076        "torch.repeat_interleave",
2077        "torch.reshape",
2078        "torch.resolve_conj",
2079        "torch.resolve_neg",
2080        "torch.result_type",
2081        "torch.rms_norm",
2082        "torch.rnn_relu_cell",
2083        "torch.rnn_relu",
2084        "torch.rnn_tanh_cell",
2085        "torch.rnn_tanh",
2086        "torch.roll",
2087        "torch.rot90",
2088        "torch.round_",
2089        "torch.round",
2090        "torch.row_indices_copy",
2091        "torch.row_stack",
2092        "torch.rrelu_",
2093        "torch.rrelu",
2094        "torch.rsqrt_",
2095        "torch.rsqrt",
2096        "torch.rsub",
2097        "torch.saddmm",
2098        "torch.scalar_tensor",
2099        "torch.scatter_add",
2100        "torch.scatter_reduce",
2101        "torch.scatter",
2102        "torch.searchsorted",
2103        "torch.segment_reduce",
2104        "torch.select_copy",
2105        "torch.select_scatter",
2106        "torch.select",
2107        "torch.selu_",
2108        "torch.selu",
2109        "torch.sgn",
2110        "torch.sigmoid_",
2111        "torch.sigmoid",
2112        "torch.sign",
2113        "torch.signal.windows.windows.sqrt",
2114        "torch.signbit",
2115        "torch.sin_",
2116        "torch.sin",
2117        "torch.sinc_",
2118        "torch.sinc",
2119        "torch.sinh_",
2120        "torch.sinh",
2121        "torch.slice_copy",
2122        "torch.slice_scatter",
2123        "torch.slogdet",
2124        "torch.smm",
2125        "torch.softmax",
2126        "torch.sort",
2127        "torch.split_copy",
2128        "torch.split_with_sizes_copy",
2129        "torch.split_with_sizes",
2130        "torch.spmm",
2131        "torch.sqrt_",
2132        "torch.sqrt",
2133        "torch.square_",
2134        "torch.square",
2135        "torch.squeeze_copy",
2136        "torch.squeeze",
2137        "torch.sspaddmm",
2138        "torch.stack",
2139        "torch.std_mean",
2140        "torch.std",
2141        "torch.sub",
2142        "torch.subtract",
2143        "torch.sum",
2144        "torch.svd",
2145        "torch.swapaxes",
2146        "torch.swapdims",
2147        "torch.sym_constrain_range_for_size",
2148        "torch.sym_constrain_range",
2149        "torch.t_copy",
2150        "torch.t",
2151        "torch.take_along_dim",
2152        "torch.take",
2153        "torch.tan_",
2154        "torch.tan",
2155        "torch.tanh_",
2156        "torch.tanh",
2157        "torch.tensor_split",
2158        "torch.tensor",
2159        "torch.threshold_",
2160        "torch.threshold",
2161        "torch.tile",
2162        "torch.topk",
2163        "torch.trace",
2164        "torch.transpose_copy",
2165        "torch.transpose",
2166        "torch.trapezoid",
2167        "torch.trapz",
2168        "torch.triangular_solve",
2169        "torch.tril_indices",
2170        "torch.tril",
2171        "torch.triplet_margin_loss",
2172        "torch.triu_indices",
2173        "torch.triu",
2174        "torch.true_divide",
2175        "torch.trunc_",
2176        "torch.trunc",
2177        "torch.unbind_copy",
2178        "torch.unbind",
2179        "torch.unflatten",
2180        "torch.unfold_copy",
2181        "torch.unsafe_chunk",
2182        "torch.unsafe_split_with_sizes",
2183        "torch.unsafe_split",
2184        "torch.unsqueeze_copy",
2185        "torch.unsqueeze",
2186        "torch.values_copy",
2187        "torch.vander",
2188        "torch.var_mean",
2189        "torch.var",
2190        "torch.vdot",
2191        "torch.view_as_complex_copy",
2192        "torch.view_as_complex",
2193        "torch.view_as_real_copy",
2194        "torch.view_as_real",
2195        "torch.view_copy",
2196        "torch.vsplit",
2197        "torch.vstack",
2198        "torch.where",
2199        "torch.xlogy_",
2200        "torch.xlogy",
2201        "torch.zero_",
2202        "torch.zeros",
2203        "torch.zeros_like",
2204        "torch._fused_sgd_",
2205        "torch.slice_inverse",
2206        "torch._assert_scalar",
2207        "torch._functional_assert_scalar",
2208    ],
2209    TorchInGraphFunctionVariable,
2210)
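# Note on the construction above: dict.fromkeys(names, value) maps every
# listed name to the same value, so the table is equivalent to
#   {name: TorchInGraphFunctionVariable for name in [...]}
# e.g. torch_c_binding_in_graph_functions["torch.add"] is
# TorchInGraphFunctionVariable, which tells Dynamo to trace torch.add into
# the graph rather than skip it ("torch.add" is just an illustrative entry
# picked from the list above).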
2211
2212
2213if sys.version_info >= (3, 9):
2214    torch_c_binding_in_graph_functions["math.lcm"] = TorchInGraphFunctionVariable
2215if sys.version_info >= (3, 11):
2216    torch_c_binding_in_graph_functions["math.exp2"] = TorchInGraphFunctionVariable
2217    torch_c_binding_in_graph_functions["math.cbrt"] = TorchInGraphFunctionVariable
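# The version guards above mirror the standard library: math.lcm only
# exists on Python >= 3.9, and math.exp2 / math.cbrt were added in
# Python 3.11, so these entries are registered conditionally. Sketch of
# the resulting lookup (assuming a 3.11+ interpreter):
#   torch_c_binding_in_graph_functions["math.cbrt"]  # -> TorchInGraphFunctionVariable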
2218
2219
2220# In-graph functions (including ones that can be constant folded) that are not C bindings
2221torch_non_c_binding_in_graph_functions = dict.fromkeys(
2222    [
2223        "torch.__future__.get_overwrite_module_params_on_conversion",
2224        "torch.__future__.set_overwrite_module_params_on_conversion",
2225        "torch.__getattr__",
2226        "torch._assert",
2227        "torch._check_index",
2228        "torch._check_is_size",
2229        "torch._check_not_implemented",
2230        "torch._check_tensor_all_with",
2231        "torch._check_tensor_all",
2232        "torch._check_type",
2233        "torch._check_value",
2234        "torch._check_with",
2235        "torch._check",
2236        "torch._compile._disable_dynamo",
2237        "torch._functorch.apis.chunk_vmap",
2238        "torch._functorch.autograd_function.custom_function_call_functionalize",
2239        "torch._functorch.autograd_function.custom_function_call_grad",
2240        "torch._functorch.autograd_function.custom_function_call_vmap_generate_rule",
2241        "torch._functorch.autograd_function.custom_function_call_vmap",
2242        "torch._functorch.autograd_function.generate_single_level_function",
2243        "torch._functorch.autograd_function.get_tangents_in_dims",
2244        "torch._functorch.autograd_function.has_overriden_vmap_rule",
2245        "torch._functorch.autograd_function.reductify_leaf",
2246        "torch._functorch.autograd_function.reductify",
2247        "torch._functorch.autograd_function.validate_vmap_returns_tuple_of_two_elements",
2248        "torch._functorch.autograd_function.vmapify_autograd_function",
2249        "torch._functorch.autograd_function.wrap_outputs_maintaining_identity",
2250        "torch._functorch.batch_norm_replacement.batch_norm_without_running_stats",
2251        "torch._functorch.batch_norm_replacement.replace_all_batch_norm_modules_",
2252        "torch._functorch.deprecated.combine_state_for_ensemble",
2253        "torch._functorch.deprecated.functionalize",
2254        "torch._functorch.deprecated.get_warning",
2255        "torch._functorch.deprecated.make_functional_with_buffers",
2256        "torch._functorch.deprecated.make_functional",
2257        "torch._functorch.deprecated.setup_docs",
2258        "torch._functorch.deprecated.warn_deprecated",
2259        "torch._functorch.eager_transforms._any_differentiable",
2260        "torch._functorch.eager_transforms._autograd_grad",
2261        "torch._functorch.eager_transforms._vjp_treespec_compare",
2262        "torch._functorch.eager_transforms._set_tensor_requires_grad",
2263        "torch._functorch.eager_transforms._jvp_treespec_compare",
2264        "torch._functorch.eager_transforms._linearize_treespec_compare",
2265        "torch._functorch.eager_transforms._is_differentiable",
2266        "torch._functorch.eager_transforms._maybe_unwrap_functional_tensor",
2267        "torch._functorch.eager_transforms._maybe_wrap_functional_tensor",
2268        "torch._functorch.eager_transforms._unwrap_all_tensors_from_functional",
2269        "torch._functorch.eager_transforms._wrap_all_tensors_to_functional",
2270        "torch._functorch.eager_transforms.assert_flat_tuple_of_tensors",
2271        "torch._functorch.eager_transforms.functionalize",
2272        "torch._functorch.eager_transforms.lazy_dynamo_disable",
2273        "torch._functorch.eager_transforms.noop",
2274        "torch._functorch.pyfunctorch.coerce_cinterpreter",
2275        "torch._functorch.pyfunctorch.dispatch_functorch",
2276        "torch._functorch.pyfunctorch.nested",
2277        "torch._functorch.pyfunctorch.retrieve_current_functorch_interpreter",
2278        "torch._functorch.pyfunctorch.temporarily_pop_interpreter_stack",
2279        "torch._functorch.utils.enable_single_level_autograd_function",
2280        "torch._functorch.utils.exposed_in",
2281        "torch._functorch.utils.unwrap_dead_wrappers",
2282        "torch._functorch.vmap.lazy_load_decompositions",
2283        "torch._guards.compile_context",
2284        "torch._guards.detect_fake_mode",
2285        "torch._guards.tracing",
2286        "torch._higher_order_ops.map._has_potential_branch_input_alias",
2287        "torch._higher_order_ops.map._has_potential_branch_input_mutation",
2288        "torch._higher_order_ops.map._stack_pytree",
2289        "torch._higher_order_ops.map._unstack_pytree",
2290        "torch._higher_order_ops.map.create_fw_bw_graph",
2291        "torch._higher_order_ops.map.map_autograd",
2292        "torch._higher_order_ops.map.map_dense",
2293        "torch._higher_order_ops.map.map_fake_tensor_mode",
2294        "torch._higher_order_ops.map.map_functionalize",
2295        "torch._higher_order_ops.map.map_proxy_torch_dispatch_mode",
2296        "torch._higher_order_ops.map.map_wrapper",
2297        "torch._higher_order_ops.map.trace_map",
2298        "torch._higher_order_ops.out_dtype.elementwise_dtypes",
2299        "torch._higher_order_ops.out_dtype.is_int_mm",
2300        "torch._higher_order_ops.out_dtype.out_dtype_dense",
2301        "torch._higher_order_ops.out_dtype.out_dtype_fake_tensor_mode",
2302        "torch._higher_order_ops.out_dtype.out_dtype_fallback",
2303        "torch._higher_order_ops.out_dtype.out_dtype_func",
2304        "torch._higher_order_ops.out_dtype.out_dtype_proxy",
2305        "torch._higher_order_ops.out_dtype.trace_out_dtype",
2306        "torch._higher_order_ops.utils.autograd_not_implemented_inner",
2307        "torch._higher_order_ops.utils.autograd_not_implemented",
2308        "torch._linalg_utils._symeig",
2309        "torch._linalg_utils.basis",
2310        "torch._linalg_utils.bform",
2311        "torch._linalg_utils.eig",
2312        "torch._linalg_utils.get_floating_dtype",
2313        "torch._linalg_utils.is_sparse",
2314        "torch._linalg_utils.lstsq",
2315        "torch._linalg_utils.matmul",
2316        "torch._linalg_utils.matrix_rank",
2317        "torch._linalg_utils.qform",
2318        "torch._linalg_utils.solve",
2319        "torch._linalg_utils.symeig",
2320        "torch._load_global_deps",
2321        "torch._lowrank._svd_lowrank",
2322        "torch._lowrank.get_approximate_basis",
2323        "torch._lowrank.pca_lowrank",
2324        "torch._lowrank.svd_lowrank",
2325        "torch._ops._compute_keyset",
2326        "torch._ops._get_tensors",
2327        "torch._ops._to_flat_tuple",
2328        "torch._ops.add_cached_op",
2329        "torch._ops.dl_open_guard",
2330        "torch._ops.get_cached_ops",
2331        "torch._ops.key_extractor",
2332        "torch._ops.reset_cached_ops",
2333        "torch._ops.resolve_key",
2334        "torch._preload_cuda_deps",
2335        "torch._register_device_module",
2336        "torch._running_with_deploy",
2337        "torch._utils._dummy_type",
2338        "torch._weights_only_unpickler._get_allowed_globals",
2339        "torch._weights_only_unpickler.load",
2340        "torch.align_tensors",
2341        "torch.amp.autocast_mode._enter_autocast",
2342        "torch.amp.autocast_mode._exit_autocast",
2343        "torch.amp.autocast_mode.autocast_decorator",
2344        "torch.amp.autocast_mode.custom_bwd",
2345        "torch.amp.autocast_mode.custom_fwd",
2346        "torch.are_deterministic_algorithms_enabled",
2347        "torch.atleast_1d",
2348        "torch.atleast_2d",
2349        "torch.atleast_3d",
2350        "torch.autograd._calculate_shape",
2351        "torch.autograd._is_checkpoint_valid",
2352        "torch.autograd._make_grads",
2353        "torch.autograd._register_py_tensor_class_for_device",
2354        "torch.autograd._tensor_or_tensors_to_tuple",
2355        "torch.autograd.forward_ad._maybe_load_decompositions",
2356        "torch.autograd.function._iter_filter",
2357        "torch.autograd.function._iter_jit_values",
2358        "torch.autograd.function._iter_None_tensors",
2359        "torch.autograd.function._iter_tensors_permissive",
2360        "torch.autograd.function._iter_tensors",
2361        "torch.autograd.function._jit_unwrap_structured",
2362        "torch.autograd.function._map_tensor_data",
2363        "torch.autograd.function._nested_map",
2364        "torch.autograd.function._unflatten",
2365        "torch.autograd.function.once_differentiable",
2366        "torch.autograd.function.traceable",
2367        "torch.autograd.functional._as_tuple_nocheck",
2368        "torch.autograd.functional._as_tuple",
2369        "torch.autograd.functional._autograd_grad",
2370        "torch.autograd.functional._check_requires_grad",
2371        "torch.autograd.functional._construct_standard_basis_for",
2372        "torch.autograd.functional._fill_in_zeros",
2373        "torch.autograd.functional._grad_postprocess",
2374        "torch.autograd.functional._grad_preprocess",
2375        "torch.autograd.functional._jacfwd",
2376        "torch.autograd.functional._tuple_postprocess",
2377        "torch.autograd.functional._validate_v",
2378        "torch.autograd.functional.hessian",
2379        "torch.autograd.functional.hvp",
2380        "torch.autograd.functional.jacobian",
2381        "torch.autograd.functional.jvp",
2382        "torch.autograd.functional.vhp",
2383        "torch.autograd.functional.vjp",
2384        "torch.autograd.grad_mode._enter_inference_mode",
2385        "torch.autograd.grad_mode._exit_inference_mode",
2386        "torch.autograd.graph._get_sid",
2387        "torch.autograd.graph._get_tid",
2388        "torch.autograd.graph.allow_mutation_on_saved_tensors",
2389        "torch.autograd.graph.get_gradient_edge",
2390        "torch.autograd.graph.increment_version",
2391        "torch.autograd.graph.register_multi_grad_hook",
2392        "torch.autograd.variable",
2393        "torch.backends.__allow_nonbracketed_mutation",
2394        "torch.backends.cpu.get_cpu_capability",
2395        "torch.backends.cuda.can_use_efficient_attention",
2396        "torch.backends.cuda.can_use_flash_attention",
2397        "torch.backends.cuda.can_use_cudnn_attention",
2398        "torch.backends.cuda.enable_flash_sdp",
2399        "torch.backends.cuda.enable_math_sdp",
2400        "torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp",
2401        "torch.backends.cuda.enable_mem_efficient_sdp",
2402        "torch.backends.cuda.flash_sdp_enabled",
2403        "torch.backends.cuda.is_built",
2404        "torch.backends.cuda.is_flash_attention_available",
2405        "torch.backends.cuda.math_sdp_enabled",
2406        "torch.backends.cuda.fp16_bf16_reduction_math_sdp_allowed",
2407        "torch.backends.cuda.mem_efficient_sdp_enabled",
2408        "torch.backends.cuda.cudnn_sdp_enabled",
2409        "torch.backends.cuda.enable_cudnn_sdp",
2410        "torch.backends.cuda.preferred_blas_library",
2411        "torch.backends.cuda.preferred_linalg_library",
2412        "torch.backends.cuda.sdp_kernel",
2413        "torch.backends.cudnn._init",
2414        "torch.backends.cudnn.flags",
2415        "torch.backends.cudnn.is_acceptable",
2416        "torch.backends.cudnn.is_available",
2417        "torch.backends.cudnn.set_flags",
2418        "torch.backends.cudnn.version",
2419        "torch.backends.disable_global_flags",
2420        "torch.backends.flags_frozen",
2421        "torch.backends.mkl.is_available",
2422        "torch.backends.mkldnn.flags",
2423        "torch.backends.mkldnn.is_available",
2424        "torch.backends.mkldnn.set_flags",
2425        "torch.backends.mps._init",
2426        "torch.backends.mps.is_available",
2427        "torch.backends.mps.is_built",
2428        "torch.backends.mps.is_macos13_or_newer",
2429        "torch.backends.openmp.is_available",
2430        "torch.backends.quantized._get_qengine_id",
2431        "torch.backends.quantized._get_qengine_str",
2432        "torch.block_diag",
2433        "torch.broadcast_tensors",
2434        "torch.cartesian_prod",
2435        "torch.cdist",
2436        "torch.chain_matmul",
2437        "torch.compile",
2438        "torch.compiled_with_cxx11_abi",
2439        "torch._C._cpu._is_avx2_supported",
2440        "torch._C._cpu._is_avx512_supported",
2441        "torch._C._cpu._is_avx512_vnni_supported",
2442        "torch._C._cpu._is_avx512_bf16_supported",
2443        "torch._C._cpu._is_amx_tile_supported",
2444        "torch.cpu._init_amx",
2445        "torch.cpu.current_device",
2446        "torch.cpu.current_stream",
2447        "torch.cpu.device_count",
2448        "torch.cpu.is_available",
2449        "torch.cpu.set_device",
2450        "torch.cpu.stream",
2451        "torch.cpu.synchronize",
2452        "torch.cuda._check_capability",
2453        "torch.cuda._check_cubins",
2454        "torch.cuda._device_count_amdsmi",
2455        "torch.cuda._device_count_nvml",
2456        "torch.cuda._get_amdsmi_handler",
2457        "torch.cuda._get_amdsmi_device_index",
2458        "torch.cuda._get_device",
2459        "torch.cuda._get_generator",
2460        "torch.cuda._get_nvml_device_index",
2461        "torch.cuda._get_pynvml_handler",
2462        "torch.cuda._get_rng_state_offset",
2463        "torch.cuda._is_compiled",
2464        "torch.cuda._lazy_call",
2465        "torch.cuda._lazy_init",
2466        "torch.cuda._memory_viz._block_extra_legacy",
2467        "torch.cuda._memory_viz._block_extra",
2468        "torch.cuda._memory_viz._format_size",
2469        "torch.cuda._memory_viz._format_viz",
2470        "torch.cuda._memory_viz._frame_filter",
2471        "torch.cuda._memory_viz._frame_fmt",
2472        "torch.cuda._memory_viz._frames_fmt",
2473        "torch.cuda._memory_viz._profile_to_snapshot",
2474        "torch.cuda._memory_viz._report_free",
2475        "torch.cuda._memory_viz._write_blocks",
2476        "torch.cuda._memory_viz.calc_active",
2477        "torch.cuda._memory_viz.compare",
2478        "torch.cuda._memory_viz.format_flamegraph",
2479        "torch.cuda._memory_viz.memory",
2480        "torch.cuda._memory_viz.profile_plot",
2481        "torch.cuda._memory_viz.segment_plot",
2482        "torch.cuda._memory_viz.segments",
2483        "torch.cuda._memory_viz.segsum",
2484        "torch.cuda._memory_viz.trace_plot",
2485        "torch.cuda._memory_viz.trace",
2486        "torch.cuda._nvml_based_avail",
2487        "torch.cuda._parse_visible_devices",
2488        "torch.cuda._raw_device_count_amdsmi",
2489        "torch.cuda._raw_device_count_nvml",
2490        "torch.cuda._raw_device_uuid_amdsmi",
2491        "torch.cuda._raw_device_uuid_nvml",
2492        "torch.cuda._register_triton_kernels",
2493        "torch.cuda._set_rng_state_offset",
2494        "torch.cuda._set_stream_by_id",
2495        "torch.cuda._sleep",
2496        "torch.cuda._transform_uuid_to_ordinals",
2497        "torch.cuda._utils._get_device_index",
2498        "torch.cuda.amp.autocast_mode._cast",
2499        "torch.cuda.amp.autocast_mode.custom_bwd",
2500        "torch.cuda.amp.autocast_mode.custom_fwd",
2501        "torch.cuda.amp.common.amp_definitely_not_available",
2502        "torch.amp.grad_scaler._refresh_per_optimizer_state",
2503        "torch.cuda.can_device_access_peer",
2504        "torch.cuda.check_error",
2505        "torch.cuda.clock_rate",
2506        "torch.cuda.cudart",
2507        "torch.cuda.current_blas_handle",
2508        "torch.cuda.current_stream",
2509        "torch.cuda.default_stream",
2510        "torch.cuda.device_count",
2511        "torch.cuda.get_arch_list",
2512        "torch.cuda.get_device_capability",
2513        "torch.cuda.get_device_name",
2514        "torch.cuda.get_device_properties",
2515        "torch.cuda.get_gencode_flags",
2516        "torch.cuda.get_sync_debug_mode",
2517        "torch.cuda.graphs.graph_pool_handle",
2518        "torch.cuda.graphs.is_current_stream_capturing",
2519        "torch.cuda.graphs.make_graphed_callables",
2520        "torch.cuda.init",
2521        "torch.cuda.ipc_collect",
2522        "torch.cuda.is_available",
2523        "torch.cuda.is_bf16_supported",
2524        "torch.cuda.is_initialized",
2525        "torch.cuda.jiterator._create_jit_fn",
2526        "torch.cuda.jiterator._create_multi_output_jit_fn",
2527        "torch.cuda.memory_usage",
2528        "torch.cuda.memory._dump_snapshot",
2529        "torch.cuda.memory._free_mutex",
2530        "torch.cuda.memory._get_current_allocator",
2531        "torch.cuda.memory._host_allocator",
2532        "torch.cuda.memory._record_memory_history_impl",
2533        "torch.cuda.memory._record_memory_history_legacy",
2534        "torch.cuda.memory._record_memory_history",
2535        "torch.cuda.memory._save_memory_usage",
2536        "torch.cuda.memory._save_segment_usage",
2537        "torch.cuda.memory._set_allocator_settings",
2538        "torch.cuda.memory._snapshot",
2539        "torch.cuda.memory.caching_allocator_alloc",
2540        "torch.cuda.memory.caching_allocator_delete",
2541        "torch.cuda.memory.change_current_allocator",
2542        "torch.cuda.memory.empty_cache",
2543        "torch.cuda.memory.get_allocator_backend",
2544        "torch.cuda.memory.list_gpu_processes",
2545        "torch.cuda.memory.max_memory_allocated",
2546        "torch.cuda.memory.max_memory_cached",
2547        "torch.cuda.memory.max_memory_reserved",
2548        "torch.cuda.memory.mem_get_info",
2549        "torch.cuda.memory.memory_allocated",
2550        "torch.cuda.memory.memory_cached",
2551        "torch.cuda.memory.memory_reserved",
2552        "torch.cuda.memory.memory_snapshot",
2553        "torch.cuda.memory.memory_stats_as_nested_dict",
2554        "torch.cuda.memory.memory_stats",
2555        "torch.cuda.memory.memory_summary",
2556        "torch.cuda.memory.reset_accumulated_memory_stats",
2557        "torch.cuda.memory.reset_max_memory_allocated",
2558        "torch.cuda.memory.reset_max_memory_cached",
2559        "torch.cuda.memory.reset_peak_memory_stats",
2560        "torch.cuda.memory.set_per_process_memory_fraction",
2561        "torch.cuda.nccl._check_sequence_type",
2562        "torch.cuda.nccl.all_gather",
2563        "torch.cuda.nccl.all_reduce",
2564        "torch.cuda.nccl.broadcast",
2565        "torch.cuda.nccl.init_rank",
2566        "torch.cuda.nccl.is_available",
2567        "torch.cuda.nccl.reduce_scatter",
2568        "torch.cuda.nccl.reduce",
2569        "torch.cuda.nccl.unique_id",
2570        "torch.cuda.nccl.version",
2571        "torch.cuda.nvtx.mark",
2572        "torch.cuda.nvtx.range_end",
2573        "torch.cuda.nvtx.range_pop",
2574        "torch.cuda.nvtx.range_push",
2575        "torch.cuda.nvtx.range_start",
2576        "torch.cuda.nvtx.range",
2577        "torch.cuda.power_draw",
2578        "torch.cuda.profiler.init",
2579        "torch.cuda.profiler.profile",
2580        "torch.cuda.profiler.start",
2581        "torch.cuda.profiler.stop",
2582        "torch.cuda.random.get_rng_state_all",
2583        "torch.cuda.random.initial_seed",
2584        "torch.cuda.random.manual_seed_all",
2585        "torch.cuda.random.manual_seed",
2586        "torch.cuda.random.seed_all",
2587        "torch.cuda.random.seed",
2588        "torch.cuda.random.set_rng_state_all",
2589        "torch.cuda.set_stream",
2590        "torch.cuda.set_sync_debug_mode",
2591        "torch.cuda.stream",
2592        "torch.cuda.synchronize",
2593        "torch.cuda.temperature",
2594        "torch.cuda.utilization",
2595        "torch.einsum",
2596        "torch.functional._check_list_size",
2597        "torch.functional._consecutive_return_counts",
2598        "torch.functional._consecutive_return_inverse_false",
2599        "torch.functional._consecutive_return_inverse_true",
2600        "torch.functional._consecutive_return_inverse",
2601        "torch.functional._consecutive_return_output",
2602        "torch.functional._lu_impl",
2603        "torch.functional._lu_no_infos",
2604        "torch.functional._lu_with_infos",
2605        "torch.functional._meshgrid",
2606        "torch.functional._return_counts",
2607        "torch.functional._return_inverse_false",
2608        "torch.functional._return_inverse_true",
2609        "torch.functional._return_inverse",
2610        "torch.functional._return_output",
2611        "torch.functional._unique_consecutive_impl",
2612        "torch.functional._unique_impl",
2613        "torch.functional._unravel_index",
2614        "torch.functional.broadcast_shapes",
2615        "torch.functional.lu",
2616        "torch.functional.unique",
2617        "torch.functional.unravel_index",
2618        "torch.futures.collect_all",
2619        "torch.futures.wait_all",
2620        "torch.fx.experimental.const_fold.split_const_subgraphs",
2621        "torch.fx.experimental.proxy_tensor.make_fx",
2622        "torch.get_deterministic_debug_mode",
2623        "torch.get_float32_matmul_precision",
2624        "torch.is_deterministic_algorithms_warn_only_enabled",
2625        "torch.is_storage",
2626        "torch.is_tensor",
2627        "torch.is_warn_always_enabled",
2628        "torch.masked._ops._any",
2629        "torch.masked._ops._apply_docstring_templates",
2630        "torch.masked._ops._canonical_dim",
2631        "torch.masked._ops._combine_input_and_mask",
2632        "torch.masked._ops._generate_docstring",
2633        "torch.masked._ops._input_mask",
2634        "torch.masked._ops._output_mask",
2635        "torch.masked._ops._reduction_identity",
2636        "torch.masked._ops._sparse_coo_flatten_indices",
2637        "torch.masked._ops._sparse_coo_scatter_reduction_helper",
2638        "torch.masked._ops._sparse_coo_where",
2639        "torch.masked._ops._sparse_csr_segment_reduction_helper",
2640        "torch.masked._ops._sparse_csr_where",
2641        "torch.masked._ops._std_var",
2642        "torch.masked._ops._where",
2643        "torch.masked._ops.amax",
2644        "torch.masked._ops.amin",
2645        "torch.masked._ops.argmax",
2646        "torch.masked._ops.argmin",
2647        "torch.masked._ops.corresponding_real_dtype",
2648        "torch.masked._ops.cumprod",
2649        "torch.masked._ops.cumsum",
2650        "torch.masked._ops.log_softmax",
2651        "torch.masked._ops.logaddexp",
2652        "torch.masked._ops.logsumexp",
2653        "torch.masked._ops.mean",
2654        "torch.masked._ops.median",
2655        "torch.masked._ops.norm",
2656        "torch.masked._ops.normalize",
2657        "torch.masked._ops.prod",
2658        "torch.masked._ops.softmax",
2659        "torch.masked._ops.softmin",
2660        "torch.masked._ops.std",
2661        "torch.masked._ops.sum",
2662        "torch.masked._ops.var",
2663        "torch.meshgrid",
2664        "torch.mps._get_default_mps_generator",
2665        "torch.mps.current_allocated_memory",
2666        "torch.mps.driver_allocated_memory",
2667        "torch.mps.empty_cache",
2668        "torch.mps.get_rng_state",
2669        "torch.mps.manual_seed",
2670        "torch.mps.profiler.profile",
2671        "torch.mps.profiler.start",
2672        "torch.mps.profiler.stop",
2673        "torch.mps.seed",
2674        "torch.mps.set_per_process_memory_fraction",
2675        "torch.mps.set_rng_state",
2676        "torch.mps.synchronize",
2677        "torch.nested._internal.nested_tensor.buffer_from_jagged",
2678        "torch.nested._internal.nested_tensor.get_tensor_symint",
2679        "torch.nested._internal.nested_tensor.is_expandable_to",
2680        "torch.nested._internal.nested_tensor.jagged_from_list",
2681        "torch.nested._internal.nested_tensor.jagged_from_tensor_and_lengths",
2682        "torch.nested._internal.nested_tensor.nested_view_from_values_offsets",
2683        "torch.nested._internal.nested_tensor.nested_view_from_values_offsets_lengths",
2684        "torch.nested.as_nested_tensor",
2685        "torch.nested.narrow",
2686        "torch.nested.nested_tensor",
2687        "torch.nn._reduction.get_enum",
2688        "torch.nn._reduction.legacy_get_enum",
2689        "torch.nn._reduction.legacy_get_string",
2690        "torch.nn.factory_kwargs",
2691        "torch.nn.functional.adaptive_avg_pool2d",
2692        "torch.nn.functional.adaptive_avg_pool3d",
2693        "torch.nn.functional.adaptive_max_pool1d_with_indices",
2694        "torch.nn.functional.adaptive_max_pool1d",
2695        "torch.nn.functional.adaptive_max_pool2d_with_indices",
2696        "torch.nn.functional.adaptive_max_pool2d",
2697        "torch.nn.functional.adaptive_max_pool3d_with_indices",
2698        "torch.nn.functional.adaptive_max_pool3d",
2699        "torch.nn.functional.affine_grid",
2700        "torch.nn.functional.alpha_dropout",
2701        "torch.nn.functional.assert_int_or_pair",
2702        "torch.nn.functional.batch_norm",
2703        "torch.nn.functional.binary_cross_entropy_with_logits",
2704        "torch.nn.functional.binary_cross_entropy",
2705        "torch.nn.functional.celu",
2706        "torch.nn.functional.cosine_embedding_loss",
2707        "torch.nn.functional.cross_entropy",
2708        "torch.nn.functional.ctc_loss",
2709        "torch.nn.functional.dropout",
2710        "torch.nn.functional.dropout1d",
2711        "torch.nn.functional.dropout2d",
2712        "torch.nn.functional.dropout3d",
2713        "torch.nn.functional.elu",
2714        "torch.nn.functional.embedding_bag",
2715        "torch.nn.functional.embedding",
2716        "torch.nn.functional.feature_alpha_dropout",
2717        "torch.nn.functional.fold",
2718        "torch.nn.functional.fractional_max_pool2d_with_indices",
2719        "torch.nn.functional.fractional_max_pool2d",
2720        "torch.nn.functional.fractional_max_pool3d_with_indices",
2721        "torch.nn.functional.fractional_max_pool3d",
2722        "torch.nn.functional.gaussian_nll_loss",
2723        "torch.nn.functional.glu",
2724        "torch.nn.functional.grid_sample",
2725        "torch.nn.functional.group_norm",
2726        "torch.nn.functional.gumbel_softmax",
2727        "torch.nn.functional.hardsigmoid",
2728        "torch.nn.functional.hardswish",
2729        "torch.nn.functional.hardtanh",
2730        "torch.nn.functional.hinge_embedding_loss",
2731        "torch.nn.functional.huber_loss",
2732        "torch.nn.functional.instance_norm",
2733        "torch.nn.functional.interpolate",
2734        "torch.nn.functional.kl_div",
2735        "torch.nn.functional.l1_loss",
2736        "torch.nn.functional.layer_norm",
2737        "torch.nn.functional.leaky_relu",
2738        "torch.nn.functional.local_response_norm",
2739        "torch.nn.functional.log_softmax",
2740        "torch.nn.functional.lp_pool1d",
2741        "torch.nn.functional.lp_pool2d",
2742        "torch.nn.functional.margin_ranking_loss",
2743        "torch.nn.functional.max_pool1d_with_indices",
2744        "torch.nn.functional.max_pool1d",
2745        "torch.nn.functional.max_pool2d_with_indices",
2746        "torch.nn.functional.max_pool2d",
2747        "torch.nn.functional.max_pool3d_with_indices",
2748        "torch.nn.functional.max_pool3d",
2749        "torch.nn.functional.max_unpool1d",
2750        "torch.nn.functional.max_unpool2d",
2751        "torch.nn.functional.max_unpool3d",
2752        "torch.nn.functional.mish",
2753        "torch.nn.functional.mse_loss",
2754        "torch.nn.functional.multi_head_attention_forward",
2755        "torch.nn.functional.multi_margin_loss",
2756        "torch.nn.functional.multilabel_margin_loss",
2757        "torch.nn.functional.multilabel_soft_margin_loss",
2758        "torch.nn.functional.nll_loss",
2759        "torch.nn.functional.normalize",
2760        "torch.nn.functional.poisson_nll_loss",
2761        "torch.nn.functional.relu",
2762        "torch.nn.functional.relu6",
2763        "torch.nn.functional.rrelu",
2764        "torch.nn.functional.selu",
2765        "torch.nn.functional.sigmoid",
2766        "torch.nn.functional.silu",
2767        "torch.nn.functional.smooth_l1_loss",
2768        "torch.nn.functional.soft_margin_loss",
2769        "torch.nn.functional.softmax",
2770        "torch.nn.functional.softmin",
2771        "torch.nn.functional.softsign",
2772        "torch.nn.functional.tanh",
2773        "torch.nn.functional.tanhshrink",
2774        "torch.nn.functional.triplet_margin_loss",
2775        "torch.nn.functional.unfold",
2776        "torch.nn.functional.upsample_bilinear",
2777        "torch.nn.functional.upsample_nearest",
2778        "torch.nn.functional.upsample",
2779        "torch.nn.grad._pair",
2780        "torch.nn.grad._single",
2781        "torch.nn.grad._triple",
2782        "torch.nn.grad.conv1d_input",
2783        "torch.nn.grad.conv1d_weight",
2784        "torch.nn.grad.conv2d_input",
2785        "torch.nn.grad.conv2d_weight",
2786        "torch.nn.grad.conv3d_input",
2787        "torch.nn.grad.conv3d_weight",
2788        "torch.nn.modules.activation._is_make_fx_tracing",
2789        "torch.nn.modules.utils._list_with_default",
2790        "torch.nn.modules.utils._ntuple",
2791        "torch.nn.modules.utils._quadruple",
2792        "torch.nn.modules.utils._reverse_repeat_tuple",
2793        "torch.nn.modules.utils.consume_prefix_in_state_dict_if_present",
2794        "torch.nn.parameter.is_lazy",
2795        "torch.norm",
2796        "torch.quantization.default_eval_fn",
2797        "torch.random._seed_custom_device",
2798        "torch.random.fork_rng",
2799        "torch.random.initial_seed",
2800        "torch.random.seed",
2801        "torch.return_types.pytree_register_structseq",
2802        "torch.set_default_device",
2803        "torch.set_default_dtype",
2804        "torch.set_default_tensor_type",
2805        "torch.set_deterministic_debug_mode",
2806        "torch.set_float32_matmul_precision",
2807        "torch.set_warn_always",
2808        "torch.signal.windows.windows._add_docstr",
2809        "torch.signal.windows.windows._window_function_checks",
2810        "torch.signal.windows.windows.bartlett",
2811        "torch.signal.windows.windows.blackman",
2812        "torch.signal.windows.windows.cosine",
2813        "torch.signal.windows.windows.exponential",
2814        "torch.signal.windows.windows.gaussian",
2815        "torch.signal.windows.windows.general_cosine",
2816        "torch.signal.windows.windows.general_hamming",
2817        "torch.signal.windows.windows.hamming",
2818        "torch.signal.windows.windows.hann",
2819        "torch.signal.windows.windows.kaiser",
2820        "torch.signal.windows.windows.merge_dicts",
2821        "torch.signal.windows.windows.nuttall",
2822        "torch.signal.windows.windows.parse_kwargs",
2823        "torch.sparse.semi_structured.to_sparse_semi_structured",
2824        "torch.sparse.sum",
2825        "torch.split",
2826        "torch.stft",
2827        "torch.sym_float",
2828        "torch.sym_int",
2829        "torch.sym_ite",
2830        "torch.sym_max",
2831        "torch.sym_min",
2832        "torch.sym_not",
2833        "torch.tensordot",
2834        "torch.typename",
2835        "torch.unique_consecutive",
2836        "torch.use_deterministic_algorithms",
2837    ],
2838    TorchInGraphFunctionVariable,
2839)
2840
2841
2842torch_name_rule_map = [
2843    manual_torch_name_rule_map,
2844    torch_c_binding_in_graph_functions,
2845    torch_non_c_binding_in_graph_functions,
2846]
2847
2848
2849"""
2850Generate the map from torch object to its Dynamo tracing rule (the wrapping variable class).
2851"""
2852
2853
2854@functools.lru_cache(None)
2855def get_torch_obj_rule_map() -> Dict[Any, Type["VariableTracker"]]:
2856    d: Dict[Any, Type[VariableTracker]] = {}
2857    for m in torch_name_rule_map:
2858        for k, v in m.items():  # type: ignore[attr-defined]
2859            if ".py#" not in k:
2860                obj = load_object(k)
2861            else:
2862                obj = _module_dir(torch) + k[len("torch/") :]
2863            if obj is not None:
2864                if obj in d and d[obj] != v:
2865                    raise AssertionError(
2866                        f"Duplicate torch object {obj} with different rules: {v}, {d[obj]}"
2867                    )
2868                else:
2869                    d[obj] = v
2870    return d
2871
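"""
A hedged sketch of how this cached map is consumed: "torch.einsum" is
registered above under TorchInGraphFunctionVariable, so the live callable
resolves to that rule class.

    >>> get_torch_obj_rule_map()[torch.einsum]
    <class 'torch._dynamo.variables.torch.TorchInGraphFunctionVariable'>
"""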
2872
2873def _load_obj_from_str(fully_qualified_name):
2874    module, obj_name = fully_qualified_name.rsplit(".", maxsplit=1)
2875    return getattr(importlib.import_module(module), obj_name)
2876
2877
2878"""
2879Load torch objects from their fully qualified string names.
2880"""
2881
2882
2883def load_object(name):
2884    try:
2885        x = name.split("#")
2886        if len(x) == 2:
2887            obj = _load_obj_from_str(x[0])
2888            val = getattr(obj, x[1])
2889        else:
2890            assert len(x) == 1, f"Invalid obj name {name}"
2891            val = _load_obj_from_str(x[0])
2892        val = unwrap_if_wrapper(val)
2893    except (AttributeError, ImportError):
2894        val = None
2895    return val
2896
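"""
A minimal sketch of load_object on the two name forms used in the rule maps:
a plain dotted path, and "module.obj#attr" for an attribute of the resolved
object. Names that fail to resolve yield None rather than raising.

    >>> load_object("torch.nn.functional.relu") is torch.nn.functional.relu
    True
    >>> load_object("torch.this_does_not_exist") is None
    True
"""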
2897
2898"""
2899Get all torch.Tensor methods which are allowed to be in-graph functions.
2900"""
2901
2902
2903@functools.lru_cache(None)
2904def get_tensor_method():
2905    s = set()
2906    for name in dir(torch.Tensor):
2907        method = getattr(torch.Tensor, name)
2908        if isinstance(
2909            method, (types.MethodDescriptorType, types.WrapperDescriptorType)
2910        ):
2911            s.add(method)
2912    return frozenset(s)
2913
2914
2915"""
2916Return whether a torch object is an ATen op or a torch.Tensor method.
2917"""
2918
2919
2920def is_aten_op_or_tensor_method(obj):
2921    return obj in get_tensor_method() or isinstance(
2922        obj,
2923        (torch._ops.OpOverloadPacket, torch._ops.OpOverload),
2924    )
2925
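"""
A hedged example of the check above: C-level Tensor method descriptors and
ATen op overload packets both count as in-graph callables.

    >>> is_aten_op_or_tensor_method(torch.Tensor.add)    # method descriptor
    True
    >>> is_aten_op_or_tensor_method(torch.ops.aten.add)  # OpOverloadPacket
    True
"""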
2926
2927class FunctionIdSet:
2928    """
2929    Track a set of `id()`s of objects which are either allowed or not
2930    allowed to go into the generated FX graph.  Used to test for torch.*,
2931    numpy.*, builtins.*, etc.
2932
2933    Supports user modification to permit customization of what can be
2934    added to the graph and what will cause a graph break.
2935    """
2936
2937    function_ids: Optional[Set[int]] = None
2938    function_names: Optional[Dict[int, str]] = None
2939
2940    def __init__(
2941        self, lazy_initializer: Callable[[], Union[Dict[int, str], Set[int]]]
2942    ) -> None:
2943        self.lazy_initializer = lazy_initializer
2944
2945    def __call__(self) -> Set[int]:
2946        if self.function_ids is None:
2947            value = self.lazy_initializer()
2948            if isinstance(value, dict):
2949                self.function_ids = set(value.keys())
2950                self.function_names = value
2951            else:
2952                assert isinstance(value, set)
2953                self.function_ids = value
2954        return self.function_ids
2955
2956    def get_name(self, idx: int, default: str):
2957        self()  # lazy init
2958        assert self.function_names is not None
2959        return self.function_names.get(idx, default)
2960
2961    def add(self, idx: int):
2962        function_ids = self()  # lazy init
2963        function_ids.add(idx)
2964
2965    def remove(self, idx: int):
2966        function_ids = self()
2967        if idx in function_ids:
2968            function_ids.remove(idx)
2969
2970    def __contains__(self, idx: int) -> bool:
2971        return idx in self()
2972
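"""
Usage sketch for FunctionIdSet: each decorated initializer below becomes a
lazily-populated, set-like object keyed by id(), so membership tests import
nothing until first use.

    >>> id(print) in _builtin_function_ids          # triggers the lazy init
    True
    >>> _builtin_function_ids.get_name(id(print), "<unknown>")
    'builtins.print'
"""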
2973
2974@FunctionIdSet
2975def _allowed_callable_ids() -> Dict[int, str]:
2976    rv: Dict[int, str] = {}
2977    return rv
2978
2979
2980@FunctionIdSet
2981def _disallowed_callable_ids() -> Dict[int, str]:
2982    rv: Dict[int, str] = {}
2983    return rv
2984
2985
2986@FunctionIdSet
2987def _builtin_function_ids() -> Dict[int, str]:
2988    # See also torch/_dynamo/polyfills/loader.py, which removes items in _builtin_function_ids
2989    rv = {
2990        id(v): f"builtins.{k}"
2991        for k, v in builtins.__dict__.items()
2992        if not k.startswith("_") and callable(v)
2993    }
2994    rv.update(
2995        {
2996            id(v): f"operator.{k}"
2997            for k, v in operator.__dict__.items()
2998            if not k.startswith("_") and callable(v)
2999        }
3000    )
3001    rv.update(
3002        {
3003            id(cast): "typing.cast",
3004            id(functools.reduce): "functools.reduce",
3005            id(copy.deepcopy): "copy.deepcopy",
3006        }
3007    )
3008    return rv
3009
3010
3011@FunctionIdSet
3012def _numpy_function_ids() -> Dict[int, str]:
3013    rv = {}
3014    for mod in NP_SUPPORTED_MODULES:
3015        rv.update(
3016            {
3017                id(v): f"{mod.__name__}.{k}"
3018                for k, v in mod.__dict__.items()
3019                if callable(v)
3020                and (getattr(v, "__module__", None) or mod.__name__) == mod.__name__
3021            }
3022        )
3023    return rv
3024
3025
3026@FunctionIdSet
3027def _builtin_constant_ids() -> Dict[int, str]:
3028    """
3029    Collects constant builtins by eliminating callable items.
3030    """
3031    rv = {
3032        id(v): f"builtins.{k}"
3033        for k, v in builtins.__dict__.items()
3034        if not k.startswith("_") and not callable(v)
3035    }
3036    return rv
3037
3038
3039_lazy_module_init: Dict[str, List[Callable[[], None]]] = defaultdict(list)
3040
3041
3042def add_module_init_func(name: str, init_func: Callable[[], None]) -> None:
3043    """Register a module without eagerly importing it"""
3044    # If the module is already imported, eagerly run init
3045    assert "." not in name, f"Expected a root module name, but got {name}"
    if name in sys.modules:
        init_func()

    # Module is not yet imported; delay processing until it is first needed
3046    assert name not in _lazy_module_init
3047    _lazy_module_init[name].append(init_func)
3048
3049
3050def _maybe_init_lazy_module(obj: object) -> None:
3051    module = getattr(obj, "__module__", None)
3052    if module is None:
3053        return
3054
3055    base_module = module.split(".")[0]
3056    init_funcs = _lazy_module_init.pop(base_module, None)
3057    if init_funcs is not None:
3058        for fn in init_funcs:
3059            fn()
3060
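"""
Hedged sketch of the lazy-module hooks above: a backend registers an init
callback for a root module, and it runs the first time Dynamo encounters an
object from that module (via _maybe_init_lazy_module), not at registration
time. "my_lib" is a hypothetical module name.

    >>> add_module_init_func("my_lib", lambda: print("installing my_lib rules"))
"""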
3061
3062def is_callable_allowed(obj) -> bool:
3063    _maybe_init_lazy_module(obj)
3064    return id(obj) in _allowed_callable_ids
3065
3066
3067def is_callable_disallowed(obj) -> bool:
3068    _maybe_init_lazy_module(obj)
3069    return id(obj) in _disallowed_callable_ids
3070
3071
3072def is_forbidden(obj) -> bool:
3073    _maybe_init_lazy_module(obj)
3074    return inspect.getattr_static(obj, "_dynamo_forbidden", False)
3075
3076
3077def is_builtin_callable(obj) -> bool:
3078    # See also torch/_dynamo/polyfills/loader.py, which removes items in _builtin_function_ids
3079    return id(obj) in _builtin_function_ids
3080
3081
3082def is_builtin_constant(obj) -> bool:
3083    return id(obj) in _builtin_constant_ids
3084
3085
3086def is_numpy(obj) -> bool:
3087    if np is None:
3088        return False
3089    return isinstance(obj, (np.ndarray, np.generic)) or id(obj) in _numpy_function_ids
3090
3091
3092def is_numpy_dtype(obj) -> bool:
3093    if np is None:
3094        return False
3095    return isinstance(obj, np.dtype)
3096
3097
3098def is_numpy_type_info(obj) -> bool:
3099    if np is None:
3100        return False
3101    return isinstance(obj, (np.finfo, np.iinfo))
3102
3103
3104BUILTIN_SKIPLIST = (
3105    abc,
3106    collections,
3107    contextlib,
3108    copy,
3109    copyreg,
3110    dataclasses,
3111    enum,
3112    functools,
3113    importlib,
3114    inspect,
3115    linecache,
3116    logging,
3117    multiprocessing,
3118    operator,
3119    posixpath,
3120    random,
3121    re,
3122    selectors,
3123    signal,
3124    tempfile,
3125    threading,
3126    tokenize,
3127    torch,  # torch/* is skipped by default unless specified in FUNC_INLINELIST or MOD_INLINELIST
3128    traceback,
3129    types,
3130    typing,
3131    unittest,
3132    weakref,
3133    _collections_abc,
3134    _weakrefset,
3135)
3136
3137# The third-party skiplist is defined by module-name strings because users may
3138# not have these libraries installed; we should use lazy import & skip in the future.
3139THIRDPARTY_SKIPLIST = (
3140    "fx2trt_oss",
3141    "hypothesis",
3142    "networkx",
3143    "numpy",
3144    "omegaconf",
3145    "onnx",
3146    "onnxruntime",
3147    "onnx_tf",
3148    "pandas",
3149    "sklearn",
3150    "tabulate",
3151    "tensorflow",
3152    "tensorrt",
3153    "torch2trt",
3154    "tqdm",
3155    "tree",
3156    "tvm",
3157    "xarray",
3158)
3159
3160
3161def _as_posix_path(path):
3162    posix_path = Path(os.path.normpath(path)).as_posix()
3163    # os.path.normpath and pathlib.Path remove trailing slash, so we need to add it back
3164    if path.endswith((os.path.sep, "/")):
3165        posix_path += "/"
3166    return posix_path
3167
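# E.g., on Windows "C:\repo\pkg\" becomes "C:/repo/pkg/"; the trailing
# separator is preserved so directory prefixes remain distinguishable from files.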
3168
3169def _strip_init_py(s):
3170    # TODO: Once we require py3.9 use removesuffix instead.
3171    suffix = "__init__.py"
3172    if s.endswith(suffix):
3173        s = s[: -len(suffix)]
3174    return _as_posix_path(s)
3175
3176
3177def _module_dir(m: types.ModuleType):
3178    # Protect against a module not exporting __file__ - this can happen for
3179    # frozen modules, for example.
3180    file = getattr(m, "__file__", None)
3181    return file and _strip_init_py(file)
3182
3183
3184# These are legacy workarounds, don't add new modules to this list.
3185# Please use the MOD_INLINELIST instead to force inline functions under particular modules.
3186LEGACY_MOD_INLINELIST = {
3187    "torch._dynamo.external_utils",
3188    "torch._export.db.examples",
3189    "torch._export.wrappers",
3190    "torch._functorch.apis",
3191    "torch._functorch.deprecated",
3192    "torch._higher_order_ops.cond",
3193    "torch._higher_order_ops.while_loop",
3194    "torch._higher_order_ops.associative_scan",
3195    "torch.nn.attention.flex_attention",
3196    "torch.ao.quantization.pt2e.export_utils",
3197    "torch.ao.quantization.pt2e.qat_utils",
3198    "torch.ao.quantization.pt2e.representation.rewrite",
3199    "torch.ao.quantization.pt2e.utils",
3200    "torch.ao.quantization.quantizer.xnnpack_quantizer",
3201    "torch.export.unflatten",
3202    "torch.optim",
3203}
3204
3205if torch.distributed.is_available():
3206    LEGACY_MOD_INLINELIST |= {
3207        "torch.distributed.tensor._api",
3208        "torch.distributed.tensor.device_mesh",
3209        "torch.distributed.device_mesh",
3210        "torch.distributed.algorithms._checkpoint.checkpoint_wrapper",
3211        "torch.distributed.tensor.parallel._data_parallel_utils",
3212        "torch.distributed.tensor.parallel._utils",
3213        "torch.distributed.tensor.parallel.style",
3214        # we have to add replicate to LEGACY_MOD_INLINELIST to ensure
3215        # the forward_hook won't be ignored.
3216        "torch.distributed._composable.replicate",
3217    }
3218    if not torch._dynamo.config.skip_fsdp_hooks:
3219        LEGACY_MOD_INLINELIST.add("torch.distributed._composable.fsdp")
3220
3221
3222# Force inline functions under these modules, even if they are in *_SKIPLIST.
3223# We are using python module name instead of file or directory object to avoid circular dependency.
3224# Please keep this sorted alphabetically.
3225MOD_INLINELIST = [
3226    "torch._decomp",
3227    "torch._dynamo._trace_wrapped_higher_order_op",
3228    "torch._dynamo.comptime",
3229    "torch._dynamo.polyfills",
3230    "torch._functorch.autograd_function",
3231    "torch._functorch.eager_transforms",
3232    "torch._functorch.functional_call",
3233    "torch._functorch.vmap",
3234    "torch._higher_order_ops.associative_scan",
3235    "torch._higher_order_ops.strict_mode",
3236    "torch._higher_order_ops.while_loop",
3237    "torch._inductor.test_operators",
3238    "torch._library.autograd",
3239    "torch._library.custom_ops",
3240    "torch._prims",
3241    "torch._refs",
3242    "torch._tensor",
3243    "torch.amp.autocast_mode",
3244    "torch.ao.nn",
3245    "torch.autograd.function",
3246    "torch.backends.cuda",
3247    "torch.cuda.amp.autocast_mode",
3248    "torch.distributions",
3249    "torch.export._tree_utils",
3250    "torch.fx._pytree",
3251    "torch.fx._symbolic_trace",
3252    "torch.fx.experimental.proxy_tensor",
3253    "torch.fx.passes.shape_prop",
3254    "torch.nn",
3255    "torch.overrides",
3256    "torch.random",
3257    "torch.sparse",
3258    "torch.testing",
3259    "torch.utils._content_store",
3260    "torch.utils._contextlib",
3261    "torch.utils._foreach_utils",
3262    "torch.utils._python_dispatch",
3263    "torch.utils._pytree",
3264    "torch.utils.hooks",
3265]
3266assert sorted(set(MOD_INLINELIST)) == MOD_INLINELIST
3267MOD_INLINELIST = set(MOD_INLINELIST)
3268
3269
3270if torch.distributed.is_available():
3271    MOD_INLINELIST.add("torch.distributed")
3272    if not torch._dynamo.config.skip_fsdp_hooks:
3273        MOD_INLINELIST.add("torch.distributed._composable.fsdp")
3274
3275
3276@functools.lru_cache(None)
3277def get_legacy_mod_inlinelist():
3278    inlinelist = {
3279        _as_posix_path(_module_dir(torch) + m[len("torch.") :].replace(".", "/"))
3280        for m in LEGACY_MOD_INLINELIST
3281    }
3282    return inlinelist
3283
3284
3285@functools.lru_cache(None)
3286def get_mod_inlinelist():
3287    inlinelist = {
3288        _as_posix_path(_module_dir(torch) + m[len("torch.") :].replace(".", "/"))
3289        for m in MOD_INLINELIST
3290    }
3291    return inlinelist
3292
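"""
A sketch of the module-name -> path translation above: "torch.utils._pytree"
maps to "<torch dir>/utils/_pytree", which check_file later compares against
filenames via str.startswith.

    >>> _as_posix_path(_module_dir(torch) + "utils/_pytree") in get_mod_inlinelist()
    True
"""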
3293
3294# skip some standard python builtin libs
3295SKIP_DIRS = [
3296    "<frozen importlib",
3297    "<frozen abc",
3298    "<__array_function__ internals>",
3299    _as_posix_path(_config_module.__file__),
3300    "triton/backends",
3301]
3302SKIP_DIRS.extend(map(_as_posix_path, filter(None, map(_module_dir, BUILTIN_SKIPLIST))))
3303
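# Placeholder pattern that can never match (a mid-pattern "^" is unsatisfiable
# without a preceding newline); _recompile_re() builds the real regex below.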
3304SKIP_DIRS_RE = re.compile(r"match nothing^")
3305
3306is_fbcode = importlib.import_module("torch._inductor.config").is_fbcode()
3307# Skip fbcode paths (including torch.package paths) containing
3308# one of the following strings.
3309FBCODE_SKIP_DIRS: Set[str] = set()
3310
3311FBCODE_SKIP_DIRS_RE = re.compile(f".*({'|'.join(map(re.escape, FBCODE_SKIP_DIRS))})")
3312
3313# Remove this after fbcode is fully migrated to tracing through torchrec.
3314FBCODE_SKIP_TORCHREC_DIRS = {
3315    "torchrec/distributed",
3316    "trochrec/fb/distributed",
3317    "caffe2/torch/fb/sparsenn/pooled_embeddings_modules.py",
3318}
3319
3320FBCODE_SKIP_TORCHREC_DIRS_RE = re.compile(
3321    f".*({'|'.join(re.escape(_as_posix_path(d)) for d in FBCODE_SKIP_TORCHREC_DIRS)})"
3322)
3323
3324# TODO(yanboliang, anijain2305) - There are a few concerns that we should
3325# resolve:
3326# 1) Audit if torchrec/distributed is even required in FBCODE_SKIP_DIRS
3327# 2) To inline just one file but skip others in a directory, we could use
3328# manual_torch_name_rule_map, but that is hard here because FBCODE can add
3329# unusual names like torch_package.
3330# So, this is a stopgap solution until then.
3331FBCODE_INLINE_FILES_IN_SKIPPED_DIRS = {
3332    "torchrec/distributed/types.py",
3333}
3334FBCODE_INLINE_FILES_IN_SKIPPED_DIRS_RE = re.compile(
3335    f".*({'|'.join(re.escape(_as_posix_path(d)) for d in FBCODE_INLINE_FILES_IN_SKIPPED_DIRS)})"
3336)
3337
3338# torch.optim is a special case:
3339# we usually want to inline it, but the directory
3340# structure does not match the module structure,
3341# and we want to skip the functions in optim/lr_scheduler.py.
3342# This has precedence over all other rules in check_file.
3343FORCE_SKIP_FILES = {f"{_module_dir(torch)}optim/lr_scheduler.py"}
3344
3345
3346def _recompile_re():
3347    global SKIP_DIRS_RE
3348    SKIP_DIRS_RE = re.compile(
3349        rf"^[^\s<]*({'|'.join(re.escape(_as_posix_path(d)) for d in SKIP_DIRS)})"
3350    )
3351
3352
3353def add(import_name: str):
3354    if isinstance(import_name, types.ModuleType):
3355        return add(import_name.__name__)
3356    assert isinstance(import_name, str)
3357    from importlib.util import find_spec
3358
3359    module_spec = find_spec(import_name)
3360    if not module_spec:
3361        return
3362    origin = module_spec.origin
3363    if origin is None:
3364        return
3365    SKIP_DIRS.append(_strip_init_py(origin))
3366    _recompile_re()
3367
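"""
Hedged example of extending the skiplist at runtime: add() resolves a root
module to its directory, appends it to SKIP_DIRS, and rebuilds SKIP_DIRS_RE.
"scipy" here is purely illustrative; the call is a no-op if it is not installed.

    >>> import torch._dynamo.trace_rules as trace_rules
    >>> trace_rules.add("scipy")
"""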
3368
3369@dataclasses.dataclass
3370class SkipResult:
3371    skipped: bool
3372    reason: Optional[str]
3373
3374
3375def check_file(filename, is_inlined_call=False):
3376    """Should skip this file?"""
3377    if filename is None:
3378        return SkipResult(True, "filename is None")
3379    filename = _as_posix_path(filename)
3380    if filename in FORCE_SKIP_FILES:
3381        return SkipResult(True, "FORCE_SKIP_FILES")
3382    if any(filename.startswith(d) for d in get_legacy_mod_inlinelist()):
3383        return SkipResult(
3384            False,
3385            "LEGACY_MOD_INLINELIST",
3386        )
3387    if is_inlined_call and is_torch_inline_allowed(filename):
3388        return SkipResult(
3389            False,
3390            "MOD_INLINELIST",
3391        )
3392    if (
3393        is_fbcode
3394        and FBCODE_SKIP_DIRS
3395        and bool(FBCODE_SKIP_DIRS_RE.match(filename))
3396        and not bool(FBCODE_INLINE_FILES_IN_SKIPPED_DIRS_RE.match(filename))
3397    ):
3398        return SkipResult(
3399            True,
3400            "FBCODE_SKIP_DIRS",
3401        )
3402
3403    if (
3404        is_fbcode
3405        and torch._dynamo.config.skip_torchrec
3406        and FBCODE_SKIP_TORCHREC_DIRS
3407        and bool(FBCODE_SKIP_TORCHREC_DIRS_RE.match(filename))
3408        and not bool(FBCODE_INLINE_FILES_IN_SKIPPED_DIRS_RE.match(filename))
3409    ):
3410        return SkipResult(True, "FBCODE_SKIP_TORCHREC_DIRS")
3411
3412    if bool(SKIP_DIRS_RE.match(filename)):
3413        return SkipResult(True, "SKIP_DIRS")
3414    else:
3415        return SkipResult(False, "inlined by default")
3416
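"""
A sketch of the precedence encoded above: FORCE_SKIP_FILES beats the
inlinelists, which beat the fbcode skips and SKIP_DIRS; anything left over is
inlined by default. The user file below is hypothetical.

    >>> check_file(_module_dir(torch) + "optim/lr_scheduler.py").reason
    'FORCE_SKIP_FILES'
    >>> check_file("/home/user/model.py").skipped
    False
"""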
3417
3418@dataclasses.dataclass
3419class FunctionInfo:
3420    py_obj: Optional[object]
3421    name: Optional[str]
3422    filename: str
3423    code: Optional[types.CodeType]
3424
3425
3426"""
3427This is the main entry point to determine whether an object (function) should be inlined or skipped.
3428Let's illustrate the logic with an example:
3429    @torch.compile
3430    def f1(x, y):
3431        ......
3432        f2(x, y)
3433        ......
3434
3435    def f2(x, y):
3436        ......
3437        f3(x, y)
3438        ......
3439
3440    def f3(x, y):
3441        ......
3442
3443There are mainly three call sites of check/check_verbose:
3444* The compiled region entrance (like function f1); the corresponding code is located in eval_frame.py.
3445* When tracing recursively called functions (like functions f2 and f3).
3446    * Dynamo decides inline/skip every time it encounters a new recursive function call, and the call site
3447      is in InliningInstructionTranslator.check_inlineable of symbolic_convert.py.
3448    * If f2 is skipped by Dynamo, when evaluating the frame of f3, Dynamo needs the inline/skip check again,
3449      and the call site is in catch_errors_wrapper.catch_errors of convert_frame.py.
3450* For global variables and function arguments, Dynamo needs to decide if they are wrapped as SkipFunctionVariable in builder.py.
3451
3452`is_inlined_call` indicates whether the current function call is an inlined call (f2 is an inlined call if it
3453passes the check) or not (f3 is not an inlined call if f2 is skipped). Inside the `check_verbose` function,
3454more rules are checked when `is_inlined_call` is True.
3455The reason for this flag is that if the upper-level function call (e.g., f2) is skipped,
3456we don't want to inline the lower-level function call (e.g., f3) by default.
3457"""
3458
3459
3460def check_verbose(obj, is_inlined_call=False):
3461    if isinstance(
3462        obj, (UserFunctionVariable, UserMethodVariable, NestedUserFunctionVariable)
3463    ):
3464        try:
3465            py_obj = obj.get_function()
3466        except NotImplementedError:
3467            py_obj = None
3468        fi = FunctionInfo(py_obj, obj.get_name(), obj.get_filename(), obj.get_code())
3469    elif isinstance(obj, types.CodeType):
3470        fi = FunctionInfo(None, obj.co_name, obj.co_filename, obj)
3471    elif isinstance(obj, (types.FunctionType, types.MethodType)):
3472        fi = FunctionInfo(
3473            obj, obj.__name__, getfile(obj), obj.__code__  # type: ignore[union-attr] # FIXME Add MethodType.__code__ to typeshed
3474        )
3475    else:
3476        fi = FunctionInfo(obj, None, getfile(obj), None)
3477
3478    # Consult the central trace rules defined in torch._dynamo.trace_rules.
3479    reasons: Set[str] = set()
3480    rule = lookup_inner(fi.py_obj, fi.name, fi.filename, is_inlined_call, reasons)
3481    if issubclass(rule, (UserFunctionVariable, PolyfilledFunctionVariable)):
3482        return SkipResult(
3483            False,
3484            f"inlined according trace_rules.lookup {reasons.pop()}",
3485        )
3486    else:
3487        assert rule == SkipFunctionVariable, rule
3488        return SkipResult(
3489            True,
3490            f"skipped according trace_rules.lookup {reasons.pop()}",
3491        )
3492
3493
3494def check(obj, is_inlined_call=False):
3495    return check_verbose(obj, is_inlined_call).skipped
3496
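"""
Hedged usage of the two entry points above: check() answers "should we skip?"
as a bool, while check_verbose() also reports which rule fired. my_fn is a
hypothetical user function defined outside all skipped directories.

    >>> def my_fn(x):
    ...     return x + 1
    >>> check(my_fn)
    False
    >>> check_verbose(my_fn).skipped
    False
"""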
3497
3498# skip common third party libs
3499for _name in THIRDPARTY_SKIPLIST:
3500    add(_name)
3501
3502_recompile_re()
3503
3504
3505def is_torch_inline_allowed(filename):
3506    return any(filename.startswith(d) for d in get_mod_inlinelist())
3507
3508
3509@functools.lru_cache(None)
3510def dynamo_dir():
3511    import torch._dynamo
3512
3513    return _module_dir(torch._dynamo)
3514
3515
3516def is_torch(filename):
3517    if filename.startswith(dynamo_dir()):
3518        return False
3519    return filename.startswith(_module_dir(torch))
3520
3521
3522"""
3523Main entry point for looking up the trace rule (the Dynamo variable) for a given callable object.
3524"""
3525
3526
3527def lookup_callable(obj):
3528    if not hashable(obj):
3529        return None
3530    # Custom allow/disallow in graph takes precedence over the general lookup.
3531    if is_callable_disallowed(obj):
3532        return SkipFunctionVariable
3533    if is_callable_allowed(obj):
3534        return TorchInGraphFunctionVariable
3535    if is_builtin_callable(obj):
3536        return BuiltinVariable
3537    return None
3538
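"""
Hedged example: the custom allow/disallow id-sets take precedence; otherwise a
builtin resolves to BuiltinVariable (assuming it has not been rerouted to a
polyfill by torch/_dynamo/polyfills/loader.py).

    >>> lookup_callable(operator.add) is BuiltinVariable
    True
"""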
3539
3540"""
3541Main entry point for looking up the trace rule (the Dynamo variable) for a given function object.
3542E.g., the lookup result of `torch.sin` is `TorchInGraphFunctionVariable`.
3543"""
3544
3545
3546def lookup(obj):
3547    return lookup_inner(obj)
3548
3549
3550def lookup_inner(
3551    obj,
3552    name=None,
3553    filename=None,
3554    is_direct_call=True,
3555    reasons: Optional[Set[str]] = None,
3556):
3557    # Step 1: lookup obj's tracing rule in `torch_name_rule_map`.
3558    # The rules defined in `torch_name_rule_map` mainly includes two parts:
3559    # - Manually defined rules for any functions.
3560    # - The list of torch in graph functions.
3561    try:
3562        can_hash = hashable(obj)
3563    except Exception:
3564        can_hash = False
3565    if not can_hash:
3566        if reasons is not None:
3567            reasons.add("obj is not hashable")
3568        return None
3569    if obj is not None:
3570        if is_aten_op_or_tensor_method(obj):
3571            return TorchInGraphFunctionVariable
3572        rule = get_torch_obj_rule_map().get(obj, None)
3573        if rule is not None:
3574            if reasons is not None:
3575                reasons.add("get_torch_obj_rule_map")
3576            return rule
3577    elif name is not None and filename is not None and not is_direct_call:
3578        if name.startswith(TORCH_DYNAMO_RESUME_IN_PREFIX):
3579            rule = get_torch_obj_rule_map().get(
3580                filename + "#" + TORCH_DYNAMO_RESUME_IN_PREFIX, None
3581            )
3582        else:
3583            rule = get_torch_obj_rule_map().get(filename + "#" + name, None)
3584        if rule is not None:
3585            if reasons is not None:
3586                reasons.add("get_torch_obj_rule_map")
3587            return rule
3588
3589    # Step 2: lookup obj's tracing rule by function name.
3590    if is_direct_call:
3591        if name == "patched_init":
3592            if reasons is not None:
3593                reasons.add("func name is patched_init")
3594            return SkipFunctionVariable
3595        elif name == "__torch_function__":
3596            if reasons is not None:
3597                reasons.add("func name is __torch_function__")
3598            return UserFunctionVariable
3599
3600    if not is_direct_call:
3601        if name == "__getattr__":
3602            # is_direct_call = False indicates that this is the top-level frame
3603            # being traced (i.e., it is not inlined and not called from
3604            # InliningInstructionTranslator).  Tracing __getattr__ at the top
3605            # level is unlikely because we inline it for
3606            # UserDefinedObjectVariable. This scenario occurs only for
3607            # UnspecializedNNModuleVariable, where Dynamo directly calls
3608            # __getattr__ during trace time, generating LOAD_ATTR bytecode
3609            # without going through the underlying __getattr__ data structures.
3610            # When this optimized bytecode is executed, Dynamo is triggered
3611            # again on the __getattr__ call. Therefore, we skip Dynamo tracing
3612            # in this case.
3613            if reasons is not None:
3614                reasons.add(
3615                    "Tracing __getattr__ as the top level frame, unsuitable for tracing."
3616                )
3617            return SkipFunctionVariable
3618
3619    # Step 3: lookup obj's tracing rule by filename.
3620    if filename is None:
3621        filename = getfile(obj)
3622
3623    skip_result = check_file(filename, is_direct_call)
3624    if reasons is not None:
3625        reasons.add(skip_result.reason)
3626    if skip_result.skipped:
3627        return SkipFunctionVariable
3628    else:
3629        return UserFunctionVariable
3630
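"""
A sketch tying the steps above together: torch.sin hits the name-rule map in
step 1, while a plain user-defined function falls through to the filename
check in step 3 and is inlined by default.

    >>> lookup(torch.sin) is TorchInGraphFunctionVariable
    True
"""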
3631
3632def clear_lru_cache():
3633    torch._dynamo.trace_rules.get_torch_obj_rule_map.cache_clear()
3634    torch._dynamo.trace_rules.get_tensor_method.cache_clear()
3635    torch._dynamo.trace_rules.get_legacy_mod_inlinelist.cache_clear()
3636    torch._dynamo.trace_rules.get_mod_inlinelist.cache_clear()
3637    torch._dynamo.trace_rules.dynamo_dir.cache_clear()
3638