import os
import sys
from typing import Optional


# [@compile_ignored: debug] Uses Z3 for validating the guard optimization transformations.
translation_validation = (
    os.environ.get("TORCHDYNAMO_TRANSLATION_VALIDATION", "0") == "1"
)
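# Example: the value is read from the environment at import time, so the
# variable must be set before torch is imported, e.g.
#   TORCHDYNAMO_TRANSLATION_VALIDATION=1 python my_script.py
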
# Timeout (in milliseconds) for Z3 to find a solution.
# [@compile_ignored: debug]
translation_validation_timeout = int(
    os.environ.get("TORCHDYNAMO_TRANSLATION_VALIDATION_TIMEOUT", "600000")
)
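# Example: raising the Z3 timeout to 20 minutes (1,200,000 ms) for a run:
#   TORCHDYNAMO_TRANSLATION_VALIDATION_TIMEOUT=1200000 python my_script.py
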
# Disables bisection for translation validation.
#
# Translation validation bisection is enabled by default whenever translation
# validation is enabled. It helps locate guard simplification issues. However,
# since bisection uses Z3, it can take a long time.
#
# Set this configuration option to skip bisection.
# [@compile_ignored: debug]
translation_validation_no_bisect = (
    os.environ.get("TORCHDYNAMO_TRANSLATION_NO_BISECT", "0") == "1"
)
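# Example: enable translation validation but skip the (potentially slow) bisection:
#   TORCHDYNAMO_TRANSLATION_VALIDATION=1 TORCHDYNAMO_TRANSLATION_NO_BISECT=1 python my_script.py
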
# Checks whether replaying ShapeEnv events on a freshly constructed one yields
# a ShapeEnv with the same state. This should be used only in testing.
check_shape_env_recorded_events = False
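# Example (testing only): flip the flag directly on this module before building
# the ShapeEnv under test, e.g.
#   import torch.fx.experimental._config as fx_config
#   fx_config.check_shape_env_recorded_events = True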

# TODO: Perhaps consider allowing unions for the configs below (so you can hit
# multiple reps at the same time)

# Give extended debug information if the string representation of a guard
# matches this.  For example, set this to "Ne(s0, 10)" and whenever we issue
# this guard, we will generate a full Python and C++ backtrace.
# [@compile_ignored: debug]
extended_debug_guard_added = os.environ.get(
    "TORCHDYNAMO_EXTENDED_DEBUG_GUARD_ADDED", None
)
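# Example: get full backtraces whenever the guard Ne(s0, 10) is issued:
#   TORCHDYNAMO_EXTENDED_DEBUG_GUARD_ADDED="Ne(s0, 10)" python my_script.py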

# Give extended debug information when a particular symbol is allocated.  For
# example, set this to "u2" and whenever we create this symbol, we will
# generate a full Python and C++ backtrace.
# [@compile_ignored: debug]
extended_debug_create_symbol = os.environ.get(
    "TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL", None
)
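# Example: get full backtraces whenever the symbol u2 is allocated:
#   TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="u2" python my_script.py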

# Give extended debug information (C++ backtrace) for all extended debug
# settings as well as errors.  The C++ backtrace is slow and very spammy so we
# don't include it by default even when you're requesting extended debug.
# [@compile_ignored: debug]
extended_debug_cpp = os.environ.get("TORCHDYNAMO_EXTENDED_DEBUG_CPP", "") != ""
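# Example: any non-empty value enables the (slow) C++ backtraces:
#   TORCHDYNAMO_EXTENDED_DEBUG_CPP=1 python my_script.py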

# Give extended debug information (line of code) when a torch function
# is called during export.  This is useful for showing progress and detecting
# where export might be stuck. Currently only works for strict=False.
# [@compile_ignored: debug]
extended_debug_current_loc = (
    os.environ.get("TORCHEXPORT_EXTENDED_DEBUG_CURRENT_LOC", "0") == "1"
)
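# Example: report the current line of code as a non-strict export progresses:
#   TORCHEXPORT_EXTENDED_DEBUG_CURRENT_LOC=1 python my_export_script.py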

# [@compile_ignored: debug] Show a warning for every specialization
print_specializations = False

# Wraps (un)equalities with the 'Not' class after recording the correct expression
# in the FX graph. This intentionally constructs the divisible and replacement
# lists incorrectly and issues incorrect guards; it exists only for testing.
inject_EVALUATE_EXPR_flip_equality_TESTING_ONLY = False

# [@compile_ignored: debug] Validate that ShapeEnv's version key is updated correctly
validate_shape_env_version_key = False

# If more than this many guards mention a particular symbol, force that symbol
# to get specialized and bail out.  This may be slightly more aggressive than
# the true number of guards issued (we test whether we've hit the limit on the
# fly, whereas further simplifications at final guard issuance time may render
# some guards irrelevant).
symbol_guard_limit_before_specialize: Optional[int] = None
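# Sketch: set a cap of, say, 16 guards per symbol before forcing specialization
# (the exact value is workload dependent), e.g.
#   import torch.fx.experimental._config as fx_config
#   fx_config.symbol_guard_limit_before_specialize = 16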

# This flag controls whether we use the same symbolic variable ("duck shaping")
# to represent input sizes that have the same value.
use_duck_shape = True
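# Example: with duck shaping enabled, two inputs that both have size 8 at trace
# time are represented by the same symbol (e.g. s0) and are assumed to vary
# together; with the flag set to False they receive distinct symbols (e.g. s0, s1).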

from torch.utils._config_module import install_config_module


install_config_module(sys.modules[__name__])
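# install_config_module turns this module into a live config object: the names
# above become patchable attributes.  A minimal sketch, assuming the standard
# `patch` helper that config modules typically expose:
#   import torch.fx.experimental._config as fx_config
#   with fx_config.patch(print_specializations=True):
#       ...  # code compiled in this block warns on every specialization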