xref: /aosp_15_r20/external/pytorch/torch/onnx/_internal/diagnostics/_rules.py (revision da0073e96a02ea20f0ac840b70461e3646d07c45)
1# mypy: allow-untyped-defs
2"""
3GENERATED CODE - DO NOT EDIT DIRECTLY
4This file is generated by gen_diagnostics.py.
5See tools/onnx/gen_diagnostics.py for more information.
6
7Diagnostic rules for PyTorch ONNX export.
8"""
9
10import dataclasses
11from typing import Tuple
12
13# flake8: noqa
14from torch.onnx._internal.diagnostics import infra
15
16
17"""
18GENERATED CODE - DO NOT EDIT DIRECTLY
19The purpose of generating a class for each rule is to override the `format_message`
20method to provide more details in the signature about the format arguments.
21"""
22
23
class _NodeMissingOnnxShapeInference(infra.Rule):
    """Node is missing ONNX shape inference."""

    def format_message(self, op_name) -> str:  # type: ignore[override]
        """Render the rule's default message with the supplied arguments.

        Message template: 'The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.'
        """
        template = self.message_default_template
        return template.format(op_name=op_name)

    def format(  # type: ignore[override]
        self, level: infra.Level, op_name
    ) -> Tuple[infra.Rule, infra.Level, str]:
        """Build the (Rule, Level, message) triple used to emit a diagnostic.

        Message template: 'The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.'
        """
        message = self.format_message(op_name=op_name)
        return (self, level, message)
42
43
class _MissingCustomSymbolicFunction(infra.Rule):
    """Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX."""

    def format_message(self, op_name) -> str:  # type: ignore[override]
        """Render the rule's default message with the supplied arguments.

        Message template: 'ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version.'
        """
        template = self.message_default_template
        return template.format(op_name=op_name)

    def format(  # type: ignore[override]
        self, level: infra.Level, op_name
    ) -> Tuple[infra.Rule, infra.Level, str]:
        """Build the (Rule, Level, message) triple used to emit a diagnostic.

        Message template: 'ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version.'
        """
        message = self.format_message(op_name=op_name)
        return (self, level, message)
62
63
class _MissingStandardSymbolicFunction(infra.Rule):
    """Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX."""

    def format_message(  # type: ignore[override]
        self, op_name, opset_version, issue_url
    ) -> str:
        """Render the rule's default message with the supplied arguments.

        Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}."
        """
        template = self.message_default_template
        return template.format(
            op_name=op_name,
            opset_version=opset_version,
            issue_url=issue_url,
        )

    def format(  # type: ignore[override]
        self, level: infra.Level, op_name, opset_version, issue_url
    ) -> Tuple[infra.Rule, infra.Level, str]:
        """Build the (Rule, Level, message) triple used to emit a diagnostic.

        Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}."
        """
        message = self.format_message(
            op_name=op_name,
            opset_version=opset_version,
            issue_url=issue_url,
        )
        return (self, level, message)
92
93
class _OperatorSupportedInNewerOpsetVersion(infra.Rule):
    """Operator is supported in newer opset version."""

    def format_message(  # type: ignore[override]
        self, op_name, opset_version, supported_opset_version
    ) -> str:
        """Render the rule's default message with the supplied arguments.

        Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version."
        """
        template = self.message_default_template
        return template.format(
            op_name=op_name,
            opset_version=opset_version,
            supported_opset_version=supported_opset_version,
        )

    def format(  # type: ignore[override]
        self, level: infra.Level, op_name, opset_version, supported_opset_version
    ) -> Tuple[infra.Rule, infra.Level, str]:
        """Build the (Rule, Level, message) triple used to emit a diagnostic.

        Message template: "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version."
        """
        message = self.format_message(
            op_name=op_name,
            opset_version=opset_version,
            supported_opset_version=supported_opset_version,
        )
        return (self, level, message)
126
127
class _FxGraphToOnnx(infra.Rule):
    """Transforms graph from FX IR to ONNX IR."""

    def format_message(self, graph_name) -> str:  # type: ignore[override]
        """Render the rule's default message with the supplied arguments.

        Message template: 'Transforming FX graph {graph_name} to ONNX graph.'
        """
        template = self.message_default_template
        return template.format(graph_name=graph_name)

    def format(  # type: ignore[override]
        self, level: infra.Level, graph_name
    ) -> Tuple[infra.Rule, infra.Level, str]:
        """Build the (Rule, Level, message) triple used to emit a diagnostic.

        Message template: 'Transforming FX graph {graph_name} to ONNX graph.'
        """
        message = self.format_message(graph_name=graph_name)
        return (self, level, message)
146
147
class _FxNodeToOnnx(infra.Rule):
    """Transforms an FX node to an ONNX node."""

    def format_message(self, node_repr) -> str:  # type: ignore[override]
        """Render the rule's default message with the supplied arguments.

        Message template: 'Transforming FX node {node_repr} to ONNX node.'
        """
        template = self.message_default_template
        return template.format(node_repr=node_repr)

    def format(  # type: ignore[override]
        self, level: infra.Level, node_repr
    ) -> Tuple[infra.Rule, infra.Level, str]:
        """Build the (Rule, Level, message) triple used to emit a diagnostic.

        Message template: 'Transforming FX node {node_repr} to ONNX node.'
        """
        message = self.format_message(node_repr=node_repr)
        return (self, level, message)
166
167
class _FxPass(infra.Rule):
    """FX graph transformation during ONNX export before converting from FX IR to ONNX IR."""

    def format_message(self, pass_name) -> str:  # type: ignore[override]
        """Render the rule's default message with the supplied arguments.

        Message template: 'Running {pass_name} pass.'
        """
        template = self.message_default_template
        return template.format(pass_name=pass_name)

    def format(  # type: ignore[override]
        self, level: infra.Level, pass_name
    ) -> Tuple[infra.Rule, infra.Level, str]:
        """Build the (Rule, Level, message) triple used to emit a diagnostic.

        Message template: 'Running {pass_name} pass.'
        """
        message = self.format_message(pass_name=pass_name)
        return (self, level, message)
186
187
class _NoSymbolicFunctionForCallFunction(infra.Rule):
    """Cannot find symbolic function to convert the "call_function" FX node to ONNX."""

    def format_message(self, target) -> str:  # type: ignore[override]
        """Render the rule's default message with the supplied arguments.

        Message template: 'No symbolic function to convert the "call_function" node {target} to ONNX. '
        """
        template = self.message_default_template
        return template.format(target=target)

    def format(  # type: ignore[override]
        self, level: infra.Level, target
    ) -> Tuple[infra.Rule, infra.Level, str]:
        """Build the (Rule, Level, message) triple used to emit a diagnostic.

        Message template: 'No symbolic function to convert the "call_function" node {target} to ONNX. '
        """
        message = self.format_message(target=target)
        return (self, level, message)
206
207
class _UnsupportedFxNodeAnalysis(infra.Rule):
    """Result from FX graph analysis to reveal unsupported FX nodes."""

    def format_message(  # type: ignore[override]
        self, node_op_to_target_mapping
    ) -> str:
        """Render the rule's default message with the supplied arguments.

        Message template: 'Unsupported FX nodes: {node_op_to_target_mapping}. '
        """
        template = self.message_default_template
        return template.format(node_op_to_target_mapping=node_op_to_target_mapping)

    def format(  # type: ignore[override]
        self, level: infra.Level, node_op_to_target_mapping
    ) -> Tuple[infra.Rule, infra.Level, str]:
        """Build the (Rule, Level, message) triple used to emit a diagnostic.

        Message template: 'Unsupported FX nodes: {node_op_to_target_mapping}. '
        """
        message = self.format_message(
            node_op_to_target_mapping=node_op_to_target_mapping
        )
        return (self, level, message)
234
235
class _OpLevelDebugging(infra.Rule):
    """Report any op level validation failure in warnings."""

    def format_message(self, node, symbolic_fn) -> str:  # type: ignore[override]
        """Render the rule's default message with the supplied arguments.

        Message template: 'FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation.'
        """
        template = self.message_default_template
        return template.format(node=node, symbolic_fn=symbolic_fn)

    def format(  # type: ignore[override]
        self, level: infra.Level, node, symbolic_fn
    ) -> Tuple[infra.Rule, infra.Level, str]:
        """Build the (Rule, Level, message) triple used to emit a diagnostic.

        Message template: 'FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation.'
        """
        message = self.format_message(node=node, symbolic_fn=symbolic_fn)
        return (self, level, message)
254
255
class _FindOpschemaMatchedSymbolicFunction(infra.Rule):
    """Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas."""

    def format_message(self, symbolic_fn, node) -> str:  # type: ignore[override]
        """Render the rule's default message with the supplied arguments.

        Message template: 'The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}.'
        """
        template = self.message_default_template
        return template.format(symbolic_fn=symbolic_fn, node=node)

    def format(  # type: ignore[override]
        self, level: infra.Level, symbolic_fn, node
    ) -> Tuple[infra.Rule, infra.Level, str]:
        """Build the (Rule, Level, message) triple used to emit a diagnostic.

        Message template: 'The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}.'
        """
        message = self.format_message(symbolic_fn=symbolic_fn, node=node)
        return (self, level, message)
274
275
class _FxNodeInsertTypePromotion(infra.Rule):
    """Determine if type promotion is required for the FX node. Insert cast nodes if needed."""

    def format_message(self, target) -> str:  # type: ignore[override]
        """Render the rule's default message with the supplied arguments.

        Message template: 'Performing explicit type promotion for node {target}. '
        """
        template = self.message_default_template
        return template.format(target=target)

    def format(  # type: ignore[override]
        self, level: infra.Level, target
    ) -> Tuple[infra.Rule, infra.Level, str]:
        """Build the (Rule, Level, message) triple used to emit a diagnostic.

        Message template: 'Performing explicit type promotion for node {target}. '
        """
        message = self.format_message(target=target)
        return (self, level, message)
294
295
class _FindOperatorOverloadsInOnnxRegistry(infra.Rule):
    """Find the list of OnnxFunction of the PyTorch operator in onnx registry."""

    def format_message(self, node) -> str:  # type: ignore[override]
        """Render the rule's default message with the supplied arguments.

        Message template: 'Checking if the FX node: {node} is supported in onnx registry.'
        """
        template = self.message_default_template
        return template.format(node=node)

    def format(  # type: ignore[override]
        self, level: infra.Level, node
    ) -> Tuple[infra.Rule, infra.Level, str]:
        """Build the (Rule, Level, message) triple used to emit a diagnostic.

        Message template: 'Checking if the FX node: {node} is supported in onnx registry.'
        """
        message = self.format_message(node=node)
        return (self, level, message)
314
315
316@dataclasses.dataclass
317class _POERules(infra.RuleCollection):
318    node_missing_onnx_shape_inference: _NodeMissingOnnxShapeInference = dataclasses.field(
319        default=_NodeMissingOnnxShapeInference.from_sarif(
320            **{
321                "id": "POE0001",
322                "name": "node-missing-onnx-shape-inference",
323                "short_description": {"text": "Node is missing ONNX shape inference."},
324                "full_description": {
325                    "text": "Node is missing ONNX shape inference. This usually happens when the node is not valid under standard ONNX operator spec.",
326                    "markdown": "Node is missing ONNX shape inference.\nThis usually happens when the node is not valid under standard ONNX operator spec.\n",
327                },
328                "message_strings": {
329                    "default": {
330                        "text": "The shape inference of {op_name} type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function."
331                    }
332                },
333                "help_uri": None,
334                "properties": {"deprecated": False, "tags": []},
335            }
336        ),
337        init=False,
338    )
339    """Node is missing ONNX shape inference."""
340
341    missing_custom_symbolic_function: _MissingCustomSymbolicFunction = dataclasses.field(
342        default=_MissingCustomSymbolicFunction.from_sarif(
343            **{
344                "id": "POE0002",
345                "name": "missing-custom-symbolic-function",
346                "short_description": {
347                    "text": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX."
348                },
349                "full_description": {
350                    "text": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX.",
351                    "markdown": "Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX.\n",
352                },
353                "message_strings": {
354                    "default": {
355                        "text": "ONNX export failed on an operator with unrecognized namespace {op_name}. If you are trying to export a custom operator, make sure you registered it with the right domain and version."
356                    }
357                },
358                "help_uri": None,
359                "properties": {"deprecated": False, "tags": []},
360            }
361        ),
362        init=False,
363    )
364    """Missing symbolic function for custom PyTorch operator, cannot translate node to ONNX."""
365
366    missing_standard_symbolic_function: _MissingStandardSymbolicFunction = dataclasses.field(
367        default=_MissingStandardSymbolicFunction.from_sarif(
368            **{
369                "id": "POE0003",
370                "name": "missing-standard-symbolic-function",
371                "short_description": {
372                    "text": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX."
373                },
374                "full_description": {
375                    "text": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX.",
376                    "markdown": "Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX.\n",
377                },
378                "message_strings": {
379                    "default": {
380                        "text": "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub: {issue_url}."
381                    }
382                },
383                "help_uri": None,
384                "properties": {"deprecated": False, "tags": []},
385            }
386        ),
387        init=False,
388    )
389    """Missing symbolic function for standard PyTorch operator, cannot translate node to ONNX."""
390
391    operator_supported_in_newer_opset_version: _OperatorSupportedInNewerOpsetVersion = dataclasses.field(
392        default=_OperatorSupportedInNewerOpsetVersion.from_sarif(
393            **{
394                "id": "POE0004",
395                "name": "operator-supported-in-newer-opset-version",
396                "short_description": {
397                    "text": "Operator is supported in newer opset version."
398                },
399                "full_description": {
400                    "text": "Operator is supported in newer opset version.",
401                    "markdown": "Operator is supported in newer opset version.\n\nExample:\n```python\ntorch.onnx.export(model, args, ..., opset_version=9)\n```\n",
402                },
403                "message_strings": {
404                    "default": {
405                        "text": "Exporting the operator '{op_name}' to ONNX opset version {opset_version} is not supported. Support for this operator was added in version {supported_opset_version}, try exporting with this version."
406                    }
407                },
408                "help_uri": None,
409                "properties": {"deprecated": False, "tags": []},
410            }
411        ),
412        init=False,
413    )
414    """Operator is supported in newer opset version."""
415
416    fx_graph_to_onnx: _FxGraphToOnnx = dataclasses.field(
417        default=_FxGraphToOnnx.from_sarif(
418            **{
419                "id": "FXE0007",
420                "name": "fx-graph-to-onnx",
421                "short_description": {
422                    "text": "Transforms graph from FX IR to ONNX IR."
423                },
424                "full_description": {
425                    "text": "Transforms graph from FX IR to ONNX IR.",
426                    "markdown": "This diagnostic tracks the transformation process from an FX Graph (in FX IR) to an ONNX Graph (in ONNX IR).\n\n## Key Representations:\n\n- **FX Graph**: The graph in FX IR produced by dynamo or symbolic tracing.\n- **ONNX Graph**: The graph in ONNX IR and [operators](https://onnx.ai/onnx/operators/).\n\n## Additional Notes:\n\n- Prior to this transformation step, the FX graph undergoes preprocessing through multiple FX passes.\n  To gain insight into these transformations, refer to diagnostic `FXE0010`.\n- To enable a detailed view of the graph transformation in progress within this diagnostic, switch to the DEBUG mode.\n\n  - Set DiagnosticOptions.verbosity_level to logging.DEBUG.\n  - Activate the environment variable TORCH_LOGS='onnx_diagnostics'.\n\n- For specific information related to node-level FX to ONNX transformations, explore the diagnostic `FXE0008`.\n",
427                },
428                "message_strings": {
429                    "default": {
430                        "text": "Transforming FX graph {graph_name} to ONNX graph."
431                    }
432                },
433                "help_uri": None,
434                "properties": {"deprecated": False, "tags": []},
435            }
436        ),
437        init=False,
438    )
439    """Transforms graph from FX IR to ONNX IR."""
440
441    fx_node_to_onnx: _FxNodeToOnnx = dataclasses.field(
442        default=_FxNodeToOnnx.from_sarif(
443            **{
444                "id": "FXE0008",
445                "name": "fx-node-to-onnx",
446                "short_description": {"text": "Transforms an FX node to an ONNX node."},
447                "full_description": {
448                    "text": "Transforms an FX node to an ONNX node.",
449                    "markdown": "This diagnostic tracks the transformation process from an FX Node to ONNX [Operators](https://onnx.ai/onnx/operators/).\n\nThe process of converting FX Node to ONNX Node involves dealing with six distinct node types:\n  1. `placeholder`: Represents a module input, maps to an ONNX graph input.\n  2. `call_module`: Symbolizes a call to a submodule, maps to an ONNX\n  3. `call_method`: Symbolizes a method call. Not yet implemented.\n  4. `call_function`: Symbolizes a function call. [Core ATen](https://pytorch.org/docs/stable/ir.html#core-aten-ir) is expected\n    as the function call target. The mapping from ATen to ONNX is implemented by [ONNXScript torchlib](https://github.com/microsoft/onnxscript/tree/main/onnxscript/function_libs/torch_lib/ops).\n    This [guide](https://pytorch.org/docs/stable/onnx.html#onnx-script-functions) shows how to write and register a custom symbolic function for call_function FX node.\n  5. `get_attr`: Indicates an attribute access within the current module. Maps to an ONNX graph initializer.\n  6. `output`: Represents the module's output. Maps to an ONNX graph output.\n\nFor a granular understanding of how each node type is transformed, refer to the implementation details in `FxOnnxInterpreter`.\n",
450                },
451                "message_strings": {
452                    "default": {
453                        "text": "Transforming FX node {node_repr} to ONNX node."
454                    }
455                },
456                "help_uri": None,
457                "properties": {"deprecated": False, "tags": []},
458            }
459        ),
460        init=False,
461    )
462    """Transforms an FX node to an ONNX node."""
463
464    fx_pass: _FxPass = dataclasses.field(
465        default=_FxPass.from_sarif(
466            **{
467                "id": "FXE0010",
468                "name": "fx-pass",
469                "short_description": {
470                    "text": "FX graph transformation during ONNX export before converting from FX IR to ONNX IR."
471                },
472                "full_description": {
473                    "text": "FX graph transformation during ONNX export before converting from FX IR to ONNX IR.",
474                    "markdown": "This diagnostic tracks the FX passes executed during the ONNX export process prior\nto converting from FX IR (Intermediate Representation) to ONNX IR.\n\nUnder the scope of ONNX export, an FX pass refers to a specific transformation applied to the FX GraphModule.\nThe primary aim of these passes is to streamline the graph into a format that aligns more with the ONNX IR.\nMoreover, these passes work to substitute unsupported FX IR features with those recognized and endorsed by\nONNX IR. Common transformations include, but aren't limited to, decomposition, functionalization and\ntype promotion.\n\nFor those who are interested in a comprehensive log detailing the modifications made during these passes,\nthere are a couple of options:\n\n- Set DiagnosticOptions.verbosity_level to logging.DEBUG.\n- Activate the environment variable TORCH_LOGS='onnx_diagnostics'.\n\nHowever, it's noteworthy that by default, such detailed logging is turned off. The primary reason being\nits considerable impact on performance.\n\nFor an in-depth understanding of each specific pass, please refer to the directory: torch/onnx/_internal/fx/passes.\n",
475                },
476                "message_strings": {"default": {"text": "Running {pass_name} pass."}},
477                "help_uri": None,
478                "properties": {"deprecated": False, "tags": []},
479            }
480        ),
481        init=False,
482    )
483    """FX graph transformation during ONNX export before converting from FX IR to ONNX IR."""
484
485    no_symbolic_function_for_call_function: _NoSymbolicFunctionForCallFunction = dataclasses.field(
486        default=_NoSymbolicFunctionForCallFunction.from_sarif(
487            **{
488                "id": "FXE0011",
489                "name": "no-symbolic-function-for-call-function",
490                "short_description": {
491                    "text": 'Cannot find symbolic function to convert the "call_function" FX node to ONNX.'
492                },
493                "full_description": {
494                    "text": 'Cannot find symbolic function to convert the "call_function" FX node to ONNX. ',
495                    "markdown": 'This error occurs when the ONNX converter is unable to find a corresponding symbolic function\nto convert a "call_function" node in the input graph to its equivalence in ONNX. The "call_function"\nnode represents a normalized function call in PyTorch, such as "torch.aten.ops.add".\n\nTo resolve this error, you can try one of the following:\n\n- If exists, apply the auto-fix suggested by the diagnostic. TODO: this part is not available yet.\n- Rewrite the model using only supported PyTorch operators or functions.\n- Follow this [guide](https://pytorch.org/tutorials/beginner/onnx/onnx_registry_tutorial.html#overview) to write and\n  register a custom symbolic function for the unsupported call_function FX node.\n',
496                },
497                "message_strings": {
498                    "default": {
499                        "text": 'No symbolic function to convert the "call_function" node {target} to ONNX. '
500                    }
501                },
502                "help_uri": None,
503                "properties": {"deprecated": False, "tags": []},
504            }
505        ),
506        init=False,
507    )
508    """Cannot find symbolic function to convert the "call_function" FX node to ONNX."""
509
510    unsupported_fx_node_analysis: _UnsupportedFxNodeAnalysis = dataclasses.field(
511        default=_UnsupportedFxNodeAnalysis.from_sarif(
512            **{
513                "id": "FXE0012",
514                "name": "unsupported-fx-node-analysis",
515                "short_description": {
516                    "text": "Result from FX graph analysis to reveal unsupported FX nodes."
517                },
518                "full_description": {
519                    "text": "Result from FX graph analysis to reveal unsupported FX nodes.",
520                    "markdown": "This error indicates that an FX graph contains one or more unsupported nodes. The error message\nis typically accompanied by a list of the unsupported nodes found during analysis.\n\nTo resolve this error, you can try resolving each individual unsupported node error by following\nthe suggestions by its diagnostic. Typically, options include:\n\n- If exists, apply the auto-fix suggested by the diagnostic. TODO: this part is not available yet.\n- Rewrite the model using only supported PyTorch operators or functions.\n- Follow this [guide](https://pytorch.org/docs/stable/onnx.html#onnx-script-functions) to write and\n  register a custom symbolic function for the unsupported call_function FX node.\n",
521                },
522                "message_strings": {
523                    "default": {
524                        "text": "Unsupported FX nodes: {node_op_to_target_mapping}. "
525                    }
526                },
527                "help_uri": None,
528                "properties": {"deprecated": False, "tags": []},
529            }
530        ),
531        init=False,
532    )
533    """Result from FX graph analysis to reveal unsupported FX nodes."""
534
535    op_level_debugging: _OpLevelDebugging = dataclasses.field(
536        default=_OpLevelDebugging.from_sarif(
537            **{
538                "id": "FXE0013",
539                "name": "op-level-debugging",
540                "short_description": {
541                    "text": "Report any op level validation failure in warnings."
542                },
543                "full_description": {
544                    "text": "Report any op level validation failure in warnings.",
545                    "markdown": "This warning message indicates that during op level debugging, certain symbolic functions\nhave failed to match the results of torch ops when using real tensors generated from fake\ntensors. It is important to note that the symbolic functions may not necessarily be\nincorrect, as the validation process is non-deterministic and should only be used as a\nreference.\n\nThere are two categories of warnings that can be triggered:\n\n1. Non-validated operators:\n  If the warnings are caused by the following errors, they can be disregarded by users,\n  as these errors occur due to the non-deterministic nature of the validation. However,\n  it is important to be aware that the operators have not been validated.\n\n  - IndexError: Unsupported input arguments of randomized dimensions/indices(INT64).\n  - RuntimeError: Unsupported input arguments for torch ops are generated.\n  - ValueError: Arguments/keyword arguments do not match the signature of the symbolic function.\n\n2. Potentially wrong torchlib operators:\n  If the warnings are triggered by the following error, users should be aware that the symbolic functions\n  may be incorrect in dispatching or implementation. In such cases, it is recommended to report\n  the issue to the PyTorch-ONNX team, or create/register a custom symbolic function to replace the default one.\n\n  - AssertionError: The symbolic function is potentially wrong as the results do not match the results of torch ops.\n  - TypeError: The symbolic function is potentially wrong as the opschema doesn't match inputs.\n",
546                },
547                "message_strings": {
548                    "default": {
549                        "text": "FX node: {node} and its onnx function: {symbolic_fn} fails on op level validation."
550                    }
551                },
552                "help_uri": None,
553                "properties": {"deprecated": False, "tags": []},
554            }
555        ),
556        init=False,
557    )
558    """Report any op level validation failure in warnings."""
559
    # Rule FXE0014 ("find-opschema-matched-symbolic-function").
    # Per its SARIF full_description: emitted while the dispatcher compares an
    # ATen/custom operator's input/attribute dtypes against OnnxFunction
    # opschemas — an exact match yields a NOTE-level record, a nearest
    # (inexact) match yields a WARNING. `init=False`: the field is fixed
    # generated data, not a constructor argument.
    find_opschema_matched_symbolic_function: _FindOpschemaMatchedSymbolicFunction = dataclasses.field(
        default=_FindOpschemaMatchedSymbolicFunction.from_sarif(
            **{
                "id": "FXE0014",
                "name": "find-opschema-matched-symbolic-function",
                "short_description": {
                    "text": "Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas."
                },
                "full_description": {
                    "text": "Find the OnnxFunction that matches the input dtypes by comparing them with their opschemas. A warning will be issued if the matched OnnxFunction is not an exact match.",
                    "markdown": "When an ATen/Custom operator is registered and needs to be dispatched to an OnnxFunction, the input/attribute\ndtypes of the ATen/Custom operator are compared with the input/attribute dtypes of the OnnxFunction opschemas\nto find a match. However, if a perfect/exact match is not found, the dispatcher will attempt to find\nthe nearest match with the highest number of input/attribute dtypes matching the OnnxFunction opschemas, while\nissuing a warning.\n\nThere are two types of level that can be triggered in this rule:\n\n1. NOTE: A perfect match is found, and no warning is issued.\n2. WARNING: The matched OnnxFunction is not a perfect/exact match.\n\nHere are some suggestions based on the WARNING situation:\n\n1. If there are NO errors or mismatches in the results, it is safe to disregard this warning,\n  as the definition of OnnxFunction schema is usually more stringent.\n2. If there are errors or mismatches in the results, it is recommended to:\n  (a) Enable op_level_debugging to determine if the OnnxFunction might be incorrect.\n  (b) Report the issue to the PyTorch-ONNX team.\n  (c) Create/register a custom symbolic function to replace the default one.\n",
                },
                "message_strings": {
                    "default": {
                        "text": "The OnnxFunction: {symbolic_fn} is the nearest match of the node {node}."
                    }
                },
                "help_uri": None,
                "properties": {"deprecated": False, "tags": []},
            }
        ),
        init=False,
    )
    """Find the OnnxFunction that matches the input/attribute dtypes by comparing them with their opschemas."""
584
    # Rule FXE0015 ("fx-node-insert-type-promotion").
    # Per its SARIF full_description: tracks the pass that replicates PyTorch's
    # implicit type promotion by explicitly inserting cast nodes, since ONNX has
    # no implicit promotion. The rules themselves live in
    # torch/onnx/_internal/fx/passes/type_promotion.py (per the markdown text).
    # `init=False`: the field is fixed generated data, not a constructor argument.
    fx_node_insert_type_promotion: _FxNodeInsertTypePromotion = dataclasses.field(
        default=_FxNodeInsertTypePromotion.from_sarif(
            **{
                "id": "FXE0015",
                "name": "fx-node-insert-type-promotion",
                "short_description": {
                    "text": "Determine if type promotion is required for the FX node. Insert cast nodes if needed."
                },
                "full_description": {
                    "text": "Determine if type promotion is required for the FX node. Insert cast nodes if needed.",
                    "markdown": "This diagnostic monitors the node-level type promotion insertion process. In PyTorch, there is an automatic process called implicit type promotion,\nwhere the input types of an operator are promoted to a common type. The determination of the common type is based on the type promotion rule specific to each operator.\nTo learn more about PyTorch's type promotion rules, refer to the [elementwise_dtypes doc](https://github.com/pytorch/pytorch/blob/f044613f78df713fb57f70c608483c9f10ad332e/torch/_prims_common/__init__.py#L1252-L1335)\nand [torch._refs ops](https://github.com/pytorch/pytorch/blob/a475ea4542dfe961c9d097e33ab5041f61c8c17f/torch/_refs/__init__.py#L484).\n\nHowever, implicit type promotion is not supported in ONNX. Therefore, to replicate the PyTorch behavior, we need to explicitly insert cast nodes.\nThis diagnostic tracks the process of node-level type promotion insertion.\n\nThe type promotion rules used by this process can be found in `torch/onnx/_internal/fx/passes/type_promotion.py.`\nTo update or add new type promotion rules, please refer to the [Note: Update type promotion rule] section.\n",
                },
                "message_strings": {
                    "default": {
                        "text": "Performing explicit type promotion for node {target}. "
                    }
                },
                "help_uri": None,
                "properties": {"deprecated": False, "tags": []},
            }
        ),
        init=False,
    )
    """Determine if type promotion is required for the FX node. Insert cast nodes if needed."""
609
    # Rule FXE0016 ("find-operator-overloads-in-onnx-registry").
    # Per its SARIF full_description: emitted when looking up a PyTorch operator
    # overload in the ONNX registry. Level escalates with support: NOTE when the
    # overload is registered, WARNING when only the default overload is, ERROR
    # when neither is. `init=False`: the field is fixed generated data, not a
    # constructor argument.
    find_operator_overloads_in_onnx_registry: _FindOperatorOverloadsInOnnxRegistry = dataclasses.field(
        default=_FindOperatorOverloadsInOnnxRegistry.from_sarif(
            **{
                "id": "FXE0016",
                "name": "find-operator-overloads-in-onnx-registry",
                "short_description": {
                    "text": "Find the list of OnnxFunction of the PyTorch operator in onnx registry."
                },
                "full_description": {
                    "text": "This rule involves finding the list of OnnxFunction for the PyTorch operator overload in the ONNX registry. If the operator overload is not supported but its default overload is, a warning will be issued. If both the operator overload and its default overload are not supported, an error will be issued.",
                    "markdown": "The operator overload name serves the purpose of verifying whether a PyTorch operator is registered in the ONNX registry.\nIf it's not found, the dispatcher takes a fallback approach and tries to locate the default overload of the PyTorch\noperator in the registry. If even the default overload is absent, it signifies that the operator is officially unsupported.\n\nThere are three types of level that can be triggered in this rule:\n\n1. NOTE: The op overload is supported.\n2. WARNING: The op overload is not supported, but it's default overload is supported.\n3. ERROR: The op overload is not supported, and it's default overload is also not supported.\n\nHere are some suggestions based on the WARNING situation:\n\n1. If there are NO errors or mismatches in the results, it is safe to disregard this warning.\n2. If there are errors or mismatches in the results, it is recommended to:\n  (a) Enable op_level_debugging to determine if the OnnxFunction might be incorrect.\n  (b) Report the unsupported overload to the PyTorch-ONNX team.\n  (c) Create/register a custom symbolic function to replace the default one.\n\nHere are some suggestions based on the ERROR situation:\n\n1. Report the unsupported operator to the PyTorch-ONNX team.\n2. Create/register a custom symbolic function to replace the default one.\n",
                },
                "message_strings": {
                    "default": {
                        "text": "Checking if the FX node: {node} is supported in onnx registry."
                    }
                },
                "help_uri": None,
                "properties": {"deprecated": False, "tags": []},
            }
        ),
        init=False,
    )
    """Find the list of OnnxFunction of the PyTorch operator in onnx registry."""
634
635
636rules = _POERules()
637