# mypy: allow-untyped-defs
r"""Functional interface (quantized)."""
import warnings
from typing import List, Optional

import torch
from torch import Tensor
from torch.jit.annotations import BroadcastingList2
from torch.nn.modules.utils import _pair, _triple

from .modules.utils import _pair_from_first


# Although some of the functions and docstrings are mirrored from torch.nn,
# we keep them here to allow for future changes.

__all__ = [
18    "avg_pool2d",
19    "avg_pool3d",
20    "adaptive_avg_pool2d",
21    "adaptive_avg_pool3d",
22    "conv1d",
23    "conv2d",
24    "conv3d",
25    "interpolate",
26    "linear",
27    "max_pool1d",
28    "max_pool2d",
29    "celu",
30    "leaky_relu",
31    "hardtanh",
32    "hardswish",
33    "threshold",
34    "elu",
35    "hardsigmoid",
36    "clamp",
37    "upsample",
38    "upsample_bilinear",
39    "upsample_nearest",
40]
41
42
43def avg_pool2d(
44    input,
45    kernel_size,
46    stride=None,
47    padding=0,
48    ceil_mode=False,
49    count_include_pad=True,
50    divisor_override=None,
51):
52    r"""
53    Applies 2D average-pooling operation in :math:`kH \times kW` regions by step size
54    :math:`sH \times sW` steps. The number of output features is equal to the number of
55    input planes.
56
57    .. note:: The input quantization parameters propagate to the output.
58
59    See :class:`~torch.ao.nn.quantized.AvgPool2d` for details and output shape.
60
61    Args:
62        input: quantized input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
63        kernel_size: size of the pooling region. Can be a single number or a
64          tuple `(kH, kW)`
65        stride: stride of the pooling operation. Can be a single number or a
66          tuple `(sH, sW)`. Default: :attr:`kernel_size`
67        padding: implicit zero paddings on both sides of the input. Can be a
68          single number or a tuple `(padH, padW)`. Default: 0
69        ceil_mode: when True, will use `ceil` instead of `floor` in the formula
70            to compute the output shape. Default: ``False``
71        count_include_pad: when True, will include the zero-padding in the
72            averaging calculation. Default: ``True``
73        divisor_override: if specified, it will be used as divisor, otherwise
74             size of the pooling region will be used. Default: None
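
    Examples (a minimal sketch; the shapes and quantization parameters are
    illustrative only)::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> x = torch.randn(1, 3, 4, 4, dtype=torch.float)
        >>> qx = torch.quantize_per_tensor(x, scale=0.5, zero_point=0, dtype=torch.quint8)
        >>> qF.avg_pool2d(qx, kernel_size=2)  # output keeps qx's scale and zero_point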
75    """
76    if not input.is_quantized:
77        raise ValueError("Input to 'quantized.avg_pool2d' must be quantized!")
78    return torch.nn.functional.avg_pool2d(
79        input,
80        kernel_size,
81        stride,
82        padding,
83        ceil_mode,
84        count_include_pad,
85        divisor_override,
86    )
87
88
89def avg_pool3d(
90    input,
91    kernel_size,
92    stride=None,
93    padding=0,
94    ceil_mode=False,
95    count_include_pad=True,
96    divisor_override=None,
97):
98    r"""
99    Applies 3D average-pooling operation in :math:`kD \ times kH \times kW` regions by step size
100    :math:`sD \times sH \times sW` steps. The number of output features is equal to the number of
101    input planes.
102
103    .. note:: The input quantization parameters propagate to the output.
104
105    Args:
106        input: quantized input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
107        kernel_size: size of the pooling region. Can be a single number or a
108          tuple `(kD, kH, kW)`
109        stride: stride of the pooling operation. Can be a single number or a
110          tuple `(sD, sH, sW)`. Default: :attr:`kernel_size`
111        padding: implicit zero paddings on both sides of the input. Can be a
112          single number or a tuple `(padD, padH, padW)`. Default: 0
113        ceil_mode: when True, will use `ceil` instead of `floor` in the formula
114            to compute the output shape. Default: ``False``
115        count_include_pad: when True, will include the zero-padding in the
116            averaging calculation. Default: ``True``
117        divisor_override: if specified, it will be used as divisor, otherwise
118             size of the pooling region will be used. Default: None
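
    Examples (a minimal sketch; the shapes and quantization parameters are
    illustrative only)::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> x = torch.randn(1, 2, 4, 4, 4, dtype=torch.float)
        >>> qx = torch.quantize_per_tensor(x, scale=0.5, zero_point=0, dtype=torch.quint8)
        >>> qF.avg_pool3d(qx, kernel_size=2)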
119    """
120    if not input.is_quantized:
121        raise ValueError("Input to 'quantized.avg_pool3d' must be quantized!")
122    return torch.nn.functional.avg_pool3d(
123        input,
124        kernel_size,
125        stride,
126        padding,
127        ceil_mode,
128        count_include_pad,
129        divisor_override,
130    )
131
132
133def adaptive_avg_pool2d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
134    r"""
135    Applies a 2D adaptive average pooling over a quantized input signal composed
136    of several quantized input planes.
137
138    .. note:: The input quantization parameters propagate to the output.
139
140    See :class:`~torch.ao.nn.quantized.AdaptiveAvgPool2d` for details and output shape.
141
142    Args:
143        output_size: the target output size (single integer or
144                     double-integer tuple)
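
    Examples (a minimal sketch with illustrative values)::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> x = torch.randn(1, 3, 8, 8, dtype=torch.float)
        >>> qx = torch.quantize_per_tensor(x, scale=0.5, zero_point=0, dtype=torch.quint8)
        >>> qF.adaptive_avg_pool2d(qx, (2, 2))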
145    """
146    if not input.is_quantized:
147        raise ValueError(
148            "Input to 'quantized.functional.adaptive_avg_pool2d' must be quantized!"
149        )
150    return torch.nn.functional.adaptive_avg_pool2d(input, output_size)
151
152
153def adaptive_avg_pool3d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
154    r"""
155    Applies a 3D adaptive average pooling over a quantized input signal composed
156    of several quantized input planes.
157
158    .. note:: The input quantization parameters propagate to the output.
159
160    See :class:`~torch.ao.nn.quantized.AdaptiveAvgPool3d` for details and output shape.
161
162    Args:
163        output_size: the target output size (single integer or
164                     double-integer tuple)
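
    Examples (a minimal sketch with illustrative values)::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> x = torch.randn(1, 2, 4, 4, 4, dtype=torch.float)
        >>> qx = torch.quantize_per_tensor(x, scale=0.5, zero_point=0, dtype=torch.quint8)
        >>> qF.adaptive_avg_pool3d(qx, (2, 2, 2))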
165    """
166    if not input.is_quantized:
167        raise ValueError(
168            "Input to 'quantized.functional.adaptive_avg_pool3d' must be quantized!"
169        )
170    return torch.nn.functional.adaptive_avg_pool3d(input, output_size)
171
172
173def conv1d(
174    input,
175    weight,
176    bias,
177    stride=1,
178    padding=0,
179    dilation=1,
180    groups=1,
181    padding_mode="zeros",
182    scale=1.0,
183    zero_point=0,
184    dtype=torch.quint8,
185):
186    r"""
187    Applies a 1D convolution over a quantized 1D input composed of several input
188    planes.
189
190    See :class:`~torch.ao.nn.quantized.Conv1d` for details and output shape.
191
192    Args:
193        input: quantized input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
        weight: quantized filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kW)`
        bias: **non-quantized** bias tensor of shape :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
        stride: the stride of the convolving kernel. Can be a single number or a
          tuple `(sW,)`. Default: 1
        padding: implicit paddings on both sides of the input. Can be a
          single number or a tuple `(padW,)`. Default: 0
        dilation: the spacing between kernel elements. Can be a single number or
          a tuple `(dW,)`. Default: 1
        groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
          number of groups. Default: 1
        padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"
        scale: quantization scale for the output. Default: 1.0
        zero_point: quantization zero_point for the output. Default: 0
        dtype: quantization data type to use. Default: ``torch.quint8``

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> filters = torch.randn(33, 16, 3, dtype=torch.float)
        >>> inputs = torch.randn(20, 16, 50, dtype=torch.float)
        >>> bias = torch.randn(33, dtype=torch.float)
        >>>
        >>> scale, zero_point = 1.0, 0
        >>> dtype_inputs = torch.quint8
        >>> dtype_filters = torch.qint8
        >>>
        >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
        >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
        >>> qF.conv1d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
    """  # noqa: E501
    if padding_mode != "zeros":
        raise NotImplementedError("Only zero-padding is supported!")
    if input.dtype != torch.quint8:
        raise NotImplementedError(
            "Only torch.quint8 is supported for activation tensor!"
        )
    if weight.dtype != torch.qint8:
        raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
    if input.ndim != 3:
        raise ValueError("Input shape must be `(N, C, L)`!")
    stride = _pair_from_first(stride)
    padding = _pair_from_first(padding)
    dilation = _pair_from_first(dilation)

    packed_params = torch.ops.quantized.conv1d_prepack(
        weight, bias, stride, padding, dilation, groups
    )
    return torch.ops.quantized.conv1d(input, packed_params, scale, zero_point)


def conv2d(
    input,
    weight,
    bias,
    stride=1,
    padding=0,
    dilation=1,
    groups=1,
    padding_mode="zeros",
    scale=1.0,
    zero_point=0,
    dtype=torch.quint8,
):
258    r"""
259    Applies a 2D convolution over a quantized 2D input composed of several input
260    planes.
261
262    See :class:`~torch.ao.nn.quantized.Conv2d` for details and output shape.
263
264    Args:
265        input: quantized input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
266        weight: quantized filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)`
267        bias: **non-quantized** bias tensor of shape :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
268        stride: the stride of the convolving kernel. Can be a single number or a
269          tuple `(sH, sW)`. Default: 1
270        padding: implicit paddings on both sides of the input. Can be a
271          single number or a tuple `(padH, padW)`. Default: 0
272        dilation: the spacing between kernel elements. Can be a single number or
273          a tuple `(dH, dW)`. Default: 1
274        groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
275          number of groups. Default: 1
276        padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"
277        scale: quantization scale for the output. Default: 1.0
278        zero_point: quantization zero_point for the output. Default: 0
279        dtype: quantization data type to use. Default: ``torch.quint8``
280
281    Examples::
282
283        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
284        >>> from torch.ao.nn.quantized import functional as qF
285        >>> filters = torch.randn(8, 4, 3, 3, dtype=torch.float)
286        >>> inputs = torch.randn(1, 4, 5, 5, dtype=torch.float)
287        >>> bias = torch.randn(8, dtype=torch.float)
288        >>>
289        >>> scale, zero_point = 1.0, 0
290        >>> dtype_inputs = torch.quint8
291        >>> dtype_filters = torch.qint8
292        >>>
293        >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
294        >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
295        >>> qF.conv2d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
296    """  # noqa: E501
297    if padding_mode != "zeros":
298        raise NotImplementedError("Only zero-padding is supported!")
299    if input.dtype != torch.quint8:
300        raise NotImplementedError(
301            "Only torch.quint8 is supported for activation tensor!"
302        )
303    if weight.dtype != torch.qint8:
304        raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
305    if input.ndim != 4:
306        raise ValueError("Input shape must be `(N, C, H, W)`!")
307    stride = _pair(stride)
308    padding = _pair(padding)
309    dilation = _pair(dilation)
310
311    packed_params = torch.ops.quantized.conv2d_prepack(
312        weight, bias, stride, padding, dilation, groups
313    )
314    return torch.ops.quantized.conv2d(input, packed_params, scale, zero_point)
315
316
317def conv3d(
318    input,
319    weight,
320    bias,
321    stride=1,
322    padding=0,
323    dilation=1,
324    groups=1,
325    padding_mode="zeros",
326    scale=1.0,
327    zero_point=0,
328    dtype=torch.quint8,
329):
330    r"""
331    Applies a 3D convolution over a quantized 3D input composed of several input
332    planes.
333
334    See :class:`~torch.ao.nn.quantized.Conv3d` for details and output shape.
335
336    Args:
337        input: quantized input tensor of shape
338          :math:`(\text{minibatch} , \text{in\_channels} , iD , iH , iW)`
339        weight: quantized filters of shape
340          :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kD , kH , kW)`
341        bias: **non-quantized** bias tensor of shape
342          :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
343        stride: the stride of the convolving kernel. Can be a single number or a
344          tuple `(sD, sH, sW)`. Default: 1
345        padding: implicit paddings on both sides of the input. Can be a
346          single number or a tuple `(padD, padH, padW)`. Default: 0
347        dilation: the spacing between kernel elements. Can be a single number or
348          a tuple `(dD, dH, dW)`. Default: 1
349        groups: split input into groups, :math:`\text{in\_channels}` should be
350          divisible by the number of groups. Default: 1
351        padding_mode: the padding mode to use. Only "zeros" is supported for
352          quantized convolution at the moment. Default: "zeros"
353        scale: quantization scale for the output. Default: 1.0
354        zero_point: quantization zero_point for the output. Default: 0
355        dtype: quantization data type to use. Default: ``torch.quint8``
356
357    Examples::
358
359        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
360        >>> from torch.ao.nn.quantized import functional as qF
361        >>> filters = torch.randn(8, 4, 3, 3, 3, dtype=torch.float)
362        >>> inputs = torch.randn(1, 4, 5, 5, 5, dtype=torch.float)
363        >>> bias = torch.randn(8, dtype=torch.float)
364        >>>
365        >>> scale, zero_point = 1.0, 0
366        >>> dtype_inputs = torch.quint8
367        >>> dtype_filters = torch.qint8
368        >>>
369        >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
370        >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
371        >>> qF.conv3d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
372    """  # noqa: E501
373    if padding_mode != "zeros":
374        raise NotImplementedError("Only zero-padding is supported!")
375    if input.dtype != torch.quint8:
376        raise NotImplementedError(
377            "Only torch.quint8 is supported for activation tensor!"
378        )
379    if weight.dtype != torch.qint8:
380        raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
381    if input.ndim != 5:
382        raise ValueError("Input shape must be `(N, C, D, H, W)`!")
383    stride = _triple(stride)
384    padding = _triple(padding)
385    dilation = _triple(dilation)
386
387    packed_params = torch.ops.quantized.conv3d_prepack(
388        weight, bias, stride, padding, dilation, groups
389    )
390    return torch.ops.quantized.conv3d(input, packed_params, scale, zero_point)
391
392
393def interpolate(
394    input, size=None, scale_factor=None, mode="nearest", align_corners=None
395):
396    r"""Down/up samples the input to either the given :attr:`size` or the given
397    :attr:`scale_factor`
398
399    See :func:`torch.nn.functional.interpolate` for implementation details.
400
401    The input dimensions are interpreted in the form:
402    `mini-batch x channels x [optional depth] x [optional height] x width`.
403
404    .. note:: The input quantization parameters propagate to the output.
405
406    .. note:: Only 2D/3D input is supported for quantized inputs
407
408    .. note:: Only the following modes are supported for the quantized inputs:
409
410        - `bilinear`
411        - `nearest`
412
413    Args:
414        input (Tensor): the input tensor
415        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
416            output spatial size.
417        scale_factor (float or Tuple[float]): multiplier for spatial size. Has to match input size if it is a tuple.
418        mode (str): algorithm used for upsampling:
419            ``'nearest'`` | ``'bilinear'``
420        align_corners (bool, optional): Geometrically, we consider the pixels of the
421            input and output as squares rather than points.
422            If set to ``True``, the input and output tensors are aligned by the
423            center points of their corner pixels, preserving the values at the corner pixels.
424            If set to ``False``, the input and output tensors are aligned by the corner
425            points of their corner pixels, and the interpolation uses edge value padding
426            for out-of-boundary values, making this operation *independent* of input size
427            when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
428            is ``'bilinear'``.
429            Default: ``False``
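
    Examples (a minimal sketch; the shapes and quantization parameters are
    illustrative only)::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> x = torch.randn(1, 3, 4, 4, dtype=torch.float)
        >>> qx = torch.quantize_per_tensor(x, scale=0.5, zero_point=0, dtype=torch.quint8)
        >>> qF.interpolate(qx, scale_factor=2, mode="nearest")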
430    """
431    if not input.is_quantized:
432        raise ValueError("Input to 'quantized.interpolate' must be quantized!")
433    return torch.nn.functional.interpolate(
434        input, size, scale_factor, mode, align_corners
435    )
436
437
438def linear(
439    input: Tensor,
440    weight: Tensor,
441    bias: Optional[Tensor] = None,
442    scale: Optional[float] = None,
443    zero_point: Optional[int] = None,
444) -> Tensor:
445    r"""
446    Applies a linear transformation to the incoming quantized data:
447    :math:`y = xA^T + b`.
448    See :class:`~torch.ao.nn.quantized.Linear`
449
450    .. note::
451
452      Current implementation packs weights on every call, which has penalty on performance.
453      If you want to avoid the overhead, use :class:`~torch.ao.nn.quantized.Linear`.
454
455    Args:
456      input (Tensor): Quantized input of type `torch.quint8`
457      weight (Tensor): Quantized weight of type `torch.qint8`
458      bias (Tensor): None or fp32 bias of type `torch.float`
459      scale (double): output scale. If None, derived from the input scale
460      zero_point (long): output zero point. If None, derived from the input zero_point
461
462    Shape:
463        - Input: :math:`(N, *, in\_features)` where `*` means any number of
464          additional dimensions
465        - Weight: :math:`(out\_features, in\_features)`
466        - Bias: :math:`(out\_features)`
467        - Output: :math:`(N, *, out\_features)`
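
    Examples (a minimal sketch; the shapes, scales, and zero points are
    illustrative only)::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(2, 4), 0.1, 0, torch.quint8)
        >>> qw = torch.quantize_per_tensor(torch.randn(3, 4), 0.1, 0, torch.qint8)
        >>> bias = torch.randn(3, dtype=torch.float)  # bias stays in fp32
        >>> qF.linear(qx, qw, bias, scale=0.2, zero_point=0)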
468    """
469    if scale is None:
470        scale = input.q_scale()
471    if zero_point is None:
472        zero_point = input.q_zero_point()
473    _packed_params = torch.ops.quantized.linear_prepack(weight, bias)
474    return torch.ops.quantized.linear(input, _packed_params, scale, zero_point)
475
476
477def max_pool1d(
478    input,
479    kernel_size,
480    stride=None,
481    padding=0,
482    dilation=1,
483    ceil_mode=False,
484    return_indices=False,
485):
486    r"""Applies a 1D max pooling over a quantized input signal composed of
487    several quantized input planes.
488
489    .. note:: The input quantization parameters are propagated to the output.
490
491    See :class:`~torch.ao.nn.quantized.MaxPool1d` for details.
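
    Examples (a minimal sketch with illustrative values)::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> x = torch.randn(1, 2, 8, dtype=torch.float)
        >>> qx = torch.quantize_per_tensor(x, scale=0.5, zero_point=0, dtype=torch.quint8)
        >>> qF.max_pool1d(qx, kernel_size=2)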
492    """
493    if return_indices:
494        raise NotImplementedError("return_indices is not yet implemented!")
495    if stride is None:
496        stride = torch.jit.annotate(List[int], [])
497    return torch.nn.functional.max_pool1d(
498        input,
499        kernel_size,
500        stride,
501        padding,
502        dilation,
503        ceil_mode=ceil_mode,
504        return_indices=return_indices,
505    )
506
507
508def max_pool2d(
509    input,
510    kernel_size,
511    stride=None,
512    padding=0,
513    dilation=1,
514    ceil_mode=False,
515    return_indices=False,
516):
517    r"""Applies a 2D max pooling over a quantized input signal composed of
518    several quantized input planes.
519
520    .. note:: The input quantization parameters are propagated to the output.
521
522    See :class:`~torch.ao.nn.quantized.MaxPool2d` for details.
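
    Examples (a minimal sketch with illustrative values)::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> x = torch.randn(1, 3, 4, 4, dtype=torch.float)
        >>> qx = torch.quantize_per_tensor(x, scale=0.5, zero_point=0, dtype=torch.quint8)
        >>> qF.max_pool2d(qx, kernel_size=2)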
523    """
524    if return_indices:
525        raise NotImplementedError("return_indices is not yet implemented!")
526    if stride is None:
527        stride = torch.jit.annotate(List[int], [])
528    return torch.nn.functional.max_pool2d(
529        input,
530        kernel_size,
531        stride,
532        padding,
533        dilation,
534        ceil_mode=ceil_mode,
535        return_indices=return_indices,
536    )
537
538
539def celu(input: Tensor, scale: float, zero_point: int, alpha: float = 1.0) -> Tensor:
540    r"""celu(input, scale, zero_point, alpha=1.) -> Tensor
541
542    Applies the quantized CELU function element-wise.
543
544    .. math::
545        \text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x / \alpha) - 1))
546
547    Args:
548        input: quantized input
549        alpha: the :math:`\alpha` value for the CELU formulation. Default: 1.0
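
    Examples (a minimal sketch; zero_point 128 is chosen so negative values
    stay representable in ``torch.quint8``)::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(4), 0.1, 128, torch.quint8)
        >>> qF.celu(qx, scale=0.1, zero_point=128)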
550    """
551    if not input.is_quantized:
552        raise ValueError("Input to 'quantized.celu' must be quantized!")
553    return torch.ops.quantized.celu(input, scale, zero_point, alpha)
554
555
556def leaky_relu(
557    input: Tensor,
558    negative_slope: float = 0.01,
559    inplace: bool = False,
560    scale: Optional[float] = None,
561    zero_point: Optional[int] = None,
562):
563    r"""
564    Quantized version of the.
565    leaky_relu(input, negative_slope=0.01, inplace=False, scale, zero_point) -> Tensor
566
567    Applies element-wise,
568    :math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)`
569
570    Args:
571        input: Quantized input
572        negative_slope: The slope of the negative input
573        inplace: Inplace modification of the input tensor
574        scale, zero_point: Scale and zero point of the output tensor.
575
576    See :class:`~torch.nn.LeakyReLU` for more details.
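
    Examples (a minimal sketch; values are illustrative only)::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(4), 0.1, 128, torch.quint8)
        >>> qF.leaky_relu(qx, negative_slope=0.1, scale=0.1, zero_point=128)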
577    """
578    if scale is not None and zero_point is not None:
579        assert not inplace, "Cannot rescale with `inplace`"
580        output = torch._empty_affine_quantized(
581            input.shape, scale=scale, zero_point=int(zero_point), dtype=input.dtype
582        )
583        torch._C._nn.leaky_relu(input, negative_slope, out=output)
584        return output
585    if inplace:
586        result = torch._C._nn.leaky_relu_(input, negative_slope)
587    else:
588        result = torch._C._nn.leaky_relu(input, negative_slope)
589    return result
590
591
592def hardtanh(
593    input: Tensor, min_val: float = -1.0, max_val: float = 1.0, inplace: bool = False
594) -> Tensor:
595    r"""This is the quantized version of :func:`~torch.nn.functional.hardtanh`."""
596    if not input.is_quantized:
597        raise ValueError("Input to 'quantized.hardtanh' must be quantized!")
598    if inplace:
599        return torch._C._nn.hardtanh_(input, min_val, max_val)
600    return torch._C._nn.hardtanh(input, min_val, max_val)
601
602
603def hardswish(input: Tensor, scale: float, zero_point: int) -> Tensor:
604    r"""This is the quantized version of :func:`~torch.nn.functional.hardswish`.
605
606    Args:
607        input: quantized input
608        scale: quantization scale of the output tensor
609        zero_point: quantization zero point of the output tensor
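
    Examples (a minimal sketch; values are illustrative only)::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(4), 0.1, 128, torch.quint8)
        >>> qF.hardswish(qx, scale=0.1, zero_point=128)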
610    """
611    if not input.is_quantized:
612        raise ValueError("Input to 'quantized.hardswish' must be quantized!")
613    return torch._ops.ops.quantized.hardswish(input, scale, zero_point)
614
615
616def threshold(input: Tensor, threshold: float, value: float) -> Tensor:
617    r"""Applies the quantized version of the threshold function element-wise:
618
619    .. math::
620        x = \begin{cases}
621                x & \text{if~} x > \text{threshold} \\
622                \text{value} & \text{otherwise}
623            \end{cases}
624
625    See :class:`~torch.nn.Threshold` for more details.
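
    Examples (a minimal sketch; values are illustrative only)::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(4), 0.1, 128, torch.quint8)
        >>> qF.threshold(qx, threshold=0.0, value=-1.0)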
626    """
627    if not input.is_quantized:
628        raise ValueError("Input to 'quantized.threshold' must be quantized!")
629    if threshold is None:
630        raise ValueError("Input to 'threshold' must be specified!")
631    if value is None:
632        raise ValueError("Input to 'value' must be specified!")
633    return torch._ops.ops.quantized.threshold(input, threshold, value)
634
635
636def elu(input: Tensor, scale: float, zero_point: int, alpha: float = 1.0) -> Tensor:
637    r"""This is the quantized version of :func:`~torch.nn.functional.elu`.
638
639    Args:
640        input: quantized input
641        scale: quantization scale of the output tensor
642        zero_point: quantization zero point of the output tensor
643        alpha: the alpha constant
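
    Examples (a minimal sketch; values are illustrative only)::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(4), 0.1, 128, torch.quint8)
        >>> qF.elu(qx, scale=0.1, zero_point=128)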
644    """
645    if not input.is_quantized:
646        raise ValueError("Input to 'quantized.elu' must be quantized!")
647    return torch.ops.quantized.elu(input, scale, zero_point, alpha)
648
649
650def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor:
651    r"""This is the quantized version of :func:`~torch.nn.functional.hardsigmoid`."""
652    if not input.is_quantized:
653        raise ValueError("Input to 'quantized.hardsigmoid' must be quantized!")
654    if inplace:
655        return torch._C._nn.hardsigmoid_(input)  # type: ignore[attr-defined]
656    return torch._C._nn.hardsigmoid(input)
657
658
659def clamp(input: Tensor, min_: float, max_: float) -> Tensor:
660    r"""float(input, min\_, max\_) -> Tensor
661
662    Applies the clamp function element-wise.
663    See :class:`~torch.ao.nn.quantized.clamp` for more details.
664
665    Args:
666        input: quantized input
667        min_: minimum value for clamping
668        max_: maximum value for clamping
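
    Examples (a minimal sketch; values are illustrative only)::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> from torch.ao.nn.quantized import functional as qF
        >>> qx = torch.quantize_per_tensor(torch.randn(4), 0.1, 128, torch.quint8)
        >>> qF.clamp(qx, -0.5, 0.5)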
669    """
670    if not input.is_quantized:
671        raise ValueError("Input to 'quantized.clamp' must be quantized!")
672    return torch.clamp(input, min_, max_)
673
674
675def upsample(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
676    r"""Upsamples the input to either the given :attr:`size` or the given
677    :attr:`scale_factor`
678
679    .. warning::
680        This function is deprecated in favor of
681        :func:`torch.ao.nn.quantized.functional.interpolate`.
682        This is equivalent with ``nn.quantized.functional.interpolate(...)``.
683
684    See :func:`torch.nn.functional.interpolate` for implementation details.
685
686    The input dimensions are interpreted in the form:
687    `mini-batch x channels x [optional depth] x [optional height] x width`.
688
689    .. note:: The input quantization parameters propagate to the output.
690
691    .. note:: Only 2D input is supported for quantized inputs
692
693    .. note:: Only the following modes are supported for the quantized inputs:
694
695        - `bilinear`
696        - `nearest`
697
698    Args:
699        input (Tensor): quantized input tensor
700        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
701            output spatial size.
702        scale_factor (float or Tuple[float]): multiplier for spatial size. Has to be an integer.
703        mode (str): algorithm used for upsampling:
704            ``'nearest'`` | ``'bilinear'``
705        align_corners (bool, optional): Geometrically, we consider the pixels of the
706            input and output as squares rather than points.
707            If set to ``True``, the input and output tensors are aligned by the
708            center points of their corner pixels, preserving the values at the corner pixels.
709            If set to ``False``, the input and output tensors are aligned by the corner
710            points of their corner pixels, and the interpolation uses edge value padding
711            for out-of-boundary values, making this operation *independent* of input size
712            when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
713            is ``'bilinear'``.
714            Default: ``False``
715
716    .. warning::
717        With ``align_corners = True``, the linearly interpolating modes
718        (`bilinear`) don't proportionally align the
719        output and input pixels, and thus the output values can depend on the
720        input size. This was the default behavior for these modes up to version
721        0.3.1. Since then, the default behavior is ``align_corners = False``.
722        See :class:`~torch.nn.Upsample` for concrete examples on how this
723        affects the outputs.
724    """
    warnings.warn(
        "nn.quantized.functional.upsample is deprecated. Use nn.quantized.functional.interpolate instead."
    )
    return interpolate(input, size, scale_factor, mode, align_corners)


def upsample_bilinear(input, size=None, scale_factor=None):
732    r"""Upsamples the input, using bilinear upsampling.
733
734    .. warning::
735        This function is deprecated in favor of
736        :func:`torch.ao.nn.quantized.functional.interpolate`.
737        This is equivalent with
738        ``nn.quantized.functional.interpolate(..., mode='bilinear', align_corners=True)``.
739
740    .. note:: The input quantization parameters propagate to the output.
741
742    .. note:: Only 2D inputs are supported
743
744    Args:
745        input (Tensor): quantized input
746        size (int or Tuple[int, int]): output spatial size.
747        scale_factor (int or Tuple[int, int]): multiplier for spatial size
748    """
749    # DeprecationWarning is ignored by default
750    warnings.warn(
751        "nn.quantized.functional.upsample_bilinear is deprecated. Use nn.quantized.functional.interpolate instead."
752    )
753    return interpolate(input, size, scale_factor, mode="bilinear", align_corners=True)
754
755
756def upsample_nearest(input, size=None, scale_factor=None):
757    r"""Upsamples the input, using nearest neighbours' pixel values.
758
759    .. warning::
760        This function is deprecated in favor of
761        :func:`torch.ao.nn.quantized.functional.interpolate`.
762        This is equivalent with ``nn.quantized.functional.interpolate(..., mode='nearest')``.
763
764    .. note:: The input quantization parameters propagate to the output.
765
766    .. note:: Only 2D inputs are supported
767
768    Args:
769        input (Tensor): quantized input
770        size (int or Tuple[int, int] or Tuple[int, int, int]): output spatial
771            size.
772        scale_factor (int): multiplier for spatial size. Has to be an integer.
773    """
774    # DeprecationWarning is ignored by default
775    warnings.warn(
776        "nn.quantized.functional.upsample_nearest is deprecated. Use nn.quantized.functional.interpolate instead."
777    )
778    return interpolate(input, size, scale_factor, mode="nearest")
779