# mypy: allow-untyped-defs
from typing import Sequence, Tuple

import torch.nn.functional as F
from torch import Tensor
from torch.nn.common_types import _size_2_t, _size_4_t, _size_6_t

from .module import Module
from .utils import _ntuple, _pair, _quadruple


# TODO: grad_output size asserts in THNN

__all__ = [
    "CircularPad1d",
    "CircularPad2d",
    "CircularPad3d",
    "ConstantPad1d",
    "ConstantPad2d",
    "ConstantPad3d",
    "ReflectionPad1d",
    "ReflectionPad2d",
    "ReflectionPad3d",
    "ReplicationPad1d",
    "ReplicationPad2d",
    "ReplicationPad3d",
    "ZeroPad1d",
    "ZeroPad2d",
    "ZeroPad3d",
]


class _CircularPadNd(Module):
    __constants__ = ["padding"]
    padding: Sequence[int]

    def _check_input_dim(self, input):
        raise NotImplementedError

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)
        return F.pad(input, self.padding, "circular")

    def extra_repr(self) -> str:
        return f"{self.padding}"


class CircularPad1d(_CircularPadNd):
    r"""Pads the input tensor using circular padding of the input boundary.

    Tensor values at the beginning of the dimension are used to pad the end,
    and values at the end are used to pad the beginning. If negative padding is
    applied, then the ends of the tensor get removed.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on all boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
        >>> m = nn.CircularPad1d(2)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> input
        tensor([[[0., 1., 2., 3.],
                 [4., 5., 6., 7.]]])
        >>> m(input)
        tensor([[[2., 3., 0., 1., 2., 3., 0., 1.],
                 [6., 7., 4., 5., 6., 7., 4., 5.]]])
        >>> # using different paddings for different sides
        >>> m = nn.CircularPad1d((3, 1))
        >>> m(input)
        tensor([[[1., 2., 3., 0., 1., 2., 3., 0.],
                 [5., 6., 7., 4., 5., 6., 7., 4.]]])
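        >>> # Negative padding trims that end instead of padding it (a short
        >>> # sketch of the trimming rule described above; the expected
        >>> # output here is illustrative):
        >>> m = nn.CircularPad1d((-1, 0))
        >>> m(input)
        tensor([[[1., 2., 3.],
                 [5., 6., 7.]]])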
84    """
85
86    padding: Tuple[int, int]
87
88    def __init__(self, padding: _size_2_t) -> None:
89        super().__init__()
90        self.padding = _pair(padding)
91
92    def _check_input_dim(self, input):
93        if input.dim() != 2 and input.dim() != 3:
94            raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)")
95
96
class CircularPad2d(_CircularPadNd):
    r"""Pads the input tensor using circular padding of the input boundary.

    Tensor values at the beginning of the dimension are used to pad the end,
    and values at the end are used to pad the beginning. If negative padding is
    applied, then the ends of the tensor get removed.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.CircularPad2d(2)
        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> input
        tensor([[[[0., 1., 2.],
                  [3., 4., 5.],
                  [6., 7., 8.]]]])
        >>> m(input)
        tensor([[[[4., 5., 3., 4., 5., 3., 4.],
                  [7., 8., 6., 7., 8., 6., 7.],
                  [1., 2., 0., 1., 2., 0., 1.],
                  [4., 5., 3., 4., 5., 3., 4.],
                  [7., 8., 6., 7., 8., 6., 7.],
                  [1., 2., 0., 1., 2., 0., 1.],
                  [4., 5., 3., 4., 5., 3., 4.]]]])
        >>> # using different paddings for different sides
        >>> m = nn.CircularPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[5., 3., 4., 5., 3.],
                  [8., 6., 7., 8., 6.],
                  [2., 0., 1., 2., 0.],
                  [5., 3., 4., 5., 3.],
                  [8., 6., 7., 8., 6.]]]])
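        >>> # The module is a thin wrapper over the functional API; the same
        >>> # result can be obtained with mode="circular" (a sketch):
        >>> import torch.nn.functional as F
        >>> torch.equal(m(input), F.pad(input, (1, 1, 2, 0), mode="circular"))
        True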
143    """
144
145    padding: Tuple[int, int, int, int]
146
147    def __init__(self, padding: _size_4_t) -> None:
148        super().__init__()
149        self.padding = _quadruple(padding)
150
151    def _check_input_dim(self, input):
152        if input.dim() != 3 and input.dim() != 4:
153            raise ValueError(f"expected 3D or 4D input (got {input.dim()}D input)")
154
155
class CircularPad3d(_CircularPadNd):
    r"""Pads the input tensor using circular padding of the input boundary.

    Tensor values at the beginning of the dimension are used to pad the end,
    and values at the end are used to pad the beginning. If negative padding is
    applied, then the ends of the tensor get removed.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.CircularPad3d(3)
        >>> input = torch.randn(16, 3, 8, 320, 480)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.CircularPad3d((3, 3, 6, 6, 1, 1))
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        self.padding = _ntuple(6)(padding)

    def _check_input_dim(self, input):
        if input.dim() != 4 and input.dim() != 5:
            raise ValueError(f"expected 4D or 5D input (got {input.dim()}D input)")


class _ConstantPadNd(Module):
    __constants__ = ["padding", "value"]
    value: float
    padding: Sequence[int]

    def __init__(self, value: float) -> None:
        super().__init__()
        self.value = value

    def forward(self, input: Tensor) -> Tensor:
        return F.pad(input, self.padding, "constant", self.value)

    def extra_repr(self) -> str:
        return f"padding={self.padding}, value={self.value}"


class ConstantPad1d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on both boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
        value (float): the constant value used for padding

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ConstantPad1d(2, 3.5)
        >>> input = torch.randn(1, 2, 4)
        >>> input
        tensor([[[-1.0491, -0.7152, -0.0749,  0.8530],
                 [-1.3287,  1.8966,  0.1466, -0.2771]]])
        >>> m(input)
        tensor([[[ 3.5000,  3.5000, -1.0491, -0.7152, -0.0749,  0.8530,  3.5000,
                   3.5000],
                 [ 3.5000,  3.5000, -1.3287,  1.8966,  0.1466, -0.2771,  3.5000,
                   3.5000]]])
        >>> m = nn.ConstantPad1d(2, 3.5)
        >>> input = torch.randn(1, 2, 3)
        >>> input
        tensor([[[ 1.6616,  1.4523, -1.1255],
                 [-3.6372,  0.1182, -1.8652]]])
        >>> m(input)
        tensor([[[ 3.5000,  3.5000,  1.6616,  1.4523, -1.1255,  3.5000,  3.5000],
                 [ 3.5000,  3.5000, -3.6372,  0.1182, -1.8652,  3.5000,  3.5000]]])
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad1d((3, 1), 3.5)
        >>> m(input)
        tensor([[[ 3.5000,  3.5000,  3.5000,  1.6616,  1.4523, -1.1255,  3.5000],
                 [ 3.5000,  3.5000,  3.5000, -3.6372,  0.1182, -1.8652,  3.5000]]])
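        >>> # The configured padding and fill value show up in the module's
        >>> # repr via extra_repr (a quick sanity check):
        >>> m
        ConstantPad1d(padding=(3, 1), value=3.5)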
263    """
264
265    padding: Tuple[int, int]
266
267    def __init__(self, padding: _size_2_t, value: float):
268        super().__init__(value)
269        self.padding = _pair(padding)
270
271
class ConstantPad2d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
        value (float): the constant value used for padding

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ConstantPad2d(2, 3.5)
        >>> input = torch.randn(1, 2, 2)
        >>> input
        tensor([[[ 1.6585,  0.4320],
                 [-0.8701, -0.4649]]])
        >>> m(input)
        tensor([[[ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  1.6585,  0.4320,  3.5000,  3.5000],
                 [ 3.5000,  3.5000, -0.8701, -0.4649,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000]]])
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
        >>> m(input)
        tensor([[[ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  1.6585,  0.4320],
                 [ 3.5000,  3.5000,  3.5000, -0.8701, -0.4649],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000]]])
    """

    __constants__ = ["padding", "value"]
    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t, value: float) -> None:
        super().__init__(value)
        self.padding = _quadruple(padding)


class ConstantPad3d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
        value (float): the constant value used for padding

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ConstantPad3d(3, 3.5)
        >>> input = torch.randn(16, 3, 10, 20, 30)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5)
        >>> output = m(input)
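        >>> # Shape check against the formulas above (a sketch):
        >>> # D_out = 10 + 0 + 1, H_out = 20 + 6 + 6, W_out = 30 + 3 + 3
        >>> output.shape
        torch.Size([16, 3, 11, 32, 36])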
354    """
355
356    padding: Tuple[int, int, int, int, int, int]
357
358    def __init__(self, padding: _size_6_t, value: float) -> None:
359        super().__init__(value)
360        self.padding = _ntuple(6)(padding)
361
362
class _ReflectionPadNd(Module):
    __constants__ = ["padding"]
    padding: Sequence[int]

    def forward(self, input: Tensor) -> Tensor:
        return F.pad(input, self.padding, "reflect")

    def extra_repr(self) -> str:
        return f"{self.padding}"


class ReflectionPad1d(_ReflectionPadNd):
    r"""Pads the input tensor using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on all boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`).
            Note that padding size should be less than the corresponding input dimension.

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReflectionPad1d(2)
        >>> # xdoctest: +IGNORE_WANT("other tests seem to modify printing styles")
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> input
        tensor([[[0., 1., 2., 3.],
                 [4., 5., 6., 7.]]])
        >>> m(input)
        tensor([[[2., 1., 0., 1., 2., 3., 2., 1.],
                 [6., 5., 4., 5., 6., 7., 6., 5.]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReflectionPad1d((3, 1))
        >>> m(input)
        tensor([[[3., 2., 1., 0., 1., 2., 3., 2.],
                 [7., 6., 5., 4., 5., 6., 7., 6.]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        self.padding = _pair(padding)


class ReflectionPad2d(_ReflectionPadNd):
    r"""Pads the input tensor using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`).
            Note that padding size should be less than the corresponding input dimension.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
        >>> m = nn.ReflectionPad2d(2)
        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> input
        tensor([[[[0., 1., 2.],
                  [3., 4., 5.],
                  [6., 7., 8.]]]])
        >>> m(input)
        tensor([[[[8., 7., 6., 7., 8., 7., 6.],
                  [5., 4., 3., 4., 5., 4., 3.],
                  [2., 1., 0., 1., 2., 1., 0.],
                  [5., 4., 3., 4., 5., 4., 3.],
                  [8., 7., 6., 7., 8., 7., 6.],
                  [5., 4., 3., 4., 5., 4., 3.],
                  [2., 1., 0., 1., 2., 1., 0.]]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReflectionPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[7., 6., 7., 8., 7.],
                  [4., 3., 4., 5., 4.],
                  [1., 0., 1., 2., 1.],
                  [4., 3., 4., 5., 4.],
                  [7., 6., 7., 8., 7.]]]])
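        >>> # Reflection requires the padding to be smaller than the input
        >>> # size along that dimension; larger padding is rejected (a sketch;
        >>> # the exact error message may vary across versions):
        >>> try:
        ...     nn.ReflectionPad2d(3)(input)
        ... except RuntimeError:
        ...     print("padding too large")
        padding too large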
459    """
460
461    padding: Tuple[int, int, int, int]
462
463    def __init__(self, padding: _size_4_t) -> None:
464        super().__init__()
465        self.padding = _quadruple(padding)
466
467
class ReflectionPad3d(_ReflectionPadNd):
    r"""Pads the input tensor using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`).
            Note that padding size should be less than the corresponding input dimension.

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
        >>> m = nn.ReflectionPad3d(1)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 1, 2, 2, 2)
        >>> m(input)
        tensor([[[[[7., 6., 7., 6.],
                   [5., 4., 5., 4.],
                   [7., 6., 7., 6.],
                   [5., 4., 5., 4.]],
                  [[3., 2., 3., 2.],
                   [1., 0., 1., 0.],
                   [3., 2., 3., 2.],
                   [1., 0., 1., 0.]],
                  [[7., 6., 7., 6.],
                   [5., 4., 5., 4.],
                   [7., 6., 7., 6.],
                   [5., 4., 5., 4.]],
                  [[3., 2., 3., 2.],
                   [1., 0., 1., 0.],
                   [3., 2., 3., 2.],
                   [1., 0., 1., 0.]]]]])
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        self.padding = _ntuple(6)(padding)


class _ReplicationPadNd(Module):
    __constants__ = ["padding"]
    padding: Sequence[int]

    def forward(self, input: Tensor) -> Tensor:
        return F.pad(input, self.padding, "replicate")

    def extra_repr(self) -> str:
        return f"{self.padding}"


class ReplicationPad1d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on all boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
        >>> m = nn.ReplicationPad1d(2)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> input
        tensor([[[0., 1., 2., 3.],
                 [4., 5., 6., 7.]]])
        >>> m(input)
        tensor([[[0., 0., 0., 1., 2., 3., 3., 3.],
                 [4., 4., 4., 5., 6., 7., 7., 7.]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReplicationPad1d((3, 1))
        >>> m(input)
        tensor([[[0., 0., 0., 0., 1., 2., 3., 3.],
                 [4., 4., 4., 4., 5., 6., 7., 7.]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        self.padding = _pair(padding)


class ReplicationPad2d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReplicationPad2d(2)
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> input
        tensor([[[[0., 1., 2.],
                  [3., 4., 5.],
                  [6., 7., 8.]]]])
        >>> m(input)
        tensor([[[[0., 0., 0., 1., 2., 2., 2.],
                  [0., 0., 0., 1., 2., 2., 2.],
                  [0., 0., 0., 1., 2., 2., 2.],
                  [3., 3., 3., 4., 5., 5., 5.],
                  [6., 6., 6., 7., 8., 8., 8.],
                  [6., 6., 6., 7., 8., 8., 8.],
                  [6., 6., 6., 7., 8., 8., 8.]]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReplicationPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[0., 0., 1., 2., 2.],
                  [0., 0., 1., 2., 2.],
                  [0., 0., 1., 2., 2.],
                  [3., 3., 4., 5., 5.],
                  [6., 6., 7., 8., 8.]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__()
        self.padding = _quadruple(padding)


class ReplicationPad3d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ReplicationPad3d(3)
        >>> input = torch.randn(16, 3, 8, 320, 480)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1))
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        self.padding = _ntuple(6)(padding)


class ZeroPad1d(ConstantPad1d):
    r"""Pads the input tensor boundaries with zero.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on both boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ZeroPad1d(2)
        >>> input = torch.randn(1, 2, 4)
        >>> input
        tensor([[[-1.0491, -0.7152, -0.0749,  0.8530],
                 [-1.3287,  1.8966,  0.1466, -0.2771]]])
        >>> m(input)
        tensor([[[ 0.0000,  0.0000, -1.0491, -0.7152, -0.0749,  0.8530,  0.0000,
                   0.0000],
                 [ 0.0000,  0.0000, -1.3287,  1.8966,  0.1466, -0.2771,  0.0000,
                   0.0000]]])
        >>> m = nn.ZeroPad1d(2)
        >>> input = torch.randn(1, 2, 3)
        >>> input
        tensor([[[ 1.6616,  1.4523, -1.1255],
                 [-3.6372,  0.1182, -1.8652]]])
        >>> m(input)
        tensor([[[ 0.0000,  0.0000,  1.6616,  1.4523, -1.1255,  0.0000,  0.0000],
                 [ 0.0000,  0.0000, -3.6372,  0.1182, -1.8652,  0.0000,  0.0000]]])
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad1d((3, 1))
        >>> m(input)
        tensor([[[ 0.0000,  0.0000,  0.0000,  1.6616,  1.4523, -1.1255,  0.0000],
                 [ 0.0000,  0.0000,  0.0000, -3.6372,  0.1182, -1.8652,  0.0000]]])
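        >>> # ZeroPad1d is just ConstantPad1d with the fill value fixed at 0.0
        >>> # (a quick check of the subclass relationship):
        >>> isinstance(m, nn.ConstantPad1d)
        True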
709    """
710
711    padding: Tuple[int, int]
712
713    def __init__(self, padding: _size_2_t) -> None:
714        super().__init__(padding, 0.0)
715
716    def extra_repr(self) -> str:
717        return f"{self.padding}"
718
719
class ZeroPad2d(ConstantPad2d):
    r"""Pads the input tensor boundaries with zero.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ZeroPad2d(2)
        >>> input = torch.randn(1, 1, 3, 3)
        >>> input
        tensor([[[[-0.1678, -0.4418,  1.9466],
                  [ 0.9604, -0.4219, -0.5241],
                  [-0.9162, -0.5436, -0.6446]]]])
        >>> m(input)
        tensor([[[[ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000,  0.0000, -0.1678, -0.4418,  1.9466,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.9604, -0.4219, -0.5241,  0.0000,  0.0000],
                  [ 0.0000,  0.0000, -0.9162, -0.5436, -0.6446,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000]]]])
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000, -0.1678, -0.4418,  1.9466,  0.0000],
                  [ 0.0000,  0.9604, -0.4219, -0.5241,  0.0000],
                  [ 0.0000, -0.9162, -0.5436, -0.6446,  0.0000]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__(padding, 0.0)

    def extra_repr(self) -> str:
        return f"{self.padding}"


class ZeroPad3d(ConstantPad3d):
    r"""Pads the input tensor boundaries with zero.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If an `int`, uses the same
            padding on all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ZeroPad3d(3)
        >>> input = torch.randn(16, 3, 10, 20, 30)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad3d((3, 3, 6, 6, 0, 1))
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__(padding, 0.0)

    def extra_repr(self) -> str:
        return f"{self.padding}"
