
Searched full:memory_format (Results 1 – 25 of 477) sorted by relevance


/aosp_15_r20/external/pytorch/torch/nn/utils/
memory_format.py
5 def convert_conv2d_weight_memory_format(module, memory_format): argument
6 r"""Convert ``memory_format`` of ``nn.Conv2d.weight`` to ``memory_format``.
9 Note that it only changes the memory_format, but not the semantics of each dimension.
14 Calling ``model.to(memory_format=torch.channels_last)`` is more aggressive
17 necessarily benefit from conversion to specified ``memory_format``.
27 from memory_format conversion.
40 ``memory_format``.
49 memory_format: user specified ``memory_format``,
70 module.weight.detach().clone().contiguous(memory_format=memory_format)
73 weight_data.size(), memory_format=memory_format
[all …]
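
The docstring above distinguishes this per-weight conversion from the more aggressive ``model.to(memory_format=torch.channels_last)``. A minimal usage sketch (module sizes are arbitrary, and output-layout propagation can vary by backend):

    import torch
    import torch.nn as nn
    from torch.nn.utils import convert_conv2d_weight_memory_format

    # Arbitrary toy model; the utility walks children and converts nn.Conv2d weights.
    model = nn.Sequential(nn.Conv2d(8, 4, kernel_size=3), nn.ReLU())
    model = convert_conv2d_weight_memory_format(model, torch.channels_last)

    x = torch.randn(1, 8, 32, 32).to(memory_format=torch.channels_last)
    out = model(x)
    print(out.is_contiguous(memory_format=torch.channels_last))  # typically True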
/aosp_15_r20/external/pytorch/test/
test_mkldnn_fusion.py
73 for memory_format, enabled in [
92 groups=groups).to(memory_format=memory_format)
93 … x = torch.randn(batch_size, iC, input_size, input_size).to(memory_format=memory_format)
114 for memory_format, enabled in [
121 … m = M(unary_fn, 3, oC, bias, kernel_size=(3, 3)).to(memory_format=memory_format)
122 x = torch.randn(1, 3, 224, 224).to(memory_format=memory_format)
141 for module, dim, memory_format in [
164 groups=groups).to(memory_format=memory_format)
168 x = torch.randn(input_sizes).to(memory_format=memory_format)
245 for bias, dilation, groups, memory_format in options:
[all …]
test_prims.py
242 for shapes, memory_format in pairs:
245 … expected = torch.empty(shape, device=device, dtype=dtype, memory_format=memory_format)
246 actual = refs.empty(shape, device=device, dtype=dtype, memory_format=memory_format)
251 expected = torch.clone(a, memory_format=memory_format)
252 actual = torch.clone(a, memory_format=memory_format)
257 expected = a.contiguous(memory_format=memory_format)
258 actual = refs.contiguous(a, memory_format=memory_format)
363 a = torch.zeros([2] * ndim).to(memory_format=mf)
365 self.assertTrue(res.is_contiguous(memory_format=mf))
371 self.assertTrue(a.is_contiguous(memory_format=torch.channels_last))
[all …]
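
These tests assert that the refs/prims variants of empty, clone, and contiguous honor memory_format exactly like their eager counterparts. The eager-side behavior they compare against, as a quick sketch:

    import torch

    a = torch.randn(2, 3, 4, 5)  # NCHW-contiguous by default

    b = a.contiguous(memory_format=torch.channels_last)
    print(b.stride())  # (60, 1, 15, 3): channel stride is 1
    print(b.is_contiguous(memory_format=torch.channels_last))  # True

    c = torch.clone(b, memory_format=torch.preserve_format)
    print(c.stride() == b.stride())  # preserve_format keeps the source layout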
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cudnn/
ConvShared.cpp
85 << " memory_format = " << params.memory_format << "\n" in operator <<()
113 at::MemoryFormat memory_format) { in setConvolutionParams() argument
120 params->memory_format = memory_format; in setConvolutionParams()
162 ((params.memory_format == at::MemoryFormat::ChannelsLast) || in repro_from_args()
163 (params.memory_format == at::MemoryFormat::ChannelsLast3d)) in repro_from_args()
164 ? ".to(memory_format=torch." + channels_last_xd + ")" in repro_from_args()
220 auto memory_format = output->suggest_memory_format(); in cudnn_convolution_forward_out() local
224 Tensor weight_contig = weight->contiguous(memory_format); in cudnn_convolution_forward_out()
225 Tensor input_contig = input->contiguous(memory_format); in cudnn_convolution_forward_out()
252 auto memory_format = cudnn_conv_suggest_memory_format(input_t, weight_t); in cudnn_convolution() local
[all …]
/aosp_15_r20/external/pytorch/torch/distributed/_shard/sharded_tensor/
metadata.py
24 memory_format: torch.memory_format = field(default=torch.contiguous_format) variable in TensorProperties
28 # Since torch.memory_format cannot be pickled!
29 memory_format = self.memory_format
30 if memory_format == torch.contiguous_format:
32 elif memory_format == torch.channels_last:
34 elif memory_format == torch.preserve_format:
37 raise RuntimeError(f"Invalid torch.memory_format: {memory_format}")
60 memory_format = torch.contiguous_format
62 memory_format = torch.channels_last
64 memory_format = torch.preserve_format
[all …]
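
This TensorProperties dataclass (and its twin in torch/distributed/checkpoint below) serializes memory_format through a tag because torch.memory_format objects cannot be pickled. A standalone sketch of that pattern; the dictionaries here are illustrative stand-ins, not the module's actual helpers:

    import torch

    _FORMAT_TO_TAG = {
        torch.contiguous_format: "contiguous",
        torch.channels_last: "channels_last",
        torch.preserve_format: "preserve",
    }
    _TAG_TO_FORMAT = {tag: fmt for fmt, tag in _FORMAT_TO_TAG.items()}

    def encode(memory_format: torch.memory_format) -> str:
        # Mirrors the if/elif chain above, including the failure mode.
        try:
            return _FORMAT_TO_TAG[memory_format]
        except KeyError:
            raise RuntimeError(f"Invalid torch.memory_format: {memory_format}")

    def decode(tag: str) -> torch.memory_format:
        return _TAG_TO_FORMAT[tag]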
__init__.py
33 memory_format=torch.contiguous_format, argument
56 memory_format (:class:`torch.memory_format`, optional): the desired memory format of
75 memory_format=memory_format,
88 memory_format=torch.contiguous_format, argument
129 memory_format=memory_format,
142 memory_format=torch.contiguous_format, argument
183 memory_format=memory_format,
198 memory_format=torch.contiguous_format, argument
237 memory_format=memory_format,
252 memory_format=torch.contiguous_format, argument
[all …]
api.py
223 memory_format (:class:`torch.memory_format`, optional): the desired memory format of
252 memory_format=torch.contiguous_format, argument
263 if memory_format != torch.contiguous_format:
265 "Only torch.contiguous_format memory_format is currently supported"
268 self._metadata.tensor_properties.memory_format = memory_format
495 self, memory_format=torch.preserve_format, process_group=None argument
511 memory_format != torch.preserve_format
512 and memory_format != torch.contiguous_format
530 cpu_tensor = shard.tensor.cpu(memory_format=memory_format) # type: ignore[call-arg]
553 memory_format=torch.preserve_format, argument
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/miopen/
Conv_miopen.cpp
667 auto memory_format = output->suggest_memory_format(); in miopen_convolution_add_bias_() local
671 at::Tensor bias_contig = bias->reshape(shape).contiguous(memory_format); in miopen_convolution_add_bias_()
673 bias_contig.resize_(bias_contig.sizes(), memory_format ); in miopen_convolution_add_bias_()
763 auto memory_format = at::MemoryFormat::Contiguous; in miopen_convolution_forward() local
765 memory_format = (weight->ndimension() == 5) ? /*at::MemoryFormat::ChannelsLast3d*/at::MemoryFormat… in miopen_convolution_forward()
771 input->options().memory_format(memory_format)); in miopen_convolution_forward()
782 Tensor weight_contig = weight->contiguous(memory_format); in miopen_convolution_forward()
784 weight_contig.resize_(weight_contig.sizes(), memory_format); in miopen_convolution_forward()
785 Tensor input_contig = input->contiguous(memory_format); in miopen_convolution_forward()
786 input_contig.resize_(input_contig.sizes(), memory_format); in miopen_convolution_forward()
[all …]
/aosp_15_r20/external/pytorch/torch/distributed/checkpoint/
metadata.py
52 memory_format: torch.memory_format = field(default=torch.contiguous_format) variable in TensorProperties
57 # Since torch.memory_format cannot be pickled!
58 memory_format = self.memory_format
59 if memory_format == torch.contiguous_format:
61 elif memory_format == torch.channels_last:
63 elif memory_format == torch.preserve_format:
66 raise RuntimeError(f"Invalid torch.memory_format: {memory_format}")
89 memory_format = torch.contiguous_format
91 memory_format = torch.channels_last
93 memory_format = torch.preserve_format
[all …]
/aosp_15_r20/external/pytorch/test/nn/
test_dropout.py
95 def _test_dropout(self, cls, device, input, memory_format=torch.contiguous_format): argument
100 input_var = input.clone(memory_format=memory_format).requires_grad_()
102 self.assertTrue(output.is_contiguous(memory_format=memory_format))
105 self.assertTrue(input_var.grad.is_contiguous(memory_format=memory_format))
109 input_var = input.clone(memory_format=memory_format).requires_grad_()
111 self.assertTrue(output.is_contiguous(memory_format=memory_format))
114 self.assertTrue(input_var.grad.is_contiguous(memory_format=memory_format))
127 self, cls, device, memory_format=torch.contiguous_format argument
137 2, 3, 3, 6, device=device, memory_format=memory_format
145 self.assertTrue(out.is_contiguous(memory_format=memory_format))
[all …]
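
The helper asserts that dropout leaves both the output and the input gradient contiguous in whatever memory_format the input was cloned into. The eager behavior being exercised, roughly:

    import torch
    import torch.nn.functional as F

    x = torch.randn(2, 3, 8, 8).clone(memory_format=torch.channels_last).requires_grad_()
    out = F.dropout(x, p=0.5, training=True)
    print(out.is_contiguous(memory_format=torch.channels_last))     # True
    out.sum().backward()
    print(x.grad.is_contiguous(memory_format=torch.channels_last))  # True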
test_pooling.py
223 input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
236 self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
248 input = input.contiguous(memory_format=torch.channels_last)
263 self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
270 self, device, dtype, mod, memory_format argument
273 input = input.to(device).to(memory_format=memory_format).requires_grad_()
283 self.assertTrue(out2.is_contiguous(memory_format=memory_format))
319 input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
333 self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
344 input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
DilatedMaxPool2d.cpp
49 const auto memory_format = input.suggest_memory_format(); in TORCH_META_FUNC() local
50 if (memory_format == at::MemoryFormat::ChannelsLast) { in TORCH_META_FUNC()
53 } else if (memory_format == at::MemoryFormat::Contiguous) { in TORCH_META_FUNC()
74 outputHeight, outputWidth, memory_format); in TORCH_META_FUNC()
79 … {nInputPlane, outputHeight, outputWidth}, {}, input.options().memory_format(memory_format), maybe… in TORCH_META_FUNC()
81 … {nInputPlane, outputHeight, outputWidth}, {}, input.options().memory_format(memory_format).dtype(… in TORCH_META_FUNC()
83 …, nInputPlane, outputHeight, outputWidth}, {}, input.options().memory_format(memory_format), maybe… in TORCH_META_FUNC()
85 …, nInputPlane, outputHeight, outputWidth}, {}, input.options().memory_format(memory_format).dtype(… in TORCH_META_FUNC()
125 const auto memory_format = input.suggest_memory_format(); in TORCH_META_FUNC() local
126 if (memory_format == at::MemoryFormat::ChannelsLast) { in TORCH_META_FUNC()
[all …]
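
The meta function derives the output layout from input.suggest_memory_format(), so max pooling preserves a channels_last input. Observable from Python:

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 16, 32, 32).to(memory_format=torch.channels_last)
    out = F.max_pool2d(x, kernel_size=2)
    print(out.is_contiguous(memory_format=torch.channels_last))  # True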
/aosp_15_r20/external/pytorch/tools/autograd/templates/
python_variable_methods.cpp
248 static Tensor dispatch_contiguous(const Tensor & self, at::MemoryFormat memory_format) { in dispatch_contiguous() argument
251 return self.contiguous(memory_format); in dispatch_contiguous()
258 "contiguous(*, MemoryFormat memory_format=contiguous_format)", in THPVariable_contiguous()
268 auto memory_format = r.memoryformat(0); in THPVariable_contiguous() local
270 if (self_.is_contiguous(memory_format)) { in THPVariable_contiguous()
281 jit::tracer::addInputs(node, "memory_format", memory_format); in THPVariable_contiguous()
288 return THPVariable_Wrap(dispatch_contiguous(self_, memory_format)); in THPVariable_contiguous()
407 …return self.to(self.options().device(device).memory_format(optional_memory_format), non_blocking, … in dispatch_to()
412 return self.to(self.options().memory_format(optional_memory_format), non_blocking, copy); in dispatch_to()
431 "cpu(*, MemoryFormat? memory_format=None)" in THPVariable_cpu()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
AdaptiveMaxPoolKernel.cpp
93 auto memory_format = at::MemoryFormat::ChannelsLast; in cpu_adaptive_max_pool2d_channels_last() local
94 auto input = input_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
95 auto output = output_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
96 auto indices = indices_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
193 if (!output_.is_contiguous(memory_format)) { in cpu_adaptive_max_pool2d_channels_last()
196 if (!indices_.is_contiguous(memory_format)) { in cpu_adaptive_max_pool2d_channels_last()
210 auto memory_format = at::MemoryFormat::ChannelsLast; in cpu_adaptive_max_pool2d_channels_last() local
211 auto input = input_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
212 auto output = output_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
213 auto indices = indices_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
[all …]
AdaptiveAvgPoolKernel.cpp
76 auto memory_format = at::MemoryFormat::ChannelsLast; in cpu_adaptive_avg_pool2d_channels_last() local
77 auto input = input_.contiguous(memory_format); in cpu_adaptive_avg_pool2d_channels_last()
78 auto output = output_.contiguous(memory_format); in cpu_adaptive_avg_pool2d_channels_last()
152 if (!output_.is_contiguous(memory_format)) { in cpu_adaptive_avg_pool2d_channels_last()
163 auto memory_format = at::MemoryFormat::ChannelsLast; in cpu_adaptive_avg_pool2d_channels_last() local
164 auto input = input_.contiguous(memory_format); in cpu_adaptive_avg_pool2d_channels_last()
165 auto output = output_.contiguous(memory_format); in cpu_adaptive_avg_pool2d_channels_last()
251 if (!output_.is_contiguous(memory_format)) { in cpu_adaptive_avg_pool2d_channels_last()
310 auto memory_format = at::MemoryFormat::ChannelsLast; in cpu_adaptive_avg_pool2d_backward_channels_last() local
311 auto grad_input = grad_input_.contiguous(memory_format); in cpu_adaptive_avg_pool2d_backward_channels_last()
[all …]
/aosp_15_r20/external/pytorch/torch/csrc/lazy/ts_backend/ops/
to_copy.h
26 const std::optional<at::MemoryFormat>& memory_format, in ToCopy() argument
39 memory_format)),
46 memory_format(memory_format) {}
55 const std::optional<at::MemoryFormat>& memory_format) const { in CanBeReused() argument
61 this->memory_format == memory_format); in CanBeReused()
88 if (memory_format.has_value()) { in ToString()
89 ss << ", memory_format=" << memory_format.value(); in ToString()
91 ss << ", memory_format=null"; in ToString()
110 kwarguments.emplace_back("memory_format", memory_format); in Lower()
123 std::optional<at::MemoryFormat> memory_format; variable
/aosp_15_r20/external/pytorch/aten/src/ATen/native/nested/
NestedTensorFactories.cpp
20 .memory_format(optional_memory_format); in verify_empty_parameters()
23 auto memory_format = in verify_empty_parameters() local
26 memory_format == MemoryFormat::Preserve || memory_format == MemoryFormat::Contiguous, in verify_empty_parameters()
28 memory_format, in verify_empty_parameters()
47 auto memory_format = options.memory_format_opt().value_or(MemoryFormat::Preserve); in empty_like_nested() local
48 if (memory_format == MemoryFormat::Contiguous) { in empty_like_nested()
57 memory_format == MemoryFormat::Preserve, in empty_like_nested()
105 // memory_format is handled separately due to MemoryFormat::Preserve logic in _to_copy_nested()
106 options = self.options().merge_in(options).memory_format(std::nullopt); in _to_copy_nested()
107 auto memory_format = optional_memory_format.value_or(MemoryFormat::Preserve); in _to_copy_nested() local
[all …]
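
Per the checks above, the nested-tensor factories accept only MemoryFormat::Preserve and MemoryFormat::Contiguous. A quick sketch (nested tensors are a prototype API, so details may shift between releases):

    import torch

    nt = torch.nested.nested_tensor([torch.randn(2, 3), torch.randn(4, 3)])
    same = torch.empty_like(nt)                                        # preserve_format
    flat = torch.empty_like(nt, memory_format=torch.contiguous_format)
    # torch.empty_like(nt, memory_format=torch.channels_last) would trip
    # the verify_empty_parameters check above.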
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/
Conv.cpp
158 auto memory_format = at::MemoryFormat::Contiguous; in mkldnn_convolution_memory_format() local
160 memory_format = dims == 4 ? at::MemoryFormat::ChannelsLast : at::MemoryFormat::ChannelsLast3d; in mkldnn_convolution_memory_format()
162 return memory_format; in mkldnn_convolution_memory_format()
177 auto memory_format = mkldnn_convolution_memory_format(input_t.ndimension(), is_channels_last); in _mkldnn_convolution_out() local
178 auto input = input_t.is_mkldnn() ? input_t : input_t.contiguous(memory_format); in _mkldnn_convolution_out()
179 auto weight = weight_t.is_mkldnn() ? weight_t : weight_t.contiguous(memory_format); in _mkldnn_convolution_out()
246 auto memory_format = in _mkldnn_convolution() local
253 output.resize_(output_sizes, memory_format); in _mkldnn_convolution()
401 auto memory_format = in mkldnn_convolution_pointwise_binary() local
403 auto input = input_t.contiguous(memory_format); in mkldnn_convolution_pointwise_binary()
[all …]
/aosp_15_r20/external/executorch/kernels/test/
op_full_like_test.cpp
32 optional<MemoryFormat> memory_format, in op_full_like_out() argument
35 context_, self, fill_value, memory_format, out); in op_full_like_out()
45 MemoryFormat memory_format = MemoryFormat::Contiguous; in test_full_like_out() local
48 op_full_like_out(in, value, memory_format, out); in test_full_like_out()
52 op_full_like_out(in, value, memory_format, out); in test_full_like_out()
63 MemoryFormat memory_format; in test_full_like_out_mismatched_shape() local
66 context_, op_full_like_out(in, value, memory_format, out)); in test_full_like_out_mismatched_shape()
77 MemoryFormat memory_format = MemoryFormat::Contiguous; in test_full_like_out() local
80 op_full_like_out(in, value, memory_format, out); in test_full_like_out()
84 op_full_like_out(in, value, memory_format, out); in test_full_like_out()
[all …]
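
This ExecuTorch test drives the out-variant of full_like with an explicit MemoryFormat. The equivalent eager PyTorch call:

    import torch

    x = torch.randn(2, 3, 4, 5)
    y = torch.full_like(x, 7.0, memory_format=torch.channels_last)
    print(torch.all(y == 7.0).item())                          # True
    print(y.is_contiguous(memory_format=torch.channels_last))  # True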
/aosp_15_r20/external/pytorch/torch/_inductor/
decomposition.py
514 memory_format: Optional[torch.memory_format] = None, argument
515 ) -> torch.memory_format:
517 if memory_format is torch.preserve_format or memory_format is None:
520 return memory_format
529 memory_format: Optional[torch.memory_format] = None, argument
537 ).to(memory_format=get_like_layout(self, memory_format))
546 memory_format: Optional[torch.memory_format] = None, argument
554 ).to(memory_format=get_like_layout(self, memory_format))
567 memory_format: torch.memory_format = torch.preserve_format, argument
576 ).to(memory_format=get_like_layout(self, memory_format))
[all …]
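
The decomposition resolves preserve_format (or an unset memory_format) to the input's suggested layout before calling .to(). A standalone re-implementation of that resolution; _suggest is a simplified stand-in for the internal suggest_memory_format utility:

    import torch
    from typing import Optional

    def _suggest(t: torch.Tensor) -> torch.memory_format:
        # Simplified: the real utility also handles 5-D and ambiguous strides.
        if t.dim() == 4 and t.is_contiguous(memory_format=torch.channels_last):
            return torch.channels_last
        return torch.contiguous_format

    def get_like_layout(
        self: torch.Tensor, memory_format: Optional[torch.memory_format] = None
    ) -> torch.memory_format:
        if memory_format is torch.preserve_format or memory_format is None:
            return _suggest(self)
        return memory_format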
/aosp_15_r20/external/executorch/exir/
dim_order_utils.py
12 Set of simple utilities for translating between torch.memory_format and dim_order
34 def get_memory_format(dim_order: Optional[List[int]]) -> torch.memory_format:
36 Given a dim_order try to map it to torch.memory_format
48 f"Failed to map a given dim_order: {dim_order} to a torch.memory_format"
53 memory_format: Optional[torch.memory_format], ndim: int argument
56 Given a memory_format and a tensor rank, generate a dim_order
58 if memory_format in [None, torch.preserve_format]:
60 elif memory_format == torch.contiguous_format:
62 elif memory_format == torch.channels_last:
66 f"Failed to generate dim_order for a given memory format: {memory_format}"
/aosp_15_r20/external/pytorch/aten/src/ATen/cudnn/
Descriptors.cpp
36 void TensorDescriptor::set(const at::Tensor &t, at::MemoryFormat memory_format, size_t pad) { in set() argument
38 memory_format == at::MemoryFormat::ChannelsLast || in set()
39 memory_format == at::MemoryFormat::ChannelsLast3d); in set()
43 auto memory_format = t.suggest_memory_format(); in set() local
45 memory_format == at::MemoryFormat::ChannelsLast || in set()
46 memory_format == at::MemoryFormat::ChannelsLast3d); in set()
124 void FilterDescriptor::set(const at::Tensor &t, const at::MemoryFormat memory_format, int64_t pad) { in set() argument
132 TORCH_CHECK(t.is_contiguous(memory_format), in set()
133 "cuDNN filters (a.k.a. weights) must be contiguous in desired memory_format\n", in set()
136 "cuDNN suggested memory_format: ", memory_format); in set()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mps/operations/
Convolution.mm
69 c10::MemoryFormat memory_format,
96 c10::MemoryFormat memory_format,
111 …descriptor_.dataLayout = (memory_format == at::MemoryFormat::Contiguous) ? MPSGraphTensorNamedData…
153 auto memory_format = input_t.suggest_memory_format();
154 bool is_channels_last = (memory_format == at::MemoryFormat::ChannelsLast) && !is3DConv;
162 is_macOS_15_0_or_newer ? memory_format : MemoryFormat::Contiguous);
197 switch (memory_format) {
230 MPSShape* inputShape = mps::getMPSShape(input_t, memory_format);
231 MPSShape* outputShape = mps::getMPSShape(output_t, memory_format);
235 …if (input_t.is_contiguous(memory_format) && output_t.is_contiguous(memory_format) && is_macOS_15_0…
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
BatchRulesUnaryOps.cpp
17 std::optional<MemoryFormat> memory_format) { in clone_batch_rule() argument
25 TORCH_CHECK(!memory_format.has_value() || memory_format == MemoryFormat::Preserve in clone_batch_rule()
26 || memory_format == MemoryFormat::Contiguous, in clone_batch_rule()
27 "NYI: Tensor.clone(memory_format) inside vmap is only supported with ", in clone_batch_rule()
28 "memory_format torch.preserve_format or torch.contiguous_format (got ", in clone_batch_rule()
29 *memory_format, ")"); in clone_batch_rule()
31 if (memory_format == MemoryFormat::Contiguous) { in clone_batch_rule()
42 auto result = at::clone(self_, memory_format); in clone_batch_rule()
46 TORCH_INTERNAL_ASSERT(!memory_format.has_value() || memory_format == MemoryFormat::Preserve); in clone_batch_rule()
47 auto result = at::clone(self, memory_format); in clone_batch_rule()
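
As the TORCH_CHECK spells out, clone under vmap currently accepts only preserve_format and contiguous_format. From Python:

    import torch

    xs = torch.randn(8, 3, 4)
    ok = torch.vmap(lambda x: x.clone(memory_format=torch.contiguous_format))(xs)
    # torch.vmap(lambda x: x.clone(memory_format=torch.channels_last))(xs)
    # would raise the "NYI: Tensor.clone(memory_format) inside vmap ..." error above.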
/aosp_15_r20/external/pytorch/torch/_C/
_nn.pyi.in
6 from torch import memory_format, Tensor
43 memory_format: memory_format,
44 ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
51 memory_format: memory_format,
52 ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
59 memory_format: memory_format,
60 ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
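
These stubs type the overloaded private parser behind nn.Module.to, which returns a (device, dtype, non_blocking, memory_format) tuple. From user code, the memory_format path reduces to:

    import torch

    model = torch.nn.Conv2d(3, 8, kernel_size=3)
    model = model.to(memory_format=torch.channels_last)  # converts 4-D params
    print(model.weight.is_contiguous(memory_format=torch.channels_last))  # True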
