
Searched full:as_strided (Results 1 – 25 of 187) sorted by relevance


/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
LegacyBatchingRegistrations.cpp
299 // x.as_strided(sizes, strides, maybe_storage_offset)
321 "result = tensor.as_strided(", sizes, ", ", strides, ", ", storage_offset, ") ", in checkBasicAsStridedValidForSlice()
325 "`as_strided` call as a sequence of PyTorch view operations"); in checkBasicAsStridedValidForSlice()
330 "result = tensor.as_strided(", sizes, ", ", strides, ", ", storage_offset, ") ", in checkBasicAsStridedValidForSlice()
335 "rewrite the `as_strided` call as a sequence of PyTorch view operations"); in checkBasicAsStridedValidForSlice()
338 // What are the semantics of as_strided inside of vmap?
339 // y = vmap(lambda x: x.as_strided(sizes, strides, offset))(xs)
346 // offset equal to xs.offset() and called as_strided(sizes, sizes, offset).
347 // (that is equivalent to x[i].as_strided(
350 // Note that this *may* be different from actually running as_strided
[all …]
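
Aside: a minimal Python sketch of the per-sample semantics these comments describe, assuming a recent PyTorch with torch.func.vmap; the offset passed inside vmap is interpreted relative to each batch slice xs[i], not to the storage as a whole.

    import torch
    from torch.func import vmap

    xs = torch.arange(12.0).reshape(3, 4)  # batch of three 4-element samples

    # as_strided applied per sample under vmap
    y = vmap(lambda x: x.as_strided((2,), (2,), 1))(xs)

    # Loop-based equivalent (xs.storage_offset() is 0 here): treat each
    # xs[i] as a fresh tensor starting at its own storage offset.
    expected = torch.stack([
        xs[i].as_strided((2,), (2,), 1 + xs[i].storage_offset())
        for i in range(xs.size(0))
    ])
    assert torch.equal(y, expected)
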
/aosp_15_r20/external/pytorch/aten/src/ATen/
LegacyBatchingRegistrations.cpp
501 "vmap: Calling Tensor.as_strided is not supported unless the batch dims being ", in checkBatchDimsAtFrontInLayout()
505 "express the as_strided operation in terms of PyTorch view operations"); in checkBatchDimsAtFrontInLayout()
522 // x.as_strided(sizes, strides, maybe_storage_offset)
544 "result = tensor.as_strided(", sizes, ",", strides, ",", storage_offset, ")", in checkBasicAsStridedValidForSlice()
548 "`as_strided` call as a sequence of PyTorch view operations"); in checkBasicAsStridedValidForSlice()
553 "result = tensor.as_strided(", sizes, ",", strides, ",", storage_offset, ")", in checkBasicAsStridedValidForSlice()
558 "rewrite the `as_strided` call as a sequence of PyTorch view operations"); in checkBasicAsStridedValidForSlice()
586 // tensor because using as_strided to access storage locations not indexable in _has_same_storage_numel_batching_rule()
591 // What are the semantics of as_strided inside of vmap?
592 // y = vmap(lambda x: x.as_strided(sizes, strides, offset))(xs)
[all …]
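
The error text above asks callers to rewrite as_strided as a sequence of view operations. A hypothetical example of such a rewrite (sizes, strides, and offset chosen for illustration):

    import torch

    x = torch.arange(16.0).reshape(4, 4)

    # A raw as_strided call...
    strided = x.as_strided((2, 2), (8, 1), 1)

    # ...expressed instead as composable view ops (slicing with a step):
    rewritten = x[::2, 1:3]

    assert torch.equal(strided, rewritten)
    assert strided.stride() == rewritten.stride() == (8, 1)
    assert strided.storage_offset() == rewritten.storage_offset() == 1
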
FunctionalStorageImpl.cpp
51 // NB: We only actually need tmp_values for ops like select/slice/diagonal/squeeze/as_strided in apply_update()
122 " was an as_strided() call. as_strided() is non-compositional, and therefore is not possible to fun… in add_update()
/aosp_15_r20/external/pytorch/test/profiler/
profiler_utils_mock_events.json
1 …as_strided", "_start_us": 1656454173444282, "_duration_us": 4, "_linked_correlation_id": 0, "_devi…
test_profiler_tree.py
612 aten::as_strided
615 aten::as_strided
659 aten::as_strided
662 aten::as_strided
969 aten::as_strided
977 aten::as_strided
1013 aten::as_strided
1020 aten::as_strided
1038 aten::as_strided
/aosp_15_r20/external/pytorch/test/
test_fx_reinplace_pass.py
208 good_mirror_of_b = a.as_strided((4,), (4,), 1)
234 as_strided = torch.ops.aten.as_strided.default(clone, [4], [4], 1); clone = None
235 return as_strided
247 good_mirror_of_b = a.as_strided((4,), (4,), 1)
270 as_strided = torch.ops.aten.as_strided.default(clone, [4], [4], 1); clone = None
271 select_int = torch.ops.aten.select.int(as_strided, 0, 0)
273 return as_strided
282 bad_mirror_of_b = a.as_strided((4,), (4,), 0)
303 as_strided = torch.ops.aten.as_strided.default(clone, [4], [4], 0); clone = None
304 select_int = torch.ops.aten.select.int(as_strided, 0, 1)
[all …]
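
For orientation, a sketch of the "mirror" idea these tests rely on, assuming a is a contiguous 4x4 tensor and b = a[:, 1] as in the test: size (4,), stride (4,), offset 1 addresses exactly column 1, so the view aliases b; with offset 0 it addresses column 0 and does not.

    import torch

    a = torch.randn(4, 4)
    b = a[:, 1]

    # Same storage locations as b: a valid "mirror".
    good_mirror_of_b = a.as_strided((4,), (4,), 1)
    assert torch.equal(good_mirror_of_b, b)

    # Aliases a, but covers column 0 rather than b's storage.
    bad_mirror_of_b = a.as_strided((4,), (4,), 0)
    assert torch.equal(bad_mirror_of_b, a[:, 0])
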
test_jiterator.py
34 a = a_buffer.as_strided(*shape_strides[0])
35 b = b_buffer.as_strided(*shape_strides[1])
55 a = a_buffer.as_strided(*shape_strides[0])
56 b = b_buffer.as_strided(*shape_strides[1])
test_functionalization.py
568 y = x.as_strided((2,), (2,), 1)
601 as_strided = torch.ops.aten.as_strided.default(arg0_1, [2], [2], 1)
602 add = torch.ops.aten.add.Tensor(as_strided, 1); as_strided = None
604 …as_strided_1 = torch.ops.aten.as_strided.default(as_strided_scatter, [2], [2], 1); as_strided_1 =…
1718 as_strided = torch.ops.aten.as_strided.default(view, [3, 3], [3, 1]); view = None
1719 view_1 = torch.ops.aten.view.default(as_strided, [-1]); as_strided = None
1722 as_strided_1 = torch.ops.aten.as_strided.default(view_2, [3, 3], [3, 1]); as_strided_1 = None
1726 as_strided_2 = torch.ops.aten.as_strided.default(view_5, [3, 3], [3, 1]); view_5 = None
1729 as_strided_3 = torch.ops.aten.as_strided.default(view_7, [3, 3], [3, 1]); view_7 = None
/aosp_15_r20/external/pytorch/torch/csrc/autograd/functions/
tensor.h
82 // from forward pass, so that we can recover views when as_strided is not
87 // When as_strided is supported (e.g. strided CPU/CUDA Tensors), view_fn_
89 // With the TensorGeometry information we can use `as_strided` call which
99 // In CPU/CUDA case where we support efficient as_strided implementation,
102 // grad_view_n = grad_base.as_strided(view_sizes, view_strides, view_offset);
104 // But in XLA backend where we don't have full support of as_strided,
113 // efficient than the as_strided one so we should be careful to only use it when
117 // That's all we need to pass into as_strided.
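
A Python rendering of the recovery path sketched in the grad_view_n comment above: with the view's TensorGeometry (sizes, strides, storage offset) saved in forward, the view's gradient slice is re-derived from grad_base by a single as_strided call.

    import torch

    base = torch.randn(4, 4)
    view = base.as_strided((2, 2), (8, 1), 1)   # any strided view of base

    # TensorGeometry saved from the forward pass:
    sizes, strides, offset = view.size(), view.stride(), view.storage_offset()

    # In backward, given a gradient with base's geometry, the view's
    # gradient is recovered without replaying the original view op:
    grad_base = torch.randn_like(base)
    grad_view = grad_base.as_strided(sizes, strides, offset)
    assert grad_view.shape == view.shape
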
/aosp_15_r20/external/pytorch/torch/_functorch/_aot_autograd/
functional_utils.py
264 # fall back to .as_strided() if we can't.
269 # Don't unnecessarily call as_strided if nothing changed; as_strided's
276 reshaped_base_tensor = aliased_base_tensor.as_strided(
287 # As a stopgap, we'll fall back to as_strided.
295 aliased_out = torch.view_as_real(aliased_base_tensor).as_strided(
299 aliased_out = torch.view_as_complex(aliased_base_tensor).as_strided(
303 aliased_out = aliased_base_tensor.as_strided(size, stride, storage_offset)
307 # as_strided() is the "most generic" view, but it does not cover cross-dtype views
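
A rough illustration of the stopgap described above, assuming a complex base whose alias was produced through a dtype-changing view: as_strided alone cannot change dtype, so the dtype view is composed with as_strided.

    import torch

    base = torch.randn(4, dtype=torch.complex64)
    target = torch.view_as_real(base)            # f32 alias, shape (4, 2)

    # Regenerate the alias from base: dtype-changing view first,
    # then as_strided to reapply the exact geometry.
    regenerated = torch.view_as_real(base).as_strided(
        target.size(), target.stride(), target.storage_offset()
    )
    assert regenerated.dtype == torch.float32
    assert torch.equal(regenerated, target)
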
/aosp_15_r20/external/pytorch/test/inductor/
test_torchinductor_strided_blocks.py
167 return torch.as_strided(full, view_size, view_stride, storage_offset=offset)
212 view = torch.as_strided(full, view_size, full.stride())
264 view = torch.as_strided(full, view_size, full.stride())
302 view = torch.as_strided(full, view_size, full.stride())
328 view = torch.as_strided(full, view_size, full.stride())
351 view = torch.as_strided(full, view_size, full.stride())
366 view = torch.as_strided(full, view_size, full.stride())
424 return torch.as_strided(full, view_size, full.stride())
test_auto_functionalize.py
695 as_strided_default: "f32[][]cpu" = torch.ops.aten.as_strided.default(arg1_1, [], [], 0)
696 as_strided_default_1: "f32[][]cpu" = torch.ops.aten.as_strided.default(arg1_1, [], [], 1)
708 as_strided_default: "f32[][]cpu" = torch.ops.aten.as_strided.default(arg0_1, [], [], 0)
709 as_strided_default_1: "f32[][]cpu" = torch.ops.aten.as_strided.default(arg0_1, [], [], 1)
775 as_strided_default: "f32[][]cpu" = torch.ops.aten.as_strided.default(arg0_1, [], [], 0)
776 as_strided_default_1: "f32[][]cpu" = torch.ops.aten.as_strided.default(arg0_1, [], [], 1)
845 as_strided_default: "f32[][]cpu" = torch.ops.aten.as_strided.default(arg0_1, [], [], 0)
846 as_strided_default_1: "f32[][]cpu" = torch.ops.aten.as_strided.default(arg1_1, [], [], 0)
/aosp_15_r20/external/pytorch/benchmarks/operator_benchmark/pt/
as_strided_test.py
8 """Microbenchmarks for as_strided operator"""
11 # Configs for PT as_strided operator
44 self.set_module_name("as_strided")
49 return torch.as_strided(input_one, size, stride, storage_offset)
/aosp_15_r20/external/pytorch/test/functorch/
test_aotdispatch.py
2579 a = x.as_strided((4, 4), (8, 1), storage_offset=0)
2580 b = x.as_strided((4, 4), (8, 1), storage_offset=28)
2595 a = x.as_strided((4, 4), (9, 1), storage_offset=0)
2596 b = x.as_strided((4, 4), (9, 1), storage_offset=22)
2603 a = x.as_strided((4, 4), (9, 1), storage_offset=0)
2604 b = x.as_strided((4, 4), (9, 1), storage_offset=23)
2612 a = x.as_strided((2, 4, 3), (110, 24, 4), storage_offset=5)
2621 a = x.as_strided((4, 4), (9, 1), storage_offset=0)
2622 b = x.as_strided((4, 4), (9, 1), storage_offset=24)
2629 a = x.as_strided((4, 4), (9, 1), storage_offset=0)
[all …]
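
These cases construct aliases whose as_strided windows physically overlap. A small sketch, mirroring the (9, 1) stride and offsets 0 and 24 above: both views address storage element 27, so a write through one is visible through the other.

    import torch

    x = torch.zeros(64)
    a = x.as_strided((4, 4), (9, 1), storage_offset=0)
    b = x.as_strided((4, 4), (9, 1), storage_offset=24)

    # a[3, 0] and b[0, 3] both live at storage offset 27.
    a[3, 0] = 5.0
    assert b[0, 3].item() == 5.0
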
test_ops.py
427 xfail("as_strided"),
428 xfail("as_strided", "partial_views"),
586 xfail("as_strided"),
587 xfail("as_strided", "partial_views"),
758 xfail("as_strided"),
761 xfail("as_strided", "partial_views"),
935 xfail("as_strided"), # incorrect output
936 xfail("as_strided", "partial_views"), # incorrect output
1063 xfail("as_strided", "partial_views"),
1142 xfail("as_strided", "partial_views"),
[all …]
/aosp_15_r20/external/pytorch/torch/_prims/
__init__.py
141 "as_strided",
1225 # as_strided to shapes with no elements are trivially valid, so it's OK
1232 return torch.as_strided(a, size, stride, storage_offset)
1238 return torch.as_strided(a, size, stride, storage_offset)
1246 as_strided = _make_prim( variable
1247 …schema="as_strided(Tensor(a!) a, SymInt[] size, SymInt[] stride, SymInt storage_offset) -> Tensor(…
1310 return a.as_strided(shape, new_strides, a.storage_offset())
1440 return a.as_strided(new_shape, new_strides, a.storage_offset())
1477 out = a.as_strided(a.shape, a.stride(), a.storage_offset())
1585 return a.as_strided(new_shape, new_strides, a.storage_offset())
[all …]
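
Several of the prim implementations above reduce other view ops to a single as_strided call that preserves the input's storage offset. A minimal sketch of the pattern:

    import torch

    a = torch.randn(6)

    # A contiguous 2x3 reshape, expressed directly via as_strided while
    # carrying the original storage offset through, as the prims above do.
    reshaped = a.as_strided((2, 3), (3, 1), a.storage_offset())
    assert torch.equal(reshaped, a.view(2, 3))
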
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
ScatterGatherKernel.cu
181 : self.as_strided(index_sizes, self_strides); in operator ()()
183 src.as_strided(index_sizes, src_strides) in operator ()()
239 : self.as_strided(index_sizes, self_strides); in operator ()()
241 src.as_strided(index_sizes, src_strides) in operator ()()
298 : self.as_strided(index_sizes, self_strides); in operator ()()
300 src.as_strided(index_sizes, src_strides) in operator ()()
/aosp_15_r20/external/pytorch/torch/_inductor/
freezing.py
190 by adding aten.as_strided nodes with the expected strides.
220 Make sure the as_strided node's input's layout does not change due to compiler
221 optimizations, because the as_strided strides info depends on input tensor stride info.
225 torch.ops.aten.as_strided.default,
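
A hypothetical illustration of the layout-pinning described above: after constant folding may have re-laid-out a tensor, an explicit aten.as_strided node fixes the stride contract downstream code was compiled against.

    import torch

    folded = torch.randn(4, 4)                   # layout after folding
    expected_size, expected_stride = [4, 4], [4, 1]

    # Pin the layout with the same overload the pass inserts.
    pinned = torch.ops.aten.as_strided.default(
        folded, expected_size, expected_stride, 0
    )
    assert pinned.stride() == (4, 1)
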
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
UnfoldBackward.h
55 auto grad_out_restrided = grad_out.as_strided( in _make_unfold_backward_iter_over_grad_out()
73 auto grad_in_restrided = grad_in.squeeze(-1).as_strided( in _make_unfold_backward_iter_over_grad_out()
95 auto idx_dim_restrided = idx_dim.as_strided(idx_dim_sizes, idx_dim_strides); in _make_unfold_backward_iter_over_grad_out()
FunctionOfAMatrixUtils.cpp
67 auto output_restrided = output.as_strided( in _compute_linear_combination_out()
78 auto input_restrided = input.as_strided( in _compute_linear_combination_out()
90 auto coefficients_restrided = coefficients.as_strided( in _compute_linear_combination_out()
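
Both kernels restride their operands so one elementwise loop can traverse them. A loose Python analogue of the trick, using a zero stride to repeat a coefficient vector across a dimension without copying:

    import torch

    coeffs = torch.arange(3.0)                    # shape (3,)

    # Stride 0 along dim 1: row i repeats coeffs[i] in place.
    restrided = coeffs.as_strided((3, 4), (1, 0))
    out = restrided * torch.ones(3, 4)
    assert torch.equal(out, coeffs.unsqueeze(1).expand(3, 4))
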
/aosp_15_r20/external/pytorch/torch/csrc/autograd/
autograd_meta.cpp
48 // because of as_strided. conj/neg bit must be part of this metadata because
90 // - Make sure that when the same as_strided is applied to both primal and
238 new_fw_grad_value = new_base_fw_grad.as_strided( in set_fw_grad()
305 new_val = base_val.as_strided( in fw_grad()
variable.cpp
31 // NB: On mobile, the as_strided() op and thus the generated AsStridedViewFunc
38 return std::make_unique<ErroringViewFunc>("as_strided() not available"); in create_view_func_matching()
78 // `view_func` is used to recover views in backward when either as_strided is in chain()
80 // recorded by as_strided See Note [View + Inplace update on base tensor] and in chain()
115 // view_func() AND as_strided() isn't supported; there's no obvious way in chain()
119 "does not support as_strided(). This is not supported."); in chain()
/aosp_15_r20/external/pytorch/test/typing/pass/
creation_ops.py
39 # torch.as_strided
41 torch.as_strided(x, (2, 2), (1, 2))
42 torch.as_strided(x, (2, 2), (1, 2), 1)
/aosp_15_r20/external/pytorch/test/typing/reveal/
tensor_constructors.py
46 # torch.as_strided
48 reveal_type(torch.as_strided(x, (2, 2), (1, 2))) # E: {Tensor}
49 reveal_type(torch.as_strided(x, (2, 2), (1, 2), 1)) # E: {Tensor}
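
For reference, a concrete run of the two call signatures the typing tests pin down (values assume a contiguous 1-D input of length 6):

    import torch

    x = torch.arange(6.0)                          # storage: [0..5]

    print(torch.as_strided(x, (2, 2), (1, 2)))     # tensor([[0., 2.], [1., 3.]])
    print(torch.as_strided(x, (2, 2), (1, 2), 1))  # tensor([[1., 3.], [2., 4.]])
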
/aosp_15_r20/external/pytorch/benchmarks/instruction_counts/definitions/
standard.py
193 # @as_strided | // @as_strided
194 torch.as_strided(x, (2, 3), (4, 1), 2) | torch::as_strided(x, {2, 3}, {4, 1}, 2);
