Searched refs: SymIntArrayRef (Results 1 – 25 of 109) sorted by relevance

/aosp_15_r20/external/pytorch/aten/src/ATen/
EmptyTensor.h
33 SymIntArrayRef sizes,
42 SymIntArrayRef sizes,
43 SymIntArrayRef strides,
55 SymIntArrayRef size,
69 SymIntArrayRef size,
70 SymIntArrayRef stride,
124 SymIntArrayRef size,
150 SymIntArrayRef size,
151 SymIntArrayRef stride,
155 SymIntArrayRef size,
[all …]
TensorIndexing.h
210 const std::optional<SymIntArrayRef>& self_sizes) { in applySlice()
240 const std::optional<SymIntArrayRef>& self_sizes) { in applySelect()
392 inline SymIntArrayRef slicePrefix1sSize(const SymIntArrayRef& sizes) { in slicePrefix1sSize()
436 const std::optional<SymIntArrayRef>& prev_dim_result_sizes) { in handleDimInMultiDimIndexing()
520 const std::optional<SymIntArrayRef>& self_sizes) { in applySlicing()
536 std::optional<SymIntArrayRef> result_sizes = result.is_nested() in applySlicing()
537 ? std::optional<SymIntArrayRef>(std::nullopt) in applySlicing()
538 : std::optional<SymIntArrayRef>(result.sym_sizes()); in applySlicing()
612 std::optional<SymIntArrayRef> self_sizes = self.is_nested()
613 ? std::optional<SymIntArrayRef>(std::nullopt)
[all …]
EmptyTensor.cpp
125 SymIntArrayRef sizes, in computeStorageNbytesContiguous()
136 SymIntArrayRef sizes, in computeStorageNbytes()
137 SymIntArrayRef strides, in computeStorageNbytes()
207 SymIntArrayRef size, in empty_generic_symint()
248 SymIntArrayRef size, in empty_strided_symint_generic()
249 SymIntArrayRef stride, in empty_strided_symint_generic()
253 return _empty_strided_generic<SymIntArrayRef>(size, stride, allocator, ks, scalar_type); in empty_strided_symint_generic()
375 SymIntArrayRef size, in empty_symint_meta()
434 TensorBase empty_strided_symint_meta(SymIntArrayRef size, SymIntArrayRef stride, in empty_strided_symint_meta()
443 SymIntArrayRef size, in empty_strided_symint_meta()
[all …]
FunctionalInverses.cpp
29 static Tensor unsqueeze_copy_to(const Tensor & self, c10::SymIntArrayRef sizes, InverseReturnMode i… in unsqueeze_copy_to()
48 static Tensor unsqueeze_copy_to(const Tensor & self, IntArrayRef dim, c10::SymIntArrayRef sizes, In… in unsqueeze_copy_to()
148 …mutated_view, InverseReturnMode inverse_return_mode, at::SymIntArrayRef size, at::SymIntArrayRef s… in as_strided_inverse()
170 …nst Tensor& mutated_view, InverseReturnMode inverse_return_mode, at::SymIntArrayRef size, bool imp… in expand_inverse()
189 …mutated_view, InverseReturnMode inverse_return_mode, at::SymIntArrayRef size, at::SymIntArrayRef s… in _reshape_alias_inverse()
254 …nverseReturnMode inverse_return_mode, int64_t mutated_view_idx, c10::SymIntArrayRef split_sizes, i… in split_with_sizes_inverse()
399 …base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, at::SymIntArrayRef size) { in view_inverse()
ExpandUtils.h
24 SymIntArrayRef a,
25 SymIntArrayRef b);
28 infer_size_symdimvector(SymIntArrayRef a, SymIntArrayRef b);
487 const c10::SymIntArrayRef shape,
502 SymIntArrayRef shape, in is_expandable_to()
503 c10::SymIntArrayRef desired) { in is_expandable_to()
520 auto sym_shape = c10::SymIntArrayRef( in is_expandable_to()
522 auto sym_desired = c10::SymIntArrayRef( in is_expandable_to()
ExpandUtils.cpp
49 std::vector<SymInt> infer_size_symint(SymIntArrayRef a, SymIntArrayRef b) { in infer_size_symint()
57 SymDimVector infer_size_symdimvector(SymIntArrayRef a, SymIntArrayRef b) { in infer_size_symdimvector()
58 return infer_size_impl<SymDimVector, SymIntArrayRef>(a, b); in infer_size_symdimvector()
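The ExpandUtils hits above are the symbolic broadcasting helpers: infer_size_symint takes two SymIntArrayRef shapes and returns the broadcast shape as std::vector<SymInt>. A minimal sketch of calling it, assuming a build that links against ATen (the at:: namespace and header are taken from this listing and may differ between PyTorch versions):

    #include <ATen/ExpandUtils.h>
    #include <c10/core/SymInt.h>
    #include <c10/core/SymIntArrayRef.h>

    #include <iostream>
    #include <vector>

    int main() {
      // Two shapes expressed as SymInt (all concrete here), so the symbolic
      // broadcasting path can be exercised directly.
      std::vector<c10::SymInt> a{c10::SymInt(2), c10::SymInt(1), c10::SymInt(4)};
      std::vector<c10::SymInt> b{c10::SymInt(3), c10::SymInt(4)};

      // infer_size_symint applies the usual right-aligned broadcasting rules;
      // for these inputs the expected result is {2, 3, 4}.
      std::vector<c10::SymInt> out =
          at::infer_size_symint(c10::SymIntArrayRef(a), c10::SymIntArrayRef(b));

      for (const auto& s : out) {
        std::cout << s << ' ';
      }
      std::cout << '\n';
      return 0;
    }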
TensorUtils.cpp
78 void checkSize_symint(CheckedFrom c, const TensorGeometryArg& t, c10::SymIntArrayRef sizes) { in checkSize_symint()
407 c10::SymIntArrayRef oldshape, in computeStride()
408 c10::SymIntArrayRef oldstride, in computeStride()
409 c10::SymIntArrayRef newshape) { in computeStride()
410 auto toResult = [](const SymIntArrayRef& a) { return SymDimVector(a); }; in computeStride()
411 …return computeStride_impl<SymDimVector, c10::SymIntArrayRef, c10::SymInt>(oldshape, oldstride, new… in computeStride()
TensorUtils.h
100 c10::SymIntArrayRef sizes);
180 c10::SymIntArrayRef oldshape,
181 c10::SymIntArrayRef oldstride,
182 c10::SymIntArrayRef newshape);
TensorGeometry.h
18 explicit TensorGeometry(c10::SymIntArrayRef sizes) in TensorGeometry()
78 c10::SymIntArrayRef sym_sizes() const { in sym_sizes()
85 c10::SymIntArrayRef sym_strides() const { in sym_strides()
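Per the TensorGeometry.h lines above, a TensorGeometry can be built directly from a SymIntArrayRef of sizes and queried back through sym_sizes()/sym_strides(). A rough usage sketch under those assumptions (that the constructor fills in contiguous strides is assumed, not shown in the listing):

    #include <ATen/TensorGeometry.h>
    #include <c10/core/SymInt.h>
    #include <c10/core/SymIntArrayRef.h>

    #include <iostream>
    #include <vector>

    int main() {
      // Sizes as SymInt; all concrete integers in this sketch.
      std::vector<c10::SymInt> sizes{c10::SymInt(2), c10::SymInt(3), c10::SymInt(5)};

      // The explicit TensorGeometry(c10::SymIntArrayRef) constructor seen in the
      // listing; assumed to compute contiguous strides for the given sizes.
      at::TensorGeometry geom{c10::SymIntArrayRef(sizes)};

      // sym_sizes()/sym_strides() hand the geometry back as SymIntArrayRef.
      std::cout << "sizes:";
      for (const auto& s : geom.sym_sizes()) {
        std::cout << ' ' << s;
      }
      std::cout << "  strides:";
      for (const auto& s : geom.sym_strides()) {
        std::cout << ' ' << s;
      }
      std::cout << '\n';
      return 0;
    }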
/aosp_15_r20/external/pytorch/c10/core/
SymIntArrayRef.h
12 using SymIntArrayRef = ArrayRef<SymInt>; variable
14 inline at::IntArrayRef asIntArrayRefUnchecked(c10::SymIntArrayRef ar) { in asIntArrayRefUnchecked()
25 c10::SymIntArrayRef ar) { in asIntArrayRefSlowOpt()
36 c10::SymIntArrayRef ar, in asIntArrayRefSlow()
53 c10::SymIntArrayRef ar, in asIntArrayRefSlowAlloc()
69 inline SymIntArrayRef fromIntArrayRefUnchecked(IntArrayRef array_ref) { in fromIntArrayRefUnchecked()
70 return SymIntArrayRef( in fromIntArrayRefUnchecked()
74 inline SymIntArrayRef fromIntArrayRefKnownNonNegative(IntArrayRef array_ref) { in fromIntArrayRefKnownNonNegative()
78 inline SymIntArrayRef fromIntArrayRefSlow(IntArrayRef array_ref) { in fromIntArrayRefSlow()
85 return SymIntArrayRef( in fromIntArrayRefSlow()
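SymIntArrayRef.h is where the alias itself lives: a SymIntArrayRef is just ArrayRef<SymInt>, plus checked and unchecked conversions to and from plain IntArrayRef. A small sketch of round-tripping a concrete shape through those helpers (hedged: only the functions named in the listing are used, and their exact checking behaviour is assumed):

    #include <c10/core/SymInt.h>
    #include <c10/core/SymIntArrayRef.h>
    #include <c10/util/ArrayRef.h>

    #include <iostream>
    #include <vector>

    int main() {
      // A concrete shape as a plain IntArrayRef (a view over this vector).
      std::vector<int64_t> sizes{2, 3, 4};
      c10::IntArrayRef int_sizes(sizes);

      // fromIntArrayRefSlow is the checked direction: it verifies each element
      // can be represented as a SymInt before reinterpreting the view.
      c10::SymIntArrayRef sym_sizes = c10::fromIntArrayRefSlow(int_sizes);

      // Because every element here is a plain integer, going back with the
      // unchecked conversion is safe in this sketch.
      c10::IntArrayRef round_trip = c10::asIntArrayRefUnchecked(sym_sizes);

      for (auto v : round_trip) {
        std::cout << v << ' ';
      }
      std::cout << '\n';
      return 0;
    }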
SymbolicShapeMeta.cpp
33 normalize_sym_sizes_strides(SymIntArrayRef sizes, SymIntArrayRef strides) { in normalize_sym_sizes_strides()
82 c10::SymIntArrayRef sizes(sizes_); in compute_contiguous()
83 c10::SymIntArrayRef strides(strides_); in compute_contiguous()
93 c10::SymIntArrayRef sizes(sizes_); \
94 c10::SymIntArrayRef strides(strides_); \
108 c10::SymIntArrayRef sizes(sizes_); \
109 c10::SymIntArrayRef strides(strides_); \
TensorImpl.cpp
353 c10::SymIntArrayRef TensorImpl::sym_sizes_custom() const { in sym_sizes_custom()
367 c10::SymIntArrayRef TensorImpl::sym_strides_custom() const { in sym_strides_custom()
841 static void clone_symvec(SymIntArrayRef src, SymDimVector& dst) { in clone_symvec()
854 c10::SymIntArrayRef sizes, in set_sizes_and_strides()
855 c10::SymIntArrayRef strides, in set_sizes_and_strides()
895 void TensorImpl::generic_set_sizes_contiguous(SymIntArrayRef sizes) { in generic_set_sizes_contiguous()
/aosp_15_r20/external/pytorch/torch/csrc/autograd/
FunctionsManual.h
176 at::SymIntArrayRef sizes,
181 c10::SymIntArrayRef sizes,
190 std::vector<c10::SymInt> reverse_list_symint(const c10::SymIntArrayRef list);
256 at::Tensor unsqueeze_to(const at::Tensor& self, c10::SymIntArrayRef sym_sizes);
260 c10::SymIntArrayRef sym_sizes);
264 c10::SymIntArrayRef sym_sizes);
301 at::SymIntArrayRef strides_or_error(
307 at::SymIntArrayRef mat1_sizes,
308 at::SymIntArrayRef mat1_strides,
314 at::SymIntArrayRef sizes,
[all …]
FunctionsManual.cpp
152 static c10::SymInt _safe_size(c10::SymIntArrayRef sizes, c10::IntArrayRef dim) { in _safe_size()
708 c10::SymIntArrayRef sizes, in sum_backward()
722 c10::SymIntArrayRef sizes, in sum_backward()
746 c10::SymIntArrayRef shape, in mean_backward()
756 std::vector<c10::SymInt> reverse_list_symint(const c10::SymIntArrayRef list) { in reverse_list_symint()
981 c10::SymIntArrayRef sizes; in unbind_backward()
1035 Tensor unsqueeze_to(const Tensor& self, c10::SymIntArrayRef sym_sizes) { in unsqueeze_to()
1050 c10::SymIntArrayRef sym_sizes) { in unsqueeze_to()
1066 c10::SymIntArrayRef sym_sizes) { in unsqueeze_to()
1296 at::SymIntArrayRef stride, in convolution_jvp()
[all …]
python_variable_indexing.cpp
199 std::optional<SymIntArrayRef> result_sizes = result.is_nested() in applySlicing()
200 ? std::optional<SymIntArrayRef>(std::nullopt) in applySlicing()
201 : std::optional<SymIntArrayRef>(result.sym_sizes()); in applySlicing()
532 SymIntArrayRef valueSizes = value.sym_sizes(); in THPVariable_setitem()
533 SymIntArrayRef slicedValueSizes = in THPVariable_setitem()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
ConvUtils.h
126 SymIntArrayRef stride, SymIntArrayRef padding, SymIntArrayRef dilation,
127 …bool transposed, SymIntArrayRef output_padding, c10::SymInt groups, const at::OptionalSymIntArrayR…
244 SymIntArrayRef input_size, SymIntArrayRef weight_size,
245 SymIntArrayRef padding, SymIntArrayRef stride, SymIntArrayRef dilation = SymIntArrayRef()
270 SymIntArrayRef output_size, SymIntArrayRef weight_size, in conv_input_size()
271 SymIntArrayRef padding, SymIntArrayRef output_padding, SymIntArrayRef stride, SymIntArrayRef dilat… in conv_input_size()
301 SymIntArrayRef input_size, SymIntArrayRef output_size, in conv_weight_size()
302 SymIntArrayRef padding, SymIntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, i… in conv_weight_size()
Convolution.cpp
271 const SymIntArrayRef padding, in xnnpack_use_convolution2d()
272 const SymIntArrayRef stride, in xnnpack_use_convolution2d()
273 const SymIntArrayRef dilation, in xnnpack_use_convolution2d()
849 SymIntArrayRef stride, in complex_convolution()
850 SymIntArrayRef padding, in complex_convolution()
851 SymIntArrayRef dilation, in complex_convolution()
853 SymIntArrayRef output_padding, in complex_convolution()
887 c10::SymIntArrayRef stride, in complex_convolution_mode()
889 c10::SymIntArrayRef dilation, in complex_convolution_mode()
917 SymIntArrayRef stride, SymIntArrayRef padding, SymIntArrayRef dilation, c10::SymInt groups) { in conv1d_symint()
[all …]
ComplexHelper.h
23 c10::SymInt offset, SymIntArrayRef sizes, SymIntArrayRef strides) { in view_tensor()
33 inline SymDimVector computeStrideForViewAsReal(SymIntArrayRef oldstride) { in computeStrideForViewAsReal()
64 inline SymDimVector computeStrideForViewAsComplex(SymIntArrayRef oldstride) { in computeStrideForViewAsComplex()
MetaTensor.cpp
15 SymIntArrayRef size, in empty_meta_symint()
32 SymIntArrayRef size, in empty_strided_meta_symint()
33 SymIntArrayRef stride, in empty_strided_meta_symint()
Resize.cpp
44 bool resize_output_check_symint(const Tensor& output, SymIntArrayRef shape) { in resize_output_check_symint()
52 static void native_resize_(const Tensor& output, SymIntArrayRef shape) { in native_resize_()
80 bool resize_output_symint(const Tensor& output, SymIntArrayRef shape) { in resize_output_symint()
271 c10::SymIntArrayRef size, in resize__symint()
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
BatchRulesConvolution.cpp
20 … bias_bdim, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation,… in convolution_batch_rule()
172 …c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transp… in convolution_backward_input_batch_rule()
173 c10::SymIntArrayRef output_padding, const c10::SymInt& groups) { in convolution_backward_input_batch_rule()
253 …c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transp… in convolution_backward_weight_batch_rule()
254 c10::SymIntArrayRef output_padding, const c10::SymInt& groups) { in convolution_backward_weight_batch_rule()
364 …c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transp… in convolution_backward_plumbing()
365 c10::SymIntArrayRef output_padding, c10::SymInt groups, std::array<bool, 3> output_mask) { in convolution_backward_plumbing()
LegacyBatchingRegistrations.cpp
249 std::vector<Tensor> split_with_sizes_batching_rule(const Tensor& self, SymIntArrayRef split_sizes, … in split_with_sizes_batching_rule()
261 std::vector<Tensor> split_with_sizes_copy_batching_rule(const Tensor& self, SymIntArrayRef split_si… in split_with_sizes_copy_batching_rule()
289 c10::SymIntArrayRef sizes, c10::SymIntArrayRef strides, const c10::SymInt& storage_offset) { in maximum_indexable_location()
304 c10::SymIntArrayRef sizes, in checkBasicAsStridedValidForSlice()
305 c10::SymIntArrayRef strides, in checkBasicAsStridedValidForSlice()
362 c10::SymIntArrayRef sizes, in as_strided_batching_rule()
363 c10::SymIntArrayRef strides, in as_strided_batching_rule()
629 SymIntArrayRef sym_size, in new_empty_strided_batching_rule()
630 SymIntArrayRef sym_stride, in new_empty_strided_batching_rule()
BatchRulesViews.cpp
107 c10::SymIntArrayRef sizes) { in repeat_batch_rule()
122 c10::SymIntArrayRef size) { in _unsafe_view_batch_rule()
271 …ensor& self, std::optional<int64_t> bdim, const c10::SymIntArrayRef shape, const c10::SymIntArrayR… in _reshape_alias_batch_rule()
282 …t>> roll_batch_rule(const Tensor& self, std::optional<int64_t> bdim, SymIntArrayRef shifts, IntArr… in roll_batch_rule()
322 c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) { in diagonal_backward_batch_rule()
394 c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) { in select_backward_batch_rule()
407 SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) { in slice_backward_batch_rule()
419 const Tensor &self, std::optional<int64_t> self_bdim, SymIntArrayRef sym_size) in view_batching_rule()
433 c10::SymIntArrayRef size) { in view_copy_batch_rule()
445 const Tensor &self, std::optional<int64_t> self_bdim, SymIntArrayRef size, bool implicit) in expand_batch_rule()
BatchRulesRandomness.cpp
22 Tensor random_batching_rule(SymIntArrayRef shape, ExtraArgs... extra_args) { in random_batching_rule()
260 static Tensor apply(SymIntArrayRef shape, T... extra_args) { in apply()
266 Tensor rand_int_wrapper(SymIntArrayRef shape, c10::SymInt high, T... extra_args) { in rand_int_wrapper()
285 static Tensor apply(c10::SymInt high, SymIntArrayRef shape, T... extra_args) { in apply()
293 Tensor rand_int_low_wrapper(SymIntArrayRef shape, T0 scalar0, T1 scalar1, T... extra_args) { in rand_int_low_wrapper()
302 static Tensor apply(T0 scalar0, T1 scalar1, SymIntArrayRef shape, T... extra_args) { in apply()
/aosp_15_r20/external/pytorch/torch/csrc/lazy/ts_backend/
ts_native_functions.cpp
273 at::SymIntArrayRef sym_size, in empty_symint()
303 at::SymIntArrayRef sym_size, in empty_strided_symint()
304 at::SymIntArrayRef sym_stride, in empty_strided_symint()
408 c10::SymIntArrayRef size, in new_empty_strided_symint()
409 c10::SymIntArrayRef stride, in new_empty_strided_symint()
441 c10::SymIntArrayRef input_sizes, in select_backward_symint()
506 at::SymIntArrayRef input_sizes, in diagonal_backward_symint()
516 at::SymIntArrayRef input_sizes, in slice_backward_symint()
