
Searched full:ndim (Results 1 – 25 of 672) sorted by relevance


/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
PaddingKernel.cpp
16 int ndim; member
30 ndim = padding.size() / 2; in PaddingParams()
32 bool is_batch = input.dim() == ndim + 2; in PaddingParams()
40 for (const auto d : c10::irange(ndim)) { in PaddingParams()
51 if (ndim == 1) { in PaddingParams()
53 } else if (ndim == 2) { in PaddingParams()
61 for (const auto d : c10::irange(ndim)) { in PaddingParams()
145 int ndim = p.ndim; in cpu_padding() local
146 int64_t input_depth = ndim == 3 ? p.ishape[ndim - 3] : 1; in cpu_padding()
147 int64_t input_height = ndim >=2 ? p.ishape[ndim - 2] : 1; in cpu_padding()
[all …]
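The PaddingKernel.cpp hits show a recurring convention: the spatial rank is derived from the padding vector (two values per padded dimension) and a batch dimension is detected by comparing input.dim() against ndim + 2. A minimal Python sketch of that bookkeeping, with illustrative names rather than the actual PyTorch internals:

def padding_params(input_shape, padding):
    # two padding values (before, after) per spatial dimension
    ndim = len(padding) // 2
    # input is either (C, *spatial) or (N, C, *spatial)
    is_batch = len(input_shape) == ndim + 2
    spatial = input_shape[2:] if is_batch else input_shape[1:]
    # normalize to (depth, height, width), defaulting missing dims to 1
    depth = spatial[-3] if ndim >= 3 else 1
    height = spatial[-2] if ndim >= 2 else 1
    width = spatial[-1]
    return is_batch, (depth, height, width)

print(padding_params((2, 3, 8, 8), (1, 1, 2, 2)))  # (True, (1, 8, 8))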
UpSampleMoreKernel.cpp
106 auto ndim = input_sizes.size(); in cpu_upsample_nearest_backward() local
110 int64_t input_depth = (ndim == 5) ? input_sizes[2] : 1; in cpu_upsample_nearest_backward()
111 int64_t output_depth = (ndim == 5) ? output_sizes[2] : 1; in cpu_upsample_nearest_backward()
112 int64_t input_height = (ndim >= 4) ? input_sizes[ndim - 2] : 1; in cpu_upsample_nearest_backward()
113 int64_t output_height = (ndim >= 4) ? output_sizes[ndim - 2] : 1; in cpu_upsample_nearest_backward()
114 int64_t input_width = input_sizes[ndim - 1]; in cpu_upsample_nearest_backward()
115 int64_t output_width = output_sizes[ndim - 1]; in cpu_upsample_nearest_backward()
207 if (ndim == 3) { in cpu_upsample_nearest_backward()
210 } else if (ndim == 4) { in cpu_upsample_nearest_backward()
215 TORCH_INTERNAL_ASSERT(ndim == 5); in cpu_upsample_nearest_backward()
[all …]
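UpSampleMoreKernel.cpp applies the same rank convention: ndim == 3 means 1-D upsampling over (N, C, W), ndim == 4 means 2-D over (N, C, H, W), and ndim == 5 means 3-D over (N, C, D, H, W). A hedged sketch of how the snippet's depth/height/width extraction generalizes:

def upsample_dims(input_sizes, output_sizes):
    ndim = len(input_sizes)
    assert ndim in (3, 4, 5), "expected (N, C, *spatial) sizes"
    in_d = input_sizes[2] if ndim == 5 else 1
    out_d = output_sizes[2] if ndim == 5 else 1
    in_h = input_sizes[-2] if ndim >= 4 else 1
    out_h = output_sizes[-2] if ndim >= 4 else 1
    return (in_d, in_h, input_sizes[-1]), (out_d, out_h, output_sizes[-1])

print(upsample_dims((1, 3, 4, 4), (1, 3, 8, 8)))  # ((1, 4, 4), (1, 8, 8))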
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
strided_slice_op_impl.h
39 template <typename Device, typename T, int NDIM>
47 template <typename Device, typename T, int NDIM>
55 template <typename Device, typename T, int NDIM>
77 template <typename Device, typename T, int NDIM>
88 Eigen::DSizes<Eigen::DenseIndex, NDIM> begin_di; in HandleStridedSliceCase()
89 Eigen::DSizes<Eigen::DenseIndex, NDIM> sizes_di; in HandleStridedSliceCase()
90 for (int i = 0; i < NDIM; ++i) { in HandleStridedSliceCase()
94 functor::Slice<Device, Proxy, NDIM>()( in HandleStridedSliceCase()
96 result->bit_casted_shaped<Proxy, NDIM>(processing_dims), in HandleStridedSliceCase()
97 context->input(0).bit_casted_tensor<Proxy, NDIM>(), begin_di, sizes_di); in HandleStridedSliceCase()
[all …]
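The strided-slice hits resolve a runtime rank to a compile-time NDIM and hand Eigen per-dimension begin/size arrays. The same begin/sizes contract, sketched with NumPy as a stand-in (illustrative, not the TensorFlow kernel):

import numpy as np

def handle_slice(tensor, begin, sizes):
    # one (begin, size) pair per dimension, mirroring Eigen's DSizes arrays
    index = tuple(slice(b, b + s) for b, s in zip(begin, sizes))
    return tensor[index]

x = np.arange(24).reshape(2, 3, 4)
print(handle_slice(x, begin=(0, 1, 2), sizes=(2, 2, 2)).shape)  # (2, 2, 2)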
tile_ops.cc
53 template <typename Device, typename T, int NDIM>
55 void operator()(const Device& d, typename TTypes<T, NDIM>::Tensor out,
56 typename TTypes<T, NDIM>::ConstTensor in,
57 const Eigen::DSizes<Eigen::DenseIndex, NDIM>& indices,
58 const Eigen::DSizes<Eigen::DenseIndex, NDIM>& sizes,
70 template <typename Device, typename T, int NDIM, int REDUCEDNDIM>
73 const Device& d, typename TTypes<T, NDIM>::Tensor out,
74 typename TTypes<T, NDIM>::ConstTensor in,
76 const Eigen::DSizes<Eigen::DenseIndex, NDIM>& reshape_dim) const;
101 #define DECLARE_CUDA_DIM(T, NDIM) \ argument
[all …]
betainc_op.cc
91 #define CASE(NDIM) \ in Compute() argument
92 case NDIM: { \ in Compute()
93 functor::Betainc<Device, T, NDIM> functor; \ in Compute()
94 auto a_value = a.shaped<T, NDIM>(a_shaper.x_reshape()); \ in Compute()
95 auto b_value = b.shaped<T, NDIM>(b_shaper.x_reshape()); \ in Compute()
96 auto x_value = x.shaped<T, NDIM>(x_shaper.x_reshape()); \ in Compute()
98 BCast::ToIndexArray<NDIM>(a_shaper.x_bcast()), b_value, \ in Compute()
99 BCast::ToIndexArray<NDIM>(b_shaper.x_bcast()), x_value, \ in Compute()
100 BCast::ToIndexArray<NDIM>(x_shaper.x_bcast()), \ in Compute()
101 output->shaped<T, NDIM>(a_shaper.y_reshape())); \ in Compute()
[all …]
slice_op.cc
195 #define HANDLE_DIM(NDIM) \ in Compute() argument
196 if (input_dims == NDIM) { \ in Compute()
197 HandleCase<NDIM>(context, begin, size, input, result); \ in Compute()
219 template <int NDIM>
223 Eigen::DSizes<Eigen::DenseIndex, NDIM> indices; in HandleCase()
224 Eigen::DSizes<Eigen::DenseIndex, NDIM> sizes; in HandleCase()
225 for (int i = 0; i < NDIM; ++i) { in HandleCase()
230 functor::Slice<Device, T, NDIM>()(context->eigen_device<Device>(), in HandleCase()
231 result->tensor<T, NDIM>(), in HandleCase()
232 input.tensor<T, NDIM>(), indices, sizes); in HandleCase()
[all …]
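HANDLE_DIM in slice_op.cc (and the CASE macro in betainc_op.cc above) expands to one branch per supported rank, so a runtime rank selects a fixed-rank instantiation. A toy Python analogue of that rank-to-handler dispatch, with made-up handler names:

def make_handler(ndim):
    # stands in for a HandleCase<NDIM> instantiation
    def handle(begin, sizes):
        assert len(begin) == len(sizes) == ndim
        return tuple(slice(b, b + s) for b, s in zip(begin, sizes))
    return handle

# one pre-built handler per supported rank, like the macro expansion
HANDLERS = {ndim: make_handler(ndim) for ndim in range(1, 8)}

print(HANDLERS[2]((0, 1), (2, 3)))  # (slice(0, 2, None), slice(1, 4, None))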
where_op_gpu.cu.h
39 template <int NDIM, typename TIndex>
41 const TIndex output_rows, const typename Eigen::array<TIndex, NDIM> strides, in PropagateWhereIndicesKernel()
47 TIndex index_value = ldg(output + NDIM * i); in PropagateWhereIndicesKernel()
49 for (int c = 0; c < NDIM; ++c) { in PropagateWhereIndicesKernel()
50 *(output + NDIM * i + c) = index_value / strides[c]; in PropagateWhereIndicesKernel()
200 template <int NDIM>
233 return *(ptr_ + (valid ? (NDIM * n) : 0));
241 template <typename TIndex, typename T, int NDIM>
242 Eigen::array<TIndex, NDIM> CalculateStrides(
243 typename TTypes<T, NDIM>::ConstTensor input) {
[all …]
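PropagateWhereIndicesKernel turns each flat index produced by Where into NDIM coordinates using precomputed strides; the snippet shows the division step, and the remainder step is assumed here. A minimal sketch of that unraveling:

def row_major_strides(shape):
    strides = [1] * len(shape)
    for i in range(len(shape) - 2, -1, -1):
        strides[i] = strides[i + 1] * shape[i + 1]
    return strides

def unravel(flat_index, shape):
    coords = []
    for stride in row_major_strides(shape):
        coord, flat_index = divmod(flat_index, stride)
        coords.append(coord)
    return tuple(coords)

print(unravel(11, (2, 3, 4)))  # (0, 2, 3)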
tile_ops_gpu_impl.h
24 // DEFINE_TILE_OPS(NDIM)
27 // where NDIM is an integer.
41 #define DEFINE_DIM(T, NDIM) \ argument
42 template struct TileGrad<Eigen::GpuDevice, T, NDIM>; \
43 template struct ReduceAndReshape<Eigen::GpuDevice, T, NDIM, 1>;
45 #define DEFINE_TILE_OPS(NDIM) \ argument
48 DEFINE_DIM(int16, NDIM) \
49 DEFINE_DIM(int32, NDIM) \
50 DEFINE_DIM(int64, NDIM) \
51 DEFINE_DIM(Eigen::half, NDIM) \
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
Pool.h
128 const int64_t ndim = input.ndimension(); in pool2d_shape_check() local
146 TORCH_CHECK((ndim == 4 && valid_dims && input.size(3) != 0), in pool2d_shape_check()
150 TORCH_CHECK((ndim == 3 && input.size(0) != 0 && valid_dims) || in pool2d_shape_check()
151 (ndim == 4 && valid_dims && input.size(3) != 0), in pool2d_shape_check()
184 const int64_t ndim = input.ndimension(); in max_pool2d_backward_shape_check() local
187 check_dim_size(gradOutput, ndim, ndim-3, nOutputPlane); in max_pool2d_backward_shape_check()
188 check_dim_size(gradOutput, ndim, ndim-2, outputHeight); in max_pool2d_backward_shape_check()
189 check_dim_size(gradOutput, ndim, ndim-1, outputWidth); in max_pool2d_backward_shape_check()
191 check_dim_size(indices, ndim, ndim-3, nOutputPlane); in max_pool2d_backward_shape_check()
192 check_dim_size(indices, ndim, ndim-2, outputHeight); in max_pool2d_backward_shape_check()
[all …]
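pool2d_shape_check accepts either an unbatched 3-D (C, H, W) or a batched 4-D (N, C, H, W) input and indexes plane/height/width from the end, so one check covers both layouts. A simplified Python rendering (stricter than the original, which tolerates an empty batch dimension):

def pool2d_shape_check(input_shape, n_output_plane, out_h, out_w,
                       grad_output_shape):
    ndim = len(input_shape)
    if ndim not in (3, 4) or 0 in input_shape:
        raise ValueError(f"expected non-empty 3D or 4D input, got {input_shape}")
    # plane/height/width sit at the last three positions in both layouts
    expected = (n_output_plane, out_h, out_w)
    if tuple(grad_output_shape[-3:]) != expected:
        raise ValueError(f"gradOutput {grad_output_shape}, expected {expected}")

pool2d_shape_check((2, 3, 8, 8), 3, 4, 4, (2, 3, 4, 4))  # passes silently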
LinearAlgebraUtils.h
351 const int64_t ndim = self.ndimension(); in _move_to_end() local
354 for (const auto i : c10::irange(ndim)) { in _move_to_end()
364 TORCH_CHECK((int64_t)perm.size() == ndim, in _move_to_end()
365 "duplicate or invalid axis in 'dim' argument for tensor with ndim==", ndim); in _move_to_end()
436 …e std::vector<int64_t> create_dim_backshift_permutation(int64_t dim0, int64_t dim1, int64_t ndim) { in create_dim_backshift_permutation() argument
438 (dim0 != dim1) && (dim0 < ndim) && (dim0 >= 0) && (dim1 < ndim) && (dim1 >= 0), in create_dim_backshift_permutation()
440 std::vector<int64_t> permutation(ndim); in create_dim_backshift_permutation()
442 for (const auto dim_ind : c10::irange(ndim)) { in create_dim_backshift_permutation()
457 int64_t ndim = permutation.size(); in create_reverse_permutation() local
458 std::vector<int64_t> reverse_permutation(ndim); in create_reverse_permutation()
[all …]
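create_reverse_permutation inverts a permutation so a permuted tensor can be restored: if perm places original axis i at position perm[i], the inverse maps it back. A short sketch with a round-trip check, assuming that inversion semantics from the declarations above:

def reverse_permutation(perm):
    inverse = [0] * len(perm)
    for i, p in enumerate(perm):
        inverse[p] = i
    return inverse

perm = [2, 0, 1]
inv = reverse_permutation(perm)             # [1, 2, 0]
assert [perm[i] for i in inv] == [0, 1, 2]  # round trip restores identity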
/aosp_15_r20/external/pytorch/torch/distributed/tensor/_ops/
_view_ops.py
171 def dim_pad_left(ndim: int, min_dims: int) -> DimMap:
172 return (Singleton(),) * max(0, min_dims - ndim) + tuple(
173 InputDim(i) for i in range(ndim)
177 def dim_atleast_3d(ndim: int) -> DimMap:
178 if ndim == 0:
180 elif ndim == 1:
182 elif ndim == 2:
185 return tuple(InputDim(i) for i in range(ndim))
221 def dim_flatten(ndim: int, start_dim=0, end_dim=-1) -> DimMap:
222 if ndim == 0:
[all …]
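dim_pad_left prepends Singleton dims until the input has at least min_dims dimensions, which is exactly broadcasting's left-padding rule. The same idea on plain shapes:

def pad_shape_left(shape, min_dims):
    # broadcasting-style left padding with size-1 dimensions
    return (1,) * max(0, min_dims - len(shape)) + tuple(shape)

print(pad_shape_left((3, 4), 4))  # (1, 1, 3, 4)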
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/engine/
input_spec.py
46 ndim: Integer, expected rank of the input.
74 ndim=None, argument
87 self.ndim = len(shape)
90 self.ndim = ndim
102 if self.axes and (self.ndim is not None or self.max_ndim is not None):
103 max_dim = (self.ndim if self.ndim else self.max_ndim) - 1
112 ('ndim=' + str(self.ndim)) if self.ndim else '',
122 'ndim': self.ndim,
135 If the InputSpec's shape or ndim is defined, this method will return a fully
144 if spec.ndim is None and spec.shape is None:
[all …]
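InputSpec infers ndim from an explicit shape and rejects axes entries that point past the last allowed dimension. A condensed sketch of those two rules, not the full Keras class:

class InputSpec:
    def __init__(self, shape=None, ndim=None, max_ndim=None, axes=None):
        # an explicit shape wins over a caller-supplied ndim
        self.ndim = len(shape) if shape is not None else ndim
        self.max_ndim = max_ndim
        self.axes = axes or {}
        if self.axes and (self.ndim is not None or self.max_ndim is not None):
            max_dim = (self.ndim if self.ndim else self.max_ndim) - 1
            if max(self.axes) > max_dim:
                raise ValueError(f"Axis {max(self.axes)} is greater than "
                                 f"the maximum allowed value: {max_dim}")

InputSpec(ndim=3, axes={-1: 64})  # ok: negative axes count from the end
InputSpec(ndim=2, axes={5: 10})   # raises ValueError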
/aosp_15_r20/external/python/cpython3/Objects/
memoryobject.c
223 /* Fast contiguity test. Caller must ensure suboffsets==NULL and ndim==1. */
243 with the same logical structure: format, itemsize, ndim and shape
244 are identical, with ndim > 0.
250 /* Assumptions: ndim >= 1. The macro tests for a corner case that should
253 (view->suboffsets && view->suboffsets[dest->ndim-1] >= 0)
258 assert(dest->ndim > 0 && src->ndim > 0); in last_dim_is_contiguous()
261 dest->strides[dest->ndim-1] == dest->itemsize && in last_dim_is_contiguous()
262 src->strides[src->ndim-1] == src->itemsize); in last_dim_is_contiguous()
296 if (dest->ndim != src->ndim) in equiv_shape()
299 for (i = 0; i < dest->ndim; i++) { in equiv_shape()
[all …]
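The memoryview fast path only needs the last dimension to be contiguous on both sides: no suboffsets and strides[ndim-1] == itemsize. A small predicate in the same spirit (simplified; the CPython code also guards a suboffsets corner case):

def last_dim_is_contiguous(strides, itemsize, suboffsets=None):
    return suboffsets is None and strides[-1] == itemsize

print(last_dim_is_contiguous((24, 8), itemsize=8))  # True for a C-contiguous float64 matrix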
/aosp_15_r20/external/python/cpython3/Modules/
_testbuffer.c
55 #define ND_SCALAR 0x008 /* scalar: ndim = 0 */
154 base->ndim = 1; in ndbuf_new()
267 if (ndbuf->base.ndim == 0) in init_flags()
473 copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize, in copy_rec() argument
480 assert(ndim >= 1); in copy_rec()
482 if (ndim == 1) { in copy_rec()
506 copy_rec(shape+1, ndim-1, itemsize, in copy_rec()
520 dest->ndim != src->ndim) in cmp_structure()
523 for (i = 0; i < dest->ndim; i++) { in cmp_structure()
534 ndim and shape. Copying is atomic, the function never fails with
[all …]
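copy_rec in _testbuffer.c walks the buffer one dimension per recursion level and copies items once ndim reaches 1. A compact sketch of the same recursion over nested lists:

def copy_rec(shape, src, dest):
    # peel one dimension per level; copy elements at the last one
    if len(shape) == 1:
        dest[:shape[0]] = src[:shape[0]]
        return
    for i in range(shape[0]):
        copy_rec(shape[1:], src[i], dest[i])

src, dst = [[1, 2], [3, 4]], [[0, 0], [0, 0]]
copy_rec((2, 2), src, dst)
print(dst)  # [[1, 2], [3, 4]]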
/aosp_15_r20/external/pytorch/torch/
_meta_registrations.py
171 x_d = self.ndim
172 y_d = other.ndim
203 if self.numel() != 0 and self.ndim != 0:
205 maybe_wrap_dim(dim, self.ndim)
213 maybe_wrap_dim(dim, self.ndim)
219 ndim = self.ndim
221 batch_dims = ndim - signal_ndim
224 dim_permute = list(range(ndim))
226 is_transformed_dim = [False for _ in range(ndim)]
258 out_strides = [0 for _ in range(ndim)]
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/core/util/sparse/
sparse_tensor_test.cc
36 GetSimpleIndexTensor(int N, const int NDIM) { in GetSimpleIndexTensor() argument
37 Eigen::Tensor<int64_t, 2, Eigen::RowMajor, Eigen::DenseIndex> ix(N, NDIM); in GetSimpleIndexTensor()
62 const int NDIM = 3; in TEST() local
63 auto ix = GetSimpleIndexTensor(N, NDIM); in TEST()
64 TTypes<int64_t>::Matrix map(ix.data(), N, NDIM); in TEST()
95 const int NDIM = 3; in TEST() local
96 Tensor ix(DT_INT32, TensorShape({N, NDIM})); in TEST()
108 const int NDIM = 3; in TEST() local
109 Tensor ix(DT_INT64, TensorShape({N, NDIM, 1})); in TEST()
121 const int NDIM = 3; in TEST() local
[all …]
/aosp_15_r20/external/python/cpython3/Lib/test/
test_buffer.py
264 def strides_from_shape(ndim, shape, itemsize, layout): argument
267 if ndim == 0:
271 for i in range(ndim-2, -1, -1):
275 for i in range(1, ndim):
321 def getindex(ndim, ind, strides): argument
324 for i in range(ndim):
333 ndim = len(shape)
334 sstrides = strides_from_shape(ndim, shape, 1, 'C')
335 dstrides = strides_from_shape(ndim, shape[::-1], 1, 'C')
338 fr = getindex(ndim, ind, sstrides)
[all …]
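strides_from_shape builds element strides for C layout (row-major, last index fastest) or Fortran layout (column-major, first index fastest). The two recurrences from the snippet, restated with the rank computed from the shape:

def strides_from_shape(shape, itemsize, layout='C'):
    ndim = len(shape)
    if ndim == 0:
        return ()
    strides = [itemsize] * ndim
    if layout == 'C':
        for i in range(ndim - 2, -1, -1):
            strides[i] = strides[i + 1] * shape[i + 1]
    else:  # Fortran order
        for i in range(1, ndim):
            strides[i] = strides[i - 1] * shape[i - 1]
    return tuple(strides)

print(strides_from_shape((2, 3, 4), 1, 'C'))  # (12, 4, 1)
print(strides_from_shape((2, 3, 4), 1, 'F'))  # (1, 2, 6)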
/aosp_15_r20/external/pytorch/torch/_numpy/
_funcs_impl.py
186 result_ndim = tensors[0].ndim + 1
193 if arr.ndim != 1:
196 axis = arr.ndim - 1
217 axis = _util.normalize_axis_index(axis, tensor.ndim)
266 if ary.ndim == 0:
268 axis = 1 if ary.ndim > 1 else 0
273 if ary.ndim < 2:
279 if ary.ndim < 3:
510 ndim_extra = 2 - x_tensor.ndim
517 ndim_extra = 2 - y_tensor.ndim
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/xpu/detail/
Conv.cpp
19 int64_t ndim, in conv_dst_size() argument
27 dnnl::memory::dims dst_size(ndim); in conv_dst_size()
30 for (int d = 2; d < ndim; ++d) { in conv_dst_size()
52 const int64_t ndim, in conv_src_fmt() argument
55 return (ndim == 3) in conv_src_fmt()
57 : ((ndim == 4) ? dnnl::memory::format_tag::nchw in conv_src_fmt()
58 : ((ndim == 5) ? dnnl::memory::format_tag::ncdhw in conv_src_fmt()
61 return (ndim == 3) in conv_src_fmt()
63 : ((ndim == 4) ? dnnl::memory::format_tag::nhwc in conv_src_fmt()
64 : ((ndim == 5) ? dnnl::memory::format_tag::ndhwc in conv_src_fmt()
[all …]
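conv_dst_size computes the output extent of each spatial dimension (d in [2, ndim)) from the input size, kernel, padding, stride, and dilation. The standard formula, which the oneDNN wrapper presumably applies per dimension:

def conv_out_size(in_size, kernel, padding, stride, dilation):
    # dilation stretches the kernel's effective footprint
    effective_kernel = dilation * (kernel - 1) + 1
    return (in_size + 2 * padding - effective_kernel) // stride + 1

print(conv_out_size(32, kernel=3, padding=1, stride=1, dilation=1))  # 32 (same-size conv)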
/aosp_15_r20/external/pytorch/torch/_refs/
__init__.py
2154 if a.ndim > 64:
2156 … f"Received a tensor with {a.ndim} dimensions, but only tensors with up to 64 dims are supported!"
2173 valid_shape = a.ndim == 0 or builtins.all(a.shape[i] for i in dims)
2184 output_shape = [a.shape[i] if i not in dims else 1 for i in range(a.ndim)]
2185 broadcast_dims = [i for i in range(a.ndim) if i not in dims]
2307 leading_dims = a.ndim - len(shape)
2496 nelem = 1 if a.ndim == 0 else reduce(operator.mul, (a.shape[i] for i in dims), 1)
2564 vec1.ndim == 1,
2565 lambda: f"addr: Expected 1-D argument vec1, but got {vec1.ndim}-D",
2568 vec2.ndim == 1,
[all …]
/aosp_15_r20/external/pytorch/torch/_refs/nn/functional/
__init__.py
317 input.ndim >= 2,
318 lambda: f"Expected at least 2 dimensions for input tensor but received {input.ndim}",
646 if input1.ndim != input2.ndim or input1.ndim != target.ndim:
702 input.ndim > 0 and input.ndim <= 3,
703 lambda: f"Expected input dimension to be either [1, 2, 3] but received {input.ndim}.",
707 (input.ndim == 1) or (input.shape[0] == target.shape[0]),
721 num_classes = input.shape[1] if input.ndim > 1 else input.shape[0]
744 if input.ndim == 1:
748 elif input.ndim == 2:
792 input.ndim > 0,
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/pjrt/
transpose.cc
540 int ndim = a_dims.size(); in RemoveTrivialDimensions() local
543 std::vector<int> shift(ndim); in RemoveTrivialDimensions()
548 updated_a_dims.reserve(ndim); in RemoveTrivialDimensions()
549 updated_lda.reserve(ndim); in RemoveTrivialDimensions()
550 updated_lda_tile.reserve(ndim); in RemoveTrivialDimensions()
551 updated_a_tiling.reserve(ndim); in RemoveTrivialDimensions()
553 for (int a_dim = 0; a_dim < ndim; ++a_dim) { in RemoveTrivialDimensions()
572 for (int b_dim = 0; b_dim < ndim; ++b_dim) { in RemoveTrivialDimensions()
596 int ndim = a_dims.size(); in CoalesceDimensions() local
599 std::vector<int> shift(ndim, 0); in CoalesceDimensions()
[all …]
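RemoveTrivialDimensions drops size-1 dimensions before planning a transpose, since they contribute no data movement, and remaps the permutation onto the surviving dims. A simplified sketch of that normalization:

def remove_trivial_dims(shape, perm):
    keep = [d for d, size in enumerate(shape) if size != 1]
    remap = {d: i for i, d in enumerate(keep)}  # old dim -> new dim
    new_shape = [shape[d] for d in keep]
    new_perm = [remap[d] for d in perm if d in remap]
    return new_shape, new_perm

print(remove_trivial_dims([4, 1, 5], [2, 0, 1]))  # ([4, 5], [1, 0])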
/aosp_15_r20/external/pytorch/torch/testing/_internal/opinfo/definitions/
sparse.py
132 if sample_input.input.ndim == 0:
142 if sample_input.input.ndim < 2:
146 if sample_input.input.ndim > 2 and (sample_input.input == 0).any():
181 if sample_input.input.ndim > 2:
188 dense_dim=sample_input.input.ndim - 2,
211 and mask.ndim > 2
232 elif sample.input.ndim > 2:
244 and mask.ndim > 2
261 and mask.ndim > 2
279 sample.input.ndim > 2
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/framework/
fast_tensor_util.pyx
10 tensor_proto, np.ndarray[np.uint16_t, ndim=1] nparray): argument
22 tensor_proto, np.ndarray[np.uint16_t, ndim=1] nparray): argument
30 tensor_proto, np.ndarray[np.float32_t, ndim=1] nparray): argument
38 tensor_proto, np.ndarray[np.float64_t, ndim=1] nparray): argument
46 tensor_proto, np.ndarray[np.int32_t, ndim=1] nparray): argument
53 tensor_proto, np.ndarray[np.uint32_t, ndim=1] nparray): argument
60 tensor_proto, np.ndarray[np.int64_t, ndim=1] nparray): argument
67 tensor_proto, np.ndarray[np.uint64_t, ndim=1] nparray): argument
74 tensor_proto, np.ndarray[np.uint8_t, ndim=1] nparray): argument
82 tensor_proto, np.ndarray[np.uint16_t, ndim=1] nparray): argument
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/
TensorIterator.cpp
46 inline void get_strides(int64_t* strides, ArrayRef<OperandInfo> operands, int64_t ndim) { in get_strides() argument
47 for (const auto dim : c10::irange(ndim)) { in get_strides()
53 if (ndim < 2) { in get_strides()
55 std::fill_n(strides, (2 - ndim) * ntensors, 0); in get_strides()
234 // strides[0] is the fastest moving dimension instead of strides[ndim - 1]. in reorder_dimensions()
237 perm_.resize(ndim()); in reorder_dimensions()
238 if (ndim() == 1) { in reorder_dimensions()
292 for (const auto i : c10::irange(1, ndim())) { in reorder_dimensions()
554 for (const auto dim : c10::irange(ndim())) { in compatible_stride()
567 for (const auto dim : c10::irange(ndim())) { in invert_perm()
[all …]
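get_strides appears to pack per-operand strides dimension-major and zero-pad up to two dimensions so the inner loops can always assume a 2-D layout; the exact layout is inferred from the snippet. A rough sketch under that assumption:

def get_strides(operand_strides, ndim):
    ntensors = len(operand_strides)
    packed = []
    for dim in range(ndim):
        for strides in operand_strides:
            packed.append(strides[dim])
    if ndim < 2:
        packed.extend([0] * ((2 - ndim) * ntensors))  # pad to a 2-D view
    return packed

print(get_strides([(8, 1)], ndim=2))  # [8, 1]
print(get_strides([(1,)], ndim=1))    # [1, 0]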
