
Searched refs:logical_dim (Results 1 – 18 of 18) sorted by relevance

/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/gpu/
reduction_dimension_grouper.cc
60 for (int logical_dim = 0; logical_dim < shape.rank(); logical_dim++) { in HandleReduce() local
61 VLOG(5) << "Processing dimension " << logical_dim << " of size " in HandleReduce()
62 << shape.dimensions(logical_dim); in HandleReduce()
63 if (is_reduced(logical_dim) && logical_dim < shape.rank() - 1 && in HandleReduce()
64 is_reduced(logical_dim + 1)) { in HandleReduce()
67 next_dim_size *= shape.dimensions(logical_dim); in HandleReduce()
71 if (is_reduced(logical_dim)) { in HandleReduce()
73 shape.dimensions(logical_dim)); in HandleReduce()
80 new_grouped_dims.push_back(shape.dimensions(logical_dim)); in HandleReduce()
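
The grouper above collapses each run of adjacent reduced dimensions into a single dimension by multiplying their sizes together (the next_dim_size accumulator), emitting non-reduced dimensions unchanged. A minimal standalone sketch of that idea, not the XLA implementation; GroupReducedDims and its signature are invented for illustration:

    #include <cstdint>
    #include <functional>
    #include <vector>

    // Multiply each run of consecutive reduced dimensions into one grouped dim.
    std::vector<int64_t> GroupReducedDims(
        const std::vector<int64_t>& dims,
        const std::function<bool(int)>& is_reduced) {
      std::vector<int64_t> grouped;
      int64_t run = 1;  // accumulated size of the current run of reduced dims
      for (int d = 0; d < static_cast<int>(dims.size()); ++d) {
        if (!is_reduced(d)) {
          grouped.push_back(dims[d]);
          continue;
        }
        run *= dims[d];
        // Flush the run once the next dim is not reduced (or we hit the end).
        if (d + 1 == static_cast<int>(dims.size()) || !is_reduced(d + 1)) {
          grouped.push_back(run);
          run = 1;
        }
      }
      return grouped;
    }

    // Example: reducing dims 1 and 2 of [4, 8, 16, 2] yields [4, 128, 2].
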
reduction_layout_normalizer.cc
99 int64_t logical_dim = in HandleReduce() local
101 int64_t dim_size = operand_shape.dimensions(logical_dim); in HandleReduce()
102 VLOG(5) << "Processing logical dimension " << logical_dim << " of size " in HandleReduce()
106 if (absl::c_linear_search(reduce->dimensions(), logical_dim)) { in HandleReduce()
110 int64_t logical_reduce_dim = to_reduce_logical_dim(logical_dim); in HandleReduce()
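
The normalizer walks dimensions in physical (layout) order, recovers each logical dimension index, and checks whether that dimension is being reduced with a plain linear search. A self-contained equivalent of that membership test; the helper name is invented:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Same check as absl::c_linear_search(reduce->dimensions(), logical_dim).
    bool IsReducedDim(const std::vector<int64_t>& reduce_dims,
                      int64_t logical_dim) {
      return std::find(reduce_dims.begin(), reduce_dims.end(), logical_dim) !=
             reduce_dims.end();
    }
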
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
LegacyBatchingRegistrations.cpp
109 auto logical_dim = self.dim(); in squeeze_dims__batching_rule() local
111 if (logical_dim == 0) { in squeeze_dims__batching_rule()
122 auto actual_dim = c10::maybe_wrap_dim(d, logical_dim); in squeeze_dims__batching_rule()
191 auto logical_dim = self.dim(); in unsqueeze__batching_rule() local
192 int64_t dim_physical = maybe_wrap_dim(dim, logical_dim + 1); in unsqueeze__batching_rule()
211 auto logical_dim = self.dim(); in transpose__batching_rule() local
218 if (logical_dim == 0 && in transpose__batching_rule()
225 dim0 = maybe_wrap_dim(dim0, logical_dim); in transpose__batching_rule()
226 dim1 = maybe_wrap_dim(dim1, logical_dim); in transpose__batching_rule()
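
All three in-place batching rules start by wrapping possibly-negative user-facing dims against the tensor's logical rank. A sketch of that wrapping convention, simplified from c10::maybe_wrap_dim, which additionally special-cases rank-0 tensors:

    #include <cstdint>
    #include <stdexcept>

    // A negative dim indexes from the end: for rank 4, dim -1 means dim 3.
    int64_t wrap_dim(int64_t dim, int64_t rank) {
      if (dim < -rank || dim >= rank) {
        throw std::out_of_range("dim out of range");
      }
      return dim < 0 ? dim + rank : dim;
    }

    // wrap_dim(-1, 4) == 3; wrap_dim(2, 4) == 2
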
BatchRulesReduceOps.cpp
130 auto logical_dim = rankWithoutBatchDim(self, self_bdim); in boxed_reduction_batch_rule() local
137 auto all_dims = range(0, std::max((int64_t)1, logical_dim)); in boxed_reduction_batch_rule()
153 if (logical_dim == 0) { in boxed_reduction_batch_rule()
171 …bool is_scalar_case = logical_dim == 0 && dims.size() == 1 && is_allowed_dim_on_scalar_tensor(dims… in boxed_reduction_batch_rule()
BatchRulesHelper.cpp
44 int64_t getPhysicalDim(const Tensor& tensor, bool has_batch_dim, int64_t logical_dim) { in getPhysicalDim() argument
48 auto wrapped_dim = maybe_wrap_dim(logical_dim, rank); in getPhysicalDim()
LegacyVmapTransforms.cpp
64 int64_t VmapPhysicalView::getPhysicalDim(int64_t logical_dim) const { in getPhysicalDim()
66 return maybe_wrap_dim(logical_dim, logical_ndim) + numBatchDims(); in getPhysicalDim()
LegacyVmapTransforms.h
140 int64_t getPhysicalDim(int64_t logical_dim) const;
BatchRulesScatterOps.cpp
52 auto logical_dim = rankWithoutBatchDim(maybe_tensor.value(), indices_bdims[i]); in get_max_index_logical_dim() local
53 max_logical_dim = std::max(logical_dim, max_logical_dim); in get_max_index_logical_dim()
BatchRulesHelper.h
37 int64_t getPhysicalDim(const Tensor& tensor, bool has_batch_dim, int64_t logical_dim);
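
The recurring pattern across these functorch hits: a physical (batched) tensor keeps its batch dims at the front, so a user-visible logical dim becomes a physical dim by wrapping it against the logical rank and then shifting past the batch dims, exactly the maybe_wrap_dim(logical_dim, logical_ndim) + numBatchDims() expression above. A self-contained sketch; the free-function form is a stand-in, not the functorch API:

    #include <cstdint>
    #include <stdexcept>

    // Stand-in for VmapPhysicalView::getPhysicalDim: wrap a possibly-negative
    // logical dim, then skip over the leading batch dims.
    int64_t GetPhysicalDim(int64_t logical_dim, int64_t logical_ndim,
                           int64_t num_batch_dims) {
      if (logical_dim < -logical_ndim || logical_dim >= logical_ndim) {
        throw std::out_of_range("dim out of range");
      }
      int64_t wrapped =
          logical_dim < 0 ? logical_dim + logical_ndim : logical_dim;
      return wrapped + num_batch_dims;
    }

    // With one batch dim and logical rank 3, logical dim -1 maps to physical 3.
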
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
LegacyBatching.cpp
81 auto logical_dim = self.dim(); in maybe_movedim() local
82 src = maybe_wrap_dim(src, logical_dim); in maybe_movedim()
83 dst = maybe_wrap_dim(dst, logical_dim); in maybe_movedim()
/aosp_15_r20/external/pytorch/torch/csrc/functorch/
init.cpp
107 auto logical_dim = self.dim(); in _movedim() local
108 src = at::maybe_wrap_dim(src, logical_dim); in _movedim()
109 dst = at::maybe_wrap_dim(dst, logical_dim); in _movedim()
114 permutation.reserve(logical_dim); in _movedim()
115 for (int64_t dim = 0; dim < logical_dim; dim++) { in _movedim()
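
Both movedim helpers reduce the move to a single permute over logical_dim entries. One standard way to build that permutation, a guess at what the loop above fills in; the helper name is invented:

    #include <cstdint>
    #include <vector>

    // Permutation that moves dimension `src` to position `dst` (both already
    // wrapped into [0, rank)); permuting with it performs the movedim.
    std::vector<int64_t> MoveDimPermutation(int64_t rank, int64_t src,
                                            int64_t dst) {
      std::vector<int64_t> permutation;
      permutation.reserve(rank);
      for (int64_t dim = 0; dim < rank; ++dim) {
        if (dim != src) {
          permutation.push_back(dim);  // identity order, minus the moved dim
        }
      }
      permutation.insert(permutation.begin() + dst, src);
      return permutation;
    }

    // MoveDimPermutation(4, 0, 2) == {1, 2, 0, 3}
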
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/
layout_util.cc
554 int64_t logical_dim = Minor(shape.layout(), minor); in LinearIndex() local
555 int64_t shape_dim_size = shape.dimensions(logical_dim); in LinearIndex()
556 int64_t index = indices[logical_dim]; in LinearIndex()
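
LinearIndex walks the shape minor-to-major, mapping each physical position back to its logical dimension so the right extent and index are used at each step. A simplified, self-contained sketch of that computation; the signature is not XLA's:

    #include <cstdint>
    #include <vector>

    // dims:           extent of each logical dimension
    // minor_to_major: minor_to_major[0] is the fastest-varying logical dim
    //                 (what Minor(shape.layout(), minor) looks up above)
    // indices:        a logical multi-index into the shape
    int64_t LinearIndexSketch(const std::vector<int64_t>& dims,
                              const std::vector<int64_t>& minor_to_major,
                              const std::vector<int64_t>& indices) {
      int64_t linear = 0;
      int64_t stride = 1;
      for (size_t minor = 0; minor < dims.size(); ++minor) {
        const int64_t logical_dim = minor_to_major[minor];
        linear += indices[logical_dim] * stride;
        stride *= dims[logical_dim];
      }
      return linear;
    }
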
/aosp_15_r20/external/pytorch/aten/src/ATen/
LegacyVmapTransforms.cpp
77 int64_t VmapPhysicalView::getPhysicalDim(int64_t logical_dim) const { in getPhysicalDim()
79 return maybe_wrap_dim(logical_dim, logical_ndim) + numBatchDims(); in getPhysicalDim()
LegacyVmapTransforms.h
137 int64_t getPhysicalDim(int64_t logical_dim) const;
/aosp_15_r20/external/pytorch/torch/testing/_internal/
autograd_function_db.py
320 logical_dim = x.dim() if x_bdim is None else x_bdim - 1
321 dim = dim if dim >= 0 else dim + logical_dim
custom_op_db.py
185 logical_dim = x.dim() if x_bdim is None else x_bdim - 1
186 dim = dim if dim >= 0 else dim + logical_dim
/aosp_15_r20/external/pytorch/docs/source/notes/
extending.func.rst
457 logical_dim = x.dim() if x_bdim is None else x_bdim - 1
458 dim = dim if dim >= 0 else dim + logical_dim
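
These three Python hits are the same custom-op vmap idiom: compute the per-example (logical) rank, accounting for an optional batch dim, then wrap a negative user dim against it. A hedged C++ rendering of that rank computation, keeping one language for the sketches on this page; the names are illustrative and the batch-dim handling is one reading of the pattern:

    #include <cstdint>
    #include <optional>

    // When a batch dim is present, one physical dim belongs to vmap, so the
    // logical (per-example) rank is one less than the physical rank.
    int64_t LogicalRank(int64_t physical_rank, std::optional<int64_t> bdim) {
      return bdim.has_value() ? physical_rank - 1 : physical_rank;
    }

    // A negative dim then wraps as: dim >= 0 ? dim : dim + LogicalRank(...).
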
/aosp_15_r20/external/mesa3d/src/intel/isl/
isl.c
1457 enum isl_surf_dim logical_dim, in isl_surf_choose_dim_layout() argument
1467 switch (logical_dim) { in isl_surf_choose_dim_layout()
1492 switch (logical_dim) { in isl_surf_choose_dim_layout()