
Searched full:indices (Results 1 – 25 of 10757) sorted by relevance


/aosp_15_r20/external/tensorflow/tensorflow/python/ops/ragged/
ragged_gather_ops.py
35 indices: ragged_tensor.RaggedOrDense,
40 """Gathers ragged slices from `params` axis `0` according to `indices`.
43 as `tf.gather`, but supports ragged `params` and `indices`.)
48 >>> indices = tf.constant([3, 1, 2, 1, 0])
55 >>> tf.gather(ragged_params, indices)
64 indices: The potentially ragged tensor indicating which values to gather.
68 axis: The axis in `params` to gather `indices` from.
74 `output.shape=indices.shape + params.shape[1:]` and
75 `output.ragged_rank=indices.shape.ndims + params.ragged_rank`.
78 ValueError: If indices.shape.ndims is not known statically.
[all …]
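
The docstring above is directly runnable; a minimal version of its example, assuming TensorFlow 2.x (the `params` values here are illustrative, since the docstring's own are elided above):

    import tensorflow as tf

    params = tf.ragged.constant([['a', 'b'], [], ['c', 'd', 'e'], ['f'], ['g']])
    indices = tf.constant([3, 1, 2, 1, 0])
    # Row i of the output is params[indices[i]], so ragged rows pass through intact.
    print(tf.gather(params, indices))
    # <tf.RaggedTensor [[b'f'], [], [b'c', b'd', b'e'], [], [b'a', b'b']]>
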
ragged_batch_gather_op_test.py
44 indices=ragged_factory_ops.constant_value([[1, 2, 0], [], [], [0,
52 descr='params: [P1], indices: [I], result: [I]',
54 indices=[3, 2],
57 descr='params: [P1, (P2)], indices: [I], result: [I, (P2)]',
60 indices=[3, 2],
66 descr='params: [B1, P1], indices: [B1, I], result: [B1, I]',
68 indices=[[2, 0], [0, 1], [1, 0]],
71 descr='params: [B1, (P1)], indices: [B1, I], result: [B1, I]',
74 indices=[[2, 0], [0, 1], [0, 0]],
77 descr='params: [B1, P1], indices: [B1, (I)], result: [B1, (I)]',
[all …]
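
The shape descriptors in these cases ('params: [B1, (P1)], indices: [B1, I], result: [B1, I]') describe per-batch gathers: each batch row of `indices` selects within the matching row of `params`. In TF 2.x the same behavior is reachable through tf.gather with batch_dims=1; a sketch with illustrative `params` values:

    import tensorflow as tf

    params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e', 'f']])
    indices = tf.ragged.constant([[1, 2, 0], [], [], [0, 0]])
    # batch_dims=1: indices row b gathers within params row b.
    print(tf.gather(params, indices, batch_dims=1))
    # <tf.RaggedTensor [[b'b', b'c', b'a'], [], [], [b'e', b'e']]>
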
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/
state_ops.py
383 def scatter_update(ref, indices, updates, use_locking=True, name=None): argument
390 # Scalar indices
391 ref[indices, ...] = updates[...]
393 # Vector indices (for each i)
394 ref[indices[i], ...] = updates[i, ...]
396 # High rank indices (for each i, ..., j)
397 ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
404 duplicate entries in `indices`, the order at which the updates happen
407 Requires `updates.shape = indices.shape + ref.shape[1:]`.
415 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
[all …]
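
When the indices are unique, the update rule in this docstring is ordinary advanced assignment; a NumPy sketch of the documented semantics, including the required shape relation:

    import numpy as np

    ref = np.zeros((5, 2))
    indices = np.array([1, 3])
    updates = np.array([[10., 10.], [20., 20.]])
    # Requires updates.shape == indices.shape + ref.shape[1:] == (2, 2).
    ref[indices] = updates  # rows 1 and 3 replaced; duplicate indices would race
    print(ref)
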
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/array_ops/
scatter_nd_ops_test.py
59 def _NumpyScatterNd(ref, indices, updates, op): argument
60 ixdim = indices.shape[-1]
61 num_updates = indices.size // ixdim
66 flat_indices = _FlatInnerDims(indices)
76 def _NumpyUpdate(ref, indices, updates): argument
77 return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
80 def _NumpyAdd(ref, indices, updates): argument
81 return _NumpyScatterNd(ref, indices, updates, lambda p, u: p + u)
84 def _NumpySub(ref, indices, updates): argument
85 return _NumpyScatterNd(ref, indices, updates, lambda p, u: p - u)
[all …]
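
The _NumpyScatterNd reference above is truncated; a self-contained sketch with the same contract, reconstructed under the assumption that the elided helpers (_FlatInnerDims and friends) just flatten the leading dimensions. The last axis of `indices` indexes the leading dimensions of `ref`:

    import numpy as np

    def scatter_nd_update(ref, indices, updates):
        # Reference semantics of tf.scatter_nd_update, update op = "replace".
        out = ref.copy()
        ixdim = indices.shape[-1]
        flat_indices = indices.reshape(-1, ixdim)
        flat_updates = updates.reshape((flat_indices.shape[0],) + ref.shape[ixdim:])
        for ix, u in zip(flat_indices, flat_updates):
            out[tuple(ix)] = u
        return out

    ref = np.zeros((4, 3))
    print(scatter_nd_update(ref,
                            np.array([[0], [2]]),
                            np.array([[1., 1., 1.], [2., 2., 2.]])))
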
gather_op_test.py
66 for indices in 4, [1, 2, 2, 4, 5]:
67 with self.subTest(dtype=dtype, indices=indices):
70 indices_tf = constant_op.constant(indices)
73 np_val = params_np[indices]
86 indices = constant_op.constant(2)
87 gather_t = array_ops.gather(params, indices, axis=axis)
102 # The indices must be in bounds for any axis.
103 indices = constant_op.constant([0, 1, 0, 2])
104 gather_t = array_ops.gather(params, indices, axis=axis)
113 # We check that scalar and empty indices shapes work as well
[all …]
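
These cases cover scalar, out-of-bounds-checked, and axis-selected indices. A minimal illustration of tf.gather along a non-zero axis, plus the scalar case:

    import tensorflow as tf

    params = tf.constant([[0, 1, 2], [3, 4, 5]])
    print(tf.gather(params, [2, 0], axis=1))  # [[2 0] [5 3]]
    print(tf.gather(params, 1, axis=0))       # scalar indices drop the axis: [3 4 5]
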
one_hot_op_test.py
55 indices = np.asarray([0, 2, -1, 1], dtype=np.int64)
67 indices=indices,
76 indices=indices,
85 indices = np.asarray([0, 2, -1, 1], dtype=np.int64)
93 self._testBothOneHot(indices=indices, depth=depth, dtype=dtype, truth=truth)
97 indices=indices, depth=depth, axis=0, dtype=dtype,
128 indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)
141 indices=indices,
150 indices=indices,
159 indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)
[all …]
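
The -1 entries in these test indices are deliberate: tf.one_hot leaves out-of-range rows entirely at off_value. A minimal check:

    import tensorflow as tf

    print(tf.one_hot([0, 2, -1, 1], depth=3))
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 0. 0.]   <- -1 is out of range, so the whole row stays at off_value
    #  [0. 1. 0.]]
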
scatter_ops_test.py
32 def _NumpyAdd(ref, indices, updates): argument
33 # Since numpy advanced assignment does not support repeated indices,
35 for i, indx in np.ndenumerate(indices):
39 def _NumpyAddScalar(ref, indices, update): argument
40 for _, indx in np.ndenumerate(indices):
44 def _NumpySub(ref, indices, updates): argument
45 for i, indx in np.ndenumerate(indices):
49 def _NumpySubScalar(ref, indices, update): argument
50 for _, indx in np.ndenumerate(indices):
54 def _NumpyMul(ref, indices, updates): argument
[all …]
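
These helpers loop element by element because, as the comment above notes, NumPy advanced assignment applies a repeated index only once. np.add.at shows the difference directly:

    import numpy as np

    indices = np.array([0, 0, 2])
    updates = np.array([1., 2., 3.])

    ref = np.zeros(3)
    ref[indices] += updates           # last write wins at index 0: [2. 0. 3.]

    ref = np.zeros(3)
    np.add.at(ref, indices, updates)  # accumulates repeats: [3. 0. 3.]
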
/aosp_15_r20/external/perfetto/src/trace_processor/db/column/
numeric_storage_unittest.cc
45 using Indices = DataLayerChain::Indices; typedef
255 Indices common_indices = Indices::CreateWithIndexPayloadForTesting( in TEST()
256 {0, 4, 4, 5, 1, 6}, Indices::State::kNonmonotonic); in TEST()
259 auto indices = common_indices; in TEST() local
260 chain->IndexSearch(FilterOp::kEq, val, indices); in TEST()
261 ASSERT_THAT(utils::ExtractPayloadForTesting(indices), ElementsAre(3)); in TEST()
263 indices = common_indices; in TEST()
264 chain->IndexSearch(FilterOp::kNe, val, indices); in TEST()
265 ASSERT_THAT(utils::ExtractPayloadForTesting(indices), in TEST()
268 indices = common_indices; in TEST()
[all …]
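
The contract being tested: IndexSearch takes a caller-supplied, possibly non-monotonic list of row indices (repeats allowed) and keeps only those whose stored value satisfies the filter op. A toy Python model of that contract, not the Perfetto API:

    import operator

    storage = [5, 4, 7, 1, 4, 9, 3]  # one value per storage row

    def index_search(op, value, indices):
        return [i for i in indices if op(storage[i], value)]

    print(index_search(operator.eq, 4, [0, 4, 4, 5, 1, 6]))  # [4, 4, 1]
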
string_storage_unittest.cc
38 using Indices = DataLayerChain::Indices; typedef
167 Indices common_indices = Indices::CreateWithIndexPayloadForTesting( in TEST()
168 {6, 5, 4, 3, 2, 1, 0}, Indices::State::kNonmonotonic); in TEST()
170 auto indices = common_indices; in TEST() local
171 chain->IndexSearch(FilterOp::kEq, val, indices); in TEST()
172 ASSERT_THAT(utils::ExtractPayloadForTesting(indices), ElementsAre(2)); in TEST()
174 indices = common_indices; in TEST()
175 chain->IndexSearch(FilterOp::kNe, val, indices); in TEST()
176 ASSERT_THAT(utils::ExtractPayloadForTesting(indices), in TEST()
179 indices = common_indices; in TEST()
[all …]
null_overlay.cc
41 using Indices = DataLayerChain::Indices; typedef
43 std::optional<Token> UpdateIndicesForInner(Indices& indices, in UpdateIndicesForInner() argument
47 indices.tokens.begin(), indices.tokens.end(), in UpdateIndicesForInner()
52 if (first_null_it != indices.tokens.end()) { in UpdateIndicesForInner()
57 indices.tokens.erase(std::remove_if(first_null_it, indices.tokens.end(), in UpdateIndicesForInner()
61 indices.tokens.end()); in UpdateIndicesForInner()
64 for (auto& token : indices.tokens) { in UpdateIndicesForInner()
77 // Reconcile the results of the Search operation with the non-null indices in ReconcileStorageResult()
99 // For the IS NULL constraint, we also need to include all the null indices in ReconcileStorageResult()
199 // Figure out the bounds of the indices in the underlying storage and search in SearchValidated()
[all …]
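
In both functions above, the overlay's job is to separate NULL rows from rows backed by the inner storage and translate the survivors into the inner index space before delegating the search. A toy model of that split-and-translate step, not the Perfetto API:

    # None marks a NULL row of the outer column; integers are inner-storage rows.
    outer_to_inner = [None, 0, None, 1, 2, None, 3]

    def split_and_translate(indices):
        non_null = [i for i in indices if outer_to_inner[i] is not None]
        return non_null, [outer_to_inner[i] for i in non_null]

    print(split_and_translate([0, 3, 5, 6]))  # ([3, 6], [1, 3])
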
dense_null_overlay.cc
39 using Indices = DataLayerChain::Indices; typedef
42 Indices& indices, in RemoveAllNullsAndReturnTheFirstOne() argument
46 indices.tokens.begin(), indices.tokens.end(), in RemoveAllNullsAndReturnTheFirstOne()
51 if (first_null_it != indices.tokens.end()) { in RemoveAllNullsAndReturnTheFirstOne()
56 indices.tokens.erase(std::remove_if(first_null_it, indices.tokens.end(), in RemoveAllNullsAndReturnTheFirstOne()
60 indices.tokens.end()); in RemoveAllNullsAndReturnTheFirstOne()
181 Indices& indices) const { in IndexSearchValidated()
186 // Partition the vector into all the null indices followed by all the in IndexSearchValidated()
187 // non-null indices. in IndexSearchValidated()
189 indices.tokens.begin(), indices.tokens.end(), in IndexSearchValidated()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/
qembeddingbag.cpp
39 const at::Tensor& indices, in embedding_lookup_fallback_impl() argument
50 const auto indices_data = indices.data_ptr<IndexType>(); in embedding_lookup_fallback_impl()
55 const int index_size = indices.numel(); in embedding_lookup_fallback_impl()
66 lengths_data.push_back(indices.numel() - lower); in embedding_lookup_fallback_impl()
79 "Expect the lengths data to be less than indices size"); in embedding_lookup_fallback_impl()
86 TORCH_CHECK((idx >= 0 && idx < N), "Invalid indices data"); in embedding_lookup_fallback_impl()
94 "Invalid indices data for Sparse Op.") in embedding_lookup_fallback_impl()
200 const IndexType* indices) { in fbgemm_spmdm_report_error_() argument
204 IndexType idx = indices[i]; in fbgemm_spmdm_report_error_()
218 "the size of the indices tensor, but it appears not."); in fbgemm_spmdm_report_error_()
[all …]
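
Stripped of quantization, the lookup this fallback implements is the classic embedding-bag pattern: offsets partition the flat indices into bags, and each output row reduces the weight rows a bag selects (note the snippet's `indices.numel() - lower` push, which closes the final bag). A plain-float NumPy sketch in sum mode, with illustrative shapes:

    import numpy as np

    weights = np.arange(12, dtype=np.float32).reshape(4, 3)  # N=4 embedding rows
    indices = np.array([0, 2, 1, 3])
    offsets = np.array([0, 2])  # bag 0 -> indices[0:2], bag 1 -> indices[2:4]

    lengths = np.append(np.diff(offsets), indices.size - offsets[-1])
    out = np.stack([weights[indices[o:o + l]].sum(axis=0)
                    for o, l in zip(offsets, lengths)])
    print(out)  # [[ 6.  8. 10.], [12. 14. 16.]]
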
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/spmd/
gather_scatter_handler.cc
65 // Return an update sharding that is compatible with the indices sharding for
68 const PartitionedHlo& updates, const PartitionedHlo& indices, in ComputeUpdateShardingFromIndices() argument
71 std::vector<int64_t> index_dim_to_update_dim(indices.base_shape().rank(), -1); in ComputeUpdateShardingFromIndices()
79 indices.sharding(), index_dim_to_update_dim, update_dim_to_index_dim); in ComputeUpdateShardingFromIndices()
83 // Returns the min and max for the indices (replicated) in a scatter/gather
121 // Broadcast the index bounds to the same shape as the indices. in IndexBoundsForGatherScatterOperandPartitionedOnTrivialSliceDims()
161 PartitionedHlo& indices, const Shape& output_shape,
165 // Perform partitioning of Gather when the indices are partitioned on the
170 PartitionedHlo& indices, absl::Span<const int64_t> batch_dims, in PartitionGatherIndexPassthroughPartition() argument
173 if (!indices.sharding().IsTileMaximal() && in PartitionGatherIndexPassthroughPartition()
[all …]
/aosp_15_r20/external/eigen/Eigen/src/plugins/
IndexedViewMethods.h
27 template<typename Indices>
28 struct IvcRowType : public internal::IndexedViewCompatibleType<Indices,RowsAtCompileTime> {};
30 template<typename Indices>
31 struct IvcColType : public internal::IndexedViewCompatibleType<Indices,ColsAtCompileTime> {};
33 template<typename Indices>
34 struct IvcType : public internal::IndexedViewCompatibleType<Indices,SizeAtCompileTime> {};
38 template<typename Indices>
39 typename IvcRowType<Indices>::type
40 ivcRow(const Indices& indices) const { in ivcRow() argument
41 …return internal::makeIndexedViewCompatible(indices, internal::variable_if_dynamic<Index,RowsAtComp… in ivcRow()
[all …]
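
These templates adapt arbitrary index containers so that A(rows, cols) yields an indexed view over the selected rows and columns. The resulting behavior is close to NumPy fancy indexing, shown here purely as an analogue, not Eigen's API:

    import numpy as np

    A = np.arange(16).reshape(4, 4)
    rows, cols = [0, 2], [1, 3]
    print(A[np.ix_(rows, cols)])  # [[ 1  3]
                                  #  [ 9 11]]
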
/aosp_15_r20/external/executorch/kernels/portable/cpu/util/
advanced_index_util.cpp
20 bool check_indices_dtypes(TensorOptList indices) { in check_indices_dtypes() argument
21 for (auto i = 0; i < indices.size(); i++) { in check_indices_dtypes()
22 if (indices[i].has_value()) { in check_indices_dtypes()
23 const Tensor& index = indices[i].value(); in check_indices_dtypes()
42 bool check_mask_indices(const Tensor& in, TensorOptList indices) { in check_mask_indices() argument
44 for (auto i = 0; i < indices.size(); i++) { in check_mask_indices()
45 if (indices[i].has_value()) { in check_mask_indices()
46 const Tensor& index = indices[i].value(); in check_mask_indices()
141 bool check_index_args(const Tensor& in, TensorOptList indices, Tensor& out) { in check_index_args() argument
143 ET_LOG_AND_RETURN_IF_FALSE(check_indices_dtypes(indices)); in check_index_args()
[all …]
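
These checks validate the optional index list for advanced indexing, whose semantics follow PyTorch (and NumPy): integer index tensors select along a dimension, boolean masks select elements. A NumPy illustration of the two index kinds being validated:

    import numpy as np

    x = np.arange(12).reshape(3, 4)
    print(x[np.array([0, 2])])  # integer indices: rows 0 and 2
    print(x[x > 8])             # boolean mask: [ 9 10 11]
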
/aosp_15_r20/external/eigen/unsupported/Eigen/CXX11/src/Tensor/
TensorRef.h
212 const array<Index, num_indices> indices{{firstIndex, otherIndices...}}; in operator()
213 return coeff(indices); in operator()
219 const array<Index, num_indices> indices{{firstIndex, otherIndices...}}; in coeffRef()
220 return coeffRef(indices); in coeffRef()
227 array<Index, 2> indices; in operator() local
228 indices[0] = i0; in operator()
229 indices[1] = i1; in operator()
230 return coeff(indices); in operator()
235 array<Index, 3> indices; in operator() local
236 indices[0] = i0; in operator()
[all …]
/aosp_15_r20/external/rust/android-crates-io/crates/itertools/src/
combinations.rs
14 indices: Vec<usize>, field
24 clone_fields!(indices, pool, first);
32 debug_fmt_fields!(Combinations, indices, pool, first);
41 indices: (0..k).collect(), in combinations()
51 self.indices.len() in k()
74 if k < self.indices.len() { in reset()
75 self.indices.truncate(k); in reset()
77 self.indices[i] = i; in reset()
80 for i in 0..self.indices.len() { in reset()
81 self.indices[i] = i; in reset()
[all …]
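
The `indices` field drives the standard lexicographic scheme: keep k indices into the pool, emit the selection, then advance the rightmost index that still has room and reset everything to its right. A Python sketch of that scheme, assuming a fully materialized pool (the Rust iterator fills its pool lazily):

    def combinations(pool, k):
        pool = tuple(pool)
        n = len(pool)
        if k > n:
            return
        indices = list(range(k))
        while True:
            yield tuple(pool[i] for i in indices)
            # Find the rightmost index that can still advance.
            for i in reversed(range(k)):
                if indices[i] != i + n - k:
                    break
            else:
                return
            indices[i] += 1
            # Reset everything to its right to the smallest valid run.
            for j in range(i + 1, k):
                indices[j] = indices[j - 1] + 1

    print(list(combinations('abcd', 2)))
    # [('a','b'), ('a','c'), ('a','d'), ('b','c'), ('b','d'), ('c','d')]
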
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
BatchRulesScatterOps.cpp
29 static int64_t get_num_leading_nones(ArrayRef<std::optional<Tensor>> indices) { in get_num_leading_nones() argument
31 for (const auto& idx : indices) { in get_num_leading_nones()
42 ArrayRef<std::optional<Tensor>> indices, in get_max_index_logical_dim() argument
45 TORCH_INTERNAL_ASSERT(indices.size() == indices_bdims.size()); in get_max_index_logical_dim()
46 TORCH_INTERNAL_ASSERT(!indices.empty()); in get_max_index_logical_dim()
47 for (const auto i : c10::irange(0, indices.size())) { in get_max_index_logical_dim()
48 const auto& maybe_tensor = indices[i]; in get_max_index_logical_dim()
59 ArrayRef<std::optional<Tensor>> indices, in batchIndices() argument
65 // 1. self is batched, indices/values are not batched in batchIndices()
66 // In this case, we just need to augment indices with a None at the front to in batchIndices()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cuda/
EmbeddingBag.cu
89 const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> indices, in embedding_bag_nbits_rowwise_offsets_kernel() argument
119 : indices.size(0); in embedding_bag_nbits_rowwise_offsets_kernel()
140 int64_t idx = indices[l]; in embedding_bag_nbits_rowwise_offsets_kernel()
192 const at::Tensor& indices, in embedding_bag_byte_impl() argument
200 TORCH_CHECK(indices.is_cuda()); in embedding_bag_byte_impl()
202 TORCH_CHECK(indices.device() == weight.device()) in embedding_bag_byte_impl()
229 "Compressed indices mapping not yet implemented for embedding_bag_byte_rowwise_offsets_cuda"); in embedding_bag_byte_impl()
245 indices.scalar_type(), "embedding_bag_byte_rowwise_offsets_kernel", ([&] { in embedding_bag_byte_impl()
252 indices.packed_accessor32<index_t, 1, RestrictPtrTraits>(), in embedding_bag_byte_impl()
269 const Tensor& indices, in embedding_bag_byte_rowwise_offsets() argument
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/math_ops/
segment_reduction_ops_test.py
48 def _segmentReduce(self, indices, x, op1, op2=None, num_segments=None, argument
52 indices = np.asarray(indices)
54 num_segments = indices[-1] + 1
56 slice_shape = x.shape[indices.ndim:]
57 x_flat = x.reshape((indices.size,) + slice_shape)
58 for i, index in enumerate(indices.ravel()):
109 indices = [i // 3 for i in range(n)]
121 indices, np_x, np_op1, np_op2, initial_value=initial_value)
122 s = tf_op(data=tf_x, segment_ids=indices)
135 indices = constant_op.constant([0, 1, 2, 2], shape=[2, 2])
[all …]
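
_segmentReduce builds the reference result row by row; for the sum case the whole reduction collapses to a single np.add.at call. A compact reference under the same contract (segment ids aligned with the leading axis of x):

    import numpy as np

    def segment_sum(segment_ids, x, num_segments=None):
        segment_ids = np.asarray(segment_ids)
        x = np.asarray(x)
        n = num_segments if num_segments is not None else segment_ids[-1] + 1
        out = np.zeros((n,) + x.shape[1:], dtype=x.dtype)
        np.add.at(out, segment_ids, x)  # accumulates rows sharing a segment id
        return out

    print(segment_sum([0, 0, 1, 2], [1., 2., 3., 4.]))  # [3. 3. 4.]
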
/aosp_15_r20/external/tensorflow/tensorflow/compiler/tests/
scatter_nd_op_test.py
46 def _NumpyScatterNd(ref, indices, updates, op): argument
47 ixdim = indices.shape[-1]
48 num_updates = indices.size // ixdim
53 flat_indices = _FlatInnerDims(indices)
63 def _NumpyUpdate(indices, updates, shape): argument
65 return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
91 indices = np.array(all_indices[:num_updates])
94 indices = indices[:num_updates // 2]
96 indices = np.append(
97 indices, [indices[np.random.randint(num_updates // 2)]], axis=0)
[all …]
/aosp_15_r20/external/mesa3d/src/compiler/nir/
nir_intrinsics.py
46 indices, flags, sysval, bit_sizes): argument
56 - indices: list of constant indicies
66 assert isinstance(indices, list)
67 if indices:
68 assert isinstance(indices[0], Index)
85 self.num_indices = len(indices)
86 self.indices = indices
118 def intrinsic(name, src_comp=[], dest_comp=-1, indices=[], argument
122 indices, flags, sysval, bit_sizes)
125 # Possible indices:
[all …]
/aosp_15_r20/external/angle/util/
geometry_utils.cpp
47 result->indices.clear(); in CreateSphereGeometry()
48 result->indices.reserve(indexCount); in CreateSphereGeometry()
53 result->indices.push_back(static_cast<unsigned short>(i * (sliceCount + 1) + j)); in CreateSphereGeometry()
54 result->indices.push_back(static_cast<unsigned short>((i + 1) * (sliceCount + 1) + j)); in CreateSphereGeometry()
55 result->indices.push_back( in CreateSphereGeometry()
58 result->indices.push_back(static_cast<unsigned short>(i * (sliceCount + 1) + j)); in CreateSphereGeometry()
59 result->indices.push_back( in CreateSphereGeometry()
61 result->indices.push_back(static_cast<unsigned short>(i * (sliceCount + 1) + (j + 1))); in CreateSphereGeometry()
146 result->indices.resize(36); in GenerateCubeGeometry()
147 result->indices[0] = 0; in GenerateCubeGeometry()
[all …]
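
The pushes above triangulate a (stack, slice) vertex grid with sliceCount + 1 vertices per row, two triangles per quad; the truncated middle push is assumed here to be the bottom-right corner. A Python sketch of the same index pattern:

    def sphere_indices(stack_count, slice_count):
        indices = []
        for i in range(stack_count):
            for j in range(slice_count):
                a = i * (slice_count + 1) + j        # top-left of the quad
                b = (i + 1) * (slice_count + 1) + j  # bottom-left of the quad
                indices += [a, b, b + 1]             # first triangle
                indices += [a, b + 1, a + 1]         # second triangle
        return indices

    print(sphere_indices(1, 2))  # [0, 3, 4, 0, 4, 1, 1, 4, 5, 1, 5, 2]
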
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/sparse_ops/
sparse_cross_op_test.py
46 indices = []
51 indices.append([batch_ix, column_ix])
59 constant_op.constant(indices, dtypes.int64, [len(indices), 2]),
60 constant_op.constant(values, value_type, [len(indices)]),
64 self.assertAllEqual(sp1.indices, sp2.indices)
69 self.assertEqual(0, sp.indices.size)
411 self.assertAllEqual([[0, i] for i in range(6)], out.indices)
417 self.assertEqual(0, sp.indices.size)
423 self.assertAllEqual(sp1.indices, sp2.indices)
440 indices = []
[all …]
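
The helper builds SparseTensors by listing one [batch, column] index per value. A minimal standalone equivalent in TF 2.x:

    import tensorflow as tf

    values = [b'a', b'b', b'c']
    indices = [[0, 0], [0, 1], [1, 0]]  # row 0 holds two values, row 1 holds one
    sp = tf.SparseTensor(indices=indices, values=values, dense_shape=[2, 2])
    print(tf.sparse.to_dense(sp, default_value=b''))
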
/aosp_15_r20/external/rust/android-crates-io/crates/indexmap/src/map/
core.rs
26 /// indices mapping from the entry hash to its index.
27 indices: RawTable<usize>, field
65 let indices = self.indices.clone(); in clone() localVariable
66 let mut entries = Vec::with_capacity(indices.capacity()); in clone()
68 IndexMapCore { indices, entries } in clone()
73 self.indices.clone_from_with_hasher(&other.indices, hasher); in clone_from()
75 // If we must resize, match the indices capacity in clone_from()
89 .field("indices", &raw::DebugIndices(&self.indices)) in fmt()
126 indices: RawTable::new(), in new()
134 indices: RawTable::with_capacity(n), in with_capacity()
[all …]
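
The two fields above are the whole design: an insertion-ordered entries vector plus a hash table mapping a key's hash to its position in `entries`. A toy Python model of that layout (a dict stands in for the raw hash table, purely for brevity):

    class ToyIndexMap:
        def __init__(self):
            self.entries = []  # [(key, value)] in insertion order
            self.indices = {}  # key -> index into entries

        def insert(self, key, value):
            if key in self.indices:
                self.entries[self.indices[key]] = (key, value)  # keep position
            else:
                self.indices[key] = len(self.entries)
                self.entries.append((key, value))

        def get_index(self, i):
            return self.entries[i]

    m = ToyIndexMap()
    m.insert('a', 1); m.insert('b', 2); m.insert('a', 3)
    print(m.get_index(0))  # ('a', 3) -- insertion position preserved on overwrite
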
