/aosp_15_r20/external/rust/android-crates-io/crates/libz-sys/src/zlib/contrib/puff/puff.c
    52: * 1.3 20 Mar 2002 - Go back to lengths for puff() parameters [Gailly]
    94: #define MAXCODES (MAXLCODES+MAXDCODES) /* maximum codes lengths to read */
   213: * a negative value if there is an error. If all of the lengths are zero, i.e.
   220: * a simple integer ordering of codes of the same lengths. Hence below the
   309: * Given the list of code lengths length[0..n-1] representing a canonical
   320: * codes past the end of the incomplete lengths.
   351: (h->count[length[symbol]])++; /* assumes lengths are within bounds */ in construct()
   355: /* check for an over-subscribed or incomplete set of lengths */ in construct()
   393: * - Literals, lengths, and the end-of-block code are combined into a single
   397: * - There are 256 possible lengths (3..258), and so 29 symbols are not enough
   [all …]

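The construct() lines above tally how many codes exist at each bit length and then reject an over-subscribed or incomplete set. A minimal Python sketch of that completeness check, under my own names (puff also builds the symbol table, which is omitted here):

    def check_code_lengths(lengths, max_bits=15):
        """Return 'complete' or 'incomplete', or raise on an over-subscribed set."""
        count = [0] * (max_bits + 1)
        for length in lengths:      # count codes of each length (length 0 = unused symbol)
            count[length] += 1
        left = 1                    # codes still available at the current length
        for bits in range(1, max_bits + 1):
            left <<= 1              # one more bit doubles the available codes
            left -= count[bits]     # ...minus the codes actually used at this length
            if left < 0:
                raise ValueError("over-subscribed set of code lengths")
        return "complete" if left == 0 else "incomplete"

    # e.g. the RFC 1951 example lengths (2, 1, 3, 3) form a complete code:
    assert check_code_lengths([2, 1, 3, 3]) == "complete"
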
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/ragged/ragged_tensor_shape.py
    42: lengths. `RaggedTensorDynamicShape` records the size of each ragged
    43: dimension using an integer vector containing the slice lengths for all
   291: def broadcast_dimension(self, axis, lengths): argument
   292: """Returns a shape that is broadcast-compatible with self & lengths.
   294: * If dimension[axis] is uniform and lengths is a scalar, the check
   295: that either lengths==1 or axis==1 or lengths==axis, and tile
   296: dimension[axis] with tf.where(lengths==axis, 1, axis) repeats.
   298: * If dimension[axis] is uniform and lengths is a vector, then check
   300: lengths repeats. (we can skip tiling if we statically know that
   303: * If dimension[axis] is ragged and lengths is a scalar, then check
   [all …]

/aosp_15_r20/external/tensorflow/tensorflow/python/ops/ragged/dynamic_ragged_shape_test.py
    52: lengths: Sequence[Union[int, Sequence[int]]]) -> Sequence[RowPartition]:
    59: lengths: a list of integers and lists of integers.
    65: _) = dynamic_ragged_shape._to_row_partitions_and_nvals_from_lengths(lengths)
    70: values, lengths: Sequence[Union[int, Sequence[int]]]) -> RaggedTensor:
    71: """Specify a ragged tensor (or tensor) from lengths and values."""
    72: row_partitions = _to_row_partitions_from_lengths(lengths)
   102: lengths: Sequence[Union[int,
   105: if not lengths:
   107: next_length = lengths[0]
   109: return _num_elements_of_lengths_with_rows(next_length * rows, lengths[1:])
   [all …]

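These helpers turn a list of row lengths into RowPartition objects for test fixtures. The public API exposes the same idea directly; a small sketch using tf.RaggedTensor.from_row_lengths (the outputs in the comments are what I would expect, not copied from the test):

    import tensorflow as tf

    values = tf.constant([1, 2, 3, 4, 5])
    lengths = tf.constant([2, 0, 3])                        # rows with 2, 0 and 3 values
    rt = tf.RaggedTensor.from_row_lengths(values, lengths)
    print(rt)                 # <tf.RaggedTensor [[1, 2], [], [3, 4, 5]]>
    print(rt.row_lengths())   # [2 0 3]
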
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/ragged/ragged_from_tensor_op_test.py
    41: RaggedTensor.from_tensor(dt, lengths=[1, 0, 3]), [[5], [], [6, 0, 0]])
    50: RaggedTensor.from_tensor(dt_3d, lengths=([2, 0, 3], [1, 1, 2, 0, 1])),
   120: 'lengths': [1],
   126: 'lengths': [0],
   132: 'lengths': [0, 1, 2],
   138: 'lengths': [0, 0, 0],
   144: 'lengths': [2, 2],
   150: 'lengths': [7, 8],  # lengths > ncols: truncated to ncols
   156: 'lengths': [-2, -1],  # lengths < 0: treated as zero
   163: 'lengths': [0, 0],
   [all …]

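These cases exercise RaggedTensor.from_tensor with an explicit lengths argument. A short illustration of the behaviour the inline comments describe; the dense matrix here is my own choice, picked to be consistent with the expected value shown at line 41:

    import tensorflow as tf

    dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
    # Row i keeps only its first lengths[i] values.
    print(tf.RaggedTensor.from_tensor(dt, lengths=[1, 0, 3]))
    # <tf.RaggedTensor [[5], [], [6, 0, 0]]>

    # Out-of-range lengths are clamped: values above ncols are truncated,
    # negative values behave like zero (cf. the cases at lines 150 and 156).
    print(tf.RaggedTensor.from_tensor(dt, lengths=[7, -2, 2]))
    # <tf.RaggedTensor [[5, 7, 0], [], [6, 0]]>
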
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py
    65: dimensions whose slices may have different lengths. For example, the inner
    67: since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths.
    71: differing slice lengths).
   802: for lengths in reversed(nested_row_lengths):
   803: result = cls.from_row_lengths(result, lengths, validate=validate)
  1249: """Returns the lengths of the rows in this ragged tensor.
  1255: axis: An integer constant indicating the axis whose row lengths should be
  1269: >>> print(rt.row_lengths())  # lengths of rows in rt
  1271: >>> print(rt.row_lengths(axis=2))  # lengths of axis=2 rows.
  1577: lengths=None, argument
   [all …]

/aosp_15_r20/external/pytorch/test/test_segment_reductions.py
    55: lengths = torch.tensor(lengths_arr, device=device, dtype=lengths_dtype)
    56: # generate offsets from lengths
    57: zeros_shape = list(lengths.shape)
    59: offsets = torch.cat((lengths.new_zeros(zeros_shape), lengths), -1).cumsum_(-1)
    69: for mode in ['lengths', 'offsets']:
    74: if (mode == 'lengths'):
    75: segment_reduce_kwargs['lengths'] = lengths
   128: lengths = [1, 2, 3, 0]
   170: lengths,
   185: lengths = [0, 0]
   [all …]

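The test derives offsets from lengths by prepending a zero and taking a running sum, so segment i covers data[offsets[i]:offsets[i+1]]. A small eager sketch of that bookkeeping plus a reference "sum" reduction; this only illustrates the semantics, it is not the kernel under test:

    import torch

    lengths = torch.tensor([1, 2, 3, 0])
    # Prepend a zero and take the running sum: offsets has one more entry than
    # lengths, and segment i is data[offsets[i]:offsets[i + 1]].
    offsets = torch.cat((lengths.new_zeros(1), lengths)).cumsum(-1)
    print(offsets)                     # tensor([0, 1, 3, 6, 6])

    data = torch.arange(6, dtype=torch.float32)
    off = offsets.tolist()
    # Eager reference for a lengths-based "sum" reduction (empty segments give 0).
    sums = torch.stack([data[s:e].sum() for s, e in zip(off[:-1], off[1:])])
    print(sums)                        # tensor([ 0.,  3., 12.,  0.])
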
/aosp_15_r20/external/pytorch/aten/src/ATen/native/SegmentReduce.cpp
   133: const Tensor& lengths, in _segment_reduce_lengths_cpu_kernel() argument
   136: // data and lengths should be contiguous from the call to .contiguous in segment_reduce_kernel in _segment_reduce_lengths_cpu_kernel()
   138: TORCH_CHECK(lengths.is_contiguous(), "Expected lengths to be contiguous."); in _segment_reduce_lengths_cpu_kernel()
   139: // reduction axis should always be the last dimension of lengths in _segment_reduce_lengths_cpu_kernel()
   140: axis = lengths.dim() - 1; in _segment_reduce_lengths_cpu_kernel()
   141: int64_t segment_count = lengths.size(axis); in _segment_reduce_lengths_cpu_kernel()
   142: int64_t lengths_stride_axis = lengths.stride(axis); in _segment_reduce_lengths_cpu_kernel()
   147: AT_DISPATCH_INDEX_TYPES(lengths.scalar_type(), "_segment_reduce_lengths_cpu_kernel1", [&]() { in _segment_reduce_lengths_cpu_kernel()
   148: const auto* lengths_data = lengths.const_data_ptr<index_t>(); in _segment_reduce_lengths_cpu_kernel()
   162: // data and lengths should be contiguous from the call to .contiguous in segment_reduce_kernel in _segment_reduce_offsets_cpu_kernel()
   [all …]

/aosp_15_r20/external/pytorch/torch/nn/utils/rnn.py
    58: the batch, not the varying sequence lengths passed to
   282: lengths: Union[Tensor, List[int]],
   306: lengths (Tensor or list(int)): list of sequence lengths of each batch
   317: if not isinstance(lengths, torch.Tensor):
   321: "sequence lengths. The tracer cannot track the data flow of Python "
   323: "the trace incorrect for any other combination of lengths.",
   326: lengths = torch.as_tensor(lengths, dtype=torch.int64, device="cpu")
   328: lengths = lengths.to(dtype=torch.int64)
   333: lengths, sorted_indices = torch.sort(lengths, descending=True)
   338: data, batch_sizes = _VF._pack_padded_sequence(input, lengths, batch_first)
   [all …]

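pack_padded_sequence takes a padded batch together with the per-sequence lengths, sorts by length when needed, and records how many sequences are still active at each time step. A short usage sketch; shapes and lengths here are arbitrary:

    import torch
    from torch.nn.utils.rnn import pack_padded_sequence

    # Three padded sequences of lengths 4, 2 and 1 (batch_first=True -> [B, T, F]).
    padded = torch.zeros(3, 4, 5)
    lengths = torch.tensor([4, 2, 1])

    packed = pack_padded_sequence(padded, lengths, batch_first=True, enforce_sorted=True)
    print(packed.batch_sizes)   # tensor([3, 2, 1, 1]): active sequences per time step
    print(packed.data.shape)    # torch.Size([7, 5]): sum(lengths) rows, padding dropped
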
/aosp_15_r20/external/pytorch/test/nn/test_packed_sequence.py
    47: lengths = [len(i) for i in ordered]
    49: return padded_tensor, lengths
    56: padded, lengths = self._padded_sequence(input_type)
    58: padded, lengths, enforce_sorted=enforce_sorted
    90: padded, lengths = self._padded_sequence(torch.FloatTensor)
    91: max_length = max(lengths)
    92: packed = rnn_utils.pack_padded_sequence(padded, lengths)
   118: self.assertEqual(lengths, lengths_out)
   136: padded, lengths = self._padded_sequence(torch.IntTensor)
   138: padded, lengths, enforce_sorted=enforce_sorted
   [all …]

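The round trip this test exercises: pack a padded batch with its lengths, unpack it, and check that the lengths and the valid time steps come back unchanged while padding positions are zeroed. A compact sketch of the same check:

    import torch
    from torch.nn.utils import rnn as rnn_utils

    padded = torch.randn(5, 3, 2)        # [T=5, B=3, F=2], batch_first=False
    lengths = [5, 3, 2]                  # descending, as enforce_sorted=True requires
    packed = rnn_utils.pack_padded_sequence(padded, lengths)
    unpacked, lengths_out = rnn_utils.pad_packed_sequence(packed)

    assert lengths_out.tolist() == lengths
    assert torch.equal(unpacked[:, 0], padded[:, 0])          # full-length sequence unchanged
    assert torch.equal(unpacked[2:, 2], torch.zeros(3, 2))    # padding beyond length 2 is zeroed
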
/aosp_15_r20/external/pytorch/torch/nested/_internal/nested_tensor.py
    61: # tensors' varying lengths.
    78: lengths=None, argument
    90: # Query cache for the symint associated with offsets or lengths
    92: ragged_source = offsets if lengths is None else lengths
    96: if lengths is not None:
    97: assert B == lengths.shape[0]
   129: def __init__(self, values, offsets, *, lengths=None, **kwargs): argument
   134: self._lengths = lengths
   158: def lengths(self): member in NestedTensor
   257: lengths = inner_tensors.get("_lengths", None)
   [all …]

/aosp_15_r20/external/toybox/toys/posix/wc.c
    35: static void show_lengths(unsigned long *lengths, char *name)
    51: printf(" %*ld"+first, space, lengths[i]);
    54: if (i==4) TT.totals[i] = maxof(TT.totals[i], lengths[i]);
    55: else TT.totals[i] += lengths[i];
    64: unsigned long word = 0, lengths[ARRAY_LEN(TT.totals)] = {0}, line = 0; in do_wc() local
    72: lengths[3] = st.st_size; in do_wc()
    86: if (toybuf[pos]=='\n') lengths[0]++; in do_wc()
    87: lengths[3]++; in do_wc()
    96: lengths[2]++; in do_wc()
   100: if (line>lengths[4]) lengths[4] = line; in do_wc()
   [all …]

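do_wc() keeps all of its counters in a single lengths[] array and show_lengths() prints or accumulates them. A rough Python analogue, assuming the conventional layout of that array (lines, words, characters, bytes, longest line) rather than reading it off the truncated source:

    def wc_counts(data: bytes):
        """Rough analogue of toybox wc's counter array:
        [lines, words, characters, bytes, longest line]."""
        text = data.decode("utf-8", errors="replace")
        lines = text.count("\n")
        words = len(text.split())
        longest = max((len(line) for line in text.split("\n")), default=0)
        return [lines, words, len(text), len(data), longest]

    print(wc_counts(b"hello wide world\nbye\n"))   # [2, 4, 21, 21, 16]
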
/aosp_15_r20/external/mbedtls/tests/suites/test_suite_nist_kw.data
    27: NIST KW lengths #1 KW plaintext OK (2 to 2^54 - 1 semiblocks)
    30: NIST KW lengths #2 KWP plaintext OK (1 to 2^32 - 1 octets)
    33: NIST KW lengths #3 KW ciphertext OK (3 to 2^54 semiblocks)
    36: NIST KW lengths #4 KWP ciphertext OK (2 to 2^29 semiblocks)
    39: NIST KW lengths #5 KW plaintext too short (2 to 2^54 - 1 semiblocks)
    42: NIST KW lengths #6 KWP plaintext too short (1 to 2^32 - 1 octets)
    45: NIST KW lengths #8 KW ciphertext too short (3 to 2^54 semiblocks)
    48: NIST KW lengths #9 KWP ciphertext too short (2 to 2^29 semiblocks)
    51: NIST KW lengths #10 KW plaintext not a multiple of semiblocks.
    54: NIST KW lengths #11 KW ciphertext not a multiple of semiblocks.
   [all …]

/aosp_15_r20/external/openthread/third_party/mbedtls/repo/tests/suites/test_suite_nist_kw.data
    27: NIST KW lengths #1 KW plaintext OK (2 to 2^54 - 1 semiblocks)
    30: NIST KW lengths #2 KWP plaintext OK (1 to 2^32 - 1 octets)
    33: NIST KW lengths #3 KW ciphertext OK (3 to 2^54 semiblocks)
    36: NIST KW lengths #4 KWP ciphertext OK (2 to 2^29 semiblocks)
    39: NIST KW lengths #5 KW plaintext too short (2 to 2^54 - 1 semiblocks)
    42: NIST KW lengths #6 KWP plaintext too short (1 to 2^32 - 1 octets)
    45: NIST KW lengths #8 KW ciphertext too short (3 to 2^54 semiblocks)
    48: NIST KW lengths #9 KWP ciphertext too short (2 to 2^29 semiblocks)
    51: NIST KW lengths #10 KW plaintext not a multiple of semiblocks.
    54: NIST KW lengths #11 KW ciphertext not a multiple of semiblocks.
   [all …]

/aosp_15_r20/external/chromium-trace/catapult/third_party/polymer/components/web-animations-js/src/shadow-handler.js
    20: lengths: [], property
    31: shadow.lengths.push(result[0]);
    54: while (left.lengths.length < Math.max(left.lengths.length, right.lengths.length))
    55: left.lengths.push({px: 0});
    56: while (right.lengths.length < Math.max(left.lengths.length, right.lengths.length))
    57: right.lengths.push({px: 0});
    66: for (var i = 0; i < left.lengths.length; i++) {
    67: var mergedDimensions = scope.mergeDimensions(left.lengths[i], right.lengths[i], i == 2);
    94: return {inset: inset, color: [0, 0, 0, 0], lengths: [{px: 0}, {px: 0}, {px: 0}, {px: 0}]};

/aosp_15_r20/external/puffin/src/huffman_table.h
    23: // Permutations of input Huffman code lengths (used only to read
    35: // same as |kLengthBases| except for the the distances instead of lengths.
    39: // Same as |kLengthExtraBits| except for distances instead of lengths.
    47: // Checks the lengths of Huffman length arrays for correctness
    49: // |num_lit_len| IN The number of literal/lengths code lengths
    50: // |num_distance| IN The number of distance code lengths
    51: // |num_codes| IN The number of code lengths for reading Huffman table.
    56: LOG(ERROR) << "The lengths of the dynamic Huffman table are invalid: " in CheckHuffmanArrayLengths()
   201: // Initializes the Huffman codes from an array of lengths.
   203: // |lens| IN The input array of code lengths.
   [all …]

/aosp_15_r20/external/tensorflow/tensorflow/python/data/kernel_tests/bucket_by_sequence_length_test.py
    85: lengths = [8, 13, 25, 35]
    90: # Expected sequence lengths of the individual batches.
   101: for length, batch_size, bucket_elements in zip(lengths, batch_sizes,
   110: # Calculate the expected occurrence of individual sequence lengths.
   118: for bucket_elements, length in zip(n_bucket_elements, lengths):
   161: for length, batch_size, bucket_elements in zip(lengths, batch_sizes,
   179: for l in lengths:
   181: # sequence lengths.
   194: # Make sure the generated sequence lengths appear as often as expected.
   197: "The generated sequence lengths did not match! "
   [all …]

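The test feeds sequences with the lengths listed above through bucketing so that each batch is padded only to its own bucket, not to the global maximum. A sketch using the tf.data API (tf.data.Dataset.bucket_by_sequence_length; in older TF the same transform lives under tf.data.experimental). The exact order in which batches appear depends on when each bucket fills, so the printed shapes are indicative only:

    import tensorflow as tf

    # Elements are 1-D sequences of varying length.
    ds = tf.data.Dataset.from_generator(
        lambda: ([1] * n for n in [8, 13, 25, 35, 8, 25]),
        output_signature=tf.TensorSpec(shape=[None], dtype=tf.int32))

    bucketed = ds.bucket_by_sequence_length(
        element_length_func=lambda seq: tf.shape(seq)[0],
        bucket_boundaries=[10, 20, 30],        # buckets: <10, 10-19, 20-29, >=30
        bucket_batch_sizes=[2, 2, 2, 2])

    for batch in bucketed:
        print(batch.shape)   # e.g. (2, 8), (2, 25), (1, 13), (1, 35): padded per bucket
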
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/utils/rnn.h
    35: /// the batch, not the varying sequence lengths passed to
   170: /// longest sequence (equal to ``lengths[0]``), ``B`` is the batch size, and
   188: /// lengths (Tensor): list of sequences lengths of each batch element.
   200: Tensor lengths,
   203: lengths = lengths.to(kInt64);
   208: std::tie(lengths, sorted_indices) =
   209: torch::sort(lengths, /*dim=*/-1, /*descending=*/true);
   216: torch::_pack_padded_sequence(input, lengths, batch_first);
   245: /// containing the list of lengths of each sequence in the batch.
   264: auto [padded_output, lengths] = torch::_pad_packed_sequence(
   [all …]

/aosp_15_r20/external/pytorch/torch/utils/data/dataset.py
   428: lengths: Sequence[Union[int, float]],
   432: Randomly split a dataset into non-overlapping new datasets of given lengths.
   435: the lengths will be computed automatically as
   438: After computing the lengths, if there are any remainders, 1 count will be
   439: distributed in round-robin fashion to the lengths
   453: lengths (sequence): lengths or fractions of splits to be produced
   456: if math.isclose(sum(lengths), 1) and sum(lengths) <= 1:
   458: for i, frac in enumerate(lengths):
   466: # add 1 to all the lengths in round-robin fashion until the remainder is 0
   470: lengths = subset_lengths
   [all …]

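random_split accepts either absolute lengths or fractions: fractions summing to 1 are floored to integer lengths first, and any remainder is handed out one element at a time in round-robin order. A quick illustration:

    import torch
    from torch.utils.data import TensorDataset, random_split

    ds = TensorDataset(torch.arange(10))

    # Fractions: floor(frac * len) gives 7, 2, 1 here, with no remainder left over.
    train, val, test = random_split(ds, [0.7, 0.2, 0.1],
                                    generator=torch.Generator().manual_seed(42))
    print([len(s) for s in (train, val, test)])   # [7, 2, 1]

    # Integer lengths must add up to the dataset size exactly.
    a, b = random_split(ds, [6, 4])
    print(len(a), len(b))                          # 6 4
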
/aosp_15_r20/frameworks/base/libs/hwui/jni/Path.cpp
   253: static void addMove(std::vector<SkPoint>& segmentPoints, std::vector<float>& lengths, in addMove() argument
   256: if (!lengths.empty()) { in addMove()
   257: length = lengths.back(); in addMove()
   260: lengths.push_back(length); in addMove()
   263: static void addLine(std::vector<SkPoint>& segmentPoints, std::vector<float>& lengths, in addLine() argument
   267: lengths.push_back(0); in addLine()
   271: float length = lengths.back() + SkPoint::Distance(segmentPoints.back(), toPoint); in addLine()
   273: lengths.push_back(length); in addLine()
   330: std::vector<float>& lengths, float errorSquared, bool doubleCheckDivision) { in addBezier() argument
   367: addLine(segmentPoints, lengths, iter->second); in addBezier()
   [all …]

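addMove/addLine build two parallel vectors: the polyline points of the approximated path and the cumulative arc length up to each point. A conceptual Python sketch of that accumulation, not a transcription of the JNI code:

    import math

    def add_line(points, lengths, to_point):
        """Append a point and the running path length up to it (cf. addLine above)."""
        if not points:
            lengths.append(0.0)
        else:
            px, py = points[-1]
            lengths.append(lengths[-1] + math.hypot(to_point[0] - px, to_point[1] - py))
        points.append(to_point)

    points, lengths = [], []
    for p in [(0, 0), (3, 4), (3, 10)]:
        add_line(points, lengths, p)
    print(lengths)   # [0.0, 5.0, 11.0]
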
/aosp_15_r20/external/mesa3d/src/gallium/frontends/rusticl/api/program.rs
   101: lengths: *const usize, in create_program_with_source()
   116: // "lengths argument is an array with the number of chars in each string in create_program_with_source()
   117: // (the string length). If an element in lengths is zero, its accompanying in create_program_with_source()
   118: // string is null-terminated. If lengths is NULL, all strings in the in create_program_with_source()
   126: // Take either an iterator over the given slice or - if the `lengths` in create_program_with_source()
   137: let lengths: Box<dyn Iterator<Item = _>> = if lengths.is_null() { in create_program_with_source() localVariable
   142: let lengths = lengths as *const Option<NonZeroUsize>; in create_program_with_source() localVariable
   143: Box::new(unsafe { slice::from_raw_parts(lengths, count as usize) }.iter()) in create_program_with_source()
   149: for (&string_ptr, len_opt) in iter::zip(srcs, lengths) { in create_program_with_source()
   180: lengths: *const usize, in create_program_with_binary()
   [all …]

/aosp_15_r20/external/rust/android-crates-io/crates/libz-sys/src/zlib/doc/rfc1951.txt
   207: "deflate" format limits distances to 32K bytes and lengths to 258
   211: Each type of value (literals, distances, and lengths) in the
   213: tree for literals and lengths and a separate code tree for distances.
   289: lengths, not a sequence of bytes. We must therefore specify
   318: sequences of different lengths, but a parser can always parse
   367: various alphabets must not exceed certain maximum code lengths.
   369: lengths from symbol frequencies. Again, see Chapter 5,
   413: just by giving the bit lengths of the codes for each symbol of
   416: by the sequence of bit lengths (2, 1, 3, 3). The following
   418: from most- to least-significant bit. The code lengths are
   [all …]

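Lines 413-418 introduce the RFC's key idea: a Huffman code can be transmitted as nothing but the per-symbol bit lengths, e.g. an ABCD alphabet described by (2, 1, 3, 3). A Python transcription of the code-assignment procedure RFC 1951 gives in section 3.2.2, reproducing that example:

    def canonical_codes(lengths):
        """Derive canonical Huffman codes from per-symbol bit lengths (RFC 1951, 3.2.2)."""
        max_bits = max(lengths)
        bl_count = [0] * (max_bits + 1)
        for l in lengths:
            if l:
                bl_count[l] += 1
        # Smallest code value for each code length.
        next_code = [0] * (max_bits + 1)
        code = 0
        for bits in range(1, max_bits + 1):
            code = (code + bl_count[bits - 1]) << 1
            next_code[bits] = code
        # Assign codes to symbols in order; codes of equal length are consecutive.
        codes = []
        for l in lengths:
            if l:
                codes.append(format(next_code[l], "0{}b".format(l)))
                next_code[l] += 1
            else:
                codes.append(None)
        return codes

    # The RFC's example alphabet ABCD with bit lengths (2, 1, 3, 3):
    print(canonical_codes([2, 1, 3, 3]))   # ['10', '0', '110', '111']
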
/aosp_15_r20/external/rust/android-crates-io/crates/libz-sys/src/zlib-ng/doc/rfc1951.txt
   207: "deflate" format limits distances to 32K bytes and lengths to 258
   211: Each type of value (literals, distances, and lengths) in the
   213: tree for literals and lengths and a separate code tree for distances.
   289: lengths, not a sequence of bytes. We must therefore specify
   318: sequences of different lengths, but a parser can always parse
   367: various alphabets must not exceed certain maximum code lengths.
   369: lengths from symbol frequencies. Again, see Chapter 5,
   413: just by giving the bit lengths of the codes for each symbol of
   416: by the sequence of bit lengths (2, 1, 3, 3). The following
   418: from most- to least-significant bit. The code lengths are
   [all …]

/aosp_15_r20/external/google-java-format/core/src/main/java/com/google/googlejavaformat/java/CommandLineOptions.java
    33: private final ImmutableList<Integer> lengths; field in CommandLineOptions
    52: ImmutableList<Integer> lengths, in CommandLineOptions() argument
    69: this.lengths = lengths; in CommandLineOptions()
    99: /** Character offsets for partial formatting, paired with {@code lengths}. */
   104: /** Partial formatting region lengths, paired with {@code offsets}. */
   105: ImmutableList<Integer> lengths() { in lengths() method in CommandLineOptions
   106: return lengths; in lengths()
   167: return !lines().isEmpty() || !offsets().isEmpty() || !lengths().isEmpty(); in isSelection()
   183: private final ImmutableList.Builder<Integer> lengths = ImmutableList.builder(); field in CommandLineOptions.Builder
   217: lengths.add(length); in addLength()
   [all …]

/aosp_15_r20/external/pytorch/torch/nested/__init__.py
   280: >>> lengths = torch.tensor([3, 2, 2, 1, 5], dtype=torch.int64)
   282: >>> nt_narrowed = torch.nested.narrow(narrow_base, 1, starts, lengths, layout=torch.jagged)
   319: lengths: Optional[Tensor] = None,
   327: The offsets / lengths metadata determines how this dimension is split into batch elements
   335: * lengths: Lengths of the individual batch elements; shape == batch_size. Example: [2, 1, 3]
   339: Note that it can be useful to provide both offsets and lengths. This describes a nested tensor
   348: with the offsets / lengths metadata used to distinguish batch elements.
   350: lengths (optional :class:`torch.Tensor`): Lengths of the batch elements of shape B.
   375: >>> lengths = torch.tensor([1, 1, 2])
   377: >>> nt = nested_tensor_from_jagged(values, offsets, lengths)
   [all …]

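nested_tensor_from_jagged builds a jagged-layout nested tensor from a flat values tensor plus offsets, and optionally lengths when a batch element should use only a prefix of its region. A sketch of both forms; it assumes a recent PyTorch with the jagged layout, and the shapes in the comments are what I expect rather than verified output:

    import torch
    from torch.nested import nested_tensor_from_jagged

    values = torch.arange(12.).reshape(6, 2)      # all rows of all batch elements, stacked
    offsets = torch.tensor([0, 2, 3, 6])          # element i is values[offsets[i]:offsets[i+1]]
    nt = nested_tensor_from_jagged(values, offsets)
    print([t.shape for t in nt.unbind()])         # [(2, 2), (1, 2), (3, 2)]

    # With lengths too, element i uses only values[offsets[i]:offsets[i]+lengths[i]],
    # leaving a gap before the next offset (a non-contiguous jagged layout).
    lengths = torch.tensor([1, 1, 2])
    nt2 = nested_tensor_from_jagged(values, offsets, lengths)
    print([t.shape for t in nt2.unbind()])        # [(1, 2), (1, 2), (2, 2)]
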
/aosp_15_r20/prebuilts/go/linux-x86/src/strings/compare_test.go
    63: lengths := make([]int, 0) // lengths to test in ascending order
    65: lengths = append(lengths, i)
    67: lengths = append(lengths, 256, 512, 1024, 1333, 4095, 4096, 4097)
    70: lengths = append(lengths, 65535, 65536, 65537, 99999)
    73: n := lengths[len(lengths)-1]
    77: for _, len := range lengths {