/aosp_15_r20/frameworks/native/services/surfaceflinger/tests/unittests/

VsyncConfigurationTest.cpp
    61  auto offsets = mWorkDuration.getConfigsForRefreshRate(60_Hz);  in TEST_F() local
    63  EXPECT_EQ(currentOffsets, offsets);  in TEST_F()
    64  EXPECT_EQ(offsets.late.sfOffset, 6'166'667);  in TEST_F()
    65  EXPECT_EQ(offsets.late.appOffset, 2'333'334);  in TEST_F()
    67  EXPECT_EQ(offsets.late.sfWorkDuration, 10'500'000ns);  in TEST_F()
    68  EXPECT_EQ(offsets.late.appWorkDuration, 20'500'000ns);  in TEST_F()
    70  EXPECT_EQ(offsets.early.sfOffset, 666'667);  in TEST_F()
    71  EXPECT_EQ(offsets.early.appOffset, 833'334);  in TEST_F()
    73  EXPECT_EQ(offsets.early.sfWorkDuration, 16'000'000ns);  in TEST_F()
    74  EXPECT_EQ(offsets.early.appWorkDuration, 16'500'000ns);  in TEST_F()
    [all …]

/aosp_15_r20/packages/providers/MediaProvider/jni/

RedactionInfoTest.cpp
    59  info.getReadRanges(0, 1000, &out);  // read offsets [0, 1000)  in TEST()
    66  info.getReadRanges(0, 5, &out);  // read offsets [0, 5)  in TEST()
    68  EXPECT_EQ(ReadRange(0, 1, false), out[0]);  // offsets: [0, 1) len = 1  in TEST()
    69  EXPECT_EQ(ReadRange(1, 4, true), out[1]);  // offsets: [1, 5) len = 4  in TEST()
    72  info.getReadRanges(1, 10, &out);  // read offsets [1, 11)  in TEST()
    74  EXPECT_EQ(ReadRange(1, 9, true), out[0]);  // offsets: [1, 10) len = 9  in TEST()
    75  EXPECT_EQ(ReadRange(10, 1, false), out[1]);  // offsets: [10, 11) len = 1  in TEST()
    79  info.getReadRanges(5, 5, &out);  // read offsets [5, 10)  in TEST()
    81  EXPECT_EQ(ReadRange(5, 5, true), out[0]);  // offsets: [5, 10) len = 5  in TEST()
    84  info.getReadRanges(1, 5, &out);  // read offsets [1, 6)  in TEST()
    [all …]

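The assertions above pin down the splitting behavior: a read window is cut into alternating clear and redacted sub-ranges wherever it overlaps a redaction interval. A minimal Python sketch of that behavior, assuming redaction intervals arrive as a sorted, disjoint list of [start, end) pairs; the function name and signature are illustrative, not the MediaProvider JNI API:

    # Hypothetical model of getReadRanges: split a read [pos, pos + size) into
    # alternating clear/redacted sub-ranges, given sorted, disjoint redaction
    # intervals [start, end).
    from typing import List, Tuple

    def get_read_ranges(pos: int, size: int,
                        redactions: List[Tuple[int, int]]) -> List[Tuple[int, int, bool]]:
        """Return (start, length, is_redacted) tuples covering [pos, pos + size)."""
        out = []
        cur, end = pos, pos + size
        for r_start, r_end in redactions:
            if r_end <= cur or r_start >= end:
                continue                      # no overlap with this read window
            if cur < r_start:                 # clear data before the redacted span
                out.append((cur, r_start - cur, False))
            ov_start, ov_end = max(cur, r_start), min(end, r_end)
            out.append((ov_start, ov_end - ov_start, True))
            cur = ov_end
        if cur < end:                         # clear tail after the last overlap
            out.append((cur, end - cur, False))
        return out

    # Mirrors the expectations above with one redaction interval [1, 10):
    assert get_read_ranges(0, 5, [(1, 10)]) == [(0, 1, False), (1, 4, True)]
    assert get_read_ranges(1, 10, [(1, 10)]) == [(1, 9, True), (10, 1, False)]
    assert get_read_ranges(5, 5, [(1, 10)]) == [(5, 5, True)]
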
/aosp_15_r20/external/pytorch/torch/testing/_internal/

triton_utils.py
    27  offsets = block_start + tl.arange(0, BLOCK_SIZE)
    28  mask = offsets < n_elements
    29  x = tl.load(in_ptr0 + offsets, mask=mask)
    30  y = tl.load(in_ptr1 + offsets, mask=mask)
    32  tl.store(out_ptr + offsets, output, mask=mask)
    45  offsets = block_start + tl.arange(0, BLOCK_SIZE)
    46  mask = offsets < n_elements
    47  x = tl.load(in_ptr0 + offsets, mask=mask)
    49  y = tl.load(in_ptr1 + offsets, mask=mask)
    53  tl.store(out_ptr + offsets, output, mask=mask)
    [all …]

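The matched lines are the standard Triton block-indexing idiom: each program instance computes its own offsets vector, masks the ragged tail, and does masked loads and stores. For context, a self-contained version of that pattern as a complete elementwise-add kernel; this is a sketch of the idiom (it assumes triton is installed and a CUDA device is available), not the exact helper defined in triton_utils.py:

    import torch
    import triton
    import triton.language as tl

    @triton.jit
    def add_kernel(in_ptr0, in_ptr1, out_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
        block_start = tl.program_id(axis=0) * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)  # per-lane element index
        mask = offsets < n_elements                       # guard the tail block
        x = tl.load(in_ptr0 + offsets, mask=mask)
        y = tl.load(in_ptr1 + offsets, mask=mask)
        tl.store(out_ptr + offsets, x + y, mask=mask)

    def add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        out = torch.empty_like(x)
        n = out.numel()
        grid = (triton.cdiv(n, 1024),)       # one program per 1024-element block
        add_kernel[grid](x, y, out, n, BLOCK_SIZE=1024)
        return out
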
/aosp_15_r20/external/icu/icu4c/source/common/

ucnv_u7.cpp
    217  int32_t *offsets;  in _UTF7ToUnicodeWithOffsets() local
    241  offsets=pArgs->offsets;  in _UTF7ToUnicodeWithOffsets()
    283  if(offsets!=nullptr) {  in _UTF7ToUnicodeWithOffsets()
    284  *offsets++=sourceIndex++;  in _UTF7ToUnicodeWithOffsets()
    378  if(offsets!=nullptr) {  in _UTF7ToUnicodeWithOffsets()
    379  *offsets++=sourceIndex;  in _UTF7ToUnicodeWithOffsets()
    389  if(offsets!=nullptr) {  in _UTF7ToUnicodeWithOffsets()
    390  *offsets++=sourceIndex;  in _UTF7ToUnicodeWithOffsets()
    400  if(offsets!=nullptr) {  in _UTF7ToUnicodeWithOffsets()
    401  *offsets++=sourceIndex;  in _UTF7ToUnicodeWithOffsets()
    [all …]

ucnvlat1.cpp
    41  int32_t *offsets;  in _Latin1ToUnicodeWithOffsets() local
    49  offsets=pArgs->offsets;  in _Latin1ToUnicodeWithOffsets()
    85  if(offsets!=nullptr) {  in _Latin1ToUnicodeWithOffsets()
    87  offsets[0]=sourceIndex++;  in _Latin1ToUnicodeWithOffsets()
    88  offsets[1]=sourceIndex++;  in _Latin1ToUnicodeWithOffsets()
    89  offsets[2]=sourceIndex++;  in _Latin1ToUnicodeWithOffsets()
    90  offsets[3]=sourceIndex++;  in _Latin1ToUnicodeWithOffsets()
    91  offsets[4]=sourceIndex++;  in _Latin1ToUnicodeWithOffsets()
    92  offsets[5]=sourceIndex++;  in _Latin1ToUnicodeWithOffsets()
    93  offsets[6]=sourceIndex++;  in _Latin1ToUnicodeWithOffsets()
    [all …]

ucnv_u16.cpp
    57  int32_t *offsets;  in _UTF16BEFromUnicodeWithOffsets() local
    78  &pArgs->offsets, -1,  in _UTF16BEFromUnicodeWithOffsets()
    90  offsets=pArgs->offsets;  in _UTF16BEFromUnicodeWithOffsets()
    105  if(offsets!=nullptr) {  in _UTF16BEFromUnicodeWithOffsets()
    106  *offsets++=-1;  in _UTF16BEFromUnicodeWithOffsets()
    107  *offsets++=-1;  in _UTF16BEFromUnicodeWithOffsets()
    108  *offsets++=-1;  in _UTF16BEFromUnicodeWithOffsets()
    109  *offsets++=-1;  in _UTF16BEFromUnicodeWithOffsets()
    126  if(offsets==nullptr) {  in _UTF16BEFromUnicodeWithOffsets()
    153  *offsets++=sourceIndex;  in _UTF16BEFromUnicodeWithOffsets()
    [all …]

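Common to all three converters is ICU's offsets side channel: for every code unit written to the target, the converter records the index of the source unit that produced it, and writes -1 when the output has no single source position (for example the converter-generated units at lines 106-109 above). A pure-Python model of that bookkeeping, as an illustration of the concept rather than the ICU API:

    # Conceptual model of the converter "offsets" array: one source index per
    # output unit, -1 for units the converter synthesizes itself (e.g. a BOM).
    def latin1_to_utf16be_with_offsets(data: bytes, emit_bom: bool = False):
        target = bytearray()
        offsets = []
        if emit_bom:                      # converter-generated bytes: no source
            target += b"\xfe\xff"
            offsets += [-1, -1]
        for source_index, byte in enumerate(data):
            target += bytes([0, byte])    # Latin-1 maps 1:1 into the BMP
            offsets += [source_index, source_index]  # both bytes, same source
        return bytes(target), offsets

    out, offs = latin1_to_utf16be_with_offsets(b"Az", emit_bom=True)
    assert offs == [-1, -1, 0, 0, 1, 1]
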
/aosp_15_r20/external/cronet/third_party/icu/source/common/

ucnv_u7.cpp
    217  int32_t *offsets;  in _UTF7ToUnicodeWithOffsets() local
    241  offsets=pArgs->offsets;  in _UTF7ToUnicodeWithOffsets()
    283  if(offsets!=nullptr) {  in _UTF7ToUnicodeWithOffsets()
    284  *offsets++=sourceIndex++;  in _UTF7ToUnicodeWithOffsets()
    378  if(offsets!=nullptr) {  in _UTF7ToUnicodeWithOffsets()
    379  *offsets++=sourceIndex;  in _UTF7ToUnicodeWithOffsets()
    389  if(offsets!=nullptr) {  in _UTF7ToUnicodeWithOffsets()
    390  *offsets++=sourceIndex;  in _UTF7ToUnicodeWithOffsets()
    400  if(offsets!=nullptr) {  in _UTF7ToUnicodeWithOffsets()
    401  *offsets++=sourceIndex;  in _UTF7ToUnicodeWithOffsets()
    [all …]

ucnvlat1.cpp
    41  int32_t *offsets;  in _Latin1ToUnicodeWithOffsets() local
    49  offsets=pArgs->offsets;  in _Latin1ToUnicodeWithOffsets()
    85  if(offsets!=nullptr) {  in _Latin1ToUnicodeWithOffsets()
    87  offsets[0]=sourceIndex++;  in _Latin1ToUnicodeWithOffsets()
    88  offsets[1]=sourceIndex++;  in _Latin1ToUnicodeWithOffsets()
    89  offsets[2]=sourceIndex++;  in _Latin1ToUnicodeWithOffsets()
    90  offsets[3]=sourceIndex++;  in _Latin1ToUnicodeWithOffsets()
    91  offsets[4]=sourceIndex++;  in _Latin1ToUnicodeWithOffsets()
    92  offsets[5]=sourceIndex++;  in _Latin1ToUnicodeWithOffsets()
    93  offsets[6]=sourceIndex++;  in _Latin1ToUnicodeWithOffsets()
    [all …]

ucnv_u16.cpp
    57  int32_t *offsets;  in _UTF16BEFromUnicodeWithOffsets() local
    78  &pArgs->offsets, -1,  in _UTF16BEFromUnicodeWithOffsets()
    90  offsets=pArgs->offsets;  in _UTF16BEFromUnicodeWithOffsets()
    105  if(offsets!=nullptr) {  in _UTF16BEFromUnicodeWithOffsets()
    106  *offsets++=-1;  in _UTF16BEFromUnicodeWithOffsets()
    107  *offsets++=-1;  in _UTF16BEFromUnicodeWithOffsets()
    108  *offsets++=-1;  in _UTF16BEFromUnicodeWithOffsets()
    109  *offsets++=-1;  in _UTF16BEFromUnicodeWithOffsets()
    126  if(offsets==nullptr) {  in _UTF16BEFromUnicodeWithOffsets()
    153  *offsets++=sourceIndex;  in _UTF16BEFromUnicodeWithOffsets()
    [all …]

/aosp_15_r20/external/deqp/android/cts/main/vk-main-2024-03-01/

glsl.txt
    2363  dEQP-VK.glsl.texture_gather.offsets.implementation_offset.2d.depth32f.base_level.level_1
    2364  dEQP-VK.glsl.texture_gather.offsets.implementation_offset.2d.depth32f.base_level.level_2
    2365  dEQP-VK.glsl.texture_gather.offsets.implementation_offset.2d.depth32f.base_level.sparse_level_1
    2366  dEQP-VK.glsl.texture_gather.offsets.implementation_offset.2d.depth32f.base_level.sparse_level_2
    2367  dEQP-VK.glsl.texture_gather.offsets.implementation_offset.2d.depth32f.filter_mode.min_linear_mag_li…
    2368  dEQP-VK.glsl.texture_gather.offsets.implementation_offset.2d.depth32f.filter_mode.min_linear_mipmap…
    2369  dEQP-VK.glsl.texture_gather.offsets.implementation_offset.2d.depth32f.filter_mode.min_linear_mipmap…
    2370  dEQP-VK.glsl.texture_gather.offsets.implementation_offset.2d.depth32f.filter_mode.min_nearest_mipma…
    2371  dEQP-VK.glsl.texture_gather.offsets.implementation_offset.2d.depth32f.filter_mode.min_nearest_mipma…
    2372  dEQP-VK.glsl.texture_gather.offsets.implementation_offset.2d.depth32f.filter_mode.sparse_min_linear…
    [all …]

/aosp_15_r20/external/pytorch/aten/src/ATen/native/

EmbeddingBag.cpp
    57  static void make_offset2bag(const Tensor &offsets, Tensor& offset2bag) {  in make_offset2bag() argument
    59  … 0, offsets, at::ones_like(offsets, LEGACY_CONTIGUOUS_MEMORY_FORMAT));  // offset2bag = [1 0 1 0 1]  in make_offset2bag()
    68  const Tensor& offsets) {  in promoteIndicesAndOffsets() argument
    70  promoteTypes(offsets.scalar_type(), indices.scalar_type());  in promoteIndicesAndOffsets()
    74  offsets.scalar_type() == commonType ? c10::MaybeOwned<Tensor>::borrowed(offsets)  in promoteIndicesAndOffsets()
    75  … : c10::MaybeOwned<Tensor>::owned(offsets.toType(commonType))};  in promoteIndicesAndOffsets()
    115  const Tensor& /*offsets*/,  in index_select_add()
    162  const index_t* offsets,  in fbgemm_spmdm_report_error_() argument
    165  for (index_t i = offsets[m]; i < offsets[m + 1]; ++i) {  in fbgemm_spmdm_report_error_()
    180  offsets[output_size] == index_size,  in fbgemm_spmdm_report_error_()
    [all …]

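The make_offset2bag snippet hints at how bag membership is derived: scatter a one at each bag's starting offset, then take a cumulative sum so every index position knows its bag id. A plain PyTorch sketch of that construction, assuming offsets[0] == 0 as embedding_bag requires; the real ATen code differs in details (memory format, in-place tricks):

    import torch

    def make_offset2bag(offsets: torch.Tensor, num_indices: int) -> torch.Tensor:
        # Mark each bag's start offset with a 1, e.g. offsets=[0, 2, 3] over
        # 5 indices marks [1 0 1 1 0 0], then cumsum turns marks into bag ids.
        offset2bag = torch.zeros(num_indices + 1, dtype=torch.long)
        offset2bag.index_add_(0, offsets, torch.ones_like(offsets))
        offset2bag[0] -= 1                  # bag ids start at 0 (offsets[0] == 0)
        return offset2bag.cumsum(0)[:-1]    # one bag id per input index

    offsets = torch.tensor([0, 2, 3])
    print(make_offset2bag(offsets, 5))      # tensor([0, 0, 1, 2, 2])
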
/aosp_15_r20/external/pytorch/test/inductor/

test_triton_kernels.py
    281  offsets = block_start + tl.arange(0, BLOCK_SIZE)
    282  mask = offsets < n_elements
    283  x = tl.load(in_ptr0 + offsets, mask=mask)
    285  tl.store(out_ptr + offsets, output, mask=mask)
    350  offsets = block_start + tl.arange(0, BLOCK_SIZE)
    351  mask = offsets < n_elements
    352  x = tl.load(in_ptr0 + offsets, mask=mask)
    354  tl.store(out_ptr + offsets, output, mask=mask)
    453  offsets = block_start + tl.arange(0, BLOCK_SIZE)
    454  mask = offsets < n_elements
    [all …]

/aosp_15_r20/external/icu/icu4j/main/charset/src/main/java/com/ibm/icu/charset/

CharsetSCSU.java
    61  /* use table of predefined fixed offsets for values from fixedThreshold */
    67  /* constant offsets for the 8 static windows */
    79  /* initial offsets for the 8 dynamic (sliding) windows */
    91  /* Table of fixed predefined Offsets */
    114  /* dynamic window offsets, initialize to default values from initialDynamicOffsets */
    236  protected CoderResult decodeLoop(ByteBuffer source, CharBuffer target, IntBuffer offsets,  in decodeLoop() argument
    260  labelType = fastSingle(source, target, offsets, ByteMode);  in decodeLoop()
    264  labelType = singleByteMode(source, target, offsets, ByteMode);  in decodeLoop()
    267  endLoop(source, target, offsets);  in decodeLoop()
    274  labelType = fastSingle(source, target, offsets, UnicodeMode);  in decodeLoop()
    [all …]

CharsetBOCU1.java
    390  …protected CoderResult encodeLoop(CharBuffer source, ByteBuffer target, IntBuffer offsets, boolean …  in encodeLoop() argument
    418  labelType = fastSingle(source, target, offsets);  in encodeLoop()
    421  labelType = getTrail(source, target, offsets);  in encodeLoop()
    424  labelType = regularLoop(source, target, offsets);  in encodeLoop()
    432  private int fastSingle(CharBuffer source, ByteBuffer target, IntBuffer offsets){  in fastSingle() argument
    446  if(offsets!=null){  in fastSingle()
    447  offsets.put(nextSourceIndex++);  in fastSingle()
    456  if(offsets!=null){  in fastSingle()
    457  offsets.put(nextSourceIndex++);  in fastSingle()
    469  private int getTrail(CharBuffer source, ByteBuffer target, IntBuffer offsets){  in getTrail()
    [all …]

/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/

qembeddingbag.cpp
    40  const at::Tensor& offsets,  in embedding_lookup_fallback_impl() argument
    57  auto accessor = offsets.accessor<OffsetType, 1>();  in embedding_lookup_fallback_impl()
    61  for (const auto i : c10::irange(1, offsets.numel())) {  in embedding_lookup_fallback_impl()
    199  const OffsetType* offsets,  in fbgemm_spmdm_report_error_() argument
    202  for (OffsetType i = offsets[m]; i < offsets[m + 1]; ++i) {  in fbgemm_spmdm_report_error_()
    216  offsets[output_size] == index_size,  in fbgemm_spmdm_report_error_()
    228  const at::Tensor& offsets,  in embedding_bag_nbit_impl() argument
    235  TORCH_CHECK(offsets.dim() == 1);  in embedding_bag_nbit_impl()
    237  auto offsets_data = offsets.data_ptr<OffsetType>();  in embedding_bag_nbit_impl()
    261  const int64_t M = offsets.sizes()[0];  in embedding_bag_nbit_impl()
    [all …]

/aosp_15_r20/external/pytorch/caffe2/perfkernels/

embedding_lookup_idx_avx2.cc
    21  const int* offsets,  in EmbeddingLookupIdx_int32_t_float_float__avx2_fma() argument
    50  if (dataInd != offsets[rangeIndex] - offsets[0]) {  in EmbeddingLookupIdx_int32_t_float_float__avx2_fma()
    53  int64_t end_offset = offsets[rangeIndex + 1];  in EmbeddingLookupIdx_int32_t_float_float__avx2_fma()
    54  int64_t length = end_offset - offsets[rangeIndex];  in EmbeddingLookupIdx_int32_t_float_float__avx2_fma()
    55  for (int64_t start = dataInd; dataInd < end_offset - offsets[0];  in EmbeddingLookupIdx_int32_t_float_float__avx2_fma()
    167  if (dataInd != offsets[rangeIndex] - offsets[0]) {  in EmbeddingLookupIdx_int32_t_float_float__avx2_fma()
    170  int64_t end_offset = offsets[rangeIndex + 1];  in EmbeddingLookupIdx_int32_t_float_float__avx2_fma()
    171  int64_t length = end_offset - offsets[rangeIndex];  in EmbeddingLookupIdx_int32_t_float_float__avx2_fma()
    172  for (int64_t start = dataInd; dataInd < end_offset - offsets[0];  in EmbeddingLookupIdx_int32_t_float_float__avx2_fma()
    244  if (dataInd != offsets[rangeIndex] - offsets[0]) {  in EmbeddingLookupIdx_int32_t_float_float__avx2_fma()
    [all …]

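The offsets array here acts as CSR-style row pointers: offsets[i]..offsets[i+1] delimits the slice of indices whose embedding rows are accumulated into output row i, which is exactly the length = end_offset - offsets[rangeIndex] arithmetic above. A scalar numpy reference for the lookup the AVX2/FMA kernel vectorizes (illustrative only: unfused, no per-sample weights or normalization):

    import numpy as np

    def embedding_lookup_idx(weight: np.ndarray, indices: np.ndarray,
                             offsets: np.ndarray) -> np.ndarray:
        out = np.zeros((len(offsets) - 1, weight.shape[1]), dtype=weight.dtype)
        for bag in range(len(offsets) - 1):
            start, end = offsets[bag], offsets[bag + 1]   # length = end - start
            for idx in indices[start:end]:                # sum rows of this bag
                out[bag] += weight[idx]
        return out

    weight = np.arange(12, dtype=np.float32).reshape(4, 3)
    indices = np.array([0, 2, 1, 3])
    offsets = np.array([0, 2, 4])      # two bags: indices[0:2] and indices[2:4]
    print(embedding_lookup_idx(weight, indices, offsets))
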
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cuda/

EmbeddingBag.cu
    90  const PackedTensorAccessor32<index_t, 1, RestrictPtrTraits> offsets,  in embedding_bag_nbits_rowwise_offsets_kernel() argument
    113  int64_t indices_start = offsets[t * B + b];  in embedding_bag_nbits_rowwise_offsets_kernel()
    116  indices_end = offsets[t * B + b + 1];  in embedding_bag_nbits_rowwise_offsets_kernel()
    118  indices_end = (t * B + b + 1) < offsets.size(0) ? offsets[t * B + b + 1]  in embedding_bag_nbits_rowwise_offsets_kernel()
    193  const at::Tensor& offsets,  in embedding_bag_byte_impl() argument
    201  TORCH_CHECK(offsets.is_cuda());  in embedding_bag_byte_impl()
    203  TORCH_CHECK(offsets.device() == weight.device());  in embedding_bag_byte_impl()
    220  const int64_t M = offsets.sizes()[0];  in embedding_bag_byte_impl()
    253  offsets.packed_accessor32<index_t, 1, RestrictPtrTraits>(),  in embedding_bag_byte_impl()
    280  c10::MaybeOwned<at::Tensor> offsets;  in embedding_bag_byte_rowwise_offsets() local
    [all …]

/aosp_15_r20/out/soong/raw-aosp_shiba/59/

59ef42869f9dd65e5e418a856dac3fab075a8226
    99  …art/test/814-large-field-offsets/art-run-test-814-large-field-offsets/android_common/javac/art-run…
    100  …art/test/814-large-field-offsets/art-run-test-814-large-field-offsets/android_common/javac/art-run…
    127  …offsets/art-run-test-814-large-field-offsets/android_common/lint-srcs.list.rsp __SBOX_SANDBOX_DIR_…
    130  …o: "out/soong/.intermediates/art/test/814-large-field-offsets/art-run-test-814-large-field-offsets…
    134  …o: "out/soong/.intermediates/art/test/814-large-field-offsets/art-run-test-814-large-field-offsets…
    138  …o: "out/soong/.intermediates/art/test/814-large-field-offsets/art-run-test-814-large-field-offsets…
    142  …o: "out/soong/.intermediates/art/test/814-large-field-offsets/art-run-test-814-large-field-offsets…
    146  …o: "out/soong/.intermediates/art/test/814-large-field-offsets/art-run-test-814-large-field-offsets…
    150  …o: "out/soong/.intermediates/art/test/814-large-field-offsets/art-run-test-814-large-field-offsets…
    154  …e: "out/soong/.intermediates/art/test/814-large-field-offsets/art-run-test-814-large-field-offsets…
    [all …]

/aosp_15_r20/external/libaom/av1/encoder/x86/

encodetxb_sse2.c
    22  const ptrdiff_t *const offsets,  in load_levels_4x4x5_sse2() argument
    26  level[2] = load_8bit_4x4_to_1_reg_sse2(src + offsets[0], stride);  in load_levels_4x4x5_sse2()
    27  level[3] = load_8bit_4x4_to_1_reg_sse2(src + offsets[1], stride);  in load_levels_4x4x5_sse2()
    28  level[4] = load_8bit_4x4_to_1_reg_sse2(src + offsets[2], stride);  in load_levels_4x4x5_sse2()
    33  const ptrdiff_t *const offsets,  in load_levels_8x2x5_sse2() argument
    37  level[2] = load_8bit_8x2_to_1_reg_sse2(src + offsets[0], stride);  in load_levels_8x2x5_sse2()
    38  level[3] = load_8bit_8x2_to_1_reg_sse2(src + offsets[1], stride);  in load_levels_8x2x5_sse2()
    39  level[4] = load_8bit_8x2_to_1_reg_sse2(src + offsets[2], stride);  in load_levels_8x2x5_sse2()
    44  const ptrdiff_t *const offsets,  in load_levels_16x1x5_sse2() argument
    48  level[2] = _mm_loadu_si128((__m128i *)(src + offsets[0]));  in load_levels_16x1x5_sse2()
    [all …]

/aosp_15_r20/external/pytorch/torch/nested/_internal/

nested_tensor.py
    76  offsets,  argument
    85  assert offsets is not None
    86  assert offsets.ndim == 1
    88  assert values.device == offsets.device
    90  # Query cache for the symint associated with offsets or lengths
    92  ragged_source = offsets if lengths is None else lengths
    95  B = offsets.shape[0] - 1
    129  def __init__(self, values, offsets, *, lengths=None, **kwargs):  argument
    133  self._offsets = offsets
    155  def offsets(self):  member in NestedTensor
    [all …]

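The invariants above describe PyTorch's jagged layout: all components are packed into a single values tensor, and the one-dimensional offsets tensor delimits them, giving a batch size of B = offsets.shape[0] - 1. A minimal illustration of that layout using plain tensors (not the internal NestedTensor class):

    import torch

    values = torch.arange(6.0)               # packed storage for all components
    offsets = torch.tensor([0, 2, 3, 6])     # B = offsets.shape[0] - 1 = 3
    offs = offsets.tolist()
    components = [values[offs[i]:offs[i + 1]] for i in range(len(offs) - 1)]
    # components: [tensor([0., 1.]), tensor([2.]), tensor([3., 4., 5.])]
    assert len(components) == offsets.shape[0] - 1
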
/aosp_15_r20/external/trace-cmd/lib/trace-cmd/

trace-timesync.c
    127  * tracecmd_tsync_get_offsets - Return the calculated time offsets
    130  * @cpu: CPU for which to get the calculated offsets
    131  * @count: Returns the number of calculated time offsets
    132  * @ts: Array of size @count containing timestamps of calculated offsets
    133  * @offsets: array of size @count, containing offsets for each timestamp
    141  long long **offsets, long long **scalings, long long **frac)  in tracecmd_tsync_get_offsets() argument
    148  if (cpu >= tsync_context->cpu_count || !tsync_context->offsets)  in tracecmd_tsync_get_offsets()
    151  *count = tsync_context->offsets[cpu].sync_count;  in tracecmd_tsync_get_offsets()
    153  *ts = tsync_context->offsets[cpu].sync_ts;  in tracecmd_tsync_get_offsets()
    154  if (offsets)  in tracecmd_tsync_get_offsets()
    [all …]

/aosp_15_r20/external/swiftshader/third_party/llvm-16.0/llvm/lib/Target/ARM/

MVEGatherScatterLowering.cpp
    86  // Decompose a ptr into Base and Offsets, potentially using a GEP to return a
    87  // scalar base and vector offsets, or else fallback to using a base of 0 and
    89  Value *decomposePtr(Value *Ptr, Value *&Offsets, int &Scale,
    92  // Check for a getelementptr and deduce base and offsets from it, on success
    93  // returning the base directly and the offsets indirectly using the Offsets
    95  Value *decomposeGEP(Value *&Offsets, FixedVectorType *Ty,
    108  // Create a gather from a base + vector of offsets
    122  // Create a scatter to a base + vector of offsets
    123  Instruction *tryCreateMaskedScatterOffset(IntrinsicInst *I, Value *Offsets,
    134  // QI gathers and scatters can increment their offsets on their own if
    [all …]

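The comments describe splitting a vector of pointers into one scalar base plus a vector of per-lane offsets, which is the addressing mode MVE gathers and scatters consume. A numpy model of what that addressing mode computes (the semantics only, not the LLVM pass):

    import numpy as np

    base = np.arange(100, 116)            # the scalar base register points here
    offsets = np.array([0, 5, 3, 12])     # per-lane offsets (possibly scaled)

    gathered = base[offsets]              # gather:  lane i <- base[offsets[i]]
    print(gathered)                       # [100 105 103 112]

    out = np.zeros_like(base)
    out[offsets] = gathered               # scatter: out[offsets[i]] <- lane i
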
/aosp_15_r20/external/pytorch/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/

embedding_bag.py
    101  offsets = kwargs.get("offsets")
    114  offsets = offsets[:-1]
    122  offsets,
    138  offsets,
    167  offsets = kwargs.get("offsets")
    178  if offsets is not None and not isinstance(offsets, torch.Tensor):
    179  raise TypeError("offsets need to be torch.Tensor")
    203  if offsets is not None and len(input.size()) != 1:
    205  if len(input.size()) == 1 and offsets is None:
    206  raise ValueError("offsets is required for 1D input")
    [all …]

/aosp_15_r20/external/libaom/av1/encoder/arm/

encodetxb_neon.c
    218  const ptrdiff_t *const offsets,  in load_levels_4x4x5() argument
    222  level[2] = load_8bit_4x4_to_1_reg(&src[offsets[0]], stride);  in load_levels_4x4x5()
    223  level[3] = load_8bit_4x4_to_1_reg(&src[offsets[1]], stride);  in load_levels_4x4x5()
    224  level[4] = load_8bit_4x4_to_1_reg(&src[offsets[2]], stride);  in load_levels_4x4x5()
    228  const ptrdiff_t *const offsets,  in load_levels_8x2x5() argument
    232  level[2] = load_8bit_8x2_to_1_reg(&src[offsets[0]], stride);  in load_levels_8x2x5()
    233  level[3] = load_8bit_8x2_to_1_reg(&src[offsets[1]], stride);  in load_levels_8x2x5()
    234  level[4] = load_8bit_8x2_to_1_reg(&src[offsets[2]], stride);  in load_levels_8x2x5()
    239  const ptrdiff_t *const offsets,  in load_levels_16x1x5() argument
    243  level[2] = load_8bit_16x1_to_1_reg(&src[offsets[0]], stride);  in load_levels_16x1x5()
    [all …]

/aosp_15_r20/external/OpenCL-CTS/test_conformance/basic/

test_global_work_offsets.cpp
    46  int check_results( size_t threads[], size_t offsets[], cl_int outputA[], cl_int outputB[], cl_int o…  in check_results() argument
    48  …ze_t offsettedSizes[ 3 ] = { threads[ 0 ] + offsets[ 0 ], threads[ 1 ] + offsets[ 1 ], threads[ 2 …  in check_results()
    74  if( ( x >= offsets[ 0 ] ) && ( y >= offsets[ 1 ] ) && ( z >= offsets[ 2 ] ) )  in check_results()
    104  size_t diffs[3] = { ( offsets[ 0 ] > threads[ 0 ] ? 0 : threads[ 0 ] - offsets[ 0 ] ),  in check_results()
    105  ( offsets[ 1 ] > threads[ 1 ] ? 0 : threads[ 1 ] - offsets[ 1 ] ),  in check_results()
    106  ( offsets[ 2 ] > threads[ 2 ] ? 0 : threads[ 2 ] - offsets[ 2 ] ) };  in check_results()
    125  size_t threads[] = {1,1,1}, localThreads[] = {1,1,1}, offsets[] = {0,0,0};  in test_global_work_offsets() local
    160  // Randomize some offsets  in test_global_work_offsets()
    162  offsets[ j ] = random_in_range( 0, MAX_OFFSET, seed );  in test_global_work_offsets()
    164  log_info( "\tTesting %ld,%ld,%ld (%ld,%ld,%ld) with offsets (%ld,%ld,%ld)...\n",  in test_global_work_offsets()
    [all …]

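check_results validates that with a non-zero global work offset, each work-item's get_global_id(d) is shifted by offsets[d], so ids cover [offsets[d], offsets[d] + threads[d]) in every dimension. A pure-Python model of the id set an offsetted NDRange produces (conceptual only, no OpenCL runtime involved):

    from itertools import product

    def ndrange_global_ids(sizes, offsets):
        """Yield the get_global_id() tuple of every work-item in the NDRange."""
        ranges = [range(off, off + size) for size, off in zip(sizes, offsets)]
        return list(product(*ranges))

    ids = ndrange_global_ids(sizes=(2, 2, 1), offsets=(3, 0, 7))
    assert ids == [(3, 0, 7), (3, 1, 7), (4, 0, 7), (4, 1, 7)]
    # Every id satisfies x >= offsets[0], y >= offsets[1], z >= offsets[2],
    # mirroring the check_results() predicate above.
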