/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/ |
ScatterGatherKernel.cpp |
  218 for (const auto nelem C10_UNUSED : c10::irange(n)) { in operator()() local
  235 for (const auto nelem C10_UNUSED : c10::irange(n)) { in operator()() local
  309 for (const auto nelem C10_UNUSED : c10::irange(n)) { in operator()() local
  330 for (const auto nelem C10_UNUSED : c10::irange(n)) { in operator()() local
  405 for (const auto nelem C10_UNUSED : c10::irange(n)) { in operator()() local
  426 for (const auto nelem C10_UNUSED : c10::irange(n)) { in operator()() local
  500 for (const auto nelem C10_UNUSED : c10::irange(n)) { in operator()() local
  521 for (const auto nelem C10_UNUSED : c10::irange(n)) { in operator()() local
  596 for (const auto nelem C10_UNUSED : c10::irange(n)) { in operator()() local
  617 for (const auto nelem C10_UNUSED : c10::irange(n)) { in operator()() local
|
IndexKernel.cpp |
  81 for (const auto elem C10_UNUSED : c10::irange(n)) { in cpu_take_put_kernel() local
  206 for (const auto elem C10_UNUSED : c10::irange(n)) { in index_fill_kernel() local
  232 for (const auto elem C10_UNUSED: c10::irange(n)) { in index_fill_kernel() local
  265 for (const auto elem C10_UNUSED : c10::irange(n)) { in index_copy_kernel() local
  288 for (const auto elem C10_UNUSED : c10::irange(n)) { in index_copy_kernel() local
  477 for (const auto j C10_UNUSED : c10::irange(size1)) { in cpu_hflip_vec() local
  546 for (const auto j C10_UNUSED : c10::irange(size1)) { in cpu_vflip_memcpy() local
|
CopyKernel.cpp |
  85 for (const auto it C10_UNUSED : c10::irange(size1)) { in reduced_float_copy_kernel() local
  154 for (const auto it C10_UNUSED : c10::irange(size1)) { in reduced_float_copy_kernel() local
|
TensorCompareKernel.cpp |
  86 for (const auto i C10_UNUSED : c10::irange(n)) { in compare_base_kernel() local
  256 for (const auto k C10_UNUSED : c10::irange(n)) { in mode_kernel_impl() local
|
FunctionOfAMatrixUtilsKernel.cpp |
  33 for (const auto elem C10_UNUSED : c10::irange(n)) { in _compute_linear_combination_cpu_kernel() local
|
UnfoldBackwardKernel.cpp |
  79 for (const auto elem C10_UNUSED : c10::irange(nelems)) { in _unfold_backward_internal_kernel() local
|
Reduce.h |
  73 for (const auto j C10_UNUSED : c10::irange(n)) { in UNARY_OUTER_LOOP() local
|
SortingKernel.cpp |
  56 for (const auto i C10_UNUSED : c10::irange(n)) { in _dim_apply() local
|
Unfold2d.cpp |
  356 for (const auto k C10_UNUSED: c10::irange(start, end)) { in unfolded2d_copy_channels_last() local
|
ReduceOpsKernel.cpp |
  65 for (const auto i C10_UNUSED : c10::irange(n)) { in cpu_cum_base_kernel() local
|
DistanceOpsKernel.cpp |
  398 for (const auto l C10_UNUSED : c10::irange(d)) { in backward_down_column_cdist() local
|
UpSampleKernel.cpp |
  736 for (const auto j C10_UNUSED : c10::irange(interp_size)) { in init_indices_weights() local
  1050 for (const auto j C10_UNUSED : c10::irange(interp_size)) { in init_indices_weights() local
|
UpSampleKernelAVXAntialias.h |
  105 for (const auto i C10_UNUSED : c10::irange(num_pixels)) { in pack_rgb() local
|
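Every hit in the cpu/ kernels above is the same idiom: a range-for over c10::irange(n) whose induction variable is never read, because the loop body advances its own data pointers and only needs to run n times; the C10_UNUSED annotation keeps -Wunused-variable (and the MSVC equivalents) quiet. A minimal standalone sketch of that idiom, using standard [[maybe_unused]] and a plain counted loop in place of the c10 macro and c10::irange; the function and parameter names here are illustrative only, not PyTorch code:

    #include <cstddef>

    // Copy n elements through raw pointers the way the strided kernels above do:
    // the body steps the pointers itself, so the loop variable is intentionally
    // unused. [[maybe_unused]] stands in for C10_UNUSED in this sketch.
    void copy_n_strided(const float* src, std::ptrdiff_t src_stride,
                        float* dst, std::ptrdiff_t dst_stride, std::size_t n) {
      for ([[maybe_unused]] std::size_t elem = 0; elem < n; ++elem) {
        *dst = *src;
        src += src_stride;
        dst += dst_stride;
      }
    }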
/aosp_15_r20/external/pytorch/aten/src/ATen/native/ |
layer_norm.cpp |
  53 for (const auto idx C10_UNUSED : c10::irange(axis, input.dim())) { in layer_norm_with_mean_rstd_out() local
  258 for (const auto idx C10_UNUSED : c10::irange(axis, input.dim())) { in math_native_layer_norm() local
|
LinearAlgebraUtils.h |
  244 for (const auto elem C10_UNUSED : c10::irange(nelems)) { in batch_iterator_with_broadcasting() local
|
/aosp_15_r20/external/pytorch/c10/macros/ |
Macros.h |
  154 #define C10_UNUSED __pragma(warning(suppress : 4100 4101)) macro
  156 #define C10_UNUSED __attribute__((__unused__)) macro
|
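The two Macros.h hits are the definition itself: on MSVC, C10_UNUSED expands to a __pragma that suppresses warnings C4100 (unreferenced formal parameter) and C4101 (unreferenced local variable); on GCC/Clang it expands to __attribute__((__unused__)). A hedged re-creation of that compiler dispatch under a placeholder name (MY_UNUSED is illustrative, not a c10 macro), with the annotation placed after the variable name exactly as in the hits above:

    // Sketch of the dispatch shown in Macros.h, not the verbatim c10 header.
    #if defined(_MSC_VER)
    #define MY_UNUSED __pragma(warning(suppress : 4100 4101))
    #else
    #define MY_UNUSED __attribute__((__unused__))
    #endif

    int main() {
      // Same placement as the kernel hits: the annotation follows the name, so
      // the declaration compiles warning-free under -Wall/-Wextra or /W4 even
      // though `scratch` is never read afterwards.
      int scratch MY_UNUSED = 42;
      return 0;
    }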
/aosp_15_r20/external/pytorch/c10/cuda/ |
CUDAException.cpp |
  26 auto error_unused C10_UNUSED = cudaGetLastError(); in c10_cuda_check_implementation() local
|
CUDAFunctions.cpp |
  25 cudaError_t last_err C10_UNUSED = cudaGetLastError(); in device_count_impl() local
|
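The two c10/cuda hits use C10_UNUSED differently: cudaGetLastError() is called purely for its side effect of clearing CUDA's sticky error state, and its return value is bound to a variable that is deliberately never read. A hedged sketch of that shape, not the real c10_cuda_check_implementation or device_count_impl; the function name below is illustrative, and the standard attribute stands in for the macro:

    #include <cuda_runtime.h>

    // Return the CUDA device count, treating any query failure as "zero devices".
    // On failure, cudaGetLastError() is invoked only to reset the sticky error so
    // that later, unrelated CUDA calls do not keep reporting this one; the result
    // is intentionally unused, hence the annotation.
    int device_count_or_zero() {
      int count = 0;
      cudaError_t err = cudaGetDeviceCount(&count);
      if (err != cudaSuccess) {
        [[maybe_unused]] cudaError_t last_err = cudaGetLastError();
        return 0;
      }
      return count;
    }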
/aosp_15_r20/external/pytorch/torch/csrc/distributed/rpc/ |
tensorpipe_agent.cpp |
  266 for (const auto laneIdx C10_UNUSED : c10::irange(kNumUvThreads)) { in makeMultiplexedUvChannel() local
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/kernels/ |
QuantizedOpKernels.cpp |
  3904 for (const auto b C10_UNUSED : c10::irange(batches)) { local
  3905 for (const auto e C10_UNUSED : c10::irange(elements_per_channel)) { local
  3935 for (const auto b C10_UNUSED : c10::irange(batches)) { local
  3936 for (const auto c C10_UNUSED : c10::irange(channels)) { local
|