Lines matching "+full:bit +full:-shift" in the Linux kernel's generic 64-bit division helpers, lib/math/div64.c:

// SPDX-License-Identifier: GPL-2.0
/*
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang <[email protected]>
 * ...
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 * ...
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * ...
 */

/* Not needed on 64bit architectures */

/* In __div64_32(): */
	/* Reduce the thing a bit first */
	...
		rem -= (uint64_t) (high*base) << 32;
	...
			rem -= b;
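
The matching lines above are the heart of a reduce-then-restore division: one native 32/32 divide removes the quotient's upper half, and a shift-and-subtract loop produces the rest. A minimal userspace sketch of the same technique (illustrative names, not the kernel symbol):

#include <stdint.h>

static uint32_t div64_32_sketch(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base, d = 1, res = 0;
	uint32_t high = rem >> 32;

	/* reduce: fold the dividend's high word with one 32/32 divide */
	if (high >= base) {
		high /= base;
		res = (uint64_t)high << 32;
		rem -= (uint64_t)(high * base) << 32;
	}

	/* scale the divisor up to just below the remainder ... */
	while ((int64_t)b > 0 && b < rem) {
		b += b;
		d += d;
	}
	/* ... then walk it back down, accumulating quotient bits */
	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;	/* quotient replaces the dividend, do_div()-style */
	return rem;	/* remainder always fits in 32 bits */
}

After uint64_t n = 1000000007 and div64_32_sketch(&n, 10), n holds 100000000 and the return value is 7, mirroring the do_div() contract.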

/* In div_s64_rem(): */
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
	...
			quotient = -quotient;
	...
			quotient = -quotient;
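
The sign fix-ups above implement C's truncated division on top of an unsigned divide: the remainder inherits the dividend's sign, and the quotient is negated exactly when the operand signs differ. A self-contained check, with div_u64_rem_demo() as a hypothetical stand-in for the kernel's div_u64_rem():

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static uint64_t div_u64_rem_demo(uint64_t dividend, uint32_t divisor,
				 uint32_t *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

static int64_t div_s64_rem_demo(int64_t dividend, int32_t divisor,
				int32_t *remainder)
{
	uint64_t quotient;

	if (dividend < 0) {
		quotient = div_u64_rem_demo(-dividend, abs(divisor),
					    (uint32_t *)remainder);
		*remainder = -*remainder;	/* remainder follows the dividend */
		if (divisor > 0)
			quotient = -quotient;	/* signs differ */
	} else {
		quotient = div_u64_rem_demo(dividend, abs(divisor),
					    (uint32_t *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}

int main(void)
{
	int32_t r;

	assert(div_s64_rem_demo(-7, 2, &r) == -3 && r == -1);
	assert(div_s64_rem_demo(7, -2, &r) == -3 && r == 1);
	return 0;
}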

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 * @remainder:	64bit remainder
 * ...
 * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
 * ...
 */

/* In div64_u64_rem(): */
			quot--;
	...
		*remainder = dividend - quot * divisor;
	...
			*remainder -= divisor;
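
Those lines are the correction tail of an estimate-and-fix divide: shift both operands right until the divisor fits in 32 bits, use a native divide for an estimate that is too large by at most one, step it down, then repair with a single compare. Decrementing before the multiply also guarantees quot * divisor never exceeds the dividend. A hedged userspace sketch (fls32() stands in for the kernel's fls(); divisor must be nonzero):

#include <stdint.h>

static int fls32(uint32_t x)		/* highest set bit, 1-based */
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

static uint64_t div64_u64_rem_sketch(uint64_t dividend, uint64_t divisor,
				     uint64_t *remainder)
{
	uint32_t high = divisor >> 32;
	uint64_t quot;

	if (high == 0) {
		/* divisor already fits in 32 bits */
		quot = dividend / divisor;
		*remainder = dividend % divisor;
	} else {
		/* normalize so the shifted divisor fits in 32 bits */
		int n = fls32(high);

		quot = (dividend >> n) / (divisor >> n);
		if (quot != 0)
			quot--;			/* estimate may be one too large */
		*remainder = dividend - quot * divisor;
		if (*remainder >= divisor) {	/* one too small now: fix up */
			quot++;
			*remainder -= divisor;
		}
	}
	return quot;
}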

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 * ...
 */

/* In div64_u64(): */
			quot--;
		if ((dividend - quot * divisor) >= divisor)
			quot++;
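
Concrete values (ours, chosen for illustration) show why both correction steps are needed; here the raw estimate overshoots by one and the decrement alone already lands on the exact quotient:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t a = 0x200000001ULL;	/* 2^33 + 1 */
	uint64_t b = 0x100000001ULL;	/* 2^32 + 1, so a / b == 1 */
	int n = 1;			/* fls(b >> 32) */

	uint64_t quot = (a >> n) / (b >> n);
	assert(quot == 2);		/* estimate: one too large */

	if (quot != 0)
		quot--;
	if ((a - quot * b) >= b)
		quot++;			/* not taken for these values */

	assert(quot == a / b);
	return 0;
}

With a = 2^33 + 2 instead, the decremented estimate is one short and the final compare puts it back, exercising the other path.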

/* In div64_s64(): */
	return (quot ^ t) - t;
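
Here t is an all-zeros or all-ones mask built from the operands' sign bits, and (quot ^ t) - t is the branchless conditional-negation idiom: identity for t == 0, two's-complement negation (~x + 1) for t == -1. A standalone illustration (it relies on arithmetic right shift of negative values, as the kernel does):

#include <assert.h>
#include <stdint.h>

static int64_t negate_if(int64_t x, int64_t mask)
{
	/* mask == 0: unchanged; mask == -1: (x ^ -1) - (-1) == ~x + 1 == -x */
	return (x ^ mask) - mask;
}

int main(void)
{
	int64_t dividend = -7, divisor = 2;

	/* sign-bit XOR smeared across the whole word: 0 or -1 */
	int64_t t = (dividend ^ divisor) >> 63;
	int64_t quot = 7 / 2;		/* quotient of the magnitudes */

	assert(t == -1);
	assert(negate_if(quot, t) == -3);	/* signs differ: negated */
	assert(negate_if(quot, 0) == 3);	/* signs agree: unchanged */
	return 0;
}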

/* In mul_u64_u64_div_u64(): */
#pragma GCC diagnostic ignored "-Wdiv-by-zero"
	...
	int shift = __builtin_ctzll(c);

	/* a power-of-2 factor of c may shrink the dividend to 64 bits */
	if ((n_hi >> shift) == 0) {
		u64 n = shift ? (n_lo >> shift) | (n_hi << (64 - shift)) : n_lo;

		return div64_u64(n, c >> shift);
		/*
		 * ...
		 * res = div64_u64_rem(n, c >> shift, &rem);
		 * rem = (rem << shift) + (n_lo - (n << shift));
		 * ...
		 */
	}
	...
		return -1;
	...
	shift = __builtin_clzll(c);
	c <<= shift;
	...
	int p = 64 + shift;
	...
		/* long division, one run of quotient bits per iteration */
		shift = carry ? 1 : __builtin_clzll(n_hi);
		if (p < shift)
			break;
		p -= shift;
		n_hi <<= shift;
		n_hi |= n_lo >> (64 - shift);
		n_lo <<= shift;
		...
			n_hi -= c;
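
On compilers that provide a 128-bit integer type the whole routine collapses to a few lines, which also makes a handy reference for testing the shift-based paths above. A sketch assuming GCC/Clang's unsigned __int128, saturating to all-ones on overflow the way the "return -1;" line above appears to (c must be nonzero):

#include <stdint.h>

static uint64_t mul_u64_u64_div_u64_ref(uint64_t a, uint64_t b, uint64_t c)
{
	/* full 128-bit intermediate product: no precision is lost */
	unsigned __int128 q = (unsigned __int128)a * b / c;

	if (q > UINT64_MAX)
		return UINT64_MAX;	/* quotient does not fit in 64 bits */
	return (uint64_t)q;
}

The generic bit-by-bit version exists precisely for configurations where no such wide type or instruction is available; comparing the two over random inputs is a quick way to validate the long-division loop.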