// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 *
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
 * The fast case for (n>>32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * assembly versions such as arch/powerpc/lib/div64.S and
 * arch/sh/lib/div64.S, or by defining a preprocessor macro in
 * arch/*/include/asm/div64.h.
 */

#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/math.h>
#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/log2.h>

/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32

#ifndef __div64_32
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base;
	uint64_t res, d = 1;
	uint32_t high = rem >> 32;

	/* Reduce the thing a bit first */
	res = 0;
	if (high >= base) {
		high /= base;
		res = (uint64_t) high << 32;
		rem -= (uint64_t) (high*base) << 32;
	}

	/*
	 * Align the divisor just below the remaining dividend; the
	 * (int64_t)b > 0 test stops before the top bit is shifted out.
	 */
	while ((int64_t)b > 0 && b < rem) {
		b = b+b;
		d = d+d;
	}

	/* Classic shift-and-subtract long division */
	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;
}
EXPORT_SYMBOL(__div64_32);
#endif

#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 quotient;

	if (dividend < 0) {
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
		if (divisor > 0)
			quotient = -quotient;
	} else {
		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif

/*
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 * @remainder:	64bit remainder
 *
 * This implementation is comparable to the algorithm used by div64_u64.
 * But this operation, which includes math for calculating the remainder,
 * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
 * systems.
 */
#ifndef div64_u64_rem
u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		u32 rem32;
		quot = div_u64_rem(dividend, divisor, &rem32);
		*remainder = rem32;
	} else {
		int n = fls(high);
		/*
		 * Estimate the quotient from the top bits; the estimate
		 * is at most one above the true quotient, so adjust down
		 * first and correct upwards afterwards.
		 */
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;

		*remainder = dividend - quot * divisor;
		if (*remainder >= divisor) {
			quot++;
			*remainder -= divisor;
		}
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64_rem);
#endif
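
/*
 * Usage sketch (illustrative only, not part of this file; the variable
 * names are hypothetical):
 *
 *	u64 ns = 1234567890123ULL, rem;
 *	u64 secs = div64_u64_rem(ns, NSEC_PER_SEC, &rem);
 *
 * Afterwards ns == secs * NSEC_PER_SEC + rem with rem < NSEC_PER_SEC.
 * On 32-bit kernels this avoids the plain 64-bit '/' and '%' operators,
 * which the compiler would otherwise expand to libgcc calls.
 */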

/*
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 *
 * This implementation is a modified version of the algorithm proposed
 * in the book 'Hacker's Delight'. The original source and full proof
 * can be found at the URL below and are available for use without
 * restriction:
 *
 * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
 */
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		quot = div_u64(dividend, divisor);
	} else {
		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;
		if ((dividend - quot * divisor) >= divisor)
			quot++;
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif

#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
	s64 quot, t;

	quot = div64_u64(abs(dividend), abs(divisor));
	/* t is 0 if the signs agree, -1 if they differ */
	t = (dividend ^ divisor) >> 63;

	/* (quot ^ t) - t conditionally negates the quotient */
	return (quot ^ t) - t;
}
EXPORT_SYMBOL(div64_s64);
#endif

#endif /* BITS_PER_LONG == 32 */

/*
 * Iterative div/mod for use when the dividend is not expected to be much
 * bigger than the divisor.
 */
#ifndef iter_div_u64_rem
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
#endif

#if !defined(mul_u64_add_u64_div_u64) || defined(test_mul_u64_add_u64_div_u64)
u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
{
#if defined(__SIZEOF_INT128__) && !defined(test_mul_u64_add_u64_div_u64)

	/* native 64x64=128 bits multiplication */
	u128 prod = (u128)a * b + c;
	u64 n_lo = prod, n_hi = prod >> 64;

#else

	/* perform a 64x64=128 bits multiplication manually */
	u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
	u64 x, y, z;

	/*
	 * Since (x-1)(x-1) + 2(x-1) == x*x - 1 (with x = 2^32), two u32
	 * values can be added to the product of two u32 without
	 * overflowing a u64.
	 */
	x = (u64)a_lo * b_lo + (u32)c;
	y = (u64)a_lo * b_hi + (u32)(c >> 32);
	y += (u32)(x >> 32);
	z = (u64)a_hi * b_hi + (u32)(y >> 32);
	y = (u64)a_hi * b_lo + (u32)y;
	z += (u32)(y >> 32);
	x = (y << 32) + (u32)x;

	u64 n_lo = x, n_hi = z;

#endif

	if (!n_hi)
		return div64_u64(n_lo, d);

	if (unlikely(n_hi >= d)) {
		/* trigger runtime exception if divisor is zero */
		if (d == 0) {
			unsigned long zero = 0;

			OPTIMIZER_HIDE_VAR(zero);
			return ~0UL/zero;
		}
		/* overflow: result is unrepresentable in a u64 */
		return ~0ULL;
	}

	int shift = __builtin_ctzll(d);

	/* try reducing the fraction in case the dividend becomes <= 64 bits */
	if ((n_hi >> shift) == 0) {
		u64 n = shift ? (n_lo >> shift) | (n_hi << (64 - shift)) : n_lo;

		return div64_u64(n, d >> shift);
		/*
		 * The remainder value if needed would be:
		 *	res = div64_u64_rem(n, d >> shift, &rem);
		 *	rem = (rem << shift) + (n_lo - (n << shift));
		 */
	}

	/* Do the full 128 by 64 bits division */

	shift = __builtin_clzll(d);
	d <<= shift;

	int p = 64 + shift;
	u64 res = 0;
	bool carry;

	/*
	 * Bit-at-a-time long division: normalise n_hi against d, subtract
	 * when possible and set the quotient bit at weight p.
	 */
	do {
		carry = n_hi >> 63;
		shift = carry ? 1 : __builtin_clzll(n_hi);
		if (p < shift)
			break;
		p -= shift;
		n_hi <<= shift;
		n_hi |= n_lo >> (64 - shift);
		n_lo <<= shift;
		if (carry || (n_hi >= d)) {
			n_hi -= d;
			res |= 1ULL << p;
		}
	} while (n_hi);
	/* The remainder value if needed would be n_hi << p */

	return res;
}
#if !defined(test_mul_u64_add_u64_div_u64)
EXPORT_SYMBOL(mul_u64_add_u64_div_u64);
#endif
#endif
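
/*
 * Usage sketch for mul_u64_add_u64_div_u64() (illustrative only; the
 * names below are hypothetical). Because a * b + c is formed in a full
 * 128-bit intermediate, a round-to-nearest scaling such as
 *
 *	u64 scaled = mul_u64_add_u64_div_u64(count, numerator,
 *					     denominator / 2, denominator);
 *
 * remains exact even when count * numerator overflows a u64; passing
 * c == 0 gives the plain truncating count * numerator / denominator.
 */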