/*-
 * Copyright (c) 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2014-2015 Mellanox Technologies, Ltd. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUXKPI_LINUX_MATH64_H
#define	_LINUXKPI_LINUX_MATH64_H

#include <sys/stdint.h>
#include <sys/systm.h>

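/*
 * do_div() mimics the Linux macro of the same name: it divides the
 * 64-bit variable `n' by the 32-bit value `base' in place and
 * evaluates to the 32-bit remainder.  The divisor must be non-zero.
 *
 * A minimal usage sketch (hypothetical values):
 *
 *	uint64_t ns = 1500000000ULL;
 *	uint32_t rem = do_div(ns, 1000000000U);
 *	// now ns == 1 and rem == 500000000
 */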
#define	do_div(n, base) ({			\
	uint32_t __base = (base);		\
	uint32_t __rem;				\
	__rem = ((uint64_t)(n)) % __base;	\
	(n) = ((uint64_t)(n)) / __base;		\
	__rem;					\
})

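/*
 * As in Linux's <linux/math64.h>, the "div64_*" helpers take a 64-bit
 * divisor while the plain "div_*" helpers take a 32-bit divisor; the
 * "_rem" variants additionally return the remainder through a pointer.
 */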
static inline uint64_t
div64_u64_rem(uint64_t dividend, uint64_t divisor, uint64_t *remainder)
{

	*remainder = dividend % divisor;
	return (dividend / divisor);
}

static inline int64_t
div64_s64(int64_t dividend, int64_t divisor)
{

	return (dividend / divisor);
}

static inline uint64_t
div64_u64(uint64_t dividend, uint64_t divisor)
{

	return (dividend / divisor);
}

#define	div64_ul(x, y)	div64_u64((x), (y))

static inline uint64_t
div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
{

	*remainder = dividend % divisor;
	return (dividend / divisor);
}

static inline int64_t
div_s64(int64_t dividend, int32_t divisor)
{

	return (dividend / divisor);
}

static inline uint64_t
div_u64(uint64_t dividend, uint32_t divisor)
{

	return (dividend / divisor);
}

static inline uint64_t
mul_u32_u32(uint32_t a, uint32_t b)
{

	return ((uint64_t)a * b);
}

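/*
 * Divide, rounding the quotient up.  Note that the intermediate sum
 * wraps around if dividend > UINT64_MAX - (divisor - 1), so callers
 * are expected to stay clear of that range.
 */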
static inline uint64_t
div64_u64_round_up(uint64_t dividend, uint64_t divisor)
{
	return ((dividend + divisor - 1) / divisor);
}

#define	DIV64_U64_ROUND_UP(...) \
	div64_u64_round_up(__VA_ARGS__)

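/*
 * Compute (x * y) / div without a 128-bit intermediate by splitting
 * x into (x / div) * div + (x % div).  The result is exact as long
 * as (x % div) * y fits in 64 bits.
 */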
static inline uint64_t
mul_u64_u32_div(uint64_t x, uint32_t y, uint32_t div)
{
	const uint64_t rem = x % div;

	return ((x / div) * y + (rem * y) / div);
}

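/*
 * Compute (x * y) / z without a 128-bit intermediate.  The loop walks
 * the bits of x from least to most significant while repeatedly
 * doubling y modulo z, accumulating the quotient in `res' and the
 * partial remainder in `rem'.
 */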
static inline uint64_t
mul_u64_u64_div_u64(uint64_t x, uint64_t y, uint64_t z)
{
	uint64_t res, rem;
	uint64_t x1, y1, y1z;

	res = rem = 0;
	x1 = x;
	y1z = y / z;
	y1 = y - y1z * z;

	/*
	 * INVARIANT: x * y = res * z + rem + (y1 + y1z * z) * x1
	 * INVARIANT: y1 < z
	 * INVARIANT: rem < z
	 */
	while (x1 > 0) {
		/* Handle low bit. */
		if (x1 & 1) {
			x1 &= ~1;
			res += y1z;
			rem += y1;
			if ((rem < y1) || (rem >= z)) {
				res += 1;
				rem -= z;
			}
		}

		/* Shift x1 right and (y1 + y1z * z) left */
		x1 >>= 1;
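		/*
		 * Doubling y1 may wrap around or reach z; the
		 * "y1 * 2 < y1" test catches the unsigned overflow,
		 * in which case one extra z is folded into y1z.
		 */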
		if ((y1 * 2 < y1) || (y1 * 2 >= z)) {
			y1z = y1z * 2 + 1;
			y1 = y1 * 2 - z;
		} else {
			y1z *= 2;
			y1 *= 2;
		}
	}

	KASSERT(res * z + rem == x * y, ("%s: res %ju * z %ju + rem %ju != "
	    "x %ju * y %ju", __func__, (uintmax_t)res, (uintmax_t)z,
	    (uintmax_t)rem, (uintmax_t)x, (uintmax_t)y));
	KASSERT(rem < z, ("%s: rem %ju >= z %ju\n", __func__,
	    (uintmax_t)rem, (uintmax_t)z));

	return (res);
}

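/*
 * Compute (x * y) >> shift by splitting x into 32-bit halves, so no
 * 128-bit intermediate is needed.  The result is exact for
 * shift <= 32; larger shifts would make the "32 - shift" term shift
 * by more than the operand width, which is undefined behavior.
 */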
static inline uint64_t
mul_u64_u32_shr(uint64_t x, uint32_t y, unsigned int shift)
{
	uint32_t hi, lo;
	hi = x >> 32;
	lo = x & 0xffffffff;

	return (mul_u32_u32(lo, y) >> shift) +
		(mul_u32_u32(hi, y) << (32 - shift));
}

#endif /* _LINUXKPI_LINUX_MATH64_H */