/*-
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2014-2015 Mellanox Technologies, Ltd. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUXKPI_LINUX_MATH64_H
#define _LINUXKPI_LINUX_MATH64_H

#include <sys/stdint.h>
#include <sys/systm.h>

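/*
 * Linux-style do_div(): divide the 64-bit lvalue (n) by a 32-bit base in
 * place and yield the 32-bit remainder as the value of the statement
 * expression.  Note that (n) is evaluated more than once.
 */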
#define do_div(n, base) ({ \
	uint32_t __base = (base); \
	uint32_t __rem; \
	__rem = ((uint64_t)(n)) % __base; \
	(n) = ((uint64_t)(n)) / __base; \
	__rem; \
})

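/*
 * 64-bit by 64-bit division; returns the quotient and stores the
 * remainder through *remainder.
 */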
static inline uint64_t
div64_u64_rem(uint64_t dividend, uint64_t divisor, uint64_t *remainder)
{

	*remainder = dividend % divisor;
	return (dividend / divisor);
}

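/*
 * Plain 64-bit division helpers: div64_s64(), div64_u64() and
 * div64_ul().  On FreeBSD these reduce to the native division
 * operators; Linux provides them as named helpers so that callers also
 * work on platforms without native 64-bit division.
 */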
static inline int64_t
div64_s64(int64_t dividend, int64_t divisor)
{

	return (dividend / divisor);
}

static inline uint64_t
div64_u64(uint64_t dividend, uint64_t divisor)
{

	return (dividend / divisor);
}

#define div64_ul(x, y) div64_u64((x), (y))

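/*
 * div_*() variants: divide a 64-bit dividend by a 32-bit divisor.
 * div_u64_rem() additionally stores the 32-bit remainder through
 * *remainder.
 */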
static inline uint64_t
div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
{

	*remainder = dividend % divisor;
	return (dividend / divisor);
}

static inline int64_t
div_s64(int64_t dividend, int32_t divisor)
{

	return (dividend / divisor);
}

static inline uint64_t
div_u64(uint64_t dividend, uint32_t divisor)
{

	return (dividend / divisor);
}

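/*
 * Widening 32 x 32 -> 64 bit multiply; cannot overflow.
 */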
static inline uint64_t
mul_u32_u32(uint32_t a, uint32_t b)
{

	return ((uint64_t)a * b);
}

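/*
 * 64-bit division rounding the quotient up.  The caller must ensure
 * that (dividend + divisor - 1) does not overflow.
 */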
static inline uint64_t
div64_u64_round_up(uint64_t dividend, uint64_t divisor)
{
	return ((dividend + divisor - 1) / divisor);
}

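/*
 * Round x1 up to the next multiple of x2.
 */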
static inline uint64_t
roundup_u64(uint64_t x1, uint32_t x2)
{
	return (div_u64(x1 + x2 - 1, x2) * x2);
}

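/*
 * Upper-case spelling used by some Linux callers; map it onto the
 * function above.
 */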
#define DIV64_U64_ROUND_UP(...) \
	div64_u64_round_up(__VA_ARGS__)

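/*
 * Compute (x * y) / div without a 128-bit intermediate by splitting x
 * into (x / div) * div + (x % div).  The caller must ensure that
 * (x % div) * y does not overflow 64 bits.
 */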
static inline uint64_t
mul_u64_u32_div(uint64_t x, uint32_t y, uint32_t div)
{
	const uint64_t rem = x % div;

	return ((x / div) * y + (rem * y) / div);
}

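/*
 * Compute (x * y) / z by binary long division, consuming x one bit at a
 * time so that no 128-bit intermediate is needed.  The result is only
 * meaningful when the true quotient fits in 64 bits.
 */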
static inline uint64_t
mul_u64_u64_div_u64(uint64_t x, uint64_t y, uint64_t z)
{
	uint64_t res, rem;
	uint64_t x1, y1, y1z;

	res = rem = 0;
	x1 = x;
	y1z = y / z;
	y1 = y - y1z * z;

	/*
	 * INVARIANT: x * y = res * z + rem + (y1 + y1z * z) * x1
	 * INVARIANT: y1 < z
	 * INVARIANT: rem < z
	 */
	while (x1 > 0) {
		/* Handle low bit. */
		if (x1 & 1) {
			x1 &= ~1;
			res += y1z;
			rem += y1;
			/*
			 * The addition overflowed or rem reached z;
			 * carry one z out of rem into res.
			 */
			if ((rem < y1) || (rem >= z)) {
				res += 1;
				rem -= z;
			}
		}

		/* Shift x1 right and (y1 + y1z * z) left. */
		x1 >>= 1;
		/*
		 * If doubling y1 overflows or reaches z, subtract z and
		 * carry the bit into y1z.
		 */
		if ((y1 * 2 < y1) || (y1 * 2 >= z)) {
			y1z = y1z * 2 + 1;
			y1 = y1 * 2 - z;
		} else {
			y1z *= 2;
			y1 *= 2;
		}
	}

	KASSERT(res * z + rem == x * y, ("%s: res %ju * z %ju + rem %ju != "
	    "x %ju * y %ju", __func__, (uintmax_t)res, (uintmax_t)z,
	    (uintmax_t)rem, (uintmax_t)x, (uintmax_t)y));
	KASSERT(rem < z, ("%s: rem %ju >= z %ju\n", __func__,
	    (uintmax_t)rem, (uintmax_t)z));

	return (res);
}

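/*
 * Compute (x * y) >> shift by splitting x into 32-bit halves.  Valid
 * for shift values up to 32; larger shifts would make the left-shift
 * count below exceed the width of uint64_t, which is undefined.
 */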
static inline uint64_t
mul_u64_u32_shr(uint64_t x, uint32_t y, unsigned int shift)
{
	uint32_t hi, lo;

	hi = x >> 32;
	lo = x & 0xffffffff;

	return (mul_u32_u32(lo, y) >> shift) +
	    (mul_u32_u32(hi, y) << (32 - shift));
}

#endif /* _LINUXKPI_LINUX_MATH64_H */