// SPDX-License-Identifier: GPL-2.0

#include "eytzinger.h"

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address must
 * be aligned as well unless CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size;

	(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}
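
/*
 * Illustrative example (added comment, not in the original): with 12-byte
 * elements, is_aligned(base, 12, 8) is false because 12 is not a multiple of
 * 8, while is_aligned(base, 12, 4) holds whenever base is 4-byte aligned (or
 * unconditionally with CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), so such
 * elements end up being swapped in 32-bit chunks by the sort below.
 */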

/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory. This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory. This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible. If they're not, emulating
 * one requires base+index+4 addressing which x86 has but most other
 * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads,
 * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
 * x32 ABI). Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_r_func_t)0
#define SWAP_WORDS_32 (swap_r_func_t)1
#define SWAP_BYTES (swap_r_func_t)2
#define SWAP_WRAPPER (swap_r_func_t)3

struct wrapper {
	cmp_func_t cmp;
	swap_func_t swap_func;
};

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
{
	if (swap_func == SWAP_WRAPPER) {
		((const struct wrapper *)priv)->swap_func(a, b, (int)size);
		return;
	}

	if (swap_func == SWAP_WORDS_64)
		swap_words_64(a, b, size);
	else if (swap_func == SWAP_WORDS_32)
		swap_words_32(a, b, size);
	else if (swap_func == SWAP_BYTES)
		swap_bytes(a, b, size);
	else
		swap_func(a, b, (int)size, priv);
}
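
#if 0
/*
 * Illustrative sketch only (added, not used anywhere in this file): do_swap()
 * treats the small sentinel values above as requests for the built-in swap
 * routines and calls anything else as a real function.  swap_u64s() is a
 * hypothetical caller-supplied swap_r_func_t that would reach the final
 * "else" branch.
 */
static void swap_u64s(void *a, void *b, int size, const void *priv)
{
	u64 t = *(u64 *)a;

	*(u64 *)a = *(u64 *)b;
	*(u64 *)b = t;
}

static void do_swap_example(void *a, void *b)
{
	do_swap(a, b, sizeof(u64), SWAP_WORDS_64, NULL);	/* built-in 64-bit swap */
	do_swap(a, b, sizeof(u64), swap_u64s, NULL);		/* direct call to swap_u64s() */
}
#endif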

#define _CMP_WRAPPER ((cmp_r_func_t)0L)

static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
	if (cmp == _CMP_WRAPPER)
		return ((const struct wrapper *)priv)->cmp(a, b);
	return cmp(a, b, priv);
}

static inline int eytzinger1_do_cmp(void *base1, size_t n, size_t size,
				    cmp_r_func_t cmp_func, const void *priv,
				    size_t l, size_t r)
{
	return do_cmp(base1 + inorder_to_eytzinger1(l, n) * size,
		      base1 + inorder_to_eytzinger1(r, n) * size,
		      cmp_func, priv);
}

static inline void eytzinger1_do_swap(void *base1, size_t n, size_t size,
				      swap_r_func_t swap_func, const void *priv,
				      size_t l, size_t r)
{
	do_swap(base1 + inorder_to_eytzinger1(l, n) * size,
		base1 + inorder_to_eytzinger1(r, n) * size,
		size, swap_func, priv);
}
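
/*
 * Note added for clarity: @l and @r above are 1-based in-order ranks, not
 * array positions; inorder_to_eytzinger1() translates a rank to the physical
 * eytzinger slot before comparing or swapping.  This lets eytzinger1_sort_r()
 * below run a textbook heapsort over ranks 1..n while the elements stay laid
 * out in eytzinger (BFS) order.  For example, with n == 7 the ranks 1..7 map
 * to eytzinger slots 4, 2, 5, 1, 6, 3, 7 (assuming the usual complete-tree
 * layout from eytzinger.h).
 */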

static void eytzinger1_sort_r(void *base1, size_t n, size_t size,
			      cmp_r_func_t cmp_func,
			      swap_r_func_t swap_func,
			      const void *priv)
{
	unsigned i, j, k;

	/* called from 'sort' without swap function, let's pick the default */
	if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap_func)
		swap_func = NULL;

	if (!swap_func) {
		if (is_aligned(base1, size, 8))
			swap_func = SWAP_WORDS_64;
		else if (is_aligned(base1, size, 4))
			swap_func = SWAP_WORDS_32;
		else
			swap_func = SWAP_BYTES;
	}

	/* heapify */
	for (i = n / 2; i >= 1; --i) {
		/* Find the sift-down path all the way to the leaves. */
		for (j = i; k = j * 2, k < n;)
			j = eytzinger1_do_cmp(base1, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;

		/* Special case for the last leaf with no sibling. */
		if (j * 2 == n)
			j *= 2;

		/* Backtrack to the correct location. */
		while (j != i && eytzinger1_do_cmp(base1, n, size, cmp_func, priv, i, j) >= 0)
			j /= 2;

		/* Shift the element into its correct place. */
		for (k = j; j != i;) {
			j /= 2;
			eytzinger1_do_swap(base1, n, size, swap_func, priv, j, k);
		}
	}

	/* sort */
	for (i = n; i > 1; --i) {
		eytzinger1_do_swap(base1, n, size, swap_func, priv, 1, i);

		/* Find the sift-down path all the way to the leaves. */
		for (j = 1; k = j * 2, k + 1 < i;)
			j = eytzinger1_do_cmp(base1, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;

		/* Special case for the last leaf with no sibling. */
		if (j * 2 + 1 == i)
			j *= 2;

		/* Backtrack to the correct location. */
		while (j >= 1 && eytzinger1_do_cmp(base1, n, size, cmp_func, priv, 1, j) >= 0)
			j /= 2;

		/* Shift the element into its correct place. */
		for (k = j; j > 1;) {
			j /= 2;
			eytzinger1_do_swap(base1, n, size, swap_func, priv, j, k);
		}
	}
}

void eytzinger0_sort_r(void *base, size_t n, size_t size,
		       cmp_r_func_t cmp_func,
		       swap_r_func_t swap_func,
		       const void *priv)
{
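	/* eytzinger1_*() uses 1-based indexing, so point one element before @base */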
	void *base1 = base - size;

	return eytzinger1_sort_r(base1, n, size, cmp_func, swap_func, priv);
}

void eytzinger0_sort(void *base, size_t n, size_t size,
		     cmp_func_t cmp_func,
		     swap_func_t swap_func)
{
	struct wrapper w = {
		.cmp = cmp_func,
		.swap_func = swap_func,
	};

	return eytzinger0_sort_r(base, n, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
}
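
/*
 * Usage note (added for clarity): eytzinger0_sort() leaves @base laid out in
 * eytzinger order rather than plain ascending order, so the sorted result is
 * walked in increasing key order with eytzinger0_for_each(), as the test code
 * below does.
 */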

#if 0
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/ktime.h>

static u64 cmp_count;

static int mycmp(const void *a, const void *b)
{
	u32 _a = *(u32 *)a;
	u32 _b = *(u32 *)b;

	cmp_count++;
	if (_a < _b)
		return -1;
	else if (_a > _b)
		return 1;
	else
		return 0;
}

static int test(void)
{
	size_t N, i;
	ktime_t start, end;
	s64 delta;
	u32 *arr;

	for (N = 10000; N <= 100000; N += 10000) {
		arr = kmalloc_array(N, sizeof(u32), GFP_KERNEL);
		cmp_count = 0;

		for (i = 0; i < N; i++)
			arr[i] = get_random_u32();

		start = ktime_get();
		eytzinger0_sort(arr, N, sizeof(u32), mycmp, NULL);
		end = ktime_get();

		delta = ktime_us_delta(end, start);
		printk(KERN_INFO "time: %lld\n", delta);
		printk(KERN_INFO "comparisons: %lld\n", cmp_count);

		u32 prev = 0;

		eytzinger0_for_each(i, N) {
			if (prev > arr[i])
				goto err;
			prev = arr[i];
		}

		kfree(arr);
	}
	return 0;

err:
	kfree(arr);
	return -1;
}
#endif