// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
 *
 * Quicksort manages n*log2(n) - 1.26*n for random inputs (1.63*n
 * better) at the expense of stack usage and much larger code to avoid
 * quicksort's O(n^2) worst case.
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>
#include <linux/sched.h>

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address must
 * be if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size;

	(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}
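/*
 * Worked example (illustrative, not from the original file): with
 * align == 8, is_aligned(base, 20, 8) is always false, since 20 & 7 != 0
 * means elements after the first would be misaligned wherever the array
 * starts. is_aligned(base, 24, 8) passes the size check, and on a platform
 * without CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS it additionally requires
 * base itself to be 8-byte aligned.
 */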
/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory. This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory. This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible. If they're not available,
 * emulating one requires base+index+4 addressing, which x86 has but most
 * other processors do not. If CONFIG_64BIT, we definitely have 64-bit
 * loads, but it's possible to have 64-bit loads without 64-bit pointers
 * (e.g. x32 ABI). Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_r_func_t)0
#define SWAP_WORDS_32 (swap_r_func_t)1
#define SWAP_BYTES    (swap_r_func_t)2
#define SWAP_WRAPPER  (swap_r_func_t)3

struct wrapper {
	cmp_func_t cmp;
	swap_func_t swap;
};

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func,
		    const void *priv)
{
	if (swap_func == SWAP_WRAPPER) {
		((const struct wrapper *)priv)->swap(a, b, (int)size);
		return;
	}

	if (swap_func == SWAP_WORDS_64)
		swap_words_64(a, b, size);
	else if (swap_func == SWAP_WORDS_32)
		swap_words_32(a, b, size);
	else if (swap_func == SWAP_BYTES)
		swap_bytes(a, b, size);
	else
		swap_func(a, b, (int)size, priv);
}

#define _CMP_WRAPPER ((cmp_r_func_t)0L)

static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
	if (cmp == _CMP_WRAPPER)
		return ((const struct wrapper *)priv)->cmp(a, b);
	return cmp(a, b, priv);
}

/**
 * parent - given the offset of the child, find the offset of the parent.
 * @i: the offset of the heap element whose parent is sought. Non-zero.
 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
 * @size: size of each element
 *
 * In terms of array indexes, the parent of element j = @i/@size is simply
 * (j-1)/2. But when working in byte offsets, we can't use implicit
 * truncation of integer divides.
 *
 * Fortunately, we only need one bit of the quotient, not the full divide.
 * @size has a least significant bit. That bit will be clear if @i is
 * an even multiple of @size, and set if it's an odd multiple.
 *
 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
 * branch is unpredictable, it's done with a bit of clever branch-free
 * code instead.
 */
__attribute_const__ __always_inline
static size_t parent(size_t i, unsigned int lsbit, size_t size)
{
	i -= size;
	i -= size & -(i & lsbit);
	return i / 2;
}
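/*
 * Worked example (illustrative, not from the original file): with
 * size == 12, lsbit == 4 (12 & -12). The child at index 4 sits at byte
 * offset i == 48. After "i -= size", i == 36, an odd multiple of 12, so
 * i & lsbit is nonzero and size is subtracted once more, giving 24;
 * 24 / 2 == 12, the offset of index 1 == (4 - 1) / 2, as required.
 */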
static void __sort_r(void *base, size_t num, size_t size,
		     cmp_r_func_t cmp_func,
		     swap_r_func_t swap_func,
		     const void *priv,
		     bool may_schedule)
{
	/* pre-scale counters for performance */
	size_t n = num * size, a = (num/2) * size;
	const unsigned int lsbit = size & -size;	/* Used to find parent */
	size_t shift = 0;

	if (!a)		/* num < 2 || size == 0 */
		return;

	/* called from 'sort' without swap function, let's pick the default */
	if (swap_func == SWAP_WRAPPER && !((const struct wrapper *)priv)->swap)
		swap_func = NULL;

	if (!swap_func) {
		if (is_aligned(base, size, 8))
			swap_func = SWAP_WORDS_64;
		else if (is_aligned(base, size, 4))
			swap_func = SWAP_WORDS_32;
		else
			swap_func = SWAP_BYTES;
	}

	/*
	 * Loop invariants:
	 * 1. elements [a,n) satisfy the heap property (compare greater than
	 *    all of their children),
	 * 2. elements [n,num*size) are sorted, and
	 * 3. a <= b <= c <= d <= n (whenever they are valid).
	 */
	for (;;) {
		size_t b, c, d;

		if (a)			/* Building heap: sift down a */
			a -= size << shift;
		else if (n > 3 * size) { /* Sorting: Extract two largest elements */
			n -= size;
			do_swap(base, base + n, size, swap_func, priv);
			shift = do_cmp(base + size, base + 2 * size, cmp_func, priv) <= 0;
			a = size << shift;
			n -= size;
			do_swap(base + a, base + n, size, swap_func, priv);
		} else {		/* Sort complete */
			break;
		}

		/*
		 * Sift element at "a" down into heap. This is the
		 * "bottom-up" variant, which significantly reduces
		 * calls to cmp_func(): we find the sift-down path all
		 * the way to the leaves (one compare per level), then
		 * backtrack to find where to insert the target element.
		 *
		 * Because elements tend to sift down close to the leaves,
		 * this uses fewer compares than doing two per level
		 * on the way down. (A bit more than half as many on
		 * average, 3/4 worst-case.)
		 */
		for (b = a; c = 2*b + size, (d = c + size) < n;)
			b = do_cmp(base + c, base + d, cmp_func, priv) > 0 ? c : d;
		if (d == n)	/* Special case last leaf with no sibling */
			b = c;

		/* Now backtrack from "b" to the correct location for "a" */
		while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
			b = parent(b, lsbit, size);
		c = b;			/* Where "a" belongs */
		while (b != a) {	/* Shift it into place */
			b = parent(b, lsbit, size);
			do_swap(base + b, base + c, size, swap_func, priv);
		}

		if (may_schedule)
			cond_resched();
	}

	n -= size;
	do_swap(base, base + n, size, swap_func, priv);
	if (n == size * 2 && do_cmp(base, base + size, cmp_func, priv) > 0)
		do_swap(base, base + size, size, swap_func, priv);
}

/**
 * sort_r - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 * @priv: third argument passed to comparison function
 *
 * This function does a heapsort on the given array. You may provide
 * a swap_func function if you need to do something more than a memory
 * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
 * avoids a slow retpoline and so is significantly faster.
 *
 * The comparison function must adhere to specific mathematical
 * properties to ensure correct sorting (note that heapsort is not a
 * stable sort):
 * - Antisymmetry: cmp_func(a, b) must return the opposite sign of
 *   cmp_func(b, a).
 * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then
 *   cmp_func(a, c) <= 0.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * quicksort is slightly faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort_r(void *base, size_t num, size_t size,
	    cmp_r_func_t cmp_func,
	    swap_r_func_t swap_func,
	    const void *priv)
{
	__sort_r(base, num, size, cmp_func, swap_func, priv, false);
}
EXPORT_SYMBOL(sort_r);
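/*
 * Usage sketch (illustrative; the names below are not part of this file):
 * @priv lets the comparison key be chosen at run time without resorting
 * to a global:
 *
 *	struct point { int x, y; };
 *
 *	static int cmp_point(const void *a, const void *b, const void *priv)
 *	{
 *		bool by_y = *(const bool *)priv;
 *		int va = by_y ? ((const struct point *)a)->y
 *			      : ((const struct point *)a)->x;
 *		int vb = by_y ? ((const struct point *)b)->y
 *			      : ((const struct point *)b)->x;
 *
 *		return (va > vb) - (va < vb);
 *	}
 *
 *	bool by_y = false;
 *	sort_r(points, n, sizeof(*points), cmp_point, NULL, &by_y);
 */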
/**
 * sort_r_nonatomic - sort an array of elements, with cond_resched
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 * @priv: third argument passed to comparison function
 *
 * Same as sort_r, but preferred for larger arrays as it does a periodic
 * cond_resched().
 */
void sort_r_nonatomic(void *base, size_t num, size_t size,
		      cmp_r_func_t cmp_func,
		      swap_r_func_t swap_func,
		      const void *priv)
{
	__sort_r(base, num, size, cmp_func, swap_func, priv, true);
}
EXPORT_SYMBOL(sort_r_nonatomic);

void sort(void *base, size_t num, size_t size,
	  cmp_func_t cmp_func,
	  swap_func_t swap_func)
{
	struct wrapper w = {
		.cmp  = cmp_func,
		.swap = swap_func,
	};

	return __sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w, false);
}
EXPORT_SYMBOL(sort);

void sort_nonatomic(void *base, size_t num, size_t size,
		    cmp_func_t cmp_func,
		    swap_func_t swap_func)
{
	struct wrapper w = {
		.cmp  = cmp_func,
		.swap = swap_func,
	};

	return __sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w, true);
}
EXPORT_SYMBOL(sort_nonatomic);
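/*
 * Usage sketch (illustrative; cmp_int is not part of this file): sorting
 * an array of ints with the built-in swap. The (x > y) - (x < y) idiom
 * avoids the signed overflow a plain subtraction could produce and
 * satisfies the antisymmetry and transitivity requirements documented
 * at sort_r():
 *
 *	static int cmp_int(const void *a, const void *b)
 *	{
 *		int x = *(const int *)a, y = *(const int *)b;
 *
 *		return (x > y) - (x < y);
 *	}
 *
 *	sort(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_int, NULL);
 */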