/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Peter McIlroy.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Hybrid exponential search/linear search merge sort with hybrid
 * natural/pairwise first pass.  Requires about 0.3% more comparisons
 * for random data than LSMS with pairwise first pass alone.
 * It works for objects as small as two bytes.
 */

/*
 * #define NATURAL to get the hybrid natural merge.
 * (Otherwise simple pairwise merging is used.)
 */
#define NATURAL
#define THRESHOLD 16	/* Best choice for natural merge cut-off. */

#include <sys/param.h>

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#ifdef I_AM_MERGESORT_B
#include "block_abi.h"
#define DECLARE_CMP DECLARE_BLOCK(int, cmp, const void *, const void *)
typedef DECLARE_BLOCK(int, cmp_t, const void *, const void *);
#define CMP(x, y) CALL_BLOCK(cmp, x, y)
#else
typedef int (*cmp_t)(const void *, const void *);
#define CMP(x, y) cmp(x, y)
#endif

static void setup(u_char *, u_char *, size_t, size_t, cmp_t);
static void insertionsort(u_char *, size_t, size_t, cmp_t);

#define ISIZE sizeof(int)
#define PSIZE sizeof(u_char *)
#define ICOPY_LIST(src, dst, last)                                      \
    do {                                                                \
        *(int *)dst = *(int *)src, src += ISIZE, dst += ISIZE;          \
    } while (src < last)
#define ICOPY_ELT(src, dst, i)                                          \
    do {                                                                \
        *(int *)dst = *(int *)src, src += ISIZE, dst += ISIZE;          \
    } while (i -= ISIZE)

#define CCOPY_LIST(src, dst, last)                                      \
    do {                                                                \
        *dst++ = *src++;                                                \
    } while (src < last)
#define CCOPY_ELT(src, dst, i)                                          \
    do {                                                                \
        *dst++ = *src++;                                                \
    } while (i -= 1)

/*
 * Find the next possible pointer head.  (Trickery for forcing an array
 * to do double duty as a linked list when objects do not align with word
 * boundaries.)
 */
/* Assumption: PSIZE is a power of 2. */
#define EVAL(p) (u_char **)roundup2((uintptr_t)p, PSIZE)
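/*
 * Illustration (not in the original source): assuming PSIZE == 8 and an
 * 8-byte-aligned list2, EVAL(list2 + 20) yields list2 + 24, the first
 * address at or after list2 + 20 where a pointer-sized link word fits.
 * This is how a record's slot in list2 doubles as a linked-list node
 * even when the record size is not a multiple of the pointer size.
 */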
#ifdef I_AM_MERGESORT_B
int mergesort_b(void *, size_t, size_t, cmp_t);
#else
int mergesort(void *, size_t, size_t, cmp_t);
#endif

/*
 * Arguments are as for qsort.
 */
int
#ifdef I_AM_MERGESORT_B
mergesort_b(void *base, size_t nmemb, size_t size, cmp_t cmp)
#else
mergesort(void *base, size_t nmemb, size_t size, cmp_t cmp)
#endif
{
    size_t i;
    int sense;
    int big, iflag;
    u_char *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
    u_char *list2, *list1, *p2, *p, *last, **p1;

    if (size < PSIZE / 2) {     /* Pointers must fit into 2 * size. */
        errno = EINVAL;
        return (-1);
    }

    if (nmemb == 0)
        return (0);

    iflag = 0;
    if (__is_aligned(size, ISIZE) && __is_aligned(base, ISIZE))
        iflag = 1;

    if ((list2 = malloc(nmemb * size + PSIZE)) == NULL)
        return (-1);

    list1 = base;
    setup(list1, list2, nmemb, size, cmp);
    last = list2 + nmemb * size;
    i = big = 0;
    while (*EVAL(list2) != last) {
        l2 = list1;
        p1 = EVAL(list1);
        for (tp2 = p2 = list2; p2 != last; p1 = EVAL(l2)) {
            p2 = *EVAL(p2);
            f1 = l2;
            f2 = l1 = list1 + (p2 - list2);
            if (p2 != last)
                p2 = *EVAL(p2);
            l2 = list1 + (p2 - list2);
            while (f1 < l1 && f2 < l2) {
                if (CMP(f1, f2) <= 0) {
                    q = f2;
                    b = f1, t = l1;
                    sense = -1;
                } else {
                    q = f1;
                    b = f2, t = l2;
                    sense = 0;
                }
                if (!big) {     /* here i = 0 */
                    while ((b += size) < t && CMP(q, b) > sense)
                        if (++i == 6) {
                            big = 1;
                            goto EXPONENTIAL;
                        }
                } else {
EXPONENTIAL:        for (i = size;; i <<= 1)
                        if ((p = (b + i)) >= t) {
                            if ((p = t - size) > b &&
                                CMP(q, p) <= sense)
                                t = p;
                            else
                                b = p;
                            break;
                        } else if (CMP(q, p) <= sense) {
                            t = p;
                            if (i == size)
                                big = 0;
                            goto FASTCASE;
                        } else
                            b = p;
                    while (t > b + size) {
                        i = (((t - b) / size) >> 1) * size;
                        if (CMP(q, p = b + i) <= sense)
                            t = p;
                        else
                            b = p;
                    }
                    goto COPY;
FASTCASE:           while (i > size)
                        if (CMP(q,
                            p = b + (i >>= 1)) <= sense)
                            t = p;
                        else
                            b = p;
COPY:               b = t;
                }
                i = size;
                if (q == f1) {
                    if (iflag) {
                        ICOPY_LIST(f2, tp2, b);
                        ICOPY_ELT(f1, tp2, i);
                    } else {
                        CCOPY_LIST(f2, tp2, b);
                        CCOPY_ELT(f1, tp2, i);
                    }
                } else {
                    if (iflag) {
                        ICOPY_LIST(f1, tp2, b);
                        ICOPY_ELT(f2, tp2, i);
                    } else {
                        CCOPY_LIST(f1, tp2, b);
                        CCOPY_ELT(f2, tp2, i);
                    }
                }
            }
            if (f2 < l2) {
                if (iflag)
                    ICOPY_LIST(f2, tp2, l2);
                else
                    CCOPY_LIST(f2, tp2, l2);
            } else if (f1 < l1) {
                if (iflag)
                    ICOPY_LIST(f1, tp2, l1);
                else
                    CCOPY_LIST(f1, tp2, l1);
            }
            *p1 = l2;
        }
        tp2 = list1;            /* swap list1, list2 */
        list1 = list2;
        list2 = tp2;
        last = list2 + nmemb * size;
    }
    if (base == list2) {
        memmove(list2, list1, nmemb * size);
        list2 = list1;
    }
    free(list2);
    return (0);
}

/*
 * swap exchanges the two size-byte records at a and b; reverse reverses
 * the records between bot and top in place.  Both rely on scratch
 * variables (s, i, tmp, and for reverse, size2) in the caller's scope.
 */
#define swap(a, b) {                            \
    s = b;                                      \
    i = size;                                   \
    do {                                        \
        tmp = *a; *a++ = *s; *s++ = tmp;        \
    } while (--i);                              \
    a -= size;                                  \
}
#define reverse(bot, top) {                     \
    s = top;                                    \
    do {                                        \
        i = size;                               \
        do {                                    \
            tmp = *bot; *bot++ = *s; *s++ = tmp; \
        } while (--i);                          \
        s -= size2;                             \
    } while (bot < s);                          \
}
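/*
 * Usage sketch (illustrative, not part of the library): mergesort() takes
 * the same arguments as qsort() but allocates scratch space, so it can
 * fail and its -1/errno result must be checked.  The guard macro
 * MERGESORT_EXAMPLE and the comparator int_cmp are hypothetical names
 * introduced only for this example; it assumes the plain function-pointer
 * build (I_AM_MERGESORT_B undefined) and uses nitems() from <sys/param.h>.
 */
#ifdef MERGESORT_EXAMPLE
#include <stdio.h>

static int
int_cmp(const void *a, const void *b)
{
    int x = *(const int *)a, y = *(const int *)b;

    /* Return <0, 0, or >0 without the overflow risk of x - y. */
    return ((x > y) - (x < y));
}

int
main(void)
{
    int v[] = { 3, 1, 4, 1, 5, 9, 2, 6 };
    size_t i;

    if (mergesort(v, nitems(v), sizeof(v[0]), int_cmp) == -1) {
        perror("mergesort");
        return (1);
    }
    for (i = 0; i < nitems(v); i++)
        printf("%d ", v[i]);
    printf("\n");
    return (0);
}
#endif /* MERGESORT_EXAMPLE */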
/*
 * Optional hybrid natural/pairwise first pass.  Eats up list1 in runs of
 * increasing order, list2 in a corresponding linked list.  Checks for runs
 * when THRESHOLD/2 pairs compare with same sense.  (Only used when NATURAL
 * is defined.  Otherwise simple pairwise merging is used.)
 */
static void
setup(u_char *list1, u_char *list2, size_t n, size_t size, cmp_t cmp)
{
    int i, length, size2, tmp, sense;
    u_char *f1, *f2, *s, *l2, *last, *p2;

    size2 = size * 2;
    if (n <= 5) {
        insertionsort(list1, n, size, cmp);
        *EVAL(list2) = (u_char *)list2 + n * size;
        return;
    }
    /*
     * Avoid running pointers out of bounds; limit n to evens
     * for simplicity.
     */
    i = 4 + (n & 1);
    insertionsort(list1 + (n - i) * size, i, size, cmp);
    last = list1 + size * (n - i);
    *EVAL(list2 + (last - list1)) = list2 + n * size;

#ifdef NATURAL
    p2 = list2;
    f1 = list1;
    sense = (CMP(f1, f1 + size) > 0);
    for (; f1 < last; sense = !sense) {
        length = 2;
        /* Find pairs with same sense. */
        for (f2 = f1 + size2; f2 < last; f2 += size2) {
            if ((CMP(f2, f2 + size) > 0) != sense)
                break;
            length += 2;
        }
        if (length < THRESHOLD) {       /* Pairwise merge */
            do {
                p2 = *EVAL(p2) = f1 + size2 - list1 + list2;
                if (sense > 0)
                    swap(f1, f1 + size);
            } while ((f1 += size2) < f2);
        } else {                        /* Natural merge */
            l2 = f2;
            for (f2 = f1 + size2; f2 < l2; f2 += size2) {
                if ((CMP(f2 - size, f2) > 0) != sense) {
                    p2 = *EVAL(p2) = f2 - list1 + list2;
                    if (sense > 0)
                        reverse(f1, f2 - size);
                    f1 = f2;
                }
            }
            if (sense > 0)
                reverse(f1, f2 - size);
            f1 = f2;
            if (f2 < last || CMP(f2 - size, f2) > 0)
                p2 = *EVAL(p2) = f2 - list1 + list2;
            else
                p2 = *EVAL(p2) = list2 + n * size;
        }
    }
#else /* Pairwise merge only. */
    for (f1 = list1, p2 = list2; f1 < last; f1 += size2) {
        p2 = *EVAL(p2) = p2 + size2;
        if (CMP(f1, f1 + size) > 0)
            swap(f1, f1 + size);
    }
#endif /* NATURAL */
}

/*
 * This is to avoid out-of-bounds addresses in sorting the
 * last 4 elements.
 */
static void
insertionsort(u_char *a, size_t n, size_t size, cmp_t cmp)
{
    u_char *ai, *s, *t, *u, tmp;
    int i;

    for (ai = a + size; --n >= 1; ai += size)
        for (t = ai; t > a; t -= size) {
            u = t - size;
            if (CMP(u, t) <= 0)
                break;
            swap(u, t);
        }
}
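/*
 * Illustration (not in the original source): when setup() finds at least
 * THRESHOLD/2 consecutive pairs comparing with descending sense (e.g. the
 * input 16, 15, 14, ..., 1), the natural-merge branch reverses that whole
 * stretch in place with reverse(), so every run whose boundary is recorded
 * in list2 is ascending before the merge passes begin.
 */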