// SPDX-License-Identifier: GPL-2.0-only
/*
 * idr-test.c: Test the IDR API
 * Copyright (c) 2016 Matthew Wilcox <willy@infradead.org>
 */
#include <linux/bitmap.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>

#include "test.h"

#define DUMMY_PTR	((void *)0x10)

int item_idr_free(int id, void *p, void *data)
{
	struct item *item = p;
	assert(item->index == id);
	free(p);

	return 0;
}

void item_idr_remove(struct idr *idr, int id)
{
	struct item *item = idr_find(idr, id);
	assert(item->index == id);
	idr_remove(idr, id);
	free(item);
}

void idr_alloc_test(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0);
	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd);
	idr_remove(&idr, 0x3ffd);
	idr_remove(&idr, 0);

	for (i = 0x3ffe; i < 0x4003; i++) {
		int id;
		struct item *item;

		if (i < 0x4000)
			item = item_create(i, 0);
		else
			item = item_create(i - 0x3fff, 0);

		id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

void idr_alloc2_test(void)
{
	int id;
	struct idr idr = IDR_INIT_BASE(idr, 1);

	id = idr_alloc(&idr, idr_alloc2_test, 0, 1, GFP_KERNEL);
	assert(id == -ENOSPC);

	id = idr_alloc(&idr, idr_alloc2_test, 1, 2, GFP_KERNEL);
	assert(id == 1);

	id = idr_alloc(&idr, idr_alloc2_test, 0, 1, GFP_KERNEL);
	assert(id == -ENOSPC);

	id = idr_alloc(&idr, idr_alloc2_test, 0, 2, GFP_KERNEL);
	assert(id == -ENOSPC);

	idr_destroy(&idr);
}

void idr_replace_test(void)
{
	DEFINE_IDR(idr);

	idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL);
	idr_replace(&idr, &idr, 10);

	idr_destroy(&idr);
}

/*
 * Unlike the radix tree, you can put a NULL pointer -- with care -- into
 * the IDR.  Some interfaces, like idr_find(), do not distinguish between
 * "present, value is NULL" and "not present", but that's exactly what some
 * users want.
 */
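
/*
 * Illustrative sketch (hypothetical helper, not called by the tests):
 * one way a caller can tell "slot allocated but holds NULL" apart from
 * "no slot at all" is to probe with idr_replace(), which returns
 * ERR_PTR(-ENOENT) only for IDs that were never allocated.  This is a
 * non-atomic sketch; a real caller would hold its own lock.
 */
static inline bool example_id_is_allocated(struct idr *idr, unsigned long id)
{
	/* Put back whatever was there (possibly NULL); only errors matter. */
	void *old = idr_replace(idr, idr_find(idr, id), id);

	return !IS_ERR(old);
}
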
void idr_null_test(void)
{
	int i;
	DEFINE_IDR(idr);

	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 0);
	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 0; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);
	}

	assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL);
	assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL);
	assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR);
	assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT));
	idr_remove(&idr, 5);
	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5);
	idr_remove(&idr, 5);

	for (i = 0; i < 9; i++) {
		idr_remove(&idr, i);
		assert(!idr_is_empty(&idr));
	}
	idr_remove(&idr, 8);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 9);
	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT));
	assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL);
	assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR);

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 1; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i);
	}

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));
}

void idr_nowait_test(void)
{
	unsigned int i;
	DEFINE_IDR(idr);

	idr_preload(GFP_KERNEL);

	for (i = 0; i < 3; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i);
	}

	idr_preload_end();

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

void idr_get_next_test(int base)
{
	unsigned long i;
	int nextid;
	DEFINE_IDR(idr);
	idr_init_base(&idr, base);

	int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};

	for (i = 0; indices[i]; i++) {
		struct item *item = item_create(indices[i], 0);
		assert(idr_alloc(&idr, item, indices[i], indices[i+1],
				 GFP_KERNEL) == indices[i]);
	}

	for (i = 0, nextid = 0; indices[i]; i++) {
		idr_get_next(&idr, &nextid);
		assert(nextid == indices[i]);
		nextid++;
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

int idr_u32_cb(int id, void *ptr, void *data)
{
	BUG_ON(id < 0);
	BUG_ON(ptr != DUMMY_PTR);
	return 0;
}

void idr_u32_test1(struct idr *idr, u32 handle)
{
	static bool warned = false;
	u32 id = handle;
	int sid = 0;
	void *ptr;

	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL));
	BUG_ON(id != handle);
	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL) != -ENOSPC);
	BUG_ON(id != handle);
	if (!warned && id > INT_MAX)
		printk("vvv Ignore these warnings\n");
	ptr = idr_get_next(idr, &sid);
	if (id > INT_MAX) {
		BUG_ON(ptr != NULL);
		BUG_ON(sid != 0);
	} else {
		BUG_ON(ptr != DUMMY_PTR);
		BUG_ON(sid != id);
	}
	idr_for_each(idr, idr_u32_cb, NULL);
	if (!warned && id > INT_MAX) {
		printk("^^^ Warnings over\n");
		warned = true;
	}
	BUG_ON(idr_remove(idr, id) != DUMMY_PTR);
	BUG_ON(!idr_is_empty(idr));
}
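
/*
 * Illustrative sketch (hypothetical helper, not called by the tests):
 * idr_alloc_u32() is the interface to use when a caller must honour a
 * full 32-bit handle, e.g. one chosen by userspace.  It returns 0 and
 * stores the allocated ID through its u32 pointer argument instead of
 * returning the ID, so values above INT_MAX are not mistaken for errors.
 */
static inline int example_alloc_exact_u32(struct idr *idr, void *ptr, u32 handle)
{
	u32 id = handle;
	int err;

	/* *(&id) supplies the lower bound and @handle the upper, so only @handle fits. */
	err = idr_alloc_u32(idr, ptr, &id, handle, GFP_KERNEL);
	if (err)
		return err;		/* -ENOSPC if @handle is already taken */

	BUG_ON(id != handle);		/* idr_alloc_u32() wrote the ID back */
	return 0;
}
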
void idr_u32_test(int base)
{
	DEFINE_IDR(idr);
	idr_init_base(&idr, base);
	idr_u32_test1(&idr, 10);
	idr_u32_test1(&idr, 0x7fffffff);
	idr_u32_test1(&idr, 0x80000000);
	idr_u32_test1(&idr, 0x80000001);
	idr_u32_test1(&idr, 0xffe00000);
	idr_u32_test1(&idr, 0xffffffff);
}

static void idr_align_test(struct idr *idr)
{
	char name[] = "Motorola 68000";
	int i, id;
	void *entry;

	for (i = 0; i < 9; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 1; i < 10; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 1);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 2; i < 11; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 2);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 3; i < 12; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 3);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
		BUG_ON(idr_alloc(idr, &name[i + 1], 0, 0, GFP_KERNEL) != 1);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 1);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 0);
		BUG_ON(!idr_is_empty(idr));
	}

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 0);
		idr_for_each_entry(idr, entry, id);
		idr_replace(idr, &name[i], 0);
		idr_for_each_entry(idr, entry, id);
		BUG_ON(idr_find(idr, 0) != &name[i]);
		idr_remove(idr, 0);
	}

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 1);
		idr_remove(idr, 1);
		idr_for_each_entry(idr, entry, id);
		idr_replace(idr, &name[i + 1], 0);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 0);
	}
}

DEFINE_IDR(find_idr);

static void *idr_throbber(void *arg)
{
	time_t start = time(NULL);
	int id = *(int *)arg;

	rcu_register_thread();
	do {
		idr_alloc(&find_idr, xa_mk_value(id), id, id + 1, GFP_KERNEL);
		idr_remove(&find_idr, id);
	} while (time(NULL) < start + 10);
	rcu_unregister_thread();

	return NULL;
}
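
/*
 * Illustrative sketch (hypothetical helper): the reader side of the
 * concurrent test below in condensed form.  Lookups are safe under
 * rcu_read_lock() while another thread allocates and removes IDs, as
 * idr_throbber() does; the entry must not be dereferenced after
 * rcu_read_unlock() unless it carries its own reference counting.
 */
static inline void *example_rcu_lookup(struct idr *idr, unsigned long id)
{
	void *entry;

	rcu_read_lock();
	entry = idr_find(idr, id);
	rcu_read_unlock();

	/* Returning it is safe here only because the tests store xa_mk_value() cookies. */
	return entry;
}
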
/*
 * There are always either 1 or 2 objects in the IDR.  If we find nothing,
 * or we find something at an ID we didn't expect, that's a bug.
 */
void idr_find_test_1(int anchor_id, int throbber_id)
{
	pthread_t throbber;
	time_t start = time(NULL);

	BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id,
				anchor_id + 1, GFP_KERNEL) != anchor_id);

	pthread_create(&throbber, NULL, idr_throbber, &throbber_id);

	rcu_read_lock();
	do {
		int id = 0;
		void *entry = idr_get_next(&find_idr, &id);
		rcu_read_unlock();
		if ((id != anchor_id && id != throbber_id) ||
		    entry != xa_mk_value(id)) {
			printf("%s(%d, %d): %p at %d\n", __func__, anchor_id,
				throbber_id, entry, id);
			abort();
		}
		rcu_read_lock();
	} while (time(NULL) < start + 11);
	rcu_read_unlock();

	pthread_join(throbber, NULL);

	idr_remove(&find_idr, anchor_id);
	BUG_ON(!idr_is_empty(&find_idr));
}

void idr_find_test(void)
{
	idr_find_test_1(100000, 0);
	idr_find_test_1(0, 100000);
}

void idr_checks(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	for (i = 0; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i);
	}

	assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 5000; i++)
		item_idr_remove(&idr, i);

	idr_remove(&idr, 3);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	idr_remove(&idr, 3);
	idr_remove(&idr, 0);

	assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
	idr_remove(&idr, 1);
	for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
		assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
	idr_remove(&idr, 1 << 30);
	idr_destroy(&idr);

	for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
	}
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i + 10, GFP_KERNEL) == -ENOSPC);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	idr_set_cursor(&idr, INT_MAX - 3UL);
	for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) {
		struct item *item;
		unsigned int id;
		if (i <= INT_MAX)
			item = item_create(i, 0);
		else
			item = item_create(i - INT_MAX - 1, 0);

		id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 1; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	idr_replace_test();
	idr_alloc_test();
	idr_alloc2_test();
	idr_null_test();
	idr_nowait_test();
	idr_get_next_test(0);
	idr_get_next_test(1);
	idr_get_next_test(4);
	idr_u32_test(4);
	idr_u32_test(1);
	idr_u32_test(0);
	idr_align_test(&idr);
	idr_find_test();
}
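
/*
 * Illustrative sketch (hypothetical helper, not called by the tests):
 * the cyclic-allocation pattern exercised in idr_checks() above.
 * idr_alloc_cyclic() hands out IDs starting from the IDR's cursor so
 * recently freed IDs are not reused immediately, and wraps back to the
 * start value (here 0) once the cursor passes INT_MAX.
 */
static inline int example_alloc_cyclic(struct idr *idr, void *ptr)
{
	int id = idr_alloc_cyclic(idr, ptr, 0, 0, GFP_KERNEL);

	/* id is the new ID on success, or a negative errno such as -ENOMEM. */
	return id;
}
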
#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
#define MODULE_DESCRIPTION(X)
#define MODULE_LICENSE(x)
#define dump_stack()	assert(0)
void ida_dump(struct ida *);

#include "../../../lib/test_ida.c"

/*
 * Check that we get the correct error when we run out of memory doing
 * allocations.  In userspace, GFP_NOWAIT will always fail an allocation.
 * The first test is for not having a bitmap available, and the second test
 * is for not being able to allocate a level of the radix tree.
 */
void ida_check_nomem(void)
{
	DEFINE_IDA(ida);
	int id;

	id = ida_alloc_min(&ida, 256, GFP_NOWAIT);
	IDA_BUG_ON(&ida, id != -ENOMEM);
	id = ida_alloc_min(&ida, 1UL << 30, GFP_NOWAIT);
	IDA_BUG_ON(&ida, id != -ENOMEM);
	IDA_BUG_ON(&ida, !ida_is_empty(&ida));
}

/*
 * Check handling of conversions between exceptional entries and full bitmaps.
 */
void ida_check_conv_user(void)
{
	DEFINE_IDA(ida);
	unsigned long i;

	for (i = 0; i < 1000000; i++) {
		int id = ida_alloc(&ida, GFP_NOWAIT);
		if (id == -ENOMEM) {
			IDA_BUG_ON(&ida, ((i % IDA_BITMAP_BITS) !=
					  BITS_PER_XA_VALUE) &&
					 ((i % IDA_BITMAP_BITS) != 0));
			id = ida_alloc(&ida, GFP_KERNEL);
		} else {
			IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) ==
					 BITS_PER_XA_VALUE);
		}
		IDA_BUG_ON(&ida, id != i);
	}
	ida_destroy(&ida);
}

void ida_check_random(void)
{
	DEFINE_IDA(ida);
	DECLARE_BITMAP(bitmap, 2048);
	unsigned int i;
	time_t s = time(NULL);

repeat:
	memset(bitmap, 0, sizeof(bitmap));
	for (i = 0; i < 100000; i++) {
		int i = rand();
		int bit = i & 2047;
		if (test_bit(bit, bitmap)) {
			__clear_bit(bit, bitmap);
			ida_free(&ida, bit);
		} else {
			__set_bit(bit, bitmap);
			IDA_BUG_ON(&ida, ida_alloc_min(&ida, bit, GFP_KERNEL)
					 != bit);
		}
	}
	ida_destroy(&ida);
	if (time(NULL) < s + 10)
		goto repeat;
}

void ida_alloc_free_test(void)
{
	DEFINE_IDA(ida);
	unsigned long i;

	for (i = 0; i < 10000; i++)
		assert(ida_alloc_max(&ida, 20000, GFP_KERNEL) == i);
	assert(ida_alloc_range(&ida, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 10000; i++)
		ida_free(&ida, i);
	assert(ida_is_empty(&ida));

	ida_destroy(&ida);
}

void user_ida_checks(void)
{
	radix_tree_cpu_dead(1);

	ida_check_nomem();
	ida_check_conv_user();
	ida_check_random();
	ida_alloc_free_test();

	radix_tree_cpu_dead(1);
}

static void *ida_random_fn(void *arg)
{
	rcu_register_thread();
	ida_check_random();
	rcu_unregister_thread();
	return NULL;
}

static void *ida_leak_fn(void *arg)
{
	struct ida *ida = arg;
	time_t s = time(NULL);
	int i, ret;

	rcu_register_thread();

	do for (i = 0; i < 1000; i++) {
		ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL);
		if (ret >= 0)
			ida_free(ida, 128);
	} while (time(NULL) < s + 2);

	rcu_unregister_thread();
	return NULL;
}

void ida_thread_tests(void)
{
	DEFINE_IDA(ida);
	pthread_t threads[20];
	int i;

	for (i = 0; i < ARRAY_SIZE(threads); i++)
		if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) {
			perror("creating ida thread");
			exit(1);
		}

	while (i--)
		pthread_join(threads[i], NULL);

	for (i = 0; i < ARRAY_SIZE(threads); i++)
		if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) {
			perror("creating ida thread");
			exit(1);
		}

	while (i--)
		pthread_join(threads[i], NULL);
	assert(ida_is_empty(&ida));
}

void ida_tests(void)
{
	user_ida_checks();
	ida_checks();
	ida_exit();
	ida_thread_tests();
}
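
/*
 * Illustrative sketch (hypothetical helper, not part of the tests): the
 * basic IDA pattern the functions above exercise.  ida_alloc() returns a
 * small free ID (or a negative errno) and ida_free() returns it; unlike
 * the IDR, the IDA tracks only the IDs themselves, not pointers.
 */
static inline int example_ida_cycle(struct ida *ida)
{
	int id = ida_alloc(ida, GFP_KERNEL);

	if (id < 0)
		return id;	/* negative errno, e.g. -ENOMEM */

	/* ... use the ID ... */
	ida_free(ida, id);
	return 0;
}
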
int __weak main(void)
{
	rcu_register_thread();
	radix_tree_init();
	idr_checks();
	ida_tests();
	radix_tree_cpu_dead(1);
	rcu_barrier();
	if (nr_allocated)
		printf("nr_allocated = %d\n", nr_allocated);
	rcu_unregister_thread();

	return 0;
}