// SPDX-License-Identifier: GPL-2.0+
/*
 * test_xarray.c: Test the XArray API
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Copyright (c) 2019-2020 Oracle
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/xarray.h>
#include <linux/module.h>

static unsigned int tests_run;
static unsigned int tests_passed;

static const unsigned int order_limit =
		IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;

#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif

static void *xa_mk_index(unsigned long index)
{
	return xa_mk_value(index & LONG_MAX);
}

static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_store(xa, index, xa_mk_index(index), gfp);
}

static void xa_insert_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
				GFP_KERNEL) != 0);
}

static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	u32 id;

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
				gfp) != 0);
	XA_BUG_ON(xa, id != index);
}

static void xa_erase_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}
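/*
 * For reference: the helpers above lean on the XArray's value-entry
 * encoding.  xa_mk_value() tags an unsigned long so it can be stored
 * directly in the array and xa_to_value() recovers it.  A minimal
 * sketch, not itself part of the test suite:
 *
 *	void *entry = xa_mk_value(42);
 *
 *	XA_BUG_ON(xa, !xa_is_value(entry));
 *	XA_BUG_ON(xa, xa_to_value(entry) != 42);
 */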
/*
 * If anyone needs this, please move it to xarray.c. We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}

static noinline void check_xa_err(struct xarray *xa)
{
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
// kills the test-suite :-(
//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}

static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);
	rcu_read_unlock();

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	rcu_read_lock();
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}

static noinline void check_xa_load(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j < i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
	}

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j >= i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		xa_erase_index(xa, i);
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}
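/*
 * The mark tests below exercise the three per-index mark bits.  For
 * reference, a hedged sketch of the basic mark API (not part of the
 * tests themselves):
 *
 *	xa_store(xa, 7, xa_mk_value(7), GFP_KERNEL);
 *	xa_set_mark(xa, 7, XA_MARK_0);
 *	if (xa_get_mark(xa, 7, XA_MARK_0))
 *		;	// the mark was set on a present entry
 */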
static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ?
			8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_2);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_mark_2(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long index;
	unsigned int count = 0;
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_set_mark(xa, 0, XA_MARK_0);
	xas_lock(&xas);
	xas_load(&xas);
	xas_init_marks(&xas);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_get_mark(xa, 0, XA_MARK_0));

	for (index = 3500; index < 4500; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		xa_set_mark(xa, index, XA_MARK_0);
	}

	xas_reset(&xas);
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();
	XA_BUG_ON(xa, count != 1000);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_init_marks(&xas);
		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
	}
	xas_unlock(&xas);

	xa_destroy(xa);
}
static noinline void check_xa_mark_3(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	XA_STATE(xas, xa, 0x41);
	void *entry;
	int count = 0;

	xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
	xa_set_mark(xa, 0x41, XA_MARK_0);

	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
		count++;
		XA_BUG_ON(xa, entry != xa_mk_index(0x40));
	}
	XA_BUG_ON(xa, count != 1);
	rcu_read_unlock();
	xa_destroy(xa);
#endif
}

static noinline void check_xa_mark(struct xarray *xa)
{
	unsigned long index;

	for (index = 0; index < 16384; index += 4)
		check_xa_mark_1(xa, index);

	check_xa_mark_2(xa);
	check_xa_mark_3(xa);
}

static noinline void check_xa_shrink(struct xarray *xa)
{
	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 0; order < max_order; order++) {
		unsigned long max = (1UL << order) - 1;
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		rcu_read_lock();
		node = xa_head(xa);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
				NULL);
		rcu_read_lock();
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		xa_erase_index(xa, ULONG_MAX);
		XA_BUG_ON(xa, xa->xa_head != node);
		xa_erase_index(xa, 0);
	}
}

static noinline void check_insert(struct xarray *xa)
{
	unsigned long i;

	for (i = 0; i < 1024; i++) {
		xa_insert_index(xa, i);
		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
		xa_erase_index(xa, i);
	}

	for (i = 10; i < BITS_PER_LONG; i++) {
		xa_insert_index(xa, 1UL << i);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
		xa_erase_index(xa, 1UL << i);

		xa_insert_index(xa, (1UL << i) - 1);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
		xa_erase_index(xa, (1UL << i) - 1);
	}

	xa_insert_index(xa, ~0UL);
	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
	xa_erase_index(xa, ~0UL);

	XA_BUG_ON(xa, !xa_empty(xa));
}
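/*
 * check_cmpxchg() below leans on the fact that xa_cmpxchg() always
 * returns the entry previously at the index; the exchange only took
 * place if that return value equals 'old'.  Hedged sketch:
 *
 *	void *prev = xa_cmpxchg(xa, index, NULL, entry, GFP_KERNEL);
 *
 *	if (prev == NULL)
 *		;	// we installed 'entry'
 */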
static noinline void check_cmpxchg(struct xarray *xa)
{
	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
	xa_erase_index(xa, 12345678);
	xa_erase_index(xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_cmpxchg_order(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	void *FIVE = xa_mk_value(5);
	unsigned int i, order = 3;

	XA_BUG_ON(xa, xa_store_order(xa, 0, order, FIVE, GFP_KERNEL));

	/* Check entry FIVE has the order saved */
	XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != order);

	/* Check all the tied indexes have the same entry and order */
	for (i = 0; i < (1 << order); i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	/* Ensure that nothing is stored at index '1 << order' */
	XA_BUG_ON(xa, xa_load(xa, 1 << order) != NULL);

	/*
	 * Additionally, keep the node information and the order at
	 * '1 << order'
	 */
	XA_BUG_ON(xa, xa_store_order(xa, 1 << order, order, FIVE, GFP_KERNEL));
	for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	/* Conditionally replace FIVE entry at index '0' with NULL */
	XA_BUG_ON(xa, xa_cmpxchg(xa, 0, FIVE, NULL, GFP_KERNEL) != FIVE);

	/* Verify the order is lost at FIVE (and old) entries */
	XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != 0);

	/* Verify the order and entries are lost in all the tied indexes */
	for (i = 0; i < (1 << order); i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != NULL);
		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
	}

	/* Verify node and order are kept at '1 << order' */
	for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
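/*
 * check_reserve() below probes how a reserved slot reads back:
 * internally the slot holds XA_ZERO_ENTRY, xa_load() reports it as
 * NULL, while xa_cmpxchg() and xa_insert() treat it as occupied.  That
 * asymmetry is what the assertions that follow are exercising.
 */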
static noinline void check_reserve(struct xarray *xa)
{
	void *entry;
	unsigned long index;
	int count;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as ZERO */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* xa_insert treats it as busy */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
			-EBUSY);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
	xa_store_index(xa, 7, GFP_KERNEL);

	count = 0;
	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, index != 5 && index != 7);
		count++;
	}
	XA_BUG_ON(xa, count != 2);

	/* If we free a reserved entry, we should be able to allocate it */
	if (xa->xa_flags & XA_FLAGS_ALLOC) {
		u32 id;

		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 8);

		xa_release(xa, 6);
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 6);
	}

	xa_destroy(xa);
}

static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_index(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		xas_lock(&xas);
		xas_store(&xas, NULL);

		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_index(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}

#ifdef CONFIG_XARRAY_MULTI
static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xa_erase_index(xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != index);
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}
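/*
 * A multi-index entry occupies all 2^order slots it covers: loading any
 * covered index returns the same entry, and storing through any covered
 * index replaces the whole thing, as the two helpers above demonstrate.
 * check_multi_store_3() additionally checks that iteration reports such
 * an entry exactly once.
 */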
static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
#endif

static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	for (i = 0; i < 20; i++) {
		check_multi_store_1(xa, 200, i);
		check_multi_store_1(xa, 0, i);
		check_multi_store_1(xa, (1UL << i) + 1, i);
	}
	check_multi_store_2(xa, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(xa, 0, i);
		check_multi_store_3(xa, 1UL << i, i);
	}
#endif
}
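/*
 * The page-cache-style tests below all use the advanced API's store
 * idiom: take the lock, attempt the store, then let xas_nomem() perform
 * any allocation the store needed.  A minimal sketch of the pattern
 * (GFP_KERNEL context assumed):
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, entry);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 */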
#ifdef CONFIG_XARRAY_MULTI
/* mimics page cache __filemap_add_folio() */
static noinline void check_xa_multi_store_adv_add(struct xarray *xa,
						  unsigned long index,
						  unsigned int order,
						  void *p)
{
	XA_STATE(xas, xa, index);
	unsigned int nrpages = 1UL << order;

	/* users are responsible for index alignment to the order when adding */
	XA_BUG_ON(xa, index & (nrpages - 1));

	xas_set_order(&xas, index, order);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, p);
		xas_unlock_irq(&xas);
		/*
		 * In our selftest case the only failure we can expect is for
		 * there not to be enough memory as we're not mimicking the
		 * entire page cache, so verify that's the only error we can
		 * run into here. The xas_nomem() which follows will fix that
		 * condition for us so the loop can chug on.
		 */
		XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, xa_load(xa, index) != p);
}

/* mimics page_cache_delete() */
static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa,
							unsigned long index,
							unsigned int order)
{
	XA_STATE(xas, xa, index);

	xas_set_order(&xas, index, order);
	xas_store(&xas, NULL);
	xas_init_marks(&xas);
}

static noinline void check_xa_multi_store_adv_delete(struct xarray *xa,
						     unsigned long index,
						     unsigned int order)
{
	xa_lock_irq(xa);
	check_xa_multi_store_adv_del_entry(xa, index, order);
	xa_unlock_irq(xa);
}
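/*
 * Readers walking under rcu_read_lock() can observe a retry entry while
 * the tree is being modified; xas_retry() resets the walk so the load
 * can simply be repeated, which is what test_get_entry() does below.
 */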
/* mimics page cache filemap_get_entry() */
static noinline void *test_get_entry(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *p;
	static unsigned int loops = 0;

	rcu_read_lock();
repeat:
	xas_reset(&xas);
	p = xas_load(&xas);
	if (xas_retry(&xas, p))
		goto repeat;
	rcu_read_unlock();

	/*
	 * This is not part of the page cache: this selftest is deliberately
	 * aggressive and does not want to trust the xarray API, but rather
	 * to test it.  For order 20 (4 GiB block size) we can loop over
	 * more than a million entries, which can cause a soft lockup.
	 * Proper page cache APIs iterate at the entry's order and so skip
	 * the shared slots; since we touch every index here, yield to the
	 * scheduler periodically instead.
	 */
	if (++loops % XA_CHECK_SCHED == 0)
		schedule();

	return p;
}

static unsigned long some_val = 0xdeadbeef;
static unsigned long some_val_2 = 0xdeaddead;

/* mimics the page cache usage */
static noinline void check_xa_multi_store_adv(struct xarray *xa,
					      unsigned long pos,
					      unsigned int order)
{
	unsigned int nrpages = 1UL << order;
	unsigned long index, base, next_index, next_next_index;
	unsigned int i;

	index = pos >> PAGE_SHIFT;
	base = round_down(index, nrpages);
	next_index = round_down(base + nrpages, nrpages);
	next_next_index = round_down(next_index + nrpages, nrpages);

	check_xa_multi_store_adv_add(xa, base, order, &some_val);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val);

	XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL);

	/* Use order 0 for the next item */
	check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2);
	XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2);

	/* Remove the next item */
	check_xa_multi_store_adv_delete(xa, next_index, 0);

	/* Now use order for a new pointer */
	check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);

	check_xa_multi_store_adv_delete(xa, next_index, order);
	check_xa_multi_store_adv_delete(xa, base, order);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* starting fresh again */

	/* let's test some holes now */

	/* hole at base and next_next */
	check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL);

	check_xa_multi_store_adv_delete(xa, next_index, order);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* hole at base and next */

	check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2);

	check_xa_multi_store_adv_delete(xa, next_next_index, order);
	XA_BUG_ON(xa, !xa_empty(xa));
}
#endif
static noinline void check_multi_store_advanced(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned long end = ULONG_MAX/2;
	unsigned long pos, i;

	/*
	 * About 117 million tests below.
	 */
	for (pos = 7; pos < end; pos = (pos * pos) + 564) {
		for (i = 0; i < max_order; i++) {
			check_xa_multi_store_adv(xa, pos, i);
			check_xa_multi_store_adv(xa, pos + 157, i);
		}
	}
#endif
}

static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
	int i;
	u32 id;

	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
		xa_alloc_index(xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
		xa_erase_index(xa, i);
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* The next assigned ID should be base+1 */
	xa_alloc_index(xa, base + 1, GFP_KERNEL);
	xa_erase_index(xa, base + 1);

	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
	xa_alloc_index(xa, base + 2, GFP_KERNEL);

	/* If we then erase base, it should be free */
	xa_erase_index(xa, base);
	xa_alloc_index(xa, base, GFP_KERNEL);

	xa_erase_index(xa, base + 1);
	xa_erase_index(xa, base + 2);

	for (i = 1; i < 5000; i++) {
		xa_alloc_index(xa, base + i, GFP_KERNEL);
	}

	xa_destroy(xa);

	/* Check that we fail properly at the limit of allocation */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xfffffffeU);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xffffffffU);
	id = 3;
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, id != 3);
	xa_destroy(xa);

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	xa_erase_index(xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
}
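/*
 * For reference, typical xa_alloc() usage (a hedged sketch, not part of
 * the tests): on success the new ID is written through the second
 * argument, constrained by the supplied limit.
 *
 *	u32 id;
 *
 *	if (xa_alloc(xa, &id, ptr, xa_limit_32b, GFP_KERNEL) == 0)
 *		;	// 'id' holds the allocated index
 */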
static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* Allocate and free a NULL and check xa_empty() behaves */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ditto, but check destroy instead of erase */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = base; i < base + 10; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}

	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 5);

	xa_for_each(xa, index, entry) {
		xa_erase_index(xa, index);
	}

	for (i = base; i < base + 9; i++) {
		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
		XA_BUG_ON(xa, xa_empty(xa));
	}
	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	xa_destroy(xa);
}

static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;
	int ret;

	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(xa, 0x3ffd);
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(xa, base);
	xa_erase_index(xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* check wrap-around return of __xa_alloc_cyclic() */
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	xa_lock(xa);
	ret = __xa_alloc_cyclic(xa, &id, xa_mk_index(base), xa_limit_32b,
			&next, GFP_KERNEL);
	xa_unlock(xa);
	XA_BUG_ON(xa, ret != 1);
	xa_for_each(xa, index, entry)
		xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);

static noinline void check_xa_alloc(void)
{
	check_xa_alloc_1(&xa0, 0);
	check_xa_alloc_1(&xa1, 1);
	check_xa_alloc_2(&xa0, 0);
	check_xa_alloc_2(&xa1, 1);
	check_xa_alloc_3(&xa0, 0);
	check_xa_alloc_3(&xa1, 1);
}
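/*
 * __check_store_iter() below counts conflicting entries with
 * xas_for_each_conflict(), which visits every entry that a store at the
 * xa_state's (index, order) would overwrite, before replacing them all
 * with a single multi-index entry.
 */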
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}

static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long multi = 3 << order;
	unsigned long next = 4 << order;
	unsigned long index;

	xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, index != multi);
	index = multi + 1;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, (index < multi) || (index >= next));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(next));
	XA_BUG_ON(xa, index != next);
	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
	XA_BUG_ON(xa, index != next);

	xa_erase_index(xa, multi);
	xa_erase_index(xa, next);
	xa_erase_index(xa, next + 1);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
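/*
 * As the assertions above rely on: xa_find() begins its search at
 * *index, xa_find_after() begins strictly after it, and both update
 * *index to the position of the entry they return.  Hedged sketch of a
 * full scan built on that contract:
 *
 *	unsigned long index = 0;
 *	void *entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);
 *
 *	while (entry) {
 *		...
 *		entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
 *	}
 */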
static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ?
			10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}

static noinline void check_multi_find_3(struct xarray *xa)
{
	unsigned int order;

	for (order = 5; order < order_limit; order++) {
		unsigned long index = 1UL << (order - 5);

		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
		xa_erase_index(xa, 0);
	}
}

static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
						XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
						XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_find_2(struct xarray *xa)
{
	void *entry;
	unsigned long i, j, index;

	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		xa_store_index(xa, index, GFP_KERNEL);
		j = 0;
		xa_for_each(xa, index, entry) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}

static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}
static noinline void check_find_4(struct xarray *xa)
{
	unsigned long index = 0;
	void *entry;

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry);

	xa_erase_index(xa, ULONG_MAX);
}

static noinline void check_find(struct xarray *xa)
{
	unsigned i;

	check_find_1(xa);
	check_find_2(xa);
	check_find_3(xa);
	check_find_4(xa);

	for (i = 2; i < 10; i++)
		check_multi_find_1(xa, i);
	check_multi_find_2(xa);
	check_multi_find_3(xa);
}

/* See find_swap_entry() in mm/shmem.c */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	return entry ? xas.xa_index : -1;
}

static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}
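/*
 * check_pause() verifies that xas_pause() can be called on every entry
 * of a walk (including multi-index entries and an unaligned starting
 * index) without the iteration skipping or repeating anything.
 * Pausing is what makes it safe to drop the lock or RCU read lock
 * mid-walk and resume later.
 */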
static noinline void check_pause(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	int order;
	unsigned long index = 1;
	unsigned int count = 0;

	for (order = 0; order < order_limit; order++) {
		XA_BUG_ON(xa, xa_store_order(xa, index, order,
					xa_mk_index(index), GFP_KERNEL));
		index += 1UL << order;
	}

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	count = 0;
	xas_set(&xas, 0);
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
		xas_pause(&xas);
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	xa_destroy(xa);

	index = 0;
	for (order = order_limit - 1; order >= 0; order--) {
		XA_BUG_ON(xa, xa_store_order(xa, index, order,
					xa_mk_index(index), GFP_KERNEL));
		index += 1UL << order;
	}

	index = 0;
	count = 0;
	xas_set(&xas, 0);
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		index += 1UL << (order_limit - count - 1);
		count++;
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	index = 0;
	count = 0;
	/* test unaligned index */
	xas_set(&xas, 1 % (1UL << (order_limit - 1)));
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		index += 1UL << (order_limit - count - 1);
		count++;
		xas_pause(&xas);
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	xa_destroy(xa);
}

static noinline void check_move_tiny(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	XA_BUG_ON(xa, !xa_empty(xa));
	rcu_read_lock();
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	rcu_read_unlock();
	xa_store_index(xa, 0, GFP_KERNEL);
	rcu_read_lock();
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_max(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xas_set(&xas, 0);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	xas_pause(&xas);
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}
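/*
 * Unlike xas_find(), which skips over empty slots, xas_next() and
 * xas_prev() move exactly one index at a time and return whatever is
 * there, NULL included; they also wrap between index 0 and ULONG_MAX,
 * as check_move_small() above demonstrated.  check_move() below walks
 * a 2^16-entry array in both directions to verify this at scale.
 */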
static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	check_move_tiny(xa);
	check_move_max(xa);

	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}

static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}

static noinline void check_create_range_1(struct xarray *xa,
		unsigned long index, unsigned order)
{
	unsigned long i;

	xa_store_many_order(xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_2(struct xarray *xa, unsigned order)
{
	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
		xa_store_many_order(xa, i, order);
	for (i = 0; i < nr * nr; i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}

static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}
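/*
 * xas_create_range() only builds the slots covering the xa_state's
 * range; it stores nothing in them, which is why the helpers above
 * follow it with one xas_store()/xas_next() pass per index.
 */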
static noinline void check_create_range_5(struct xarray *xa,
		unsigned long index, unsigned int order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	for (i = 0; i < order + 10; i++) {
		do {
			xas_lock(&xas);
			xas_create_range(&xas);
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));
	}

	xa_destroy(xa);
}

static noinline void check_create_range(struct xarray *xa)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		check_create_range_1(xa, 0, order);
		check_create_range_1(xa, 1U << order, order);
		check_create_range_1(xa, 2U << order, order);
		check_create_range_1(xa, 3U << order, order);
		check_create_range_1(xa, 1U << 24, order);
		if (order < 10)
			check_create_range_2(xa, order);

		check_create_range_4(xa, 0, order);
		check_create_range_4(xa, 1U << order, order);
		check_create_range_4(xa, 2U << order, order);
		check_create_range_4(xa, 3U << order, order);
		check_create_range_4(xa, 1U << 24, order);

		check_create_range_4(xa, 1, order);
		check_create_range_4(xa, (1U << order) + 1, order);
		check_create_range_4(xa, (2U << order) + 1, order);
		check_create_range_4(xa, (2U << order) - 1, order);
		check_create_range_4(xa, (3U << order) + 1, order);
		check_create_range_4(xa, (3U << order) - 1, order);
		check_create_range_4(xa, (1U << 24) + 1, order);

		check_create_range_5(xa, 0, order);
		check_create_range_5(xa, (1U << order), order);
	}

	check_create_range_3();
}

static noinline void __check_store_range(struct xarray *xa, unsigned long first,
		unsigned long last)
{
#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);

	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);

	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
#endif

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_store_range(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 128; i++) {
		for (j = i; j < 128; j++) {
			__check_store_range(xa, i, j);
			__check_store_range(xa, 128 + i, 128 + j);
			__check_store_range(xa, 4095 + i, 4095 + j);
			__check_store_range(xa, 4096 + i, 4096 + j);
			__check_store_range(xa, 123456 + i, 123456 + j);
			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
		}
	}
}
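/*
 * The split tests below convert one order-N entry into 2^(N - new_order)
 * entries of new_order.  xas_split_alloc() preallocates the nodes the
 * split will need, xas_split() rewires them under the lock, and marks
 * set on the original entry are expected to survive on every piece,
 * which is what the XA_MARK_1 counting below verifies.
 */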
#ifdef CONFIG_XARRAY_MULTI
static void check_split_1(struct xarray *xa, unsigned long index,
				unsigned int order, unsigned int new_order)
{
	XA_STATE_ORDER(xas, xa, index, new_order);
	unsigned int i, found;
	void *entry;

	xa_store_order(xa, index, order, xa, GFP_KERNEL);
	xa_set_mark(xa, index, XA_MARK_1);

	xas_split_alloc(&xas, xa, order, GFP_KERNEL);
	xas_lock(&xas);
	xas_split(&xas, xa, order);
	for (i = 0; i < (1 << order); i += (1 << new_order))
		__xa_store(xa, index + i, xa_mk_index(index + i), 0);
	xas_unlock(&xas);

	for (i = 0; i < (1 << order); i++) {
		unsigned int val = index + (i & ~((1 << new_order) - 1));
		XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
	}

	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	xas_set_order(&xas, index, 0);
	found = 0;
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
		found++;
		XA_BUG_ON(xa, xa_is_internal(entry));
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, found != 1 << (order - new_order));

	xa_destroy(xa);
}

static void check_split_2(struct xarray *xa, unsigned long index,
				unsigned int order, unsigned int new_order)
{
	XA_STATE_ORDER(xas, xa, index, new_order);
	unsigned int i, found;
	void *entry;

	xa_store_order(xa, index, order, xa, GFP_KERNEL);
	xa_set_mark(xa, index, XA_MARK_1);

	/* allocate a node for xas_try_split() */
	xas_set_err(&xas, -ENOMEM);
	XA_BUG_ON(xa, !xas_nomem(&xas, GFP_KERNEL));

	xas_lock(&xas);
	xas_try_split(&xas, xa, order);
	if (((new_order / XA_CHUNK_SHIFT) < (order / XA_CHUNK_SHIFT)) &&
	    new_order < order - 1) {
		XA_BUG_ON(xa, !xas_error(&xas) || xas_error(&xas) != -EINVAL);
		xas_unlock(&xas);
		goto out;
	}
	for (i = 0; i < (1 << order); i += (1 << new_order))
		__xa_store(xa, index + i, xa_mk_index(index + i), 0);
	xas_unlock(&xas);

	for (i = 0; i < (1 << order); i++) {
		unsigned int val = index + (i & ~((1 << new_order) - 1));
		XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
	}

	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	xas_set_order(&xas, index, 0);
	found = 0;
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
		found++;
		XA_BUG_ON(xa, xa_is_internal(entry));
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, found != 1 << (order - new_order));
out:
	xas_destroy(&xas);
	xa_destroy(xa);
}

static noinline void check_split(struct xarray *xa)
{
	unsigned int order, new_order;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
		for (new_order = 0; new_order < order; new_order++) {
			check_split_1(xa, 0, order, new_order);
			check_split_1(xa, 1UL << order, order, new_order);
			check_split_1(xa, 3UL << order, order, new_order);

			check_split_2(xa, 0, order, new_order);
			check_split_2(xa, 1UL << order, order, new_order);
			check_split_2(xa, 3UL << order, order, new_order);
		}
	}
}
#else
static void check_split(struct xarray *xa) { }
#endif

static void check_align_1(struct xarray *xa, char *name)
{
	int i;
	unsigned int id;
	unsigned long index;
	void *entry;

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}
	xa_for_each(xa, index, entry)
		XA_BUG_ON(xa, xa_is_err(entry));
	xa_destroy(xa);
}
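/*
 * A hedged sketch of the reserve-then-store pattern that check_align_2()
 * exercises below (not itself part of the test): the GFP-0 store cannot
 * fail, because xa_reserve() already allocated any nodes the slot needs.
 *
 *	if (xa_reserve(xa, index, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	xa_store(xa, index, ptr, 0);	// no memory needed now
 */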
 */
static void check_align_2(struct xarray *xa, char *name)
{
	int i;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
		xa_erase(xa, 0);
	}

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
		xa_erase(xa, 0);
	}

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_align(struct xarray *xa)
{
	char name[] = "Motorola 68000";

	check_align_1(xa, name);
	check_align_1(xa, name + 1);
	check_align_1(xa, name + 2);
	check_align_1(xa, name + 3);
	check_align_2(xa, name);
}

static LIST_HEAD(shadow_nodes);

/*
 * Mimic the page cache's workingset shadow-node tracking: a node whose
 * entries are all value entries goes onto the shadow_nodes list; a node
 * in any other state comes back off it.
 */
static void test_update_node(struct xa_node *node)
{
	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list))
			list_add(&shadow_nodes, &node->private_list);
	} else {
		if (!list_empty(&node->private_list))
			list_del_init(&node->private_list);
	}
}

static noinline void shadow_remove(struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		xa_delete_node(node, test_update_node);
	}
	xa_unlock(xa);
}

static noinline void check_workingset(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}
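
/*
 * A sketch of the semantics the following tests depend on (illustrative
 * only, not executed here; p stands for any non-NULL pointer):
 *
 *	xa_store_order(xa, 16, 4, p, GFP_KERNEL); // occupies indices 16..31
 *	xa_get_order(xa, 16) == 4;	// likewise for any index in 16..31
 *	xa_get_order(xa, 42) == 0;	// an untouched index reports order 0
 */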

static noinline void check_get_order(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;

	for (i = 0; i < 3; i++)
		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xa_store_order(xa, i << order, order,
					xa_mk_index(i << order), GFP_KERNEL);
			for (j = i << order; j < (i + 1) << order; j++)
				XA_BUG_ON(xa, xa_get_order(xa, j) != order);
			xa_erase(xa, i << order);
		}
	}
}

static noinline void check_xas_get_order(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xas_set_order(&xas, i << order, order);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_value(i));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));

			for (j = i << order; j < (i + 1) << order; j++) {
				xas_set_order(&xas, j, 0);
				rcu_read_lock();
				xas_load(&xas);
				XA_BUG_ON(xa, xas_get_order(&xas) != order);
				rcu_read_unlock();
			}

			xas_lock(&xas);
			xas_set_order(&xas, i << order, order);
			xas_store(&xas, NULL);
			xas_unlock(&xas);
		}
	}
}

static noinline void check_xas_conflict_get_order(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	void *entry;
	int only_once;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j, k;

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xas_set_order(&xas, i << order, order);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_value(i));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));

			/*
			 * Ensure xas_get_order() works with
			 * xas_for_each_conflict().
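			 * Every conflicting store below should find exactly
			 * one conflict, and xas_get_order() on that conflict
			 * should report the order of the entry already
			 * present, not the order of the attempted store.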
			 */
			j = i << order;
			for (k = 0; k < order; k++) {
				only_once = 0;
				xas_set_order(&xas, j + (1 << k), k);
				xas_lock(&xas);
				xas_for_each_conflict(&xas, entry) {
					XA_BUG_ON(xa, entry != xa_mk_value(i));
					XA_BUG_ON(xa, xas_get_order(&xas) != order);
					only_once++;
				}
				XA_BUG_ON(xa, only_once != 1);
				xas_unlock(&xas);
			}

			if (order < max_order - 1) {
				only_once = 0;
				xas_set_order(&xas, (i & ~1UL) << order, order + 1);
				xas_lock(&xas);
				xas_for_each_conflict(&xas, entry) {
					XA_BUG_ON(xa, entry != xa_mk_value(i));
					XA_BUG_ON(xa, xas_get_order(&xas) != order);
					only_once++;
				}
				XA_BUG_ON(xa, only_once != 1);
				xas_unlock(&xas);
			}

			xas_set_order(&xas, i << order, order);
			xas_lock(&xas);
			xas_store(&xas, NULL);
			xas_unlock(&xas);
		}
	}
}

static noinline void check_destroy(struct xarray *xa)
{
	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static DEFINE_XARRAY(array);

static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_insert(&array);
	check_cmpxchg(&array);
	check_cmpxchg_order(&array);
	check_reserve(&array);
	check_reserve(&xa0);
	check_multi_store(&array);
	check_multi_store_advanced(&array);
	check_get_order(&array);
	check_xas_get_order(&array);
	check_xas_conflict_get_order(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_pause(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);
	check_align(&xa0);
	check_split(&array);

	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}

static void xarray_exit(void)
{
}

module_init(xarray_checks);
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_DESCRIPTION("XArray API test module");
MODULE_LICENSE("GPL");