// SPDX-License-Identifier: GPL-2.0+
/*
 * test_xarray.c: Test the XArray API
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Copyright (c) 2019-2020 Oracle
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <kunit/test.h>

#include <linux/module.h>
#include <linux/xarray.h>

static const unsigned int order_limit =
		IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;

#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
#define XA_BUG_ON(xa, x) do {		\
	if (x) {			\
		KUNIT_FAIL(test, #x);	\
		xa_dump(xa);		\
		dump_stack();		\
	}				\
} while (0)
#endif

static void *xa_mk_index(unsigned long index)
{
	return xa_mk_value(index & LONG_MAX);
}

static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_store(xa, index, xa_mk_index(index), gfp);
}

static void xa_insert_index(struct kunit *test, struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
				GFP_KERNEL) != 0);
}

static void xa_alloc_index(struct kunit *test, struct xarray *xa, unsigned long index, gfp_t gfp)
{
	u32 id;

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
				gfp) != 0);
	XA_BUG_ON(xa, id != index);
}

static void xa_erase_index(struct kunit *test, struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}

/*
 * If anyone needs this, please move it to xarray.c. We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}

static inline struct xarray *xa_param(struct kunit *test)
{
	return *(struct xarray **)test->param_value;
}

static noinline void check_xa_err(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
	// kills the test-suite :-(
	// XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}

static noinline void check_xas_retry(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	xa_erase_index(test, xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);
	rcu_read_unlock();

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	rcu_read_lock();
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(test, xa, 0);
	xa_erase_index(test, xa, 1);
}

static noinline void check_xa_load(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned long i, j;

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j < i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
	}

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j >= i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		xa_erase_index(test, xa, i);
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_mark_1(struct kunit *test, unsigned long index)
{
	struct xarray *xa = xa_param(test);

	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(test, xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_2);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(test, xa, index);
		xa_erase_index(test, xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_mark_2(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);
	unsigned long index;
	unsigned int count = 0;
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_set_mark(xa, 0, XA_MARK_0);
	xas_lock(&xas);
	xas_load(&xas);
	xas_init_marks(&xas);
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);

	for (index = 3500; index < 4500; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		xa_set_mark(xa, index, XA_MARK_0);
	}

	xas_reset(&xas);
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();
	XA_BUG_ON(xa, count != 1000);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_init_marks(&xas);
		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
	}
	xas_unlock(&xas);

	xa_destroy(xa);
}

static noinline void check_xa_mark_3(struct kunit *test)
{
#ifdef CONFIG_XARRAY_MULTI
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0x41);
	void *entry;
	int count = 0;

	xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
	xa_set_mark(xa, 0x41, XA_MARK_0);

	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
		count++;
		XA_BUG_ON(xa, entry != xa_mk_index(0x40));
	}
	XA_BUG_ON(xa, count != 1);
	rcu_read_unlock();
	xa_destroy(xa);
#endif
}

static noinline void check_xa_mark(struct kunit *test)
{
	unsigned long index;

	for (index = 0; index < 16384; index += 4)
		check_xa_mark_1(test, index);

	check_xa_mark_2(test);
	check_xa_mark_3(test);
}

static noinline void check_xa_shrink(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(test, xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 0; order < max_order; order++) {
		unsigned long max = (1UL << order) - 1;
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		rcu_read_lock();
		node = xa_head(xa);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
				NULL);
		rcu_read_lock();
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		xa_erase_index(test, xa, ULONG_MAX);
		XA_BUG_ON(xa, xa->xa_head != node);
		xa_erase_index(test, xa, 0);
	}
}

static noinline void check_insert(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned long i;

	for (i = 0; i < 1024; i++) {
		xa_insert_index(test, xa, i);
		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
		xa_erase_index(test, xa, i);
	}

	for (i = 10; i < BITS_PER_LONG; i++) {
		xa_insert_index(test, xa, 1UL << i);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
		xa_erase_index(test, xa, 1UL << i);

		xa_insert_index(test, xa, (1UL << i) - 1);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
		xa_erase_index(test, xa, (1UL << i) - 1);
	}

	xa_insert_index(test, xa, ~0UL);
	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
	xa_erase_index(test, xa, ~0UL);

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_cmpxchg(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
	xa_erase_index(test, xa, 12345678);
	xa_erase_index(test, xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_cmpxchg_order(struct kunit *test)
{
#ifdef CONFIG_XARRAY_MULTI
	struct xarray *xa = xa_param(test);

	void *FIVE = xa_mk_value(5);
	unsigned int i, order = 3;

	XA_BUG_ON(xa,
		      xa_store_order(xa, 0, order, FIVE, GFP_KERNEL));

	/* Check entry FIVE has the order saved */
	XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != order);

	/* Check all the tied indexes have the same entry and order */
	for (i = 0; i < (1 << order); i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	/* Ensure that nothing is stored at index '1 << order' */
	XA_BUG_ON(xa, xa_load(xa, 1 << order) != NULL);

	/*
	 * Additionally, keep the node information and the order at
	 * '1 << order'
	 */
	XA_BUG_ON(xa, xa_store_order(xa, 1 << order, order, FIVE, GFP_KERNEL));
	for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	/* Conditionally replace FIVE entry at index '0' with NULL */
	XA_BUG_ON(xa, xa_cmpxchg(xa, 0, FIVE, NULL, GFP_KERNEL) != FIVE);

	/* Verify the order is lost at FIVE (and old) entries */
	XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != 0);

	/* Verify the order and entries are lost in all the tied indexes */
	for (i = 0; i < (1 << order); i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != NULL);
		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
	}

	/* Verify node and order are kept at '1 << order' */
	for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static noinline void check_reserve(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	void *entry;
	unsigned long index;
	int count;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(test, xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as ZERO */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(test, xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* xa_insert treats it as busy */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
			-EBUSY);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
	xa_store_index(xa, 7, GFP_KERNEL);

	count = 0;
	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, index != 5 && index != 7);
		count++;
	}
	XA_BUG_ON(xa, count != 2);

	/* If we free a reserved entry, we should be able to allocate it */
	if (xa->xa_flags & XA_FLAGS_ALLOC) {
		u32 id;

		XA_BUG_ON(xa, xa_alloc(xa, &id,
					xa_mk_value(8),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 8);

		xa_release(xa, 6);
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 6);
	}

	xa_destroy(xa);
}

static noinline void check_xas_erase(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_index(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		xas_lock(&xas);
		xas_store(&xas, NULL);

		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_index(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}

#ifdef CONFIG_XARRAY_MULTI
static noinline void check_multi_store_1(struct kunit *test, unsigned long index,
		unsigned int order)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, index);
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xa_erase_index(test, xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_2(struct kunit *test, unsigned long index,
		unsigned int order)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != index);
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_3(struct kunit *test, unsigned long index,
		unsigned int order)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
#endif

static noinline void check_multi_store(struct kunit *test)
{
#ifdef CONFIG_XARRAY_MULTI
	struct xarray *xa = xa_param(test);

	unsigned long i, j, k;
	unsigned int
		max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	for (i = 0; i < 20; i++) {
		check_multi_store_1(test, 200, i);
		check_multi_store_1(test, 0, i);
		check_multi_store_1(test, (1UL << i) + 1, i);
	}
	check_multi_store_2(test, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(test, 0, i);
		check_multi_store_3(test, 1UL << i, i);
	}
#endif
}

#ifdef CONFIG_XARRAY_MULTI
/* mimics page cache __filemap_add_folio() */
static noinline void check_xa_multi_store_adv_add(struct kunit *test,
						  unsigned long index,
						  unsigned int order,
						  void *p)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, index);
	unsigned int nrpages = 1UL << order;

	/* users are responsible for index alignment to the order when adding */
	XA_BUG_ON(xa, index & (nrpages - 1));

	xas_set_order(&xas, index, order);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, p);
		xas_unlock_irq(&xas);
		/*
		 * In our selftest case the only failure we can expect is for
		 * there not to be enough memory, as we're not mimicking the
		 * entire page cache, so verify that's the only error we can
		 * run into here. The xas_nomem() which follows will fix that
		 * condition for us so the loop can carry on.
		 */
		XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, xa_load(xa, index) != p);
}

/* mimics page_cache_delete() */
static noinline void check_xa_multi_store_adv_del_entry(struct kunit *test,
							unsigned long index,
							unsigned int order)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, index);

	xas_set_order(&xas, index, order);
	xas_store(&xas, NULL);
	xas_init_marks(&xas);
}

static noinline void check_xa_multi_store_adv_delete(struct kunit *test,
						     unsigned long index,
						     unsigned int order)
{
	struct xarray *xa = xa_param(test);

	xa_lock_irq(xa);
	check_xa_multi_store_adv_del_entry(test, index, order);
	xa_unlock_irq(xa);
}

/* mimics page cache filemap_get_entry() */
static noinline void *test_get_entry(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *p;
	static unsigned int loops = 0;

	rcu_read_lock();
repeat:
	xas_reset(&xas);
	p = xas_load(&xas);
	if (xas_retry(&xas, p))
		goto repeat;
	rcu_read_unlock();

	/*
	 * This is not part of the page cache; this selftest is pretty
	 * aggressive and does not want to trust the xarray API but rather
	 * test it, and for order 20 (4 GiB block size) we can loop over
	 * a million entries, which can cause a soft lockup. Page cache
	 * APIs won't be stupid: proper page cache APIs loop over the proper
	 * order so when using a larger order we skip shared entries.
	 */
	if (++loops % XA_CHECK_SCHED == 0)
		schedule();

	return p;
}

static unsigned long some_val = 0xdeadbeef;
static unsigned long some_val_2 = 0xdeaddead;

/* mimics the page cache usage */
static noinline void check_xa_multi_store_adv(struct kunit *test,
					      unsigned long pos,
					      unsigned int order)
{
	struct xarray *xa = xa_param(test);

	unsigned int nrpages = 1UL << order;
	unsigned long index, base, next_index, next_next_index;
	unsigned int i;

	index = pos >> PAGE_SHIFT;
	base = round_down(index, nrpages);
	next_index = round_down(base + nrpages, nrpages);
	next_next_index = round_down(next_index + nrpages, nrpages);

	check_xa_multi_store_adv_add(test, base, order, &some_val);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val);

	XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL);

	/* Use order 0 for the next item */
	check_xa_multi_store_adv_add(test, next_index, 0, &some_val_2);
	XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2);

	/* Remove the next item */
	check_xa_multi_store_adv_delete(test, next_index, 0);

	/* Now use order for a new pointer */
	check_xa_multi_store_adv_add(test, next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);

	check_xa_multi_store_adv_delete(test, next_index, order);
	check_xa_multi_store_adv_delete(test, base, order);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* starting fresh again */

	/* let's test some holes now */

	/* hole at base and next_next */
	check_xa_multi_store_adv_add(test, next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL);

	check_xa_multi_store_adv_delete(test, next_index, order);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* hole at base and next */

	check_xa_multi_store_adv_add(test, next_next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2);

	check_xa_multi_store_adv_delete(test, next_next_index, order);
	XA_BUG_ON(xa, !xa_empty(xa));
}
#endif

static noinline void check_multi_store_advanced(struct kunit *test)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned long end = ULONG_MAX/2;
	unsigned long pos, i;

	/*
	 * About 117 million tests below.
	 */
	for (pos = 7; pos < end; pos = (pos * pos) + 564) {
		for (i = 0; i < max_order; i++) {
			check_xa_multi_store_adv(test, pos, i);
			check_xa_multi_store_adv(test, pos + 157, i);
		}
	}
#endif
}

static noinline void check_xa_alloc_1(struct kunit *test, struct xarray *xa, unsigned int base)
{
	int i;
	u32 id;

	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
	xa_alloc_index(test, xa, base, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(test, xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And it should assign %base again */
	xa_alloc_index(test, xa, base, GFP_KERNEL);

	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
		xa_alloc_index(test, xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
		xa_erase_index(test, xa, i);
	xa_alloc_index(test, xa, base, GFP_KERNEL);

	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);

	/* And it should assign %base again */
	xa_alloc_index(test, xa, base, GFP_KERNEL);

	/* The next assigned ID should be base+1 */
	xa_alloc_index(test, xa, base + 1, GFP_KERNEL);
	xa_erase_index(test, xa, base + 1);

	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
	xa_alloc_index(test, xa, base + 2, GFP_KERNEL);

	/* If we then erase base, it should be free */
	xa_erase_index(test, xa, base);
	xa_alloc_index(test, xa, base, GFP_KERNEL);

	xa_erase_index(test, xa, base + 1);
	xa_erase_index(test, xa, base + 2);

	for (i = 1; i < 5000; i++) {
		xa_alloc_index(test, xa, base + i, GFP_KERNEL);
	}

	xa_destroy(xa);

	/* Check that we fail properly at the limit of allocation */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xfffffffeU);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xffffffffU);
	id = 3;
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, id != 3);
	xa_destroy(xa);

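	/* An inverted range, XA_LIMIT(10, 5) with min > max, can never be satisfied */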
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	xa_erase_index(test, xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_alloc_2(struct kunit *test, struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* Allocate and free a NULL and check xa_empty() behaves */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ditto, but check destroy instead of erase */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = base; i < base + 10; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}

	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 5);

	xa_for_each(xa, index, entry) {
		xa_erase_index(test, xa, index);
	}

	for (i = base; i < base + 9; i++) {
		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
		XA_BUG_ON(xa, xa_empty(xa));
	}
	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	xa_destroy(xa);
}

static noinline void check_xa_alloc_3(struct kunit *test, struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(test, xa, 0x3ffd);
	xa_erase_index(test, xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != (id == 1));
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(test, xa, base);
	xa_erase_index(test, xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 1);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id,
				xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(test, xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}

static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);

static noinline void check_xa_alloc(struct kunit *test)
{
	check_xa_alloc_1(test, &xa0, 0);
	check_xa_alloc_1(test, &xa1, 1);
	check_xa_alloc_2(test, &xa0, 0);
	check_xa_alloc_2(test, &xa1, 1);
	check_xa_alloc_3(test, &xa0, 0);
	check_xa_alloc_3(test, &xa1, 1);
}

static noinline void __check_store_iter(struct kunit *test, unsigned long start,
			unsigned int order, unsigned int present)
{
	struct xarray *xa = xa_param(test);

	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(test, xa, start);
}

static noinline void check_store_iter(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ?
			20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		__check_store_iter(test, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(test, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(test, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(test, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(test, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(test, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(test, 64, 2, 1);
	xa_erase_index(test, xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_find_1(struct kunit *test, unsigned int order)
{
#ifdef CONFIG_XARRAY_MULTI
	struct xarray *xa = xa_param(test);

	unsigned long multi = 3 << order;
	unsigned long next = 4 << order;
	unsigned long index;

	xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, index != multi);
	index = multi + 1;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, (index < multi) || (index >= next));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(next));
	XA_BUG_ON(xa, index != next);
	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
	XA_BUG_ON(xa, index != next);

	xa_erase_index(test, xa, multi);
	xa_erase_index(test, xa, next);
	xa_erase_index(test, xa, next + 1);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static noinline void check_multi_find_2(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ?
			10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(test, xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(test, xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}

static noinline void check_multi_find_3(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned int order;

	for (order = 5; order < order_limit; order++) {
		unsigned long index = 1UL << (order - 5);

		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
		xa_erase_index(test, xa, 0);
	}
}

static noinline void check_find_1(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
						XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
						XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			xa_erase_index(test, xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(test, xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_find_2(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	void *entry;
	unsigned long i, j, index;

	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		xa_store_index(xa, index, GFP_KERNEL);
		j = 0;
		xa_for_each(xa, index, entry) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}

static noinline void check_find_3(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}

static
noinline void check_find_4(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned long index = 0;
	void *entry;

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry);

	xa_erase_index(test, xa, ULONG_MAX);
}

static noinline void check_find(struct kunit *test)
{
	unsigned i;

	check_find_1(test);
	check_find_2(test);
	check_find_3(test);
	check_find_4(test);

	for (i = 2; i < 10; i++)
		check_multi_find_1(test, i);
	check_multi_find_2(test);
	check_multi_find_3(test);
}

/* See find_swap_entry() in mm/shmem.c */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	return entry ? xas.xa_index : -1;
}

static noinline void check_find_entry(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(test, xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_pause(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned int order;
	unsigned long index = 1;
	unsigned int count = 0;

	for (order = 0; order < order_limit; order++) {
		XA_BUG_ON(xa, xa_store_order(xa, index, order,
					xa_mk_index(index), GFP_KERNEL));
		index += 1UL << order;
	}

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	count = 0;
	xas_set(&xas, 0);
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
		xas_pause(&xas);
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	xa_destroy(xa);
}

static noinline void check_move_tiny(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);

	XA_BUG_ON(xa, !xa_empty(xa));
	rcu_read_lock();
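	/* Stepping forward through an empty array must find nothing */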
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	rcu_read_unlock();
	xa_store_index(xa, 0, GFP_KERNEL);
	rcu_read_lock();
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
	xa_erase_index(test, xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_max(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xas_set(&xas, 0);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	xas_pause(&xas);
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xa_erase_index(test, xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_small(struct kunit *test, unsigned long idx)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(test, xa, 0);
	xa_erase_index(test, xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(test, xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry =
		    xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	check_move_tiny(test);
	check_move_max(test);

	for (i = 0; i < 16; i++)
		check_move_small(test, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(test, (1UL << i) - 1);
}

static noinline void xa_store_many_order(struct kunit *test, struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}

static noinline void check_create_range_1(struct kunit *test,
		unsigned long index, unsigned order)
{
	struct xarray *xa = xa_param(test);

	unsigned long i;

	xa_store_many_order(test, xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
		xa_erase_index(test, xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_2(struct kunit *test, unsigned int order)
{
	struct xarray *xa = xa_param(test);

	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
		xa_store_many_order(test, xa, i, order);
	for (i = 0; i < nr * nr; i++)
		xa_erase_index(test, xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_3(struct kunit *test)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}

static noinline void check_create_range_4(struct kunit *test,
		unsigned long index, unsigned order)
{
	struct xarray *xa = xa_param(test);

	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(test, xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_5(struct kunit *test,
		unsigned long index, unsigned int order)
{
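	/* Repeatedly create a range that is already covered by a multi-index entry */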
	struct xarray *xa = xa_param(test);

	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	for (i = 0; i < order + 10; i++) {
		do {
			xas_lock(&xas);
			xas_create_range(&xas);
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));
	}

	xa_destroy(xa);
}

static noinline void check_create_range(struct kunit *test)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		check_create_range_1(test, 0, order);
		check_create_range_1(test, 1U << order, order);
		check_create_range_1(test, 2U << order, order);
		check_create_range_1(test, 3U << order, order);
		check_create_range_1(test, 1U << 24, order);
		if (order < 10)
			check_create_range_2(test, order);

		check_create_range_4(test, 0, order);
		check_create_range_4(test, 1U << order, order);
		check_create_range_4(test, 2U << order, order);
		check_create_range_4(test, 3U << order, order);
		check_create_range_4(test, 1U << 24, order);

		check_create_range_4(test, 1, order);
		check_create_range_4(test, (1U << order) + 1, order);
		check_create_range_4(test, (2U << order) + 1, order);
		check_create_range_4(test, (2U << order) - 1, order);
		check_create_range_4(test, (3U << order) + 1, order);
		check_create_range_4(test, (3U << order) - 1, order);
		check_create_range_4(test, (1U << 24) + 1, order);

		check_create_range_5(test, 0, order);
		check_create_range_5(test, (1U << order), order);
	}

	check_create_range_3(test);
}

static noinline void __check_store_range(struct kunit *test, unsigned long first,
		unsigned long last)
{
	struct xarray *xa = xa_param(test);

#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);

	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);

	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
#endif

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_store_range(struct kunit *test)
{
	unsigned long i, j;

	for (i = 0; i < 128; i++) {
		for (j = i; j < 128; j++) {
			__check_store_range(test, i, j);
			__check_store_range(test, 128 + i, 128 + j);
			__check_store_range(test, 4095 + i, 4095 + j);
			__check_store_range(test, 4096 + i, 4096 + j);
			__check_store_range(test, 123456 + i, 123456 + j);
			__check_store_range(test, (1 << 24) + i, (1 << 24) + j);
		}
	}
}

#ifdef CONFIG_XARRAY_MULTI
static void check_split_1(struct kunit *test, unsigned long index,
				unsigned int order, unsigned int new_order)
{
	struct xarray *xa = xa_param(test);

	XA_STATE_ORDER(xas, xa, index, new_order);
	unsigned int i, found;
	void *entry;

	xa_store_order(xa, index, order, xa, GFP_KERNEL);
	xa_set_mark(xa, index, XA_MARK_1);

	xas_split_alloc(&xas, xa, order, GFP_KERNEL);
	xas_lock(&xas);
	xas_split(&xas, xa, order);
	for (i = 0; i < (1 << order); i += (1 << new_order))
		__xa_store(xa, index + i, xa_mk_index(index + i), 0);
	xas_unlock(&xas);

	for (i = 0; i <
	for (i = 0; i < (1 << order); i++) {
		unsigned int val = index + (i & ~((1 << new_order) - 1));
		XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
	}

	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	xas_set_order(&xas, index, 0);
	found = 0;
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
		found++;
		XA_BUG_ON(xa, xa_is_internal(entry));
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, found != 1 << (order - new_order));

	xa_destroy(xa);
}

static noinline void check_split(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned int order, new_order;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
		for (new_order = 0; new_order < order; new_order++) {
			check_split_1(test, 0, order, new_order);
			check_split_1(test, 1UL << order, order, new_order);
			check_split_1(test, 3UL << order, order, new_order);
		}
	}
}
#else
static void check_split(struct kunit *test) { }
#endif

static void check_align_1(struct kunit *test, char *name)
{
	struct xarray *xa = xa_param(test);

	int i;
	unsigned int id;
	unsigned long index;
	void *entry;

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}
	xa_for_each(xa, index, entry)
		XA_BUG_ON(xa, xa_is_err(entry));
	xa_destroy(xa);
}

/*
 * We should always be able to store without allocating memory after
 * reserving a slot.
 */
static void check_align_2(struct kunit *test, char *name)
{
	struct xarray *xa = xa_param(test);

	int i;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
		xa_erase(xa, 0);
	}

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
		xa_erase(xa, 0);
	}

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_align(struct kunit *test)
{
	char name[] = "Motorola 68000";

	check_align_1(test, name);
	check_align_1(test, name + 1);
	check_align_1(test, name + 2);
	check_align_1(test, name + 3);
	check_align_2(test, name);
}

static LIST_HEAD(shadow_nodes);

static void test_update_node(struct xa_node *node)
{
	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list))
			list_add(&shadow_nodes, &node->private_list);
	} else {
		if (!list_empty(&node->private_list))
			list_del_init(&node->private_list);
	}
}

static noinline void shadow_remove(struct kunit *test, struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		xa_delete_node(node, test_update_node);
	}
	xa_unlock(xa);
}

struct workingset_testcase {
	struct xarray *xa;
	unsigned long index;
};

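/*
 * Roughly mimics how the page-cache workingset code tracks shadow nodes:
 * test_update_node() puts a node on shadow_nodes once every slot it holds
 * is a value entry and takes it back off when a pointer entry is stored.
 * check_workingset() drives that callback through the advanced API and
 * then reclaims the node with shadow_remove().
 */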
static noinline void check_workingset(struct kunit *test)
{
	struct workingset_testcase tc = *(struct workingset_testcase *)
						test->param_value;
	struct xarray *xa = tc.xa;
	unsigned long index = tc.index;

	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(test, xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct kunit *test)
{
#ifdef CONFIG_XARRAY_MULTI
	struct xarray *xa = xa_param(test);

	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}

static noinline void check_get_order(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;

	for (i = 0; i < 3; i++)
		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xa_store_order(xa, i << order, order,
					xa_mk_index(i << order), GFP_KERNEL);
			for (j = i << order; j < (i + 1) << order; j++)
				XA_BUG_ON(xa, xa_get_order(xa, j) != order);
			xa_erase(xa, i << order);
		}
	}
}

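/*
 * Same ground as check_get_order(), but through the advanced API: entries
 * are stored with xas_store() under the lock, and the order of each index
 * is read back with xas_get_order() after xas_load().
 */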
static noinline void check_xas_get_order(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);

	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xas_set_order(&xas, i << order, order);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_value(i));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));

			for (j = i << order; j < (i + 1) << order; j++) {
				xas_set_order(&xas, j, 0);
				rcu_read_lock();
				xas_load(&xas);
				XA_BUG_ON(xa, xas_get_order(&xas) != order);
				rcu_read_unlock();
			}

			xas_lock(&xas);
			xas_set_order(&xas, i << order, order);
			xas_store(&xas, NULL);
			xas_unlock(&xas);
		}
	}
}

static noinline void check_xas_conflict_get_order(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	XA_STATE(xas, xa, 0);

	void *entry;
	int only_once;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j, k;

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xas_set_order(&xas, i << order, order);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_value(i));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));

			/*
			 * Ensure xas_get_order works with xas_for_each_conflict.
			 */
			j = i << order;
			for (k = 0; k < order; k++) {
				only_once = 0;
				xas_set_order(&xas, j + (1 << k), k);
				xas_lock(&xas);
				xas_for_each_conflict(&xas, entry) {
					XA_BUG_ON(xa, entry != xa_mk_value(i));
					XA_BUG_ON(xa, xas_get_order(&xas) != order);
					only_once++;
				}
				XA_BUG_ON(xa, only_once != 1);
				xas_unlock(&xas);
			}

			if (order < max_order - 1) {
				only_once = 0;
				xas_set_order(&xas, (i & ~1UL) << order, order + 1);
				xas_lock(&xas);
				xas_for_each_conflict(&xas, entry) {
					XA_BUG_ON(xa, entry != xa_mk_value(i));
					XA_BUG_ON(xa, xas_get_order(&xas) != order);
					only_once++;
				}
				XA_BUG_ON(xa, only_once != 1);
				xas_unlock(&xas);
			}

			xas_set_order(&xas, i << order, order);
			xas_lock(&xas);
			xas_store(&xas, NULL);
			xas_unlock(&xas);
		}
	}
}

static noinline void check_destroy(struct kunit *test)
{
	struct xarray *xa = xa_param(test);

	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static DEFINE_XARRAY(array);
static struct xarray *arrays[] = { &array };
KUNIT_ARRAY_PARAM(array, arrays, NULL);

static struct xarray *xa0s[] = { &xa0 };
KUNIT_ARRAY_PARAM(xa0, xa0s, NULL);

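/*
 * Workingset test indices are presumably chosen to exercise the update-node
 * callback on leaf nodes at different positions in the tree: 0 in the first
 * node, 64 at a chunk boundary, and 4096 (64 * 64) one level deeper when
 * XA_CHUNK_SHIFT is 6.
 */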
static struct workingset_testcase workingset_testcases[] = {
	{ &array, 0 },
	{ &array, 64 },
	{ &array, 4096 },
};
KUNIT_ARRAY_PARAM(workingset, workingset_testcases, NULL);

static struct kunit_case xarray_cases[] = {
	KUNIT_CASE_PARAM(check_xa_err, array_gen_params),
	KUNIT_CASE_PARAM(check_xas_retry, array_gen_params),
	KUNIT_CASE_PARAM(check_xa_load, array_gen_params),
	KUNIT_CASE_PARAM(check_xa_mark, array_gen_params),
	KUNIT_CASE_PARAM(check_xa_shrink, array_gen_params),
	KUNIT_CASE_PARAM(check_xas_erase, array_gen_params),
	KUNIT_CASE_PARAM(check_insert, array_gen_params),
	KUNIT_CASE_PARAM(check_cmpxchg, array_gen_params),
	KUNIT_CASE_PARAM(check_cmpxchg_order, array_gen_params),
	KUNIT_CASE_PARAM(check_reserve, array_gen_params),
	KUNIT_CASE_PARAM(check_reserve, xa0_gen_params),
	KUNIT_CASE_PARAM(check_multi_store, array_gen_params),
	KUNIT_CASE_PARAM(check_multi_store_advanced, array_gen_params),
	KUNIT_CASE_PARAM(check_get_order, array_gen_params),
	KUNIT_CASE_PARAM(check_xas_get_order, array_gen_params),
	KUNIT_CASE_PARAM(check_xas_conflict_get_order, array_gen_params),
	KUNIT_CASE(check_xa_alloc),
	KUNIT_CASE_PARAM(check_find, array_gen_params),
	KUNIT_CASE_PARAM(check_find_entry, array_gen_params),
	KUNIT_CASE_PARAM(check_pause, array_gen_params),
	KUNIT_CASE_PARAM(check_account, array_gen_params),
	KUNIT_CASE_PARAM(check_destroy, array_gen_params),
	KUNIT_CASE_PARAM(check_move, array_gen_params),
	KUNIT_CASE_PARAM(check_create_range, array_gen_params),
	KUNIT_CASE_PARAM(check_store_range, array_gen_params),
	KUNIT_CASE_PARAM(check_store_iter, array_gen_params),
	KUNIT_CASE_PARAM(check_align, xa0_gen_params),
	KUNIT_CASE_PARAM(check_split, array_gen_params),
	KUNIT_CASE_PARAM(check_workingset, workingset_gen_params),
	{},
};

static struct kunit_suite xarray_suite = {
	.name = "xarray",
	.test_cases = xarray_cases,
};

kunit_test_suite(xarray_suite);

MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_DESCRIPTION("XArray API test module");
MODULE_LICENSE("GPL");