1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * test_xarray.c: Test the XArray API 4 * Copyright (c) 2017-2018 Microsoft Corporation 5 * Copyright (c) 2019-2020 Oracle 6 * Author: Matthew Wilcox <willy@infradead.org> 7 */ 8 9 #include <kunit/test.h> 10 11 #include <linux/module.h> 12 #include <linux/xarray.h> 13 14 static const unsigned int order_limit = 15 IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1; 16 17 #ifndef XA_DEBUG 18 # ifdef __KERNEL__ 19 void xa_dump(const struct xarray *xa) { } 20 # endif 21 #undef XA_BUG_ON 22 #define XA_BUG_ON(xa, x) do { \ 23 if (x) { \ 24 KUNIT_FAIL(test, #x); \ 25 xa_dump(xa); \ 26 dump_stack(); \ 27 } \ 28 } while (0) 29 #endif 30 31 static void *xa_mk_index(unsigned long index) 32 { 33 return xa_mk_value(index & LONG_MAX); 34 } 35 36 static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) 37 { 38 return xa_store(xa, index, xa_mk_index(index), gfp); 39 } 40 41 static void xa_insert_index(struct kunit *test, struct xarray *xa, unsigned long index) 42 { 43 XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index), 44 GFP_KERNEL) != 0); 45 } 46 47 static void xa_alloc_index(struct kunit *test, struct xarray *xa, unsigned long index, gfp_t gfp) 48 { 49 u32 id; 50 51 XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b, 52 gfp) != 0); 53 XA_BUG_ON(xa, id != index); 54 } 55 56 static void xa_erase_index(struct kunit *test, struct xarray *xa, unsigned long index) 57 { 58 XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index)); 59 XA_BUG_ON(xa, xa_load(xa, index) != NULL); 60 } 61 62 /* 63 * If anyone needs this, please move it to xarray.c. We have no current 64 * users outside the test suite because all current multislot users want 65 * to use the advanced API. 66 */ 67 static void *xa_store_order(struct xarray *xa, unsigned long index, 68 unsigned order, void *entry, gfp_t gfp) 69 { 70 XA_STATE_ORDER(xas, xa, index, order); 71 void *curr; 72 73 do { 74 xas_lock(&xas); 75 curr = xas_store(&xas, entry); 76 xas_unlock(&xas); 77 } while (xas_nomem(&xas, gfp)); 78 79 return curr; 80 } 81 82 static inline struct xarray *xa_param(struct kunit *test) 83 { 84 return *(struct xarray **)test->param_value; 85 } 86 87 static noinline void check_xa_err(struct kunit *test) 88 { 89 struct xarray *xa = xa_param(test); 90 91 XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0); 92 XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0); 93 #ifndef __KERNEL__ 94 /* The kernel does not fail GFP_NOWAIT allocations */ 95 XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM); 96 XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM); 97 #endif 98 XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0); 99 XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0); 100 XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0); 101 // kills the test-suite :-( 102 // XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL); 103 } 104 105 static noinline void check_xas_retry(struct kunit *test) 106 { 107 struct xarray *xa = xa_param(test); 108 109 XA_STATE(xas, xa, 0); 110 void *entry; 111 112 xa_store_index(xa, 0, GFP_KERNEL); 113 xa_store_index(xa, 1, GFP_KERNEL); 114 115 rcu_read_lock(); 116 XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0)); 117 xa_erase_index(test, xa, 1); 118 XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas))); 119 XA_BUG_ON(xa, xas_retry(&xas, NULL)); 120 XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0))); 121 xas_reset(&xas); 122 XA_BUG_ON(xa, xas.xa_node != XAS_RESTART); 123 XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0)); 124 XA_BUG_ON(xa, xas.xa_node != NULL); 125 rcu_read_unlock(); 126 127 XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL); 128 129 rcu_read_lock(); 130 XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas))); 131 xas.xa_node = XAS_RESTART; 132 XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0)); 133 rcu_read_unlock(); 134 135 /* Make sure we can iterate through retry entries */ 136 xas_lock(&xas); 137 xas_set(&xas, 0); 138 xas_store(&xas, XA_RETRY_ENTRY); 139 xas_set(&xas, 1); 140 xas_store(&xas, XA_RETRY_ENTRY); 141 142 xas_set(&xas, 0); 143 xas_for_each(&xas, entry, ULONG_MAX) { 144 xas_store(&xas, xa_mk_index(xas.xa_index)); 145 } 146 xas_unlock(&xas); 147 148 xa_erase_index(test, xa, 0); 149 xa_erase_index(test, xa, 1); 150 } 151 152 static noinline void check_xa_load(struct kunit *test) 153 { 154 struct xarray *xa = xa_param(test); 155 156 unsigned long i, j; 157 158 for (i = 0; i < 1024; i++) { 159 for (j = 0; j < 1024; j++) { 160 void *entry = xa_load(xa, j); 161 if (j < i) 162 XA_BUG_ON(xa, xa_to_value(entry) != j); 163 else 164 XA_BUG_ON(xa, entry); 165 } 166 XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); 167 } 168 169 for (i = 0; i < 1024; i++) { 170 for (j = 0; j < 1024; j++) { 171 void *entry = xa_load(xa, j); 172 if (j >= i) 173 XA_BUG_ON(xa, xa_to_value(entry) != j); 174 else 175 XA_BUG_ON(xa, entry); 176 } 177 xa_erase_index(test, xa, i); 178 } 179 XA_BUG_ON(xa, !xa_empty(xa)); 180 } 181 182 static noinline void check_xa_mark_1(struct kunit *test, unsigned long index) 183 { 184 struct xarray *xa = xa_param(test); 185 186 unsigned int order; 187 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1; 188 189 /* NULL elements have no marks set */ 190 XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 191 xa_set_mark(xa, index, XA_MARK_0); 192 XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 193 194 /* Storing a pointer will not make a mark appear */ 195 XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL); 196 XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 197 xa_set_mark(xa, index, XA_MARK_0); 198 XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0)); 199 200 /* Setting one mark will not set another mark */ 201 XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0)); 202 XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1)); 203 204 /* Storing NULL clears marks, and they can't be set again */ 205 xa_erase_index(test, xa, index); 206 XA_BUG_ON(xa, !xa_empty(xa)); 207 XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 208 xa_set_mark(xa, index, XA_MARK_0); 209 XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 210 211 /* 212 * Storing a multi-index entry over entries with marks gives the 213 * entire entry the union of the marks 214 */ 215 BUG_ON((index % 4) != 0); 216 for (order = 2; order < max_order; order++) { 217 unsigned long base = round_down(index, 1UL << order); 218 unsigned long next = base + (1UL << order); 219 unsigned long i; 220 221 XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL)); 222 xa_set_mark(xa, index + 1, XA_MARK_0); 223 XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); 224 xa_set_mark(xa, index + 2, XA_MARK_2); 225 XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); 226 xa_store_order(xa, index, order, xa_mk_index(index), 227 GFP_KERNEL); 228 for (i = base; i < next; i++) { 229 XA_STATE(xas, xa, i); 230 unsigned int seen = 0; 231 void *entry; 232 233 XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); 234 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1)); 235 XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2)); 236 237 /* We should see two elements in the array */ 238 rcu_read_lock(); 239 xas_for_each(&xas, entry, ULONG_MAX) 240 seen++; 241 rcu_read_unlock(); 242 XA_BUG_ON(xa, seen != 2); 243 244 /* One of which is marked */ 245 xas_set(&xas, 0); 246 seen = 0; 247 rcu_read_lock(); 248 xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) 249 seen++; 250 rcu_read_unlock(); 251 XA_BUG_ON(xa, seen != 1); 252 } 253 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0)); 254 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1)); 255 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2)); 256 xa_erase_index(test, xa, index); 257 xa_erase_index(test, xa, next); 258 XA_BUG_ON(xa, !xa_empty(xa)); 259 } 260 XA_BUG_ON(xa, !xa_empty(xa)); 261 } 262 263 static noinline void check_xa_mark_2(struct kunit *test) 264 { 265 struct xarray *xa = xa_param(test); 266 267 XA_STATE(xas, xa, 0); 268 unsigned long index; 269 unsigned int count = 0; 270 void *entry; 271 272 xa_store_index(xa, 0, GFP_KERNEL); 273 xa_set_mark(xa, 0, XA_MARK_0); 274 xas_lock(&xas); 275 xas_load(&xas); 276 xas_init_marks(&xas); 277 xas_unlock(&xas); 278 XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0); 279 280 for (index = 3500; index < 4500; index++) { 281 xa_store_index(xa, index, GFP_KERNEL); 282 xa_set_mark(xa, index, XA_MARK_0); 283 } 284 285 xas_reset(&xas); 286 rcu_read_lock(); 287 xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) 288 count++; 289 rcu_read_unlock(); 290 XA_BUG_ON(xa, count != 1000); 291 292 xas_lock(&xas); 293 xas_for_each(&xas, entry, ULONG_MAX) { 294 xas_init_marks(&xas); 295 XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0)); 296 XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0)); 297 } 298 xas_unlock(&xas); 299 300 xa_destroy(xa); 301 } 302 303 static noinline void check_xa_mark_3(struct kunit *test) 304 { 305 #ifdef CONFIG_XARRAY_MULTI 306 struct xarray *xa = xa_param(test); 307 308 XA_STATE(xas, xa, 0x41); 309 void *entry; 310 int count = 0; 311 312 xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL); 313 xa_set_mark(xa, 0x41, XA_MARK_0); 314 315 rcu_read_lock(); 316 xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) { 317 count++; 318 XA_BUG_ON(xa, entry != xa_mk_index(0x40)); 319 } 320 XA_BUG_ON(xa, count != 1); 321 rcu_read_unlock(); 322 xa_destroy(xa); 323 #endif 324 } 325 326 static noinline void check_xa_mark(struct kunit *test) 327 { 328 unsigned long index; 329 330 for (index = 0; index < 16384; index += 4) 331 check_xa_mark_1(test, index); 332 333 check_xa_mark_2(test); 334 check_xa_mark_3(test); 335 } 336 337 static noinline void check_xa_shrink(struct kunit *test) 338 { 339 struct xarray *xa = xa_param(test); 340 341 XA_STATE(xas, xa, 1); 342 struct xa_node *node; 343 unsigned int order; 344 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1; 345 346 XA_BUG_ON(xa, !xa_empty(xa)); 347 XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL); 348 XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL); 349 350 /* 351 * Check that erasing the entry at 1 shrinks the tree and properly 352 * marks the node as being deleted. 353 */ 354 xas_lock(&xas); 355 XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1)); 356 node = xas.xa_node; 357 XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0)); 358 XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1)); 359 XA_BUG_ON(xa, xa_load(xa, 1) != NULL); 360 XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS); 361 XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY); 362 XA_BUG_ON(xa, xas_load(&xas) != NULL); 363 xas_unlock(&xas); 364 XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); 365 xa_erase_index(test, xa, 0); 366 XA_BUG_ON(xa, !xa_empty(xa)); 367 368 for (order = 0; order < max_order; order++) { 369 unsigned long max = (1UL << order) - 1; 370 xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL); 371 XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0)); 372 XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); 373 rcu_read_lock(); 374 node = xa_head(xa); 375 rcu_read_unlock(); 376 XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) != 377 NULL); 378 rcu_read_lock(); 379 XA_BUG_ON(xa, xa_head(xa) == node); 380 rcu_read_unlock(); 381 XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); 382 xa_erase_index(test, xa, ULONG_MAX); 383 XA_BUG_ON(xa, xa->xa_head != node); 384 xa_erase_index(test, xa, 0); 385 } 386 } 387 388 static noinline void check_insert(struct kunit *test) 389 { 390 struct xarray *xa = xa_param(test); 391 392 unsigned long i; 393 394 for (i = 0; i < 1024; i++) { 395 xa_insert_index(test, xa, i); 396 XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL); 397 XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL); 398 xa_erase_index(test, xa, i); 399 } 400 401 for (i = 10; i < BITS_PER_LONG; i++) { 402 xa_insert_index(test, xa, 1UL << i); 403 XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL); 404 XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL); 405 xa_erase_index(test, xa, 1UL << i); 406 407 xa_insert_index(test, xa, (1UL << i) - 1); 408 XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL); 409 XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL); 410 xa_erase_index(test, xa, (1UL << i) - 1); 411 } 412 413 xa_insert_index(test, xa, ~0UL); 414 XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL); 415 XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL); 416 xa_erase_index(test, xa, ~0UL); 417 418 XA_BUG_ON(xa, !xa_empty(xa)); 419 } 420 421 static noinline void check_cmpxchg(struct kunit *test) 422 { 423 struct xarray *xa = xa_param(test); 424 425 void *FIVE = xa_mk_value(5); 426 void *SIX = xa_mk_value(6); 427 void *LOTS = xa_mk_value(12345678); 428 429 XA_BUG_ON(xa, !xa_empty(xa)); 430 XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL); 431 XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY); 432 XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS); 433 XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS); 434 XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE); 435 XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL); 436 XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL); 437 XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY); 438 XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE); 439 XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY); 440 xa_erase_index(test, xa, 12345678); 441 xa_erase_index(test, xa, 5); 442 XA_BUG_ON(xa, !xa_empty(xa)); 443 } 444 445 static noinline void check_cmpxchg_order(struct kunit *test) 446 { 447 #ifdef CONFIG_XARRAY_MULTI 448 struct xarray *xa = xa_param(test); 449 450 void *FIVE = xa_mk_value(5); 451 unsigned int i, order = 3; 452 453 XA_BUG_ON(xa, xa_store_order(xa, 0, order, FIVE, GFP_KERNEL)); 454 455 /* Check entry FIVE has the order saved */ 456 XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != order); 457 458 /* Check all the tied indexes have the same entry and order */ 459 for (i = 0; i < (1 << order); i++) { 460 XA_BUG_ON(xa, xa_load(xa, i) != FIVE); 461 XA_BUG_ON(xa, xa_get_order(xa, i) != order); 462 } 463 464 /* Ensure that nothing is stored at index '1 << order' */ 465 XA_BUG_ON(xa, xa_load(xa, 1 << order) != NULL); 466 467 /* 468 * Additionally, keep the node information and the order at 469 * '1 << order' 470 */ 471 XA_BUG_ON(xa, xa_store_order(xa, 1 << order, order, FIVE, GFP_KERNEL)); 472 for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) { 473 XA_BUG_ON(xa, xa_load(xa, i) != FIVE); 474 XA_BUG_ON(xa, xa_get_order(xa, i) != order); 475 } 476 477 /* Conditionally replace FIVE entry at index '0' with NULL */ 478 XA_BUG_ON(xa, xa_cmpxchg(xa, 0, FIVE, NULL, GFP_KERNEL) != FIVE); 479 480 /* Verify the order is lost at FIVE (and old) entries */ 481 XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != 0); 482 483 /* Verify the order and entries are lost in all the tied indexes */ 484 for (i = 0; i < (1 << order); i++) { 485 XA_BUG_ON(xa, xa_load(xa, i) != NULL); 486 XA_BUG_ON(xa, xa_get_order(xa, i) != 0); 487 } 488 489 /* Verify node and order are kept at '1 << order' */ 490 for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) { 491 XA_BUG_ON(xa, xa_load(xa, i) != FIVE); 492 XA_BUG_ON(xa, xa_get_order(xa, i) != order); 493 } 494 495 xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL); 496 XA_BUG_ON(xa, !xa_empty(xa)); 497 #endif 498 } 499 500 static noinline void check_reserve(struct kunit *test) 501 { 502 struct xarray *xa = xa_param(test); 503 504 void *entry; 505 unsigned long index; 506 int count; 507 508 /* An array with a reserved entry is not empty */ 509 XA_BUG_ON(xa, !xa_empty(xa)); 510 XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); 511 XA_BUG_ON(xa, xa_empty(xa)); 512 XA_BUG_ON(xa, xa_load(xa, 12345678)); 513 xa_release(xa, 12345678); 514 XA_BUG_ON(xa, !xa_empty(xa)); 515 516 /* Releasing a used entry does nothing */ 517 XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); 518 XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL); 519 xa_release(xa, 12345678); 520 xa_erase_index(test, xa, 12345678); 521 XA_BUG_ON(xa, !xa_empty(xa)); 522 523 /* cmpxchg sees a reserved entry as ZERO */ 524 XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); 525 XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY, 526 xa_mk_value(12345678), GFP_NOWAIT) != NULL); 527 xa_release(xa, 12345678); 528 xa_erase_index(test, xa, 12345678); 529 XA_BUG_ON(xa, !xa_empty(xa)); 530 531 /* xa_insert treats it as busy */ 532 XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); 533 XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 534 -EBUSY); 535 XA_BUG_ON(xa, xa_empty(xa)); 536 XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL); 537 XA_BUG_ON(xa, !xa_empty(xa)); 538 539 /* Can iterate through a reserved entry */ 540 xa_store_index(xa, 5, GFP_KERNEL); 541 XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0); 542 xa_store_index(xa, 7, GFP_KERNEL); 543 544 count = 0; 545 xa_for_each(xa, index, entry) { 546 XA_BUG_ON(xa, index != 5 && index != 7); 547 count++; 548 } 549 XA_BUG_ON(xa, count != 2); 550 551 /* If we free a reserved entry, we should be able to allocate it */ 552 if (xa->xa_flags & XA_FLAGS_ALLOC) { 553 u32 id; 554 555 XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8), 556 XA_LIMIT(5, 10), GFP_KERNEL) != 0); 557 XA_BUG_ON(xa, id != 8); 558 559 xa_release(xa, 6); 560 XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6), 561 XA_LIMIT(5, 10), GFP_KERNEL) != 0); 562 XA_BUG_ON(xa, id != 6); 563 } 564 565 xa_destroy(xa); 566 } 567 568 static noinline void check_xas_erase(struct kunit *test) 569 { 570 struct xarray *xa = xa_param(test); 571 572 XA_STATE(xas, xa, 0); 573 void *entry; 574 unsigned long i, j; 575 576 for (i = 0; i < 200; i++) { 577 for (j = i; j < 2 * i + 17; j++) { 578 xas_set(&xas, j); 579 do { 580 xas_lock(&xas); 581 xas_store(&xas, xa_mk_index(j)); 582 xas_unlock(&xas); 583 } while (xas_nomem(&xas, GFP_KERNEL)); 584 } 585 586 xas_set(&xas, ULONG_MAX); 587 do { 588 xas_lock(&xas); 589 xas_store(&xas, xa_mk_value(0)); 590 xas_unlock(&xas); 591 } while (xas_nomem(&xas, GFP_KERNEL)); 592 593 xas_lock(&xas); 594 xas_store(&xas, NULL); 595 596 xas_set(&xas, 0); 597 j = i; 598 xas_for_each(&xas, entry, ULONG_MAX) { 599 XA_BUG_ON(xa, entry != xa_mk_index(j)); 600 xas_store(&xas, NULL); 601 j++; 602 } 603 xas_unlock(&xas); 604 XA_BUG_ON(xa, !xa_empty(xa)); 605 } 606 } 607 608 #ifdef CONFIG_XARRAY_MULTI 609 static noinline void check_multi_store_1(struct kunit *test, unsigned long index, 610 unsigned int order) 611 { 612 struct xarray *xa = xa_param(test); 613 614 XA_STATE(xas, xa, index); 615 unsigned long min = index & ~((1UL << order) - 1); 616 unsigned long max = min + (1UL << order); 617 618 xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); 619 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index)); 620 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index)); 621 XA_BUG_ON(xa, xa_load(xa, max) != NULL); 622 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); 623 624 xas_lock(&xas); 625 XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index)); 626 xas_unlock(&xas); 627 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min)); 628 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min)); 629 XA_BUG_ON(xa, xa_load(xa, max) != NULL); 630 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); 631 632 xa_erase_index(test, xa, min); 633 XA_BUG_ON(xa, !xa_empty(xa)); 634 } 635 636 static noinline void check_multi_store_2(struct kunit *test, unsigned long index, 637 unsigned int order) 638 { 639 struct xarray *xa = xa_param(test); 640 641 XA_STATE(xas, xa, index); 642 xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL); 643 644 xas_lock(&xas); 645 XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0)); 646 XA_BUG_ON(xa, xas.xa_index != index); 647 XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1)); 648 xas_unlock(&xas); 649 XA_BUG_ON(xa, !xa_empty(xa)); 650 } 651 652 static noinline void check_multi_store_3(struct kunit *test, unsigned long index, 653 unsigned int order) 654 { 655 struct xarray *xa = xa_param(test); 656 657 XA_STATE(xas, xa, 0); 658 void *entry; 659 int n = 0; 660 661 xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); 662 663 xas_lock(&xas); 664 xas_for_each(&xas, entry, ULONG_MAX) { 665 XA_BUG_ON(xa, entry != xa_mk_index(index)); 666 n++; 667 } 668 XA_BUG_ON(xa, n != 1); 669 xas_set(&xas, index + 1); 670 xas_for_each(&xas, entry, ULONG_MAX) { 671 XA_BUG_ON(xa, entry != xa_mk_index(index)); 672 n++; 673 } 674 XA_BUG_ON(xa, n != 2); 675 xas_unlock(&xas); 676 677 xa_destroy(xa); 678 } 679 #endif 680 681 static noinline void check_multi_store(struct kunit *test) 682 { 683 #ifdef CONFIG_XARRAY_MULTI 684 struct xarray *xa = xa_param(test); 685 686 unsigned long i, j, k; 687 unsigned int max_order = (sizeof(long) == 4) ? 30 : 60; 688 689 /* Loading from any position returns the same value */ 690 xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL); 691 XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); 692 XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0)); 693 XA_BUG_ON(xa, xa_load(xa, 2) != NULL); 694 rcu_read_lock(); 695 XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2); 696 XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2); 697 rcu_read_unlock(); 698 699 /* Storing adjacent to the value does not alter the value */ 700 xa_store(xa, 3, xa, GFP_KERNEL); 701 XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); 702 XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0)); 703 XA_BUG_ON(xa, xa_load(xa, 2) != NULL); 704 rcu_read_lock(); 705 XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3); 706 XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2); 707 rcu_read_unlock(); 708 709 /* Overwriting multiple indexes works */ 710 xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL); 711 XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1)); 712 XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1)); 713 XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1)); 714 XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1)); 715 XA_BUG_ON(xa, xa_load(xa, 4) != NULL); 716 rcu_read_lock(); 717 XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4); 718 XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4); 719 rcu_read_unlock(); 720 721 /* We can erase multiple values with a single store */ 722 xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL); 723 XA_BUG_ON(xa, !xa_empty(xa)); 724 725 /* Even when the first slot is empty but the others aren't */ 726 xa_store_index(xa, 1, GFP_KERNEL); 727 xa_store_index(xa, 2, GFP_KERNEL); 728 xa_store_order(xa, 0, 2, NULL, GFP_KERNEL); 729 XA_BUG_ON(xa, !xa_empty(xa)); 730 731 for (i = 0; i < max_order; i++) { 732 for (j = 0; j < max_order; j++) { 733 xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL); 734 xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL); 735 736 for (k = 0; k < max_order; k++) { 737 void *entry = xa_load(xa, (1UL << k) - 1); 738 if ((i < k) && (j < k)) 739 XA_BUG_ON(xa, entry != NULL); 740 else 741 XA_BUG_ON(xa, entry != xa_mk_index(j)); 742 } 743 744 xa_erase(xa, 0); 745 XA_BUG_ON(xa, !xa_empty(xa)); 746 } 747 } 748 749 for (i = 0; i < 20; i++) { 750 check_multi_store_1(test, 200, i); 751 check_multi_store_1(test, 0, i); 752 check_multi_store_1(test, (1UL << i) + 1, i); 753 } 754 check_multi_store_2(test, 4095, 9); 755 756 for (i = 1; i < 20; i++) { 757 check_multi_store_3(test, 0, i); 758 check_multi_store_3(test, 1UL << i, i); 759 } 760 #endif 761 } 762 763 #ifdef CONFIG_XARRAY_MULTI 764 /* mimics page cache __filemap_add_folio() */ 765 static noinline void check_xa_multi_store_adv_add(struct kunit *test, 766 unsigned long index, 767 unsigned int order, 768 void *p) 769 { 770 struct xarray *xa = xa_param(test); 771 772 XA_STATE(xas, xa, index); 773 unsigned int nrpages = 1UL << order; 774 775 /* users are responsible for index alignemnt to the order when adding */ 776 XA_BUG_ON(xa, index & (nrpages - 1)); 777 778 xas_set_order(&xas, index, order); 779 780 do { 781 xas_lock_irq(&xas); 782 xas_store(&xas, p); 783 xas_unlock_irq(&xas); 784 /* 785 * In our selftest case the only failure we can expect is for 786 * there not to be enough memory as we're not mimicking the 787 * entire page cache, so verify that's the only error we can run 788 * into here. The xas_nomem() which follows will ensure to fix 789 * that condition for us so to chug on on the loop. 790 */ 791 XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM); 792 } while (xas_nomem(&xas, GFP_KERNEL)); 793 794 XA_BUG_ON(xa, xas_error(&xas)); 795 XA_BUG_ON(xa, xa_load(xa, index) != p); 796 } 797 798 /* mimics page_cache_delete() */ 799 static noinline void check_xa_multi_store_adv_del_entry(struct kunit *test, 800 unsigned long index, 801 unsigned int order) 802 { 803 struct xarray *xa = xa_param(test); 804 805 XA_STATE(xas, xa, index); 806 807 xas_set_order(&xas, index, order); 808 xas_store(&xas, NULL); 809 xas_init_marks(&xas); 810 } 811 812 static noinline void check_xa_multi_store_adv_delete(struct kunit *test, 813 unsigned long index, 814 unsigned int order) 815 { 816 struct xarray *xa = xa_param(test); 817 818 xa_lock_irq(xa); 819 check_xa_multi_store_adv_del_entry(test, index, order); 820 xa_unlock_irq(xa); 821 } 822 823 /* mimics page cache filemap_get_entry() */ 824 static noinline void *test_get_entry(struct xarray *xa, unsigned long index) 825 { 826 XA_STATE(xas, xa, index); 827 void *p; 828 static unsigned int loops = 0; 829 830 rcu_read_lock(); 831 repeat: 832 xas_reset(&xas); 833 p = xas_load(&xas); 834 if (xas_retry(&xas, p)) 835 goto repeat; 836 rcu_read_unlock(); 837 838 /* 839 * This is not part of the page cache, this selftest is pretty 840 * aggressive and does not want to trust the xarray API but rather 841 * test it, and for order 20 (4 GiB block size) we can loop over 842 * over a million entries which can cause a soft lockup. Page cache 843 * APIs won't be stupid, proper page cache APIs loop over the proper 844 * order so when using a larger order we skip shared entries. 845 */ 846 if (++loops % XA_CHECK_SCHED == 0) 847 schedule(); 848 849 return p; 850 } 851 852 static unsigned long some_val = 0xdeadbeef; 853 static unsigned long some_val_2 = 0xdeaddead; 854 855 /* mimics the page cache usage */ 856 static noinline void check_xa_multi_store_adv(struct kunit *test, 857 unsigned long pos, 858 unsigned int order) 859 { 860 struct xarray *xa = xa_param(test); 861 862 unsigned int nrpages = 1UL << order; 863 unsigned long index, base, next_index, next_next_index; 864 unsigned int i; 865 866 index = pos >> PAGE_SHIFT; 867 base = round_down(index, nrpages); 868 next_index = round_down(base + nrpages, nrpages); 869 next_next_index = round_down(next_index + nrpages, nrpages); 870 871 check_xa_multi_store_adv_add(test, base, order, &some_val); 872 873 for (i = 0; i < nrpages; i++) 874 XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val); 875 876 XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL); 877 878 /* Use order 0 for the next item */ 879 check_xa_multi_store_adv_add(test, next_index, 0, &some_val_2); 880 XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2); 881 882 /* Remove the next item */ 883 check_xa_multi_store_adv_delete(test, next_index, 0); 884 885 /* Now use order for a new pointer */ 886 check_xa_multi_store_adv_add(test, next_index, order, &some_val_2); 887 888 for (i = 0; i < nrpages; i++) 889 XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2); 890 891 check_xa_multi_store_adv_delete(test, next_index, order); 892 check_xa_multi_store_adv_delete(test, base, order); 893 XA_BUG_ON(xa, !xa_empty(xa)); 894 895 /* starting fresh again */ 896 897 /* let's test some holes now */ 898 899 /* hole at base and next_next */ 900 check_xa_multi_store_adv_add(test, next_index, order, &some_val_2); 901 902 for (i = 0; i < nrpages; i++) 903 XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); 904 905 for (i = 0; i < nrpages; i++) 906 XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2); 907 908 for (i = 0; i < nrpages; i++) 909 XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL); 910 911 check_xa_multi_store_adv_delete(test, next_index, order); 912 XA_BUG_ON(xa, !xa_empty(xa)); 913 914 /* hole at base and next */ 915 916 check_xa_multi_store_adv_add(test, next_next_index, order, &some_val_2); 917 918 for (i = 0; i < nrpages; i++) 919 XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); 920 921 for (i = 0; i < nrpages; i++) 922 XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != NULL); 923 924 for (i = 0; i < nrpages; i++) 925 XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2); 926 927 check_xa_multi_store_adv_delete(test, next_next_index, order); 928 XA_BUG_ON(xa, !xa_empty(xa)); 929 } 930 #endif 931 932 static noinline void check_multi_store_advanced(struct kunit *test) 933 { 934 #ifdef CONFIG_XARRAY_MULTI 935 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; 936 unsigned long end = ULONG_MAX/2; 937 unsigned long pos, i; 938 939 /* 940 * About 117 million tests below. 941 */ 942 for (pos = 7; pos < end; pos = (pos * pos) + 564) { 943 for (i = 0; i < max_order; i++) { 944 check_xa_multi_store_adv(test, pos, i); 945 check_xa_multi_store_adv(test, pos + 157, i); 946 } 947 } 948 #endif 949 } 950 951 static noinline void check_xa_alloc_1(struct kunit *test, struct xarray *xa, unsigned int base) 952 { 953 int i; 954 u32 id; 955 956 XA_BUG_ON(xa, !xa_empty(xa)); 957 /* An empty array should assign %base to the first alloc */ 958 xa_alloc_index(test, xa, base, GFP_KERNEL); 959 960 /* Erasing it should make the array empty again */ 961 xa_erase_index(test, xa, base); 962 XA_BUG_ON(xa, !xa_empty(xa)); 963 964 /* And it should assign %base again */ 965 xa_alloc_index(test, xa, base, GFP_KERNEL); 966 967 /* Allocating and then erasing a lot should not lose base */ 968 for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++) 969 xa_alloc_index(test, xa, i, GFP_KERNEL); 970 for (i = base; i < 2 * XA_CHUNK_SIZE; i++) 971 xa_erase_index(test, xa, i); 972 xa_alloc_index(test, xa, base, GFP_KERNEL); 973 974 /* Destroying the array should do the same as erasing */ 975 xa_destroy(xa); 976 977 /* And it should assign %base again */ 978 xa_alloc_index(test, xa, base, GFP_KERNEL); 979 980 /* The next assigned ID should be base+1 */ 981 xa_alloc_index(test, xa, base + 1, GFP_KERNEL); 982 xa_erase_index(test, xa, base + 1); 983 984 /* Storing a value should mark it used */ 985 xa_store_index(xa, base + 1, GFP_KERNEL); 986 xa_alloc_index(test, xa, base + 2, GFP_KERNEL); 987 988 /* If we then erase base, it should be free */ 989 xa_erase_index(test, xa, base); 990 xa_alloc_index(test, xa, base, GFP_KERNEL); 991 992 xa_erase_index(test, xa, base + 1); 993 xa_erase_index(test, xa, base + 2); 994 995 for (i = 1; i < 5000; i++) { 996 xa_alloc_index(test, xa, base + i, GFP_KERNEL); 997 } 998 999 xa_destroy(xa); 1000 1001 /* Check that we fail properly at the limit of allocation */ 1002 XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1), 1003 XA_LIMIT(UINT_MAX - 1, UINT_MAX), 1004 GFP_KERNEL) != 0); 1005 XA_BUG_ON(xa, id != 0xfffffffeU); 1006 XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX), 1007 XA_LIMIT(UINT_MAX - 1, UINT_MAX), 1008 GFP_KERNEL) != 0); 1009 XA_BUG_ON(xa, id != 0xffffffffU); 1010 id = 3; 1011 XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0), 1012 XA_LIMIT(UINT_MAX - 1, UINT_MAX), 1013 GFP_KERNEL) != -EBUSY); 1014 XA_BUG_ON(xa, id != 3); 1015 xa_destroy(xa); 1016 1017 XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), 1018 GFP_KERNEL) != -EBUSY); 1019 XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != NULL); 1020 XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), 1021 GFP_KERNEL) != -EBUSY); 1022 xa_erase_index(test, xa, 3); 1023 XA_BUG_ON(xa, !xa_empty(xa)); 1024 } 1025 1026 static noinline void check_xa_alloc_2(struct kunit *test, struct xarray *xa, unsigned int base) 1027 { 1028 unsigned int i, id; 1029 unsigned long index; 1030 void *entry; 1031 1032 /* Allocate and free a NULL and check xa_empty() behaves */ 1033 XA_BUG_ON(xa, !xa_empty(xa)); 1034 XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); 1035 XA_BUG_ON(xa, id != base); 1036 XA_BUG_ON(xa, xa_empty(xa)); 1037 XA_BUG_ON(xa, xa_erase(xa, id) != NULL); 1038 XA_BUG_ON(xa, !xa_empty(xa)); 1039 1040 /* Ditto, but check destroy instead of erase */ 1041 XA_BUG_ON(xa, !xa_empty(xa)); 1042 XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); 1043 XA_BUG_ON(xa, id != base); 1044 XA_BUG_ON(xa, xa_empty(xa)); 1045 xa_destroy(xa); 1046 XA_BUG_ON(xa, !xa_empty(xa)); 1047 1048 for (i = base; i < base + 10; i++) { 1049 XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, 1050 GFP_KERNEL) != 0); 1051 XA_BUG_ON(xa, id != i); 1052 } 1053 1054 XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL); 1055 XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL); 1056 XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4)); 1057 XA_BUG_ON(xa, xa_erase(xa, 5) != NULL); 1058 XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); 1059 XA_BUG_ON(xa, id != 5); 1060 1061 xa_for_each(xa, index, entry) { 1062 xa_erase_index(test, xa, index); 1063 } 1064 1065 for (i = base; i < base + 9; i++) { 1066 XA_BUG_ON(xa, xa_erase(xa, i) != NULL); 1067 XA_BUG_ON(xa, xa_empty(xa)); 1068 } 1069 XA_BUG_ON(xa, xa_erase(xa, 8) != NULL); 1070 XA_BUG_ON(xa, xa_empty(xa)); 1071 XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL); 1072 XA_BUG_ON(xa, !xa_empty(xa)); 1073 1074 xa_destroy(xa); 1075 } 1076 1077 static noinline void check_xa_alloc_3(struct kunit *test, struct xarray *xa, unsigned int base) 1078 { 1079 struct xa_limit limit = XA_LIMIT(1, 0x3fff); 1080 u32 next = 0; 1081 unsigned int i, id; 1082 unsigned long index; 1083 void *entry; 1084 1085 XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit, 1086 &next, GFP_KERNEL) != 0); 1087 XA_BUG_ON(xa, id != 1); 1088 1089 next = 0x3ffd; 1090 XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit, 1091 &next, GFP_KERNEL) != 0); 1092 XA_BUG_ON(xa, id != 0x3ffd); 1093 xa_erase_index(test, xa, 0x3ffd); 1094 xa_erase_index(test, xa, 1); 1095 XA_BUG_ON(xa, !xa_empty(xa)); 1096 1097 for (i = 0x3ffe; i < 0x4003; i++) { 1098 if (i < 0x4000) 1099 entry = xa_mk_index(i); 1100 else 1101 entry = xa_mk_index(i - 0x3fff); 1102 XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit, 1103 &next, GFP_KERNEL) != (id == 1)); 1104 XA_BUG_ON(xa, xa_mk_index(id) != entry); 1105 } 1106 1107 /* Check wrap-around is handled correctly */ 1108 if (base != 0) 1109 xa_erase_index(test, xa, base); 1110 xa_erase_index(test, xa, base + 1); 1111 next = UINT_MAX; 1112 XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX), 1113 xa_limit_32b, &next, GFP_KERNEL) != 0); 1114 XA_BUG_ON(xa, id != UINT_MAX); 1115 XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base), 1116 xa_limit_32b, &next, GFP_KERNEL) != 1); 1117 XA_BUG_ON(xa, id != base); 1118 XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1), 1119 xa_limit_32b, &next, GFP_KERNEL) != 0); 1120 XA_BUG_ON(xa, id != base + 1); 1121 1122 xa_for_each(xa, index, entry) 1123 xa_erase_index(test, xa, index); 1124 1125 XA_BUG_ON(xa, !xa_empty(xa)); 1126 } 1127 1128 static DEFINE_XARRAY_ALLOC(xa0); 1129 static DEFINE_XARRAY_ALLOC1(xa1); 1130 1131 static noinline void check_xa_alloc(struct kunit *test) 1132 { 1133 check_xa_alloc_1(test, &xa0, 0); 1134 check_xa_alloc_1(test, &xa1, 1); 1135 check_xa_alloc_2(test, &xa0, 0); 1136 check_xa_alloc_2(test, &xa1, 1); 1137 check_xa_alloc_3(test, &xa0, 0); 1138 check_xa_alloc_3(test, &xa1, 1); 1139 } 1140 1141 static noinline void __check_store_iter(struct kunit *test, unsigned long start, 1142 unsigned int order, unsigned int present) 1143 { 1144 struct xarray *xa = xa_param(test); 1145 1146 XA_STATE_ORDER(xas, xa, start, order); 1147 void *entry; 1148 unsigned int count = 0; 1149 1150 retry: 1151 xas_lock(&xas); 1152 xas_for_each_conflict(&xas, entry) { 1153 XA_BUG_ON(xa, !xa_is_value(entry)); 1154 XA_BUG_ON(xa, entry < xa_mk_index(start)); 1155 XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1)); 1156 count++; 1157 } 1158 xas_store(&xas, xa_mk_index(start)); 1159 xas_unlock(&xas); 1160 if (xas_nomem(&xas, GFP_KERNEL)) { 1161 count = 0; 1162 goto retry; 1163 } 1164 XA_BUG_ON(xa, xas_error(&xas)); 1165 XA_BUG_ON(xa, count != present); 1166 XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start)); 1167 XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != 1168 xa_mk_index(start)); 1169 xa_erase_index(test, xa, start); 1170 } 1171 1172 static noinline void check_store_iter(struct kunit *test) 1173 { 1174 struct xarray *xa = xa_param(test); 1175 1176 unsigned int i, j; 1177 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; 1178 1179 for (i = 0; i < max_order; i++) { 1180 unsigned int min = 1 << i; 1181 unsigned int max = (2 << i) - 1; 1182 __check_store_iter(test, 0, i, 0); 1183 XA_BUG_ON(xa, !xa_empty(xa)); 1184 __check_store_iter(test, min, i, 0); 1185 XA_BUG_ON(xa, !xa_empty(xa)); 1186 1187 xa_store_index(xa, min, GFP_KERNEL); 1188 __check_store_iter(test, min, i, 1); 1189 XA_BUG_ON(xa, !xa_empty(xa)); 1190 xa_store_index(xa, max, GFP_KERNEL); 1191 __check_store_iter(test, min, i, 1); 1192 XA_BUG_ON(xa, !xa_empty(xa)); 1193 1194 for (j = 0; j < min; j++) 1195 xa_store_index(xa, j, GFP_KERNEL); 1196 __check_store_iter(test, 0, i, min); 1197 XA_BUG_ON(xa, !xa_empty(xa)); 1198 for (j = 0; j < min; j++) 1199 xa_store_index(xa, min + j, GFP_KERNEL); 1200 __check_store_iter(test, min, i, min); 1201 XA_BUG_ON(xa, !xa_empty(xa)); 1202 } 1203 #ifdef CONFIG_XARRAY_MULTI 1204 xa_store_index(xa, 63, GFP_KERNEL); 1205 xa_store_index(xa, 65, GFP_KERNEL); 1206 __check_store_iter(test, 64, 2, 1); 1207 xa_erase_index(test, xa, 63); 1208 #endif 1209 XA_BUG_ON(xa, !xa_empty(xa)); 1210 } 1211 1212 static noinline void check_multi_find_1(struct kunit *test, unsigned int order) 1213 { 1214 #ifdef CONFIG_XARRAY_MULTI 1215 struct xarray *xa = xa_param(test); 1216 1217 unsigned long multi = 3 << order; 1218 unsigned long next = 4 << order; 1219 unsigned long index; 1220 1221 xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL); 1222 XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL); 1223 XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL); 1224 1225 index = 0; 1226 XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) != 1227 xa_mk_value(multi)); 1228 XA_BUG_ON(xa, index != multi); 1229 index = multi + 1; 1230 XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) != 1231 xa_mk_value(multi)); 1232 XA_BUG_ON(xa, (index < multi) || (index >= next)); 1233 XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) != 1234 xa_mk_value(next)); 1235 XA_BUG_ON(xa, index != next); 1236 XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL); 1237 XA_BUG_ON(xa, index != next); 1238 1239 xa_erase_index(test, xa, multi); 1240 xa_erase_index(test, xa, next); 1241 xa_erase_index(test, xa, next + 1); 1242 XA_BUG_ON(xa, !xa_empty(xa)); 1243 #endif 1244 } 1245 1246 static noinline void check_multi_find_2(struct kunit *test) 1247 { 1248 struct xarray *xa = xa_param(test); 1249 1250 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1; 1251 unsigned int i, j; 1252 void *entry; 1253 1254 for (i = 0; i < max_order; i++) { 1255 unsigned long index = 1UL << i; 1256 for (j = 0; j < index; j++) { 1257 XA_STATE(xas, xa, j + index); 1258 xa_store_index(xa, index - 1, GFP_KERNEL); 1259 xa_store_order(xa, index, i, xa_mk_index(index), 1260 GFP_KERNEL); 1261 rcu_read_lock(); 1262 xas_for_each(&xas, entry, ULONG_MAX) { 1263 xa_erase_index(test, xa, index); 1264 } 1265 rcu_read_unlock(); 1266 xa_erase_index(test, xa, index - 1); 1267 XA_BUG_ON(xa, !xa_empty(xa)); 1268 } 1269 } 1270 } 1271 1272 static noinline void check_multi_find_3(struct kunit *test) 1273 { 1274 struct xarray *xa = xa_param(test); 1275 1276 unsigned int order; 1277 1278 for (order = 5; order < order_limit; order++) { 1279 unsigned long index = 1UL << (order - 5); 1280 1281 XA_BUG_ON(xa, !xa_empty(xa)); 1282 xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL); 1283 XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)); 1284 xa_erase_index(test, xa, 0); 1285 } 1286 } 1287 1288 static noinline void check_find_1(struct kunit *test) 1289 { 1290 struct xarray *xa = xa_param(test); 1291 1292 unsigned long i, j, k; 1293 1294 XA_BUG_ON(xa, !xa_empty(xa)); 1295 1296 /* 1297 * Check xa_find with all pairs between 0 and 99 inclusive, 1298 * starting at every index between 0 and 99 1299 */ 1300 for (i = 0; i < 100; i++) { 1301 XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); 1302 xa_set_mark(xa, i, XA_MARK_0); 1303 for (j = 0; j < i; j++) { 1304 XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) != 1305 NULL); 1306 xa_set_mark(xa, j, XA_MARK_0); 1307 for (k = 0; k < 100; k++) { 1308 unsigned long index = k; 1309 void *entry = xa_find(xa, &index, ULONG_MAX, 1310 XA_PRESENT); 1311 if (k <= j) 1312 XA_BUG_ON(xa, index != j); 1313 else if (k <= i) 1314 XA_BUG_ON(xa, index != i); 1315 else 1316 XA_BUG_ON(xa, entry != NULL); 1317 1318 index = k; 1319 entry = xa_find(xa, &index, ULONG_MAX, 1320 XA_MARK_0); 1321 if (k <= j) 1322 XA_BUG_ON(xa, index != j); 1323 else if (k <= i) 1324 XA_BUG_ON(xa, index != i); 1325 else 1326 XA_BUG_ON(xa, entry != NULL); 1327 } 1328 xa_erase_index(test, xa, j); 1329 XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0)); 1330 XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); 1331 } 1332 xa_erase_index(test, xa, i); 1333 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0)); 1334 } 1335 XA_BUG_ON(xa, !xa_empty(xa)); 1336 } 1337 1338 static noinline void check_find_2(struct kunit *test) 1339 { 1340 struct xarray *xa = xa_param(test); 1341 1342 void *entry; 1343 unsigned long i, j, index; 1344 1345 xa_for_each(xa, index, entry) { 1346 XA_BUG_ON(xa, true); 1347 } 1348 1349 for (i = 0; i < 1024; i++) { 1350 xa_store_index(xa, index, GFP_KERNEL); 1351 j = 0; 1352 xa_for_each(xa, index, entry) { 1353 XA_BUG_ON(xa, xa_mk_index(index) != entry); 1354 XA_BUG_ON(xa, index != j++); 1355 } 1356 } 1357 1358 xa_destroy(xa); 1359 } 1360 1361 static noinline void check_find_3(struct kunit *test) 1362 { 1363 struct xarray *xa = xa_param(test); 1364 1365 XA_STATE(xas, xa, 0); 1366 unsigned long i, j, k; 1367 void *entry; 1368 1369 for (i = 0; i < 100; i++) { 1370 for (j = 0; j < 100; j++) { 1371 rcu_read_lock(); 1372 for (k = 0; k < 100; k++) { 1373 xas_set(&xas, j); 1374 xas_for_each_marked(&xas, entry, k, XA_MARK_0) 1375 ; 1376 if (j > k) 1377 XA_BUG_ON(xa, 1378 xas.xa_node != XAS_RESTART); 1379 } 1380 rcu_read_unlock(); 1381 } 1382 xa_store_index(xa, i, GFP_KERNEL); 1383 xa_set_mark(xa, i, XA_MARK_0); 1384 } 1385 xa_destroy(xa); 1386 } 1387 1388 static noinline void check_find_4(struct kunit *test) 1389 { 1390 struct xarray *xa = xa_param(test); 1391 1392 unsigned long index = 0; 1393 void *entry; 1394 1395 xa_store_index(xa, ULONG_MAX, GFP_KERNEL); 1396 1397 entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT); 1398 XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX)); 1399 1400 entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT); 1401 XA_BUG_ON(xa, entry); 1402 1403 xa_erase_index(test, xa, ULONG_MAX); 1404 } 1405 1406 static noinline void check_find(struct kunit *test) 1407 { 1408 unsigned i; 1409 1410 check_find_1(test); 1411 check_find_2(test); 1412 check_find_3(test); 1413 check_find_4(test); 1414 1415 for (i = 2; i < 10; i++) 1416 check_multi_find_1(test, i); 1417 check_multi_find_2(test); 1418 check_multi_find_3(test); 1419 } 1420 1421 /* See find_swap_entry() in mm/shmem.c */ 1422 static noinline unsigned long xa_find_entry(struct xarray *xa, void *item) 1423 { 1424 XA_STATE(xas, xa, 0); 1425 unsigned int checked = 0; 1426 void *entry; 1427 1428 rcu_read_lock(); 1429 xas_for_each(&xas, entry, ULONG_MAX) { 1430 if (xas_retry(&xas, entry)) 1431 continue; 1432 if (entry == item) 1433 break; 1434 checked++; 1435 if ((checked % 4) != 0) 1436 continue; 1437 xas_pause(&xas); 1438 } 1439 rcu_read_unlock(); 1440 1441 return entry ? xas.xa_index : -1; 1442 } 1443 1444 static noinline void check_find_entry(struct kunit *test) 1445 { 1446 struct xarray *xa = xa_param(test); 1447 1448 #ifdef CONFIG_XARRAY_MULTI 1449 unsigned int order; 1450 unsigned long offset, index; 1451 1452 for (order = 0; order < 20; order++) { 1453 for (offset = 0; offset < (1UL << (order + 3)); 1454 offset += (1UL << order)) { 1455 for (index = 0; index < (1UL << (order + 5)); 1456 index += (1UL << order)) { 1457 xa_store_order(xa, index, order, 1458 xa_mk_index(index), GFP_KERNEL); 1459 XA_BUG_ON(xa, xa_load(xa, index) != 1460 xa_mk_index(index)); 1461 XA_BUG_ON(xa, xa_find_entry(xa, 1462 xa_mk_index(index)) != index); 1463 } 1464 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 1465 xa_destroy(xa); 1466 } 1467 } 1468 #endif 1469 1470 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 1471 xa_store_index(xa, ULONG_MAX, GFP_KERNEL); 1472 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 1473 XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1); 1474 xa_erase_index(test, xa, ULONG_MAX); 1475 XA_BUG_ON(xa, !xa_empty(xa)); 1476 } 1477 1478 static noinline void check_pause(struct kunit *test) 1479 { 1480 struct xarray *xa = xa_param(test); 1481 1482 XA_STATE(xas, xa, 0); 1483 void *entry; 1484 unsigned int order; 1485 unsigned long index = 1; 1486 unsigned int count = 0; 1487 1488 for (order = 0; order < order_limit; order++) { 1489 XA_BUG_ON(xa, xa_store_order(xa, index, order, 1490 xa_mk_index(index), GFP_KERNEL)); 1491 index += 1UL << order; 1492 } 1493 1494 rcu_read_lock(); 1495 xas_for_each(&xas, entry, ULONG_MAX) { 1496 XA_BUG_ON(xa, entry != xa_mk_index(1UL << count)); 1497 count++; 1498 } 1499 rcu_read_unlock(); 1500 XA_BUG_ON(xa, count != order_limit); 1501 1502 count = 0; 1503 xas_set(&xas, 0); 1504 rcu_read_lock(); 1505 xas_for_each(&xas, entry, ULONG_MAX) { 1506 XA_BUG_ON(xa, entry != xa_mk_index(1UL << count)); 1507 count++; 1508 xas_pause(&xas); 1509 } 1510 rcu_read_unlock(); 1511 XA_BUG_ON(xa, count != order_limit); 1512 1513 xa_destroy(xa); 1514 1515 index = 0; 1516 for (order = XA_CHUNK_SHIFT; order > 0; order--) { 1517 XA_BUG_ON(xa, xa_store_order(xa, index, order, 1518 xa_mk_index(index), GFP_KERNEL)); 1519 index += 1UL << order; 1520 } 1521 1522 index = 0; 1523 count = 0; 1524 xas_set(&xas, 0); 1525 rcu_read_lock(); 1526 xas_for_each(&xas, entry, ULONG_MAX) { 1527 XA_BUG_ON(xa, entry != xa_mk_index(index)); 1528 index += 1UL << (XA_CHUNK_SHIFT - count); 1529 count++; 1530 } 1531 rcu_read_unlock(); 1532 XA_BUG_ON(xa, count != XA_CHUNK_SHIFT); 1533 1534 index = 0; 1535 count = 0; 1536 xas_set(&xas, XA_CHUNK_SIZE / 2 + 1); 1537 rcu_read_lock(); 1538 xas_for_each(&xas, entry, ULONG_MAX) { 1539 XA_BUG_ON(xa, entry != xa_mk_index(index)); 1540 index += 1UL << (XA_CHUNK_SHIFT - count); 1541 count++; 1542 xas_pause(&xas); 1543 } 1544 rcu_read_unlock(); 1545 XA_BUG_ON(xa, count != XA_CHUNK_SHIFT); 1546 1547 xa_destroy(xa); 1548 1549 } 1550 1551 static noinline void check_move_tiny(struct kunit *test) 1552 { 1553 struct xarray *xa = xa_param(test); 1554 1555 XA_STATE(xas, xa, 0); 1556 1557 XA_BUG_ON(xa, !xa_empty(xa)); 1558 rcu_read_lock(); 1559 XA_BUG_ON(xa, xas_next(&xas) != NULL); 1560 XA_BUG_ON(xa, xas_next(&xas) != NULL); 1561 rcu_read_unlock(); 1562 xa_store_index(xa, 0, GFP_KERNEL); 1563 rcu_read_lock(); 1564 xas_set(&xas, 0); 1565 XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0)); 1566 XA_BUG_ON(xa, xas_next(&xas) != NULL); 1567 xas_set(&xas, 0); 1568 XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0)); 1569 XA_BUG_ON(xa, xas_prev(&xas) != NULL); 1570 rcu_read_unlock(); 1571 xa_erase_index(test, xa, 0); 1572 XA_BUG_ON(xa, !xa_empty(xa)); 1573 } 1574 1575 static noinline void check_move_max(struct kunit *test) 1576 { 1577 struct xarray *xa = xa_param(test); 1578 1579 XA_STATE(xas, xa, 0); 1580 1581 xa_store_index(xa, ULONG_MAX, GFP_KERNEL); 1582 rcu_read_lock(); 1583 XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX)); 1584 XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL); 1585 rcu_read_unlock(); 1586 1587 xas_set(&xas, 0); 1588 rcu_read_lock(); 1589 XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX)); 1590 xas_pause(&xas); 1591 XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL); 1592 rcu_read_unlock(); 1593 1594 xa_erase_index(test, xa, ULONG_MAX); 1595 XA_BUG_ON(xa, !xa_empty(xa)); 1596 } 1597 1598 static noinline void check_move_small(struct kunit *test, unsigned long idx) 1599 { 1600 struct xarray *xa = xa_param(test); 1601 1602 XA_STATE(xas, xa, 0); 1603 unsigned long i; 1604 1605 xa_store_index(xa, 0, GFP_KERNEL); 1606 xa_store_index(xa, idx, GFP_KERNEL); 1607 1608 rcu_read_lock(); 1609 for (i = 0; i < idx * 4; i++) { 1610 void *entry = xas_next(&xas); 1611 if (i <= idx) 1612 XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); 1613 XA_BUG_ON(xa, xas.xa_index != i); 1614 if (i == 0 || i == idx) 1615 XA_BUG_ON(xa, entry != xa_mk_index(i)); 1616 else 1617 XA_BUG_ON(xa, entry != NULL); 1618 } 1619 xas_next(&xas); 1620 XA_BUG_ON(xa, xas.xa_index != i); 1621 1622 do { 1623 void *entry = xas_prev(&xas); 1624 i--; 1625 if (i <= idx) 1626 XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); 1627 XA_BUG_ON(xa, xas.xa_index != i); 1628 if (i == 0 || i == idx) 1629 XA_BUG_ON(xa, entry != xa_mk_index(i)); 1630 else 1631 XA_BUG_ON(xa, entry != NULL); 1632 } while (i > 0); 1633 1634 xas_set(&xas, ULONG_MAX); 1635 XA_BUG_ON(xa, xas_next(&xas) != NULL); 1636 XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); 1637 XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0)); 1638 XA_BUG_ON(xa, xas.xa_index != 0); 1639 XA_BUG_ON(xa, xas_prev(&xas) != NULL); 1640 XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); 1641 rcu_read_unlock(); 1642 1643 xa_erase_index(test, xa, 0); 1644 xa_erase_index(test, xa, idx); 1645 XA_BUG_ON(xa, !xa_empty(xa)); 1646 } 1647 1648 static noinline void check_move(struct kunit *test) 1649 { 1650 struct xarray *xa = xa_param(test); 1651 1652 XA_STATE(xas, xa, (1 << 16) - 1); 1653 unsigned long i; 1654 1655 for (i = 0; i < (1 << 16); i++) 1656 XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); 1657 1658 rcu_read_lock(); 1659 do { 1660 void *entry = xas_prev(&xas); 1661 i--; 1662 XA_BUG_ON(xa, entry != xa_mk_index(i)); 1663 XA_BUG_ON(xa, i != xas.xa_index); 1664 } while (i != 0); 1665 1666 XA_BUG_ON(xa, xas_prev(&xas) != NULL); 1667 XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); 1668 1669 do { 1670 void *entry = xas_next(&xas); 1671 XA_BUG_ON(xa, entry != xa_mk_index(i)); 1672 XA_BUG_ON(xa, i != xas.xa_index); 1673 i++; 1674 } while (i < (1 << 16)); 1675 rcu_read_unlock(); 1676 1677 for (i = (1 << 8); i < (1 << 15); i++) 1678 xa_erase_index(test, xa, i); 1679 1680 i = xas.xa_index; 1681 1682 rcu_read_lock(); 1683 do { 1684 void *entry = xas_prev(&xas); 1685 i--; 1686 if ((i < (1 << 8)) || (i >= (1 << 15))) 1687 XA_BUG_ON(xa, entry != xa_mk_index(i)); 1688 else 1689 XA_BUG_ON(xa, entry != NULL); 1690 XA_BUG_ON(xa, i != xas.xa_index); 1691 } while (i != 0); 1692 1693 XA_BUG_ON(xa, xas_prev(&xas) != NULL); 1694 XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); 1695 1696 do { 1697 void *entry = xas_next(&xas); 1698 if ((i < (1 << 8)) || (i >= (1 << 15))) 1699 XA_BUG_ON(xa, entry != xa_mk_index(i)); 1700 else 1701 XA_BUG_ON(xa, entry != NULL); 1702 XA_BUG_ON(xa, i != xas.xa_index); 1703 i++; 1704 } while (i < (1 << 16)); 1705 rcu_read_unlock(); 1706 1707 xa_destroy(xa); 1708 1709 check_move_tiny(test); 1710 check_move_max(test); 1711 1712 for (i = 0; i < 16; i++) 1713 check_move_small(test, 1UL << i); 1714 1715 for (i = 2; i < 16; i++) 1716 check_move_small(test, (1UL << i) - 1); 1717 } 1718 1719 static noinline void xa_store_many_order(struct kunit *test, struct xarray *xa, 1720 unsigned long index, unsigned order) 1721 { 1722 XA_STATE_ORDER(xas, xa, index, order); 1723 unsigned int i = 0; 1724 1725 do { 1726 xas_lock(&xas); 1727 XA_BUG_ON(xa, xas_find_conflict(&xas)); 1728 xas_create_range(&xas); 1729 if (xas_error(&xas)) 1730 goto unlock; 1731 for (i = 0; i < (1U << order); i++) { 1732 XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i))); 1733 xas_next(&xas); 1734 } 1735 unlock: 1736 xas_unlock(&xas); 1737 } while (xas_nomem(&xas, GFP_KERNEL)); 1738 1739 XA_BUG_ON(xa, xas_error(&xas)); 1740 } 1741 1742 static noinline void check_create_range_1(struct kunit *test, 1743 unsigned long index, unsigned order) 1744 { 1745 struct xarray *xa = xa_param(test); 1746 1747 unsigned long i; 1748 1749 xa_store_many_order(test, xa, index, order); 1750 for (i = index; i < index + (1UL << order); i++) 1751 xa_erase_index(test, xa, i); 1752 XA_BUG_ON(xa, !xa_empty(xa)); 1753 } 1754 1755 static noinline void check_create_range_2(struct kunit *test, unsigned int order) 1756 { 1757 struct xarray *xa = xa_param(test); 1758 1759 unsigned long i; 1760 unsigned long nr = 1UL << order; 1761 1762 for (i = 0; i < nr * nr; i += nr) 1763 xa_store_many_order(test, xa, i, order); 1764 for (i = 0; i < nr * nr; i++) 1765 xa_erase_index(test, xa, i); 1766 XA_BUG_ON(xa, !xa_empty(xa)); 1767 } 1768 1769 static noinline void check_create_range_3(struct kunit *test) 1770 { 1771 XA_STATE(xas, NULL, 0); 1772 xas_set_err(&xas, -EEXIST); 1773 xas_create_range(&xas); 1774 XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST); 1775 } 1776 1777 static noinline void check_create_range_4(struct kunit *test, 1778 unsigned long index, unsigned order) 1779 { 1780 struct xarray *xa = xa_param(test); 1781 1782 XA_STATE_ORDER(xas, xa, index, order); 1783 unsigned long base = xas.xa_index; 1784 unsigned long i = 0; 1785 1786 xa_store_index(xa, index, GFP_KERNEL); 1787 do { 1788 xas_lock(&xas); 1789 xas_create_range(&xas); 1790 if (xas_error(&xas)) 1791 goto unlock; 1792 for (i = 0; i < (1UL << order); i++) { 1793 void *old = xas_store(&xas, xa_mk_index(base + i)); 1794 if (xas.xa_index == index) 1795 XA_BUG_ON(xa, old != xa_mk_index(base + i)); 1796 else 1797 XA_BUG_ON(xa, old != NULL); 1798 xas_next(&xas); 1799 } 1800 unlock: 1801 xas_unlock(&xas); 1802 } while (xas_nomem(&xas, GFP_KERNEL)); 1803 1804 XA_BUG_ON(xa, xas_error(&xas)); 1805 1806 for (i = base; i < base + (1UL << order); i++) 1807 xa_erase_index(test, xa, i); 1808 XA_BUG_ON(xa, !xa_empty(xa)); 1809 } 1810 1811 static noinline void check_create_range_5(struct kunit *test, 1812 unsigned long index, unsigned int order) 1813 { 1814 struct xarray *xa = xa_param(test); 1815 1816 XA_STATE_ORDER(xas, xa, index, order); 1817 unsigned int i; 1818 1819 xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); 1820 1821 for (i = 0; i < order + 10; i++) { 1822 do { 1823 xas_lock(&xas); 1824 xas_create_range(&xas); 1825 xas_unlock(&xas); 1826 } while (xas_nomem(&xas, GFP_KERNEL)); 1827 } 1828 1829 xa_destroy(xa); 1830 } 1831 1832 static noinline void check_create_range(struct kunit *test) 1833 { 1834 unsigned int order; 1835 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1; 1836 1837 for (order = 0; order < max_order; order++) { 1838 check_create_range_1(test, 0, order); 1839 check_create_range_1(test, 1U << order, order); 1840 check_create_range_1(test, 2U << order, order); 1841 check_create_range_1(test, 3U << order, order); 1842 check_create_range_1(test, 1U << 24, order); 1843 if (order < 10) 1844 check_create_range_2(test, order); 1845 1846 check_create_range_4(test, 0, order); 1847 check_create_range_4(test, 1U << order, order); 1848 check_create_range_4(test, 2U << order, order); 1849 check_create_range_4(test, 3U << order, order); 1850 check_create_range_4(test, 1U << 24, order); 1851 1852 check_create_range_4(test, 1, order); 1853 check_create_range_4(test, (1U << order) + 1, order); 1854 check_create_range_4(test, (2U << order) + 1, order); 1855 check_create_range_4(test, (2U << order) - 1, order); 1856 check_create_range_4(test, (3U << order) + 1, order); 1857 check_create_range_4(test, (3U << order) - 1, order); 1858 check_create_range_4(test, (1U << 24) + 1, order); 1859 1860 check_create_range_5(test, 0, order); 1861 check_create_range_5(test, (1U << order), order); 1862 } 1863 1864 check_create_range_3(test); 1865 } 1866 1867 static noinline void __check_store_range(struct kunit *test, unsigned long first, 1868 unsigned long last) 1869 { 1870 struct xarray *xa = xa_param(test); 1871 1872 #ifdef CONFIG_XARRAY_MULTI 1873 xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL); 1874 1875 XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first)); 1876 XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first)); 1877 XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL); 1878 XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL); 1879 1880 xa_store_range(xa, first, last, NULL, GFP_KERNEL); 1881 #endif 1882 1883 XA_BUG_ON(xa, !xa_empty(xa)); 1884 } 1885 1886 static noinline void check_store_range(struct kunit *test) 1887 { 1888 unsigned long i, j; 1889 1890 for (i = 0; i < 128; i++) { 1891 for (j = i; j < 128; j++) { 1892 __check_store_range(test, i, j); 1893 __check_store_range(test, 128 + i, 128 + j); 1894 __check_store_range(test, 4095 + i, 4095 + j); 1895 __check_store_range(test, 4096 + i, 4096 + j); 1896 __check_store_range(test, 123456 + i, 123456 + j); 1897 __check_store_range(test, (1 << 24) + i, (1 << 24) + j); 1898 } 1899 } 1900 } 1901 1902 #ifdef CONFIG_XARRAY_MULTI 1903 static void check_split_1(struct kunit *test, unsigned long index, 1904 unsigned int order, unsigned int new_order) 1905 { 1906 struct xarray *xa = xa_param(test); 1907 1908 XA_STATE_ORDER(xas, xa, index, new_order); 1909 unsigned int i, found; 1910 void *entry; 1911 1912 xa_store_order(xa, index, order, xa, GFP_KERNEL); 1913 xa_set_mark(xa, index, XA_MARK_1); 1914 1915 xas_split_alloc(&xas, xa, order, GFP_KERNEL); 1916 xas_lock(&xas); 1917 xas_split(&xas, xa, order); 1918 for (i = 0; i < (1 << order); i += (1 << new_order)) 1919 __xa_store(xa, index + i, xa_mk_index(index + i), 0); 1920 xas_unlock(&xas); 1921 1922 for (i = 0; i < (1 << order); i++) { 1923 unsigned int val = index + (i & ~((1 << new_order) - 1)); 1924 XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val)); 1925 } 1926 1927 xa_set_mark(xa, index, XA_MARK_0); 1928 XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0)); 1929 1930 xas_set_order(&xas, index, 0); 1931 found = 0; 1932 rcu_read_lock(); 1933 xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) { 1934 found++; 1935 XA_BUG_ON(xa, xa_is_internal(entry)); 1936 } 1937 rcu_read_unlock(); 1938 XA_BUG_ON(xa, found != 1 << (order - new_order)); 1939 1940 xa_destroy(xa); 1941 } 1942 1943 static noinline void check_split(struct kunit *test) 1944 { 1945 struct xarray *xa = xa_param(test); 1946 1947 unsigned int order, new_order; 1948 1949 XA_BUG_ON(xa, !xa_empty(xa)); 1950 1951 for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) { 1952 for (new_order = 0; new_order < order; new_order++) { 1953 check_split_1(test, 0, order, new_order); 1954 check_split_1(test, 1UL << order, order, new_order); 1955 check_split_1(test, 3UL << order, order, new_order); 1956 } 1957 } 1958 } 1959 #else 1960 static void check_split(struct kunit *test) { } 1961 #endif 1962 1963 static void check_align_1(struct kunit *test, char *name) 1964 { 1965 struct xarray *xa = xa_param(test); 1966 1967 int i; 1968 unsigned int id; 1969 unsigned long index; 1970 void *entry; 1971 1972 for (i = 0; i < 8; i++) { 1973 XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b, 1974 GFP_KERNEL) != 0); 1975 XA_BUG_ON(xa, id != i); 1976 } 1977 xa_for_each(xa, index, entry) 1978 XA_BUG_ON(xa, xa_is_err(entry)); 1979 xa_destroy(xa); 1980 } 1981 1982 /* 1983 * We should always be able to store without allocating memory after 1984 * reserving a slot. 1985 */ 1986 static void check_align_2(struct kunit *test, char *name) 1987 { 1988 struct xarray *xa = xa_param(test); 1989 1990 int i; 1991 1992 XA_BUG_ON(xa, !xa_empty(xa)); 1993 1994 for (i = 0; i < 8; i++) { 1995 XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL); 1996 xa_erase(xa, 0); 1997 } 1998 1999 for (i = 0; i < 8; i++) { 2000 XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0); 2001 XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL); 2002 xa_erase(xa, 0); 2003 } 2004 2005 XA_BUG_ON(xa, !xa_empty(xa)); 2006 } 2007 2008 static noinline void check_align(struct kunit *test) 2009 { 2010 char name[] = "Motorola 68000"; 2011 2012 check_align_1(test, name); 2013 check_align_1(test, name + 1); 2014 check_align_1(test, name + 2); 2015 check_align_1(test, name + 3); 2016 check_align_2(test, name); 2017 } 2018 2019 static LIST_HEAD(shadow_nodes); 2020 2021 static void test_update_node(struct xa_node *node) 2022 { 2023 if (node->count && node->count == node->nr_values) { 2024 if (list_empty(&node->private_list)) 2025 list_add(&shadow_nodes, &node->private_list); 2026 } else { 2027 if (!list_empty(&node->private_list)) 2028 list_del_init(&node->private_list); 2029 } 2030 } 2031 2032 static noinline void shadow_remove(struct kunit *test, struct xarray *xa) 2033 { 2034 struct xa_node *node; 2035 2036 xa_lock(xa); 2037 while ((node = list_first_entry_or_null(&shadow_nodes, 2038 struct xa_node, private_list))) { 2039 XA_BUG_ON(xa, node->array != xa); 2040 list_del_init(&node->private_list); 2041 xa_delete_node(node, test_update_node); 2042 } 2043 xa_unlock(xa); 2044 } 2045 2046 struct workingset_testcase { 2047 struct xarray *xa; 2048 unsigned long index; 2049 }; 2050 2051 static noinline void check_workingset(struct kunit *test) 2052 { 2053 struct workingset_testcase tc = *(struct workingset_testcase *)test->param_value; 2054 struct xarray *xa = tc.xa; 2055 unsigned long index = tc.index; 2056 2057 XA_STATE(xas, xa, index); 2058 xas_set_update(&xas, test_update_node); 2059 2060 do { 2061 xas_lock(&xas); 2062 xas_store(&xas, xa_mk_value(0)); 2063 xas_next(&xas); 2064 xas_store(&xas, xa_mk_value(1)); 2065 xas_unlock(&xas); 2066 } while (xas_nomem(&xas, GFP_KERNEL)); 2067 2068 XA_BUG_ON(xa, list_empty(&shadow_nodes)); 2069 2070 xas_lock(&xas); 2071 xas_next(&xas); 2072 xas_store(&xas, &xas); 2073 XA_BUG_ON(xa, !list_empty(&shadow_nodes)); 2074 2075 xas_store(&xas, xa_mk_value(2)); 2076 xas_unlock(&xas); 2077 XA_BUG_ON(xa, list_empty(&shadow_nodes)); 2078 2079 shadow_remove(test, xa); 2080 XA_BUG_ON(xa, !list_empty(&shadow_nodes)); 2081 XA_BUG_ON(xa, !xa_empty(xa)); 2082 } 2083 2084 /* 2085 * Check that the pointer / value / sibling entries are accounted the 2086 * way we expect them to be. 2087 */ 2088 static noinline void check_account(struct kunit *test) 2089 { 2090 #ifdef CONFIG_XARRAY_MULTI 2091 struct xarray *xa = xa_param(test); 2092 2093 unsigned int order; 2094 2095 for (order = 1; order < 12; order++) { 2096 XA_STATE(xas, xa, 1 << order); 2097 2098 xa_store_order(xa, 0, order, xa, GFP_KERNEL); 2099 rcu_read_lock(); 2100 xas_load(&xas); 2101 XA_BUG_ON(xa, xas.xa_node->count == 0); 2102 XA_BUG_ON(xa, xas.xa_node->count > (1 << order)); 2103 XA_BUG_ON(xa, xas.xa_node->nr_values != 0); 2104 rcu_read_unlock(); 2105 2106 xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order), 2107 GFP_KERNEL); 2108 XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2); 2109 2110 xa_erase(xa, 1 << order); 2111 XA_BUG_ON(xa, xas.xa_node->nr_values != 0); 2112 2113 xa_erase(xa, 0); 2114 XA_BUG_ON(xa, !xa_empty(xa)); 2115 } 2116 #endif 2117 } 2118 2119 static noinline void check_get_order(struct kunit *test) 2120 { 2121 struct xarray *xa = xa_param(test); 2122 2123 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; 2124 unsigned int order; 2125 unsigned long i, j; 2126 2127 for (i = 0; i < 3; i++) 2128 XA_BUG_ON(xa, xa_get_order(xa, i) != 0); 2129 2130 for (order = 0; order < max_order; order++) { 2131 for (i = 0; i < 10; i++) { 2132 xa_store_order(xa, i << order, order, 2133 xa_mk_index(i << order), GFP_KERNEL); 2134 for (j = i << order; j < (i + 1) << order; j++) 2135 XA_BUG_ON(xa, xa_get_order(xa, j) != order); 2136 xa_erase(xa, i << order); 2137 } 2138 } 2139 } 2140 2141 static noinline void check_xas_get_order(struct kunit *test) 2142 { 2143 struct xarray *xa = xa_param(test); 2144 2145 XA_STATE(xas, xa, 0); 2146 2147 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; 2148 unsigned int order; 2149 unsigned long i, j; 2150 2151 for (order = 0; order < max_order; order++) { 2152 for (i = 0; i < 10; i++) { 2153 xas_set_order(&xas, i << order, order); 2154 do { 2155 xas_lock(&xas); 2156 xas_store(&xas, xa_mk_value(i)); 2157 xas_unlock(&xas); 2158 } while (xas_nomem(&xas, GFP_KERNEL)); 2159 2160 for (j = i << order; j < (i + 1) << order; j++) { 2161 xas_set_order(&xas, j, 0); 2162 rcu_read_lock(); 2163 xas_load(&xas); 2164 XA_BUG_ON(xa, xas_get_order(&xas) != order); 2165 rcu_read_unlock(); 2166 } 2167 2168 xas_lock(&xas); 2169 xas_set_order(&xas, i << order, order); 2170 xas_store(&xas, NULL); 2171 xas_unlock(&xas); 2172 } 2173 } 2174 } 2175 2176 static noinline void check_xas_conflict_get_order(struct kunit *test) 2177 { 2178 struct xarray *xa = xa_param(test); 2179 2180 XA_STATE(xas, xa, 0); 2181 2182 void *entry; 2183 int only_once; 2184 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; 2185 unsigned int order; 2186 unsigned long i, j, k; 2187 2188 for (order = 0; order < max_order; order++) { 2189 for (i = 0; i < 10; i++) { 2190 xas_set_order(&xas, i << order, order); 2191 do { 2192 xas_lock(&xas); 2193 xas_store(&xas, xa_mk_value(i)); 2194 xas_unlock(&xas); 2195 } while (xas_nomem(&xas, GFP_KERNEL)); 2196 2197 /* 2198 * Ensure xas_get_order works with xas_for_each_conflict. 2199 */ 2200 j = i << order; 2201 for (k = 0; k < order; k++) { 2202 only_once = 0; 2203 xas_set_order(&xas, j + (1 << k), k); 2204 xas_lock(&xas); 2205 xas_for_each_conflict(&xas, entry) { 2206 XA_BUG_ON(xa, entry != xa_mk_value(i)); 2207 XA_BUG_ON(xa, xas_get_order(&xas) != order); 2208 only_once++; 2209 } 2210 XA_BUG_ON(xa, only_once != 1); 2211 xas_unlock(&xas); 2212 } 2213 2214 if (order < max_order - 1) { 2215 only_once = 0; 2216 xas_set_order(&xas, (i & ~1UL) << order, order + 1); 2217 xas_lock(&xas); 2218 xas_for_each_conflict(&xas, entry) { 2219 XA_BUG_ON(xa, entry != xa_mk_value(i)); 2220 XA_BUG_ON(xa, xas_get_order(&xas) != order); 2221 only_once++; 2222 } 2223 XA_BUG_ON(xa, only_once != 1); 2224 xas_unlock(&xas); 2225 } 2226 2227 xas_set_order(&xas, i << order, order); 2228 xas_lock(&xas); 2229 xas_store(&xas, NULL); 2230 xas_unlock(&xas); 2231 } 2232 } 2233 } 2234 2235 2236 static noinline void check_destroy(struct kunit *test) 2237 { 2238 struct xarray *xa = xa_param(test); 2239 2240 unsigned long index; 2241 2242 XA_BUG_ON(xa, !xa_empty(xa)); 2243 2244 /* Destroying an empty array is a no-op */ 2245 xa_destroy(xa); 2246 XA_BUG_ON(xa, !xa_empty(xa)); 2247 2248 /* Destroying an array with a single entry */ 2249 for (index = 0; index < 1000; index++) { 2250 xa_store_index(xa, index, GFP_KERNEL); 2251 XA_BUG_ON(xa, xa_empty(xa)); 2252 xa_destroy(xa); 2253 XA_BUG_ON(xa, !xa_empty(xa)); 2254 } 2255 2256 /* Destroying an array with a single entry at ULONG_MAX */ 2257 xa_store(xa, ULONG_MAX, xa, GFP_KERNEL); 2258 XA_BUG_ON(xa, xa_empty(xa)); 2259 xa_destroy(xa); 2260 XA_BUG_ON(xa, !xa_empty(xa)); 2261 2262 #ifdef CONFIG_XARRAY_MULTI 2263 /* Destroying an array with a multi-index entry */ 2264 xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL); 2265 XA_BUG_ON(xa, xa_empty(xa)); 2266 xa_destroy(xa); 2267 XA_BUG_ON(xa, !xa_empty(xa)); 2268 #endif 2269 } 2270 2271 static DEFINE_XARRAY(array); 2272 static struct xarray *arrays[] = { &array }; 2273 KUNIT_ARRAY_PARAM(array, arrays, NULL); 2274 2275 static struct xarray *xa0s[] = { &xa0 }; 2276 KUNIT_ARRAY_PARAM(xa0, xa0s, NULL); 2277 2278 static struct workingset_testcase workingset_testcases[] = { 2279 { &array, 0 }, 2280 { &array, 64 }, 2281 { &array, 4096 }, 2282 }; 2283 KUNIT_ARRAY_PARAM(workingset, workingset_testcases, NULL); 2284 2285 static struct kunit_case xarray_cases[] = { 2286 KUNIT_CASE_PARAM(check_xa_err, array_gen_params), 2287 KUNIT_CASE_PARAM(check_xas_retry, array_gen_params), 2288 KUNIT_CASE_PARAM(check_xa_load, array_gen_params), 2289 KUNIT_CASE_PARAM(check_xa_mark, array_gen_params), 2290 KUNIT_CASE_PARAM(check_xa_shrink, array_gen_params), 2291 KUNIT_CASE_PARAM(check_xas_erase, array_gen_params), 2292 KUNIT_CASE_PARAM(check_insert, array_gen_params), 2293 KUNIT_CASE_PARAM(check_cmpxchg, array_gen_params), 2294 KUNIT_CASE_PARAM(check_cmpxchg_order, array_gen_params), 2295 KUNIT_CASE_PARAM(check_reserve, array_gen_params), 2296 KUNIT_CASE_PARAM(check_reserve, xa0_gen_params), 2297 KUNIT_CASE_PARAM(check_multi_store, array_gen_params), 2298 KUNIT_CASE_PARAM(check_multi_store_advanced, array_gen_params), 2299 KUNIT_CASE_PARAM(check_get_order, array_gen_params), 2300 KUNIT_CASE_PARAM(check_xas_get_order, array_gen_params), 2301 KUNIT_CASE_PARAM(check_xas_conflict_get_order, array_gen_params), 2302 KUNIT_CASE(check_xa_alloc), 2303 KUNIT_CASE_PARAM(check_find, array_gen_params), 2304 KUNIT_CASE_PARAM(check_find_entry, array_gen_params), 2305 KUNIT_CASE_PARAM(check_pause, array_gen_params), 2306 KUNIT_CASE_PARAM(check_account, array_gen_params), 2307 KUNIT_CASE_PARAM(check_destroy, array_gen_params), 2308 KUNIT_CASE_PARAM(check_move, array_gen_params), 2309 KUNIT_CASE_PARAM(check_create_range, array_gen_params), 2310 KUNIT_CASE_PARAM(check_store_range, array_gen_params), 2311 KUNIT_CASE_PARAM(check_store_iter, array_gen_params), 2312 KUNIT_CASE_PARAM(check_align, xa0_gen_params), 2313 KUNIT_CASE_PARAM(check_split, array_gen_params), 2314 KUNIT_CASE_PARAM(check_workingset, workingset_gen_params), 2315 {}, 2316 }; 2317 2318 static struct kunit_suite xarray_suite = { 2319 .name = "xarray", 2320 .test_cases = xarray_cases, 2321 }; 2322 2323 kunit_test_suite(xarray_suite); 2324 2325 MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>"); 2326 MODULE_DESCRIPTION("XArray API test module"); 2327 MODULE_LICENSE("GPL"); 2328