1ad3d6c72SMatthew Wilcox // SPDX-License-Identifier: GPL-2.0+ 2ad3d6c72SMatthew Wilcox /* 3ad3d6c72SMatthew Wilcox * test_xarray.c: Test the XArray API 4ad3d6c72SMatthew Wilcox * Copyright (c) 2017-2018 Microsoft Corporation 5ad3d6c72SMatthew Wilcox * Author: Matthew Wilcox <willy@infradead.org> 6ad3d6c72SMatthew Wilcox */ 7ad3d6c72SMatthew Wilcox 8ad3d6c72SMatthew Wilcox #include <linux/xarray.h> 9ad3d6c72SMatthew Wilcox #include <linux/module.h> 10ad3d6c72SMatthew Wilcox 11ad3d6c72SMatthew Wilcox static unsigned int tests_run; 12ad3d6c72SMatthew Wilcox static unsigned int tests_passed; 13ad3d6c72SMatthew Wilcox 14ad3d6c72SMatthew Wilcox #ifndef XA_DEBUG 15ad3d6c72SMatthew Wilcox # ifdef __KERNEL__ 16ad3d6c72SMatthew Wilcox void xa_dump(const struct xarray *xa) { } 17ad3d6c72SMatthew Wilcox # endif 18ad3d6c72SMatthew Wilcox #undef XA_BUG_ON 19ad3d6c72SMatthew Wilcox #define XA_BUG_ON(xa, x) do { \ 20ad3d6c72SMatthew Wilcox tests_run++; \ 21ad3d6c72SMatthew Wilcox if (x) { \ 22ad3d6c72SMatthew Wilcox printk("BUG at %s:%d\n", __func__, __LINE__); \ 23ad3d6c72SMatthew Wilcox xa_dump(xa); \ 24ad3d6c72SMatthew Wilcox dump_stack(); \ 25ad3d6c72SMatthew Wilcox } else { \ 26ad3d6c72SMatthew Wilcox tests_passed++; \ 27ad3d6c72SMatthew Wilcox } \ 28ad3d6c72SMatthew Wilcox } while (0) 29ad3d6c72SMatthew Wilcox #endif 30ad3d6c72SMatthew Wilcox 31b7677a13SMatthew Wilcox static void *xa_mk_index(unsigned long index) 32b7677a13SMatthew Wilcox { 33b7677a13SMatthew Wilcox return xa_mk_value(index & LONG_MAX); 34b7677a13SMatthew Wilcox } 35b7677a13SMatthew Wilcox 36ad3d6c72SMatthew Wilcox static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) 37ad3d6c72SMatthew Wilcox { 38b7677a13SMatthew Wilcox return xa_store(xa, index, xa_mk_index(index), gfp); 39ad3d6c72SMatthew Wilcox } 40ad3d6c72SMatthew Wilcox 41371c752dSMatthew Wilcox static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) 42371c752dSMatthew Wilcox { 43371c752dSMatthew Wilcox 
u32 id = 0; 44371c752dSMatthew Wilcox 45b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index), 46371c752dSMatthew Wilcox gfp) != 0); 47371c752dSMatthew Wilcox XA_BUG_ON(xa, id != index); 48371c752dSMatthew Wilcox } 49371c752dSMatthew Wilcox 50ad3d6c72SMatthew Wilcox static void xa_erase_index(struct xarray *xa, unsigned long index) 51ad3d6c72SMatthew Wilcox { 52b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index)); 5358d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, index) != NULL); 5458d6ea30SMatthew Wilcox } 5558d6ea30SMatthew Wilcox 5658d6ea30SMatthew Wilcox /* 5758d6ea30SMatthew Wilcox * If anyone needs this, please move it to xarray.c. We have no current 5858d6ea30SMatthew Wilcox * users outside the test suite because all current multislot users want 5958d6ea30SMatthew Wilcox * to use the advanced API. 6058d6ea30SMatthew Wilcox */ 6158d6ea30SMatthew Wilcox static void *xa_store_order(struct xarray *xa, unsigned long index, 6258d6ea30SMatthew Wilcox unsigned order, void *entry, gfp_t gfp) 6358d6ea30SMatthew Wilcox { 6458d6ea30SMatthew Wilcox XA_STATE_ORDER(xas, xa, index, order); 6558d6ea30SMatthew Wilcox void *curr; 6658d6ea30SMatthew Wilcox 6758d6ea30SMatthew Wilcox do { 6858d6ea30SMatthew Wilcox xas_lock(&xas); 6958d6ea30SMatthew Wilcox curr = xas_store(&xas, entry); 7058d6ea30SMatthew Wilcox xas_unlock(&xas); 7158d6ea30SMatthew Wilcox } while (xas_nomem(&xas, gfp)); 7258d6ea30SMatthew Wilcox 7358d6ea30SMatthew Wilcox return curr; 7458d6ea30SMatthew Wilcox } 7558d6ea30SMatthew Wilcox 7658d6ea30SMatthew Wilcox static noinline void check_xa_err(struct xarray *xa) 7758d6ea30SMatthew Wilcox { 7858d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0); 7958d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0); 8058d6ea30SMatthew Wilcox #ifndef __KERNEL__ 8158d6ea30SMatthew Wilcox /* The kernel does not fail GFP_NOWAIT allocations */ 8258d6ea30SMatthew Wilcox 
XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM); 8358d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM); 8458d6ea30SMatthew Wilcox #endif 8558d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0); 8658d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0); 8758d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0); 8858d6ea30SMatthew Wilcox // kills the test-suite :-( 8958d6ea30SMatthew Wilcox // XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL); 90ad3d6c72SMatthew Wilcox } 91ad3d6c72SMatthew Wilcox 92b803b428SMatthew Wilcox static noinline void check_xas_retry(struct xarray *xa) 93b803b428SMatthew Wilcox { 94b803b428SMatthew Wilcox XA_STATE(xas, xa, 0); 95b803b428SMatthew Wilcox void *entry; 96b803b428SMatthew Wilcox 97b803b428SMatthew Wilcox xa_store_index(xa, 0, GFP_KERNEL); 98b803b428SMatthew Wilcox xa_store_index(xa, 1, GFP_KERNEL); 99b803b428SMatthew Wilcox 100b803b428SMatthew Wilcox rcu_read_lock(); 101b803b428SMatthew Wilcox XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0)); 102b803b428SMatthew Wilcox xa_erase_index(xa, 1); 103b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas))); 104b803b428SMatthew Wilcox XA_BUG_ON(xa, xas_retry(&xas, NULL)); 105b803b428SMatthew Wilcox XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0))); 106b803b428SMatthew Wilcox xas_reset(&xas); 107b803b428SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node != XAS_RESTART); 108b803b428SMatthew Wilcox XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0)); 109b803b428SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node != NULL); 110b803b428SMatthew Wilcox 111b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL); 112b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas))); 113b803b428SMatthew Wilcox xas.xa_node = XAS_RESTART; 114b803b428SMatthew Wilcox XA_BUG_ON(xa, 
xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0)); 115b803b428SMatthew Wilcox rcu_read_unlock(); 116b803b428SMatthew Wilcox 117b803b428SMatthew Wilcox /* Make sure we can iterate through retry entries */ 118b803b428SMatthew Wilcox xas_lock(&xas); 119b803b428SMatthew Wilcox xas_set(&xas, 0); 120b803b428SMatthew Wilcox xas_store(&xas, XA_RETRY_ENTRY); 121b803b428SMatthew Wilcox xas_set(&xas, 1); 122b803b428SMatthew Wilcox xas_store(&xas, XA_RETRY_ENTRY); 123b803b428SMatthew Wilcox 124b803b428SMatthew Wilcox xas_set(&xas, 0); 125b803b428SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 126b7677a13SMatthew Wilcox xas_store(&xas, xa_mk_index(xas.xa_index)); 127b803b428SMatthew Wilcox } 128b803b428SMatthew Wilcox xas_unlock(&xas); 129b803b428SMatthew Wilcox 130b803b428SMatthew Wilcox xa_erase_index(xa, 0); 131b803b428SMatthew Wilcox xa_erase_index(xa, 1); 132b803b428SMatthew Wilcox } 133b803b428SMatthew Wilcox 134ad3d6c72SMatthew Wilcox static noinline void check_xa_load(struct xarray *xa) 135ad3d6c72SMatthew Wilcox { 136ad3d6c72SMatthew Wilcox unsigned long i, j; 137ad3d6c72SMatthew Wilcox 138ad3d6c72SMatthew Wilcox for (i = 0; i < 1024; i++) { 139ad3d6c72SMatthew Wilcox for (j = 0; j < 1024; j++) { 140ad3d6c72SMatthew Wilcox void *entry = xa_load(xa, j); 141ad3d6c72SMatthew Wilcox if (j < i) 142ad3d6c72SMatthew Wilcox XA_BUG_ON(xa, xa_to_value(entry) != j); 143ad3d6c72SMatthew Wilcox else 144ad3d6c72SMatthew Wilcox XA_BUG_ON(xa, entry); 145ad3d6c72SMatthew Wilcox } 146ad3d6c72SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); 147ad3d6c72SMatthew Wilcox } 148ad3d6c72SMatthew Wilcox 149ad3d6c72SMatthew Wilcox for (i = 0; i < 1024; i++) { 150ad3d6c72SMatthew Wilcox for (j = 0; j < 1024; j++) { 151ad3d6c72SMatthew Wilcox void *entry = xa_load(xa, j); 152ad3d6c72SMatthew Wilcox if (j >= i) 153ad3d6c72SMatthew Wilcox XA_BUG_ON(xa, xa_to_value(entry) != j); 154ad3d6c72SMatthew Wilcox else 155ad3d6c72SMatthew Wilcox XA_BUG_ON(xa, entry); 
156ad3d6c72SMatthew Wilcox } 157ad3d6c72SMatthew Wilcox xa_erase_index(xa, i); 158ad3d6c72SMatthew Wilcox } 159ad3d6c72SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 160ad3d6c72SMatthew Wilcox } 161ad3d6c72SMatthew Wilcox 1629b89a035SMatthew Wilcox static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) 1639b89a035SMatthew Wilcox { 16458d6ea30SMatthew Wilcox unsigned int order; 16558d6ea30SMatthew Wilcox unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1; 16658d6ea30SMatthew Wilcox 1679b89a035SMatthew Wilcox /* NULL elements have no marks set */ 1689b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 1699b89a035SMatthew Wilcox xa_set_mark(xa, index, XA_MARK_0); 1709b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 1719b89a035SMatthew Wilcox 1729b89a035SMatthew Wilcox /* Storing a pointer will not make a mark appear */ 1739b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL); 1749b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 1759b89a035SMatthew Wilcox xa_set_mark(xa, index, XA_MARK_0); 1769b89a035SMatthew Wilcox XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0)); 1779b89a035SMatthew Wilcox 1789b89a035SMatthew Wilcox /* Setting one mark will not set another mark */ 1799b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0)); 1809b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1)); 1819b89a035SMatthew Wilcox 1829b89a035SMatthew Wilcox /* Storing NULL clears marks, and they can't be set again */ 1839b89a035SMatthew Wilcox xa_erase_index(xa, index); 1849b89a035SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 1859b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 1869b89a035SMatthew Wilcox xa_set_mark(xa, index, XA_MARK_0); 1879b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 18858d6ea30SMatthew Wilcox 18958d6ea30SMatthew Wilcox /* 19058d6ea30SMatthew 
Wilcox * Storing a multi-index entry over entries with marks gives the 19158d6ea30SMatthew Wilcox * entire entry the union of the marks 19258d6ea30SMatthew Wilcox */ 19358d6ea30SMatthew Wilcox BUG_ON((index % 4) != 0); 19458d6ea30SMatthew Wilcox for (order = 2; order < max_order; order++) { 19558d6ea30SMatthew Wilcox unsigned long base = round_down(index, 1UL << order); 19658d6ea30SMatthew Wilcox unsigned long next = base + (1UL << order); 19758d6ea30SMatthew Wilcox unsigned long i; 19858d6ea30SMatthew Wilcox 19958d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL)); 20058d6ea30SMatthew Wilcox xa_set_mark(xa, index + 1, XA_MARK_0); 20158d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); 20258d6ea30SMatthew Wilcox xa_set_mark(xa, index + 2, XA_MARK_1); 20358d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); 204b7677a13SMatthew Wilcox xa_store_order(xa, index, order, xa_mk_index(index), 20558d6ea30SMatthew Wilcox GFP_KERNEL); 20658d6ea30SMatthew Wilcox for (i = base; i < next; i++) { 20793eb07f7SMatthew Wilcox XA_STATE(xas, xa, i); 20893eb07f7SMatthew Wilcox unsigned int seen = 0; 20993eb07f7SMatthew Wilcox void *entry; 21093eb07f7SMatthew Wilcox 21158d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); 21258d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1)); 21358d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2)); 21493eb07f7SMatthew Wilcox 21593eb07f7SMatthew Wilcox /* We should see two elements in the array */ 216fffc9a26SMatthew Wilcox rcu_read_lock(); 21793eb07f7SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) 21893eb07f7SMatthew Wilcox seen++; 219fffc9a26SMatthew Wilcox rcu_read_unlock(); 22093eb07f7SMatthew Wilcox XA_BUG_ON(xa, seen != 2); 22193eb07f7SMatthew Wilcox 22293eb07f7SMatthew Wilcox /* One of which is marked */ 22393eb07f7SMatthew Wilcox xas_set(&xas, 0); 22493eb07f7SMatthew Wilcox seen = 0; 225fffc9a26SMatthew Wilcox 
rcu_read_lock(); 22693eb07f7SMatthew Wilcox xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) 22793eb07f7SMatthew Wilcox seen++; 228fffc9a26SMatthew Wilcox rcu_read_unlock(); 22993eb07f7SMatthew Wilcox XA_BUG_ON(xa, seen != 1); 23058d6ea30SMatthew Wilcox } 23158d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0)); 23258d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1)); 23358d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2)); 23458d6ea30SMatthew Wilcox xa_erase_index(xa, index); 23558d6ea30SMatthew Wilcox xa_erase_index(xa, next); 23658d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 23758d6ea30SMatthew Wilcox } 23858d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 2399b89a035SMatthew Wilcox } 2409b89a035SMatthew Wilcox 241adb9d9c4SMatthew Wilcox static noinline void check_xa_mark_2(struct xarray *xa) 242adb9d9c4SMatthew Wilcox { 243adb9d9c4SMatthew Wilcox XA_STATE(xas, xa, 0); 244adb9d9c4SMatthew Wilcox unsigned long index; 245adb9d9c4SMatthew Wilcox unsigned int count = 0; 246adb9d9c4SMatthew Wilcox void *entry; 247adb9d9c4SMatthew Wilcox 248adb9d9c4SMatthew Wilcox xa_store_index(xa, 0, GFP_KERNEL); 249adb9d9c4SMatthew Wilcox xa_set_mark(xa, 0, XA_MARK_0); 250adb9d9c4SMatthew Wilcox xas_lock(&xas); 251adb9d9c4SMatthew Wilcox xas_load(&xas); 252adb9d9c4SMatthew Wilcox xas_init_marks(&xas); 253adb9d9c4SMatthew Wilcox xas_unlock(&xas); 254adb9d9c4SMatthew Wilcox XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0); 255adb9d9c4SMatthew Wilcox 256adb9d9c4SMatthew Wilcox for (index = 3500; index < 4500; index++) { 257adb9d9c4SMatthew Wilcox xa_store_index(xa, index, GFP_KERNEL); 258adb9d9c4SMatthew Wilcox xa_set_mark(xa, index, XA_MARK_0); 259adb9d9c4SMatthew Wilcox } 260adb9d9c4SMatthew Wilcox 261adb9d9c4SMatthew Wilcox xas_reset(&xas); 262adb9d9c4SMatthew Wilcox rcu_read_lock(); 263adb9d9c4SMatthew Wilcox xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) 264adb9d9c4SMatthew Wilcox count++; 
265adb9d9c4SMatthew Wilcox rcu_read_unlock(); 266adb9d9c4SMatthew Wilcox XA_BUG_ON(xa, count != 1000); 267adb9d9c4SMatthew Wilcox 268adb9d9c4SMatthew Wilcox xas_lock(&xas); 269adb9d9c4SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 270adb9d9c4SMatthew Wilcox xas_init_marks(&xas); 271adb9d9c4SMatthew Wilcox XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0)); 272adb9d9c4SMatthew Wilcox XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0)); 273adb9d9c4SMatthew Wilcox } 274adb9d9c4SMatthew Wilcox xas_unlock(&xas); 275adb9d9c4SMatthew Wilcox 276adb9d9c4SMatthew Wilcox xa_destroy(xa); 277adb9d9c4SMatthew Wilcox } 278adb9d9c4SMatthew Wilcox 2799b89a035SMatthew Wilcox static noinline void check_xa_mark(struct xarray *xa) 2809b89a035SMatthew Wilcox { 2819b89a035SMatthew Wilcox unsigned long index; 2829b89a035SMatthew Wilcox 2839b89a035SMatthew Wilcox for (index = 0; index < 16384; index += 4) 2849b89a035SMatthew Wilcox check_xa_mark_1(xa, index); 285adb9d9c4SMatthew Wilcox 286adb9d9c4SMatthew Wilcox check_xa_mark_2(xa); 2879b89a035SMatthew Wilcox } 2889b89a035SMatthew Wilcox 28958d6ea30SMatthew Wilcox static noinline void check_xa_shrink(struct xarray *xa) 29058d6ea30SMatthew Wilcox { 29158d6ea30SMatthew Wilcox XA_STATE(xas, xa, 1); 29258d6ea30SMatthew Wilcox struct xa_node *node; 29393eb07f7SMatthew Wilcox unsigned int order; 29493eb07f7SMatthew Wilcox unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1; 29558d6ea30SMatthew Wilcox 29658d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 29758d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL); 29858d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL); 29958d6ea30SMatthew Wilcox 30058d6ea30SMatthew Wilcox /* 30158d6ea30SMatthew Wilcox * Check that erasing the entry at 1 shrinks the tree and properly 30258d6ea30SMatthew Wilcox * marks the node as being deleted. 
30358d6ea30SMatthew Wilcox */ 30458d6ea30SMatthew Wilcox xas_lock(&xas); 30558d6ea30SMatthew Wilcox XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1)); 30658d6ea30SMatthew Wilcox node = xas.xa_node; 30758d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0)); 30858d6ea30SMatthew Wilcox XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1)); 30958d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 1) != NULL); 31058d6ea30SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS); 31158d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY); 31258d6ea30SMatthew Wilcox XA_BUG_ON(xa, xas_load(&xas) != NULL); 31358d6ea30SMatthew Wilcox xas_unlock(&xas); 31458d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); 31558d6ea30SMatthew Wilcox xa_erase_index(xa, 0); 31658d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 31793eb07f7SMatthew Wilcox 31893eb07f7SMatthew Wilcox for (order = 0; order < max_order; order++) { 31993eb07f7SMatthew Wilcox unsigned long max = (1UL << order) - 1; 32093eb07f7SMatthew Wilcox xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL); 32193eb07f7SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0)); 32293eb07f7SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); 32393eb07f7SMatthew Wilcox rcu_read_lock(); 32493eb07f7SMatthew Wilcox node = xa_head(xa); 32593eb07f7SMatthew Wilcox rcu_read_unlock(); 32693eb07f7SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) != 32793eb07f7SMatthew Wilcox NULL); 32893eb07f7SMatthew Wilcox rcu_read_lock(); 32993eb07f7SMatthew Wilcox XA_BUG_ON(xa, xa_head(xa) == node); 33093eb07f7SMatthew Wilcox rcu_read_unlock(); 33193eb07f7SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); 33293eb07f7SMatthew Wilcox xa_erase_index(xa, ULONG_MAX); 33393eb07f7SMatthew Wilcox XA_BUG_ON(xa, xa->xa_head != node); 33493eb07f7SMatthew Wilcox xa_erase_index(xa, 0); 33593eb07f7SMatthew Wilcox } 33658d6ea30SMatthew Wilcox 
} 33758d6ea30SMatthew Wilcox 33841aec91fSMatthew Wilcox static noinline void check_cmpxchg(struct xarray *xa) 33941aec91fSMatthew Wilcox { 34041aec91fSMatthew Wilcox void *FIVE = xa_mk_value(5); 34141aec91fSMatthew Wilcox void *SIX = xa_mk_value(6); 34241aec91fSMatthew Wilcox void *LOTS = xa_mk_value(12345678); 34341aec91fSMatthew Wilcox 34441aec91fSMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 34541aec91fSMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL); 34641aec91fSMatthew Wilcox XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EEXIST); 34741aec91fSMatthew Wilcox XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS); 34841aec91fSMatthew Wilcox XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS); 34941aec91fSMatthew Wilcox XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE); 35041aec91fSMatthew Wilcox XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL); 35141aec91fSMatthew Wilcox XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL); 35241aec91fSMatthew Wilcox xa_erase_index(xa, 12345678); 35341aec91fSMatthew Wilcox xa_erase_index(xa, 5); 35441aec91fSMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 35541aec91fSMatthew Wilcox } 35641aec91fSMatthew Wilcox 3579f14d4f1SMatthew Wilcox static noinline void check_reserve(struct xarray *xa) 3589f14d4f1SMatthew Wilcox { 3599f14d4f1SMatthew Wilcox void *entry; 360*4a31896cSMatthew Wilcox unsigned long index; 3619f14d4f1SMatthew Wilcox 3629f14d4f1SMatthew Wilcox /* An array with a reserved entry is not empty */ 3639f14d4f1SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 3649f14d4f1SMatthew Wilcox xa_reserve(xa, 12345678, GFP_KERNEL); 3659f14d4f1SMatthew Wilcox XA_BUG_ON(xa, xa_empty(xa)); 3669f14d4f1SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 12345678)); 3679f14d4f1SMatthew Wilcox xa_release(xa, 12345678); 3689f14d4f1SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 3699f14d4f1SMatthew Wilcox 3709f14d4f1SMatthew Wilcox 
/* Releasing a used entry does nothing */ 3719f14d4f1SMatthew Wilcox xa_reserve(xa, 12345678, GFP_KERNEL); 3729f14d4f1SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL); 3739f14d4f1SMatthew Wilcox xa_release(xa, 12345678); 3749f14d4f1SMatthew Wilcox xa_erase_index(xa, 12345678); 3759f14d4f1SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 3769f14d4f1SMatthew Wilcox 3779f14d4f1SMatthew Wilcox /* cmpxchg sees a reserved entry as NULL */ 3789f14d4f1SMatthew Wilcox xa_reserve(xa, 12345678, GFP_KERNEL); 3799f14d4f1SMatthew Wilcox XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678), 3809f14d4f1SMatthew Wilcox GFP_NOWAIT) != NULL); 3819f14d4f1SMatthew Wilcox xa_release(xa, 12345678); 3829f14d4f1SMatthew Wilcox xa_erase_index(xa, 12345678); 3839f14d4f1SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 3849f14d4f1SMatthew Wilcox 3854c0608f4SMatthew Wilcox /* And so does xa_insert */ 3864c0608f4SMatthew Wilcox xa_reserve(xa, 12345678, GFP_KERNEL); 3874c0608f4SMatthew Wilcox XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0); 3884c0608f4SMatthew Wilcox xa_erase_index(xa, 12345678); 3894c0608f4SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 3904c0608f4SMatthew Wilcox 3919f14d4f1SMatthew Wilcox /* Can iterate through a reserved entry */ 3929f14d4f1SMatthew Wilcox xa_store_index(xa, 5, GFP_KERNEL); 3939f14d4f1SMatthew Wilcox xa_reserve(xa, 6, GFP_KERNEL); 3949f14d4f1SMatthew Wilcox xa_store_index(xa, 7, GFP_KERNEL); 3959f14d4f1SMatthew Wilcox 396*4a31896cSMatthew Wilcox xa_for_each(xa, index, entry) { 3979f14d4f1SMatthew Wilcox XA_BUG_ON(xa, index != 5 && index != 7); 3989f14d4f1SMatthew Wilcox } 3999f14d4f1SMatthew Wilcox xa_destroy(xa); 4009f14d4f1SMatthew Wilcox } 4019f14d4f1SMatthew Wilcox 402b803b428SMatthew Wilcox static noinline void check_xas_erase(struct xarray *xa) 403b803b428SMatthew Wilcox { 404b803b428SMatthew Wilcox XA_STATE(xas, xa, 0); 405b803b428SMatthew Wilcox void *entry; 406b803b428SMatthew Wilcox unsigned 
long i, j; 407b803b428SMatthew Wilcox 408b803b428SMatthew Wilcox for (i = 0; i < 200; i++) { 409b803b428SMatthew Wilcox for (j = i; j < 2 * i + 17; j++) { 410b803b428SMatthew Wilcox xas_set(&xas, j); 411b803b428SMatthew Wilcox do { 412b803b428SMatthew Wilcox xas_lock(&xas); 413b7677a13SMatthew Wilcox xas_store(&xas, xa_mk_index(j)); 414b803b428SMatthew Wilcox xas_unlock(&xas); 415b803b428SMatthew Wilcox } while (xas_nomem(&xas, GFP_KERNEL)); 416b803b428SMatthew Wilcox } 417b803b428SMatthew Wilcox 418b803b428SMatthew Wilcox xas_set(&xas, ULONG_MAX); 419b803b428SMatthew Wilcox do { 420b803b428SMatthew Wilcox xas_lock(&xas); 421b803b428SMatthew Wilcox xas_store(&xas, xa_mk_value(0)); 422b803b428SMatthew Wilcox xas_unlock(&xas); 423b803b428SMatthew Wilcox } while (xas_nomem(&xas, GFP_KERNEL)); 424b803b428SMatthew Wilcox 425b803b428SMatthew Wilcox xas_lock(&xas); 426b803b428SMatthew Wilcox xas_store(&xas, NULL); 427b803b428SMatthew Wilcox 428b803b428SMatthew Wilcox xas_set(&xas, 0); 429b803b428SMatthew Wilcox j = i; 430b803b428SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 431b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(j)); 432b803b428SMatthew Wilcox xas_store(&xas, NULL); 433b803b428SMatthew Wilcox j++; 434b803b428SMatthew Wilcox } 435b803b428SMatthew Wilcox xas_unlock(&xas); 436b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 437b803b428SMatthew Wilcox } 438b803b428SMatthew Wilcox } 439b803b428SMatthew Wilcox 4404f06d630SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI 4414f06d630SMatthew Wilcox static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, 4424f06d630SMatthew Wilcox unsigned int order) 4434f06d630SMatthew Wilcox { 4444f06d630SMatthew Wilcox XA_STATE(xas, xa, index); 4454f06d630SMatthew Wilcox unsigned long min = index & ~((1UL << order) - 1); 4464f06d630SMatthew Wilcox unsigned long max = min + (1UL << order); 4474f06d630SMatthew Wilcox 448b7677a13SMatthew Wilcox xa_store_order(xa, index, order, 
xa_mk_index(index), GFP_KERNEL); 449b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index)); 450b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index)); 4514f06d630SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, max) != NULL); 4524f06d630SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); 4534f06d630SMatthew Wilcox 454fffc9a26SMatthew Wilcox xas_lock(&xas); 455b7677a13SMatthew Wilcox XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index)); 456fffc9a26SMatthew Wilcox xas_unlock(&xas); 457b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min)); 458b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min)); 4594f06d630SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, max) != NULL); 4604f06d630SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); 4614f06d630SMatthew Wilcox 4624f06d630SMatthew Wilcox xa_erase_index(xa, min); 4634f06d630SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 4644f06d630SMatthew Wilcox } 4654f06d630SMatthew Wilcox 4664f06d630SMatthew Wilcox static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, 4674f06d630SMatthew Wilcox unsigned int order) 4684f06d630SMatthew Wilcox { 4694f06d630SMatthew Wilcox XA_STATE(xas, xa, index); 4704f06d630SMatthew Wilcox xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL); 4714f06d630SMatthew Wilcox 472fffc9a26SMatthew Wilcox xas_lock(&xas); 4734f06d630SMatthew Wilcox XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0)); 4744f06d630SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != index); 4754f06d630SMatthew Wilcox XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1)); 476fffc9a26SMatthew Wilcox xas_unlock(&xas); 4774f06d630SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 4784f06d630SMatthew Wilcox } 4794f145cd6SMatthew Wilcox 4804f145cd6SMatthew Wilcox static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, 4814f145cd6SMatthew Wilcox unsigned int 
order) 4824f145cd6SMatthew Wilcox { 4834f145cd6SMatthew Wilcox XA_STATE(xas, xa, 0); 4844f145cd6SMatthew Wilcox void *entry; 4854f145cd6SMatthew Wilcox int n = 0; 4864f145cd6SMatthew Wilcox 4874f145cd6SMatthew Wilcox xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); 4884f145cd6SMatthew Wilcox 4894f145cd6SMatthew Wilcox xas_lock(&xas); 4904f145cd6SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 4914f145cd6SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(index)); 4924f145cd6SMatthew Wilcox n++; 4934f145cd6SMatthew Wilcox } 4944f145cd6SMatthew Wilcox XA_BUG_ON(xa, n != 1); 4954f145cd6SMatthew Wilcox xas_set(&xas, index + 1); 4964f145cd6SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 4974f145cd6SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(index)); 4984f145cd6SMatthew Wilcox n++; 4994f145cd6SMatthew Wilcox } 5004f145cd6SMatthew Wilcox XA_BUG_ON(xa, n != 2); 5014f145cd6SMatthew Wilcox xas_unlock(&xas); 5024f145cd6SMatthew Wilcox 5034f145cd6SMatthew Wilcox xa_destroy(xa); 5044f145cd6SMatthew Wilcox } 5054f06d630SMatthew Wilcox #endif 5064f06d630SMatthew Wilcox 50758d6ea30SMatthew Wilcox static noinline void check_multi_store(struct xarray *xa) 50858d6ea30SMatthew Wilcox { 50958d6ea30SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI 51058d6ea30SMatthew Wilcox unsigned long i, j, k; 51158d6ea30SMatthew Wilcox unsigned int max_order = (sizeof(long) == 4) ? 
30 : 60; 51258d6ea30SMatthew Wilcox 51358d6ea30SMatthew Wilcox /* Loading from any position returns the same value */ 51458d6ea30SMatthew Wilcox xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL); 51558d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); 51658d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0)); 51758d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 2) != NULL); 51858d6ea30SMatthew Wilcox rcu_read_lock(); 51958d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2); 52058d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2); 52158d6ea30SMatthew Wilcox rcu_read_unlock(); 52258d6ea30SMatthew Wilcox 52358d6ea30SMatthew Wilcox /* Storing adjacent to the value does not alter the value */ 52458d6ea30SMatthew Wilcox xa_store(xa, 3, xa, GFP_KERNEL); 52558d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); 52658d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0)); 52758d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 2) != NULL); 52858d6ea30SMatthew Wilcox rcu_read_lock(); 52958d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3); 53058d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2); 53158d6ea30SMatthew Wilcox rcu_read_unlock(); 53258d6ea30SMatthew Wilcox 53358d6ea30SMatthew Wilcox /* Overwriting multiple indexes works */ 53458d6ea30SMatthew Wilcox xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL); 53558d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1)); 53658d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1)); 53758d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1)); 53858d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1)); 53958d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 4) != NULL); 54058d6ea30SMatthew Wilcox rcu_read_lock(); 54158d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4); 
54258d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4); 54358d6ea30SMatthew Wilcox rcu_read_unlock(); 54458d6ea30SMatthew Wilcox 54558d6ea30SMatthew Wilcox /* We can erase multiple values with a single store */ 5465404a7f1SMatthew Wilcox xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL); 54758d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 54858d6ea30SMatthew Wilcox 54958d6ea30SMatthew Wilcox /* Even when the first slot is empty but the others aren't */ 55058d6ea30SMatthew Wilcox xa_store_index(xa, 1, GFP_KERNEL); 55158d6ea30SMatthew Wilcox xa_store_index(xa, 2, GFP_KERNEL); 55258d6ea30SMatthew Wilcox xa_store_order(xa, 0, 2, NULL, GFP_KERNEL); 55358d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 55458d6ea30SMatthew Wilcox 55558d6ea30SMatthew Wilcox for (i = 0; i < max_order; i++) { 55658d6ea30SMatthew Wilcox for (j = 0; j < max_order; j++) { 557b7677a13SMatthew Wilcox xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL); 558b7677a13SMatthew Wilcox xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL); 55958d6ea30SMatthew Wilcox 56058d6ea30SMatthew Wilcox for (k = 0; k < max_order; k++) { 56158d6ea30SMatthew Wilcox void *entry = xa_load(xa, (1UL << k) - 1); 56258d6ea30SMatthew Wilcox if ((i < k) && (j < k)) 56358d6ea30SMatthew Wilcox XA_BUG_ON(xa, entry != NULL); 56458d6ea30SMatthew Wilcox else 565b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(j)); 56658d6ea30SMatthew Wilcox } 56758d6ea30SMatthew Wilcox 56858d6ea30SMatthew Wilcox xa_erase(xa, 0); 56958d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 57058d6ea30SMatthew Wilcox } 57158d6ea30SMatthew Wilcox } 5724f06d630SMatthew Wilcox 5734f06d630SMatthew Wilcox for (i = 0; i < 20; i++) { 5744f06d630SMatthew Wilcox check_multi_store_1(xa, 200, i); 5754f06d630SMatthew Wilcox check_multi_store_1(xa, 0, i); 5764f06d630SMatthew Wilcox check_multi_store_1(xa, (1UL << i) + 1, i); 5774f06d630SMatthew Wilcox } 5784f06d630SMatthew Wilcox check_multi_store_2(xa, 4095, 
9); 5794f145cd6SMatthew Wilcox 5804f145cd6SMatthew Wilcox for (i = 1; i < 20; i++) { 5814f145cd6SMatthew Wilcox check_multi_store_3(xa, 0, i); 5824f145cd6SMatthew Wilcox check_multi_store_3(xa, 1UL << i, i); 5834f145cd6SMatthew Wilcox } 58458d6ea30SMatthew Wilcox #endif 58558d6ea30SMatthew Wilcox } 58658d6ea30SMatthew Wilcox 587371c752dSMatthew Wilcox static DEFINE_XARRAY_ALLOC(xa0); 588371c752dSMatthew Wilcox 589371c752dSMatthew Wilcox static noinline void check_xa_alloc(void) 590371c752dSMatthew Wilcox { 591371c752dSMatthew Wilcox int i; 592371c752dSMatthew Wilcox u32 id; 593371c752dSMatthew Wilcox 594371c752dSMatthew Wilcox /* An empty array should assign 0 to the first alloc */ 595371c752dSMatthew Wilcox xa_alloc_index(&xa0, 0, GFP_KERNEL); 596371c752dSMatthew Wilcox 597371c752dSMatthew Wilcox /* Erasing it should make the array empty again */ 598371c752dSMatthew Wilcox xa_erase_index(&xa0, 0); 599371c752dSMatthew Wilcox XA_BUG_ON(&xa0, !xa_empty(&xa0)); 600371c752dSMatthew Wilcox 601371c752dSMatthew Wilcox /* And it should assign 0 again */ 602371c752dSMatthew Wilcox xa_alloc_index(&xa0, 0, GFP_KERNEL); 603371c752dSMatthew Wilcox 604371c752dSMatthew Wilcox /* The next assigned ID should be 1 */ 605371c752dSMatthew Wilcox xa_alloc_index(&xa0, 1, GFP_KERNEL); 606371c752dSMatthew Wilcox xa_erase_index(&xa0, 1); 607371c752dSMatthew Wilcox 608371c752dSMatthew Wilcox /* Storing a value should mark it used */ 609371c752dSMatthew Wilcox xa_store_index(&xa0, 1, GFP_KERNEL); 610371c752dSMatthew Wilcox xa_alloc_index(&xa0, 2, GFP_KERNEL); 611371c752dSMatthew Wilcox 612371c752dSMatthew Wilcox /* If we then erase 0, it should be free */ 613371c752dSMatthew Wilcox xa_erase_index(&xa0, 0); 614371c752dSMatthew Wilcox xa_alloc_index(&xa0, 0, GFP_KERNEL); 615371c752dSMatthew Wilcox 616371c752dSMatthew Wilcox xa_erase_index(&xa0, 1); 617371c752dSMatthew Wilcox xa_erase_index(&xa0, 2); 618371c752dSMatthew Wilcox 619371c752dSMatthew Wilcox for (i = 1; i < 5000; i++) { 
620371c752dSMatthew Wilcox xa_alloc_index(&xa0, i, GFP_KERNEL); 621371c752dSMatthew Wilcox } 622371c752dSMatthew Wilcox 623371c752dSMatthew Wilcox xa_destroy(&xa0); 624371c752dSMatthew Wilcox 625371c752dSMatthew Wilcox id = 0xfffffffeU; 626b7677a13SMatthew Wilcox XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), 627371c752dSMatthew Wilcox GFP_KERNEL) != 0); 628371c752dSMatthew Wilcox XA_BUG_ON(&xa0, id != 0xfffffffeU); 629b7677a13SMatthew Wilcox XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), 630371c752dSMatthew Wilcox GFP_KERNEL) != 0); 631371c752dSMatthew Wilcox XA_BUG_ON(&xa0, id != 0xffffffffU); 632b7677a13SMatthew Wilcox XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), 633371c752dSMatthew Wilcox GFP_KERNEL) != -ENOSPC); 634371c752dSMatthew Wilcox XA_BUG_ON(&xa0, id != 0xffffffffU); 635371c752dSMatthew Wilcox xa_destroy(&xa0); 63648483614SMatthew Wilcox 63748483614SMatthew Wilcox id = 10; 63848483614SMatthew Wilcox XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id), 63948483614SMatthew Wilcox GFP_KERNEL) != -ENOSPC); 64048483614SMatthew Wilcox XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0); 64148483614SMatthew Wilcox XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id), 64248483614SMatthew Wilcox GFP_KERNEL) != -ENOSPC); 64348483614SMatthew Wilcox xa_erase_index(&xa0, 3); 64448483614SMatthew Wilcox XA_BUG_ON(&xa0, !xa_empty(&xa0)); 645371c752dSMatthew Wilcox } 646371c752dSMatthew Wilcox 6474e99d4e9SMatthew Wilcox static noinline void __check_store_iter(struct xarray *xa, unsigned long start, 6484e99d4e9SMatthew Wilcox unsigned int order, unsigned int present) 6494e99d4e9SMatthew Wilcox { 6504e99d4e9SMatthew Wilcox XA_STATE_ORDER(xas, xa, start, order); 6514e99d4e9SMatthew Wilcox void *entry; 6524e99d4e9SMatthew Wilcox unsigned int count = 0; 6534e99d4e9SMatthew Wilcox 6544e99d4e9SMatthew Wilcox retry: 6554e99d4e9SMatthew Wilcox xas_lock(&xas); 6564e99d4e9SMatthew Wilcox xas_for_each_conflict(&xas, 
entry) { 6574e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_is_value(entry)); 658b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry < xa_mk_index(start)); 659b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1)); 6604e99d4e9SMatthew Wilcox count++; 6614e99d4e9SMatthew Wilcox } 662b7677a13SMatthew Wilcox xas_store(&xas, xa_mk_index(start)); 6634e99d4e9SMatthew Wilcox xas_unlock(&xas); 6644e99d4e9SMatthew Wilcox if (xas_nomem(&xas, GFP_KERNEL)) { 6654e99d4e9SMatthew Wilcox count = 0; 6664e99d4e9SMatthew Wilcox goto retry; 6674e99d4e9SMatthew Wilcox } 6684e99d4e9SMatthew Wilcox XA_BUG_ON(xa, xas_error(&xas)); 6694e99d4e9SMatthew Wilcox XA_BUG_ON(xa, count != present); 670b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start)); 6714e99d4e9SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != 672b7677a13SMatthew Wilcox xa_mk_index(start)); 6734e99d4e9SMatthew Wilcox xa_erase_index(xa, start); 6744e99d4e9SMatthew Wilcox } 6754e99d4e9SMatthew Wilcox 6764e99d4e9SMatthew Wilcox static noinline void check_store_iter(struct xarray *xa) 6774e99d4e9SMatthew Wilcox { 6784e99d4e9SMatthew Wilcox unsigned int i, j; 6794e99d4e9SMatthew Wilcox unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
20 : 1; 6804e99d4e9SMatthew Wilcox 6814e99d4e9SMatthew Wilcox for (i = 0; i < max_order; i++) { 6824e99d4e9SMatthew Wilcox unsigned int min = 1 << i; 6834e99d4e9SMatthew Wilcox unsigned int max = (2 << i) - 1; 6844e99d4e9SMatthew Wilcox __check_store_iter(xa, 0, i, 0); 6854e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 6864e99d4e9SMatthew Wilcox __check_store_iter(xa, min, i, 0); 6874e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 6884e99d4e9SMatthew Wilcox 6894e99d4e9SMatthew Wilcox xa_store_index(xa, min, GFP_KERNEL); 6904e99d4e9SMatthew Wilcox __check_store_iter(xa, min, i, 1); 6914e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 6924e99d4e9SMatthew Wilcox xa_store_index(xa, max, GFP_KERNEL); 6934e99d4e9SMatthew Wilcox __check_store_iter(xa, min, i, 1); 6944e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 6954e99d4e9SMatthew Wilcox 6964e99d4e9SMatthew Wilcox for (j = 0; j < min; j++) 6974e99d4e9SMatthew Wilcox xa_store_index(xa, j, GFP_KERNEL); 6984e99d4e9SMatthew Wilcox __check_store_iter(xa, 0, i, min); 6994e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 7004e99d4e9SMatthew Wilcox for (j = 0; j < min; j++) 7014e99d4e9SMatthew Wilcox xa_store_index(xa, min + j, GFP_KERNEL); 7024e99d4e9SMatthew Wilcox __check_store_iter(xa, min, i, min); 7034e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 7044e99d4e9SMatthew Wilcox } 7054e99d4e9SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI 7064e99d4e9SMatthew Wilcox xa_store_index(xa, 63, GFP_KERNEL); 7074e99d4e9SMatthew Wilcox xa_store_index(xa, 65, GFP_KERNEL); 7084e99d4e9SMatthew Wilcox __check_store_iter(xa, 64, 2, 1); 7094e99d4e9SMatthew Wilcox xa_erase_index(xa, 63); 7104e99d4e9SMatthew Wilcox #endif 7114e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 7124e99d4e9SMatthew Wilcox } 7134e99d4e9SMatthew Wilcox 714b803b428SMatthew Wilcox static noinline void check_multi_find(struct xarray *xa) 715b803b428SMatthew Wilcox { 716b803b428SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI 717b803b428SMatthew 
Wilcox unsigned long index; 718b803b428SMatthew Wilcox 719b803b428SMatthew Wilcox xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL); 720b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL); 721b803b428SMatthew Wilcox 722b803b428SMatthew Wilcox index = 0; 723b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) != 724b803b428SMatthew Wilcox xa_mk_value(12)); 725b803b428SMatthew Wilcox XA_BUG_ON(xa, index != 12); 726b803b428SMatthew Wilcox index = 13; 727b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) != 728b803b428SMatthew Wilcox xa_mk_value(12)); 729b803b428SMatthew Wilcox XA_BUG_ON(xa, (index < 12) || (index >= 16)); 730b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) != 731b803b428SMatthew Wilcox xa_mk_value(16)); 732b803b428SMatthew Wilcox XA_BUG_ON(xa, index != 16); 733b803b428SMatthew Wilcox 734b803b428SMatthew Wilcox xa_erase_index(xa, 12); 735b803b428SMatthew Wilcox xa_erase_index(xa, 16); 736b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 737b803b428SMatthew Wilcox #endif 738b803b428SMatthew Wilcox } 739b803b428SMatthew Wilcox 740b803b428SMatthew Wilcox static noinline void check_multi_find_2(struct xarray *xa) 741b803b428SMatthew Wilcox { 742b803b428SMatthew Wilcox unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
10 : 1; 743b803b428SMatthew Wilcox unsigned int i, j; 744b803b428SMatthew Wilcox void *entry; 745b803b428SMatthew Wilcox 746b803b428SMatthew Wilcox for (i = 0; i < max_order; i++) { 747b803b428SMatthew Wilcox unsigned long index = 1UL << i; 748b803b428SMatthew Wilcox for (j = 0; j < index; j++) { 749b803b428SMatthew Wilcox XA_STATE(xas, xa, j + index); 750b803b428SMatthew Wilcox xa_store_index(xa, index - 1, GFP_KERNEL); 751b7677a13SMatthew Wilcox xa_store_order(xa, index, i, xa_mk_index(index), 752b803b428SMatthew Wilcox GFP_KERNEL); 753b803b428SMatthew Wilcox rcu_read_lock(); 754b803b428SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 755b803b428SMatthew Wilcox xa_erase_index(xa, index); 756b803b428SMatthew Wilcox } 757b803b428SMatthew Wilcox rcu_read_unlock(); 758b803b428SMatthew Wilcox xa_erase_index(xa, index - 1); 759b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 760b803b428SMatthew Wilcox } 761b803b428SMatthew Wilcox } 762b803b428SMatthew Wilcox } 763b803b428SMatthew Wilcox 7648229706eSMatthew Wilcox static noinline void check_find_1(struct xarray *xa) 765b803b428SMatthew Wilcox { 766b803b428SMatthew Wilcox unsigned long i, j, k; 767b803b428SMatthew Wilcox 768b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 769b803b428SMatthew Wilcox 770b803b428SMatthew Wilcox /* 771b803b428SMatthew Wilcox * Check xa_find with all pairs between 0 and 99 inclusive, 772b803b428SMatthew Wilcox * starting at every index between 0 and 99 773b803b428SMatthew Wilcox */ 774b803b428SMatthew Wilcox for (i = 0; i < 100; i++) { 775b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); 776b803b428SMatthew Wilcox xa_set_mark(xa, i, XA_MARK_0); 777b803b428SMatthew Wilcox for (j = 0; j < i; j++) { 778b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) != 779b803b428SMatthew Wilcox NULL); 780b803b428SMatthew Wilcox xa_set_mark(xa, j, XA_MARK_0); 781b803b428SMatthew Wilcox for (k = 0; k < 100; k++) { 782b803b428SMatthew Wilcox 
unsigned long index = k; 783b803b428SMatthew Wilcox void *entry = xa_find(xa, &index, ULONG_MAX, 784b803b428SMatthew Wilcox XA_PRESENT); 785b803b428SMatthew Wilcox if (k <= j) 786b803b428SMatthew Wilcox XA_BUG_ON(xa, index != j); 787b803b428SMatthew Wilcox else if (k <= i) 788b803b428SMatthew Wilcox XA_BUG_ON(xa, index != i); 789b803b428SMatthew Wilcox else 790b803b428SMatthew Wilcox XA_BUG_ON(xa, entry != NULL); 791b803b428SMatthew Wilcox 792b803b428SMatthew Wilcox index = k; 793b803b428SMatthew Wilcox entry = xa_find(xa, &index, ULONG_MAX, 794b803b428SMatthew Wilcox XA_MARK_0); 795b803b428SMatthew Wilcox if (k <= j) 796b803b428SMatthew Wilcox XA_BUG_ON(xa, index != j); 797b803b428SMatthew Wilcox else if (k <= i) 798b803b428SMatthew Wilcox XA_BUG_ON(xa, index != i); 799b803b428SMatthew Wilcox else 800b803b428SMatthew Wilcox XA_BUG_ON(xa, entry != NULL); 801b803b428SMatthew Wilcox } 802b803b428SMatthew Wilcox xa_erase_index(xa, j); 803b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0)); 804b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); 805b803b428SMatthew Wilcox } 806b803b428SMatthew Wilcox xa_erase_index(xa, i); 807b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0)); 808b803b428SMatthew Wilcox } 809b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 8108229706eSMatthew Wilcox } 8118229706eSMatthew Wilcox 8128229706eSMatthew Wilcox static noinline void check_find_2(struct xarray *xa) 8138229706eSMatthew Wilcox { 8148229706eSMatthew Wilcox void *entry; 815*4a31896cSMatthew Wilcox unsigned long i, j, index; 8168229706eSMatthew Wilcox 817*4a31896cSMatthew Wilcox xa_for_each(xa, index, entry) { 8188229706eSMatthew Wilcox XA_BUG_ON(xa, true); 8198229706eSMatthew Wilcox } 8208229706eSMatthew Wilcox 8218229706eSMatthew Wilcox for (i = 0; i < 1024; i++) { 8228229706eSMatthew Wilcox xa_store_index(xa, index, GFP_KERNEL); 8238229706eSMatthew Wilcox j = 0; 824*4a31896cSMatthew Wilcox xa_for_each(xa, index, entry) { 
825b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_mk_index(index) != entry); 8268229706eSMatthew Wilcox XA_BUG_ON(xa, index != j++); 8278229706eSMatthew Wilcox } 8288229706eSMatthew Wilcox } 8298229706eSMatthew Wilcox 8308229706eSMatthew Wilcox xa_destroy(xa); 8318229706eSMatthew Wilcox } 8328229706eSMatthew Wilcox 83348483614SMatthew Wilcox static noinline void check_find_3(struct xarray *xa) 83448483614SMatthew Wilcox { 83548483614SMatthew Wilcox XA_STATE(xas, xa, 0); 83648483614SMatthew Wilcox unsigned long i, j, k; 83748483614SMatthew Wilcox void *entry; 83848483614SMatthew Wilcox 83948483614SMatthew Wilcox for (i = 0; i < 100; i++) { 84048483614SMatthew Wilcox for (j = 0; j < 100; j++) { 841490fd30fSMatthew Wilcox rcu_read_lock(); 84248483614SMatthew Wilcox for (k = 0; k < 100; k++) { 84348483614SMatthew Wilcox xas_set(&xas, j); 84448483614SMatthew Wilcox xas_for_each_marked(&xas, entry, k, XA_MARK_0) 84548483614SMatthew Wilcox ; 84648483614SMatthew Wilcox if (j > k) 84748483614SMatthew Wilcox XA_BUG_ON(xa, 84848483614SMatthew Wilcox xas.xa_node != XAS_RESTART); 84948483614SMatthew Wilcox } 850490fd30fSMatthew Wilcox rcu_read_unlock(); 85148483614SMatthew Wilcox } 85248483614SMatthew Wilcox xa_store_index(xa, i, GFP_KERNEL); 85348483614SMatthew Wilcox xa_set_mark(xa, i, XA_MARK_0); 85448483614SMatthew Wilcox } 85548483614SMatthew Wilcox xa_destroy(xa); 85648483614SMatthew Wilcox } 85748483614SMatthew Wilcox 8588229706eSMatthew Wilcox static noinline void check_find(struct xarray *xa) 8598229706eSMatthew Wilcox { 8608229706eSMatthew Wilcox check_find_1(xa); 8618229706eSMatthew Wilcox check_find_2(xa); 86248483614SMatthew Wilcox check_find_3(xa); 863b803b428SMatthew Wilcox check_multi_find(xa); 864b803b428SMatthew Wilcox check_multi_find_2(xa); 865b803b428SMatthew Wilcox } 866b803b428SMatthew Wilcox 867e21a2955SMatthew Wilcox /* See find_swap_entry() in mm/shmem.c */ 868e21a2955SMatthew Wilcox static noinline unsigned long xa_find_entry(struct xarray *xa, void *item) 
869e21a2955SMatthew Wilcox { 870e21a2955SMatthew Wilcox XA_STATE(xas, xa, 0); 871e21a2955SMatthew Wilcox unsigned int checked = 0; 872e21a2955SMatthew Wilcox void *entry; 873e21a2955SMatthew Wilcox 874e21a2955SMatthew Wilcox rcu_read_lock(); 875e21a2955SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 876e21a2955SMatthew Wilcox if (xas_retry(&xas, entry)) 877e21a2955SMatthew Wilcox continue; 878e21a2955SMatthew Wilcox if (entry == item) 879e21a2955SMatthew Wilcox break; 880e21a2955SMatthew Wilcox checked++; 881e21a2955SMatthew Wilcox if ((checked % 4) != 0) 882e21a2955SMatthew Wilcox continue; 883e21a2955SMatthew Wilcox xas_pause(&xas); 884e21a2955SMatthew Wilcox } 885e21a2955SMatthew Wilcox rcu_read_unlock(); 886e21a2955SMatthew Wilcox 887e21a2955SMatthew Wilcox return entry ? xas.xa_index : -1; 888e21a2955SMatthew Wilcox } 889e21a2955SMatthew Wilcox 890e21a2955SMatthew Wilcox static noinline void check_find_entry(struct xarray *xa) 891e21a2955SMatthew Wilcox { 892e21a2955SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI 893e21a2955SMatthew Wilcox unsigned int order; 894e21a2955SMatthew Wilcox unsigned long offset, index; 895e21a2955SMatthew Wilcox 896e21a2955SMatthew Wilcox for (order = 0; order < 20; order++) { 897e21a2955SMatthew Wilcox for (offset = 0; offset < (1UL << (order + 3)); 898e21a2955SMatthew Wilcox offset += (1UL << order)) { 899e21a2955SMatthew Wilcox for (index = 0; index < (1UL << (order + 5)); 900e21a2955SMatthew Wilcox index += (1UL << order)) { 901e21a2955SMatthew Wilcox xa_store_order(xa, index, order, 902b7677a13SMatthew Wilcox xa_mk_index(index), GFP_KERNEL); 903e21a2955SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, index) != 904b7677a13SMatthew Wilcox xa_mk_index(index)); 905e21a2955SMatthew Wilcox XA_BUG_ON(xa, xa_find_entry(xa, 906b7677a13SMatthew Wilcox xa_mk_index(index)) != index); 907e21a2955SMatthew Wilcox } 908e21a2955SMatthew Wilcox XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 909e21a2955SMatthew Wilcox xa_destroy(xa); 910e21a2955SMatthew 
Wilcox } 911e21a2955SMatthew Wilcox } 912e21a2955SMatthew Wilcox #endif 913e21a2955SMatthew Wilcox 914e21a2955SMatthew Wilcox XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 915e21a2955SMatthew Wilcox xa_store_index(xa, ULONG_MAX, GFP_KERNEL); 916e21a2955SMatthew Wilcox XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 917b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1); 918e21a2955SMatthew Wilcox xa_erase_index(xa, ULONG_MAX); 919e21a2955SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 920e21a2955SMatthew Wilcox } 921e21a2955SMatthew Wilcox 92264d3e9a9SMatthew Wilcox static noinline void check_move_small(struct xarray *xa, unsigned long idx) 92364d3e9a9SMatthew Wilcox { 92464d3e9a9SMatthew Wilcox XA_STATE(xas, xa, 0); 92564d3e9a9SMatthew Wilcox unsigned long i; 92664d3e9a9SMatthew Wilcox 92764d3e9a9SMatthew Wilcox xa_store_index(xa, 0, GFP_KERNEL); 92864d3e9a9SMatthew Wilcox xa_store_index(xa, idx, GFP_KERNEL); 92964d3e9a9SMatthew Wilcox 93064d3e9a9SMatthew Wilcox rcu_read_lock(); 93164d3e9a9SMatthew Wilcox for (i = 0; i < idx * 4; i++) { 93264d3e9a9SMatthew Wilcox void *entry = xas_next(&xas); 93364d3e9a9SMatthew Wilcox if (i <= idx) 93464d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); 93564d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != i); 93664d3e9a9SMatthew Wilcox if (i == 0 || i == idx) 937b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(i)); 93864d3e9a9SMatthew Wilcox else 93964d3e9a9SMatthew Wilcox XA_BUG_ON(xa, entry != NULL); 94064d3e9a9SMatthew Wilcox } 94164d3e9a9SMatthew Wilcox xas_next(&xas); 94264d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != i); 94364d3e9a9SMatthew Wilcox 94464d3e9a9SMatthew Wilcox do { 94564d3e9a9SMatthew Wilcox void *entry = xas_prev(&xas); 94664d3e9a9SMatthew Wilcox i--; 94764d3e9a9SMatthew Wilcox if (i <= idx) 94864d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); 94964d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != i); 95064d3e9a9SMatthew Wilcox if 
(i == 0 || i == idx) 951b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(i)); 95264d3e9a9SMatthew Wilcox else 95364d3e9a9SMatthew Wilcox XA_BUG_ON(xa, entry != NULL); 95464d3e9a9SMatthew Wilcox } while (i > 0); 95564d3e9a9SMatthew Wilcox 95664d3e9a9SMatthew Wilcox xas_set(&xas, ULONG_MAX); 95764d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas_next(&xas) != NULL); 95864d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); 95964d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0)); 96064d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != 0); 96164d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas_prev(&xas) != NULL); 96264d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); 96364d3e9a9SMatthew Wilcox rcu_read_unlock(); 96464d3e9a9SMatthew Wilcox 96564d3e9a9SMatthew Wilcox xa_erase_index(xa, 0); 96664d3e9a9SMatthew Wilcox xa_erase_index(xa, idx); 96764d3e9a9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 96864d3e9a9SMatthew Wilcox } 96964d3e9a9SMatthew Wilcox 97064d3e9a9SMatthew Wilcox static noinline void check_move(struct xarray *xa) 97164d3e9a9SMatthew Wilcox { 97264d3e9a9SMatthew Wilcox XA_STATE(xas, xa, (1 << 16) - 1); 97364d3e9a9SMatthew Wilcox unsigned long i; 97464d3e9a9SMatthew Wilcox 97564d3e9a9SMatthew Wilcox for (i = 0; i < (1 << 16); i++) 97664d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); 97764d3e9a9SMatthew Wilcox 97864d3e9a9SMatthew Wilcox rcu_read_lock(); 97964d3e9a9SMatthew Wilcox do { 98064d3e9a9SMatthew Wilcox void *entry = xas_prev(&xas); 98164d3e9a9SMatthew Wilcox i--; 982b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(i)); 98364d3e9a9SMatthew Wilcox XA_BUG_ON(xa, i != xas.xa_index); 98464d3e9a9SMatthew Wilcox } while (i != 0); 98564d3e9a9SMatthew Wilcox 98664d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas_prev(&xas) != NULL); 98764d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); 98864d3e9a9SMatthew Wilcox 98964d3e9a9SMatthew Wilcox do { 99064d3e9a9SMatthew Wilcox void 
*entry = xas_next(&xas); 991b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(i)); 99264d3e9a9SMatthew Wilcox XA_BUG_ON(xa, i != xas.xa_index); 99364d3e9a9SMatthew Wilcox i++; 99464d3e9a9SMatthew Wilcox } while (i < (1 << 16)); 99564d3e9a9SMatthew Wilcox rcu_read_unlock(); 99664d3e9a9SMatthew Wilcox 99764d3e9a9SMatthew Wilcox for (i = (1 << 8); i < (1 << 15); i++) 99864d3e9a9SMatthew Wilcox xa_erase_index(xa, i); 99964d3e9a9SMatthew Wilcox 100064d3e9a9SMatthew Wilcox i = xas.xa_index; 100164d3e9a9SMatthew Wilcox 100264d3e9a9SMatthew Wilcox rcu_read_lock(); 100364d3e9a9SMatthew Wilcox do { 100464d3e9a9SMatthew Wilcox void *entry = xas_prev(&xas); 100564d3e9a9SMatthew Wilcox i--; 100664d3e9a9SMatthew Wilcox if ((i < (1 << 8)) || (i >= (1 << 15))) 1007b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(i)); 100864d3e9a9SMatthew Wilcox else 100964d3e9a9SMatthew Wilcox XA_BUG_ON(xa, entry != NULL); 101064d3e9a9SMatthew Wilcox XA_BUG_ON(xa, i != xas.xa_index); 101164d3e9a9SMatthew Wilcox } while (i != 0); 101264d3e9a9SMatthew Wilcox 101364d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas_prev(&xas) != NULL); 101464d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); 101564d3e9a9SMatthew Wilcox 101664d3e9a9SMatthew Wilcox do { 101764d3e9a9SMatthew Wilcox void *entry = xas_next(&xas); 101864d3e9a9SMatthew Wilcox if ((i < (1 << 8)) || (i >= (1 << 15))) 1019b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(i)); 102064d3e9a9SMatthew Wilcox else 102164d3e9a9SMatthew Wilcox XA_BUG_ON(xa, entry != NULL); 102264d3e9a9SMatthew Wilcox XA_BUG_ON(xa, i != xas.xa_index); 102364d3e9a9SMatthew Wilcox i++; 102464d3e9a9SMatthew Wilcox } while (i < (1 << 16)); 102564d3e9a9SMatthew Wilcox rcu_read_unlock(); 102664d3e9a9SMatthew Wilcox 102764d3e9a9SMatthew Wilcox xa_destroy(xa); 102864d3e9a9SMatthew Wilcox 102964d3e9a9SMatthew Wilcox for (i = 0; i < 16; i++) 103064d3e9a9SMatthew Wilcox check_move_small(xa, 1UL << i); 103164d3e9a9SMatthew Wilcox 103264d3e9a9SMatthew 
Wilcox for (i = 2; i < 16; i++) 103364d3e9a9SMatthew Wilcox check_move_small(xa, (1UL << i) - 1); 103464d3e9a9SMatthew Wilcox } 103564d3e9a9SMatthew Wilcox 10362264f513SMatthew Wilcox static noinline void xa_store_many_order(struct xarray *xa, 10372264f513SMatthew Wilcox unsigned long index, unsigned order) 10382264f513SMatthew Wilcox { 10392264f513SMatthew Wilcox XA_STATE_ORDER(xas, xa, index, order); 10402264f513SMatthew Wilcox unsigned int i = 0; 10412264f513SMatthew Wilcox 10422264f513SMatthew Wilcox do { 10432264f513SMatthew Wilcox xas_lock(&xas); 10442264f513SMatthew Wilcox XA_BUG_ON(xa, xas_find_conflict(&xas)); 10452264f513SMatthew Wilcox xas_create_range(&xas); 10462264f513SMatthew Wilcox if (xas_error(&xas)) 10472264f513SMatthew Wilcox goto unlock; 10482264f513SMatthew Wilcox for (i = 0; i < (1U << order); i++) { 1049b7677a13SMatthew Wilcox XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i))); 10502264f513SMatthew Wilcox xas_next(&xas); 10512264f513SMatthew Wilcox } 10522264f513SMatthew Wilcox unlock: 10532264f513SMatthew Wilcox xas_unlock(&xas); 10542264f513SMatthew Wilcox } while (xas_nomem(&xas, GFP_KERNEL)); 10552264f513SMatthew Wilcox 10562264f513SMatthew Wilcox XA_BUG_ON(xa, xas_error(&xas)); 10572264f513SMatthew Wilcox } 10582264f513SMatthew Wilcox 10592264f513SMatthew Wilcox static noinline void check_create_range_1(struct xarray *xa, 10602264f513SMatthew Wilcox unsigned long index, unsigned order) 10612264f513SMatthew Wilcox { 10622264f513SMatthew Wilcox unsigned long i; 10632264f513SMatthew Wilcox 10642264f513SMatthew Wilcox xa_store_many_order(xa, index, order); 10652264f513SMatthew Wilcox for (i = index; i < index + (1UL << order); i++) 10662264f513SMatthew Wilcox xa_erase_index(xa, i); 10672264f513SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 10682264f513SMatthew Wilcox } 10692264f513SMatthew Wilcox 10702264f513SMatthew Wilcox static noinline void check_create_range_2(struct xarray *xa, unsigned order) 10712264f513SMatthew Wilcox { 
10722264f513SMatthew Wilcox unsigned long i; 10732264f513SMatthew Wilcox unsigned long nr = 1UL << order; 10742264f513SMatthew Wilcox 10752264f513SMatthew Wilcox for (i = 0; i < nr * nr; i += nr) 10762264f513SMatthew Wilcox xa_store_many_order(xa, i, order); 10772264f513SMatthew Wilcox for (i = 0; i < nr * nr; i++) 10782264f513SMatthew Wilcox xa_erase_index(xa, i); 10792264f513SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 10802264f513SMatthew Wilcox } 10812264f513SMatthew Wilcox 10822264f513SMatthew Wilcox static noinline void check_create_range_3(void) 10832264f513SMatthew Wilcox { 10842264f513SMatthew Wilcox XA_STATE(xas, NULL, 0); 10852264f513SMatthew Wilcox xas_set_err(&xas, -EEXIST); 10862264f513SMatthew Wilcox xas_create_range(&xas); 10872264f513SMatthew Wilcox XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST); 10882264f513SMatthew Wilcox } 10892264f513SMatthew Wilcox 10902264f513SMatthew Wilcox static noinline void check_create_range_4(struct xarray *xa, 10912264f513SMatthew Wilcox unsigned long index, unsigned order) 10922264f513SMatthew Wilcox { 10932264f513SMatthew Wilcox XA_STATE_ORDER(xas, xa, index, order); 10942264f513SMatthew Wilcox unsigned long base = xas.xa_index; 10952264f513SMatthew Wilcox unsigned long i = 0; 10962264f513SMatthew Wilcox 10972264f513SMatthew Wilcox xa_store_index(xa, index, GFP_KERNEL); 10982264f513SMatthew Wilcox do { 10992264f513SMatthew Wilcox xas_lock(&xas); 11002264f513SMatthew Wilcox xas_create_range(&xas); 11012264f513SMatthew Wilcox if (xas_error(&xas)) 11022264f513SMatthew Wilcox goto unlock; 11032264f513SMatthew Wilcox for (i = 0; i < (1UL << order); i++) { 1104b7677a13SMatthew Wilcox void *old = xas_store(&xas, xa_mk_index(base + i)); 11052264f513SMatthew Wilcox if (xas.xa_index == index) 1106b7677a13SMatthew Wilcox XA_BUG_ON(xa, old != xa_mk_index(base + i)); 11072264f513SMatthew Wilcox else 11082264f513SMatthew Wilcox XA_BUG_ON(xa, old != NULL); 11092264f513SMatthew Wilcox xas_next(&xas); 11102264f513SMatthew Wilcox } 
11112264f513SMatthew Wilcox unlock: 11122264f513SMatthew Wilcox xas_unlock(&xas); 11132264f513SMatthew Wilcox } while (xas_nomem(&xas, GFP_KERNEL)); 11142264f513SMatthew Wilcox 11152264f513SMatthew Wilcox XA_BUG_ON(xa, xas_error(&xas)); 11162264f513SMatthew Wilcox 11172264f513SMatthew Wilcox for (i = base; i < base + (1UL << order); i++) 11182264f513SMatthew Wilcox xa_erase_index(xa, i); 11192264f513SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 11202264f513SMatthew Wilcox } 11212264f513SMatthew Wilcox 11222264f513SMatthew Wilcox static noinline void check_create_range(struct xarray *xa) 11232264f513SMatthew Wilcox { 11242264f513SMatthew Wilcox unsigned int order; 11252264f513SMatthew Wilcox unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1; 11262264f513SMatthew Wilcox 11272264f513SMatthew Wilcox for (order = 0; order < max_order; order++) { 11282264f513SMatthew Wilcox check_create_range_1(xa, 0, order); 11292264f513SMatthew Wilcox check_create_range_1(xa, 1U << order, order); 11302264f513SMatthew Wilcox check_create_range_1(xa, 2U << order, order); 11312264f513SMatthew Wilcox check_create_range_1(xa, 3U << order, order); 11322264f513SMatthew Wilcox check_create_range_1(xa, 1U << 24, order); 11332264f513SMatthew Wilcox if (order < 10) 11342264f513SMatthew Wilcox check_create_range_2(xa, order); 11352264f513SMatthew Wilcox 11362264f513SMatthew Wilcox check_create_range_4(xa, 0, order); 11372264f513SMatthew Wilcox check_create_range_4(xa, 1U << order, order); 11382264f513SMatthew Wilcox check_create_range_4(xa, 2U << order, order); 11392264f513SMatthew Wilcox check_create_range_4(xa, 3U << order, order); 11402264f513SMatthew Wilcox check_create_range_4(xa, 1U << 24, order); 11412264f513SMatthew Wilcox 11422264f513SMatthew Wilcox check_create_range_4(xa, 1, order); 11432264f513SMatthew Wilcox check_create_range_4(xa, (1U << order) + 1, order); 11442264f513SMatthew Wilcox check_create_range_4(xa, (2U << order) + 1, order); 11452264f513SMatthew Wilcox 
check_create_range_4(xa, (2U << order) - 1, order); 11462264f513SMatthew Wilcox check_create_range_4(xa, (3U << order) + 1, order); 11472264f513SMatthew Wilcox check_create_range_4(xa, (3U << order) - 1, order); 11482264f513SMatthew Wilcox check_create_range_4(xa, (1U << 24) + 1, order); 11492264f513SMatthew Wilcox } 11502264f513SMatthew Wilcox 11512264f513SMatthew Wilcox check_create_range_3(); 11522264f513SMatthew Wilcox } 11532264f513SMatthew Wilcox 11540e9446c3SMatthew Wilcox static noinline void __check_store_range(struct xarray *xa, unsigned long first, 11550e9446c3SMatthew Wilcox unsigned long last) 11560e9446c3SMatthew Wilcox { 11570e9446c3SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI 1158b7677a13SMatthew Wilcox xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL); 11590e9446c3SMatthew Wilcox 1160b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first)); 1161b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first)); 11620e9446c3SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL); 11630e9446c3SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL); 11640e9446c3SMatthew Wilcox 11650e9446c3SMatthew Wilcox xa_store_range(xa, first, last, NULL, GFP_KERNEL); 11660e9446c3SMatthew Wilcox #endif 11670e9446c3SMatthew Wilcox 11680e9446c3SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 11690e9446c3SMatthew Wilcox } 11700e9446c3SMatthew Wilcox 11710e9446c3SMatthew Wilcox static noinline void check_store_range(struct xarray *xa) 11720e9446c3SMatthew Wilcox { 11730e9446c3SMatthew Wilcox unsigned long i, j; 11740e9446c3SMatthew Wilcox 11750e9446c3SMatthew Wilcox for (i = 0; i < 128; i++) { 11760e9446c3SMatthew Wilcox for (j = i; j < 128; j++) { 11770e9446c3SMatthew Wilcox __check_store_range(xa, i, j); 11780e9446c3SMatthew Wilcox __check_store_range(xa, 128 + i, 128 + j); 11790e9446c3SMatthew Wilcox __check_store_range(xa, 4095 + i, 4095 + j); 11800e9446c3SMatthew Wilcox __check_store_range(xa, 4096 + i, 4096 + 
j); 11810e9446c3SMatthew Wilcox __check_store_range(xa, 123456 + i, 123456 + j); 11825404a7f1SMatthew Wilcox __check_store_range(xa, (1 << 24) + i, (1 << 24) + j); 11830e9446c3SMatthew Wilcox } 11840e9446c3SMatthew Wilcox } 11850e9446c3SMatthew Wilcox } 11860e9446c3SMatthew Wilcox 1187a97e7904SMatthew Wilcox static LIST_HEAD(shadow_nodes); 1188a97e7904SMatthew Wilcox 1189a97e7904SMatthew Wilcox static void test_update_node(struct xa_node *node) 1190a97e7904SMatthew Wilcox { 1191a97e7904SMatthew Wilcox if (node->count && node->count == node->nr_values) { 1192a97e7904SMatthew Wilcox if (list_empty(&node->private_list)) 1193a97e7904SMatthew Wilcox list_add(&shadow_nodes, &node->private_list); 1194a97e7904SMatthew Wilcox } else { 1195a97e7904SMatthew Wilcox if (!list_empty(&node->private_list)) 1196a97e7904SMatthew Wilcox list_del_init(&node->private_list); 1197a97e7904SMatthew Wilcox } 1198a97e7904SMatthew Wilcox } 1199a97e7904SMatthew Wilcox 1200a97e7904SMatthew Wilcox static noinline void shadow_remove(struct xarray *xa) 1201a97e7904SMatthew Wilcox { 1202a97e7904SMatthew Wilcox struct xa_node *node; 1203a97e7904SMatthew Wilcox 1204a97e7904SMatthew Wilcox xa_lock(xa); 1205a97e7904SMatthew Wilcox while ((node = list_first_entry_or_null(&shadow_nodes, 1206a97e7904SMatthew Wilcox struct xa_node, private_list))) { 1207a97e7904SMatthew Wilcox XA_STATE(xas, node->array, 0); 1208a97e7904SMatthew Wilcox XA_BUG_ON(xa, node->array != xa); 1209a97e7904SMatthew Wilcox list_del_init(&node->private_list); 1210a97e7904SMatthew Wilcox xas.xa_node = xa_parent_locked(node->array, node); 1211a97e7904SMatthew Wilcox xas.xa_offset = node->offset; 1212a97e7904SMatthew Wilcox xas.xa_shift = node->shift + XA_CHUNK_SHIFT; 1213a97e7904SMatthew Wilcox xas_set_update(&xas, test_update_node); 1214a97e7904SMatthew Wilcox xas_store(&xas, NULL); 1215a97e7904SMatthew Wilcox } 1216a97e7904SMatthew Wilcox xa_unlock(xa); 1217a97e7904SMatthew Wilcox } 1218a97e7904SMatthew Wilcox 1219a97e7904SMatthew 
Wilcox static noinline void check_workingset(struct xarray *xa, unsigned long index) 1220a97e7904SMatthew Wilcox { 1221a97e7904SMatthew Wilcox XA_STATE(xas, xa, index); 1222a97e7904SMatthew Wilcox xas_set_update(&xas, test_update_node); 1223a97e7904SMatthew Wilcox 1224a97e7904SMatthew Wilcox do { 1225a97e7904SMatthew Wilcox xas_lock(&xas); 1226a97e7904SMatthew Wilcox xas_store(&xas, xa_mk_value(0)); 1227a97e7904SMatthew Wilcox xas_next(&xas); 1228a97e7904SMatthew Wilcox xas_store(&xas, xa_mk_value(1)); 1229a97e7904SMatthew Wilcox xas_unlock(&xas); 1230a97e7904SMatthew Wilcox } while (xas_nomem(&xas, GFP_KERNEL)); 1231a97e7904SMatthew Wilcox 1232a97e7904SMatthew Wilcox XA_BUG_ON(xa, list_empty(&shadow_nodes)); 1233a97e7904SMatthew Wilcox 1234a97e7904SMatthew Wilcox xas_lock(&xas); 1235a97e7904SMatthew Wilcox xas_next(&xas); 1236a97e7904SMatthew Wilcox xas_store(&xas, &xas); 1237a97e7904SMatthew Wilcox XA_BUG_ON(xa, !list_empty(&shadow_nodes)); 1238a97e7904SMatthew Wilcox 1239a97e7904SMatthew Wilcox xas_store(&xas, xa_mk_value(2)); 1240a97e7904SMatthew Wilcox xas_unlock(&xas); 1241a97e7904SMatthew Wilcox XA_BUG_ON(xa, list_empty(&shadow_nodes)); 1242a97e7904SMatthew Wilcox 1243a97e7904SMatthew Wilcox shadow_remove(xa); 1244a97e7904SMatthew Wilcox XA_BUG_ON(xa, !list_empty(&shadow_nodes)); 1245a97e7904SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 1246a97e7904SMatthew Wilcox } 1247a97e7904SMatthew Wilcox 1248d6427f81SMatthew Wilcox /* 1249d6427f81SMatthew Wilcox * Check that the pointer / value / sibling entries are accounted the 1250d6427f81SMatthew Wilcox * way we expect them to be. 
/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		/* A multi-order pointer entry bumps ->count but not ->nr_values */
		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		/* A multi-order value entry counts in both ->count and ->nr_values */
		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		/* Erasing the value entry zeroes ->nr_values again */
		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}

/*
 * xa_destroy() must leave the array empty no matter what it held:
 * nothing, a single entry, an entry at ULONG_MAX, or a multi-index
 * entry.
 */
static noinline void check_destroy(struct xarray *xa)
{
	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

/* The XArray shared by all the checks below */
static DEFINE_XARRAY(array);

/*
 * Module entry point: run every check in sequence and report how many
 * of the XA_BUG_ON assertions passed.  Returns 0 only if all passed.
 */
static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_cmpxchg(&array);
	check_reserve(&array);
	check_multi_store(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);

	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}

/* Nothing to tear down; the tests run entirely at module init */
static void xarray_exit(void)
{
}

module_init(xarray_checks);
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_LICENSE("GPL");