xref: /linux/lib/test_xarray.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1ad3d6c72SMatthew Wilcox // SPDX-License-Identifier: GPL-2.0+
2ad3d6c72SMatthew Wilcox /*
3ad3d6c72SMatthew Wilcox  * test_xarray.c: Test the XArray API
4ad3d6c72SMatthew Wilcox  * Copyright (c) 2017-2018 Microsoft Corporation
5c44aa5e8SMatthew Wilcox (Oracle)  * Copyright (c) 2019-2020 Oracle
6ad3d6c72SMatthew Wilcox  * Author: Matthew Wilcox <willy@infradead.org>
7ad3d6c72SMatthew Wilcox  */
8ad3d6c72SMatthew Wilcox 
9ad3d6c72SMatthew Wilcox #include <linux/xarray.h>
10ad3d6c72SMatthew Wilcox #include <linux/module.h>
11ad3d6c72SMatthew Wilcox 
/* Counters maintained by the XA_BUG_ON() macro below. */
static unsigned int tests_run;
static unsigned int tests_passed;

/* Largest entry order worth testing; multi-index entries need XARRAY_MULTI. */
static const unsigned int order_limit =
		IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;
17bd40b17cSMatthew Wilcox (Oracle) 
#ifndef XA_DEBUG
# ifdef __KERNEL__
/* Stub: the real xa_dump() is only built when XA_DEBUG is defined. */
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
/*
 * Record a test result: count every check in tests_run; on failure print
 * the location, dump the array and the stack, but keep running so the
 * rest of the suite still executes.
 */
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif
34ad3d6c72SMatthew Wilcox 
xa_mk_index(unsigned long index)35b7677a13SMatthew Wilcox static void *xa_mk_index(unsigned long index)
36b7677a13SMatthew Wilcox {
37b7677a13SMatthew Wilcox 	return xa_mk_value(index & LONG_MAX);
38b7677a13SMatthew Wilcox }
39b7677a13SMatthew Wilcox 
/* Store the canonical value entry for @index; returns the old entry. */
static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	void *entry = xa_mk_index(index);

	return xa_store(xa, index, entry, gfp);
}
44ad3d6c72SMatthew Wilcox 
xa_insert_index(struct xarray * xa,unsigned long index)4512fd2aeeSMatthew Wilcox static void xa_insert_index(struct xarray *xa, unsigned long index)
4612fd2aeeSMatthew Wilcox {
4712fd2aeeSMatthew Wilcox 	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
4812fd2aeeSMatthew Wilcox 				GFP_KERNEL) != 0);
4912fd2aeeSMatthew Wilcox }
5012fd2aeeSMatthew Wilcox 
/* Allocate an id from the 32-bit limit and check it came back as @index. */
static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	u32 id;
	int err = xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b, gfp);

	XA_BUG_ON(xa, err != 0);
	XA_BUG_ON(xa, id != index);
}
59371c752dSMatthew Wilcox 
xa_erase_index(struct xarray * xa,unsigned long index)60ad3d6c72SMatthew Wilcox static void xa_erase_index(struct xarray *xa, unsigned long index)
61ad3d6c72SMatthew Wilcox {
62b7677a13SMatthew Wilcox 	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
6358d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
6458d6ea30SMatthew Wilcox }
6558d6ea30SMatthew Wilcox 
6658d6ea30SMatthew Wilcox /*
6758d6ea30SMatthew Wilcox  * If anyone needs this, please move it to xarray.c.  We have no current
6858d6ea30SMatthew Wilcox  * users outside the test suite because all current multislot users want
6958d6ea30SMatthew Wilcox  * to use the advanced API.
7058d6ea30SMatthew Wilcox  */
/*
 * Store @entry covering 1 << @order slots at @index, retrying the store
 * under the lock until xas_nomem() reports no further allocation is needed.
 * Returns the entry previously at @index.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	for (;;) {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
		if (!xas_nomem(&xas, gfp))
			break;
	}

	return curr;
}
8558d6ea30SMatthew Wilcox 
/*
 * Exercise xa_err(): a successful store or erase must extract as 0,
 * and (in userspace, where GFP_NOWAIT can fail) a failed store as -ENOMEM.
 */
static noinline void check_xa_err(struct xarray *xa)
{
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
// kills the test-suite :-(
//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}
101ad3d6c72SMatthew Wilcox 
/*
 * Exercise retry entries: a reader holding a stale xa_state sees
 * XA_RETRY_ENTRY after an erase, xas_retry()/xas_reset() restart the
 * walk correctly, and iteration can store over retry entries.
 */
static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	xa_erase_index(xa, 1);
	/* The erase left a retry entry visible through the stale state. */
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);
	rcu_read_unlock();

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	rcu_read_lock();
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	/* Force a restart and check the walk still finds the first entry. */
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}
146b803b428SMatthew Wilcox 
check_xa_load(struct xarray * xa)147ad3d6c72SMatthew Wilcox static noinline void check_xa_load(struct xarray *xa)
148ad3d6c72SMatthew Wilcox {
149ad3d6c72SMatthew Wilcox 	unsigned long i, j;
150ad3d6c72SMatthew Wilcox 
151ad3d6c72SMatthew Wilcox 	for (i = 0; i < 1024; i++) {
152ad3d6c72SMatthew Wilcox 		for (j = 0; j < 1024; j++) {
153ad3d6c72SMatthew Wilcox 			void *entry = xa_load(xa, j);
154ad3d6c72SMatthew Wilcox 			if (j < i)
155ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, xa_to_value(entry) != j);
156ad3d6c72SMatthew Wilcox 			else
157ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, entry);
158ad3d6c72SMatthew Wilcox 		}
159ad3d6c72SMatthew Wilcox 		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
160ad3d6c72SMatthew Wilcox 	}
161ad3d6c72SMatthew Wilcox 
162ad3d6c72SMatthew Wilcox 	for (i = 0; i < 1024; i++) {
163ad3d6c72SMatthew Wilcox 		for (j = 0; j < 1024; j++) {
164ad3d6c72SMatthew Wilcox 			void *entry = xa_load(xa, j);
165ad3d6c72SMatthew Wilcox 			if (j >= i)
166ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, xa_to_value(entry) != j);
167ad3d6c72SMatthew Wilcox 			else
168ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, entry);
169ad3d6c72SMatthew Wilcox 		}
170ad3d6c72SMatthew Wilcox 		xa_erase_index(xa, i);
171ad3d6c72SMatthew Wilcox 	}
172ad3d6c72SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
173ad3d6c72SMatthew Wilcox }
174ad3d6c72SMatthew Wilcox 
/*
 * Mark semantics at a single index: marks cannot be set on empty slots,
 * survive on stored entries, are independent of each other, are cleared
 * by erasing, and a multi-index entry takes the union of the marks of
 * the entries it replaces.  @index must be a multiple of 4.
 */
static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		/* Two marked neighbours inside the range, one just outside. */
		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_2);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			/* Union of MARK_0 and MARK_2; MARK_1 was never set. */
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		/* The entry outside the range must not have gained marks. */
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}
2539b89a035SMatthew Wilcox 
check_xa_mark_2(struct xarray * xa)254adb9d9c4SMatthew Wilcox static noinline void check_xa_mark_2(struct xarray *xa)
255adb9d9c4SMatthew Wilcox {
256adb9d9c4SMatthew Wilcox 	XA_STATE(xas, xa, 0);
257adb9d9c4SMatthew Wilcox 	unsigned long index;
258adb9d9c4SMatthew Wilcox 	unsigned int count = 0;
259adb9d9c4SMatthew Wilcox 	void *entry;
260adb9d9c4SMatthew Wilcox 
261adb9d9c4SMatthew Wilcox 	xa_store_index(xa, 0, GFP_KERNEL);
262adb9d9c4SMatthew Wilcox 	xa_set_mark(xa, 0, XA_MARK_0);
263adb9d9c4SMatthew Wilcox 	xas_lock(&xas);
264adb9d9c4SMatthew Wilcox 	xas_load(&xas);
265adb9d9c4SMatthew Wilcox 	xas_init_marks(&xas);
266adb9d9c4SMatthew Wilcox 	xas_unlock(&xas);
267adb9d9c4SMatthew Wilcox 	XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);
268adb9d9c4SMatthew Wilcox 
269adb9d9c4SMatthew Wilcox 	for (index = 3500; index < 4500; index++) {
270adb9d9c4SMatthew Wilcox 		xa_store_index(xa, index, GFP_KERNEL);
271adb9d9c4SMatthew Wilcox 		xa_set_mark(xa, index, XA_MARK_0);
272adb9d9c4SMatthew Wilcox 	}
273adb9d9c4SMatthew Wilcox 
274adb9d9c4SMatthew Wilcox 	xas_reset(&xas);
275adb9d9c4SMatthew Wilcox 	rcu_read_lock();
276adb9d9c4SMatthew Wilcox 	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
277adb9d9c4SMatthew Wilcox 		count++;
278adb9d9c4SMatthew Wilcox 	rcu_read_unlock();
279adb9d9c4SMatthew Wilcox 	XA_BUG_ON(xa, count != 1000);
280adb9d9c4SMatthew Wilcox 
281adb9d9c4SMatthew Wilcox 	xas_lock(&xas);
282adb9d9c4SMatthew Wilcox 	xas_for_each(&xas, entry, ULONG_MAX) {
283adb9d9c4SMatthew Wilcox 		xas_init_marks(&xas);
284adb9d9c4SMatthew Wilcox 		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
285adb9d9c4SMatthew Wilcox 		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
286adb9d9c4SMatthew Wilcox 	}
287adb9d9c4SMatthew Wilcox 	xas_unlock(&xas);
288adb9d9c4SMatthew Wilcox 
289adb9d9c4SMatthew Wilcox 	xa_destroy(xa);
290adb9d9c4SMatthew Wilcox }
291adb9d9c4SMatthew Wilcox 
/*
 * A mark set through one index of a multi-index entry must be found by
 * a marked iteration starting from a different index inside that entry,
 * and the entry must be reported exactly once.
 */
static noinline void check_xa_mark_3(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	XA_STATE(xas, xa, 0x41);
	void *entry;
	int count = 0;

	/* Order-2 entry covering 0x40-0x43; mark via interior index 0x41. */
	xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
	xa_set_mark(xa, 0x41, XA_MARK_0);

	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
		count++;
		XA_BUG_ON(xa, entry != xa_mk_index(0x40));
	}
	XA_BUG_ON(xa, count != 1);
	rcu_read_unlock();
	xa_destroy(xa);
#endif
}
31204e9e9bbSMatthew Wilcox (Oracle) 
check_xa_mark(struct xarray * xa)3139b89a035SMatthew Wilcox static noinline void check_xa_mark(struct xarray *xa)
3149b89a035SMatthew Wilcox {
3159b89a035SMatthew Wilcox 	unsigned long index;
3169b89a035SMatthew Wilcox 
3179b89a035SMatthew Wilcox 	for (index = 0; index < 16384; index += 4)
3189b89a035SMatthew Wilcox 		check_xa_mark_1(xa, index);
319adb9d9c4SMatthew Wilcox 
320adb9d9c4SMatthew Wilcox 	check_xa_mark_2(xa);
32104e9e9bbSMatthew Wilcox (Oracle) 	check_xa_mark_3(xa);
3229b89a035SMatthew Wilcox }
3239b89a035SMatthew Wilcox 
/*
 * Check that the tree shrinks when its last interesting entry is removed:
 * the abandoned node is poisoned with retry entries, and storing/erasing
 * at ULONG_MAX grows and re-shrinks the tree back to the same head node.
 */
static noinline void check_xa_shrink(struct xarray *xa)
{
	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	/* After the shrink the xa_state is out of bounds ... */
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	/* ... and the old node's slot holds a retry entry. */
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 0; order < max_order; order++) {
		unsigned long max = (1UL << order) - 1;
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		rcu_read_lock();
		node = xa_head(xa);
		rcu_read_unlock();
		/* Growing to ULONG_MAX must install a new head node ... */
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
				NULL);
		rcu_read_lock();
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		xa_erase_index(xa, ULONG_MAX);
		/* ... and erasing it must shrink back to the old head. */
		XA_BUG_ON(xa, xa->xa_head != node);
		xa_erase_index(xa, 0);
	}
}
37258d6ea30SMatthew Wilcox 
check_insert(struct xarray * xa)37312fd2aeeSMatthew Wilcox static noinline void check_insert(struct xarray *xa)
37412fd2aeeSMatthew Wilcox {
37512fd2aeeSMatthew Wilcox 	unsigned long i;
37612fd2aeeSMatthew Wilcox 
37712fd2aeeSMatthew Wilcox 	for (i = 0; i < 1024; i++) {
37812fd2aeeSMatthew Wilcox 		xa_insert_index(xa, i);
37912fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
38012fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
38112fd2aeeSMatthew Wilcox 		xa_erase_index(xa, i);
38212fd2aeeSMatthew Wilcox 	}
38312fd2aeeSMatthew Wilcox 
38412fd2aeeSMatthew Wilcox 	for (i = 10; i < BITS_PER_LONG; i++) {
38512fd2aeeSMatthew Wilcox 		xa_insert_index(xa, 1UL << i);
38612fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
38712fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
38812fd2aeeSMatthew Wilcox 		xa_erase_index(xa, 1UL << i);
38912fd2aeeSMatthew Wilcox 
39012fd2aeeSMatthew Wilcox 		xa_insert_index(xa, (1UL << i) - 1);
39112fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
39212fd2aeeSMatthew Wilcox 		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
39312fd2aeeSMatthew Wilcox 		xa_erase_index(xa, (1UL << i) - 1);
39412fd2aeeSMatthew Wilcox 	}
39512fd2aeeSMatthew Wilcox 
39612fd2aeeSMatthew Wilcox 	xa_insert_index(xa, ~0UL);
39712fd2aeeSMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
39812fd2aeeSMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
39912fd2aeeSMatthew Wilcox 	xa_erase_index(xa, ~0UL);
40012fd2aeeSMatthew Wilcox 
40112fd2aeeSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
40212fd2aeeSMatthew Wilcox }
40312fd2aeeSMatthew Wilcox 
/*
 * xa_cmpxchg() only replaces when the old value matches, returns the
 * current entry either way, and xa_insert() reports -EBUSY on occupied
 * slots.
 */
static noinline void check_cmpxchg(struct xarray *xa)
{
	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
	/* Mismatched old value: entry is unchanged, current value returned. */
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
	/* Matching old value: exchange happens. */
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
	/* cmpxchg against an empty slot. */
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
	xa_erase_index(xa, 12345678);
	xa_erase_index(xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
}
42541aec91fSMatthew Wilcox 
/*
 * xa_cmpxchg() to NULL on a multi-index entry splits it: the replaced
 * range loses both entry and order, while an adjacent multi-index entry
 * keeps its node and order intact.
 */
static noinline void check_cmpxchg_order(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	void *FIVE = xa_mk_value(5);
	unsigned int i, order = 3;

	XA_BUG_ON(xa, xa_store_order(xa, 0, order, FIVE, GFP_KERNEL));

	/* Check entry FIVE has the order saved */
	XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != order);

	/* Check all the tied indexes have the same entry and order */
	for (i = 0; i < (1 << order); i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	/* Ensure that nothing is stored at index '1 << order' */
	XA_BUG_ON(xa, xa_load(xa, 1 << order) != NULL);

	/*
	 * Additionally, keep the node information and the order at
	 * '1 << order'
	 */
	XA_BUG_ON(xa, xa_store_order(xa, 1 << order, order, FIVE, GFP_KERNEL));
	/*
	 * NOTE(review): this bound excludes the last tied index
	 * (2 * (1 << order) - 1); possibly an off-by-one in the test's
	 * coverage — TODO confirm whether the final slot was meant to be
	 * checked too.
	 */
	for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	/* Conditionally replace FIVE entry at index '0' with NULL */
	XA_BUG_ON(xa, xa_cmpxchg(xa, 0, FIVE, NULL, GFP_KERNEL) != FIVE);

	/* Verify the order is lost at FIVE (and old) entries */
	XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != 0);

	/* Verify the order and entries are lost in all the tied indexes */
	for (i = 0; i < (1 << order); i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != NULL);
		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
	}

	/* Verify node and order are kept at '1 << order' */
	for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
478e777ae44SDaniel Gomez 
/*
 * Reserved entries: invisible to xa_load() but keep the array non-empty,
 * appear as ZERO to cmpxchg, as busy to xa_insert, are skipped by
 * iteration, and (for allocating arrays) their index becomes allocatable
 * again once released.
 */
static noinline void check_reserve(struct xarray *xa)
{
	void *entry;
	unsigned long index;
	int count;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as ZERO */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* xa_insert treats it as busy */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
			-EBUSY);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
	xa_store_index(xa, 7, GFP_KERNEL);

	/* The reserved index 6 must be skipped by the iteration. */
	count = 0;
	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, index != 5 && index != 7);
		count++;
	}
	XA_BUG_ON(xa, count != 2);

	/* If we free a reserved entry, we should be able to allocate it */
	if (xa->xa_flags & XA_FLAGS_ALLOC) {
		u32 id;

		/* 6 is still reserved, so 8 is the first free id in 5-10. */
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 8);

		xa_release(xa, 6);
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 6);
	}

	xa_destroy(xa);
}
5449f14d4f1SMatthew Wilcox 
/*
 * Store runs of entries of varying length and position, then erase them
 * through xas_store(NULL) during iteration, checking each entry is seen
 * exactly once and the array ends up empty.
 */
static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		/* Populate indices i .. 2*i+16. */
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_index(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		/* Also store at ULONG_MAX, then remove it again below. */
		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		xas_lock(&xas);
		xas_store(&xas, NULL);

		/* Erase every remaining entry while iterating. */
		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_index(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}
582b803b428SMatthew Wilcox 
5834f06d630SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI
/*
 * check_multi_store_1() - Store and replace a single multi-index entry.
 *
 * Store an order-@order entry covering @index, verify it is visible at
 * both ends of its range [min, max) and invisible just outside it, then
 * replace it with a single xas_store() and re-verify before erasing.
 */
static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	/* Range occupied by the multi-index entry: [min, max) */
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	/* Replacing through xas_store() returns the old entry ... */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
	xas_unlock(&xas);
	/* ... and the new value covers the entire range */
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xa_erase_index(xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
}
6084f06d630SMatthew Wilcox 
/*
 * check_multi_store_2() - Replace then delete a multi-index entry in place.
 *
 * Store an order-@order entry covering @index, overwrite it via
 * xas_store() (checking the old entry is returned and that xa_index is
 * left unchanged by the store), then store NULL to remove it.
 */
static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	/* Storing over a multi-index entry must not modify xa_index */
	XA_BUG_ON(xa, xas.xa_index != index);
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}
6224f145cd6SMatthew Wilcox 
/*
 * check_multi_store_3() - A multi-index entry is seen once per iteration.
 *
 * Store one order-@order entry at @index and verify that a walk from
 * index 0 yields it exactly once, and that a second walk starting from
 * index + 1 (within the entry's range for the orders used by the caller)
 * also yields it exactly once.  @n accumulates across both walks.
 */
static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	/* Restart inside the entry's range; it must not be seen twice */
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
6484f06d630SMatthew Wilcox #endif
6494f06d630SMatthew Wilcox 
/*
 * check_multi_store() - Test multi-index (multi-order) stores.
 *
 * Verifies basic multi-index semantics: a single entry is visible at
 * every index of its range, adjacent stores do not disturb it, a later
 * store over the same range replaces it, a NULL multi-index store erases
 * everything in range, and overlapping stores of different orders leave
 * the most recent entry visible.  Also drives the check_multi_store_[123]
 * helpers over a range of orders.  No-op unless CONFIG_XARRAY_MULTI.
 */
static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	/* Highest usable order depends on the width of unsigned long */
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	/* Peeking at node internals requires the RCU read lock */
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Store an order-i entry then an order-j entry, both at 0; the
	 * later (order-j) store wins wherever the two overlap.
	 */
	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				/* Probe the last index of each order-k range */
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	for (i = 0; i < 20; i++) {
		check_multi_store_1(xa, 200, i);
		check_multi_store_1(xa, 0, i);
		check_multi_store_1(xa, (1UL << i) + 1, i);
	}
	check_multi_store_2(xa, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(xa, 0, i);
		check_multi_store_3(xa, 1UL << i, i);
	}
#endif
}
72958d6ea30SMatthew Wilcox 
730a60cc288SLuis Chamberlain #ifdef CONFIG_XARRAY_MULTI
731a60cc288SLuis Chamberlain /* mimics page cache __filemap_add_folio() */
/*
 * check_xa_multi_store_adv_add() - Store @p as an order-@order entry at
 * @index, retrying on memory pressure.  Mimics the page cache's
 * __filemap_add_folio().
 */
static noinline void check_xa_multi_store_adv_add(struct xarray *xa,
						  unsigned long index,
						  unsigned int order,
						  void *p)
{
	XA_STATE(xas, xa, index);
	unsigned int nrpages = 1UL << order;

	/* users are responsible for index alignment to the order when adding */
	XA_BUG_ON(xa, index & (nrpages - 1));

	xas_set_order(&xas, index, order);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, p);
		xas_unlock_irq(&xas);
		/*
		 * In our selftest case the only failure we can expect is for
		 * there not to be enough memory as we're not mimicking the
		 * entire page cache, so verify that's the only error we can run
		 * into here. The xas_nomem() which follows will ensure to fix
		 * that condition for us so we can carry on with the loop.
		 */
		XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM);
	} while (xas_nomem(&xas, GFP_KERNEL));

	/* On success there must be no pending error and @p is loadable */
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, xa_load(xa, index) != p);
}
762a60cc288SLuis Chamberlain 
763a60cc288SLuis Chamberlain /* mimics page_cache_delete() */
/*
 * check_xa_multi_store_adv_del_entry() - Remove the order-@order entry at
 * @index and clear its marks.  Mimics page_cache_delete().
 *
 * NOTE(review): called with the xa_lock held by
 * check_xa_multi_store_adv_delete(); it takes no lock itself.
 */
static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa,
							unsigned long index,
							unsigned int order)
{
	XA_STATE(xas, xa, index);

	xas_set_order(&xas, index, order);
	xas_store(&xas, NULL);
	xas_init_marks(&xas);
}
774a60cc288SLuis Chamberlain 
/*
 * check_xa_multi_store_adv_delete() - Locked wrapper around
 * check_xa_multi_store_adv_del_entry(): delete the order-@order entry at
 * @index under xa_lock_irq.
 */
static noinline void check_xa_multi_store_adv_delete(struct xarray *xa,
						     unsigned long index,
						     unsigned int order)
{
	xa_lock_irq(xa);
	check_xa_multi_store_adv_del_entry(xa, index, order);
	xa_unlock_irq(xa);
}
783a60cc288SLuis Chamberlain 
784a60cc288SLuis Chamberlain /* mimics page cache filemap_get_entry() */
test_get_entry(struct xarray * xa,unsigned long index)785a60cc288SLuis Chamberlain static noinline void *test_get_entry(struct xarray *xa, unsigned long index)
786a60cc288SLuis Chamberlain {
787a60cc288SLuis Chamberlain 	XA_STATE(xas, xa, index);
788a60cc288SLuis Chamberlain 	void *p;
789a60cc288SLuis Chamberlain 	static unsigned int loops = 0;
790a60cc288SLuis Chamberlain 
791a60cc288SLuis Chamberlain 	rcu_read_lock();
792a60cc288SLuis Chamberlain repeat:
793a60cc288SLuis Chamberlain 	xas_reset(&xas);
794a60cc288SLuis Chamberlain 	p = xas_load(&xas);
795a60cc288SLuis Chamberlain 	if (xas_retry(&xas, p))
796a60cc288SLuis Chamberlain 		goto repeat;
797a60cc288SLuis Chamberlain 	rcu_read_unlock();
798a60cc288SLuis Chamberlain 
799a60cc288SLuis Chamberlain 	/*
800a60cc288SLuis Chamberlain 	 * This is not part of the page cache, this selftest is pretty
801a60cc288SLuis Chamberlain 	 * aggressive and does not want to trust the xarray API but rather
802a60cc288SLuis Chamberlain 	 * test it, and for order 20 (4 GiB block size) we can loop over
803a60cc288SLuis Chamberlain 	 * over a million entries which can cause a soft lockup. Page cache
804a60cc288SLuis Chamberlain 	 * APIs won't be stupid, proper page cache APIs loop over the proper
805a60cc288SLuis Chamberlain 	 * order so when using a larger order we skip shared entries.
806a60cc288SLuis Chamberlain 	 */
807a60cc288SLuis Chamberlain 	if (++loops % XA_CHECK_SCHED == 0)
808a60cc288SLuis Chamberlain 		schedule();
809a60cc288SLuis Chamberlain 
810a60cc288SLuis Chamberlain 	return p;
811a60cc288SLuis Chamberlain }
812a60cc288SLuis Chamberlain 
/* Arbitrary pointer payloads stored in place of page-cache folios below */
static unsigned long some_val = 0xdeadbeef;
static unsigned long some_val_2 = 0xdeaddead;
815a60cc288SLuis Chamberlain 
816a60cc288SLuis Chamberlain /* mimics the page cache usage */
/*
 * check_xa_multi_store_adv() - Page-cache-style scenario test.
 *
 * Using a byte position @pos and an @order, store pointer entries at the
 * naturally-aligned base index and its successors, verifying loads across
 * the whole range, replacement of an order-0 entry by a higher-order one,
 * and that ranges left unpopulated ("holes") read back as NULL.
 * Mimics the page cache usage patterns.
 */
static noinline void check_xa_multi_store_adv(struct xarray *xa,
					      unsigned long pos,
					      unsigned int order)
{
	unsigned int nrpages = 1UL << order;
	unsigned long index, base, next_index, next_next_index;
	unsigned int i;

	/* Translate the byte position to a page index, as the page cache does */
	index = pos >> PAGE_SHIFT;
	base = round_down(index, nrpages);
	next_index = round_down(base + nrpages, nrpages);
	next_next_index = round_down(next_index + nrpages, nrpages);

	check_xa_multi_store_adv_add(xa, base, order, &some_val);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val);

	XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL);

	/* Use order 0 for the next item */
	check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2);
	XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2);

	/* Remove the next item */
	check_xa_multi_store_adv_delete(xa, next_index, 0);

	/* Now use order for a new pointer */
	check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);

	check_xa_multi_store_adv_delete(xa, next_index, order);
	check_xa_multi_store_adv_delete(xa, base, order);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* starting fresh again */

	/* let's test some holes now */

	/* hole at base and next_next */
	check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL);

	check_xa_multi_store_adv_delete(xa, next_index, order);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* hole at base and next */

	check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2);

	check_xa_multi_store_adv_delete(xa, next_next_index, order);
	XA_BUG_ON(xa, !xa_empty(xa));
}
889a60cc288SLuis Chamberlain #endif
890a60cc288SLuis Chamberlain 
check_multi_store_advanced(struct xarray * xa)891a60cc288SLuis Chamberlain static noinline void check_multi_store_advanced(struct xarray *xa)
892a60cc288SLuis Chamberlain {
893a60cc288SLuis Chamberlain #ifdef CONFIG_XARRAY_MULTI
894a60cc288SLuis Chamberlain 	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
895a60cc288SLuis Chamberlain 	unsigned long end = ULONG_MAX/2;
896a60cc288SLuis Chamberlain 	unsigned long pos, i;
897a60cc288SLuis Chamberlain 
898a60cc288SLuis Chamberlain 	/*
899a60cc288SLuis Chamberlain 	 * About 117 million tests below.
900a60cc288SLuis Chamberlain 	 */
901a60cc288SLuis Chamberlain 	for (pos = 7; pos < end; pos = (pos * pos) + 564) {
902a60cc288SLuis Chamberlain 		for (i = 0; i < max_order; i++) {
903a60cc288SLuis Chamberlain 			check_xa_multi_store_adv(xa, pos, i);
904a60cc288SLuis Chamberlain 			check_xa_multi_store_adv(xa, pos + 157, i);
905a60cc288SLuis Chamberlain 		}
906a60cc288SLuis Chamberlain 	}
907a60cc288SLuis Chamberlain #endif
908a60cc288SLuis Chamberlain }
909a60cc288SLuis Chamberlain 
/*
 * check_xa_alloc_1() - Basic ID allocation behaviour.
 *
 * @base is the lowest ID the array hands out (0 for xa0, 1 for xa1).
 * Checks that allocation starts at @base, that erase/destroy make @base
 * available again, that stored (non-allocated) entries mark their index
 * used, and that allocation fails with -EBUSY once the limit is full or
 * when the limit itself is unsatisfiable.
 */
static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
	int i;
	u32 id;

	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
		xa_alloc_index(xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
		xa_erase_index(xa, i);
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* The next assigned ID should be base+1 */
	xa_alloc_index(xa, base + 1, GFP_KERNEL);
	xa_erase_index(xa, base + 1);

	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
	xa_alloc_index(xa, base + 2, GFP_KERNEL);

	/* If we then erase base, it should be free */
	xa_erase_index(xa, base);
	xa_alloc_index(xa, base, GFP_KERNEL);

	xa_erase_index(xa, base + 1);
	xa_erase_index(xa, base + 2);

	/* Many sequential allocations hand out sequential IDs */
	for (i = 1; i < 5000; i++) {
		xa_alloc_index(xa, base + i, GFP_KERNEL);
	}

	xa_destroy(xa);

	/* Check that we fail properly at the limit of allocation */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xfffffffeU);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xffffffffU);
	/* The limit is now exhausted: -EBUSY, and @id is left untouched */
	id = 3;
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, id != 3);
	xa_destroy(xa);

	/* An unsatisfiable limit (min > max) fails with -EBUSY */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	xa_erase_index(xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
}
9843ccaf57aSMatthew Wilcox 
/*
 * check_xa_alloc_2() - Allocation of NULL entries and interaction of
 * xa_alloc() with plain stores and erases.
 *
 * Allocating a NULL entry still consumes an ID and makes the array
 * non-empty; erased/replaced indices become available for re-allocation.
 */
static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* Allocate and free a NULL and check xa_empty() behaves */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ditto, but check destroy instead of erase */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ten consecutive allocations yield ten consecutive IDs */
	for (i = base; i < base + 10; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}

	/* Storing over or erasing allocated NULL entries works as normal */
	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
	/* Index 5 was just erased, so it is the next free ID */
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 5);

	xa_for_each(xa, index, entry) {
		xa_erase_index(xa, index);
	}

	/* Erase the remaining NULL entries one at a time */
	for (i = base; i < base + 9; i++) {
		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
		XA_BUG_ON(xa, xa_empty(xa));
	}
	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	xa_destroy(xa);
}
1035a3e4d3f9SMatthew Wilcox 
/*
 * check_xa_alloc_3() - Cyclic ID allocation with xa_alloc_cyclic().
 *
 * Verifies that allocation continues from *next, wraps back to the
 * bottom of the limit when the top is reached (xa_alloc_cyclic()
 * returning 1 signals a wrap), and that wrap-around at UINT_MAX lands
 * on @base.
 */
static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	/* Jump near the top of the limit and allocate from there */
	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(xa, 0x3ffd);
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Allocate across the wrap point: return value is 1 when wrapping */
	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != (id == 1));
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(xa, base);
	xa_erase_index(xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 1);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}
10862fa044e5SMatthew Wilcox 
/* Allocating XArrays: xa0 hands out IDs from 0, xa1 from 1 (see callers) */
static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);
10893ccaf57aSMatthew Wilcox 
/* Run every ID-allocation test against both the base-0 and base-1 arrays */
static noinline void check_xa_alloc(void)
{
	check_xa_alloc_1(&xa0, 0);
	check_xa_alloc_1(&xa1, 1);
	check_xa_alloc_2(&xa0, 0);
	check_xa_alloc_2(&xa1, 1);
	check_xa_alloc_3(&xa0, 0);
	check_xa_alloc_3(&xa1, 1);
}
1099371c752dSMatthew Wilcox 
/*
 * __check_store_iter() - Replace conflicting entries with one multi-index
 * entry using xas_for_each_conflict().
 *
 * Expects exactly @present value entries already inside
 * [start, start + (1 << order)); each conflict must be a value entry
 * within that range.  After the store, the new entry must be visible at
 * both ends of the range, and is then erased.
 */
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		/* The lock was dropped for allocation; recount conflicts */
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}
11284e99d4e9SMatthew Wilcox 
check_store_iter(struct xarray * xa)11294e99d4e9SMatthew Wilcox static noinline void check_store_iter(struct xarray *xa)
11304e99d4e9SMatthew Wilcox {
11314e99d4e9SMatthew Wilcox 	unsigned int i, j;
11324e99d4e9SMatthew Wilcox 	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
11334e99d4e9SMatthew Wilcox 
11344e99d4e9SMatthew Wilcox 	for (i = 0; i < max_order; i++) {
11354e99d4e9SMatthew Wilcox 		unsigned int min = 1 << i;
11364e99d4e9SMatthew Wilcox 		unsigned int max = (2 << i) - 1;
11374e99d4e9SMatthew Wilcox 		__check_store_iter(xa, 0, i, 0);
11384e99d4e9SMatthew Wilcox 		XA_BUG_ON(xa, !xa_empty(xa));
11394e99d4e9SMatthew Wilcox 		__check_store_iter(xa, min, i, 0);
11404e99d4e9SMatthew Wilcox 		XA_BUG_ON(xa, !xa_empty(xa));
11414e99d4e9SMatthew Wilcox 
11424e99d4e9SMatthew Wilcox 		xa_store_index(xa, min, GFP_KERNEL);
11434e99d4e9SMatthew Wilcox 		__check_store_iter(xa, min, i, 1);
11444e99d4e9SMatthew Wilcox 		XA_BUG_ON(xa, !xa_empty(xa));
11454e99d4e9SMatthew Wilcox 		xa_store_index(xa, max, GFP_KERNEL);
11464e99d4e9SMatthew Wilcox 		__check_store_iter(xa, min, i, 1);
11474e99d4e9SMatthew Wilcox 		XA_BUG_ON(xa, !xa_empty(xa));
11484e99d4e9SMatthew Wilcox 
11494e99d4e9SMatthew Wilcox 		for (j = 0; j < min; j++)
11504e99d4e9SMatthew Wilcox 			xa_store_index(xa, j, GFP_KERNEL);
11514e99d4e9SMatthew Wilcox 		__check_store_iter(xa, 0, i, min);
11524e99d4e9SMatthew Wilcox 		XA_BUG_ON(xa, !xa_empty(xa));
11534e99d4e9SMatthew Wilcox 		for (j = 0; j < min; j++)
11544e99d4e9SMatthew Wilcox 			xa_store_index(xa, min + j, GFP_KERNEL);
11554e99d4e9SMatthew Wilcox 		__check_store_iter(xa, min, i, min);
11564e99d4e9SMatthew Wilcox 		XA_BUG_ON(xa, !xa_empty(xa));
11574e99d4e9SMatthew Wilcox 	}
11584e99d4e9SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI
11594e99d4e9SMatthew Wilcox 	xa_store_index(xa, 63, GFP_KERNEL);
11604e99d4e9SMatthew Wilcox 	xa_store_index(xa, 65, GFP_KERNEL);
11614e99d4e9SMatthew Wilcox 	__check_store_iter(xa, 64, 2, 1);
11624e99d4e9SMatthew Wilcox 	xa_erase_index(xa, 63);
11634e99d4e9SMatthew Wilcox #endif
11644e99d4e9SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
11654e99d4e9SMatthew Wilcox }
11664e99d4e9SMatthew Wilcox 
/*
 * Check xa_find()/xa_find_after() around a multi-order entry at 3 << order
 * followed by single entries at 4 << order and (4 << order) + 1.
 */
static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long multi = 3 << order;
	unsigned long next = 4 << order;
	unsigned long index;

	xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);

	/* Searching from 0 finds the multi-order entry ... */
	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, index != multi);
	/* ... as does searching from inside its range. */
	index = multi + 1;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, (index < multi) || (index >= next));
	/* xa_find_after() must skip the whole multi-order entry. */
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(next));
	XA_BUG_ON(xa, index != next);
	/* A failed search must leave the cursor untouched. */
	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
	XA_BUG_ON(xa, index != next);

	xa_erase_index(xa, multi);
	xa_erase_index(xa, next);
	xa_erase_index(xa, next + 1);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
1198b803b428SMatthew Wilcox 
/*
 * Iterate with an xa_state positioned inside a multi-order entry while
 * that entry is erased underneath the iteration; the iterator must cope
 * with the entry disappearing and the array must end up empty.
 */
static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			/* Start the walk at each offset inside the entry. */
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				/* Erase the entry we are standing on. */
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}
1222b803b428SMatthew Wilcox 
check_multi_find_3(struct xarray * xa)1223bd40b17cSMatthew Wilcox (Oracle) static noinline void check_multi_find_3(struct xarray *xa)
1224bd40b17cSMatthew Wilcox (Oracle) {
1225bd40b17cSMatthew Wilcox (Oracle) 	unsigned int order;
1226bd40b17cSMatthew Wilcox (Oracle) 
1227bd40b17cSMatthew Wilcox (Oracle) 	for (order = 5; order < order_limit; order++) {
1228bd40b17cSMatthew Wilcox (Oracle) 		unsigned long index = 1UL << (order - 5);
1229bd40b17cSMatthew Wilcox (Oracle) 
1230bd40b17cSMatthew Wilcox (Oracle) 		XA_BUG_ON(xa, !xa_empty(xa));
1231bd40b17cSMatthew Wilcox (Oracle) 		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
1232bd40b17cSMatthew Wilcox (Oracle) 		XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
1233bd40b17cSMatthew Wilcox (Oracle) 		xa_erase_index(xa, 0);
1234bd40b17cSMatthew Wilcox (Oracle) 	}
1235bd40b17cSMatthew Wilcox (Oracle) }
1236bd40b17cSMatthew Wilcox (Oracle) 
/*
 * With entries present and marked at exactly j and i (j < i), a search
 * starting at any k must find j first (for k <= j), then i (for k <= i),
 * then nothing.  Checked for every pair (j, i) in 0..99 and every start
 * index, for both XA_PRESENT and XA_MARK_0 searches.
 */
static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				/* Search by presence ... */
				void *entry = xa_find(xa, &index, ULONG_MAX,
								XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				/* ... and then by mark. */
				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
								XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			/* Erasing j clears its mark but leaves i marked. */
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}
12848229706eSMatthew Wilcox 
check_find_2(struct xarray * xa)12858229706eSMatthew Wilcox static noinline void check_find_2(struct xarray *xa)
12868229706eSMatthew Wilcox {
12878229706eSMatthew Wilcox 	void *entry;
12884a31896cSMatthew Wilcox 	unsigned long i, j, index;
12898229706eSMatthew Wilcox 
12904a31896cSMatthew Wilcox 	xa_for_each(xa, index, entry) {
12918229706eSMatthew Wilcox 		XA_BUG_ON(xa, true);
12928229706eSMatthew Wilcox 	}
12938229706eSMatthew Wilcox 
12948229706eSMatthew Wilcox 	for (i = 0; i < 1024; i++) {
12958229706eSMatthew Wilcox 		xa_store_index(xa, index, GFP_KERNEL);
12968229706eSMatthew Wilcox 		j = 0;
12974a31896cSMatthew Wilcox 		xa_for_each(xa, index, entry) {
1298b7677a13SMatthew Wilcox 			XA_BUG_ON(xa, xa_mk_index(index) != entry);
12998229706eSMatthew Wilcox 			XA_BUG_ON(xa, index != j++);
13008229706eSMatthew Wilcox 		}
13018229706eSMatthew Wilcox 	}
13028229706eSMatthew Wilcox 
13038229706eSMatthew Wilcox 	xa_destroy(xa);
13048229706eSMatthew Wilcox }
13058229706eSMatthew Wilcox 
/*
 * When a marked search starts at an index j beyond the search maximum k,
 * the walk must terminate with the xa_state left in the XAS_RESTART
 * state.  Checked while the marked population grows one entry per pass.
 */
static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				/* Walk to completion; result is unused. */
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		/* Grow the marked population by one each outer pass. */
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}
133048483614SMatthew Wilcox 
check_find_4(struct xarray * xa)1331430f24f9SMatthew Wilcox (Oracle) static noinline void check_find_4(struct xarray *xa)
1332430f24f9SMatthew Wilcox (Oracle) {
1333430f24f9SMatthew Wilcox (Oracle) 	unsigned long index = 0;
1334430f24f9SMatthew Wilcox (Oracle) 	void *entry;
1335430f24f9SMatthew Wilcox (Oracle) 
1336430f24f9SMatthew Wilcox (Oracle) 	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
1337430f24f9SMatthew Wilcox (Oracle) 
1338430f24f9SMatthew Wilcox (Oracle) 	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
1339430f24f9SMatthew Wilcox (Oracle) 	XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
1340430f24f9SMatthew Wilcox (Oracle) 
1341430f24f9SMatthew Wilcox (Oracle) 	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
1342430f24f9SMatthew Wilcox (Oracle) 	XA_BUG_ON(xa, entry);
1343430f24f9SMatthew Wilcox (Oracle) 
1344430f24f9SMatthew Wilcox (Oracle) 	xa_erase_index(xa, ULONG_MAX);
1345430f24f9SMatthew Wilcox (Oracle) }
1346430f24f9SMatthew Wilcox (Oracle) 
check_find(struct xarray * xa)13478229706eSMatthew Wilcox static noinline void check_find(struct xarray *xa)
13488229706eSMatthew Wilcox {
134919c30f4dSMatthew Wilcox (Oracle) 	unsigned i;
135019c30f4dSMatthew Wilcox (Oracle) 
13518229706eSMatthew Wilcox 	check_find_1(xa);
13528229706eSMatthew Wilcox 	check_find_2(xa);
135348483614SMatthew Wilcox 	check_find_3(xa);
1354430f24f9SMatthew Wilcox (Oracle) 	check_find_4(xa);
135519c30f4dSMatthew Wilcox (Oracle) 
135619c30f4dSMatthew Wilcox (Oracle) 	for (i = 2; i < 10; i++)
135719c30f4dSMatthew Wilcox (Oracle) 		check_multi_find_1(xa, i);
1358b803b428SMatthew Wilcox 	check_multi_find_2(xa);
1359bd40b17cSMatthew Wilcox (Oracle) 	check_multi_find_3(xa);
1360b803b428SMatthew Wilcox }
1361b803b428SMatthew Wilcox 
1362e21a2955SMatthew Wilcox /* See find_swap_entry() in mm/shmem.c */
xa_find_entry(struct xarray * xa,void * item)1363e21a2955SMatthew Wilcox static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
1364e21a2955SMatthew Wilcox {
1365e21a2955SMatthew Wilcox 	XA_STATE(xas, xa, 0);
1366e21a2955SMatthew Wilcox 	unsigned int checked = 0;
1367e21a2955SMatthew Wilcox 	void *entry;
1368e21a2955SMatthew Wilcox 
1369e21a2955SMatthew Wilcox 	rcu_read_lock();
1370e21a2955SMatthew Wilcox 	xas_for_each(&xas, entry, ULONG_MAX) {
1371e21a2955SMatthew Wilcox 		if (xas_retry(&xas, entry))
1372e21a2955SMatthew Wilcox 			continue;
1373e21a2955SMatthew Wilcox 		if (entry == item)
1374e21a2955SMatthew Wilcox 			break;
1375e21a2955SMatthew Wilcox 		checked++;
1376e21a2955SMatthew Wilcox 		if ((checked % 4) != 0)
1377e21a2955SMatthew Wilcox 			continue;
1378e21a2955SMatthew Wilcox 		xas_pause(&xas);
1379e21a2955SMatthew Wilcox 	}
1380e21a2955SMatthew Wilcox 	rcu_read_unlock();
1381e21a2955SMatthew Wilcox 
1382e21a2955SMatthew Wilcox 	return entry ? xas.xa_index : -1;
1383e21a2955SMatthew Wilcox }
1384e21a2955SMatthew Wilcox 
/*
 * Exercise xa_find_entry() against multi-order entries of every order up
 * to 20 at a range of offsets and indices, then check that an item which
 * was never stored is not found.
 */
static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			/* @xa itself is never stored, so is never found. */
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	/* NB: index ULONG_MAX aliases the -1 "not found" return value. */
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}
1416e21a2955SMatthew Wilcox 
/*
 * Build one entry of each order back-to-back (at indices 1, 2, 4, ...),
 * then iterate over them twice: once straight through, and once calling
 * xas_pause() after every entry.  Both walks must see every entry exactly
 * once, in order.
 */
static noinline void check_pause(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned int order;
	unsigned long index = 1;
	unsigned int count = 0;

	for (order = 0; order < order_limit; order++) {
		XA_BUG_ON(xa, xa_store_order(xa, index, order,
					xa_mk_index(index), GFP_KERNEL));
		index += 1UL << order;
	}

	/* Plain walk: the count'th entry was stored at index 1 << count. */
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	/* Same walk, but pausing after every entry. */
	count = 0;
	xas_set(&xas, 0);
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
		xas_pause(&xas);
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	xa_destroy(xa);
}
1452c36d451aSMatthew Wilcox (Oracle) 
/*
 * Step an iterator forwards and backwards over an empty array and over a
 * single-entry array; the entry at 0 must be found once in each direction
 * and every other step must yield NULL.
 */
static noinline void check_move_tiny(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	XA_BUG_ON(xa, !xa_empty(xa));
	rcu_read_lock();
	/* Empty array: stepping forwards finds nothing, twice. */
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	rcu_read_unlock();
	xa_store_index(xa, 0, GFP_KERNEL);
	rcu_read_lock();
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}
147491abab83SMatthew Wilcox (Oracle) 
/*
 * An entry at ULONG_MAX must be findable exactly once; a follow-up
 * xas_find() (with or without an intervening xas_pause()) must not wrap
 * around the top of the index space.
 */
static noinline void check_move_max(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xas_set(&xas, 0);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	/* Pausing at ULONG_MAX must not overflow the cursor. */
	xas_pause(&xas);
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}
149582a22311SMatthew Wilcox (Oracle) 
/*
 * With entries at 0 and @idx only, walk the iterator forwards to 4 * idx
 * and back again, checking that only indices 0 and @idx yield entries and
 * that the iterator's index tracks every single step.  Finally check that
 * stepping past either end of the index space wraps around.
 */
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		/* Up to and including @idx the walk must stay in the tree. */
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	/* Now walk all the way back down. */
	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	/* Stepping past either end of the index space wraps around. */
	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}
154364d3e9a9SMatthew Wilcox 
/*
 * Fill indices 0 .. (1 << 16) - 1 and walk the whole range backwards and
 * forwards with xas_prev()/xas_next(): first with every entry present,
 * then with a hole punched from 1 << 8 up to 1 << 15, verifying the entry
 * and the iterator index at every step.  Finishes by running the smaller
 * movement tests.
 */
static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	/* Walk backwards from the top; every index is populated. */
	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	/* Stepping back from 0 wraps to ULONG_MAX and finds nothing. */
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	/* And forwards again over the full range. */
	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	/* Punch a hole in the middle and repeat both walks. */
	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	check_move_tiny(xa);
	check_move_max(xa);

	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}
161264d3e9a9SMatthew Wilcox 
/*
 * Create the (1 << order)-slot range rooted at @index and store a
 * distinct value entry in each slot, retrying the whole transaction when
 * memory allocation is needed.  The range must be free of conflicts.
 */
static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			/* Each slot must previously have been empty. */
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}
16352264f513SMatthew Wilcox 
check_create_range_1(struct xarray * xa,unsigned long index,unsigned order)16362264f513SMatthew Wilcox static noinline void check_create_range_1(struct xarray *xa,
16372264f513SMatthew Wilcox 		unsigned long index, unsigned order)
16382264f513SMatthew Wilcox {
16392264f513SMatthew Wilcox 	unsigned long i;
16402264f513SMatthew Wilcox 
16412264f513SMatthew Wilcox 	xa_store_many_order(xa, index, order);
16422264f513SMatthew Wilcox 	for (i = index; i < index + (1UL << order); i++)
16432264f513SMatthew Wilcox 		xa_erase_index(xa, i);
16442264f513SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
16452264f513SMatthew Wilcox }
16462264f513SMatthew Wilcox 
check_create_range_2(struct xarray * xa,unsigned order)16472264f513SMatthew Wilcox static noinline void check_create_range_2(struct xarray *xa, unsigned order)
16482264f513SMatthew Wilcox {
16492264f513SMatthew Wilcox 	unsigned long i;
16502264f513SMatthew Wilcox 	unsigned long nr = 1UL << order;
16512264f513SMatthew Wilcox 
16522264f513SMatthew Wilcox 	for (i = 0; i < nr * nr; i += nr)
16532264f513SMatthew Wilcox 		xa_store_many_order(xa, i, order);
16542264f513SMatthew Wilcox 	for (i = 0; i < nr * nr; i++)
16552264f513SMatthew Wilcox 		xa_erase_index(xa, i);
16562264f513SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
16572264f513SMatthew Wilcox }
16582264f513SMatthew Wilcox 
/* xas_create_range() must be a no-op on an xa_state carrying an error. */
static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}
16662264f513SMatthew Wilcox 
/*
 * Like xa_store_many_order(), but with one entry pre-existing inside the
 * range: storing over that slot must return the old entry, while every
 * other slot must previously have been empty.
 */
static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;	/* range-aligned start */
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}
16982264f513SMatthew Wilcox 
/*
 * Repeatedly call xas_create_range() over a range that is already covered
 * by a multi-order entry.  This is purely a "does not crash / corrupt the
 * tree" check; no result is asserted before the array is torn down.
 */
static noinline void check_create_range_5(struct xarray *xa,
		unsigned long index, unsigned int order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	for (i = 0; i < order + 10; i++) {
		do {
			xas_lock(&xas);
			xas_create_range(&xas);
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));
	}

	xa_destroy(xa);
}
17173e3c6580SMatthew Wilcox (Oracle) 
check_create_range(struct xarray * xa)17182264f513SMatthew Wilcox static noinline void check_create_range(struct xarray *xa)
17192264f513SMatthew Wilcox {
17202264f513SMatthew Wilcox 	unsigned int order;
17212264f513SMatthew Wilcox 	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;
17222264f513SMatthew Wilcox 
17232264f513SMatthew Wilcox 	for (order = 0; order < max_order; order++) {
17242264f513SMatthew Wilcox 		check_create_range_1(xa, 0, order);
17252264f513SMatthew Wilcox 		check_create_range_1(xa, 1U << order, order);
17262264f513SMatthew Wilcox 		check_create_range_1(xa, 2U << order, order);
17272264f513SMatthew Wilcox 		check_create_range_1(xa, 3U << order, order);
17282264f513SMatthew Wilcox 		check_create_range_1(xa, 1U << 24, order);
17292264f513SMatthew Wilcox 		if (order < 10)
17302264f513SMatthew Wilcox 			check_create_range_2(xa, order);
17312264f513SMatthew Wilcox 
17322264f513SMatthew Wilcox 		check_create_range_4(xa, 0, order);
17332264f513SMatthew Wilcox 		check_create_range_4(xa, 1U << order, order);
17342264f513SMatthew Wilcox 		check_create_range_4(xa, 2U << order, order);
17352264f513SMatthew Wilcox 		check_create_range_4(xa, 3U << order, order);
17362264f513SMatthew Wilcox 		check_create_range_4(xa, 1U << 24, order);
17372264f513SMatthew Wilcox 
17382264f513SMatthew Wilcox 		check_create_range_4(xa, 1, order);
17392264f513SMatthew Wilcox 		check_create_range_4(xa, (1U << order) + 1, order);
17402264f513SMatthew Wilcox 		check_create_range_4(xa, (2U << order) + 1, order);
17412264f513SMatthew Wilcox 		check_create_range_4(xa, (2U << order) - 1, order);
17422264f513SMatthew Wilcox 		check_create_range_4(xa, (3U << order) + 1, order);
17432264f513SMatthew Wilcox 		check_create_range_4(xa, (3U << order) - 1, order);
17442264f513SMatthew Wilcox 		check_create_range_4(xa, (1U << 24) + 1, order);
17453e3c6580SMatthew Wilcox (Oracle) 
17463e3c6580SMatthew Wilcox (Oracle) 		check_create_range_5(xa, 0, order);
17473e3c6580SMatthew Wilcox (Oracle) 		check_create_range_5(xa, (1U << order), order);
17482264f513SMatthew Wilcox 	}
17492264f513SMatthew Wilcox 
17502264f513SMatthew Wilcox 	check_create_range_3();
17512264f513SMatthew Wilcox }
17522264f513SMatthew Wilcox 
__check_store_range(struct xarray * xa,unsigned long first,unsigned long last)17530e9446c3SMatthew Wilcox static noinline void __check_store_range(struct xarray *xa, unsigned long first,
17540e9446c3SMatthew Wilcox 		unsigned long last)
17550e9446c3SMatthew Wilcox {
17560e9446c3SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI
1757b7677a13SMatthew Wilcox 	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
17580e9446c3SMatthew Wilcox 
1759b7677a13SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
1760b7677a13SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
17610e9446c3SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
17620e9446c3SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
17630e9446c3SMatthew Wilcox 
17640e9446c3SMatthew Wilcox 	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
17650e9446c3SMatthew Wilcox #endif
17660e9446c3SMatthew Wilcox 
17670e9446c3SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
17680e9446c3SMatthew Wilcox }
17690e9446c3SMatthew Wilcox 
check_store_range(struct xarray * xa)17700e9446c3SMatthew Wilcox static noinline void check_store_range(struct xarray *xa)
17710e9446c3SMatthew Wilcox {
17720e9446c3SMatthew Wilcox 	unsigned long i, j;
17730e9446c3SMatthew Wilcox 
17740e9446c3SMatthew Wilcox 	for (i = 0; i < 128; i++) {
17750e9446c3SMatthew Wilcox 		for (j = i; j < 128; j++) {
17760e9446c3SMatthew Wilcox 			__check_store_range(xa, i, j);
17770e9446c3SMatthew Wilcox 			__check_store_range(xa, 128 + i, 128 + j);
17780e9446c3SMatthew Wilcox 			__check_store_range(xa, 4095 + i, 4095 + j);
17790e9446c3SMatthew Wilcox 			__check_store_range(xa, 4096 + i, 4096 + j);
17800e9446c3SMatthew Wilcox 			__check_store_range(xa, 123456 + i, 123456 + j);
17815404a7f1SMatthew Wilcox 			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
17820e9446c3SMatthew Wilcox 		}
17830e9446c3SMatthew Wilcox 	}
17840e9446c3SMatthew Wilcox }
17850e9446c3SMatthew Wilcox 
17868fc75643SMatthew Wilcox (Oracle) #ifdef CONFIG_XARRAY_MULTI
/*
 * Store one entry of @order at @index, split it into entries of
 * @new_order, and verify that every fragment holds the expected value and
 * that XA_MARK_1 (set on the original entry) propagated to each fragment.
 */
static void check_split_1(struct xarray *xa, unsigned long index,
				unsigned int order, unsigned int new_order)
{
	XA_STATE_ORDER(xas, xa, index, new_order);
	unsigned int i, found;
	void *entry;

	xa_store_order(xa, index, order, xa, GFP_KERNEL);
	xa_set_mark(xa, index, XA_MARK_1);

	xas_split_alloc(&xas, xa, order, GFP_KERNEL);
	xas_lock(&xas);
	xas_split(&xas, xa, order);
	/* Overwrite each new_order-sized fragment with a distinct value. */
	for (i = 0; i < (1 << order); i += (1 << new_order))
		__xa_store(xa, index + i, xa_mk_index(index + i), 0);
	xas_unlock(&xas);

	for (i = 0; i < (1 << order); i++) {
		/* Every index maps to the value stored at its fragment base. */
		unsigned int val = index + (i & ~((1 << new_order) - 1));
		XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
	}

	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Each of the 1 << (order - new_order) fragments carries XA_MARK_1. */
	xas_set_order(&xas, index, 0);
	found = 0;
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
		found++;
		XA_BUG_ON(xa, xa_is_internal(entry));
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, found != 1 << (order - new_order));

	xa_destroy(xa);
}
18248fc75643SMatthew Wilcox (Oracle) 
check_split(struct xarray * xa)18258fc75643SMatthew Wilcox (Oracle) static noinline void check_split(struct xarray *xa)
18268fc75643SMatthew Wilcox (Oracle) {
18273012110dSMatthew Wilcox (Oracle) 	unsigned int order, new_order;
18288fc75643SMatthew Wilcox (Oracle) 
18298fc75643SMatthew Wilcox (Oracle) 	XA_BUG_ON(xa, !xa_empty(xa));
18308fc75643SMatthew Wilcox (Oracle) 
18318fc75643SMatthew Wilcox (Oracle) 	for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
18323012110dSMatthew Wilcox (Oracle) 		for (new_order = 0; new_order < order; new_order++) {
18333012110dSMatthew Wilcox (Oracle) 			check_split_1(xa, 0, order, new_order);
18343012110dSMatthew Wilcox (Oracle) 			check_split_1(xa, 1UL << order, order, new_order);
18353012110dSMatthew Wilcox (Oracle) 			check_split_1(xa, 3UL << order, order, new_order);
18363012110dSMatthew Wilcox (Oracle) 		}
18378fc75643SMatthew Wilcox (Oracle) 	}
18388fc75643SMatthew Wilcox (Oracle) }
18398fc75643SMatthew Wilcox (Oracle) #else
/* Splitting requires CONFIG_XARRAY_MULTI; nothing to test without it. */
static void check_split(struct xarray *xa) { }
18418fc75643SMatthew Wilcox (Oracle) #endif
18428fc75643SMatthew Wilcox (Oracle) 
check_align_1(struct xarray * xa,char * name)184376b4e529SMatthew Wilcox static void check_align_1(struct xarray *xa, char *name)
184476b4e529SMatthew Wilcox {
184576b4e529SMatthew Wilcox 	int i;
184676b4e529SMatthew Wilcox 	unsigned int id;
184776b4e529SMatthew Wilcox 	unsigned long index;
184876b4e529SMatthew Wilcox 	void *entry;
184976b4e529SMatthew Wilcox 
185076b4e529SMatthew Wilcox 	for (i = 0; i < 8; i++) {
1851a3e4d3f9SMatthew Wilcox 		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
1852a3e4d3f9SMatthew Wilcox 					GFP_KERNEL) != 0);
185376b4e529SMatthew Wilcox 		XA_BUG_ON(xa, id != i);
185476b4e529SMatthew Wilcox 	}
185576b4e529SMatthew Wilcox 	xa_for_each(xa, index, entry)
185676b4e529SMatthew Wilcox 		XA_BUG_ON(xa, xa_is_err(entry));
185776b4e529SMatthew Wilcox 	xa_destroy(xa);
185876b4e529SMatthew Wilcox }
185976b4e529SMatthew Wilcox 
/*
 * We should always be able to store without allocating memory after
 * reserving a slot.
 */
static void check_align_2(struct xarray *xa, char *name)
{
	int i;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* A plain store/erase works for every alignment of the pointer. */
	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
		xa_erase(xa, 0);
	}

	/* With a reservation held, storing with no GFP flags must succeed. */
	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
		xa_erase(xa, 0);
	}

	XA_BUG_ON(xa, !xa_empty(xa));
}
18832fbe967bSMatthew Wilcox 
check_align(struct xarray * xa)188476b4e529SMatthew Wilcox static noinline void check_align(struct xarray *xa)
188576b4e529SMatthew Wilcox {
188676b4e529SMatthew Wilcox 	char name[] = "Motorola 68000";
188776b4e529SMatthew Wilcox 
188876b4e529SMatthew Wilcox 	check_align_1(xa, name);
188976b4e529SMatthew Wilcox 	check_align_1(xa, name + 1);
189076b4e529SMatthew Wilcox 	check_align_1(xa, name + 2);
189176b4e529SMatthew Wilcox 	check_align_1(xa, name + 3);
18922fbe967bSMatthew Wilcox 	check_align_2(xa, name);
189376b4e529SMatthew Wilcox }
189476b4e529SMatthew Wilcox 
1895a97e7904SMatthew Wilcox static LIST_HEAD(shadow_nodes);
1896a97e7904SMatthew Wilcox 
/*
 * Node update callback: keep nodes that contain only value entries
 * ("shadow" nodes) linked on shadow_nodes, mimicking what the page-cache
 * workingset code does with shadow entries.
 */
static void test_update_node(struct xa_node *node)
{
	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list))
			/*
			 * NOTE(review): arguments are in the opposite order
			 * to the usual list_add(new, head) idiom; the result
			 * still links node and shadow_nodes into one circular
			 * list, so shadow_remove() finds it either way —
			 * confirm ordering intent if multiple shadow nodes
			 * can coexist.
			 */
			list_add(&shadow_nodes, &node->private_list);
	} else {
		if (!list_empty(&node->private_list))
			list_del_init(&node->private_list);
	}
}
1907a97e7904SMatthew Wilcox 
/* Delete every node currently on the shadow list, as reclaim would. */
static noinline void shadow_remove(struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		/* All tracked shadow nodes must belong to this array. */
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		xa_delete_node(node, test_update_node);
	}
	xa_unlock(xa);
}
1921a97e7904SMatthew Wilcox 
/*
 * Simulate the page-cache workingset shadow-entry lifecycle: a node that
 * holds only value entries goes on the shadow list, storing a pointer
 * takes it off again, and shadow_remove() reclaims it.
 */
static noinline void check_workingset(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	/* Two value entries only: the node must be on the shadow list. */
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	/* Storing a non-value pointer takes the node off the list ... */
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	/* ... and replacing it with a value puts the node back on. */
	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}
1950a97e7904SMatthew Wilcox 
/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		/* A multi-order pointer entry counts towards count only. */
		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		/* A multi-order value entry bumps nr_values as well. */
		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}
1983d6427f81SMatthew Wilcox 
check_get_order(struct xarray * xa)198457417cebSMatthew Wilcox (Oracle) static noinline void check_get_order(struct xarray *xa)
198557417cebSMatthew Wilcox (Oracle) {
198657417cebSMatthew Wilcox (Oracle) 	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
198757417cebSMatthew Wilcox (Oracle) 	unsigned int order;
198857417cebSMatthew Wilcox (Oracle) 	unsigned long i, j;
198957417cebSMatthew Wilcox (Oracle) 
199057417cebSMatthew Wilcox (Oracle) 	for (i = 0; i < 3; i++)
199157417cebSMatthew Wilcox (Oracle) 		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
199257417cebSMatthew Wilcox (Oracle) 
199357417cebSMatthew Wilcox (Oracle) 	for (order = 0; order < max_order; order++) {
199457417cebSMatthew Wilcox (Oracle) 		for (i = 0; i < 10; i++) {
199557417cebSMatthew Wilcox (Oracle) 			xa_store_order(xa, i << order, order,
199657417cebSMatthew Wilcox (Oracle) 					xa_mk_index(i << order), GFP_KERNEL);
199757417cebSMatthew Wilcox (Oracle) 			for (j = i << order; j < (i + 1) << order; j++)
199857417cebSMatthew Wilcox (Oracle) 				XA_BUG_ON(xa, xa_get_order(xa, j) != order);
199957417cebSMatthew Wilcox (Oracle) 			xa_erase(xa, i << order);
200057417cebSMatthew Wilcox (Oracle) 		}
200157417cebSMatthew Wilcox (Oracle) 	}
200257417cebSMatthew Wilcox (Oracle) }
200357417cebSMatthew Wilcox (Oracle) 
/* Like check_get_order() but using the advanced xas_get_order() API. */
static noinline void check_xas_get_order(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xas_set_order(&xas, i << order, order);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_value(i));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));

			/* Every index inside the entry reports its order. */
			for (j = i << order; j < (i + 1) << order; j++) {
				xas_set_order(&xas, j, 0);
				rcu_read_lock();
				xas_load(&xas);
				XA_BUG_ON(xa, xas_get_order(&xas) != order);
				rcu_read_unlock();
			}

			/* Remove the entry before the next iteration. */
			xas_lock(&xas);
			xas_set_order(&xas, i << order, order);
			xas_store(&xas, NULL);
			xas_unlock(&xas);
		}
	}
}
2036a4864671SKairui Song 
/*
 * xas_get_order() must also work on entries produced by
 * xas_for_each_conflict(): both for smaller ranges inside a stored entry
 * and for a larger range that encloses it, the single conflicting entry
 * reports the order it was stored with.
 */
static noinline void check_xas_conflict_get_order(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	void *entry;
	int only_once;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j, k;

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xas_set_order(&xas, i << order, order);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_value(i));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));

			/*
			 * Ensure xas_get_order works with xas_for_each_conflict.
			 */
			j = i << order;
			/* Smaller-order ranges inside the stored entry. */
			for (k = 0; k < order; k++) {
				only_once = 0;
				xas_set_order(&xas, j + (1 << k), k);
				xas_lock(&xas);
				xas_for_each_conflict(&xas, entry) {
					XA_BUG_ON(xa, entry != xa_mk_value(i));
					XA_BUG_ON(xa, xas_get_order(&xas) != order);
					only_once++;
				}
				XA_BUG_ON(xa, only_once != 1);
				xas_unlock(&xas);
			}

			/* A larger range enclosing the stored entry. */
			if (order < max_order - 1) {
				only_once = 0;
				xas_set_order(&xas, (i & ~1UL) << order, order + 1);
				xas_lock(&xas);
				xas_for_each_conflict(&xas, entry) {
					XA_BUG_ON(xa, entry != xa_mk_value(i));
					XA_BUG_ON(xa, xas_get_order(&xas) != order);
					only_once++;
				}
				XA_BUG_ON(xa, only_once != 1);
				xas_unlock(&xas);
			}

			/* Remove the entry before the next iteration. */
			xas_set_order(&xas, i << order, order);
			xas_lock(&xas);
			xas_store(&xas, NULL);
			xas_unlock(&xas);
		}
	}
}
20936758c112SKairui Song 
20946758c112SKairui Song 
check_destroy(struct xarray * xa)2095687149fcSMatthew Wilcox static noinline void check_destroy(struct xarray *xa)
2096687149fcSMatthew Wilcox {
2097687149fcSMatthew Wilcox 	unsigned long index;
2098687149fcSMatthew Wilcox 
2099687149fcSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
2100687149fcSMatthew Wilcox 
2101687149fcSMatthew Wilcox 	/* Destroying an empty array is a no-op */
2102687149fcSMatthew Wilcox 	xa_destroy(xa);
2103687149fcSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
2104687149fcSMatthew Wilcox 
2105687149fcSMatthew Wilcox 	/* Destroying an array with a single entry */
2106687149fcSMatthew Wilcox 	for (index = 0; index < 1000; index++) {
2107687149fcSMatthew Wilcox 		xa_store_index(xa, index, GFP_KERNEL);
2108687149fcSMatthew Wilcox 		XA_BUG_ON(xa, xa_empty(xa));
2109687149fcSMatthew Wilcox 		xa_destroy(xa);
2110687149fcSMatthew Wilcox 		XA_BUG_ON(xa, !xa_empty(xa));
2111687149fcSMatthew Wilcox 	}
2112687149fcSMatthew Wilcox 
2113687149fcSMatthew Wilcox 	/* Destroying an array with a single entry at ULONG_MAX */
2114687149fcSMatthew Wilcox 	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
2115687149fcSMatthew Wilcox 	XA_BUG_ON(xa, xa_empty(xa));
2116687149fcSMatthew Wilcox 	xa_destroy(xa);
2117687149fcSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
2118687149fcSMatthew Wilcox 
2119687149fcSMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI
2120687149fcSMatthew Wilcox 	/* Destroying an array with a multi-index entry */
2121687149fcSMatthew Wilcox 	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
2122687149fcSMatthew Wilcox 	XA_BUG_ON(xa, xa_empty(xa));
2123687149fcSMatthew Wilcox 	xa_destroy(xa);
2124687149fcSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
2125687149fcSMatthew Wilcox #endif
2126687149fcSMatthew Wilcox }
2127687149fcSMatthew Wilcox 
212858d6ea30SMatthew Wilcox static DEFINE_XARRAY(array);
2129ad3d6c72SMatthew Wilcox 
/*
 * Module init: run every check in sequence and report the pass count.
 * Returns 0 only if every XA_BUG_ON() held.
 */
static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_insert(&array);
	check_cmpxchg(&array);
	check_cmpxchg_order(&array);
	check_reserve(&array);
	check_reserve(&xa0);
	check_multi_store(&array);
	check_multi_store_advanced(&array);
	check_get_order(&array);
	check_xas_get_order(&array);
	check_xas_conflict_get_order(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_pause(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);
	check_align(&xa0);
	check_split(&array);

	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}
2168ad3d6c72SMatthew Wilcox 
/* Nothing to tear down; each test destroys its own arrays. */
static void xarray_exit(void)
{
}
2172ad3d6c72SMatthew Wilcox 
2173ad3d6c72SMatthew Wilcox module_init(xarray_checks);
2174ad3d6c72SMatthew Wilcox module_exit(xarray_exit);
2175ad3d6c72SMatthew Wilcox MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
2176*757234f1SJeff Johnson MODULE_DESCRIPTION("XArray API test module");
2177ad3d6c72SMatthew Wilcox MODULE_LICENSE("GPL");
2178