/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _VM_RADIX_H_
#define _VM_RADIX_H_

#include <vm/_vm_radix.h>

#ifdef _KERNEL
#include <sys/pctrie.h>
#include <vm/vm_page.h>
#include <vm/vm.h>

void		vm_radix_wait(void);
void		vm_radix_zinit(void);
void		*vm_radix_node_alloc(struct pctrie *ptree);
void		vm_radix_node_free(struct pctrie *ptree, void *node);
extern smr_t	vm_radix_smr;

static __inline void
vm_radix_init(struct vm_radix *rtree)
{
	pctrie_init(&rtree->rt_trie);
}

static __inline bool
vm_radix_is_empty(struct vm_radix *rtree)
{
	return (pctrie_is_empty(&rtree->rt_trie));
}

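/*
 * Example (a sketch, not part of this interface): embedding and setting
 * up a trie.  'obj_rtree' is an illustrative name, not a real consumer.
 *
 *	struct vm_radix obj_rtree;
 *
 *	vm_radix_init(&obj_rtree);
 *	MPASS(vm_radix_is_empty(&obj_rtree));
 */
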
PCTRIE_DEFINE_SMR(VM_RADIX, vm_page, pindex, vm_radix_node_alloc,
    vm_radix_node_free, vm_radix_smr);

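/*
 * The PCTRIE_DEFINE_SMR() invocation above generates the
 * VM_RADIX_PCTRIE_*() wrappers used below, specialized for vm_page
 * values keyed by their pindex field, with unlocked lookups protected
 * by the vm_radix_smr SMR domain.
 */
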
/*
 * Inserts the key-value pair into the trie, starting search from root.
 * Panics if the key already exists.
 */
static __inline int
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
	return (VM_RADIX_PCTRIE_INSERT(&rtree->rt_trie, page));
}

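/*
 * Example (a sketch, with the trie externally locked): a non-zero
 * return means an interior node could not be allocated.  Callers
 * typically back out, sleep for node memory, and retry:
 *
 *	if (vm_radix_insert(rtree, m) != 0) {
 *		... undo partial work, drop locks ...
 *		vm_radix_wait();
 *		... retry ...
 *	}
 */
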
/*
 * Inserts the key-value pair into the trie, starting search from iterator.
 * Panics if the key already exists.
 */
static __inline int
vm_radix_iter_insert(struct pctrie_iter *pages, vm_page_t page)
{
	return (VM_RADIX_PCTRIE_ITER_INSERT(pages, page));
}

/*
 * Returns the value stored at the index assuming there is an external lock.
 *
 * If the index is not present, NULL is returned.
 */
static __inline vm_page_t
vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_LOOKUP(&rtree->rt_trie, index));
}

/*
 * Returns the value stored at the index without requiring an external lock.
 *
 * If the index is not present, NULL is returned.
 */
static __inline vm_page_t
vm_radix_lookup_unlocked(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_LOOKUP_UNLOCKED(&rtree->rt_trie, index));
}

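/*
 * Example (a sketch): the SMR section entered by the generated lookup
 * only guarantees that the returned page is not freed during the
 * lookup itself.  The caller must revalidate before relying on it,
 * e.g. by busying the page and rechecking its identity:
 *
 *	m = vm_radix_lookup_unlocked(rtree, pindex);
 *	if (m != NULL && vm_page_tryxbusy(m)) {
 *		... recheck m->object and m->pindex, then use 'm' ...
 *	}
 */
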
/*
 * Returns the number of contiguous, non-NULL pages read into the ma[]
 * array, starting at 'index' and bounded by 'count', without requiring
 * an external lock.
 */
static __inline int
vm_radix_lookup_range_unlocked(struct vm_radix *rtree, vm_pindex_t index,
    vm_page_t ma[], int count)
{
	return (VM_RADIX_PCTRIE_LOOKUP_RANGE_UNLOCKED(&rtree->rt_trie, index,
	    ma, count));
}

/*
 * Returns the number of contiguous, non-NULL pages read into the ma[]
 * array, starting at 'index' and bounded by 'count'.  Requires that
 * access be externally synchronized by a lock.
 */
static __inline int
vm_radix_iter_lookup_range(struct pctrie_iter *pages, vm_pindex_t index,
    vm_page_t ma[], int count)
{
	return (VM_RADIX_PCTRIE_ITER_LOOKUP_RANGE(pages, index, ma, count));
}

/*
 * Initialize an iterator for vm_radix.
 */
static __inline void
vm_radix_iter_init(struct pctrie_iter *pages, struct vm_radix *rtree)
{
	pctrie_iter_init(pages, &rtree->rt_trie);
}

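/*
 * Example (a sketch): iterators live on the caller's stack and are
 * cheap to set up; the lock that synchronizes trie access must be held
 * for as long as the iterator is in use.
 *
 *	struct pctrie_iter pages;
 *
 *	vm_radix_iter_init(&pages, rtree);
 */
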
/*
 * Initialize an iterator for vm_radix, with an upper limit on the page
 * indices it will traverse.
 */
static __inline void
vm_radix_iter_limit_init(struct pctrie_iter *pages, struct vm_radix *rtree,
    vm_pindex_t limit)
{
	pctrie_iter_limit_init(pages, &rtree->rt_trie, limit);
}

/*
 * Returns the value stored at the index.
 * Requires that access be externally synchronized by a lock.
 *
 * If the index is not present, NULL is returned.
 */
static __inline vm_page_t
vm_radix_iter_lookup(struct pctrie_iter *pages, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_ITER_LOOKUP(pages, index));
}

/*
 * Returns the value stored 'stride' steps beyond the current position.
 * Requires that access be externally synchronized by a lock.
 *
 * If the index is not present, NULL is returned.
 */
static __inline vm_page_t
vm_radix_iter_stride(struct pctrie_iter *pages, int stride)
{
	return (VM_RADIX_PCTRIE_ITER_STRIDE(pages, stride));
}

/*
 * Returns the page with the least pindex that is greater than or equal to the
 * specified pindex, or NULL if there are no such pages.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_LOOKUP_GE(&rtree->rt_trie, index));
}

/*
 * Returns the page with the greatest pindex that is less than or equal to the
 * specified pindex, or NULL if there are no such pages.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_LOOKUP_LE(&rtree->rt_trie, index));
}

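/*
 * Example (a sketch, trie locked): scanning the resident pages in
 * [start, end) with repeated ranged lookups.  Each call restarts from
 * the root; the iterator interfaces below avoid that cost.
 *
 *	for (m = vm_radix_lookup_ge(rtree, start);
 *	    m != NULL && m->pindex < end;
 *	    m = vm_radix_lookup_ge(rtree, m->pindex + 1))
 *		... use 'm' ...
 */
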
/*
 * Remove the specified index from the trie, and return the value stored at
 * that index.  If the index is not present, return NULL.
 */
static __inline vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_REMOVE_LOOKUP(&rtree->rt_trie, index));
}

/*
 * Remove the current page from the trie.
 */
static __inline void
vm_radix_iter_remove(struct pctrie_iter *pages)
{
	VM_RADIX_PCTRIE_ITER_REMOVE(pages);
}

/*
 * Reclaim all the interior nodes of the trie, and invoke the callback
 * on all the pages, in order.
 */
static __inline void
vm_radix_reclaim_callback(struct vm_radix *rtree,
    void (*page_cb)(vm_page_t, void *), void *arg)
{
	VM_RADIX_PCTRIE_RECLAIM_CALLBACK(&rtree->rt_trie, page_cb, arg);
}

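/*
 * Example (a sketch, with a hypothetical callback): tearing down a
 * whole trie in one pass, leaving it empty.
 *
 *	static void
 *	release_page_cb(vm_page_t m, void *arg __unused)
 *	{
 *		... release or free 'm' ...
 *	}
 *
 *	vm_radix_reclaim_callback(rtree, release_page_cb, NULL);
 */
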
/*
 * Initialize an iterator pointing to the page with the least pindex that is
 * greater than or equal to the specified pindex, or NULL if there are no such
 * pages.  Return the page.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_iter_lookup_ge(struct pctrie_iter *pages, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_ITER_LOOKUP_GE(pages, index));
}

/*
 * Update the iterator to point to the page with the least pindex that is at
 * least 'jump' greater than the current pindex, or NULL if there is no such
 * page.  Return the page.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_iter_jump(struct pctrie_iter *pages, vm_pindex_t jump)
{
	return (VM_RADIX_PCTRIE_ITER_JUMP_GE(pages, jump));
}

/*
 * Update the iterator to point to the page with the least pindex that is
 * greater than the current pindex, or NULL if there is no such page.  Return
 * the page.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_iter_step(struct pctrie_iter *pages)
{
	return (VM_RADIX_PCTRIE_ITER_STEP_GE(pages));
}

/*
 * Iterate over each non-NULL page from page 'start' to the end of the object.
 */
#define VM_RADIX_FOREACH_FROM(m, pages, start)				\
	for (m = vm_radix_iter_lookup_ge(pages, start); m != NULL;	\
	    m = vm_radix_iter_step(pages))

/*
 * Iterate over each non-NULL page from the beginning to the end of the object.
 */
#define VM_RADIX_FOREACH(m, pages) VM_RADIX_FOREACH_FROM(m, pages, 0)

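/*
 * Example (a sketch, trie locked): visiting every resident page once,
 * in pindex order.
 *
 *	struct pctrie_iter pages;
 *	vm_page_t m;
 *
 *	vm_radix_iter_init(&pages, rtree);
 *	VM_RADIX_FOREACH(m, &pages) {
 *		... use 'm' ...
 *	}
 */
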
/*
 * Initialize an iterator pointing to the page with the greatest pindex that is
 * less than or equal to the specified pindex, or NULL if there are no such
 * pages.  Return the page.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_iter_lookup_le(struct pctrie_iter *pages, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_ITER_LOOKUP_LE(pages, index));
}

/*
 * Initialize an iterator pointing to the page with the greatest pindex that is
 * less than the specified pindex, or NULL if there are no such pages.  Return
 * the page.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_iter_lookup_lt(struct pctrie_iter *pages, vm_pindex_t index)
{
	return (index == 0 ? NULL : vm_radix_iter_lookup_le(pages, index - 1));
}

/*
 * Update the iterator to point to the page with the pindex that is one greater
 * than the current pindex, or NULL if there is no such page.  Return the page.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_iter_next(struct pctrie_iter *pages)
{
	return (VM_RADIX_PCTRIE_ITER_NEXT(pages));
}

/*
 * Iterate over consecutive non-NULL pages from position 'start' to first NULL
 * page.
 */
#define VM_RADIX_FORALL_FROM(m, pages, start)				\
	for (m = vm_radix_iter_lookup(pages, start); m != NULL;		\
	    m = vm_radix_iter_next(pages))

/*
 * Iterate over consecutive non-NULL pages from the beginning to first NULL
 * page.
 */
#define VM_RADIX_FORALL(m, pages) VM_RADIX_FORALL_FROM(m, pages, 0)

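/*
 * Example (a sketch): unlike VM_RADIX_FOREACH(), which skips over holes,
 * VM_RADIX_FORALL() stops at the first missing index, so it visits only
 * the run of consecutively indexed pages starting at pindex 0.
 *
 *	vm_radix_iter_init(&pages, rtree);
 *	VM_RADIX_FORALL(m, &pages)
 *		... use 'm' ...
 */
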
/*
 * Update the iterator to point to the page with the pindex that is one less
 * than the current pindex, or NULL if there is no such page.  Return the page.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_iter_prev(struct pctrie_iter *pages)
{
	return (VM_RADIX_PCTRIE_ITER_PREV(pages));
}

/*
 * Return the current page.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_iter_page(struct pctrie_iter *pages)
{
	return (VM_RADIX_PCTRIE_ITER_VALUE(pages));
}

/*
 * Replace an existing page in the trie with another one.
 * Panics if there is not an old page in the trie at the new page's index.
 */
static __inline vm_page_t
vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage)
{
	return (VM_RADIX_PCTRIE_REPLACE(&rtree->rt_trie, newpage));
}

#endif /* _KERNEL */
#endif /* !_VM_RADIX_H_ */