/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _VM_RADIX_H_
#define _VM_RADIX_H_

#include <vm/_vm_radix.h>

#ifdef _KERNEL
#include <sys/pctrie.h>
#include <vm/vm_page.h>
#include <vm/vm.h>

void		vm_radix_wait(void);
void		vm_radix_zinit(void);
void		*vm_radix_node_alloc(struct pctrie *ptree);
void		vm_radix_node_free(struct pctrie *ptree, void *node);
extern smr_t	vm_radix_smr;

static __inline void
vm_radix_init(struct vm_radix *rtree)
{
	pctrie_init(&rtree->rt_trie);
}

static __inline bool
vm_radix_is_empty(struct vm_radix *rtree)
{
	return (pctrie_is_empty(&rtree->rt_trie));
}

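/*
 * Usage sketch (illustrative, not compiled here): the trie is embedded in a
 * larger structure and initialized once before use.  "obj" is a hypothetical
 * structure with a "struct vm_radix rtree" member.
 *
 *	vm_radix_init(&obj->rtree);
 *	KASSERT(vm_radix_is_empty(&obj->rtree),
 *	    ("obj %p: new trie is not empty", obj));
 */
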
PCTRIE_DEFINE_SMR(VM_RADIX, vm_page, pindex, vm_radix_node_alloc,
    vm_radix_node_free, vm_radix_smr);

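/*
 * The invocation above instantiates the generic pctrie code for this
 * consumer: it generates the VM_RADIX_PCTRIE_*() functions wrapped by the
 * inlines below, keyed on the pindex field of struct vm_page, using the
 * node allocation and free routines declared above, and using vm_radix_smr
 * for SMR-protected unlocked lookups.
 */
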
/*
 * Insert the page into the trie with its pindex as the key.  Panics if the
 * pindex is already present.  Returns zero on success or a non-zero error
 * on memory allocation failure.
 */
static __inline int
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
	return (VM_RADIX_PCTRIE_INSERT(&rtree->rt_trie, page));
}

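/*
 * Usage sketch (illustrative): insert a page with the tree lock held,
 * backing off and retrying on allocation failure.  The unlock/retry logic
 * is hypothetical; vm_radix_wait() sleeps until node memory may be
 * available again.
 *
 *	m->pindex = pindex;
 *	while (vm_radix_insert(&obj->rtree, m) != 0) {
 *		... drop the lock ...
 *		vm_radix_wait();
 *		... reacquire the lock and revalidate ...
 *	}
 */
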
/*
 * Insert the page into the vm_radix tree with its pindex as the key.  Panic if
 * the pindex already exists.  Return zero on success or a non-zero error on
 * memory allocation failure.  Set the out parameter mpred to the previous page
 * in the tree as if found by a previous call to vm_radix_lookup_le with the
 * new page pindex; because the new pindex cannot already be present, this is
 * the page with the greatest pindex less than it, hence the _lt suffix.
 */
static __inline int
vm_radix_insert_lookup_lt(struct vm_radix *rtree, vm_page_t page,
    vm_page_t *mpred)
{
	int error;

	error = VM_RADIX_PCTRIE_INSERT_LOOKUP_LE(&rtree->rt_trie, page, mpred);
	if (__predict_false(error == EEXIST))
		panic("vm_radix_insert_lookup_lt: page already present, %p",
		    *mpred);
	return (error);
}

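/*
 * Usage sketch (illustrative): insert a page and learn its predecessor in
 * one call, e.g. to splice the page into an ordered list of the object's
 * pages.  The surrounding error handling is hypothetical.
 *
 *	vm_page_t mpred;
 *	int error;
 *
 *	error = vm_radix_insert_lookup_lt(&obj->rtree, m, &mpred);
 *	if (error != 0)
 *		return (error);
 *	... link m after mpred, or at the head when mpred == NULL ...
 */
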
/*
 * Returns the value stored at the index.
 * Requires that access be externally synchronized by a lock.
 *
 * If the index is not present, NULL is returned.
 */
static __inline vm_page_t
vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_LOOKUP(&rtree->rt_trie, index));
}

/*
 * Returns the value stored at the index without requiring an external lock.
 *
 * If the index is not present, NULL is returned.
 */
static __inline vm_page_t
vm_radix_lookup_unlocked(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_LOOKUP_UNLOCKED(&rtree->rt_trie, index));
}

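/*
 * Usage sketch (illustrative): the locked lookup is the common case; the
 * unlocked variant runs under SMR protection (vm_radix_smr above), so the
 * returned page may be concurrently removed and must be revalidated before
 * use.
 *
 *	m = vm_radix_lookup(&obj->rtree, pindex);		(lock held)
 *
 *	m = vm_radix_lookup_unlocked(&obj->rtree, pindex);	(no lock)
 *	... revalidate m, e.g. recheck that m->pindex still matches ...
 */
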
/*
 * Initialize an iterator for vm_radix.
 */
static __inline void
vm_radix_iter_init(struct pctrie_iter *pages, struct vm_radix *rtree)
{
	pctrie_iter_init(pages, &rtree->rt_trie);
}

/*
 * Initialize an iterator for vm_radix, with an upper limit on the pindexes
 * it will visit.
 */
static __inline void
vm_radix_iter_limit_init(struct pctrie_iter *pages, struct vm_radix *rtree,
    vm_pindex_t limit)
{
	pctrie_iter_limit_init(pages, &rtree->rt_trie, limit);
}

/*
 * Returns the value stored at the index.
 * Requires that access be externally synchronized by a lock.
 *
 * If the index is not present, NULL is returned.
 */
static __inline vm_page_t
vm_radix_iter_lookup(struct pctrie_iter *pages, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_ITER_LOOKUP(pages, index));
}

/*
 * Returns the value stored 'stride' steps beyond the current position.
 * Requires that access be externally synchronized by a lock.
 *
 * If the index is not present, NULL is returned.
 */
static __inline vm_page_t
vm_radix_iter_stride(struct pctrie_iter *pages, int stride)
{
	return (VM_RADIX_PCTRIE_ITER_STRIDE(pages, stride));
}

/*
 * Returns the page with the least pindex that is greater than or equal to the
 * specified pindex, or NULL if there are no such pages.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_LOOKUP_GE(&rtree->rt_trie, index));
}

/*
 * Returns the page with the greatest pindex that is less than or equal to the
 * specified pindex, or NULL if there are no such pages.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_LOOKUP_LE(&rtree->rt_trie, index));
}

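/*
 * Usage sketch (illustrative): walk the resident pages with pindex in
 * [start, end) in ascending order, with the lock held.  For long walks the
 * iterator interface is preferable, as each lookup here restarts from the
 * trie root.
 *
 *	for (m = vm_radix_lookup_ge(&obj->rtree, start);
 *	    m != NULL && m->pindex < end;
 *	    m = vm_radix_lookup_ge(&obj->rtree, m->pindex + 1))
 *		... process m ...
 */
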
/*
 * Remove the specified index from the trie, and return the value stored at
 * that index.  If the index is not present, return NULL.
 */
static __inline vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_REMOVE_LOOKUP(&rtree->rt_trie, index));
}

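/*
 * Usage sketch (illustrative): detach a page and assert that the trie held
 * the page the caller expected at that index.
 *
 *	mret = vm_radix_remove(&obj->rtree, m->pindex);
 *	KASSERT(mret == m,
 *	    ("removed page %p, expected %p", mret, m));
 */
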
/*
 * Reclaim all the interior nodes from the radix tree.
 */
static __inline void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	VM_RADIX_PCTRIE_RECLAIM(&rtree->rt_trie);
}

/*
 * Initialize an iterator pointing to the page with the least pindex that is
 * greater than or equal to the specified pindex, or NULL if there are no such
 * pages.  Return the page.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_iter_lookup_ge(struct pctrie_iter *pages, vm_pindex_t index)
{
	return (VM_RADIX_PCTRIE_ITER_LOOKUP_GE(pages, index));
}

/*
 * Update the iterator to point to the page with the least pindex that is at
 * least 'jump' greater than the current pindex, or NULL if there are no such
 * pages.  Return the page.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_iter_jump(struct pctrie_iter *pages, vm_pindex_t jump)
{
	return (VM_RADIX_PCTRIE_ITER_JUMP_GE(pages, jump));
}

/*
 * Update the iterator to point to the page with the least pindex that is at
 * least one greater than the current pindex, or NULL if there are no such
 * pages.  Return the page.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_iter_step(struct pctrie_iter *pages)
{
	return (VM_RADIX_PCTRIE_ITER_STEP_GE(pages));
}

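/*
 * Usage sketch (illustrative): the iterator functions combine into a scan
 * that keeps its position in the trie between steps.  The bound passed to
 * vm_radix_iter_limit_init() (a hypothetical "end" here) limits how far the
 * iterator will advance.
 *
 *	struct pctrie_iter pages;
 *
 *	vm_radix_iter_limit_init(&pages, &obj->rtree, end);
 *	for (m = vm_radix_iter_lookup_ge(&pages, start); m != NULL;
 *	    m = vm_radix_iter_step(&pages))
 *		... process m ...
 */
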
/*
 * Update the iterator to point to the page with the pindex that is one greater
 * than the current pindex, or NULL if there is no such page.  Return the page.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_iter_next(struct pctrie_iter *pages)
{
	return (VM_RADIX_PCTRIE_ITER_NEXT(pages));
}

/*
 * Update the iterator to point to the page with the pindex that is one less
 * than the current pindex, or NULL if there is no such page.  Return the page.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_iter_prev(struct pctrie_iter *pages)
{
	return (VM_RADIX_PCTRIE_ITER_PREV(pages));
}

/*
 * Return the current page.
 *
 * Requires that access be externally synchronized by a lock.
 */
static __inline vm_page_t
vm_radix_iter_page(struct pctrie_iter *pages)
{
	return (VM_RADIX_PCTRIE_ITER_VALUE(pages));
}

/*
 * Replace an existing page in the trie with another one.
 * Panics if no page is already present in the trie at the new page's index.
 */
static __inline vm_page_t
vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage)
{
	return (VM_RADIX_PCTRIE_REPLACE(&rtree->rt_trie, newpage));
}

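/*
 * Usage sketch (illustrative): swap a new page in for the one resident at
 * the same pindex, with the lock held; the displaced page is returned for
 * the caller to clean up.
 *
 *	mnew->pindex = pindex;
 *	mold = vm_radix_replace(&obj->rtree, mnew);
 *	... mold is the page that was in the trie at pindex ...
 */
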
#endif /* _KERNEL */
#endif /* !_VM_RADIX_H_ */