1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2013 EMC Corp.
5 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
6 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #ifndef _VM_RADIX_H_
32 #define _VM_RADIX_H_
33
34 #include <vm/_vm_radix.h>
35
36 #ifdef _KERNEL
37 #include <sys/pctrie.h>
38 #include <vm/vm_page.h>
39 #include <vm/vm.h>
40
/*
 * Support routines implemented in vm_radix.c.  vm_radix_node_alloc() and
 * vm_radix_node_free() are the allocation hooks passed to the
 * PCTRIE_DEFINE_SMR() expansion below.
 */
void vm_radix_wait(void);
void vm_radix_zinit(void);
void *vm_radix_node_alloc(struct pctrie *ptree);
void vm_radix_node_free(struct pctrie *ptree, void *node);
/* SMR state passed to PCTRIE_DEFINE_SMR() below; defined in vm_radix.c. */
extern smr_t vm_radix_smr;
46
47 static __inline void
vm_radix_init(struct vm_radix * rtree)48 vm_radix_init(struct vm_radix *rtree)
49 {
50 pctrie_init(&rtree->rt_trie);
51 }
52
53 static __inline bool
vm_radix_is_empty(struct vm_radix * rtree)54 vm_radix_is_empty(struct vm_radix *rtree)
55 {
56 return (pctrie_is_empty(&rtree->rt_trie));
57 }
58
/*
 * Instantiate the VM_RADIX_PCTRIE_*() accessors used by the inline wrappers
 * below: a pctrie specialization whose values are vm_page structures keyed
 * by their pindex field, with vm_radix_smr enabling the _UNLOCKED lookup
 * variant.
 */
PCTRIE_DEFINE_SMR(VM_RADIX, vm_page, pindex, vm_radix_node_alloc,
    vm_radix_node_free, vm_radix_smr);
61
62 /*
63 * Inserts the key-value pair into the trie, starting search from root.
64 * Panics if the key already exists.
65 */
66 static __inline int
vm_radix_insert(struct vm_radix * rtree,vm_page_t page)67 vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
68 {
69 return (VM_RADIX_PCTRIE_INSERT(&rtree->rt_trie, page));
70 }
71
72 /*
73 * Inserts the key-value pair into the trie, starting search from iterator.
74 * Panics if the key already exists.
75 */
76 static __inline int
vm_radix_iter_insert(struct pctrie_iter * pages,vm_page_t page)77 vm_radix_iter_insert(struct pctrie_iter *pages, vm_page_t page)
78 {
79 return (VM_RADIX_PCTRIE_ITER_INSERT(pages, page));
80 }
81
82 /*
83 * Insert the page into the vm_radix tree with its pindex as the key. Panic if
84 * the pindex already exists. Return zero on success or a non-zero error on
85 * memory allocation failure. Set the out parameter mpred to the previous page
86 * in the tree as if found by a previous call to vm_radix_lookup_le with the
87 * new page pindex.
88 */
89 static __inline int
vm_radix_insert_lookup_lt(struct vm_radix * rtree,vm_page_t page,vm_page_t * mpred)90 vm_radix_insert_lookup_lt(struct vm_radix *rtree, vm_page_t page,
91 vm_page_t *mpred)
92 {
93 int error;
94
95 error = VM_RADIX_PCTRIE_INSERT_LOOKUP_LE(&rtree->rt_trie, page, mpred);
96 if (__predict_false(error == EEXIST))
97 panic("vm_radix_insert_lookup_lt: page already present, %p",
98 *mpred);
99 return (error);
100 }
101
102 /*
103 * Returns the value stored at the index assuming there is an external lock.
104 *
105 * If the index is not present, NULL is returned.
106 */
107 static __inline vm_page_t
vm_radix_lookup(struct vm_radix * rtree,vm_pindex_t index)108 vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
109 {
110 return (VM_RADIX_PCTRIE_LOOKUP(&rtree->rt_trie, index));
111 }
112
113 /*
114 * Returns the value stored at the index without requiring an external lock.
115 *
116 * If the index is not present, NULL is returned.
117 */
118 static __inline vm_page_t
vm_radix_lookup_unlocked(struct vm_radix * rtree,vm_pindex_t index)119 vm_radix_lookup_unlocked(struct vm_radix *rtree, vm_pindex_t index)
120 {
121 return (VM_RADIX_PCTRIE_LOOKUP_UNLOCKED(&rtree->rt_trie, index));
122 }
123
124 /*
125 * Initialize an iterator for vm_radix.
126 */
127 static __inline void
vm_radix_iter_init(struct pctrie_iter * pages,struct vm_radix * rtree)128 vm_radix_iter_init(struct pctrie_iter *pages, struct vm_radix *rtree)
129 {
130 pctrie_iter_init(pages, &rtree->rt_trie);
131 }
132
133 /*
134 * Initialize an iterator for vm_radix.
135 */
136 static __inline void
vm_radix_iter_limit_init(struct pctrie_iter * pages,struct vm_radix * rtree,vm_pindex_t limit)137 vm_radix_iter_limit_init(struct pctrie_iter *pages, struct vm_radix *rtree,
138 vm_pindex_t limit)
139 {
140 pctrie_iter_limit_init(pages, &rtree->rt_trie, limit);
141 }
142
143 /*
144 * Returns the value stored at the index.
145 * Requires that access be externally synchronized by a lock.
146 *
147 * If the index is not present, NULL is returned.
148 */
149 static __inline vm_page_t
vm_radix_iter_lookup(struct pctrie_iter * pages,vm_pindex_t index)150 vm_radix_iter_lookup(struct pctrie_iter *pages, vm_pindex_t index)
151 {
152 return (VM_RADIX_PCTRIE_ITER_LOOKUP(pages, index));
153 }
154
155 /*
156 * Returns the value stored 'stride' steps beyond the current position.
157 * Requires that access be externally synchronized by a lock.
158 *
159 * If the index is not present, NULL is returned.
160 */
161 static __inline vm_page_t
vm_radix_iter_stride(struct pctrie_iter * pages,int stride)162 vm_radix_iter_stride(struct pctrie_iter *pages, int stride)
163 {
164 return (VM_RADIX_PCTRIE_ITER_STRIDE(pages, stride));
165 }
166
167 /*
168 * Returns the page with the least pindex that is greater than or equal to the
169 * specified pindex, or NULL if there are no such pages.
170 *
171 * Requires that access be externally synchronized by a lock.
172 */
173 static __inline vm_page_t
vm_radix_lookup_ge(struct vm_radix * rtree,vm_pindex_t index)174 vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
175 {
176 return (VM_RADIX_PCTRIE_LOOKUP_GE(&rtree->rt_trie, index));
177 }
178
179 /*
180 * Returns the page with the greatest pindex that is less than or equal to the
181 * specified pindex, or NULL if there are no such pages.
182 *
183 * Requires that access be externally synchronized by a lock.
184 */
185 static __inline vm_page_t
vm_radix_lookup_le(struct vm_radix * rtree,vm_pindex_t index)186 vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
187 {
188 return (VM_RADIX_PCTRIE_LOOKUP_LE(&rtree->rt_trie, index));
189 }
190
191 /*
192 * Remove the specified index from the trie, and return the value stored at
193 * that index. If the index is not present, return NULL.
194 */
195 static __inline vm_page_t
vm_radix_remove(struct vm_radix * rtree,vm_pindex_t index)196 vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
197 {
198 return (VM_RADIX_PCTRIE_REMOVE_LOOKUP(&rtree->rt_trie, index));
199 }
200
/*
 * Remove the page at the iterator's current position from the trie.
 */
static __inline void
vm_radix_iter_remove(struct pctrie_iter *pages)
{
	VM_RADIX_PCTRIE_ITER_REMOVE(pages);
}
209
210 /*
211 * Reclaim all the interior nodes of the trie, and invoke the callback
212 * on all the pages, in order.
213 */
214 static __inline void
vm_radix_reclaim_callback(struct vm_radix * rtree,void (* page_cb)(vm_page_t,void *),void * arg)215 vm_radix_reclaim_callback(struct vm_radix *rtree,
216 void (*page_cb)(vm_page_t, void *), void *arg)
217 {
218 VM_RADIX_PCTRIE_RECLAIM_CALLBACK(&rtree->rt_trie, page_cb, arg);
219 }
220
221 /*
222 * Initialize an iterator pointing to the page with the least pindex that is
223 * greater than or equal to the specified pindex, or NULL if there are no such
224 * pages. Return the page.
225 *
226 * Requires that access be externally synchronized by a lock.
227 */
228 static __inline vm_page_t
vm_radix_iter_lookup_ge(struct pctrie_iter * pages,vm_pindex_t index)229 vm_radix_iter_lookup_ge(struct pctrie_iter *pages, vm_pindex_t index)
230 {
231 return (VM_RADIX_PCTRIE_ITER_LOOKUP_GE(pages, index));
232 }
233
234 /*
235 * Update the iterator to point to the page with the least pindex that is 'jump'
236 * or more greater than or equal to the current pindex, or NULL if there are no
237 * such pages. Return the page.
238 *
239 * Requires that access be externally synchronized by a lock.
240 */
241 static __inline vm_page_t
vm_radix_iter_jump(struct pctrie_iter * pages,vm_pindex_t jump)242 vm_radix_iter_jump(struct pctrie_iter *pages, vm_pindex_t jump)
243 {
244 return (VM_RADIX_PCTRIE_ITER_JUMP_GE(pages, jump));
245 }
246
247 /*
248 * Update the iterator to point to the page with the least pindex that is one or
249 * more greater than the current pindex, or NULL if there are no such pages.
250 * Return the page.
251 *
252 * Requires that access be externally synchronized by a lock.
253 */
254 static __inline vm_page_t
vm_radix_iter_step(struct pctrie_iter * pages)255 vm_radix_iter_step(struct pctrie_iter *pages)
256 {
257 return (VM_RADIX_PCTRIE_ITER_STEP_GE(pages));
258 }
259
260 /*
261 * Initialize an iterator pointing to the page with the greatest pindex that is
262 * less than or equal to the specified pindex, or NULL if there are no such
263 * pages. Return the page.
264 *
265 * Requires that access be externally synchronized by a lock.
266 */
267 static __inline vm_page_t
vm_radix_iter_lookup_le(struct pctrie_iter * pages,vm_pindex_t index)268 vm_radix_iter_lookup_le(struct pctrie_iter *pages, vm_pindex_t index)
269 {
270 return (VM_RADIX_PCTRIE_ITER_LOOKUP_LE(pages, index));
271 }
272
273 /*
274 * Update the iterator to point to the page with the pindex that is one greater
275 * than the current pindex, or NULL if there is no such page. Return the page.
276 *
277 * Requires that access be externally synchronized by a lock.
278 */
279 static __inline vm_page_t
vm_radix_iter_next(struct pctrie_iter * pages)280 vm_radix_iter_next(struct pctrie_iter *pages)
281 {
282 return (VM_RADIX_PCTRIE_ITER_NEXT(pages));
283 }
284
285 /*
286 * Update the iterator to point to the page with the pindex that is one less
287 * than the current pindex, or NULL if there is no such page. Return the page.
288 *
289 * Requires that access be externally synchronized by a lock.
290 */
291 static __inline vm_page_t
vm_radix_iter_prev(struct pctrie_iter * pages)292 vm_radix_iter_prev(struct pctrie_iter *pages)
293 {
294 return (VM_RADIX_PCTRIE_ITER_PREV(pages));
295 }
296
297 /*
298 * Return the current page.
299 *
300 * Requires that access be externally synchronized by a lock.
301 */
302 static __inline vm_page_t
vm_radix_iter_page(struct pctrie_iter * pages)303 vm_radix_iter_page(struct pctrie_iter *pages)
304 {
305 return (VM_RADIX_PCTRIE_ITER_VALUE(pages));
306 }
307
308 /*
309 * Replace an existing page in the trie with another one.
310 * Panics if there is not an old page in the trie at the new page's index.
311 */
312 static __inline vm_page_t
vm_radix_replace(struct vm_radix * rtree,vm_page_t newpage)313 vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage)
314 {
315 return (VM_RADIX_PCTRIE_REPLACE(&rtree->rt_trie, newpage));
316 }
317
318 #endif /* _KERNEL */
319 #endif /* !_VM_RADIX_H_ */
320