1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3 *
4 * Copyright (c) 1991 Regents of the University of California.
5 * All rights reserved.
6 * Copyright (c) 1998 Matthew Dillon. All Rights Reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * The Mach Operating System project at Carnegie-Mellon University.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36 /*-
37 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38 * All rights reserved.
39 *
40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41 *
42 * Permission to use, copy, modify and distribute this software and
43 * its documentation is hereby granted, provided that both the copyright
44 * notice and this permission notice appear in all copies of the
45 * software, derivative works or modified versions, and any portions
46 * thereof, and that both notices appear in supporting documentation.
47 *
48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51 *
52 * Carnegie Mellon requests users of this software to return to
53 *
54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
55 * School of Computer Science
56 * Carnegie Mellon University
57 * Pittsburgh PA 15213-3890
58 *
59 * any improvements or extensions that they make and grant Carnegie the
60 * rights to redistribute these changes.
61 */
62
63 /*
64 * Resident memory management module.
65 */
66
67 #include <sys/cdefs.h>
68 #include "opt_vm.h"
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/counter.h>
73 #include <sys/domainset.h>
74 #include <sys/kernel.h>
75 #include <sys/limits.h>
76 #include <sys/linker.h>
77 #include <sys/lock.h>
78 #include <sys/malloc.h>
79 #include <sys/mman.h>
80 #include <sys/msgbuf.h>
81 #include <sys/mutex.h>
82 #include <sys/proc.h>
83 #include <sys/rwlock.h>
84 #include <sys/sleepqueue.h>
85 #include <sys/sbuf.h>
86 #include <sys/sched.h>
87 #include <sys/smp.h>
88 #include <sys/sysctl.h>
89 #include <sys/vmmeter.h>
90 #include <sys/vnode.h>
91
92 #include <vm/vm.h>
93 #include <vm/pmap.h>
94 #include <vm/vm_param.h>
95 #include <vm/vm_domainset.h>
96 #include <vm/vm_kern.h>
97 #include <vm/vm_map.h>
98 #include <vm/vm_object.h>
99 #include <vm/vm_page.h>
100 #include <vm/vm_pageout.h>
101 #include <vm/vm_phys.h>
102 #include <vm/vm_pagequeue.h>
103 #include <vm/vm_pager.h>
104 #include <vm/vm_radix.h>
105 #include <vm/vm_reserv.h>
106 #include <vm/vm_extern.h>
107 #include <vm/vm_dumpset.h>
108 #include <vm/uma.h>
109 #include <vm/uma_int.h>
110
111 #include <machine/md_var.h>
112
113 struct vm_domain vm_dom[MAXMEMDOM];
114
115 DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);
116
117 struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];
118
119 struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
120 /* The following fields are protected by the domainset lock. */
121 domainset_t __exclusive_cache_line vm_min_domains;
122 domainset_t __exclusive_cache_line vm_severe_domains;
123 static int vm_min_waiters;
124 static int vm_severe_waiters;
125 static int vm_pageproc_waiters;
126
127 static SYSCTL_NODE(_vm_stats, OID_AUTO, page, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
128 "VM page statistics");
129
130 static COUNTER_U64_DEFINE_EARLY(pqstate_commit_retries);
131 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, pqstate_commit_retries,
132 CTLFLAG_RD, &pqstate_commit_retries,
133 "Number of failed per-page atomic queue state updates");
134
135 static COUNTER_U64_DEFINE_EARLY(queue_ops);
136 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_ops,
137 CTLFLAG_RD, &queue_ops,
138 "Number of batched queue operations");
139
140 static COUNTER_U64_DEFINE_EARLY(queue_nops);
141 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_nops,
142 CTLFLAG_RD, &queue_nops,
143 "Number of batched queue operations with no effects");
144
145 /*
146 * bogus page -- for I/O to/from partially complete buffers,
147 * or for paging into sparsely invalid regions.
148 */
149 vm_page_t bogus_page;
150
151 vm_page_t vm_page_array;
152 long vm_page_array_size;
153 long first_page;
154
155 struct bitset *vm_page_dump;
156 long vm_page_dump_pages;
157
158 static TAILQ_HEAD(, vm_page) blacklist_head;
159 static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
160 SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
161 CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");
162
163 static uma_zone_t fakepg_zone;
164
165 static vm_page_t vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
166 int req, vm_page_t mpred);
167 static void vm_page_alloc_check(vm_page_t m);
168 static vm_page_t vm_page_alloc_nofree_domain(int domain, int req);
169 static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
170 vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
171 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
172 static void vm_page_enqueue(vm_page_t m, uint8_t queue);
173 static bool vm_page_free_prep(vm_page_t m);
174 static void vm_page_free_toq(vm_page_t m);
175 static void vm_page_init(void *dummy);
176 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
177 vm_pindex_t pindex, vm_page_t mpred);
178 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
179 vm_page_t mpred);
180 static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
181 const uint16_t nflag);
182 static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
183 vm_page_t m_run, vm_paddr_t high);
184 static void vm_page_release_toq(vm_page_t m, uint8_t nqueue, bool noreuse);
185 static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
186 int req);
187 static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
188 int flags);
189 static void vm_page_zone_release(void *arg, void **store, int cnt);
190
191 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
192
193 static void
194 vm_page_init(void *dummy)
195 {
196
197 fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
198 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
199 bogus_page = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_NOFREE);
200 }
201
202 static int pgcache_zone_max_pcpu;
203 SYSCTL_INT(_vm, OID_AUTO, pgcache_zone_max_pcpu,
204 CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pgcache_zone_max_pcpu, 0,
205 "Per-CPU page cache size");
206
207 /*
208 * The cache page zone is initialized later since we need to be able to allocate
209 * pages before UMA is fully initialized.
210 */
211 static void
212 vm_page_init_cache_zones(void *dummy __unused)
213 {
214 struct vm_domain *vmd;
215 struct vm_pgcache *pgcache;
216 int cache, domain, maxcache, pool;
217
218 TUNABLE_INT_FETCH("vm.pgcache_zone_max_pcpu", &pgcache_zone_max_pcpu);
219 maxcache = pgcache_zone_max_pcpu * mp_ncpus;
220 for (domain = 0; domain < vm_ndomains; domain++) {
221 vmd = VM_DOMAIN(domain);
222 for (pool = 0; pool < VM_NFREEPOOL; pool++) {
223 pgcache = &vmd->vmd_pgcache[pool];
224 pgcache->domain = domain;
225 pgcache->pool = pool;
226 pgcache->zone = uma_zcache_create("vm pgcache",
227 PAGE_SIZE, NULL, NULL, NULL, NULL,
228 vm_page_zone_import, vm_page_zone_release, pgcache,
229 UMA_ZONE_VM);
230
231 /*
232 * Limit each pool's zone to 0.1% of the pages in the
233 * domain.
234 */
235 cache = maxcache != 0 ? maxcache :
236 vmd->vmd_page_count / 1000;
237 uma_zone_set_maxcache(pgcache->zone, cache);
238 }
239 }
240 }
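/*
 * Worked example of the sizing above (illustrative numbers): with the
 * vm.pgcache_zone_max_pcpu tunable left at 0, a domain holding
 * 4,000,000 pages gets a per-pool cache cap of 4,000 items (0.1%).
 * If the tunable were set to 64 on a 32-CPU machine, every pool in
 * every domain would instead be capped at 64 * 32 = 2048 items.
 */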
241 SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
242
243 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
244 #if PAGE_SIZE == 32768
245 #ifdef CTASSERT
246 CTASSERT(sizeof(u_long) >= 8);
247 #endif
248 #endif
249
250 /*
251 * vm_set_page_size:
252 *
253 * Sets the page size, perhaps based upon the memory
254 * size. Must be called before any use of page-size
255 * dependent functions.
256 */
257 void
258 vm_set_page_size(void)
259 {
260 if (vm_cnt.v_page_size == 0)
261 vm_cnt.v_page_size = PAGE_SIZE;
262 if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
263 panic("vm_set_page_size: page size not a power of two");
264 }
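/*
 * The panic above relies on the usual power-of-two test: x is a power
 * of two iff ((x - 1) & x) == 0. For example, 4096 (0x1000) passes
 * because 0x0fff & 0x1000 == 0, while 4100 (0x1004) fails because
 * 0x1003 & 0x1004 == 0x1000 != 0.
 */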
265
266 /*
267 * vm_page_blacklist_next:
268 *
269 * Find the next entry in the provided string of blacklist
270 * addresses. Entries are separated by space, comma, or newline.
271 * If an invalid integer is encountered then the rest of the
272 * string is skipped. Updates the list pointer to the next
273 * character, or NULL if the string is exhausted or invalid.
274 */
275 static vm_paddr_t
276 vm_page_blacklist_next(char **list, char *end)
277 {
278 vm_paddr_t bad;
279 char *cp, *pos;
280
281 if (list == NULL || *list == NULL)
282 return (0);
283 if (**list == '\0') {
284 *list = NULL;
285 return (0);
286 }
287
288 /*
289 * If there's no end pointer then the buffer is coming from
290 * the kenv and we know it's null-terminated.
291 */
292 if (end == NULL)
293 end = *list + strlen(*list);
294
295 /* Ensure that strtoq() won't walk off the end */
296 if (*end != '\0') {
297 if (*end == '\n' || *end == ' ' || *end == ',')
298 *end = '\0';
299 else {
300 printf("Blacklist not terminated, skipping\n");
301 *list = NULL;
302 return (0);
303 }
304 }
305
306 for (pos = *list; *pos != '\0'; pos = cp) {
307 bad = strtoq(pos, &cp, 0);
308 if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
309 if (bad == 0) {
310 if (++cp < end)
311 continue;
312 else
313 break;
314 }
315 } else
316 break;
317 if (*cp == '\0' || ++cp >= end)
318 *list = NULL;
319 else
320 *list = cp;
321 return (trunc_page(bad));
322 }
323 printf("Garbage in RAM blacklist, skipping\n");
324 *list = NULL;
325 return (0);
326 }
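/*
 * Illustrative input for the parser above: a tunable such as
 *
 *	vm.blacklist="0x12345678,0x23456000 0x34567000"
 *
 * (consumed via the kern_getenv() call in vm_page_startup()) yields
 * 0x12345000, 0x23456000 and 0x34567000 in turn, the first having been
 * truncated to a page boundary by trunc_page() (assuming 4 KiB pages).
 */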
327
328 bool
329 vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
330 {
331 struct vm_domain *vmd;
332 vm_page_t m;
333 bool found;
334
335 m = vm_phys_paddr_to_vm_page(pa);
336 if (m == NULL)
337 return (true); /* page does not exist, no failure */
338
339 vmd = VM_DOMAIN(vm_phys_domain(pa));
340 vm_domain_free_lock(vmd);
341 found = vm_phys_unfree_page(pa);
342 vm_domain_free_unlock(vmd);
343 if (found) {
344 vm_domain_freecnt_inc(vmd, -1);
345 TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
346 if (verbose)
347 printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
348 }
349 return (found);
350 }
351
352 /*
353 * vm_page_blacklist_check:
354 *
355 * Iterate through the provided string of blacklist addresses, pulling
356 * each entry out of the physical allocator free list and putting it
357 * onto a list for reporting via the vm.page_blacklist sysctl.
358 */
359 static void
360 vm_page_blacklist_check(char *list, char *end)
361 {
362 vm_paddr_t pa;
363 char *next;
364
365 next = list;
366 while (next != NULL) {
367 if ((pa = vm_page_blacklist_next(&next, end)) == 0)
368 continue;
369 vm_page_blacklist_add(pa, bootverbose);
370 }
371 }
372
373 /*
374 * vm_page_blacklist_load:
375 *
376 * Search for a special module named "ram_blacklist". It'll be a
377 * plain text file provided by the user via the loader directive
378 * of the same name.
379 */
380 static void
381 vm_page_blacklist_load(char **list, char **end)
382 {
383 void *mod;
384 u_char *ptr;
385 u_int len;
386
387 mod = NULL;
388 ptr = NULL;
389
390 mod = preload_search_by_type("ram_blacklist");
391 if (mod != NULL) {
392 ptr = preload_fetch_addr(mod);
393 len = preload_fetch_size(mod);
394 }
395 *list = ptr;
396 if (ptr != NULL)
397 *end = ptr + len;
398 else
399 *end = NULL;
400 return;
401 }
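/*
 * One plausible way to supply that file from loader.conf(5), using the
 * loader's generic module preloading knobs (a sketch; the file name is
 * arbitrary):
 *
 *	ram_blacklist_load="YES"
 *	ram_blacklist_name="/boot/blacklist.txt"
 *	ram_blacklist_type="ram_blacklist"
 *
 * The file contents are plain text in the format parsed by
 * vm_page_blacklist_next() above.
 */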
402
403 static int
404 sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
405 {
406 vm_page_t m;
407 struct sbuf sbuf;
408 int error, first;
409
410 first = 1;
411 error = sysctl_wire_old_buffer(req, 0);
412 if (error != 0)
413 return (error);
414 sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
415 TAILQ_FOREACH(m, &blacklist_head, listq) {
416 sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
417 (uintmax_t)m->phys_addr);
418 first = 0;
419 }
420 error = sbuf_finish(&sbuf);
421 sbuf_delete(&sbuf);
422 return (error);
423 }
424
425 /*
426 * Initialize a dummy page for use in scans of the specified paging queue.
427 * In principle, this function only needs to set the flag PG_MARKER.
428 * Nonetheless, it write busies the page as a safety precaution.
429 */
430 void
431 vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags)
432 {
433
434 bzero(marker, sizeof(*marker));
435 marker->flags = PG_MARKER;
436 marker->a.flags = aflags;
437 marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
438 marker->a.queue = queue;
439 }
440
441 static void
442 vm_page_domain_init(int domain)
443 {
444 struct vm_domain *vmd;
445 struct vm_pagequeue *pq;
446 int i;
447
448 vmd = VM_DOMAIN(domain);
449 bzero(vmd, sizeof(*vmd));
450 *__DECONST(const char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
451 "vm inactive pagequeue";
452 *__DECONST(const char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
453 "vm active pagequeue";
454 *__DECONST(const char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
455 "vm laundry pagequeue";
456 *__DECONST(const char **,
457 &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
458 "vm unswappable pagequeue";
459 vmd->vmd_domain = domain;
460 vmd->vmd_page_count = 0;
461 vmd->vmd_free_count = 0;
462 vmd->vmd_segs = 0;
463 vmd->vmd_oom = false;
464 vmd->vmd_helper_threads_enabled = true;
465 for (i = 0; i < PQ_COUNT; i++) {
466 pq = &vmd->vmd_pagequeues[i];
467 TAILQ_INIT(&pq->pq_pl);
468 mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
469 MTX_DEF | MTX_DUPOK);
470 pq->pq_pdpages = 0;
471 vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
472 }
473 mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
474 mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
475 snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);
476
477 /*
478 * inacthead is used to provide FIFO ordering for LRU-bypassing
479 * insertions.
480 */
481 vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
482 TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
483 &vmd->vmd_inacthead, plinks.q);
484
485 /*
486 * The clock pages are used to implement active queue scanning without
487 * requeues. Scans start at clock[0], which is advanced after the scan
488 * ends. When the two clock hands meet, they are reset and scanning
489 * resumes from the head of the queue.
490 */
491 vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
492 vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
493 TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
494 &vmd->vmd_clock[0], plinks.q);
495 TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
496 &vmd->vmd_clock[1], plinks.q);
497 }
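/*
 * Immediately after vm_page_domain_init() the two queues touched above
 * contain only their markers; an illustrative snapshot:
 *
 *	PQ_INACTIVE: inacthead
 *	PQ_ACTIVE:   clock[0], clock[1]
 *
 * Pages enqueued later are placed relative to these markers by the
 * page queue code; the markers themselves are never reclaimed.
 */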
498
499 /*
500 * Initialize a physical page in preparation for adding it to the free
501 * lists.
502 */
503 void
504 vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind, int pool)
505 {
506 m->object = NULL;
507 m->ref_count = 0;
508 m->busy_lock = VPB_FREED;
509 m->flags = m->a.flags = 0;
510 m->phys_addr = pa;
511 m->a.queue = PQ_NONE;
512 m->psind = 0;
513 m->segind = segind;
514 m->order = VM_NFREEORDER;
515 m->pool = pool;
516 m->valid = m->dirty = 0;
517 pmap_page_init(m);
518 }
519
520 #ifndef PMAP_HAS_PAGE_ARRAY
521 static vm_paddr_t
522 vm_page_array_alloc(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t page_range)
523 {
524 vm_paddr_t new_end;
525
526 /*
527 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
528 * However, because this page is allocated from KVM, out-of-bounds
529 * accesses using the direct map will not be trapped.
530 */
531 *vaddr += PAGE_SIZE;
532
533 /*
534 * Allocate physical memory for the page structures, and map it.
535 */
536 new_end = trunc_page(end - page_range * sizeof(struct vm_page));
537 vm_page_array = (vm_page_t)pmap_map(vaddr, new_end, end,
538 VM_PROT_READ | VM_PROT_WRITE);
539 vm_page_array_size = page_range;
540
541 return (new_end);
542 }
543 #endif
544
545 /*
546 * vm_page_startup:
547 *
548 * Initializes the resident memory module. Allocates physical memory for
549 * bootstrapping UMA and some data structures that are used to manage
550 * physical pages. Initializes these structures, and populates the free
551 * page queues.
552 */
553 vm_offset_t
554 vm_page_startup(vm_offset_t vaddr)
555 {
556 struct vm_phys_seg *seg;
557 struct vm_domain *vmd;
558 vm_page_t m;
559 char *list, *listend;
560 vm_paddr_t end, high_avail, low_avail, new_end, size;
561 vm_paddr_t page_range __unused;
562 vm_paddr_t last_pa, pa, startp, endp;
563 u_long pagecount;
564 #if MINIDUMP_PAGE_TRACKING
565 u_long vm_page_dump_size;
566 #endif
567 int biggestone, i, segind;
568 #ifdef WITNESS
569 vm_offset_t mapped;
570 int witness_size;
571 #endif
572 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
573 long ii;
574 #endif
575 int pool;
576 #ifdef VM_FREEPOOL_LAZYINIT
577 int lazyinit;
578 #endif
579
580 vaddr = round_page(vaddr);
581
582 vm_phys_early_startup();
583 biggestone = vm_phys_avail_largest();
584 end = phys_avail[biggestone+1];
585
586 /*
587 * Initialize the page and queue locks.
588 */
589 mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
590 for (i = 0; i < PA_LOCK_COUNT; i++)
591 mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
592 for (i = 0; i < vm_ndomains; i++)
593 vm_page_domain_init(i);
594
595 new_end = end;
596 #ifdef WITNESS
597 witness_size = round_page(witness_startup_count());
598 new_end -= witness_size;
599 mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
600 VM_PROT_READ | VM_PROT_WRITE);
601 bzero((void *)mapped, witness_size);
602 witness_startup((void *)mapped);
603 #endif
604
605 #if MINIDUMP_PAGE_TRACKING
606 /*
607 * Allocate a bitmap to indicate that a random physical page
608 * needs to be included in a minidump.
609 *
610 * The amd64 port needs this to indicate which direct map pages
611 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
612 *
613 * However, i386 still needs this workspace internally within the
614 * minidump code. In theory, they are not needed on i386, but are
615 * included should the sf_buf code decide to use them.
616 */
617 last_pa = 0;
618 vm_page_dump_pages = 0;
619 for (i = 0; dump_avail[i + 1] != 0; i += 2) {
620 vm_page_dump_pages += howmany(dump_avail[i + 1], PAGE_SIZE) -
621 dump_avail[i] / PAGE_SIZE;
622 if (dump_avail[i + 1] > last_pa)
623 last_pa = dump_avail[i + 1];
624 }
625 vm_page_dump_size = round_page(BITSET_SIZE(vm_page_dump_pages));
626 new_end -= vm_page_dump_size;
627 vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
628 new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
629 bzero((void *)vm_page_dump, vm_page_dump_size);
630 #if MINIDUMP_STARTUP_PAGE_TRACKING
631 /*
632 * Include the UMA bootstrap pages, witness pages and vm_page_dump
633 * in a crash dump. When pmap_map() uses the direct map, they are
634 * not automatically included.
635 */
636 for (pa = new_end; pa < end; pa += PAGE_SIZE)
637 dump_add_page(pa);
638 #endif
639 #else
640 (void)last_pa;
641 #endif
642 phys_avail[biggestone + 1] = new_end;
643 #ifdef __amd64__
644 /*
645 * Request that the physical pages underlying the message buffer be
646 * included in a crash dump. Since the message buffer is accessed
647 * through the direct map, they are not automatically included.
648 */
649 pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
650 last_pa = pa + round_page(msgbufsize);
651 while (pa < last_pa) {
652 dump_add_page(pa);
653 pa += PAGE_SIZE;
654 }
655 #else
656 (void)pa;
657 #endif
658 /*
659 * Compute the number of pages of memory that will be available for
660 * use, taking into account the overhead of a page structure per page.
661 * In other words, solve
662 * "available physical memory" - round_page(page_range *
663 * sizeof(struct vm_page)) = page_range * PAGE_SIZE
664 * for page_range.
665 */
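/*
 * For a rough feel of that equation (illustrative only, assuming 4 KiB
 * pages and a 104-byte struct vm_page): each usable page costs
 * PAGE_SIZE + 104 bytes, so 1 GiB of physical memory yields about
 * 1073741824 / 4200 ~= 255,000 usable pages rather than 262,144.
 */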
666 low_avail = phys_avail[0];
667 high_avail = phys_avail[1];
668 for (i = 0; i < vm_phys_nsegs; i++) {
669 if (vm_phys_segs[i].start < low_avail)
670 low_avail = vm_phys_segs[i].start;
671 if (vm_phys_segs[i].end > high_avail)
672 high_avail = vm_phys_segs[i].end;
673 }
674 /* Skip the first chunk. It is already accounted for. */
675 for (i = 2; phys_avail[i + 1] != 0; i += 2) {
676 if (phys_avail[i] < low_avail)
677 low_avail = phys_avail[i];
678 if (phys_avail[i + 1] > high_avail)
679 high_avail = phys_avail[i + 1];
680 }
681 first_page = low_avail / PAGE_SIZE;
682 #ifdef VM_PHYSSEG_SPARSE
683 size = 0;
684 for (i = 0; i < vm_phys_nsegs; i++)
685 size += vm_phys_segs[i].end - vm_phys_segs[i].start;
686 for (i = 0; phys_avail[i + 1] != 0; i += 2)
687 size += phys_avail[i + 1] - phys_avail[i];
688 #elif defined(VM_PHYSSEG_DENSE)
689 size = high_avail - low_avail;
690 #else
691 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
692 #endif
693
694 #ifdef PMAP_HAS_PAGE_ARRAY
695 pmap_page_array_startup(size / PAGE_SIZE);
696 biggestone = vm_phys_avail_largest();
697 end = new_end = phys_avail[biggestone + 1];
698 #else
699 #ifdef VM_PHYSSEG_DENSE
700 /*
701 * In the VM_PHYSSEG_DENSE case, the number of pages can account for
702 * the overhead of a page structure per page only if vm_page_array is
703 * allocated from the last physical memory chunk. Otherwise, we must
704 * allocate page structures representing the physical memory
705 * underlying vm_page_array, even though they will not be used.
706 */
707 if (new_end != high_avail)
708 page_range = size / PAGE_SIZE;
709 else
710 #endif
711 {
712 page_range = size / (PAGE_SIZE + sizeof(struct vm_page));
713
714 /*
715 * If the partial bytes remaining are large enough for
716 * a page (PAGE_SIZE) without a corresponding
717 * 'struct vm_page', then new_end will contain an
718 * extra page after subtracting the length of the VM
719 * page array. Compensate by subtracting an extra
720 * page from new_end.
721 */
722 if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
723 if (new_end == high_avail)
724 high_avail -= PAGE_SIZE;
725 new_end -= PAGE_SIZE;
726 }
727 }
728 end = new_end;
729 new_end = vm_page_array_alloc(&vaddr, end, page_range);
730 #endif
731
732 #if VM_NRESERVLEVEL > 0
733 /*
734 * Allocate physical memory for the reservation management system's
735 * data structures, and map it.
736 */
737 new_end = vm_reserv_startup(&vaddr, new_end);
738 #endif
739 #if MINIDUMP_PAGE_TRACKING && MINIDUMP_STARTUP_PAGE_TRACKING
740 /*
741 * Include vm_page_array and vm_reserv_array in a crash dump.
742 */
743 for (pa = new_end; pa < end; pa += PAGE_SIZE)
744 dump_add_page(pa);
745 #endif
746 phys_avail[biggestone + 1] = new_end;
747
748 /*
749 * Add physical memory segments corresponding to the available
750 * physical pages.
751 */
752 for (i = 0; phys_avail[i + 1] != 0; i += 2)
753 if (vm_phys_avail_size(i) != 0)
754 vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
755
756 /*
757 * Initialize the physical memory allocator.
758 */
759 vm_phys_init();
760
761 pool = VM_FREEPOOL_DEFAULT;
762 #ifdef VM_FREEPOOL_LAZYINIT
763 lazyinit = 1;
764 TUNABLE_INT_FETCH("debug.vm.lazy_page_init", &lazyinit);
765 if (lazyinit)
766 pool = VM_FREEPOOL_LAZYINIT;
767 #endif
768
769 /*
770 * Initialize the page structures and add every available page to the
771 * physical memory allocator's free lists.
772 */
773 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
774 for (ii = 0; ii < vm_page_array_size; ii++) {
775 m = &vm_page_array[ii];
776 vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0,
777 VM_FREEPOOL_DEFAULT);
778 m->flags = PG_FICTITIOUS;
779 }
780 #endif
781 vm_cnt.v_page_count = 0;
782 for (segind = 0; segind < vm_phys_nsegs; segind++) {
783 seg = &vm_phys_segs[segind];
784
785 /*
786 * Initialize pages not covered by phys_avail[], since they
787 * might be freed to the allocator at some future point, e.g.,
788 * by kmem_bootstrap_free().
789 */
790 startp = seg->start;
791 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
792 if (startp >= seg->end)
793 break;
794 if (phys_avail[i + 1] < startp)
795 continue;
796 if (phys_avail[i] <= startp) {
797 startp = phys_avail[i + 1];
798 continue;
799 }
800 m = vm_phys_seg_paddr_to_vm_page(seg, startp);
801 for (endp = MIN(phys_avail[i], seg->end);
802 startp < endp; startp += PAGE_SIZE, m++) {
803 vm_page_init_page(m, startp, segind,
804 VM_FREEPOOL_DEFAULT);
805 }
806 }
807
808 /*
809 * Add the segment's pages that are covered by one of
810 * phys_avail's ranges to the free lists.
811 */
812 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
813 if (seg->end <= phys_avail[i] ||
814 seg->start >= phys_avail[i + 1])
815 continue;
816
817 startp = MAX(seg->start, phys_avail[i]);
818 endp = MIN(seg->end, phys_avail[i + 1]);
819 pagecount = (u_long)atop(endp - startp);
820 if (pagecount == 0)
821 continue;
822
823 /*
824 * If lazy vm_page initialization is not enabled, simply
825 * initialize all of the pages in the segment covered by
826 * phys_avail. Otherwise, initialize only the first
827 * page of each run of free pages handed to the vm_phys
828 * allocator, which in turn defers initialization of
829 * pages until they are needed.
830 *
831 * This avoids blocking the boot process for long
832 * periods, which may be relevant for VMs (which ought
833 * to boot as quickly as possible) and/or systems with
834 * large amounts of physical memory.
835 */
836 m = vm_phys_seg_paddr_to_vm_page(seg, startp);
837 vm_page_init_page(m, startp, segind, pool);
838 if (pool == VM_FREEPOOL_DEFAULT) {
839 for (u_long j = 1; j < pagecount; j++) {
840 vm_page_init_page(&m[j],
841 startp + ptoa((vm_paddr_t)j),
842 segind, pool);
843 }
844 }
845 vmd = VM_DOMAIN(seg->domain);
846 vm_domain_free_lock(vmd);
847 vm_phys_enqueue_contig(m, pool, pagecount);
848 vm_domain_free_unlock(vmd);
849 vm_domain_freecnt_inc(vmd, pagecount);
850 vm_cnt.v_page_count += (u_int)pagecount;
851 vmd->vmd_page_count += (u_int)pagecount;
852 vmd->vmd_segs |= 1UL << segind;
853 }
854 }
855
856 /*
857 * Remove blacklisted pages from the physical memory allocator.
858 */
859 TAILQ_INIT(&blacklist_head);
860 vm_page_blacklist_load(&list, &listend);
861 vm_page_blacklist_check(list, listend);
862
863 list = kern_getenv("vm.blacklist");
864 vm_page_blacklist_check(list, NULL);
865
866 freeenv(list);
867 #if VM_NRESERVLEVEL > 0
868 /*
869 * Initialize the reservation management system.
870 */
871 vm_reserv_init();
872 #endif
873
874 return (vaddr);
875 }
876
877 void
878 vm_page_reference(vm_page_t m)
879 {
880
881 vm_page_aflag_set(m, PGA_REFERENCED);
882 }
883
884 /*
885 * vm_page_trybusy
886 *
887 * Helper routine for grab functions to trylock busy.
888 *
889 * Returns true on success and false on failure.
890 */
891 static bool
892 vm_page_trybusy(vm_page_t m, int allocflags)
893 {
894
895 if ((allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0)
896 return (vm_page_trysbusy(m));
897 else
898 return (vm_page_tryxbusy(m));
899 }
900
901 /*
902 * vm_page_tryacquire
903 *
904 * Helper routine for grab functions to trylock busy and wire.
905 *
906 * Returns true on success and false on failure.
907 */
908 static inline bool
909 vm_page_tryacquire(vm_page_t m, int allocflags)
910 {
911 bool locked;
912
913 locked = vm_page_trybusy(m, allocflags);
914 if (locked && (allocflags & VM_ALLOC_WIRED) != 0)
915 vm_page_wire(m);
916 return (locked);
917 }
918
919 /*
920 * vm_page_busy_acquire:
921 *
922 * Acquire the busy lock as described by VM_ALLOC_* flags. Will loop
923 * and drop the object lock if necessary.
924 */
925 bool
926 vm_page_busy_acquire(vm_page_t m, int allocflags)
927 {
928 vm_object_t obj;
929 bool locked;
930
931 /*
932 * The page-specific object must be cached because page
933 * identity can change during the sleep, causing the
934 * re-lock of a different object.
935 * It is assumed that a reference to the object is already
936 * held by the callers.
937 */
938 obj = atomic_load_ptr(&m->object);
939 for (;;) {
940 if (vm_page_tryacquire(m, allocflags))
941 return (true);
942 if ((allocflags & VM_ALLOC_NOWAIT) != 0)
943 return (false);
944 if (obj != NULL)
945 locked = VM_OBJECT_WOWNED(obj);
946 else
947 locked = false;
948 MPASS(locked || vm_page_wired(m));
949 if (_vm_page_busy_sleep(obj, m, m->pindex, "vmpba", allocflags,
950 locked) && locked)
951 VM_OBJECT_WLOCK(obj);
952 if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
953 return (false);
954 KASSERT(m->object == obj || m->object == NULL,
955 ("vm_page_busy_acquire: page %p does not belong to %p",
956 m, obj));
957 }
958 }
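/*
 * Typical usage sketch for the function above (illustrative, not taken
 * from a particular caller): with a reference to the object held,
 *
 *	if (vm_page_busy_acquire(m, VM_ALLOC_NOWAIT)) {
 *		... operate on the exclusively busied page ...
 *		vm_page_xunbusy(m);
 *	}
 *
 * Without VM_ALLOC_NOWAIT the call may sleep and transiently drop the
 * object lock; with VM_ALLOC_SBUSY the page is shared busied instead
 * and is released with vm_page_sunbusy().
 */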
959
960 /*
961 * vm_page_busy_downgrade:
962 *
963 * Downgrade an exclusive busy page into a single shared busy page.
964 */
965 void
966 vm_page_busy_downgrade(vm_page_t m)
967 {
968 u_int x;
969
970 vm_page_assert_xbusied(m);
971
972 x = vm_page_busy_fetch(m);
973 for (;;) {
974 if (atomic_fcmpset_rel_int(&m->busy_lock,
975 &x, VPB_SHARERS_WORD(1)))
976 break;
977 }
978 if ((x & VPB_BIT_WAITERS) != 0)
979 wakeup(m);
980 }
981
982 /*
983 *
984 * vm_page_busy_tryupgrade:
985 *
986 * Attempt to upgrade a single shared busy into an exclusive busy.
987 */
988 int
989 vm_page_busy_tryupgrade(vm_page_t m)
990 {
991 u_int ce, x;
992
993 vm_page_assert_sbusied(m);
994
995 x = vm_page_busy_fetch(m);
996 ce = VPB_CURTHREAD_EXCLUSIVE;
997 for (;;) {
998 if (VPB_SHARERS(x) > 1)
999 return (0);
1000 KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
1001 ("vm_page_busy_tryupgrade: invalid lock state"));
1002 if (!atomic_fcmpset_acq_int(&m->busy_lock, &x,
1003 ce | (x & VPB_BIT_WAITERS)))
1004 continue;
1005 return (1);
1006 }
1007 }
1008
1009 /*
1010 * vm_page_sbusied:
1011 *
1012 * Return a positive value if the page is shared busied, 0 otherwise.
1013 */
1014 int
1015 vm_page_sbusied(vm_page_t m)
1016 {
1017 u_int x;
1018
1019 x = vm_page_busy_fetch(m);
1020 return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
1021 }
1022
1023 /*
1024 * vm_page_sunbusy:
1025 *
1026 * Shared unbusy a page.
1027 */
1028 void
1029 vm_page_sunbusy(vm_page_t m)
1030 {
1031 u_int x;
1032
1033 vm_page_assert_sbusied(m);
1034
1035 x = vm_page_busy_fetch(m);
1036 for (;;) {
1037 KASSERT(x != VPB_FREED,
1038 ("vm_page_sunbusy: Unlocking freed page."));
1039 if (VPB_SHARERS(x) > 1) {
1040 if (atomic_fcmpset_int(&m->busy_lock, &x,
1041 x - VPB_ONE_SHARER))
1042 break;
1043 continue;
1044 }
1045 KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
1046 ("vm_page_sunbusy: invalid lock state"));
1047 if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
1048 continue;
1049 if ((x & VPB_BIT_WAITERS) == 0)
1050 break;
1051 wakeup(m);
1052 break;
1053 }
1054 }
1055
1056 /*
1057 * vm_page_busy_sleep:
1058 *
1059 * Sleep if the page is busy, using the page pointer as wchan.
1060 * This is used to implement the hard-path of the busying mechanism.
1061 *
1062 * If VM_ALLOC_IGN_SBUSY is specified in allocflags, the function
1063 * will not sleep if the page is shared-busy.
1064 *
1065 * The object lock must be held on entry.
1066 *
1067 * Returns true if it slept and dropped the object lock, or false
1068 * if there was no sleep and the lock is still held.
1069 */
1070 bool
1071 vm_page_busy_sleep(vm_page_t m, const char *wmesg, int allocflags)
1072 {
1073 vm_object_t obj;
1074
1075 obj = m->object;
1076 VM_OBJECT_ASSERT_LOCKED(obj);
1077
1078 return (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, allocflags,
1079 true));
1080 }
1081
1082 /*
1083 * vm_page_busy_sleep_unlocked:
1084 *
1085 * Sleep if the page is busy, using the page pointer as wchan.
1086 * This is used to implement the hard-path of the busying mechanism.
1087 *
1088 * If VM_ALLOC_IGN_SBUSY is specified in allocflags, the function
1089 * will not sleep if the page is shared-busy.
1090 *
1091 * The object lock must not be held on entry. The operation will
1092 * return if the page changes identity.
1093 */
1094 void
1095 vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
1096 const char *wmesg, int allocflags)
1097 {
1098 VM_OBJECT_ASSERT_UNLOCKED(obj);
1099
1100 (void)_vm_page_busy_sleep(obj, m, pindex, wmesg, allocflags, false);
1101 }
1102
1103 /*
1104 * _vm_page_busy_sleep:
1105 *
1106 * Internal busy sleep function. Verifies the page identity and
1107 * lockstate against parameters. Returns true if it sleeps and
1108 * false otherwise.
1109 *
1110 * allocflags uses VM_ALLOC_* flags to specify the lock required.
1111 *
1112 * If locked is true the lock will be dropped for any true returns
1113 * and held for any false returns.
1114 */
1115 static bool
1116 _vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
1117 const char *wmesg, int allocflags, bool locked)
1118 {
1119 bool xsleep;
1120 u_int x;
1121
1122 /*
1123 * If the object is busy we must wait for that to drain to zero
1124 * before trying the page again.
1125 */
1126 if (obj != NULL && vm_object_busied(obj)) {
1127 if (locked)
1128 VM_OBJECT_DROP(obj);
1129 vm_object_busy_wait(obj, wmesg);
1130 return (true);
1131 }
1132
1133 if (!vm_page_busied(m))
1134 return (false);
1135
1136 xsleep = (allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0;
1137 sleepq_lock(m);
1138 x = vm_page_busy_fetch(m);
1139 do {
1140 /*
1141 * If the page changes objects or becomes unlocked we can
1142 * simply return.
1143 */
1144 if (x == VPB_UNBUSIED ||
1145 (xsleep && (x & VPB_BIT_SHARED) != 0) ||
1146 m->object != obj || m->pindex != pindex) {
1147 sleepq_release(m);
1148 return (false);
1149 }
1150 if ((x & VPB_BIT_WAITERS) != 0)
1151 break;
1152 } while (!atomic_fcmpset_int(&m->busy_lock, &x, x | VPB_BIT_WAITERS));
1153 if (locked)
1154 VM_OBJECT_DROP(obj);
1155 DROP_GIANT();
1156 sleepq_add(m, NULL, wmesg, 0, 0);
1157 sleepq_wait(m, PVM);
1158 PICKUP_GIANT();
1159 return (true);
1160 }
1161
1162 /*
1163 * vm_page_trysbusy:
1164 *
1165 * Try to shared busy a page.
1166 * If the operation succeeds 1 is returned otherwise 0.
1167 * The operation never sleeps.
1168 */
1169 int
1170 vm_page_trysbusy(vm_page_t m)
1171 {
1172 vm_object_t obj;
1173 u_int x;
1174
1175 obj = m->object;
1176 x = vm_page_busy_fetch(m);
1177 for (;;) {
1178 if ((x & VPB_BIT_SHARED) == 0)
1179 return (0);
1180 /*
1181 * Reduce the window for transient busies that will trigger
1182 * false negatives in vm_page_ps_test().
1183 */
1184 if (obj != NULL && vm_object_busied(obj))
1185 return (0);
1186 if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
1187 x + VPB_ONE_SHARER))
1188 break;
1189 }
1190
1191 /* Refetch the object now that we're guaranteed that it is stable. */
1192 obj = m->object;
1193 if (obj != NULL && vm_object_busied(obj)) {
1194 vm_page_sunbusy(m);
1195 return (0);
1196 }
1197 return (1);
1198 }
1199
1200 /*
1201 * vm_page_tryxbusy:
1202 *
1203 * Try to exclusive busy a page.
1204 * If the operation succeeds 1 is returned otherwise 0.
1205 * The operation never sleeps.
1206 */
1207 int
1208 vm_page_tryxbusy(vm_page_t m)
1209 {
1210 vm_object_t obj;
1211
1212 if (atomic_cmpset_acq_int(&m->busy_lock, VPB_UNBUSIED,
1213 VPB_CURTHREAD_EXCLUSIVE) == 0)
1214 return (0);
1215
1216 obj = m->object;
1217 if (obj != NULL && vm_object_busied(obj)) {
1218 vm_page_xunbusy(m);
1219 return (0);
1220 }
1221 return (1);
1222 }
1223
1224 static void
1225 vm_page_xunbusy_hard_tail(vm_page_t m)
1226 {
1227 atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
1228 /* Wake the waiter. */
1229 wakeup(m);
1230 }
1231
1232 /*
1233 * vm_page_xunbusy_hard:
1234 *
1235 * Called when unbusy has failed because there is a waiter.
1236 */
1237 void
1238 vm_page_xunbusy_hard(vm_page_t m)
1239 {
1240 vm_page_assert_xbusied(m);
1241 vm_page_xunbusy_hard_tail(m);
1242 }
1243
1244 void
1245 vm_page_xunbusy_hard_unchecked(vm_page_t m)
1246 {
1247 vm_page_assert_xbusied_unchecked(m);
1248 vm_page_xunbusy_hard_tail(m);
1249 }
1250
1251 static void
1252 vm_page_busy_free(vm_page_t m)
1253 {
1254 u_int x;
1255
1256 atomic_thread_fence_rel();
1257 x = atomic_swap_int(&m->busy_lock, VPB_FREED);
1258 if ((x & VPB_BIT_WAITERS) != 0)
1259 wakeup(m);
1260 }
1261
1262 /*
1263 * vm_page_unhold_pages:
1264 *
1265 * Unhold each of the pages that is referenced by the given array.
1266 */
1267 void
1268 vm_page_unhold_pages(vm_page_t *ma, int count)
1269 {
1270
1271 for (; count != 0; count--) {
1272 vm_page_unwire(*ma, PQ_ACTIVE);
1273 ma++;
1274 }
1275 }
1276
1277 vm_page_t
1278 PHYS_TO_VM_PAGE(vm_paddr_t pa)
1279 {
1280 vm_page_t m;
1281
1282 #ifdef VM_PHYSSEG_SPARSE
1283 m = vm_phys_paddr_to_vm_page(pa);
1284 if (m == NULL)
1285 m = vm_phys_fictitious_to_vm_page(pa);
1286 return (m);
1287 #elif defined(VM_PHYSSEG_DENSE)
1288 long pi;
1289
1290 pi = atop(pa);
1291 if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
1292 m = &vm_page_array[pi - first_page];
1293 return (m);
1294 }
1295 return (vm_phys_fictitious_to_vm_page(pa));
1296 #else
1297 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
1298 #endif
1299 }
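/*
 * Worked example of the VM_PHYSSEG_DENSE branch above (illustrative,
 * assuming 4 KiB pages): with first_page == 0x100 and pa == 0x234567,
 * pi == atop(pa) == 0x234, so the function returns
 * &vm_page_array[0x234 - 0x100], i.e. &vm_page_array[0x134].
 */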
1300
1301 /*
1302 * vm_page_getfake:
1303 *
1304 * Create a fictitious page with the specified physical address and
1305 * memory attribute. The memory attribute is the only machine-
1306 * dependent aspect of a fictitious page that must be initialized.
1307 */
1308 vm_page_t
1309 vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
1310 {
1311 vm_page_t m;
1312
1313 m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
1314 vm_page_initfake(m, paddr, memattr);
1315 return (m);
1316 }
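/*
 * Sketch of how a caller might use a fictitious page (the physical
 * address and memory attribute below are made up for illustration):
 *
 *	vm_page_t fp;
 *
 *	fp = vm_page_getfake(0xd0000000, VM_MEMATTR_UNCACHEABLE);
 *	... expose fp through a device pager object ...
 *	vm_page_putfake(fp);
 */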
1317
1318 void
1319 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
1320 {
1321
1322 if ((m->flags & PG_FICTITIOUS) != 0) {
1323 /*
1324 * The page's memattr might have changed since the
1325 * previous initialization. Update the pmap to the
1326 * new memattr.
1327 */
1328 goto memattr;
1329 }
1330 m->phys_addr = paddr;
1331 m->a.queue = PQ_NONE;
1332 /* Fictitious pages don't use "segind". */
1333 m->flags = PG_FICTITIOUS;
1334 /* Fictitious pages don't use "order" or "pool". */
1335 m->oflags = VPO_UNMANAGED;
1336 m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
1337 /* Fictitious pages are unevictable. */
1338 m->ref_count = 1;
1339 pmap_page_init(m);
1340 memattr:
1341 pmap_page_set_memattr(m, memattr);
1342 }
1343
1344 /*
1345 * vm_page_putfake:
1346 *
1347 * Release a fictitious page.
1348 */
1349 void
1350 vm_page_putfake(vm_page_t m)
1351 {
1352
1353 KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
1354 KASSERT((m->flags & PG_FICTITIOUS) != 0,
1355 ("vm_page_putfake: bad page %p", m));
1356 vm_page_assert_xbusied(m);
1357 vm_page_busy_free(m);
1358 uma_zfree(fakepg_zone, m);
1359 }
1360
1361 /*
1362 * vm_page_updatefake:
1363 *
1364 * Update the given fictitious page to the specified physical address and
1365 * memory attribute.
1366 */
1367 void
1368 vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
1369 {
1370
1371 KASSERT((m->flags & PG_FICTITIOUS) != 0,
1372 ("vm_page_updatefake: bad page %p", m));
1373 m->phys_addr = paddr;
1374 pmap_page_set_memattr(m, memattr);
1375 }
1376
1377 /*
1378 * vm_page_free:
1379 *
1380 * Free a page.
1381 */
1382 void
1383 vm_page_free(vm_page_t m)
1384 {
1385
1386 m->flags &= ~PG_ZERO;
1387 vm_page_free_toq(m);
1388 }
1389
1390 /*
1391 * vm_page_free_zero:
1392 *
1393 * Free a page to the zeroed-pages queue
1394 */
1395 void
1396 vm_page_free_zero(vm_page_t m)
1397 {
1398
1399 m->flags |= PG_ZERO;
1400 vm_page_free_toq(m);
1401 }
1402
1403 /*
1404 * Unbusy and handle the page queueing for a page from a getpages request that
1405 * was optionally read ahead or behind.
1406 */
1407 void
1408 vm_page_readahead_finish(vm_page_t m)
1409 {
1410
1411 /* We shouldn't put invalid pages on queues. */
1412 KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m));
1413
1414 /*
1415 * Since the page is not the one actually needed, whether it should
1416 * be activated or deactivated is not obvious. Empirical results
1417 * have shown that deactivating the page is usually the best choice,
1418 * unless the page is wanted by another thread.
1419 */
1420 if ((vm_page_busy_fetch(m) & VPB_BIT_WAITERS) != 0)
1421 vm_page_activate(m);
1422 else
1423 vm_page_deactivate(m);
1424 vm_page_xunbusy_unchecked(m);
1425 }
1426
1427 /*
1428 * Destroy the identity of an invalid page and free it if possible.
1429 * This is intended to be used when reading a page from backing store fails.
1430 */
1431 void
1432 vm_page_free_invalid(vm_page_t m)
1433 {
1434
1435 KASSERT(vm_page_none_valid(m), ("page %p is valid", m));
1436 KASSERT(!pmap_page_is_mapped(m), ("page %p is mapped", m));
1437 KASSERT(m->object != NULL, ("page %p has no object", m));
1438 VM_OBJECT_ASSERT_WLOCKED(m->object);
1439
1440 /*
1441 * We may be attempting to free the page as part of the handling for an
1442 * I/O error, in which case the page was xbusied by a different thread.
1443 */
1444 vm_page_xbusy_claim(m);
1445
1446 /*
1447 * If someone has wired this page while the object lock
1448 * was not held, then the thread that unwires is responsible
1449 * for freeing the page. Otherwise just free the page now.
1450 * The wire count of this unmapped page cannot change while
1451 * we have the page xbusy and the page's object wlocked.
1452 */
1453 if (vm_page_remove(m))
1454 vm_page_free(m);
1455 }
1456
1457 /*
1458 * vm_page_dirty_KBI: [ internal use only ]
1459 *
1460 * Set all bits in the page's dirty field.
1461 *
1462 * The object containing the specified page must be locked if the
1463 * call is made from the machine-independent layer.
1464 *
1465 * See vm_page_clear_dirty_mask().
1466 *
1467 * This function should only be called by vm_page_dirty().
1468 */
1469 void
1470 vm_page_dirty_KBI(vm_page_t m)
1471 {
1472
1473 /* Refer to this operation by its public name. */
1474 KASSERT(vm_page_all_valid(m), ("vm_page_dirty: page is invalid!"));
1475 m->dirty = VM_PAGE_BITS_ALL;
1476 }
1477
1478 /*
1479 * Insert the given page into the given object at the given pindex. mpred is
1480 * used for memq linkage. From vm_page_insert, lookup is true, mpred is
1481 * initially NULL, and this procedure looks it up. From vm_page_insert_after
1482 * and vm_page_iter_insert, lookup is false and mpred is known to the caller
1483 * to be valid, and may be NULL if this will be the page with the lowest
1484 * pindex.
1485 *
1486 * The procedure is marked __always_inline to suggest to the compiler to
1487 * eliminate the lookup parameter and the associated alternate branch.
1488 */
1489 static __always_inline int
1490 vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
1491 struct pctrie_iter *pages, bool iter, vm_page_t mpred, bool lookup)
1492 {
1493 int error;
1494
1495 VM_OBJECT_ASSERT_WLOCKED(object);
1496 KASSERT(m->object == NULL,
1497 ("vm_page_insert: page %p already inserted", m));
1498
1499 /*
1500 * Record the object/offset pair in this page.
1501 */
1502 m->object = object;
1503 m->pindex = pindex;
1504 m->ref_count |= VPRC_OBJREF;
1505
1506 /*
1507 * Add this page to the object's radix tree, and look up mpred if
1508 * needed.
1509 */
1510 if (iter) {
1511 KASSERT(!lookup, ("%s: cannot lookup mpred", __func__));
1512 error = vm_radix_iter_insert(pages, m);
1513 } else if (lookup)
1514 error = vm_radix_insert_lookup_lt(&object->rtree, m, &mpred);
1515 else
1516 error = vm_radix_insert(&object->rtree, m);
1517 if (__predict_false(error != 0)) {
1518 m->object = NULL;
1519 m->pindex = 0;
1520 m->ref_count &= ~VPRC_OBJREF;
1521 return (1);
1522 }
1523
1524 /*
1525 * Now link into the object's ordered list of backed pages.
1526 */
1527 vm_page_insert_radixdone(m, object, mpred);
1528 vm_pager_page_inserted(object, m);
1529 return (0);
1530 }
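/*
 * Relationship between the helper above and its wrappers below
 * (informational summary):
 *
 *	vm_page_insert():        mpred unknown, lookup == true
 *	vm_page_insert_after():  caller-supplied mpred, lookup == false
 *	vm_page_iter_insert():   caller-supplied mpred plus an iterator
 */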
1531
1532 /*
1533 * vm_page_insert: [ internal use only ]
1534 *
1535 * Inserts the given mem entry into the object and object list.
1536 *
1537 * The object must be locked.
1538 */
1539 int
1540 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
1541 {
1542 return (vm_page_insert_lookup(m, object, pindex, NULL, false, NULL,
1543 true));
1544 }
1545
1546 /*
1547 * vm_page_insert_after:
1548 *
1549 * Inserts the page "m" into the specified object at offset "pindex".
1550 *
1551 * The page "mpred" must immediately precede the offset "pindex" within
1552 * the specified object.
1553 *
1554 * The object must be locked.
1555 */
1556 static int
1557 vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
1558 vm_page_t mpred)
1559 {
1560 return (vm_page_insert_lookup(m, object, pindex, NULL, false, mpred,
1561 false));
1562 }
1563
1564 /*
1565 * vm_page_iter_insert:
1566 *
1567 * Tries to insert the page "m" into the specified object at offset
1568 * "pindex" using the iterator "pages". Returns 0 if the insertion was
1569 * successful.
1570 *
1571 * The page "mpred" must immediately precede the offset "pindex" within
1572 * the specified object.
1573 *
1574 * The object must be locked.
1575 */
1576 static int
1577 vm_page_iter_insert(struct pctrie_iter *pages, vm_page_t m, vm_object_t object,
1578 vm_pindex_t pindex, vm_page_t mpred)
1579 {
1580 return (vm_page_insert_lookup(m, object, pindex, pages, true, mpred,
1581 false));
1582 }
1583
1584 /*
1585 * vm_page_insert_radixdone:
1586 *
1587 * Complete page "m" insertion into the specified object after the
1588 * radix trie hooking.
1589 *
1590 * The page "mpred" must precede the offset "m->pindex" within the
1591 * specified object.
1592 *
1593 * The object must be locked.
1594 */
1595 static void
1596 vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
1597 {
1598
1599 VM_OBJECT_ASSERT_WLOCKED(object);
1600 KASSERT(object != NULL && m->object == object,
1601 ("vm_page_insert_radixdone: page %p has inconsistent object", m));
1602 KASSERT((m->ref_count & VPRC_OBJREF) != 0,
1603 ("vm_page_insert_radixdone: page %p is missing object ref", m));
1604 if (mpred != NULL) {
1605 KASSERT(mpred->object == object,
1606 ("vm_page_insert_radixdone: object doesn't contain mpred"));
1607 KASSERT(mpred->pindex < m->pindex,
1608 ("vm_page_insert_radixdone: mpred doesn't precede pindex"));
1609 KASSERT(TAILQ_NEXT(mpred, listq) == NULL ||
1610 m->pindex < TAILQ_NEXT(mpred, listq)->pindex,
1611 ("vm_page_insert_radixdone: pindex doesn't precede msucc"));
1612 } else {
1613 KASSERT(TAILQ_EMPTY(&object->memq) ||
1614 m->pindex < TAILQ_FIRST(&object->memq)->pindex,
1615 ("vm_page_insert_radixdone: no mpred but not first page"));
1616 }
1617
1618 if (mpred != NULL)
1619 TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
1620 else
1621 TAILQ_INSERT_HEAD(&object->memq, m, listq);
1622
1623 /*
1624 * Show that the object has one more resident page.
1625 */
1626 object->resident_page_count++;
1627
1628 /*
1629 * Hold the vnode until the last page is released.
1630 */
1631 if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
1632 vhold(object->handle);
1633
1634 /*
1635 * Since we are inserting a new and possibly dirty page,
1636 * update the object's generation count.
1637 */
1638 if (pmap_page_is_write_mapped(m))
1639 vm_object_set_writeable_dirty(object);
1640 }
1641
1642 /*
1643 * vm_page_remove_radixdone
1644 *
1645 * Complete page "m" removal from the specified object after the radix trie
1646 * unhooking.
1647 *
1648 * The caller is responsible for updating the page's fields to reflect this
1649 * removal.
1650 */
1651 static void
1652 vm_page_remove_radixdone(vm_page_t m)
1653 {
1654 vm_object_t object;
1655
1656 vm_page_assert_xbusied(m);
1657 object = m->object;
1658 VM_OBJECT_ASSERT_WLOCKED(object);
1659 KASSERT((m->ref_count & VPRC_OBJREF) != 0,
1660 ("page %p is missing its object ref", m));
1661
1662 /* Deferred free of swap space. */
1663 if ((m->a.flags & PGA_SWAP_FREE) != 0)
1664 vm_pager_page_unswapped(m);
1665
1666 vm_pager_page_removed(object, m);
1667 m->object = NULL;
1668
1669 /*
1670 * Now remove from the object's list of backed pages.
1671 */
1672 TAILQ_REMOVE(&object->memq, m, listq);
1673
1674 /*
1675 * And show that the object has one fewer resident page.
1676 */
1677 object->resident_page_count--;
1678
1679 /*
1680 * The vnode may now be recycled.
1681 */
1682 if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
1683 vdrop(object->handle);
1684 }
1685
1686 /*
1687 * vm_page_free_object_prep:
1688 *
1689 * Disassociates the given page from its VM object.
1690 *
1691 * The object must be locked, and the page must be xbusy.
1692 */
1693 static void
1694 vm_page_free_object_prep(vm_page_t m)
1695 {
1696 KASSERT(((m->oflags & VPO_UNMANAGED) != 0) ==
1697 ((m->object->flags & OBJ_UNMANAGED) != 0),
1698 ("%s: managed flag mismatch for page %p",
1699 __func__, m));
1700 vm_page_assert_xbusied(m);
1701
1702 /*
1703 * The object reference can be released without an atomic
1704 * operation.
1705 */
1706 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
1707 m->ref_count == VPRC_OBJREF,
1708 ("%s: page %p has unexpected ref_count %u",
1709 __func__, m, m->ref_count));
1710 vm_page_remove_radixdone(m);
1711 m->ref_count -= VPRC_OBJREF;
1712 }
1713
1714 /*
1715 * vm_page_iter_free:
1716 *
1717 * Free the given page, and use the iterator to remove it from the radix
1718 * tree.
1719 */
1720 void
1721 vm_page_iter_free(struct pctrie_iter *pages, vm_page_t m)
1722 {
1723 vm_radix_iter_remove(pages);
1724 vm_page_free_object_prep(m);
1725 vm_page_xunbusy(m);
1726 m->flags &= ~PG_ZERO;
1727 vm_page_free_toq(m);
1728 }
1729
1730 /*
1731 * vm_page_remove:
1732 *
1733 * Removes the specified page from its containing object, but does not
1734 * invalidate any backing storage. Returns true if the object's reference
1735 * was the last reference to the page, and false otherwise.
1736 *
1737 * The object must be locked and the page must be exclusively busied.
1738 * The exclusive busy will be released on return. If this is not the
1739 * final ref and the caller does not hold a wire reference it may not
1740 * continue to access the page.
1741 */
1742 bool
1743 vm_page_remove(vm_page_t m)
1744 {
1745 bool dropped;
1746
1747 dropped = vm_page_remove_xbusy(m);
1748 vm_page_xunbusy(m);
1749
1750 return (dropped);
1751 }
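/*
 * A common calling pattern (see vm_page_free_invalid() earlier in this
 * file): free the page only when the object held the final reference,
 *
 *	if (vm_page_remove(m))
 *		vm_page_free(m);
 */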
1752
1753 /*
1754 * vm_page_iter_remove:
1755 *
1756 * Remove the current page, and use the iterator to remove it from the
1757 * radix tree.
1758 */
1759 bool
1760 vm_page_iter_remove(struct pctrie_iter *pages, vm_page_t m)
1761 {
1762 bool dropped;
1763
1764 vm_radix_iter_remove(pages);
1765 vm_page_remove_radixdone(m);
1766 dropped = (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
1767 vm_page_xunbusy(m);
1768
1769 return (dropped);
1770 }
1771
1772 /*
1773 * vm_page_radix_remove
1774 *
1775 * Removes the specified page from the radix tree.
1776 */
1777 static void
1778 vm_page_radix_remove(vm_page_t m)
1779 {
1780 vm_page_t mrem __diagused;
1781
1782 mrem = vm_radix_remove(&m->object->rtree, m->pindex);
1783 KASSERT(mrem == m,
1784 ("removed page %p, expected page %p", mrem, m));
1785 }
1786
1787 /*
1788 * vm_page_remove_xbusy
1789 *
1790 * Removes the page but leaves the xbusy held. Returns true if this
1791 * removed the final ref and false otherwise.
1792 */
1793 bool
1794 vm_page_remove_xbusy(vm_page_t m)
1795 {
1796
1797 vm_page_radix_remove(m);
1798 vm_page_remove_radixdone(m);
1799 return (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
1800 }
1801
1802 /*
1803 * vm_page_lookup:
1804 *
1805 * Returns the page associated with the object/offset
1806 * pair specified; if none is found, NULL is returned.
1807 *
1808 * The object must be locked.
1809 */
1810 vm_page_t
1811 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
1812 {
1813
1814 VM_OBJECT_ASSERT_LOCKED(object);
1815 return (vm_radix_lookup(&object->rtree, pindex));
1816 }
1817
1818 /*
1819 * vm_page_iter_init:
1820 *
1821 * Initialize iterator for vm pages.
1822 */
1823 void
1824 vm_page_iter_init(struct pctrie_iter *pages, vm_object_t object)
1825 {
1826
1827 vm_radix_iter_init(pages, &object->rtree);
1828 }
1829
1830 /*
1831 * vm_page_iter_limit_init:
1832 *
1833 * Initialize iterator for vm pages, with an upper bound on the pindex.
1834 */
1835 void
1836 vm_page_iter_limit_init(struct pctrie_iter *pages, vm_object_t object,
1837 vm_pindex_t limit)
1838 {
1839
1840 vm_radix_iter_limit_init(pages, &object->rtree, limit);
1841 }
1842
1843 /*
1844 * vm_page_iter_lookup:
1845 *
1846 * Returns the page associated with the object/offset pair specified, and
1847 * stores the path to its position; if none is found, NULL is returned.
1848 *
1849 * The iter pctrie must be locked.
1850 */
1851 vm_page_t
1852 vm_page_iter_lookup(struct pctrie_iter *pages, vm_pindex_t pindex)
1853 {
1854
1855 return (vm_radix_iter_lookup(pages, pindex));
1856 }
1857
1858 /*
1859 * vm_page_lookup_unlocked:
1860 *
1861 * Returns the page associated with the object/offset pair specified;
1862 * if none is found, NULL is returned. The page may no longer be
1863 * present in the object at the time that this function returns. Only
1864 * useful for opportunistic checks such as inmem().
1865 */
1866 vm_page_t
1867 vm_page_lookup_unlocked(vm_object_t object, vm_pindex_t pindex)
1868 {
1869
1870 return (vm_radix_lookup_unlocked(&object->rtree, pindex));
1871 }
1872
1873 /*
1874 * vm_page_relookup:
1875 *
1876 * Returns a page that must already have been busied by
1877 * the caller. Used for bogus page replacement.
1878 */
1879 vm_page_t
1880 vm_page_relookup(vm_object_t object, vm_pindex_t pindex)
1881 {
1882 vm_page_t m;
1883
1884 m = vm_page_lookup_unlocked(object, pindex);
1885 KASSERT(m != NULL && (vm_page_busied(m) || vm_page_wired(m)) &&
1886 m->object == object && m->pindex == pindex,
1887 ("vm_page_relookup: Invalid page %p", m));
1888 return (m);
1889 }
1890
1891 /*
1892 * This should only be used by lockless functions for releasing transient
1893 * incorrect acquires. The page may have been freed after we acquired a
1894 * busy lock. In this case busy_lock == VPB_FREED and we have nothing
1895 * further to do.
1896 */
1897 static void
1898 vm_page_busy_release(vm_page_t m)
1899 {
1900 u_int x;
1901
1902 x = vm_page_busy_fetch(m);
1903 for (;;) {
1904 if (x == VPB_FREED)
1905 break;
1906 if ((x & VPB_BIT_SHARED) != 0 && VPB_SHARERS(x) > 1) {
1907 if (atomic_fcmpset_int(&m->busy_lock, &x,
1908 x - VPB_ONE_SHARER))
1909 break;
1910 continue;
1911 }
1912 KASSERT((x & VPB_BIT_SHARED) != 0 ||
1913 (x & ~VPB_BIT_WAITERS) == VPB_CURTHREAD_EXCLUSIVE,
1914 ("vm_page_busy_release: %p xbusy not owned.", m));
1915 if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
1916 continue;
1917 if ((x & VPB_BIT_WAITERS) != 0)
1918 wakeup(m);
1919 break;
1920 }
1921 }
1922
1923 /*
1924 * vm_page_find_least:
1925 *
1926 * Returns the page associated with the object with least pindex
1927 * greater than or equal to the parameter pindex, or NULL.
1928 *
1929 * The object must be locked.
1930 */
1931 vm_page_t
1932 vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
1933 {
1934 vm_page_t m;
1935
1936 VM_OBJECT_ASSERT_LOCKED(object);
1937 if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
1938 m = vm_radix_lookup_ge(&object->rtree, pindex);
1939 return (m);
1940 }
1941
1942 /*
1943 * vm_page_iter_lookup_ge:
1944 *
1945 * Returns the page associated with the object with least pindex
1946 * greater than or equal to the parameter pindex, or NULL. Initializes the
1947 * iterator to point to that page.
1948 *
1949 * The iter pctrie must be locked.
1950 */
1951 vm_page_t
1952 vm_page_iter_lookup_ge(struct pctrie_iter *pages, vm_pindex_t pindex)
1953 {
1954
1955 return (vm_radix_iter_lookup_ge(pages, pindex));
1956 }
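
/*
 * Example (illustrative sketch only, not an interface of this file): one
 * simple, if not the most efficient, way to walk an object's resident pages
 * in pindex order with a pctrie iterator, using only the routines defined
 * above and with the object lock held:
 *
 *	struct pctrie_iter pages;
 *	vm_page_t m;
 *
 *	vm_page_iter_init(&pages, object);
 *	for (m = vm_page_iter_lookup_ge(&pages, 0); m != NULL;
 *	    m = vm_page_iter_lookup_ge(&pages, m->pindex + 1)) {
 *		... per-page work ...
 *	}
 */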
1957
1958 /*
1959 * Returns the given page's successor (by pindex) within the object if it is
1960 * resident; if none is found, NULL is returned.
1961 *
1962 * The object must be locked.
1963 */
1964 vm_page_t
1965 vm_page_next(vm_page_t m)
1966 {
1967 vm_page_t next;
1968
1969 VM_OBJECT_ASSERT_LOCKED(m->object);
1970 if ((next = TAILQ_NEXT(m, listq)) != NULL) {
1971 MPASS(next->object == m->object);
1972 if (next->pindex != m->pindex + 1)
1973 next = NULL;
1974 }
1975 return (next);
1976 }
1977
1978 /*
1979 * Returns the given page's predecessor (by pindex) within the object if it is
1980 * resident; if none is found, NULL is returned.
1981 *
1982 * The object must be locked.
1983 */
1984 vm_page_t
1985 vm_page_prev(vm_page_t m)
1986 {
1987 vm_page_t prev;
1988
1989 VM_OBJECT_ASSERT_LOCKED(m->object);
1990 if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
1991 MPASS(prev->object == m->object);
1992 if (prev->pindex != m->pindex - 1)
1993 prev = NULL;
1994 }
1995 return (prev);
1996 }
1997
1998 /*
1999 * Uses the page mnew as a replacement for an existing page at index
2000 * pindex which must be already present in the object.
2001 *
2002 * Both pages must be exclusively busied on enter. The old page is
2003 * unbusied on exit.
2004 *
2005 * A return value of true means mold is now free. If this is not the
2006 * final ref and the caller does not hold a wire reference it may not
2007 * continue to access the page.
2008 */
2009 static bool
2010 vm_page_replace_hold(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
2011 vm_page_t mold)
2012 {
2013 vm_page_t mret __diagused;
2014 bool dropped;
2015
2016 VM_OBJECT_ASSERT_WLOCKED(object);
2017 vm_page_assert_xbusied(mold);
2018 KASSERT(mnew->object == NULL && (mnew->ref_count & VPRC_OBJREF) == 0,
2019 ("vm_page_replace: page %p already in object", mnew));
2020
2021 /*
2022 * This function mostly follows vm_page_insert() and
2023 * vm_page_remove() without the radix, object count and vnode
2024 * dance. Double check such functions for more comments.
2025 */
2026
2027 mnew->object = object;
2028 mnew->pindex = pindex;
2029 atomic_set_int(&mnew->ref_count, VPRC_OBJREF);
2030 mret = vm_radix_replace(&object->rtree, mnew);
2031 KASSERT(mret == mold,
2032 ("invalid page replacement, mold=%p, mret=%p", mold, mret));
2033 KASSERT((mold->oflags & VPO_UNMANAGED) ==
2034 (mnew->oflags & VPO_UNMANAGED),
2035 ("vm_page_replace: mismatched VPO_UNMANAGED"));
2036
2037 /* Keep the resident page list in sorted order. */
2038 TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
2039 TAILQ_REMOVE(&object->memq, mold, listq);
2040 mold->object = NULL;
2041
2042 /*
2043 * The object's resident_page_count does not change because we have
2044 * swapped one page for another, but the generation count should
2045 * change if the page is dirty.
2046 */
2047 if (pmap_page_is_write_mapped(mnew))
2048 vm_object_set_writeable_dirty(object);
2049 dropped = vm_page_drop(mold, VPRC_OBJREF) == VPRC_OBJREF;
2050 vm_page_xunbusy(mold);
2051
2052 return (dropped);
2053 }
2054
2055 void
2056 vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
2057 vm_page_t mold)
2058 {
2059
2060 vm_page_assert_xbusied(mnew);
2061
2062 if (vm_page_replace_hold(mnew, object, pindex, mold))
2063 vm_page_free(mold);
2064 }
2065
2066 /*
2067 * vm_page_iter_rename:
2068 *
2069 * Tries to move the specified page from its current object to a new object
2070 * and pindex, using the given iterator to remove the page from its current
2071 * object. Returns true if the move was successful, and false if the move
2072 * was aborted due to a failed memory allocation.
2073 *
2074 * Panics if a page already resides in the new object at the new pindex.
2075 *
2076 * Note: swap associated with the page must be invalidated by the move. We
2077 * have to do this for several reasons: (1) we aren't freeing the
2078 * page, (2) we are dirtying the page, (3) the VM system is probably
2079 * moving the page from object A to B, and will then later move
2080 * the backing store from A to B and we can't have a conflict.
2081 *
2082 * Note: we *always* dirty the page. It is necessary both for the
2083 * fact that we moved it, and because we may be invalidating
2084 * swap.
2085 *
2086 * The objects must be locked.
2087 */
2088 bool
2089 vm_page_iter_rename(struct pctrie_iter *old_pages, vm_page_t m,
2090 vm_object_t new_object, vm_pindex_t new_pindex)
2091 {
2092 vm_page_t mpred;
2093 vm_pindex_t opidx;
2094
2095 KASSERT((m->ref_count & VPRC_OBJREF) != 0,
2096 ("%s: page %p is missing object ref", __func__, m));
2097 VM_OBJECT_ASSERT_WLOCKED(m->object);
2098 VM_OBJECT_ASSERT_WLOCKED(new_object);
2099
2100 /*
2101 * Create a custom version of vm_page_insert() which does not depend
2102 * on m_prev and can cheat on the implementation aspects of the
2103 * function.
2104 */
2105 opidx = m->pindex;
2106 m->pindex = new_pindex;
2107 if (vm_radix_insert_lookup_lt(&new_object->rtree, m, &mpred) != 0) {
2108 m->pindex = opidx;
2109 return (false);
2110 }
2111
2112 /*
2113 * The operation can no longer fail. The removal must happen before
2114 * the listq iterator is tainted.
2115 */
2116 m->pindex = opidx;
2117 vm_radix_iter_remove(old_pages);
2118 vm_page_remove_radixdone(m);
2119
2120 /* Return back to the new pindex to complete vm_page_insert(). */
2121 m->pindex = new_pindex;
2122 m->object = new_object;
2123
2124 vm_page_insert_radixdone(m, new_object, mpred);
2125 vm_page_dirty(m);
2126 vm_pager_page_inserted(new_object, m);
2127 return (true);
2128 }
2129
2130 /*
2131 * vm_page_mpred:
2132 *
2133 * Return the greatest page of the object with index <= pindex,
2134 * or NULL, if there is none. Assumes object lock is held.
2135 */
2136 vm_page_t
2137 vm_page_mpred(vm_object_t object, vm_pindex_t pindex)
2138 {
2139 return (vm_radix_lookup_le(&object->rtree, pindex));
2140 }
2141
2142 /*
2143 * vm_page_alloc:
2144 *
2145 * Allocate and return a page that is associated with the specified
2146 * object and offset pair. By default, this page is exclusive busied.
2147 *
2148 * The caller must always specify an allocation class.
2149 *
2150 * allocation classes:
2151 * VM_ALLOC_NORMAL normal process request
2152 * VM_ALLOC_SYSTEM system *really* needs a page
2153 * VM_ALLOC_INTERRUPT interrupt time request
2154 *
2155 * optional allocation flags:
2156 * VM_ALLOC_COUNT(number) the number of additional pages that the caller
2157 * intends to allocate
2158 * VM_ALLOC_NOBUSY do not exclusive busy the page
2159 * VM_ALLOC_NODUMP do not include the page in a kernel core dump
2160 * VM_ALLOC_SBUSY shared busy the allocated page
2161 * VM_ALLOC_WIRED wire the allocated page
2162 * VM_ALLOC_ZERO prefer a zeroed page
2163 */
2164 vm_page_t
2165 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
2166 {
2167
2168 return (vm_page_alloc_after(object, pindex, req,
2169 vm_page_mpred(object, pindex)));
2170 }
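
/*
 * Example (illustrative sketch only): a caller that tolerates failure
 * allocates with the object write-locked and checks the result; the page is
 * returned exclusively busied unless VM_ALLOC_NOBUSY or VM_ALLOC_SBUSY was
 * requested.  On failure, a common pattern is to drop the object lock, wait
 * for free pages, and retry:
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *	if (m == NULL) {
 *		VM_OBJECT_WUNLOCK(object);
 *		vm_wait(object);
 *		... retry or fail ...
 *	}
 */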
2171
2172 /*
2173 * Allocate a page in the specified object with the given page index. To
2174 * optimize insertion of the page into the object, the caller must also specify
2175 * the resident page in the object with largest index smaller than the given
2176 * page index, or NULL if no such page exists.
2177 */
2178 static vm_page_t
2179 vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
2180 int req, vm_page_t mpred)
2181 {
2182 struct vm_domainset_iter di;
2183 vm_page_t m;
2184 int domain;
2185
2186 vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
2187 do {
2188 m = vm_page_alloc_domain_after(object, pindex, domain, req,
2189 mpred);
2190 if (m != NULL)
2191 break;
2192 } while (vm_domainset_iter_page(&di, object, &domain) == 0);
2193
2194 return (m);
2195 }
2196
2197 /*
2198 * Returns true if the number of free pages exceeds the minimum
2199 * for the request class and false otherwise.
2200 */
2201 static int
2202 _vm_domain_allocate(struct vm_domain *vmd, int req_class, int npages)
2203 {
2204 u_int limit, old, new;
2205
2206 if (req_class == VM_ALLOC_INTERRUPT)
2207 limit = 0;
2208 else if (req_class == VM_ALLOC_SYSTEM)
2209 limit = vmd->vmd_interrupt_free_min;
2210 else
2211 limit = vmd->vmd_free_reserved;
2212
2213 /*
2214 * Attempt to reserve the pages. Fail if we're below the limit.
2215 */
2216 limit += npages;
2217 old = atomic_load_int(&vmd->vmd_free_count);
2218 do {
2219 if (old < limit)
2220 return (0);
2221 new = old - npages;
2222 } while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0);
2223
2224 /* Wake the page daemon if we've crossed the threshold. */
2225 if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old))
2226 pagedaemon_wakeup(vmd->vmd_domain);
2227
2228 /* Only update bitsets on transitions. */
2229 if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) ||
2230 (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe))
2231 vm_domain_set(vmd);
2232
2233 return (1);
2234 }
2235
2236 int
2237 vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
2238 {
2239 int req_class;
2240
2241 /*
2242 * The page daemon is allowed to dig deeper into the free page list.
2243 */
2244 req_class = req & VM_ALLOC_CLASS_MASK;
2245 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
2246 req_class = VM_ALLOC_SYSTEM;
2247 return (_vm_domain_allocate(vmd, req_class, npages));
2248 }
2249
2250 vm_page_t
2251 vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
2252 int req, vm_page_t mpred)
2253 {
2254 struct vm_domain *vmd;
2255 vm_page_t m;
2256 int flags;
2257
2258 #define VPA_FLAGS (VM_ALLOC_CLASS_MASK | VM_ALLOC_WAITFAIL | \
2259 VM_ALLOC_NOWAIT | VM_ALLOC_NOBUSY | \
2260 VM_ALLOC_SBUSY | VM_ALLOC_WIRED | \
2261 VM_ALLOC_NODUMP | VM_ALLOC_ZERO | \
2262 VM_ALLOC_NOFREE | VM_ALLOC_COUNT_MASK)
2263 KASSERT((req & ~VPA_FLAGS) == 0,
2264 ("invalid request %#x", req));
2265 KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
2266 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
2267 ("invalid request %#x", req));
2268 KASSERT(mpred == NULL || mpred->pindex < pindex,
2269 ("mpred %p doesn't precede pindex 0x%jx", mpred,
2270 (uintmax_t)pindex));
2271 VM_OBJECT_ASSERT_WLOCKED(object);
2272
2273 flags = 0;
2274 m = NULL;
2275 if (!vm_pager_can_alloc_page(object, pindex))
2276 return (NULL);
2277 again:
2278 if (__predict_false((req & VM_ALLOC_NOFREE) != 0)) {
2279 m = vm_page_alloc_nofree_domain(domain, req);
2280 if (m != NULL)
2281 goto found;
2282 }
2283 #if VM_NRESERVLEVEL > 0
2284 /*
2285 * Can we allocate the page from a reservation?
2286 */
2287 if (vm_object_reserv(object) &&
2288 (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) !=
2289 NULL) {
2290 goto found;
2291 }
2292 #endif
2293 vmd = VM_DOMAIN(domain);
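	/*
	 * Try the per-CPU UMA page cache for the default free pool, if it is
	 * configured, before falling back to the physical allocator's free
	 * page queues.
	 */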
2294 if (vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone != NULL) {
2295 m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone,
2296 M_NOWAIT | M_NOVM);
2297 if (m != NULL) {
2298 flags |= PG_PCPU_CACHE;
2299 goto found;
2300 }
2301 }
2302 if (vm_domain_allocate(vmd, req, 1)) {
2303 /*
2304 * If not, allocate it from the free page queues.
2305 */
2306 vm_domain_free_lock(vmd);
2307 m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 0);
2308 vm_domain_free_unlock(vmd);
2309 if (m == NULL) {
2310 vm_domain_freecnt_inc(vmd, 1);
2311 #if VM_NRESERVLEVEL > 0
2312 if (vm_reserv_reclaim_inactive(domain))
2313 goto again;
2314 #endif
2315 }
2316 }
2317 if (m == NULL) {
2318 /*
2319 * Not allocatable, give up.
2320 */
2321 if (vm_domain_alloc_fail(vmd, object, req))
2322 goto again;
2323 return (NULL);
2324 }
2325
2326 /*
2327 * At this point we had better have found a good page.
2328 */
2329 found:
2330 vm_page_dequeue(m);
2331 vm_page_alloc_check(m);
2332
2333 /*
2334 * Initialize the page. Only the PG_ZERO flag is inherited.
2335 */
2336 flags |= m->flags & PG_ZERO;
2337 if ((req & VM_ALLOC_NODUMP) != 0)
2338 flags |= PG_NODUMP;
2339 if ((req & VM_ALLOC_NOFREE) != 0)
2340 flags |= PG_NOFREE;
2341 m->flags = flags;
2342 m->a.flags = 0;
2343 m->oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0;
2344 m->pool = VM_FREEPOOL_DEFAULT;
2345 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
2346 m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
2347 else if ((req & VM_ALLOC_SBUSY) != 0)
2348 m->busy_lock = VPB_SHARERS_WORD(1);
2349 else
2350 m->busy_lock = VPB_UNBUSIED;
2351 if (req & VM_ALLOC_WIRED) {
2352 vm_wire_add(1);
2353 m->ref_count = 1;
2354 }
2355 m->a.act_count = 0;
2356
2357 if (vm_page_insert_after(m, object, pindex, mpred)) {
2358 if (req & VM_ALLOC_WIRED) {
2359 vm_wire_sub(1);
2360 m->ref_count = 0;
2361 }
2362 KASSERT(m->object == NULL, ("page %p has object", m));
2363 m->oflags = VPO_UNMANAGED;
2364 m->busy_lock = VPB_UNBUSIED;
2365 /* Don't change PG_ZERO. */
2366 vm_page_free_toq(m);
2367 if (req & VM_ALLOC_WAITFAIL) {
2368 VM_OBJECT_WUNLOCK(object);
2369 vm_radix_wait();
2370 VM_OBJECT_WLOCK(object);
2371 }
2372 return (NULL);
2373 }
2374
2375 /* Ignore device objects; the pager sets "memattr" for them. */
2376 if (object->memattr != VM_MEMATTR_DEFAULT &&
2377 (object->flags & OBJ_FICTITIOUS) == 0)
2378 pmap_page_set_memattr(m, object->memattr);
2379
2380 return (m);
2381 }
2382
2383 /*
2384 * vm_page_alloc_contig:
2385 *
2386 * Allocate a contiguous set of physical pages of the given size "npages"
2387 * from the free lists. All of the physical pages must be at or above
2388 * the given physical address "low" and below the given physical address
2389 * "high". The given value "alignment" determines the alignment of the
2390 * first physical page in the set. If the given value "boundary" is
2391 * non-zero, then the set of physical pages cannot cross any physical
2392 * address boundary that is a multiple of that value. Both "alignment"
2393 * and "boundary" must be a power of two.
2394 *
2395 * If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
2396 * then the memory attribute setting for the physical pages is configured
2397 * to the object's memory attribute setting. Otherwise, the memory
2398 * attribute setting for the physical pages is configured to "memattr",
2399 * overriding the object's memory attribute setting. However, if the
2400 * object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
2401 * memory attribute setting for the physical pages cannot be configured
2402 * to VM_MEMATTR_DEFAULT.
2403 *
2404 * The specified object may not contain fictitious pages.
2405 *
2406 * The caller must always specify an allocation class.
2407 *
2408 * allocation classes:
2409 * VM_ALLOC_NORMAL normal process request
2410 * VM_ALLOC_SYSTEM system *really* needs a page
2411 * VM_ALLOC_INTERRUPT interrupt time request
2412 *
2413 * optional allocation flags:
2414 * VM_ALLOC_NOBUSY do not exclusive busy the page
2415 * VM_ALLOC_NODUMP do not include the page in a kernel core dump
2416 * VM_ALLOC_SBUSY shared busy the allocated page
2417 * VM_ALLOC_WIRED wire the allocated page
2418 * VM_ALLOC_ZERO prefer a zeroed page
2419 */
2420 vm_page_t
2421 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
2422 u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
2423 vm_paddr_t boundary, vm_memattr_t memattr)
2424 {
2425 struct vm_domainset_iter di;
2426 vm_page_t bounds[2];
2427 vm_page_t m;
2428 int domain;
2429 int start_segind;
2430
2431 start_segind = -1;
2432
2433 vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
2434 do {
2435 m = vm_page_alloc_contig_domain(object, pindex, domain, req,
2436 npages, low, high, alignment, boundary, memattr);
2437 if (m != NULL)
2438 break;
2439 if (start_segind == -1)
2440 start_segind = vm_phys_lookup_segind(low);
2441 if (vm_phys_find_range(bounds, start_segind, domain,
2442 npages, low, high) == -1) {
2443 vm_domainset_iter_ignore(&di, domain);
2444 }
2445 } while (vm_domainset_iter_page(&di, object, &domain) == 0);
2446
2447 return (m);
2448 }
2449
2450 static vm_page_t
2451 vm_page_find_contig_domain(int domain, int req, u_long npages, vm_paddr_t low,
2452 vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
2453 {
2454 struct vm_domain *vmd;
2455 vm_page_t m_ret;
2456
2457 /*
2458 * Can we allocate the pages without the number of free pages falling
2459 * below the lower bound for the allocation class?
2460 */
2461 vmd = VM_DOMAIN(domain);
2462 if (!vm_domain_allocate(vmd, req, npages))
2463 return (NULL);
2464 /*
2465 * Try to allocate the pages from the free page queues.
2466 */
2467 vm_domain_free_lock(vmd);
2468 m_ret = vm_phys_alloc_contig(domain, npages, low, high,
2469 alignment, boundary);
2470 vm_domain_free_unlock(vmd);
2471 if (m_ret != NULL)
2472 return (m_ret);
2473 #if VM_NRESERVLEVEL > 0
2474 /*
2475 * Try to break a reservation to allocate the pages.
2476 */
2477 if ((req & VM_ALLOC_NORECLAIM) == 0) {
2478 m_ret = vm_reserv_reclaim_contig(domain, npages, low,
2479 high, alignment, boundary);
2480 if (m_ret != NULL)
2481 return (m_ret);
2482 }
2483 #endif
2484 vm_domain_freecnt_inc(vmd, npages);
2485 return (NULL);
2486 }
2487
2488 vm_page_t
2489 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
2490 int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
2491 vm_paddr_t boundary, vm_memattr_t memattr)
2492 {
2493 struct pctrie_iter pages;
2494 vm_page_t m, m_ret, mpred;
2495 u_int busy_lock, flags, oflags;
2496
2497 #define VPAC_FLAGS (VPA_FLAGS | VM_ALLOC_NORECLAIM)
2498 KASSERT((req & ~VPAC_FLAGS) == 0,
2499 ("invalid request %#x", req));
2500 KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
2501 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
2502 ("invalid request %#x", req));
2503 KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) !=
2504 (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM),
2505 ("invalid request %#x", req));
2506 VM_OBJECT_ASSERT_WLOCKED(object);
2507 KASSERT((object->flags & OBJ_FICTITIOUS) == 0,
2508 ("vm_page_alloc_contig: object %p has fictitious pages",
2509 object));
2510 KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
2511
2512 vm_page_iter_init(&pages, object);
2513 mpred = vm_radix_iter_lookup_le(&pages, pindex);
2514 KASSERT(mpred == NULL || mpred->pindex != pindex,
2515 ("vm_page_alloc_contig: pindex already allocated"));
2516 for (;;) {
2517 #if VM_NRESERVLEVEL > 0
2518 /*
2519 * Can we allocate the pages from a reservation?
2520 */
2521 if (vm_object_reserv(object) &&
2522 (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req,
2523 mpred, npages, low, high, alignment, boundary)) != NULL) {
2524 break;
2525 }
2526 #endif
2527 if ((m_ret = vm_page_find_contig_domain(domain, req, npages,
2528 low, high, alignment, boundary)) != NULL)
2529 break;
2530 if (!vm_domain_alloc_fail(VM_DOMAIN(domain), object, req))
2531 return (NULL);
2532 }
2533
2534 /*
2535 * Initialize the pages. Only the PG_ZERO flag is inherited.
2536 */
2537 flags = PG_ZERO;
2538 if ((req & VM_ALLOC_NODUMP) != 0)
2539 flags |= PG_NODUMP;
2540 oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0;
2541 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
2542 busy_lock = VPB_CURTHREAD_EXCLUSIVE;
2543 else if ((req & VM_ALLOC_SBUSY) != 0)
2544 busy_lock = VPB_SHARERS_WORD(1);
2545 else
2546 busy_lock = VPB_UNBUSIED;
2547 if ((req & VM_ALLOC_WIRED) != 0)
2548 vm_wire_add(npages);
2549 if (object->memattr != VM_MEMATTR_DEFAULT &&
2550 memattr == VM_MEMATTR_DEFAULT)
2551 memattr = object->memattr;
2552 for (m = m_ret; m < &m_ret[npages]; m++) {
2553 vm_page_dequeue(m);
2554 vm_page_alloc_check(m);
2555 m->a.flags = 0;
2556 m->flags = (m->flags | PG_NODUMP) & flags;
2557 m->busy_lock = busy_lock;
2558 if ((req & VM_ALLOC_WIRED) != 0)
2559 m->ref_count = 1;
2560 m->a.act_count = 0;
2561 m->oflags = oflags;
2562 m->pool = VM_FREEPOOL_DEFAULT;
2563 if (vm_page_iter_insert(&pages, m, object, pindex, mpred)) {
2564 if ((req & VM_ALLOC_WIRED) != 0)
2565 vm_wire_sub(npages);
2566 KASSERT(m->object == NULL,
2567 ("page %p has object", m));
2568 mpred = m;
2569 for (m = m_ret; m < &m_ret[npages]; m++) {
2570 if (m <= mpred &&
2571 (req & VM_ALLOC_WIRED) != 0)
2572 m->ref_count = 0;
2573 m->oflags = VPO_UNMANAGED;
2574 m->busy_lock = VPB_UNBUSIED;
2575 /* Don't change PG_ZERO. */
2576 vm_page_free_toq(m);
2577 }
2578 if (req & VM_ALLOC_WAITFAIL) {
2579 VM_OBJECT_WUNLOCK(object);
2580 vm_radix_wait();
2581 VM_OBJECT_WLOCK(object);
2582 }
2583 return (NULL);
2584 }
2585 mpred = m;
2586 if (memattr != VM_MEMATTR_DEFAULT)
2587 pmap_page_set_memattr(m, memattr);
2588 pindex++;
2589 }
2590 return (m_ret);
2591 }
2592
2593 /*
2594 * Allocate a physical page that is not intended to be inserted into a VM
2595 * object.
2596 */
2597 vm_page_t
2598 vm_page_alloc_noobj_domain(int domain, int req)
2599 {
2600 struct vm_domain *vmd;
2601 vm_page_t m;
2602 int flags;
2603
2604 #define VPAN_FLAGS (VM_ALLOC_CLASS_MASK | VM_ALLOC_WAITFAIL | \
2605 VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | \
2606 VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | \
2607 VM_ALLOC_NODUMP | VM_ALLOC_ZERO | \
2608 VM_ALLOC_NOFREE | VM_ALLOC_COUNT_MASK)
2609 KASSERT((req & ~VPAN_FLAGS) == 0,
2610 ("invalid request %#x", req));
2611
2612 flags = ((req & VM_ALLOC_NODUMP) != 0 ? PG_NODUMP : 0) |
2613 ((req & VM_ALLOC_NOFREE) != 0 ? PG_NOFREE : 0);
2614 vmd = VM_DOMAIN(domain);
2615 again:
2616 if (__predict_false((req & VM_ALLOC_NOFREE) != 0)) {
2617 m = vm_page_alloc_nofree_domain(domain, req);
2618 if (m != NULL)
2619 goto found;
2620 }
2621
2622 if (vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone != NULL) {
2623 m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone,
2624 M_NOWAIT | M_NOVM);
2625 if (m != NULL) {
2626 flags |= PG_PCPU_CACHE;
2627 goto found;
2628 }
2629 }
2630
2631 if (vm_domain_allocate(vmd, req, 1)) {
2632 vm_domain_free_lock(vmd);
2633 m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DIRECT, 0);
2634 vm_domain_free_unlock(vmd);
2635 if (m == NULL) {
2636 vm_domain_freecnt_inc(vmd, 1);
2637 #if VM_NRESERVLEVEL > 0
2638 if (vm_reserv_reclaim_inactive(domain))
2639 goto again;
2640 #endif
2641 }
2642 }
2643 if (m == NULL) {
2644 if (vm_domain_alloc_fail(vmd, NULL, req))
2645 goto again;
2646 return (NULL);
2647 }
2648
2649 found:
2650 vm_page_dequeue(m);
2651 vm_page_alloc_check(m);
2652
2653 /*
2654 * Consumers should not rely on a useful default pindex value.
2655 */
2656 m->pindex = 0xdeadc0dedeadc0de;
2657 m->flags = (m->flags & PG_ZERO) | flags;
2658 m->a.flags = 0;
2659 m->oflags = VPO_UNMANAGED;
2660 m->pool = VM_FREEPOOL_DIRECT;
2661 m->busy_lock = VPB_UNBUSIED;
2662 if ((req & VM_ALLOC_WIRED) != 0) {
2663 vm_wire_add(1);
2664 m->ref_count = 1;
2665 }
2666
2667 if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
2668 pmap_zero_page(m);
2669
2670 return (m);
2671 }
2672
2673 #if VM_NRESERVLEVEL > 1
2674 #define VM_NOFREE_IMPORT_ORDER (VM_LEVEL_1_ORDER + VM_LEVEL_0_ORDER)
2675 #elif VM_NRESERVLEVEL > 0
2676 #define VM_NOFREE_IMPORT_ORDER VM_LEVEL_0_ORDER
2677 #else
2678 #define VM_NOFREE_IMPORT_ORDER 8
2679 #endif
2680
2681 /*
2682 * Allocate a single NOFREE page.
2683 *
2684 * This routine hands out NOFREE pages from higher-order
2685 * physical memory blocks in order to reduce memory fragmentation.
2686 * When a NOFREE chunk for a given domain is used up,
2687 * the routine will try to fetch a new one from the freelists
2688 * and discard the old one.
2689 */
2690 static vm_page_t
2691 vm_page_alloc_nofree_domain(int domain, int req)
2692 {
2693 vm_page_t m;
2694 struct vm_domain *vmd;
2695 struct vm_nofreeq *nqp;
2696
2697 KASSERT((req & VM_ALLOC_NOFREE) != 0, ("invalid request %#x", req));
2698
2699 vmd = VM_DOMAIN(domain);
2700 nqp = &vmd->vmd_nofreeq;
2701 vm_domain_free_lock(vmd);
2702 if (nqp->offs >= (1 << VM_NOFREE_IMPORT_ORDER) || nqp->ma == NULL) {
2703 if (!vm_domain_allocate(vmd, req,
2704 1 << VM_NOFREE_IMPORT_ORDER)) {
2705 vm_domain_free_unlock(vmd);
2706 return (NULL);
2707 }
2708 nqp->ma = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
2709 VM_NOFREE_IMPORT_ORDER);
2710 if (nqp->ma == NULL) {
2711 vm_domain_freecnt_inc(vmd, 1 << VM_NOFREE_IMPORT_ORDER);
2712 vm_domain_free_unlock(vmd);
2713 return (NULL);
2714 }
2715 nqp->offs = 0;
2716 }
2717 m = &nqp->ma[nqp->offs++];
2718 vm_domain_free_unlock(vmd);
2719 VM_CNT_ADD(v_nofree_count, 1);
2720
2721 return (m);
2722 }
2723
2724 vm_page_t
2725 vm_page_alloc_noobj(int req)
2726 {
2727 struct vm_domainset_iter di;
2728 vm_page_t m;
2729 int domain;
2730
2731 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
2732 do {
2733 m = vm_page_alloc_noobj_domain(domain, req);
2734 if (m != NULL)
2735 break;
2736 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
2737
2738 return (m);
2739 }
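
/*
 * Example (illustrative sketch only): allocating a wired, zero-filled page
 * that is not associated with any VM object, e.g., to back a kernel data
 * structure, and accessing it through the direct map on platforms that
 * provide one:
 *
 *	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO |
 *	    VM_ALLOC_WAITOK);
 *	va = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 */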
2740
2741 vm_page_t
2742 vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
2743 vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
2744 vm_memattr_t memattr)
2745 {
2746 struct vm_domainset_iter di;
2747 vm_page_t m;
2748 int domain;
2749
2750 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
2751 do {
2752 m = vm_page_alloc_noobj_contig_domain(domain, req, npages, low,
2753 high, alignment, boundary, memattr);
2754 if (m != NULL)
2755 break;
2756 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
2757
2758 return (m);
2759 }
2760
2761 vm_page_t
2762 vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
2763 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
2764 vm_memattr_t memattr)
2765 {
2766 vm_page_t m, m_ret;
2767 u_int flags;
2768
2769 #define VPANC_FLAGS (VPAN_FLAGS | VM_ALLOC_NORECLAIM)
2770 KASSERT((req & ~VPANC_FLAGS) == 0,
2771 ("invalid request %#x", req));
2772 KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) !=
2773 (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM),
2774 ("invalid request %#x", req));
2775 KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
2776 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
2777 ("invalid request %#x", req));
2778 KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
2779
2780 while ((m_ret = vm_page_find_contig_domain(domain, req, npages,
2781 low, high, alignment, boundary)) == NULL) {
2782 if (!vm_domain_alloc_fail(VM_DOMAIN(domain), NULL, req))
2783 return (NULL);
2784 }
2785
2786 /*
2787 * Initialize the pages. Only the PG_ZERO flag is inherited.
2788 */
2789 flags = PG_ZERO;
2790 if ((req & VM_ALLOC_NODUMP) != 0)
2791 flags |= PG_NODUMP;
2792 if ((req & VM_ALLOC_WIRED) != 0)
2793 vm_wire_add(npages);
2794 for (m = m_ret; m < &m_ret[npages]; m++) {
2795 vm_page_dequeue(m);
2796 vm_page_alloc_check(m);
2797
2798 /*
2799 * Consumers should not rely on a useful default pindex value.
2800 */
2801 m->pindex = 0xdeadc0dedeadc0de;
2802 m->a.flags = 0;
2803 m->flags = (m->flags | PG_NODUMP) & flags;
2804 m->busy_lock = VPB_UNBUSIED;
2805 if ((req & VM_ALLOC_WIRED) != 0)
2806 m->ref_count = 1;
2807 m->a.act_count = 0;
2808 m->oflags = VPO_UNMANAGED;
2809 m->pool = VM_FREEPOOL_DIRECT;
2810
2811 /*
2812 * Zero the page before updating any mappings since the page is
2813 * not yet shared with any devices which might require the
2814 * non-default memory attribute. pmap_page_set_memattr()
2815 * flushes data caches before returning.
2816 */
2817 if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
2818 pmap_zero_page(m);
2819 if (memattr != VM_MEMATTR_DEFAULT)
2820 pmap_page_set_memattr(m, memattr);
2821 }
2822 return (m_ret);
2823 }
2824
2825 /*
2826 * Check a page that has been freshly dequeued from a freelist.
2827 */
2828 static void
2829 vm_page_alloc_check(vm_page_t m)
2830 {
2831
2832 KASSERT(m->object == NULL, ("page %p has object", m));
2833 KASSERT(m->a.queue == PQ_NONE &&
2834 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
2835 ("page %p has unexpected queue %d, flags %#x",
2836 m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK)));
2837 KASSERT(m->ref_count == 0, ("page %p has references", m));
2838 KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m));
2839 KASSERT(m->dirty == 0, ("page %p is dirty", m));
2840 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
2841 ("page %p has unexpected memattr %d",
2842 m, pmap_page_get_memattr(m)));
2843 KASSERT(vm_page_none_valid(m), ("free page %p is valid", m));
2844 pmap_vm_page_alloc_check(m);
2845 }
2846
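/*
 * Import a batch of free pages from the physical memory allocator into a
 * per-domain UMA page cache zone.  Used as the zone's import routine.
 */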
2847 static int
2848 vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags)
2849 {
2850 struct vm_domain *vmd;
2851 struct vm_pgcache *pgcache;
2852 int i;
2853
2854 pgcache = arg;
2855 vmd = VM_DOMAIN(pgcache->domain);
2856
2857 /*
2858 * The page daemon should avoid creating extra memory pressure since its
2859 * main purpose is to replenish the store of free pages.
2860 */
2861 if (vmd->vmd_severeset || curproc == pageproc ||
2862 !_vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
2863 return (0);
2864 domain = vmd->vmd_domain;
2865 vm_domain_free_lock(vmd);
2866 i = vm_phys_alloc_npages(domain, pgcache->pool, cnt,
2867 (vm_page_t *)store);
2868 vm_domain_free_unlock(vmd);
2869 if (cnt != i)
2870 vm_domain_freecnt_inc(vmd, cnt - i);
2871
2872 return (i);
2873 }
2874
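/*
 * Release a batch of pages from a per-domain UMA page cache zone back to the
 * physical memory allocator's free page queues.
 */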
2875 static void
2876 vm_page_zone_release(void *arg, void **store, int cnt)
2877 {
2878 struct vm_domain *vmd;
2879 struct vm_pgcache *pgcache;
2880 vm_page_t m;
2881 int i;
2882
2883 pgcache = arg;
2884 vmd = VM_DOMAIN(pgcache->domain);
2885 vm_domain_free_lock(vmd);
2886 for (i = 0; i < cnt; i++) {
2887 m = (vm_page_t)store[i];
2888 vm_phys_free_pages(m, pgcache->pool, 0);
2889 }
2890 vm_domain_free_unlock(vmd);
2891 vm_domain_freecnt_inc(vmd, cnt);
2892 }
2893
2894 #define VPSC_ANY 0 /* No restrictions. */
2895 #define VPSC_NORESERV 1 /* Skip reservations; implies VPSC_NOSUPER. */
2896 #define VPSC_NOSUPER 2 /* Skip superpages. */
2897
2898 /*
2899 * vm_page_scan_contig:
2900 *
2901 * Scan vm_page_array[] between the specified entries "m_start" and
2902 * "m_end" for a run of contiguous physical pages that satisfy the
2903 * specified conditions, and return the lowest page in the run. The
2904 * specified "alignment" determines the alignment of the lowest physical
2905 * page in the run. If the specified "boundary" is non-zero, then the
2906 * run of physical pages cannot span a physical address that is a
2907 * multiple of "boundary".
2908 *
2909 * "m_end" is never dereferenced, so it need not point to a vm_page
2910 * structure within vm_page_array[].
2911 *
2912 * "npages" must be greater than zero. "m_start" and "m_end" must not
2913 * span a hole (or discontiguity) in the physical address space. Both
2914 * "alignment" and "boundary" must be a power of two.
2915 */
2916 static vm_page_t
2917 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
2918 u_long alignment, vm_paddr_t boundary, int options)
2919 {
2920 vm_object_t object;
2921 vm_paddr_t pa;
2922 vm_page_t m, m_run;
2923 #if VM_NRESERVLEVEL > 0
2924 int level;
2925 #endif
2926 int m_inc, order, run_ext, run_len;
2927
2928 KASSERT(npages > 0, ("npages is 0"));
2929 KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
2930 KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
2931 m_run = NULL;
2932 run_len = 0;
2933 for (m = m_start; m < m_end && run_len < npages; m += m_inc) {
2934 KASSERT((m->flags & PG_MARKER) == 0,
2935 ("page %p is PG_MARKER", m));
2936 KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1,
2937 ("fictitious page %p has invalid ref count", m));
2938
2939 /*
2940 * If the current page would be the start of a run, check its
2941 * physical address against the end, alignment, and boundary
2942 * conditions. If it doesn't satisfy these conditions, either
2943 * terminate the scan or advance to the next page that
2944 * satisfies the failed condition.
2945 */
2946 if (run_len == 0) {
2947 KASSERT(m_run == NULL, ("m_run != NULL"));
2948 if (m + npages > m_end)
2949 break;
2950 pa = VM_PAGE_TO_PHYS(m);
2951 if (!vm_addr_align_ok(pa, alignment)) {
2952 m_inc = atop(roundup2(pa, alignment) - pa);
2953 continue;
2954 }
2955 if (!vm_addr_bound_ok(pa, ptoa(npages), boundary)) {
2956 m_inc = atop(roundup2(pa, boundary) - pa);
2957 continue;
2958 }
2959 } else
2960 KASSERT(m_run != NULL, ("m_run == NULL"));
2961
2962 retry:
2963 m_inc = 1;
2964 if (vm_page_wired(m))
2965 run_ext = 0;
2966 #if VM_NRESERVLEVEL > 0
2967 else if ((level = vm_reserv_level(m)) >= 0 &&
2968 (options & VPSC_NORESERV) != 0) {
2969 run_ext = 0;
2970 /* Advance to the end of the reservation. */
2971 pa = VM_PAGE_TO_PHYS(m);
2972 m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) -
2973 pa);
2974 }
2975 #endif
2976 else if ((object = atomic_load_ptr(&m->object)) != NULL) {
2977 /*
2978 * The page is considered eligible for relocation if
2979 * and only if it could be laundered or reclaimed by
2980 * the page daemon.
2981 */
2982 VM_OBJECT_RLOCK(object);
2983 if (object != m->object) {
2984 VM_OBJECT_RUNLOCK(object);
2985 goto retry;
2986 }
2987 /* Don't care: PG_NODUMP, PG_ZERO. */
2988 if ((object->flags & OBJ_SWAP) == 0 &&
2989 object->type != OBJT_VNODE) {
2990 run_ext = 0;
2991 #if VM_NRESERVLEVEL > 0
2992 } else if ((options & VPSC_NOSUPER) != 0 &&
2993 (level = vm_reserv_level_iffullpop(m)) >= 0) {
2994 run_ext = 0;
2995 /* Advance to the end of the superpage. */
2996 pa = VM_PAGE_TO_PHYS(m);
2997 m_inc = atop(roundup2(pa + 1,
2998 vm_reserv_size(level)) - pa);
2999 #endif
3000 } else if (object->memattr == VM_MEMATTR_DEFAULT &&
3001 vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) {
3002 /*
3003 * The page is allocated but eligible for
3004 * relocation. Extend the current run by one
3005 * page.
3006 */
3007 KASSERT(pmap_page_get_memattr(m) ==
3008 VM_MEMATTR_DEFAULT,
3009 ("page %p has an unexpected memattr", m));
3010 KASSERT((m->oflags & (VPO_SWAPINPROG |
3011 VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
3012 ("page %p has unexpected oflags", m));
3013 /* Don't care: PGA_NOSYNC. */
3014 run_ext = 1;
3015 } else
3016 run_ext = 0;
3017 VM_OBJECT_RUNLOCK(object);
3018 #if VM_NRESERVLEVEL > 0
3019 } else if (level >= 0) {
3020 /*
3021 * The page is reserved but not yet allocated. In
3022 * other words, it is still free. Extend the current
3023 * run by one page.
3024 */
3025 run_ext = 1;
3026 #endif
3027 } else if ((order = m->order) < VM_NFREEORDER) {
3028 /*
3029 * The page is enqueued in the physical memory
3030 * allocator's free page queues. Moreover, it is the
3031 * first page in a power-of-two-sized run of
3032 * contiguous free pages. Add these pages to the end
3033 * of the current run, and jump ahead.
3034 */
3035 run_ext = 1 << order;
3036 m_inc = 1 << order;
3037 } else {
3038 /*
3039 * Skip the page for one of the following reasons: (1)
3040 * It is enqueued in the physical memory allocator's
3041 * free page queues. However, it is not the first
3042 * page in a run of contiguous free pages. (This case
3043 * rarely occurs because the scan is performed in
3044 * ascending order.) (2) It is not reserved, and it is
3045 * transitioning from free to allocated. (Conversely,
3046 * the transition from allocated to free for managed
3047 * pages is blocked by the page busy lock.) (3) It is
3048 * allocated but not contained by an object and not
3049 * wired, e.g., allocated by Xen's balloon driver.
3050 */
3051 run_ext = 0;
3052 }
3053
3054 /*
3055 * Extend or reset the current run of pages.
3056 */
3057 if (run_ext > 0) {
3058 if (run_len == 0)
3059 m_run = m;
3060 run_len += run_ext;
3061 } else {
3062 if (run_len > 0) {
3063 m_run = NULL;
3064 run_len = 0;
3065 }
3066 }
3067 }
3068 if (run_len >= npages)
3069 return (m_run);
3070 return (NULL);
3071 }
3072
3073 /*
3074 * vm_page_reclaim_run:
3075 *
3076 * Try to relocate each of the allocated virtual pages within the
3077 * specified run of physical pages to a new physical address. Free the
3078 * physical pages underlying the relocated virtual pages. A virtual page
3079 * is relocatable if and only if it could be laundered or reclaimed by
3080 * the page daemon. Whenever possible, a virtual page is relocated to a
3081 * physical address above "high".
3082 *
3083 * Returns 0 if every physical page within the run was already free or
3084 * just freed by a successful relocation. Otherwise, returns a non-zero
3085 * value indicating why the last attempt to relocate a virtual page was
3086 * unsuccessful.
3087 *
3088 * "req_class" must be an allocation class.
3089 */
3090 static int
3091 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
3092 vm_paddr_t high)
3093 {
3094 struct vm_domain *vmd;
3095 struct spglist free;
3096 vm_object_t object;
3097 vm_paddr_t pa;
3098 vm_page_t m, m_end, m_new;
3099 int error, order, req;
3100
3101 KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class,
3102 ("req_class is not an allocation class"));
3103 SLIST_INIT(&free);
3104 error = 0;
3105 m = m_run;
3106 m_end = m_run + npages;
3107 for (; error == 0 && m < m_end; m++) {
3108 KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
3109 ("page %p is PG_FICTITIOUS or PG_MARKER", m));
3110
3111 /*
3112 * Racily check for wirings. Races are handled once the object
3113 * lock is held and the page is unmapped.
3114 */
3115 if (vm_page_wired(m))
3116 error = EBUSY;
3117 else if ((object = atomic_load_ptr(&m->object)) != NULL) {
3118 /*
3119 * The page is relocated if and only if it could be
3120 * laundered or reclaimed by the page daemon.
3121 */
3122 VM_OBJECT_WLOCK(object);
3123 /* Don't care: PG_NODUMP, PG_ZERO. */
3124 if (m->object != object ||
3125 ((object->flags & OBJ_SWAP) == 0 &&
3126 object->type != OBJT_VNODE))
3127 error = EINVAL;
3128 else if (object->memattr != VM_MEMATTR_DEFAULT)
3129 error = EINVAL;
3130 else if (vm_page_queue(m) != PQ_NONE &&
3131 vm_page_tryxbusy(m) != 0) {
3132 if (vm_page_wired(m)) {
3133 vm_page_xunbusy(m);
3134 error = EBUSY;
3135 goto unlock;
3136 }
3137 KASSERT(pmap_page_get_memattr(m) ==
3138 VM_MEMATTR_DEFAULT,
3139 ("page %p has an unexpected memattr", m));
3140 KASSERT(m->oflags == 0,
3141 ("page %p has unexpected oflags", m));
3142 /* Don't care: PGA_NOSYNC. */
3143 if (!vm_page_none_valid(m)) {
3144 /*
3145 * First, try to allocate a new page
3146 * that is above "high". Failing
3147 * that, try to allocate a new page
3148 * that is below "m_run". Allocate
3149 * the new page between the end of
3150 * "m_run" and "high" only as a last
3151 * resort.
3152 */
3153 req = req_class;
3154 if ((m->flags & PG_NODUMP) != 0)
3155 req |= VM_ALLOC_NODUMP;
3156 if (trunc_page(high) !=
3157 ~(vm_paddr_t)PAGE_MASK) {
3158 m_new =
3159 vm_page_alloc_noobj_contig(
3160 req, 1, round_page(high),
3161 ~(vm_paddr_t)0, PAGE_SIZE,
3162 0, VM_MEMATTR_DEFAULT);
3163 } else
3164 m_new = NULL;
3165 if (m_new == NULL) {
3166 pa = VM_PAGE_TO_PHYS(m_run);
3167 m_new =
3168 vm_page_alloc_noobj_contig(
3169 req, 1, 0, pa - 1,
3170 PAGE_SIZE, 0,
3171 VM_MEMATTR_DEFAULT);
3172 }
3173 if (m_new == NULL) {
3174 pa += ptoa(npages);
3175 m_new =
3176 vm_page_alloc_noobj_contig(
3177 req, 1, pa, high, PAGE_SIZE,
3178 0, VM_MEMATTR_DEFAULT);
3179 }
3180 if (m_new == NULL) {
3181 vm_page_xunbusy(m);
3182 error = ENOMEM;
3183 goto unlock;
3184 }
3185
3186 /*
3187 * Unmap the page and check for new
3188 * wirings that may have been acquired
3189 * through a pmap lookup.
3190 */
3191 if (object->ref_count != 0 &&
3192 !vm_page_try_remove_all(m)) {
3193 vm_page_xunbusy(m);
3194 vm_page_free(m_new);
3195 error = EBUSY;
3196 goto unlock;
3197 }
3198
3199 /*
3200 * Replace "m" with the new page. For
3201 * vm_page_replace(), "m" must be busy
3202 * and dequeued. Finally, change "m"
3203 * as if vm_page_free() was called.
3204 */
3205 m_new->a.flags = m->a.flags &
3206 ~PGA_QUEUE_STATE_MASK;
3207 KASSERT(m_new->oflags == VPO_UNMANAGED,
3208 ("page %p is managed", m_new));
3209 m_new->oflags = 0;
3210 pmap_copy_page(m, m_new);
3211 m_new->valid = m->valid;
3212 m_new->dirty = m->dirty;
3213 m->flags &= ~PG_ZERO;
3214 vm_page_dequeue(m);
3215 if (vm_page_replace_hold(m_new, object,
3216 m->pindex, m) &&
3217 vm_page_free_prep(m))
3218 SLIST_INSERT_HEAD(&free, m,
3219 plinks.s.ss);
3220
3221 /*
3222 * The new page must be deactivated
3223 * before the object is unlocked.
3224 */
3225 vm_page_deactivate(m_new);
3226 } else {
3227 m->flags &= ~PG_ZERO;
3228 vm_page_dequeue(m);
3229 if (vm_page_free_prep(m))
3230 SLIST_INSERT_HEAD(&free, m,
3231 plinks.s.ss);
3232 KASSERT(m->dirty == 0,
3233 ("page %p is dirty", m));
3234 }
3235 } else
3236 error = EBUSY;
3237 unlock:
3238 VM_OBJECT_WUNLOCK(object);
3239 } else {
3240 MPASS(vm_page_domain(m) == domain);
3241 vmd = VM_DOMAIN(domain);
3242 vm_domain_free_lock(vmd);
3243 order = m->order;
3244 if (order < VM_NFREEORDER) {
3245 /*
3246 * The page is enqueued in the physical memory
3247 * allocator's free page queues. Moreover, it
3248 * is the first page in a power-of-two-sized
3249 * run of contiguous free pages. Jump ahead
3250 * to the last page within that run, and
3251 * continue from there.
3252 */
3253 m += (1 << order) - 1;
3254 }
3255 #if VM_NRESERVLEVEL > 0
3256 else if (vm_reserv_is_page_free(m))
3257 order = 0;
3258 #endif
3259 vm_domain_free_unlock(vmd);
3260 if (order == VM_NFREEORDER)
3261 error = EINVAL;
3262 }
3263 }
3264 if ((m = SLIST_FIRST(&free)) != NULL) {
3265 int cnt;
3266
3267 vmd = VM_DOMAIN(domain);
3268 cnt = 0;
3269 vm_domain_free_lock(vmd);
3270 do {
3271 MPASS(vm_page_domain(m) == domain);
3272 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
3273 vm_phys_free_pages(m, m->pool, 0);
3274 cnt++;
3275 } while ((m = SLIST_FIRST(&free)) != NULL);
3276 vm_domain_free_unlock(vmd);
3277 vm_domain_freecnt_inc(vmd, cnt);
3278 }
3279 return (error);
3280 }
3281
3282 #define NRUNS 16
3283
3284 #define RUN_INDEX(count, nruns) ((count) % (nruns))
3285
3286 #define MIN_RECLAIM 8
3287
3288 /*
3289 * vm_page_reclaim_contig:
3290 *
3291 * Reclaim allocated, contiguous physical memory satisfying the specified
3292 * conditions by relocating the virtual pages using that physical memory.
3293 * Returns 0 if reclamation is successful, ERANGE if the specified domain
3294 * can't possibly satisfy the reclamation request, or ENOMEM if not
3295 * currently able to reclaim the requested number of pages. Since
3296 * relocation requires the allocation of physical pages, reclamation may
3297 * fail with ENOMEM due to a shortage of free pages. When reclamation
3298 * fails in this manner, callers are expected to perform vm_wait() before
3299 * retrying a failed allocation operation, e.g., vm_page_alloc_contig().
3300 *
3301 * The caller must always specify an allocation class through "req".
3302 *
3303 * allocation classes:
3304 * VM_ALLOC_NORMAL normal process request
3305 * VM_ALLOC_SYSTEM system *really* needs a page
3306 * VM_ALLOC_INTERRUPT interrupt time request
3307 *
3308 * The optional allocation flags are ignored.
3309 *
3310 * "npages" must be greater than zero. Both "alignment" and "boundary"
3311 * must be a power of two.
3312 */
3313 int
3314 vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages,
3315 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
3316 int desired_runs)
3317 {
3318 struct vm_domain *vmd;
3319 vm_page_t bounds[2], m_run, _m_runs[NRUNS], *m_runs;
3320 u_long count, minalign, reclaimed;
3321 int error, i, min_reclaim, nruns, options, req_class;
3322 int segind, start_segind;
3323 int ret;
3324
3325 KASSERT(npages > 0, ("npages is 0"));
3326 KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
3327 KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
3328
3329 ret = ENOMEM;
3330
3331 /*
3332 * If the caller wants to reclaim multiple runs, try to allocate
3333 * space to store the runs. If that fails, fall back to the old
3334 * behavior of just reclaiming MIN_RECLAIM pages.
3335 */
3336 if (desired_runs > 1)
3337 m_runs = malloc((NRUNS + desired_runs) * sizeof(*m_runs),
3338 M_TEMP, M_NOWAIT);
3339 else
3340 m_runs = NULL;
3341
3342 if (m_runs == NULL) {
3343 m_runs = _m_runs;
3344 nruns = NRUNS;
3345 } else {
3346 nruns = NRUNS + desired_runs - 1;
3347 }
3348 min_reclaim = MAX(desired_runs * npages, MIN_RECLAIM);
3349
3350 /*
3351 * The caller will attempt an allocation after some runs have been
3352 * reclaimed and added to the vm_phys buddy lists. Due to limitations
3353 * of vm_phys_alloc_contig(), round up the requested length to the next
3354 * power of two or maximum chunk size, and ensure that each run is
3355 * suitably aligned.
3356 */
3357 minalign = 1ul << imin(flsl(npages - 1), VM_NFREEORDER - 1);
3358 npages = roundup2(npages, minalign);
3359 if (alignment < ptoa(minalign))
3360 alignment = ptoa(minalign);
3361
3362 /*
3363 * The page daemon is allowed to dig deeper into the free page list.
3364 */
3365 req_class = req & VM_ALLOC_CLASS_MASK;
3366 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
3367 req_class = VM_ALLOC_SYSTEM;
3368
3369 start_segind = vm_phys_lookup_segind(low);
3370
3371 /*
3372 * Return if the number of free pages cannot satisfy the requested
3373 * allocation.
3374 */
3375 vmd = VM_DOMAIN(domain);
3376 count = vmd->vmd_free_count;
3377 if (count < npages + vmd->vmd_free_reserved || (count < npages +
3378 vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
3379 (count < npages && req_class == VM_ALLOC_INTERRUPT))
3380 goto done;
3381
3382 /*
3383 * Scan up to three times, relaxing the restrictions ("options") on
3384 * the reclamation of reservations and superpages each time.
3385 */
3386 for (options = VPSC_NORESERV;;) {
3387 bool phys_range_exists = false;
3388
3389 /*
3390 * Find the highest runs that satisfy the given constraints
3391 * and restrictions, and record them in "m_runs".
3392 */
3393 count = 0;
3394 segind = start_segind;
3395 while ((segind = vm_phys_find_range(bounds, segind, domain,
3396 npages, low, high)) != -1) {
3397 phys_range_exists = true;
3398 while ((m_run = vm_page_scan_contig(npages, bounds[0],
3399 bounds[1], alignment, boundary, options))) {
3400 bounds[0] = m_run + npages;
3401 m_runs[RUN_INDEX(count, nruns)] = m_run;
3402 count++;
3403 }
3404 segind++;
3405 }
3406
3407 if (!phys_range_exists) {
3408 ret = ERANGE;
3409 goto done;
3410 }
3411
3412 /*
3413 * Reclaim the highest runs in LIFO (descending) order until
3414 * the number of reclaimed pages, "reclaimed", is at least
3415 * "min_reclaim". Reset "reclaimed" each time because each
3416 * reclamation is idempotent, and runs will (likely) recur
3417 * from one scan to the next as restrictions are relaxed.
3418 */
3419 reclaimed = 0;
3420 for (i = 0; count > 0 && i < nruns; i++) {
3421 count--;
3422 m_run = m_runs[RUN_INDEX(count, nruns)];
3423 error = vm_page_reclaim_run(req_class, domain, npages,
3424 m_run, high);
3425 if (error == 0) {
3426 reclaimed += npages;
3427 if (reclaimed >= min_reclaim) {
3428 ret = 0;
3429 goto done;
3430 }
3431 }
3432 }
3433
3434 /*
3435 * Either relax the restrictions on the next scan or return if
3436 * the last scan had no restrictions.
3437 */
3438 if (options == VPSC_NORESERV)
3439 options = VPSC_NOSUPER;
3440 else if (options == VPSC_NOSUPER)
3441 options = VPSC_ANY;
3442 else if (options == VPSC_ANY) {
3443 if (reclaimed != 0)
3444 ret = 0;
3445 goto done;
3446 }
3447 }
3448 done:
3449 if (m_runs != _m_runs)
3450 free(m_runs, M_TEMP);
3451 return (ret);
3452 }
3453
3454 int
3455 vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
3456 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
3457 {
3458 return (vm_page_reclaim_contig_domain_ext(domain, req, npages, low, high,
3459 alignment, boundary, 1));
3460 }
3461
3462 int
3463 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
3464 u_long alignment, vm_paddr_t boundary)
3465 {
3466 struct vm_domainset_iter di;
3467 int domain, ret, status;
3468
3469 ret = ERANGE;
3470
3471 vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
3472 do {
3473 status = vm_page_reclaim_contig_domain(domain, req, npages, low,
3474 high, alignment, boundary);
3475 if (status == 0)
3476 return (0);
3477 else if (status == ERANGE)
3478 vm_domainset_iter_ignore(&di, domain);
3479 else {
3480 KASSERT(status == ENOMEM, ("Unrecognized error %d "
3481 "from vm_page_reclaim_contig_domain()", status));
3482 ret = ENOMEM;
3483 }
3484 } while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
3485
3486 return (ret);
3487 }
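
/*
 * Example (illustrative sketch only): callers needing contiguous physical
 * memory commonly pair vm_page_alloc_contig() with reclamation, waiting for
 * free pages after a transient failure and giving up when the request can
 * never be satisfied (the object lock, when held, is dropped around the
 * wait):
 *
 *	for (;;) {
 *		m = vm_page_alloc_contig(object, pindex, req, npages,
 *		    low, high, alignment, boundary, memattr);
 *		if (m != NULL)
 *			break;
 *		error = vm_page_reclaim_contig(req, npages, low, high,
 *		    alignment, boundary);
 *		if (error == ERANGE)
 *			return (NULL);
 *		if (error == ENOMEM)
 *			vm_wait(object);
 *	}
 */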
3488
3489 /*
3490 * Set the domain in the appropriate page level domainset.
3491 */
3492 void
3493 vm_domain_set(struct vm_domain *vmd)
3494 {
3495
3496 mtx_lock(&vm_domainset_lock);
3497 if (!vmd->vmd_minset && vm_paging_min(vmd)) {
3498 vmd->vmd_minset = 1;
3499 DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains);
3500 }
3501 if (!vmd->vmd_severeset && vm_paging_severe(vmd)) {
3502 vmd->vmd_severeset = 1;
3503 DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains);
3504 }
3505 mtx_unlock(&vm_domainset_lock);
3506 }
3507
3508 /*
3509 * Clear the domain from the appropriate page level domainset.
3510 */
3511 void
3512 vm_domain_clear(struct vm_domain *vmd)
3513 {
3514
3515 mtx_lock(&vm_domainset_lock);
3516 if (vmd->vmd_minset && !vm_paging_min(vmd)) {
3517 vmd->vmd_minset = 0;
3518 DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains);
3519 if (vm_min_waiters != 0) {
3520 vm_min_waiters = 0;
3521 wakeup(&vm_min_domains);
3522 }
3523 }
3524 if (vmd->vmd_severeset && !vm_paging_severe(vmd)) {
3525 vmd->vmd_severeset = 0;
3526 DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains);
3527 if (vm_severe_waiters != 0) {
3528 vm_severe_waiters = 0;
3529 wakeup(&vm_severe_domains);
3530 }
3531 }
3532
3533 /*
3534 * If the pageout daemon needs pages, then tell it that there are
3535 * some free.
3536 */
3537 if (vmd->vmd_pageout_pages_needed &&
3538 vmd->vmd_free_count >= vmd->vmd_pageout_free_min) {
3539 wakeup(&vmd->vmd_pageout_pages_needed);
3540 vmd->vmd_pageout_pages_needed = 0;
3541 }
3542
3543 /* See comments in vm_wait_doms(). */
3544 if (vm_pageproc_waiters) {
3545 vm_pageproc_waiters = 0;
3546 wakeup(&vm_pageproc_waiters);
3547 }
3548 mtx_unlock(&vm_domainset_lock);
3549 }
3550
3551 /*
3552 * Wait for free pages to exceed the min threshold globally.
3553 */
3554 void
3555 vm_wait_min(void)
3556 {
3557
3558 mtx_lock(&vm_domainset_lock);
3559 while (vm_page_count_min()) {
3560 vm_min_waiters++;
3561 msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0);
3562 }
3563 mtx_unlock(&vm_domainset_lock);
3564 }
3565
3566 /*
3567 * Wait for free pages to exceed the severe threshold globally.
3568 */
3569 void
3570 vm_wait_severe(void)
3571 {
3572
3573 mtx_lock(&vm_domainset_lock);
3574 while (vm_page_count_severe()) {
3575 vm_severe_waiters++;
3576 msleep(&vm_severe_domains, &vm_domainset_lock, PVM,
3577 "vmwait", 0);
3578 }
3579 mtx_unlock(&vm_domainset_lock);
3580 }
3581
3582 u_int
3583 vm_wait_count(void)
3584 {
3585
3586 return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters);
3587 }
3588
3589 int
3590 vm_wait_doms(const domainset_t *wdoms, int mflags)
3591 {
3592 int error;
3593
3594 error = 0;
3595
3596 /*
3597 * We use racy wakeup synchronization to avoid expensive global
3598 * locking for the pageproc when sleeping with a non-specific vm_wait.
3599 * To handle this, we only sleep for one tick in this instance. It
3600 * is expected that most allocations for the pageproc will come from
3601 * kmem or vm_page_grab* which will use the more specific and
3602 * race-free vm_wait_domain().
3603 */
3604 if (curproc == pageproc) {
3605 mtx_lock(&vm_domainset_lock);
3606 vm_pageproc_waiters++;
3607 error = msleep(&vm_pageproc_waiters, &vm_domainset_lock,
3608 PVM | PDROP | mflags, "pageprocwait", 1);
3609 } else {
3610 /*
3611 * XXX Ideally we would wait only until the allocation could
3612 * be satisfied. This condition can cause new allocators to
3613 * consume all freed pages while old allocators wait.
3614 */
3615 mtx_lock(&vm_domainset_lock);
3616 if (vm_page_count_min_set(wdoms)) {
3617 if (pageproc == NULL)
3618 panic("vm_wait in early boot");
3619 vm_min_waiters++;
3620 error = msleep(&vm_min_domains, &vm_domainset_lock,
3621 PVM | PDROP | mflags, "vmwait", 0);
3622 } else
3623 mtx_unlock(&vm_domainset_lock);
3624 }
3625 return (error);
3626 }
3627
3628 /*
3629 * vm_wait_domain:
3630 *
3631 * Sleep until free pages are available for allocation.
3632 * - Called in various places after failed memory allocations.
3633 */
3634 void
3635 vm_wait_domain(int domain)
3636 {
3637 struct vm_domain *vmd;
3638 domainset_t wdom;
3639
3640 vmd = VM_DOMAIN(domain);
3641 vm_domain_free_assert_unlocked(vmd);
3642
3643 if (curproc == pageproc) {
3644 mtx_lock(&vm_domainset_lock);
3645 if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) {
3646 vmd->vmd_pageout_pages_needed = 1;
3647 msleep(&vmd->vmd_pageout_pages_needed,
3648 &vm_domainset_lock, PDROP | PSWP, "VMWait", 0);
3649 } else
3650 mtx_unlock(&vm_domainset_lock);
3651 } else {
3652 DOMAINSET_ZERO(&wdom);
3653 DOMAINSET_SET(vmd->vmd_domain, &wdom);
3654 vm_wait_doms(&wdom, 0);
3655 }
3656 }
3657
3658 static int
3659 vm_wait_flags(vm_object_t obj, int mflags)
3660 {
3661 struct domainset *d;
3662
3663 d = NULL;
3664
3665 /*
3666 * Carefully fetch pointers only once: the struct domainset
3667 * itself is immutable but the pointer might change.
3668 */
3669 if (obj != NULL)
3670 d = obj->domain.dr_policy;
3671 if (d == NULL)
3672 d = curthread->td_domain.dr_policy;
3673
3674 return (vm_wait_doms(&d->ds_mask, mflags));
3675 }
3676
3677 /*
3678 * vm_wait:
3679 *
3680 * Sleep until free pages are available for allocation in the
3681 * affinity domains of the obj. If obj is NULL, the domain set
3682 * for the calling thread is used.
3683 * Called in various places after failed memory allocations.
3684 */
3685 void
3686 vm_wait(vm_object_t obj)
3687 {
3688 (void)vm_wait_flags(obj, 0);
3689 }
3690
3691 int
3692 vm_wait_intr(vm_object_t obj)
3693 {
3694 return (vm_wait_flags(obj, PCATCH));
3695 }
3696
3697 /*
3698 * vm_domain_alloc_fail:
3699 *
3700 * Called when a page allocation function fails. Informs the
3701 * pagedaemon and performs the requested wait. Requires the
3702 * object lock on entry; the domain free queue lock must not be
3703 * held. Returns with the object lock held. Returns an error
3704 * when a retry is necessary.
3705 *
3706 */
3707 static int
3708 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req)
3709 {
3710
3711 vm_domain_free_assert_unlocked(vmd);
3712
3713 atomic_add_int(&vmd->vmd_pageout_deficit,
3714 max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
3715 if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) {
3716 if (object != NULL)
3717 VM_OBJECT_WUNLOCK(object);
3718 vm_wait_domain(vmd->vmd_domain);
3719 if (object != NULL)
3720 VM_OBJECT_WLOCK(object);
3721 if (req & VM_ALLOC_WAITOK)
3722 return (EAGAIN);
3723 }
3724
3725 return (0);
3726 }
3727
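/*
 * Illustrative caller pattern (a sketch, not taken verbatim from this file):
 * an allocator holding the object lock typically retries the allocation when
 * vm_domain_alloc_fail() indicates that a wait was performed, e.g.
 *
 *	again:
 *		m = ... attempt the allocation ...;
 *		if (m == NULL && vm_domain_alloc_fail(vmd, object, req) != 0)
 *			goto again;
 */
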
3728 /*
3729 * vm_waitpfault:
3730 *
3731 * Sleep until free pages are available for allocation.
3732 * - Called only in vm_fault so that processes page faulting
3733 * can be easily tracked.
3734 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing
3735 * processes will be able to grab memory first. Do not change
3736 * this balance without careful testing first.
3737 */
3738 void
3739 vm_waitpfault(struct domainset *dset, int timo)
3740 {
3741
3742 /*
3743 * XXX Ideally we would wait only until the allocation could
3744 * be satisfied. This condition can cause new allocators to
3745 * consume all freed pages while old allocators wait.
3746 */
3747 mtx_lock(&vm_domainset_lock);
3748 if (vm_page_count_min_set(&dset->ds_mask)) {
3749 vm_min_waiters++;
3750 msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP,
3751 "pfault", timo);
3752 } else
3753 mtx_unlock(&vm_domainset_lock);
3754 }
3755
3756 static struct vm_pagequeue *
3757 _vm_page_pagequeue(vm_page_t m, uint8_t queue)
3758 {
3759
3760 return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
3761 }
3762
3763 #ifdef INVARIANTS
3764 static struct vm_pagequeue *
3765 vm_page_pagequeue(vm_page_t m)
3766 {
3767
3768 return (_vm_page_pagequeue(m, vm_page_astate_load(m).queue));
3769 }
3770 #endif
3771
3772 static __always_inline bool
3773 vm_page_pqstate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
3774 {
3775 vm_page_astate_t tmp;
3776
3777 tmp = *old;
3778 do {
3779 if (__predict_true(vm_page_astate_fcmpset(m, old, new)))
3780 return (true);
3781 counter_u64_add(pqstate_commit_retries, 1);
3782 } while (old->_bits == tmp._bits);
3783
3784 return (false);
3785 }
3786
3787 /*
3788 * Do the work of committing a queue state update that moves the page out of
3789 * its current queue.
3790 */
3791 static bool
3792 _vm_page_pqstate_commit_dequeue(struct vm_pagequeue *pq, vm_page_t m,
3793 vm_page_astate_t *old, vm_page_astate_t new)
3794 {
3795 vm_page_t next;
3796
3797 vm_pagequeue_assert_locked(pq);
3798 KASSERT(vm_page_pagequeue(m) == pq,
3799 ("%s: queue %p does not match page %p", __func__, pq, m));
3800 KASSERT(old->queue != PQ_NONE && new.queue != old->queue,
3801 ("%s: invalid queue indices %d %d",
3802 __func__, old->queue, new.queue));
3803
3804 /*
3805 * Once the queue index of the page changes there is nothing
3806 * synchronizing with further updates to the page's physical
3807 * queue state. Therefore we must speculatively remove the page
3808 * from the queue now and be prepared to roll back if the queue
3809 * state update fails. If the page is not physically enqueued then
3810 * we just update its queue index.
3811 */
3812 if ((old->flags & PGA_ENQUEUED) != 0) {
3813 new.flags &= ~PGA_ENQUEUED;
3814 next = TAILQ_NEXT(m, plinks.q);
3815 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
3816 vm_pagequeue_cnt_dec(pq);
3817 if (!vm_page_pqstate_fcmpset(m, old, new)) {
3818 if (next == NULL)
3819 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
3820 else
3821 TAILQ_INSERT_BEFORE(next, m, plinks.q);
3822 vm_pagequeue_cnt_inc(pq);
3823 return (false);
3824 } else {
3825 return (true);
3826 }
3827 } else {
3828 return (vm_page_pqstate_fcmpset(m, old, new));
3829 }
3830 }
3831
3832 static bool
3833 vm_page_pqstate_commit_dequeue(vm_page_t m, vm_page_astate_t *old,
3834 vm_page_astate_t new)
3835 {
3836 struct vm_pagequeue *pq;
3837 vm_page_astate_t as;
3838 bool ret;
3839
3840 pq = _vm_page_pagequeue(m, old->queue);
3841
3842 /*
3843 * The queue field and PGA_ENQUEUED flag are stable only so long as the
3844 * corresponding page queue lock is held.
3845 */
3846 vm_pagequeue_lock(pq);
3847 as = vm_page_astate_load(m);
3848 if (__predict_false(as._bits != old->_bits)) {
3849 *old = as;
3850 ret = false;
3851 } else {
3852 ret = _vm_page_pqstate_commit_dequeue(pq, m, old, new);
3853 }
3854 vm_pagequeue_unlock(pq);
3855 return (ret);
3856 }
3857
3858 /*
3859 * Commit a queue state update that enqueues or requeues a page.
3860 */
3861 static bool
3862 _vm_page_pqstate_commit_requeue(struct vm_pagequeue *pq, vm_page_t m,
3863 vm_page_astate_t *old, vm_page_astate_t new)
3864 {
3865 struct vm_domain *vmd;
3866
3867 vm_pagequeue_assert_locked(pq);
3868 KASSERT(old->queue != PQ_NONE && new.queue == old->queue,
3869 ("%s: invalid queue indices %d %d",
3870 __func__, old->queue, new.queue));
3871
3872 new.flags |= PGA_ENQUEUED;
3873 if (!vm_page_pqstate_fcmpset(m, old, new))
3874 return (false);
3875
3876 if ((old->flags & PGA_ENQUEUED) != 0)
3877 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
3878 else
3879 vm_pagequeue_cnt_inc(pq);
3880
3881 /*
3882 * Give PGA_REQUEUE_HEAD precedence over PGA_REQUEUE. In particular, if
3883 * both flags are set in close succession, only PGA_REQUEUE_HEAD will be
3884 * applied, even if it was set first.
3885 */
3886 if ((old->flags & PGA_REQUEUE_HEAD) != 0) {
3887 vmd = vm_pagequeue_domain(m);
3888 KASSERT(pq == &vmd->vmd_pagequeues[PQ_INACTIVE],
3889 ("%s: invalid page queue for page %p", __func__, m));
3890 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
3891 } else {
3892 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
3893 }
3894 return (true);
3895 }
3896
3897 /*
3898 * Commit a queue state update that encodes a request for a deferred queue
3899 * operation.
3900 */
3901 static bool
3902 vm_page_pqstate_commit_request(vm_page_t m, vm_page_astate_t *old,
3903 vm_page_astate_t new)
3904 {
3905
3906 KASSERT(old->queue == new.queue || new.queue != PQ_NONE,
3907 ("%s: invalid state, queue %d flags %x",
3908 __func__, new.queue, new.flags));
3909
3910 if (old->_bits != new._bits &&
3911 !vm_page_pqstate_fcmpset(m, old, new))
3912 return (false);
3913 vm_page_pqbatch_submit(m, new.queue);
3914 return (true);
3915 }
3916
3917 /*
3918 * A generic queue state update function. This handles more cases than the
3919 * specialized functions above.
3920 */
3921 bool
3922 vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
3923 {
3924
3925 if (old->_bits == new._bits)
3926 return (true);
3927
3928 if (old->queue != PQ_NONE && new.queue != old->queue) {
3929 if (!vm_page_pqstate_commit_dequeue(m, old, new))
3930 return (false);
3931 if (new.queue != PQ_NONE)
3932 vm_page_pqbatch_submit(m, new.queue);
3933 } else {
3934 if (!vm_page_pqstate_fcmpset(m, old, new))
3935 return (false);
3936 if (new.queue != PQ_NONE &&
3937 ((new.flags & ~old->flags) & PGA_QUEUE_OP_MASK) != 0)
3938 vm_page_pqbatch_submit(m, new.queue);
3939 }
3940 return (true);
3941 }
3942
3943 /*
3944 * Apply deferred queue state updates to a page.
3945 */
3946 static inline void
3947 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue)
3948 {
3949 vm_page_astate_t new, old;
3950
3951 CRITICAL_ASSERT(curthread);
3952 vm_pagequeue_assert_locked(pq);
3953 KASSERT(queue < PQ_COUNT,
3954 ("%s: invalid queue index %d", __func__, queue));
3955 KASSERT(pq == _vm_page_pagequeue(m, queue),
3956 ("%s: page %p does not belong to queue %p", __func__, m, pq));
3957
3958 for (old = vm_page_astate_load(m);;) {
3959 if (__predict_false(old.queue != queue ||
3960 (old.flags & PGA_QUEUE_OP_MASK) == 0)) {
3961 counter_u64_add(queue_nops, 1);
3962 break;
3963 }
3964 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3965 ("%s: page %p is unmanaged", __func__, m));
3966
3967 new = old;
3968 if ((old.flags & PGA_DEQUEUE) != 0) {
3969 new.flags &= ~PGA_QUEUE_OP_MASK;
3970 new.queue = PQ_NONE;
3971 if (__predict_true(_vm_page_pqstate_commit_dequeue(pq,
3972 m, &old, new))) {
3973 counter_u64_add(queue_ops, 1);
3974 break;
3975 }
3976 } else {
3977 new.flags &= ~(PGA_REQUEUE | PGA_REQUEUE_HEAD);
3978 if (__predict_true(_vm_page_pqstate_commit_requeue(pq,
3979 m, &old, new))) {
3980 counter_u64_add(queue_ops, 1);
3981 break;
3982 }
3983 }
3984 }
3985 }
3986
3987 static void
3988 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
3989 uint8_t queue)
3990 {
3991 int i;
3992
3993 for (i = 0; i < bq->bq_cnt; i++)
3994 vm_pqbatch_process_page(pq, bq->bq_pa[i], queue);
3995 vm_batchqueue_init(bq);
3996 }
3997
3998 /*
3999 * vm_page_pqbatch_submit: [ internal use only ]
4000 *
4001 * Enqueue a page in the specified page queue's batched work queue.
4002 * The caller must have encoded the requested operation in the page
4003 * structure's a.flags field.
4004 */
4005 void
4006 vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
4007 {
4008 struct vm_batchqueue *bq;
4009 struct vm_pagequeue *pq;
4010 int domain, slots_remaining;
4011
4012 KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));
4013
4014 domain = vm_page_domain(m);
4015 critical_enter();
4016 bq = DPCPU_PTR(pqbatch[domain][queue]);
4017 slots_remaining = vm_batchqueue_insert(bq, m);
4018 if (slots_remaining > (VM_BATCHQUEUE_SIZE >> 1)) {
4019 /* keep building the bq */
4020 critical_exit();
4021 return;
4022 } else if (slots_remaining > 0) {
4023 /* Try to process the bq if we can get the lock */
4024 pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue];
4025 if (vm_pagequeue_trylock(pq)) {
4026 vm_pqbatch_process(pq, bq, queue);
4027 vm_pagequeue_unlock(pq);
4028 }
4029 critical_exit();
4030 return;
4031 }
4032 critical_exit();
4033
4034 /* if we make it here, the bq is full so wait for the lock */
4035
4036 pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue];
4037 vm_pagequeue_lock(pq);
4038 critical_enter();
4039 bq = DPCPU_PTR(pqbatch[domain][queue]);
4040 vm_pqbatch_process(pq, bq, queue);
4041 vm_pqbatch_process_page(pq, m, queue);
4042 vm_pagequeue_unlock(pq);
4043 critical_exit();
4044 }
4045
4046 /*
4047 * vm_page_pqbatch_drain: [ internal use only ]
4048 *
4049 * Force all per-CPU page queue batch queues to be drained. This is
4050 * intended for use in severe memory shortages, to ensure that pages
4051 * do not remain stuck in the batch queues.
4052 */
4053 void
4054 vm_page_pqbatch_drain(void)
4055 {
4056 struct thread *td;
4057 struct vm_domain *vmd;
4058 struct vm_pagequeue *pq;
4059 int cpu, domain, queue;
4060
4061 td = curthread;
4062 CPU_FOREACH(cpu) {
4063 thread_lock(td);
4064 sched_bind(td, cpu);
4065 thread_unlock(td);
4066
4067 for (domain = 0; domain < vm_ndomains; domain++) {
4068 vmd = VM_DOMAIN(domain);
4069 for (queue = 0; queue < PQ_COUNT; queue++) {
4070 pq = &vmd->vmd_pagequeues[queue];
4071 vm_pagequeue_lock(pq);
4072 critical_enter();
4073 vm_pqbatch_process(pq,
4074 DPCPU_PTR(pqbatch[domain][queue]), queue);
4075 critical_exit();
4076 vm_pagequeue_unlock(pq);
4077 }
4078 }
4079 }
4080 thread_lock(td);
4081 sched_unbind(td);
4082 thread_unlock(td);
4083 }
4084
4085 /*
4086 * vm_page_dequeue_deferred: [ internal use only ]
4087 *
4088 * Request removal of the given page from its current page
4089 * queue. Physical removal from the queue may be deferred
4090 * indefinitely.
4091 */
4092 void
4093 vm_page_dequeue_deferred(vm_page_t m)
4094 {
4095 vm_page_astate_t new, old;
4096
4097 old = vm_page_astate_load(m);
4098 do {
4099 if (old.queue == PQ_NONE) {
4100 KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0,
4101 ("%s: page %p has unexpected queue state",
4102 __func__, m));
4103 break;
4104 }
4105 new = old;
4106 new.flags |= PGA_DEQUEUE;
4107 } while (!vm_page_pqstate_commit_request(m, &old, new));
4108 }
4109
4110 /*
4111 * vm_page_dequeue:
4112 *
4113 * Remove the page from whichever page queue it's in, if any, before
4114 * returning.
4115 */
4116 void
4117 vm_page_dequeue(vm_page_t m)
4118 {
4119 vm_page_astate_t new, old;
4120
4121 old = vm_page_astate_load(m);
4122 do {
4123 if (old.queue == PQ_NONE) {
4124 KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0,
4125 ("%s: page %p has unexpected queue state",
4126 __func__, m));
4127 break;
4128 }
4129 new = old;
4130 new.flags &= ~PGA_QUEUE_OP_MASK;
4131 new.queue = PQ_NONE;
4132 } while (!vm_page_pqstate_commit_dequeue(m, &old, new));
4133
4134 }
4135
4136 /*
4137 * Schedule the given page for insertion into the specified page queue.
4138 * Physical insertion of the page may be deferred indefinitely.
4139 */
4140 static void
4141 vm_page_enqueue(vm_page_t m, uint8_t queue)
4142 {
4143
4144 KASSERT(m->a.queue == PQ_NONE &&
4145 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
4146 ("%s: page %p is already enqueued", __func__, m));
4147 KASSERT(m->ref_count > 0,
4148 ("%s: page %p does not carry any references", __func__, m));
4149
4150 m->a.queue = queue;
4151 if ((m->a.flags & PGA_REQUEUE) == 0)
4152 vm_page_aflag_set(m, PGA_REQUEUE);
4153 vm_page_pqbatch_submit(m, queue);
4154 }
4155
4156 /*
4157 * vm_page_free_prep:
4158 *
4159 * Prepares the given page to be put on the free list,
4160 * disassociating it from any VM object. The caller may return
4161 * the page to the free list only if this function returns true.
4162 *
4163 * The object, if it exists, must be locked, and then the page must
4164 * be xbusy. Otherwise the page must not be busied. A managed
4165 * page must be unmapped.
4166 */
4167 static bool
4168 vm_page_free_prep(vm_page_t m)
4169 {
4170
4171 /*
4172 * Synchronize with threads that have dropped a reference to this
4173 * page.
4174 */
4175 atomic_thread_fence_acq();
4176
4177 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP)
4178 if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) {
4179 uint64_t *p;
4180 int i;
4181 p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
4182 for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++)
4183 KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx",
4184 m, i, (uintmax_t)*p));
4185 }
4186 #endif
4187 KASSERT((m->flags & PG_NOFREE) == 0,
4188 ("%s: attempting to free a PG_NOFREE page", __func__));
4189 if ((m->oflags & VPO_UNMANAGED) == 0) {
4190 KASSERT(!pmap_page_is_mapped(m),
4191 ("vm_page_free_prep: freeing mapped page %p", m));
4192 KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0,
4193 ("vm_page_free_prep: mapping flags set in page %p", m));
4194 } else {
4195 KASSERT(m->a.queue == PQ_NONE,
4196 ("vm_page_free_prep: unmanaged page %p is queued", m));
4197 }
4198 VM_CNT_INC(v_tfree);
4199
4200 if (m->object != NULL) {
4201 vm_page_radix_remove(m);
4202 vm_page_free_object_prep(m);
4203 } else
4204 vm_page_assert_unbusied(m);
4205
4206 vm_page_busy_free(m);
4207
4208 /*
4209 * If the page is fictitious, it is not returned to the free
4210 * lists; there is nothing more to do.
4211 */
4212 if ((m->flags & PG_FICTITIOUS) != 0) {
4213 KASSERT(m->ref_count == 1,
4214 ("fictitious page %p is referenced", m));
4215 KASSERT(m->a.queue == PQ_NONE,
4216 ("fictitious page %p is queued", m));
4217 return (false);
4218 }
4219
4220 /*
4221 * Pages need not be dequeued before they are returned to the physical
4222 * memory allocator, but they must at least be marked for a deferred
4223 * dequeue.
4224 */
4225 if ((m->oflags & VPO_UNMANAGED) == 0)
4226 vm_page_dequeue_deferred(m);
4227
4228 m->valid = 0;
4229 vm_page_undirty(m);
4230
4231 if (m->ref_count != 0)
4232 panic("vm_page_free_prep: page %p has references", m);
4233
4234 /*
4235 * Restore the default memory attribute to the page.
4236 */
4237 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
4238 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
4239
4240 #if VM_NRESERVLEVEL > 0
4241 /*
4242 * Determine whether the page belongs to a reservation. If the page was
4243 * allocated from a per-CPU cache, it cannot belong to a reservation, so
4244 * as an optimization, we avoid the check in that case.
4245 */
4246 if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m))
4247 return (false);
4248 #endif
4249
4250 return (true);
4251 }
4252
4253 /*
4254 * vm_page_free_toq:
4255 *
4256 * Returns the given page to the free list, disassociating it
4257 * from any VM object.
4258 *
4259 * The object must be locked. The page must be exclusively busied if it
4260 * belongs to an object.
4261 */
4262 static void
4263 vm_page_free_toq(vm_page_t m)
4264 {
4265 struct vm_domain *vmd;
4266 uma_zone_t zone;
4267
4268 if (!vm_page_free_prep(m))
4269 return;
4270
4271 vmd = vm_pagequeue_domain(m);
4272 zone = vmd->vmd_pgcache[m->pool].zone;
4273 if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) {
4274 uma_zfree(zone, m);
4275 return;
4276 }
4277 vm_domain_free_lock(vmd);
4278 vm_phys_free_pages(m, m->pool, 0);
4279 vm_domain_free_unlock(vmd);
4280 vm_domain_freecnt_inc(vmd, 1);
4281 }
4282
4283 /*
4284 * vm_page_free_pages_toq:
4285 *
4286 * Returns a list of pages to the free list, disassociating them
4287 * from any VM object. In other words, this is equivalent to
4288 * calling vm_page_free_toq() for each page on the list.
4289 */
4290 int
4291 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count)
4292 {
4293 vm_page_t m;
4294 int count;
4295
4296 if (SLIST_EMPTY(free))
4297 return (0);
4298
4299 count = 0;
4300 while ((m = SLIST_FIRST(free)) != NULL) {
4301 count++;
4302 SLIST_REMOVE_HEAD(free, plinks.s.ss);
4303 vm_page_free_toq(m);
4304 }
4305
4306 if (update_wire_count)
4307 vm_wire_sub(count);
4308 return (count);
4309 }
4310
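/*
 * Illustrative use (a sketch; not taken from this file): callers accumulate
 * pages on an spglist and free them in one call, e.g.
 *
 *	struct spglist free;
 *
 *	SLIST_INIT(&free);
 *	... collect pages with SLIST_INSERT_HEAD(&free, m, plinks.s.ss) ...
 *	(void)vm_page_free_pages_toq(&free, true);
 */
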
4311 /*
4312 * Mark this page as wired down. For managed pages, this prevents reclamation
4313 * by the page daemon and prevents freeing when the containing object, if any, is destroyed.
4314 */
4315 void
4316 vm_page_wire(vm_page_t m)
4317 {
4318 u_int old;
4319
4320 #ifdef INVARIANTS
4321 if (m->object != NULL && !vm_page_busied(m) &&
4322 !vm_object_busied(m->object))
4323 VM_OBJECT_ASSERT_LOCKED(m->object);
4324 #endif
4325 KASSERT((m->flags & PG_FICTITIOUS) == 0 ||
4326 VPRC_WIRE_COUNT(m->ref_count) >= 1,
4327 ("vm_page_wire: fictitious page %p has zero wirings", m));
4328
4329 old = atomic_fetchadd_int(&m->ref_count, 1);
4330 KASSERT(VPRC_WIRE_COUNT(old) != VPRC_WIRE_COUNT_MAX,
4331 ("vm_page_wire: counter overflow for page %p", m));
4332 if (VPRC_WIRE_COUNT(old) == 0) {
4333 if ((m->oflags & VPO_UNMANAGED) == 0)
4334 vm_page_aflag_set(m, PGA_DEQUEUE);
4335 vm_wire_add(1);
4336 }
4337 }
4338
4339 /*
4340 * Attempt to wire a mapped page following a pmap lookup of that page.
4341 * This may fail if a thread is concurrently tearing down mappings of the page.
4342 * The transient failure is acceptable because it translates to the
4343 * failure of the caller pmap_extract_and_hold(), which should be then
4344 * followed by the vm_fault() fallback, see e.g. vm_fault_quick_hold_pages().
4345 */
4346 bool
4347 vm_page_wire_mapped(vm_page_t m)
4348 {
4349 u_int old;
4350
4351 old = atomic_load_int(&m->ref_count);
4352 do {
4353 KASSERT(old > 0,
4354 ("vm_page_wire_mapped: wiring unreferenced page %p", m));
4355 if ((old & VPRC_BLOCKED) != 0)
4356 return (false);
4357 } while (!atomic_fcmpset_int(&m->ref_count, &old, old + 1));
4358
4359 if (VPRC_WIRE_COUNT(old) == 0) {
4360 if ((m->oflags & VPO_UNMANAGED) == 0)
4361 vm_page_aflag_set(m, PGA_DEQUEUE);
4362 vm_wire_add(1);
4363 }
4364 return (true);
4365 }
4366
4367 /*
4368 * Release a wiring reference to a managed page. If the page still belongs to
4369 * an object, update its position in the page queues to reflect the reference.
4370 * If the wiring was the last reference to the page, free the page.
4371 */
4372 static void
4373 vm_page_unwire_managed(vm_page_t m, uint8_t nqueue, bool noreuse)
4374 {
4375 u_int old;
4376
4377 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4378 ("%s: page %p is unmanaged", __func__, m));
4379
4380 /*
4381 * Update LRU state before releasing the wiring reference.
4382 * Use a release store when updating the reference count to
4383 * synchronize with vm_page_free_prep().
4384 */
4385 old = atomic_load_int(&m->ref_count);
4386 do {
4387 u_int count;
4388
4389 KASSERT(VPRC_WIRE_COUNT(old) > 0,
4390 ("vm_page_unwire: wire count underflow for page %p", m));
4391
4392 count = old & ~VPRC_BLOCKED;
4393 if (count > VPRC_OBJREF + 1) {
4394 /*
4395 * The page has at least one other wiring reference. An
4396 * earlier iteration of this loop may have called
4397 * vm_page_release_toq() and cleared PGA_DEQUEUE, so
4398 * re-set it if necessary.
4399 */
4400 if ((vm_page_astate_load(m).flags & PGA_DEQUEUE) == 0)
4401 vm_page_aflag_set(m, PGA_DEQUEUE);
4402 } else if (count == VPRC_OBJREF + 1) {
4403 /*
4404 * This is the last wiring. Clear PGA_DEQUEUE and
4405 * update the page's queue state to reflect the
4406 * reference. If the page does not belong to an object
4407 * (i.e., the VPRC_OBJREF bit is clear), we only need to
4408 * clear leftover queue state.
4409 */
4410 vm_page_release_toq(m, nqueue, noreuse);
4411 } else if (count == 1) {
4412 vm_page_aflag_clear(m, PGA_DEQUEUE);
4413 }
4414 } while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1));
4415
4416 if (VPRC_WIRE_COUNT(old) == 1) {
4417 vm_wire_sub(1);
4418 if (old == 1)
4419 vm_page_free(m);
4420 }
4421 }
4422
4423 /*
4424 * Release one wiring of the specified page, potentially allowing it to be
4425 * paged out.
4426 *
4427 * Only managed pages belonging to an object can be paged out. If the number
4428 * of wirings transitions to zero and the page is eligible for page out, then
4429 * the page is added to the specified paging queue. If the released wiring
4430 * represented the last reference to the page, the page is freed.
4431 */
4432 void
4433 vm_page_unwire(vm_page_t m, uint8_t nqueue)
4434 {
4435
4436 KASSERT(nqueue < PQ_COUNT,
4437 ("vm_page_unwire: invalid queue %u request for page %p",
4438 nqueue, m));
4439
4440 if ((m->oflags & VPO_UNMANAGED) != 0) {
4441 if (vm_page_unwire_noq(m) && m->ref_count == 0)
4442 vm_page_free(m);
4443 return;
4444 }
4445 vm_page_unwire_managed(m, nqueue, false);
4446 }
4447
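/*
 * Illustrative pairing (a sketch, not taken from this file): a caller that
 * temporarily pins a managed page wires it and later releases the wiring
 * through one of the unwire variants, e.g.
 *
 *	vm_page_wire(m);
 *	... access the page without fear of it being reclaimed ...
 *	vm_page_unwire(m, PQ_ACTIVE);
 */
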
4448 /*
4449 * Unwire a page without (re-)inserting it into a page queue. It is up
4450 * to the caller to enqueue, requeue, or free the page as appropriate.
4451 * In most cases involving managed pages, vm_page_unwire() should be used
4452 * instead.
4453 */
4454 bool
4455 vm_page_unwire_noq(vm_page_t m)
4456 {
4457 u_int old;
4458
4459 old = vm_page_drop(m, 1);
4460 KASSERT(VPRC_WIRE_COUNT(old) != 0,
4461 ("%s: counter underflow for page %p", __func__, m));
4462 KASSERT((m->flags & PG_FICTITIOUS) == 0 || VPRC_WIRE_COUNT(old) > 1,
4463 ("%s: missing ref on fictitious page %p", __func__, m));
4464
4465 if (VPRC_WIRE_COUNT(old) > 1)
4466 return (false);
4467 if ((m->oflags & VPO_UNMANAGED) == 0)
4468 vm_page_aflag_clear(m, PGA_DEQUEUE);
4469 vm_wire_sub(1);
4470 return (true);
4471 }
4472
4473 /*
4474 * Ensure that the page ends up in the specified page queue. If the page is
4475 * active or being moved to the active queue, ensure that its act_count is
4476 * at least ACT_INIT but do not otherwise mess with it.
4477 */
4478 static __always_inline void
4479 vm_page_mvqueue(vm_page_t m, const uint8_t nqueue, const uint16_t nflag)
4480 {
4481 vm_page_astate_t old, new;
4482
4483 KASSERT(m->ref_count > 0,
4484 ("%s: page %p does not carry any references", __func__, m));
4485 KASSERT(nflag == PGA_REQUEUE || nflag == PGA_REQUEUE_HEAD,
4486 ("%s: invalid flags %x", __func__, nflag));
4487
4488 if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m))
4489 return;
4490
4491 old = vm_page_astate_load(m);
4492 do {
4493 if ((old.flags & PGA_DEQUEUE) != 0)
4494 break;
4495 new = old;
4496 new.flags &= ~PGA_QUEUE_OP_MASK;
4497 if (nqueue == PQ_ACTIVE)
4498 new.act_count = max(old.act_count, ACT_INIT);
4499 if (old.queue == nqueue) {
4500 /*
4501 * There is no need to requeue pages already in the
4502 * active queue.
4503 */
4504 if (nqueue != PQ_ACTIVE ||
4505 (old.flags & PGA_ENQUEUED) == 0)
4506 new.flags |= nflag;
4507 } else {
4508 new.flags |= nflag;
4509 new.queue = nqueue;
4510 }
4511 } while (!vm_page_pqstate_commit(m, &old, new));
4512 }
4513
4514 /*
4515 * Put the specified page on the active list (if appropriate).
4516 */
4517 void
4518 vm_page_activate(vm_page_t m)
4519 {
4520
4521 vm_page_mvqueue(m, PQ_ACTIVE, PGA_REQUEUE);
4522 }
4523
4524 /*
4525 * Move the specified page to the tail of the inactive queue, or requeue
4526 * the page if it is already in the inactive queue.
4527 */
4528 void
4529 vm_page_deactivate(vm_page_t m)
4530 {
4531
4532 vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE);
4533 }
4534
4535 void
4536 vm_page_deactivate_noreuse(vm_page_t m)
4537 {
4538
4539 vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE_HEAD);
4540 }
4541
4542 /*
4543 * Put a page in the laundry, or requeue it if it is already there.
4544 */
4545 void
4546 vm_page_launder(vm_page_t m)
4547 {
4548
4549 vm_page_mvqueue(m, PQ_LAUNDRY, PGA_REQUEUE);
4550 }
4551
4552 /*
4553 * Put a page in the PQ_UNSWAPPABLE holding queue.
4554 */
4555 void
4556 vm_page_unswappable(vm_page_t m)
4557 {
4558
4559 VM_OBJECT_ASSERT_LOCKED(m->object);
4560 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4561 ("page %p already unswappable", m));
4562
4563 vm_page_dequeue(m);
4564 vm_page_enqueue(m, PQ_UNSWAPPABLE);
4565 }
4566
4567 /*
4568 * Release a page back to the page queues in preparation for unwiring.
4569 */
4570 static void
4571 vm_page_release_toq(vm_page_t m, uint8_t nqueue, const bool noreuse)
4572 {
4573 vm_page_astate_t old, new;
4574 uint16_t nflag;
4575
4576 /*
4577 * Use a check of the valid bits to determine whether we should
4578 * accelerate reclamation of the page. The object lock might not be
4579 * held here, in which case the check is racy. At worst we will either
4580 * accelerate reclamation of a valid page and violate LRU, or
4581 * unnecessarily defer reclamation of an invalid page.
4582 *
4583 * If we were asked not to cache the page, place it near the head of the
4584 * inactive queue so that it is reclaimed sooner.
4585 */
4586 if (noreuse || vm_page_none_valid(m)) {
4587 nqueue = PQ_INACTIVE;
4588 nflag = PGA_REQUEUE_HEAD;
4589 } else {
4590 nflag = PGA_REQUEUE;
4591 }
4592
4593 old = vm_page_astate_load(m);
4594 do {
4595 new = old;
4596
4597 /*
4598 * If the page is already in the active queue and we are not
4599 * trying to accelerate reclamation, simply mark it as
4600 * referenced and avoid any queue operations.
4601 */
4602 new.flags &= ~PGA_QUEUE_OP_MASK;
4603 if (nflag != PGA_REQUEUE_HEAD && old.queue == PQ_ACTIVE &&
4604 (old.flags & PGA_ENQUEUED) != 0)
4605 new.flags |= PGA_REFERENCED;
4606 else {
4607 new.flags |= nflag;
4608 new.queue = nqueue;
4609 }
4610 } while (!vm_page_pqstate_commit(m, &old, new));
4611 }
4612
4613 /*
4614 * Unwire a page and either attempt to free it or re-add it to the page queues.
4615 */
4616 void
4617 vm_page_release(vm_page_t m, int flags)
4618 {
4619 vm_object_t object;
4620
4621 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4622 ("vm_page_release: page %p is unmanaged", m));
4623
4624 if ((flags & VPR_TRYFREE) != 0) {
4625 for (;;) {
4626 object = atomic_load_ptr(&m->object);
4627 if (object == NULL)
4628 break;
4629 /* Depends on type-stability. */
4630 if (vm_page_busied(m) || !VM_OBJECT_TRYWLOCK(object))
4631 break;
4632 if (object == m->object) {
4633 vm_page_release_locked(m, flags);
4634 VM_OBJECT_WUNLOCK(object);
4635 return;
4636 }
4637 VM_OBJECT_WUNLOCK(object);
4638 }
4639 }
4640 vm_page_unwire_managed(m, PQ_INACTIVE, flags != 0);
4641 }
4642
4643 /* See vm_page_release(). */
4644 void
4645 vm_page_release_locked(vm_page_t m, int flags)
4646 {
4647
4648 VM_OBJECT_ASSERT_WLOCKED(m->object);
4649 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4650 ("vm_page_release_locked: page %p is unmanaged", m));
4651
4652 if (vm_page_unwire_noq(m)) {
4653 if ((flags & VPR_TRYFREE) != 0 &&
4654 (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
4655 m->dirty == 0 && vm_page_tryxbusy(m)) {
4656 /*
4657 * An unlocked lookup may have wired the page before the
4658 * busy lock was acquired, in which case the page must
4659 * not be freed.
4660 */
4661 if (__predict_true(!vm_page_wired(m))) {
4662 vm_page_free(m);
4663 return;
4664 }
4665 vm_page_xunbusy(m);
4666 } else {
4667 vm_page_release_toq(m, PQ_INACTIVE, flags != 0);
4668 }
4669 }
4670 }
4671
4672 static bool
4673 vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t))
4674 {
4675 u_int old;
4676
4677 KASSERT(m->object != NULL && (m->oflags & VPO_UNMANAGED) == 0,
4678 ("vm_page_try_blocked_op: page %p has no object", m));
4679 KASSERT(vm_page_busied(m),
4680 ("vm_page_try_blocked_op: page %p is not busy", m));
4681 VM_OBJECT_ASSERT_LOCKED(m->object);
4682
4683 old = atomic_load_int(&m->ref_count);
4684 do {
4685 KASSERT(old != 0,
4686 ("vm_page_try_blocked_op: page %p has no references", m));
4687 KASSERT((old & VPRC_BLOCKED) == 0,
4688 ("vm_page_try_blocked_op: page %p blocks wirings", m));
4689 if (VPRC_WIRE_COUNT(old) != 0)
4690 return (false);
4691 } while (!atomic_fcmpset_int(&m->ref_count, &old, old | VPRC_BLOCKED));
4692
4693 (op)(m);
4694
4695 /*
4696 * If the object is read-locked, new wirings may be created via an
4697 * object lookup.
4698 */
4699 old = vm_page_drop(m, VPRC_BLOCKED);
4700 KASSERT(!VM_OBJECT_WOWNED(m->object) ||
4701 old == (VPRC_BLOCKED | VPRC_OBJREF),
4702 ("vm_page_try_blocked_op: unexpected refcount value %u for %p",
4703 old, m));
4704 return (true);
4705 }
4706
4707 /*
4708 * Atomically check for wirings and remove all mappings of the page.
4709 */
4710 bool
4711 vm_page_try_remove_all(vm_page_t m)
4712 {
4713
4714 return (vm_page_try_blocked_op(m, pmap_remove_all));
4715 }
4716
4717 /*
4718 * Atomically check for wirings and remove all writeable mappings of the page.
4719 */
4720 bool
4721 vm_page_try_remove_write(vm_page_t m)
4722 {
4723
4724 return (vm_page_try_blocked_op(m, pmap_remove_write));
4725 }
4726
4727 /*
4728 * vm_page_advise
4729 *
4730 * Apply the specified advice to the given page.
4731 */
4732 void
4733 vm_page_advise(vm_page_t m, int advice)
4734 {
4735
4736 VM_OBJECT_ASSERT_WLOCKED(m->object);
4737 vm_page_assert_xbusied(m);
4738
4739 if (advice == MADV_FREE)
4740 /*
4741 * Mark the page clean. This will allow the page to be freed
4742 * without first paging it out. MADV_FREE pages are often
4743 * quickly reused by malloc(3), so we do not do anything that
4744 * would result in a page fault on a later access.
4745 */
4746 vm_page_undirty(m);
4747 else if (advice != MADV_DONTNEED) {
4748 if (advice == MADV_WILLNEED)
4749 vm_page_activate(m);
4750 return;
4751 }
4752
4753 if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
4754 vm_page_dirty(m);
4755
4756 /*
4757 * Clear any references to the page. Otherwise, the page daemon will
4758 * immediately reactivate the page.
4759 */
4760 vm_page_aflag_clear(m, PGA_REFERENCED);
4761
4762 /*
4763 * Place clean pages near the head of the inactive queue rather than
4764 * the tail, thus defeating the queue's LRU operation and ensuring that
4765 * the page will be reused quickly. Dirty pages not already in the
4766 * laundry are moved there.
4767 */
4768 if (m->dirty == 0)
4769 vm_page_deactivate_noreuse(m);
4770 else if (!vm_page_in_laundry(m))
4771 vm_page_launder(m);
4772 }
4773
4774 /*
4775 * vm_page_grab_release
4776 *
4777 * Helper routine for grab functions to release busy on return.
4778 */
4779 static inline void
4780 vm_page_grab_release(vm_page_t m, int allocflags)
4781 {
4782
4783 if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
4784 if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
4785 vm_page_sunbusy(m);
4786 else
4787 vm_page_xunbusy(m);
4788 }
4789 }
4790
4791 /*
4792 * vm_page_grab_sleep
4793 *
4794 * Sleep for busy according to VM_ALLOC_ parameters. Returns true
4795 * if the caller should retry and false otherwise.
4796 *
4797 * If the object is locked on entry, it is unlocked before a false
4798 * return, and remains locked (though it may have been temporarily
4799 * dropped) before a true return.
4800 */
4801 static bool
4802 vm_page_grab_sleep(vm_object_t object, vm_page_t m, vm_pindex_t pindex,
4803 const char *wmesg, int allocflags, bool locked)
4804 {
4805
4806 if ((allocflags & VM_ALLOC_NOWAIT) != 0)
4807 return (false);
4808
4809 /*
4810 * Reference the page before unlocking and sleeping so that
4811 * the page daemon is less likely to reclaim it.
4812 */
4813 if (locked && (allocflags & VM_ALLOC_NOCREAT) == 0)
4814 vm_page_reference(m);
4815
4816 if (_vm_page_busy_sleep(object, m, pindex, wmesg, allocflags, locked) &&
4817 locked)
4818 VM_OBJECT_WLOCK(object);
4819 if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
4820 return (false);
4821
4822 return (true);
4823 }
4824
4825 /*
4826 * Assert that the grab flags are valid.
4827 */
4828 static inline void
4829 vm_page_grab_check(int allocflags)
4830 {
4831
4832 KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
4833 (allocflags & VM_ALLOC_WIRED) != 0,
4834 ("vm_page_grab*: the pages must be busied or wired"));
4835
4836 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
4837 (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
4838 ("vm_page_grab*: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
4839 }
4840
4841 /*
4842 * Calculate the page allocation flags for grab.
4843 */
4844 static inline int
4845 vm_page_grab_pflags(int allocflags)
4846 {
4847 int pflags;
4848
4849 pflags = allocflags &
4850 ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL |
4851 VM_ALLOC_NOBUSY | VM_ALLOC_IGN_SBUSY);
4852 if ((allocflags & VM_ALLOC_NOWAIT) == 0)
4853 pflags |= VM_ALLOC_WAITFAIL;
4854 if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
4855 pflags |= VM_ALLOC_SBUSY;
4856
4857 return (pflags);
4858 }
4859
4860 /*
4861 * Grab a page, waiting until we are woken up due to the page
4862 * changing state. We keep on waiting as long as the page continues
4863 * to be in the object. If the page doesn't exist, first allocate it
4864 * and then conditionally zero it.
4865 *
4866 * This routine may sleep.
4867 *
4868 * The object must be locked on entry. The lock will, however, be released
4869 * and reacquired if the routine sleeps.
4870 */
4871 vm_page_t
4872 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
4873 {
4874 vm_page_t m;
4875
4876 VM_OBJECT_ASSERT_WLOCKED(object);
4877 vm_page_grab_check(allocflags);
4878
4879 retrylookup:
4880 if ((m = vm_page_lookup(object, pindex)) != NULL) {
4881 if (!vm_page_tryacquire(m, allocflags)) {
4882 if (vm_page_grab_sleep(object, m, pindex, "pgrbwt",
4883 allocflags, true))
4884 goto retrylookup;
4885 return (NULL);
4886 }
4887 goto out;
4888 }
4889 if ((allocflags & VM_ALLOC_NOCREAT) != 0)
4890 return (NULL);
4891 m = vm_page_alloc(object, pindex, vm_page_grab_pflags(allocflags));
4892 if (m == NULL) {
4893 if ((allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
4894 return (NULL);
4895 goto retrylookup;
4896 }
4897 if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
4898 pmap_zero_page(m);
4899
4900 out:
4901 vm_page_grab_release(m, allocflags);
4902
4903 return (m);
4904 }
4905
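/*
 * Illustrative caller pattern (a sketch, not taken from this file): the
 * returned page is exclusive-busied unless VM_ALLOC_NOBUSY was requested,
 * so a typical caller looks like
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT);
 *	if (m != NULL) {
 *		... use the page ...
 *		vm_page_xunbusy(m);
 *	}
 *	VM_OBJECT_WUNLOCK(object);
 */
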
4906 /*
4907 * Attempt to validate a page, locklessly acquiring it if necessary, given a
4908 * (object, pindex) tuple and either a possibly invalidated page or NULL. The
4909 * resulting page will be validated against the identity tuple, and busied or
4910 * wired as requested. A NULL return guarantees that the page was not present
4911 * in the radix tree at the time of the call, but callers must perform higher
4912 * level synchronization or retry the operation under a lock if they require
4913 * an atomic answer. This is the only lock-free validation routine; other
4914 * routines can depend on the resulting page state.
4915 *
4916 * The return value PAGE_NOT_ACQUIRED indicates that the operation failed due to
4917 * caller flags.
4918 */
4919 #define PAGE_NOT_ACQUIRED ((vm_page_t)1)
4920 static vm_page_t
4921 vm_page_acquire_unlocked(vm_object_t object, vm_pindex_t pindex, vm_page_t m,
4922 int allocflags)
4923 {
4924 if (m == NULL)
4925 m = vm_page_lookup_unlocked(object, pindex);
4926 for (; m != NULL; m = vm_page_lookup_unlocked(object, pindex)) {
4927 if (vm_page_trybusy(m, allocflags)) {
4928 if (m->object == object && m->pindex == pindex) {
4929 if ((allocflags & VM_ALLOC_WIRED) != 0)
4930 vm_page_wire(m);
4931 vm_page_grab_release(m, allocflags);
4932 break;
4933 }
4934 /* relookup. */
4935 vm_page_busy_release(m);
4936 cpu_spinwait();
4937 continue;
4938 }
4939 if (!vm_page_grab_sleep(object, m, pindex, "pgnslp",
4940 allocflags, false))
4941 return (PAGE_NOT_ACQUIRED);
4942 }
4943 return (m);
4944 }
4945
4946 /*
4947 * Try to locklessly grab a page and fall back to the object lock if NOCREAT
4948 * is not set.
4949 */
4950 vm_page_t
4951 vm_page_grab_unlocked(vm_object_t object, vm_pindex_t pindex, int allocflags)
4952 {
4953 vm_page_t m;
4954
4955 vm_page_grab_check(allocflags);
4956 m = vm_page_acquire_unlocked(object, pindex, NULL, allocflags);
4957 if (m == PAGE_NOT_ACQUIRED)
4958 return (NULL);
4959 if (m != NULL)
4960 return (m);
4961
4962 /*
4963 * The lockless radix lookup should never return false negatives.
4964 * If the user specifies NOCREAT, they are guaranteed that there
4965 * was no page present at the instant of the call. A NOCREAT caller
4966 * must handle create races gracefully.
4967 */
4968 if ((allocflags & VM_ALLOC_NOCREAT) != 0)
4969 return (NULL);
4970
4971 VM_OBJECT_WLOCK(object);
4972 m = vm_page_grab(object, pindex, allocflags);
4973 VM_OBJECT_WUNLOCK(object);
4974
4975 return (m);
4976 }
4977
4978 /*
4979 * Grab a page and make it valid, paging in if necessary. Pages missing from
4980 * their pager are zero filled and validated. If a VM_ALLOC_COUNT is supplied
4981 * and the page is not valid as many as VM_INITIAL_PAGEIN pages can be brought
4982 * in simultaneously. Additional pages will be left on a paging queue but
4983 * will neither be wired nor busy regardless of allocflags.
4984 */
4985 int
4986 vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex, int allocflags)
4987 {
4988 vm_page_t m;
4989 vm_page_t ma[VM_INITIAL_PAGEIN];
4990 int after, i, pflags, rv;
4991
4992 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
4993 (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
4994 ("vm_page_grab_valid: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
4995 KASSERT((allocflags &
4996 (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
4997 ("vm_page_grab_valid: Invalid flags 0x%X", allocflags));
4998 VM_OBJECT_ASSERT_WLOCKED(object);
4999 pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY |
5000 VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY);
5001 pflags |= VM_ALLOC_WAITFAIL;
5002
5003 retrylookup:
5004 if ((m = vm_page_lookup(object, pindex)) != NULL) {
5005 /*
5006 * If the page is fully valid it can only become invalid
5007 * with the object lock held. If it is not valid it can
5008 * become valid with the busy lock held. Therefore, we
5009 * may unnecessarily lock the exclusive busy here if we
5010 * race with I/O completion not using the object lock.
5011 * However, we will not end up with an invalid page and a
5012 * shared lock.
5013 */
5014 if (!vm_page_trybusy(m,
5015 vm_page_all_valid(m) ? allocflags : 0)) {
5016 (void)vm_page_grab_sleep(object, m, pindex, "pgrbwt",
5017 allocflags, true);
5018 goto retrylookup;
5019 }
5020 if (vm_page_all_valid(m))
5021 goto out;
5022 if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
5023 vm_page_busy_release(m);
5024 *mp = NULL;
5025 return (VM_PAGER_FAIL);
5026 }
5027 } else if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
5028 *mp = NULL;
5029 return (VM_PAGER_FAIL);
5030 } else if ((m = vm_page_alloc(object, pindex, pflags)) == NULL) {
5031 if (!vm_pager_can_alloc_page(object, pindex)) {
5032 *mp = NULL;
5033 return (VM_PAGER_AGAIN);
5034 }
5035 goto retrylookup;
5036 }
5037
5038 vm_page_assert_xbusied(m);
5039 if (vm_pager_has_page(object, pindex, NULL, &after)) {
5040 after = MIN(after, VM_INITIAL_PAGEIN);
5041 after = MIN(after, allocflags >> VM_ALLOC_COUNT_SHIFT);
5042 after = MAX(after, 1);
5043 ma[0] = m;
5044 for (i = 1; i < after; i++) {
5045 if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
5046 if (vm_page_any_valid(ma[i]) ||
5047 !vm_page_tryxbusy(ma[i]))
5048 break;
5049 } else {
5050 ma[i] = vm_page_alloc(object, m->pindex + i,
5051 VM_ALLOC_NORMAL);
5052 if (ma[i] == NULL)
5053 break;
5054 }
5055 }
5056 after = i;
5057 vm_object_pip_add(object, after);
5058 VM_OBJECT_WUNLOCK(object);
5059 rv = vm_pager_get_pages(object, ma, after, NULL, NULL);
5060 VM_OBJECT_WLOCK(object);
5061 vm_object_pip_wakeupn(object, after);
5062 /* Pager may have replaced a page. */
5063 m = ma[0];
5064 if (rv != VM_PAGER_OK) {
5065 for (i = 0; i < after; i++) {
5066 if (!vm_page_wired(ma[i]))
5067 vm_page_free(ma[i]);
5068 else
5069 vm_page_xunbusy(ma[i]);
5070 }
5071 *mp = NULL;
5072 return (rv);
5073 }
5074 for (i = 1; i < after; i++)
5075 vm_page_readahead_finish(ma[i]);
5076 MPASS(vm_page_all_valid(m));
5077 } else {
5078 vm_page_zero_invalid(m, TRUE);
5079 }
5080 out:
5081 if ((allocflags & VM_ALLOC_WIRED) != 0)
5082 vm_page_wire(m);
5083 if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m))
5084 vm_page_busy_downgrade(m);
5085 else if ((allocflags & VM_ALLOC_NOBUSY) != 0)
5086 vm_page_busy_release(m);
5087 *mp = m;
5088 return (VM_PAGER_OK);
5089 }
5090
5091 /*
5092 * Locklessly grab a valid page. If the page is not valid or not yet
5093 * allocated this will fall back to the object lock method.
5094 */
5095 int
5096 vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
5097 vm_pindex_t pindex, int allocflags)
5098 {
5099 vm_page_t m;
5100 int flags;
5101 int error;
5102
5103 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
5104 (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
5105 ("vm_page_grab_valid_unlocked: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY "
5106 "mismatch"));
5107 KASSERT((allocflags &
5108 (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
5109 ("vm_page_grab_valid_unlocked: Invalid flags 0x%X", allocflags));
5110
5111 /*
5112 * Attempt a lockless lookup and busy. We need at least an sbusy
5113 * before we can inspect the valid field and return a wired page.
5114 */
5115 flags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
5116 vm_page_grab_check(flags);
5117 m = vm_page_acquire_unlocked(object, pindex, NULL, flags);
5118 if (m == PAGE_NOT_ACQUIRED)
5119 return (VM_PAGER_FAIL);
5120 if (m != NULL) {
5121 if (vm_page_all_valid(m)) {
5122 if ((allocflags & VM_ALLOC_WIRED) != 0)
5123 vm_page_wire(m);
5124 vm_page_grab_release(m, allocflags);
5125 *mp = m;
5126 return (VM_PAGER_OK);
5127 }
5128 vm_page_busy_release(m);
5129 }
5130 if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
5131 *mp = NULL;
5132 return (VM_PAGER_FAIL);
5133 }
5134 VM_OBJECT_WLOCK(object);
5135 error = vm_page_grab_valid(mp, object, pindex, allocflags);
5136 VM_OBJECT_WUNLOCK(object);
5137
5138 return (error);
5139 }
5140
5141 /*
5142 * Return the specified range of pages from the given object. For each
5143 * page offset within the range, if a page already exists within the object
5144 * at that offset and it is busy, then wait for it to change state. If,
5145 * instead, the page doesn't exist, then allocate it.
5146 *
5147 * The caller must always specify an allocation class.
5148 *
5149 * allocation classes:
5150 * VM_ALLOC_NORMAL normal process request
5151 * VM_ALLOC_SYSTEM system *really* needs the pages
5152 *
5153 * The caller must always specify that the pages are to be busied and/or
5154 * wired.
5155 *
5156 * optional allocation flags:
5157 * VM_ALLOC_IGN_SBUSY do not sleep on soft busy pages
5158 * VM_ALLOC_NOBUSY do not exclusive busy the page
5159 * VM_ALLOC_NOWAIT do not sleep
5160 * VM_ALLOC_SBUSY set page to sbusy state
5161 * VM_ALLOC_WIRED wire the pages
5162 * VM_ALLOC_ZERO zero and validate any invalid pages
5163 *
5164 * If VM_ALLOC_NOWAIT is not specified, this routine may sleep. Otherwise, it
5165 * may return a partial prefix of the requested range.
5166 */
5167 int
5168 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
5169 vm_page_t *ma, int count)
5170 {
5171 vm_page_t m, mpred;
5172 int pflags;
5173 int i;
5174
5175 VM_OBJECT_ASSERT_WLOCKED(object);
5176 KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
5177 ("vm_page_grap_pages: VM_ALLOC_COUNT() is not allowed"));
5178 KASSERT(count > 0,
5179 ("vm_page_grab_pages: invalid page count %d", count));
5180 vm_page_grab_check(allocflags);
5181
5182 pflags = vm_page_grab_pflags(allocflags);
5183 i = 0;
5184 retrylookup:
5185 m = vm_page_mpred(object, pindex + i);
5186 if (m == NULL || m->pindex != pindex + i) {
5187 mpred = m;
5188 m = NULL;
5189 } else
5190 mpred = TAILQ_PREV(m, pglist, listq);
5191 for (; i < count; i++) {
5192 if (m != NULL) {
5193 if (!vm_page_tryacquire(m, allocflags)) {
5194 if (vm_page_grab_sleep(object, m, pindex + i,
5195 "grbmaw", allocflags, true))
5196 goto retrylookup;
5197 break;
5198 }
5199 } else {
5200 if ((allocflags & VM_ALLOC_NOCREAT) != 0)
5201 break;
5202 m = vm_page_alloc_after(object, pindex + i,
5203 pflags | VM_ALLOC_COUNT(count - i), mpred);
5204 if (m == NULL) {
5205 if ((allocflags & (VM_ALLOC_NOWAIT |
5206 VM_ALLOC_WAITFAIL)) != 0)
5207 break;
5208 goto retrylookup;
5209 }
5210 }
5211 if (vm_page_none_valid(m) &&
5212 (allocflags & VM_ALLOC_ZERO) != 0) {
5213 if ((m->flags & PG_ZERO) == 0)
5214 pmap_zero_page(m);
5215 vm_page_valid(m);
5216 }
5217 vm_page_grab_release(m, allocflags);
5218 ma[i] = mpred = m;
5219 m = vm_page_next(m);
5220 }
5221 return (i);
5222 }
5223
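/*
 * Illustrative use (a sketch, not taken from this file): grab a run of
 * wired pages, accepting a short count when VM_ALLOC_NOWAIT is given:
 *
 *	vm_page_t ma[8];
 *	int got;
 *
 *	VM_OBJECT_WLOCK(object);
 *	got = vm_page_grab_pages(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY |
 *	    VM_ALLOC_NOWAIT, ma, 8);
 *	VM_OBJECT_WUNLOCK(object);
 */
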
5224 /*
5225 * Unlocked variant of vm_page_grab_pages(). This accepts the same flags
5226 * and will fall back to the locked variant to handle allocation.
5227 */
5228 int
5229 vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
5230 int allocflags, vm_page_t *ma, int count)
5231 {
5232 vm_page_t m;
5233 int flags;
5234 int i;
5235
5236 KASSERT(count > 0,
5237 ("vm_page_grab_pages_unlocked: invalid page count %d", count));
5238 vm_page_grab_check(allocflags);
5239
5240 /*
5241 * Modify flags for lockless acquire to hold the page until we
5242 * set it valid if necessary.
5243 */
5244 flags = allocflags & ~VM_ALLOC_NOBUSY;
5245 vm_page_grab_check(flags);
5246 m = NULL;
5247 for (i = 0; i < count; i++, pindex++) {
5248 /*
5249 * We may see a false NULL here because the previous page has
5250 * been removed or just inserted and the list is loaded without
5251 * barriers. Switch to radix to verify.
5252 */
5253 if (m == NULL || QMD_IS_TRASHED(m) || m->pindex != pindex ||
5254 atomic_load_ptr(&m->object) != object) {
5255 /*
5256 * This guarantees the result is instantaneously
5257 * correct.
5258 */
5259 m = NULL;
5260 }
5261 m = vm_page_acquire_unlocked(object, pindex, m, flags);
5262 if (m == PAGE_NOT_ACQUIRED)
5263 return (i);
5264 if (m == NULL)
5265 break;
5266 if ((flags & VM_ALLOC_ZERO) != 0 && vm_page_none_valid(m)) {
5267 if ((m->flags & PG_ZERO) == 0)
5268 pmap_zero_page(m);
5269 vm_page_valid(m);
5270 }
5271 /* m will still be wired or busy according to flags. */
5272 vm_page_grab_release(m, allocflags);
5273 ma[i] = m;
5274 m = TAILQ_NEXT(m, listq);
5275 }
5276 if (i == count || (allocflags & VM_ALLOC_NOCREAT) != 0)
5277 return (i);
5278 count -= i;
5279 VM_OBJECT_WLOCK(object);
5280 i += vm_page_grab_pages(object, pindex, allocflags, &ma[i], count);
5281 VM_OBJECT_WUNLOCK(object);
5282
5283 return (i);
5284 }
5285
5286 /*
5287 * Mapping function for valid or dirty bits in a page.
5288 *
5289 * Inputs are required to range within a page.
5290 */
5291 vm_page_bits_t
5292 vm_page_bits(int base, int size)
5293 {
5294 int first_bit;
5295 int last_bit;
5296
5297 KASSERT(
5298 base + size <= PAGE_SIZE,
5299 ("vm_page_bits: illegal base/size %d/%d", base, size)
5300 );
5301
5302 if (size == 0) /* handle degenerate case */
5303 return (0);
5304
5305 first_bit = base >> DEV_BSHIFT;
5306 last_bit = (base + size - 1) >> DEV_BSHIFT;
5307
5308 return (((vm_page_bits_t)2 << last_bit) -
5309 ((vm_page_bits_t)1 << first_bit));
5310 }
5311
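/*
 * Worked example (assuming DEV_BSIZE is 512): base = 512 and size = 1024
 * span blocks 1 and 2, so first_bit = 1, last_bit = 2 and the result is
 * (2 << 2) - (1 << 1) = 0x6, i.e. the valid/dirty bits for blocks 1 and 2.
 */
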
5312 void
5313 vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set)
5314 {
5315
5316 #if PAGE_SIZE == 32768
5317 atomic_set_64((uint64_t *)bits, set);
5318 #elif PAGE_SIZE == 16384
5319 atomic_set_32((uint32_t *)bits, set);
5320 #elif (PAGE_SIZE == 8192) && defined(atomic_set_16)
5321 atomic_set_16((uint16_t *)bits, set);
5322 #elif (PAGE_SIZE == 4096) && defined(atomic_set_8)
5323 atomic_set_8((uint8_t *)bits, set);
5324 #else /* PAGE_SIZE <= 8192 */
5325 uintptr_t addr;
5326 int shift;
5327
5328 addr = (uintptr_t)bits;
5329 /*
5330 * Use a trick to perform a 32-bit atomic on the
5331 * containing aligned word, to not depend on the existence
5332 * of atomic_{set, clear}_{8, 16}.
5333 */
5334 shift = addr & (sizeof(uint32_t) - 1);
5335 #if BYTE_ORDER == BIG_ENDIAN
5336 shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
5337 #else
5338 shift *= NBBY;
5339 #endif
5340 addr &= ~(sizeof(uint32_t) - 1);
5341 atomic_set_32((uint32_t *)addr, set << shift);
5342 #endif /* PAGE_SIZE */
5343 }
5344
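/*
 * Worked example of the 32-bit trick above (a sketch, assuming a one-byte
 * vm_page_bits_t on a little-endian machine): if the bits field lives at
 * byte offset 2 within its aligned 32-bit word, then shift = 16, so
 * atomic_set_32() ors in "set << 16", touching only bits 16..23 of the
 * word, which is exactly the byte that holds the field.
 */
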
5345 static inline void
5346 vm_page_bits_clear(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t clear)
5347 {
5348
5349 #if PAGE_SIZE == 32768
5350 atomic_clear_64((uint64_t *)bits, clear);
5351 #elif PAGE_SIZE == 16384
5352 atomic_clear_32((uint32_t *)bits, clear);
5353 #elif (PAGE_SIZE == 8192) && defined(atomic_clear_16)
5354 atomic_clear_16((uint16_t *)bits, clear);
5355 #elif (PAGE_SIZE == 4096) && defined(atomic_clear_8)
5356 atomic_clear_8((uint8_t *)bits, clear);
5357 #else /* PAGE_SIZE <= 8192 */
5358 uintptr_t addr;
5359 int shift;
5360
5361 addr = (uintptr_t)bits;
5362 /*
5363 * Use a trick to perform a 32-bit atomic on the
5364 * containing aligned word, to not depend on the existence
5365 * of atomic_{set, clear}_{8, 16}.
5366 */
5367 shift = addr & (sizeof(uint32_t) - 1);
5368 #if BYTE_ORDER == BIG_ENDIAN
5369 shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
5370 #else
5371 shift *= NBBY;
5372 #endif
5373 addr &= ~(sizeof(uint32_t) - 1);
5374 atomic_clear_32((uint32_t *)addr, clear << shift);
5375 #endif /* PAGE_SIZE */
5376 }
5377
5378 static inline vm_page_bits_t
5379 vm_page_bits_swap(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t newbits)
5380 {
5381 #if PAGE_SIZE == 32768
5382 uint64_t old;
5383
5384 old = *bits;
5385 while (atomic_fcmpset_64(bits, &old, newbits) == 0);
5386 return (old);
5387 #elif PAGE_SIZE == 16384
5388 uint32_t old;
5389
5390 old = *bits;
5391 while (atomic_fcmpset_32(bits, &old, newbits) == 0);
5392 return (old);
5393 #elif (PAGE_SIZE == 8192) && defined(atomic_fcmpset_16)
5394 uint16_t old;
5395
5396 old = *bits;
5397 while (atomic_fcmpset_16(bits, &old, newbits) == 0);
5398 return (old);
5399 #elif (PAGE_SIZE == 4096) && defined(atomic_fcmpset_8)
5400 uint8_t old;
5401
5402 old = *bits;
5403 while (atomic_fcmpset_8(bits, &old, newbits) == 0);
5404 return (old);
5405 #else /* PAGE_SIZE <= 4096*/
5406 uintptr_t addr;
5407 uint32_t old, new, mask;
5408 int shift;
5409
5410 addr = (uintptr_t)bits;
5411 /*
5412 * Use a trick to perform a 32-bit atomic on the
5413 * containing aligned word, to not depend on the existence
5414 * of atomic_{set, swap, clear}_{8, 16}.
5415 */
5416 shift = addr & (sizeof(uint32_t) - 1);
5417 #if BYTE_ORDER == BIG_ENDIAN
5418 shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
5419 #else
5420 shift *= NBBY;
5421 #endif
5422 addr &= ~(sizeof(uint32_t) - 1);
5423 mask = VM_PAGE_BITS_ALL << shift;
5424
5425 old = *bits;
5426 do {
5427 new = old & ~mask;
5428 new |= newbits << shift;
5429 } while (atomic_fcmpset_32((uint32_t *)addr, &old, new) == 0);
5430 return (old >> shift);
5431 #endif /* PAGE_SIZE */
5432 }
5433
5434 /*
5435 * vm_page_set_valid_range:
5436 *
5437 * Sets portions of a page valid. The arguments are expected
5438  *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
5439 * of any partial chunks touched by the range. The invalid portion of
5440 * such chunks will be zeroed.
5441 *
5442  *	(base + size) must be less than or equal to PAGE_SIZE.
5443 */
5444 void
5445 vm_page_set_valid_range(vm_page_t m, int base, int size)
5446 {
5447 int endoff, frag;
5448 vm_page_bits_t pagebits;
5449
5450 vm_page_assert_busied(m);
5451 if (size == 0) /* handle degenerate case */
5452 return;
5453
5454 /*
5455 * If the base is not DEV_BSIZE aligned and the valid
5456 * bit is clear, we have to zero out a portion of the
5457 * first block.
5458 */
5459 if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
5460 	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
5461 pmap_zero_page_area(m, frag, base - frag);
5462
5463 /*
5464 * If the ending offset is not DEV_BSIZE aligned and the
5465 * valid bit is clear, we have to zero out a portion of
5466 * the last block.
5467 */
5468 endoff = base + size;
5469 if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
5470 	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
5471 pmap_zero_page_area(m, endoff,
5472 DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
5473
5474 /*
5475 * Assert that no previously invalid block that is now being validated
5476 * is already dirty.
5477 */
5478 KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
5479 ("vm_page_set_valid_range: page %p is dirty", m));
5480
5481 /*
5482 * Set valid bits inclusive of any overlap.
5483 */
5484 pagebits = vm_page_bits(base, size);
5485 if (vm_page_xbusied(m))
5486 m->valid |= pagebits;
5487 else
5488 vm_page_bits_set(m, &m->valid, pagebits);
5489 }
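
/*
 * Illustrative usage sketch (hypothetical caller, not taken from this
 * file): a pager that copied "count" bytes into an exclusively busied
 * page could mark just that prefix valid and rely on the routine above
 * to zero the invalid tail of a partially covered DEV_BSIZE chunk:
 *
 *	vm_page_assert_xbusied(m);
 *	if (count < PAGE_SIZE)
 *		vm_page_set_valid_range(m, 0, count);
 *	else
 *		vm_page_valid(m);
 */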
5490
5491 /*
5492 * Set the page dirty bits and free the invalid swap space if
5493 * present. Returns the previous dirty bits.
5494 */
5495 vm_page_bits_t
5496 vm_page_set_dirty(vm_page_t m)
5497 {
5498 vm_page_bits_t old;
5499
5500 VM_PAGE_OBJECT_BUSY_ASSERT(m);
5501
5502 if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) {
5503 old = m->dirty;
5504 m->dirty = VM_PAGE_BITS_ALL;
5505 } else
5506 old = vm_page_bits_swap(m, &m->dirty, VM_PAGE_BITS_ALL);
5507 if (old == 0 && (m->a.flags & PGA_SWAP_SPACE) != 0)
5508 vm_pager_page_unswapped(m);
5509
5510 return (old);
5511 }
5512
5513 /*
5514 * Clear the given bits from the specified page's dirty field.
5515 */
5516 static __inline void
5517 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
5518 {
5519
5520 vm_page_assert_busied(m);
5521
5522 /*
5523 * If the page is xbusied and not write mapped we are the
5524 	 * only thread that can modify dirty bits.  Otherwise, the pmap
5525 * layer can call vm_page_dirty() without holding a distinguished
5526 * lock. The combination of page busy and atomic operations
5527 	 * suffices to guarantee consistency of the page dirty field.
5528 */
5529 if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
5530 m->dirty &= ~pagebits;
5531 else
5532 vm_page_bits_clear(m, &m->dirty, pagebits);
5533 }
5534
5535 /*
5536 * vm_page_set_validclean:
5537 *
5538 * Sets portions of a page valid and clean. The arguments are expected
5539  *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
5540 * of any partial chunks touched by the range. The invalid portion of
5541  *	such chunks will be zeroed.
5542 *
5543  *	(base + size) must be less than or equal to PAGE_SIZE.
5544 */
5545 void
5546 vm_page_set_validclean(vm_page_t m, int base, int size)
5547 {
5548 vm_page_bits_t oldvalid, pagebits;
5549 int endoff, frag;
5550
5551 vm_page_assert_busied(m);
5552 if (size == 0) /* handle degenerate case */
5553 return;
5554
5555 /*
5556 * If the base is not DEV_BSIZE aligned and the valid
5557 * bit is clear, we have to zero out a portion of the
5558 * first block.
5559 */
5560 if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
5561 (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
5562 pmap_zero_page_area(m, frag, base - frag);
5563
5564 /*
5565 * If the ending offset is not DEV_BSIZE aligned and the
5566 * valid bit is clear, we have to zero out a portion of
5567 * the last block.
5568 */
5569 endoff = base + size;
5570 if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
5571 (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
5572 pmap_zero_page_area(m, endoff,
5573 DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
5574
5575 /*
5576 * Set valid, clear dirty bits. If validating the entire
5577 * page we can safely clear the pmap modify bit. We also
5578 * use this opportunity to clear the PGA_NOSYNC flag. If a process
5579 	 * takes a write fault on a MAP_NOSYNC memory area, the flag will
5580 * be set again.
5581 *
5582 * We set valid bits inclusive of any overlap, but we can only
5583 * clear dirty bits for DEV_BSIZE chunks that are fully within
5584 * the range.
5585 */
5586 oldvalid = m->valid;
5587 pagebits = vm_page_bits(base, size);
5588 if (vm_page_xbusied(m))
5589 m->valid |= pagebits;
5590 else
5591 vm_page_bits_set(m, &m->valid, pagebits);
5592 #if 0 /* NOT YET */
5593 if ((frag = base & (DEV_BSIZE - 1)) != 0) {
5594 frag = DEV_BSIZE - frag;
5595 base += frag;
5596 size -= frag;
5597 if (size < 0)
5598 size = 0;
5599 }
5600 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
5601 #endif
5602 if (base == 0 && size == PAGE_SIZE) {
5603 /*
5604 * The page can only be modified within the pmap if it is
5605 * mapped, and it can only be mapped if it was previously
5606 * fully valid.
5607 */
5608 if (oldvalid == VM_PAGE_BITS_ALL)
5609 /*
5610 * Perform the pmap_clear_modify() first. Otherwise,
5611 * a concurrent pmap operation, such as
5612 * pmap_protect(), could clear a modification in the
5613 * pmap and set the dirty field on the page before
5614 * pmap_clear_modify() had begun and after the dirty
5615 * field was cleared here.
5616 */
5617 pmap_clear_modify(m);
5618 m->dirty = 0;
5619 vm_page_aflag_clear(m, PGA_NOSYNC);
5620 } else if (oldvalid != VM_PAGE_BITS_ALL && vm_page_xbusied(m))
5621 m->dirty &= ~pagebits;
5622 else
5623 vm_page_clear_dirty_mask(m, pagebits);
5624 }
5625
5626 void
5627 vm_page_clear_dirty(vm_page_t m, int base, int size)
5628 {
5629
5630 vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
5631 }
5632
5633 /*
5634 * vm_page_set_invalid:
5635 *
5636 * Invalidates DEV_BSIZE'd chunks within a page. Both the
5637  *	valid and dirty bits for the affected areas are cleared.
5638 */
5639 void
5640 vm_page_set_invalid(vm_page_t m, int base, int size)
5641 {
5642 vm_page_bits_t bits;
5643 vm_object_t object;
5644
5645 /*
5646 * The object lock is required so that pages can't be mapped
5647 * read-only while we're in the process of invalidating them.
5648 */
5649 object = m->object;
5650 VM_OBJECT_ASSERT_WLOCKED(object);
5651 vm_page_assert_busied(m);
5652
5653 if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
5654 size >= object->un_pager.vnp.vnp_size)
5655 bits = VM_PAGE_BITS_ALL;
5656 else
5657 bits = vm_page_bits(base, size);
5658 if (object->ref_count != 0 && vm_page_all_valid(m) && bits != 0)
5659 pmap_remove_all(m);
5660 KASSERT((bits == 0 && vm_page_all_valid(m)) ||
5661 !pmap_page_is_mapped(m),
5662 ("vm_page_set_invalid: page %p is mapped", m));
5663 if (vm_page_xbusied(m)) {
5664 m->valid &= ~bits;
5665 m->dirty &= ~bits;
5666 } else {
5667 vm_page_bits_clear(m, &m->valid, bits);
5668 vm_page_bits_clear(m, &m->dirty, bits);
5669 }
5670 }
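
/*
 * Worked example of the vnode special case above, assuming a 4 KB page
 * size: for a vnode object with vnp_size == 10240, the page at pindex 2
 * starts at byte offset 8192, so only its first 2048 bytes lie within
 * the file.  vm_page_set_invalid(m, 0, 2048) then satisfies
 * IDX_TO_OFF(2) + 2048 >= 10240 and invalidates the entire page rather
 * than only the first four DEV_BSIZE chunks.
 */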
5671
5672 /*
5673 * vm_page_invalid:
5674 *
5675 * Invalidates the entire page. The page must be busy, unmapped, and
5676  *	the enclosing object must be locked.  The object lock protects
5677  *	against a concurrent read-only pmap enter, which is done without
5678 * busy.
5679 */
5680 void
5681 vm_page_invalid(vm_page_t m)
5682 {
5683
5684 vm_page_assert_busied(m);
5685 VM_OBJECT_ASSERT_WLOCKED(m->object);
5686 MPASS(!pmap_page_is_mapped(m));
5687
5688 if (vm_page_xbusied(m))
5689 m->valid = 0;
5690 else
5691 vm_page_bits_clear(m, &m->valid, VM_PAGE_BITS_ALL);
5692 }
5693
5694 /*
5695 * vm_page_zero_invalid()
5696 *
5697 * The kernel assumes that the invalid portions of a page contain
5698 * garbage, but such pages can be mapped into memory by user code.
5699 * When this occurs, we must zero out the non-valid portions of the
5700 * page so user code sees what it expects.
5701 *
5702 * Pages are most often semi-valid when the end of a file is mapped
5703 * into memory and the file's size is not page aligned.
5704 */
5705 void
5706 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
5707 {
5708 int b;
5709 int i;
5710
5711 /*
5712 * Scan the valid bits looking for invalid sections that
5713 	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
5714 	 * valid bit may be set) have already been zeroed by
5715 * vm_page_set_validclean().
5716 */
5717 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
5718 if (i == (PAGE_SIZE / DEV_BSIZE) ||
5719 (m->valid & ((vm_page_bits_t)1 << i))) {
5720 if (i > b) {
5721 pmap_zero_page_area(m,
5722 b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
5723 }
5724 b = i + 1;
5725 }
5726 }
5727
5728 /*
5729 	 * setvalid is TRUE when we can safely mark the zeroed areas
5730 	 * as valid.  We can do this if there are no cache consistency
5731 	 * issues, e.g., it is OK to do with UFS, but not OK to do with NFS.
5732 */
5733 if (setvalid)
5734 vm_page_valid(m);
5735 }
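
/*
 * Worked example, assuming a 4 KB page and DEV_BSIZE of 512: with
 * m->valid == 0x01 only block 0 is valid, so the scan above finds the
 * invalid run of blocks 1..7 and calls
 *
 *	pmap_zero_page_area(m, 1 << DEV_BSHIFT, 7 << DEV_BSHIFT);
 *
 * zeroing byte offsets [512, 4096).  With setvalid == TRUE the page is
 * then marked fully valid.
 */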
5736
5737 /*
5738 * vm_page_is_valid:
5739 *
5740  *	Is the (partial) page valid?  Note that in the degenerate case
5741  *	where size == 0, this returns FALSE if the page is entirely
5742  *	invalid and TRUE otherwise.
5743 *
5744  *	Some callers invoke this routine without the busy lock held and
5745 * handle races via higher level locks. Typical callers should
5746 * hold a busy lock to prevent invalidation.
5747 */
5748 int
5749 vm_page_is_valid(vm_page_t m, int base, int size)
5750 {
5751 vm_page_bits_t bits;
5752
5753 bits = vm_page_bits(base, size);
5754 return (vm_page_any_valid(m) && (m->valid & bits) == bits);
5755 }
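
/*
 * Worked example, assuming DEV_BSIZE of 512: with m->valid == 0x0f,
 * vm_page_is_valid(m, 0, 2048) tests the mask 0x0f and returns TRUE,
 * while vm_page_is_valid(m, 0, 2560) tests 0x1f and returns FALSE.  In
 * the degenerate size == 0 case the mask is 0, so the result reduces
 * to whether any part of the page is valid at all.
 */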
5756
5757 /*
5758 * Returns true if all of the specified predicates are true for the entire
5759 * (super)page and false otherwise.
5760 */
5761 bool
5762 vm_page_ps_test(vm_page_t m, int psind, int flags, vm_page_t skip_m)
5763 {
5764 vm_object_t object;
5765 int i, npages;
5766
5767 object = m->object;
5768 if (skip_m != NULL && skip_m->object != object)
5769 return (false);
5770 VM_OBJECT_ASSERT_LOCKED(object);
5771 KASSERT(psind <= m->psind,
5772 ("psind %d > psind %d of m %p", psind, m->psind, m));
5773 npages = atop(pagesizes[psind]);
5774
5775 /*
5776 * The physically contiguous pages that make up a superpage, i.e., a
5777 * page with a page size index ("psind") greater than zero, will
5778 * occupy adjacent entries in vm_page_array[].
5779 */
5780 for (i = 0; i < npages; i++) {
5781 /* Always test object consistency, including "skip_m". */
5782 if (m[i].object != object)
5783 return (false);
5784 if (&m[i] == skip_m)
5785 continue;
5786 if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i]))
5787 return (false);
5788 if ((flags & PS_ALL_DIRTY) != 0) {
5789 /*
5790 * Calling vm_page_test_dirty() or pmap_is_modified()
5791 * might stop this case from spuriously returning
5792 * "false". However, that would require a write lock
5793 * on the object containing "m[i]".
5794 */
5795 if (m[i].dirty != VM_PAGE_BITS_ALL)
5796 return (false);
5797 }
5798 if ((flags & PS_ALL_VALID) != 0 &&
5799 m[i].valid != VM_PAGE_BITS_ALL)
5800 return (false);
5801 }
5802 return (true);
5803 }
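
/*
 * Illustrative usage sketch (hypothetical caller; "all_dirty" is an
 * invented local): with the object locked and "m" naming the first
 * base page of a psind == 1 superpage, a caller might check that every
 * constituent page is dirty before treating the whole run as needing a
 * write-back:
 *
 *	VM_OBJECT_ASSERT_LOCKED(object);
 *	all_dirty = vm_page_ps_test(m, 1, PS_ALL_DIRTY, NULL);
 */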
5804
5805 /*
5806 * Set the page's dirty bits if the page is modified.
5807 */
5808 void
5809 vm_page_test_dirty(vm_page_t m)
5810 {
5811
5812 vm_page_assert_busied(m);
5813 if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
5814 vm_page_dirty(m);
5815 }
5816
5817 void
5818 vm_page_valid(vm_page_t m)
5819 {
5820
5821 vm_page_assert_busied(m);
5822 if (vm_page_xbusied(m))
5823 m->valid = VM_PAGE_BITS_ALL;
5824 else
5825 vm_page_bits_set(m, &m->valid, VM_PAGE_BITS_ALL);
5826 }
5827
5828 void
5829 vm_page_lock_KBI(vm_page_t m, const char *file, int line)
5830 {
5831
5832 mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
5833 }
5834
5835 void
5836 vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
5837 {
5838
5839 mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
5840 }
5841
5842 int
5843 vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
5844 {
5845
5846 return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
5847 }
5848
5849 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
5850 void
5851 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
5852 {
5853
5854 vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
5855 }
5856
5857 void
5858 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
5859 {
5860
5861 mtx_assert_(vm_page_lockptr(m), a, file, line);
5862 }
5863 #endif
5864
5865 #ifdef INVARIANTS
5866 void
5867 vm_page_object_busy_assert(vm_page_t m)
5868 {
5869
5870 /*
5871 * Certain of the page's fields may only be modified by the
5872 * holder of a page or object busy.
5873 */
5874 if (m->object != NULL && !vm_page_busied(m))
5875 VM_OBJECT_ASSERT_BUSY(m->object);
5876 }
5877
5878 void
5879 vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits)
5880 {
5881
5882 if ((bits & PGA_WRITEABLE) == 0)
5883 return;
5884
5885 /*
5886 * The PGA_WRITEABLE flag can only be set if the page is
5887 * managed, is exclusively busied or the object is locked.
5888 * Currently, this flag is only set by pmap_enter().
5889 */
5890 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5891 ("PGA_WRITEABLE on unmanaged page"));
5892 if (!vm_page_xbusied(m))
5893 VM_OBJECT_ASSERT_BUSY(m->object);
5894 }
5895 #endif
5896
5897 #include "opt_ddb.h"
5898 #ifdef DDB
5899 #include <sys/kernel.h>
5900
5901 #include <ddb/ddb.h>
5902
5903 DB_SHOW_COMMAND_FLAGS(page, vm_page_print_page_info, DB_CMD_MEMSAFE)
5904 {
5905
5906 db_printf("vm_cnt.v_free_count: %d\n", vm_free_count());
5907 db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count());
5908 db_printf("vm_cnt.v_active_count: %d\n", vm_active_count());
5909 db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count());
5910 db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count());
5911 db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
5912 db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
5913 db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
5914 db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
5915 }
5916
5917 DB_SHOW_COMMAND_FLAGS(pageq, vm_page_print_pageq_info, DB_CMD_MEMSAFE)
5918 {
5919 int dom;
5920
5921 db_printf("pq_free %d\n", vm_free_count());
5922 for (dom = 0; dom < vm_ndomains; dom++) {
5923 db_printf(
5924 "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n",
5925 dom,
5926 vm_dom[dom].vmd_page_count,
5927 vm_dom[dom].vmd_free_count,
5928 vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
5929 vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
5930 vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt,
5931 vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt);
5932 }
5933 }
5934
5935 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
5936 {
5937 vm_page_t m;
5938 boolean_t phys, virt;
5939
5940 if (!have_addr) {
5941 db_printf("show pginfo addr\n");
5942 return;
5943 }
5944
5945 phys = strchr(modif, 'p') != NULL;
5946 virt = strchr(modif, 'v') != NULL;
5947 if (virt)
5948 m = PHYS_TO_VM_PAGE(pmap_kextract(addr));
5949 else if (phys)
5950 m = PHYS_TO_VM_PAGE(addr);
5951 else
5952 m = (vm_page_t)addr;
5953 db_printf(
5954 "page %p obj %p pidx 0x%jx phys 0x%jx q %d ref 0x%x\n"
5955 " af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
5956 m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
5957 m->a.queue, m->ref_count, m->a.flags, m->oflags,
5958 m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty);
5959 }
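
/*
 * Illustrative DDB usage (the addresses are hypothetical):
 * "show pginfo <addr>" interprets the address as a vm_page_t,
 * "show pginfo/p <addr>" converts a physical address with
 * PHYS_TO_VM_PAGE(), and "show pginfo/v <addr>" first resolves a
 * kernel virtual address via pmap_kextract().
 */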
5960 #endif /* DDB */
5961