xref: /freebsd/sys/vm/vm_page.c (revision 840aca288042eaf625a23908e807abdfde0bc21d)
1 /*-
2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * The Mach Operating System project at Carnegie-Mellon University.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
36  */
37 
38 /*-
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  */
64 
65 /*
66  *	Resident memory management module.
67  */
68 
69 #include <sys/cdefs.h>
70 __FBSDID("$FreeBSD$");
71 
72 #include "opt_vm.h"
73 
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/lock.h>
77 #include <sys/domainset.h>
78 #include <sys/kernel.h>
79 #include <sys/limits.h>
80 #include <sys/linker.h>
81 #include <sys/malloc.h>
82 #include <sys/mman.h>
83 #include <sys/msgbuf.h>
84 #include <sys/mutex.h>
85 #include <sys/proc.h>
86 #include <sys/rwlock.h>
87 #include <sys/sbuf.h>
88 #include <sys/sched.h>
89 #include <sys/smp.h>
90 #include <sys/sysctl.h>
91 #include <sys/vmmeter.h>
92 #include <sys/vnode.h>
93 
94 #include <vm/vm.h>
95 #include <vm/pmap.h>
96 #include <vm/vm_param.h>
97 #include <vm/vm_domainset.h>
98 #include <vm/vm_kern.h>
99 #include <vm/vm_map.h>
100 #include <vm/vm_object.h>
101 #include <vm/vm_page.h>
102 #include <vm/vm_pageout.h>
103 #include <vm/vm_phys.h>
104 #include <vm/vm_pagequeue.h>
105 #include <vm/vm_pager.h>
106 #include <vm/vm_radix.h>
107 #include <vm/vm_reserv.h>
108 #include <vm/vm_extern.h>
109 #include <vm/uma.h>
110 #include <vm/uma_int.h>
111 
112 #include <machine/md_var.h>
113 
114 extern int	uma_startup_count(int);
115 extern void	uma_startup(void *, int);
116 extern int	vmem_startup_count(void);
117 
118 struct vm_domain vm_dom[MAXMEMDOM];
119 
120 DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);
121 
122 struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];
123 
124 struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
125 /* The following fields are protected by the domainset lock. */
126 domainset_t __exclusive_cache_line vm_min_domains;
127 domainset_t __exclusive_cache_line vm_severe_domains;
128 static int vm_min_waiters;
129 static int vm_severe_waiters;
130 static int vm_pageproc_waiters;
131 
132 /*
133  * bogus page -- for I/O to/from partially complete buffers,
134  * or for paging into sparsely invalid regions.
135  */
136 vm_page_t bogus_page;
137 
138 #ifdef PMAP_HAS_PAGE_ARRAY
139 vm_page_t vm_page_array = (vm_page_t)PA_MIN_ADDRESS;
140 #else
141 vm_page_t vm_page_array;
142 #endif
143 long vm_page_array_size;
144 long first_page;
145 
146 static int boot_pages;
147 SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
148     &boot_pages, 0,
149     "number of pages allocated for bootstrapping the VM system");
150 
151 static int pa_tryrelock_restart;
152 SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
153     &pa_tryrelock_restart, 0, "Number of tryrelock restarts");
154 
155 static TAILQ_HEAD(, vm_page) blacklist_head;
156 static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
157 SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
158     CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");
159 
160 static uma_zone_t fakepg_zone;
161 
162 static void vm_page_alloc_check(vm_page_t m);
163 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
164 static void vm_page_dequeue_complete(vm_page_t m);
165 static void vm_page_enqueue(vm_page_t m, uint8_t queue);
166 static void vm_page_init(void *dummy);
167 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
168     vm_pindex_t pindex, vm_page_t mpred);
169 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
170     vm_page_t mpred);
171 static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
172     vm_page_t m_run, vm_paddr_t high);
173 static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
174     int req);
175 static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
176     int flags);
177 static void vm_page_zone_release(void *arg, void **store, int cnt);
178 
179 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
180 
181 static void
182 vm_page_init(void *dummy)
183 {
184 
185 	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
186 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
187 	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
188 	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
189 }
190 
191 /*
192  * The page cache zones are initialized later since we need to be able
193  * to allocate pages before UMA is fully initialized.
194  */
195 static void
196 vm_page_init_cache_zones(void *dummy __unused)
197 {
198 	struct vm_domain *vmd;
199 	struct vm_pgcache *pgcache;
200 	int domain, pool;
201 
202 	for (domain = 0; domain < vm_ndomains; domain++) {
203 		vmd = VM_DOMAIN(domain);
204 
205 		/*
206 		 * Don't allow the page caches to take up more than .25% of
207 		 * memory.
208 		 */
209 		if (vmd->vmd_page_count / 400 < 256 * mp_ncpus * VM_NFREEPOOL)
210 			continue;
211 		for (pool = 0; pool < VM_NFREEPOOL; pool++) {
212 			pgcache = &vmd->vmd_pgcache[pool];
213 			pgcache->domain = domain;
214 			pgcache->pool = pool;
215 			pgcache->zone = uma_zcache_create("vm pgcache",
216 			    sizeof(struct vm_page), NULL, NULL, NULL, NULL,
217 			    vm_page_zone_import, vm_page_zone_release, pgcache,
218 			    UMA_ZONE_MAXBUCKET | UMA_ZONE_VM);
219 			(void)uma_zone_set_maxcache(pgcache->zone, 0);
220 		}
221 	}
222 }
223 SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
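/*
 * For illustration only (assumed example values, not tuned defaults): with
 * VM_NFREEPOOL == 2 and mp_ncpus == 4, the check in vm_page_init_cache_zones()
 * above creates the caches only when vmd_page_count / 400 >= 256 * 4 * 2 =
 * 2048, i.e. when the domain has at least 819200 pages, or about 3.1 GiB
 * with 4 KB pages.
 */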
224 
225 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
226 #if PAGE_SIZE == 32768
227 #ifdef CTASSERT
228 CTASSERT(sizeof(u_long) >= 8);
229 #endif
230 #endif
231 
232 /*
233  * Try to acquire a physical address lock while a pmap is locked.  If the
234  * trylock fails, drop the pmap lock, acquire the physical address lock, and
235  * reacquire the pmap lock, caching the locked pa in *locked.  The caller
236  * should then restart its loop in case the virtual-to-physical mapping changed.
237  */
238 int
239 vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
240 {
241 	vm_paddr_t lockpa;
242 
243 	lockpa = *locked;
244 	*locked = pa;
245 	if (lockpa) {
246 		PA_LOCK_ASSERT(lockpa, MA_OWNED);
247 		if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
248 			return (0);
249 		PA_UNLOCK(lockpa);
250 	}
251 	if (PA_TRYLOCK(pa))
252 		return (0);
253 	PMAP_UNLOCK(pmap);
254 	atomic_add_int(&pa_tryrelock_restart, 1);
255 	PA_LOCK(pa);
256 	PMAP_LOCK(pmap);
257 	return (EAGAIN);
258 }
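/*
 * A sketch of the intended calling pattern (illustrative only; "va" and
 * "locked_pa" stand for a hypothetical caller's local state, and the page
 * table walk that produces "pa" is elided):
 *
 *	PMAP_LOCK(pmap);
 *	locked_pa = 0;
 * retry:
 *	pa = ...physical address for va from the pmap's page tables...;
 *	if (vm_page_pa_tryrelock(pmap, pa, &locked_pa))
 *		goto retry;	(the pmap lock was dropped; re-read the mapping)
 *	...operate on the page at pa with its PA lock held...
 *	PA_UNLOCK_COND(locked_pa);
 *	PMAP_UNLOCK(pmap);
 */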
259 
260 /*
261  *	vm_set_page_size:
262  *
263  *	Sets the page size, defaulting to PAGE_SIZE if it has not
264  *	already been set.  Must be called before any use of
265  *	page-size-dependent functions.
266  */
267 void
268 vm_set_page_size(void)
269 {
270 	if (vm_cnt.v_page_size == 0)
271 		vm_cnt.v_page_size = PAGE_SIZE;
272 	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
273 		panic("vm_set_page_size: page size not a power of two");
274 }
275 
276 /*
277  *	vm_page_blacklist_next:
278  *
279  *	Find the next entry in the provided string of blacklist
280  *	addresses.  Entries are separated by space, comma, or newline.
281  *	If an invalid integer is encountered then the rest of the
282  *	string is skipped.  Updates the list pointer to the next
283  *	character, or NULL if the string is exhausted or invalid.
284  */
285 static vm_paddr_t
286 vm_page_blacklist_next(char **list, char *end)
287 {
288 	vm_paddr_t bad;
289 	char *cp, *pos;
290 
291 	if (list == NULL || *list == NULL)
292 		return (0);
293 	if (**list == '\0') {
294 		*list = NULL;
295 		return (0);
296 	}
297 
298 	/*
299 	 * If there's no end pointer then the buffer is coming from
300 	 * the kenv and we know it's null-terminated.
301 	 */
302 	if (end == NULL)
303 		end = *list + strlen(*list);
304 
305 	/* Ensure that strtoq() won't walk off the end */
306 	if (*end != '\0') {
307 		if (*end == '\n' || *end == ' ' || *end == ',')
308 			*end = '\0';
309 		else {
310 			printf("Blacklist not terminated, skipping\n");
311 			*list = NULL;
312 			return (0);
313 		}
314 	}
315 
316 	for (pos = *list; *pos != '\0'; pos = cp) {
317 		bad = strtoq(pos, &cp, 0);
318 		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
319 			if (bad == 0) {
320 				if (++cp < end)
321 					continue;
322 				else
323 					break;
324 			}
325 		} else
326 			break;
327 		if (*cp == '\0' || ++cp >= end)
328 			*list = NULL;
329 		else
330 			*list = cp;
331 		return (trunc_page(bad));
332 	}
333 	printf("Garbage in RAM blacklist, skipping\n");
334 	*list = NULL;
335 	return (0);
336 }
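/*
 * For example (hypothetical addresses), a blacklist supplied through the
 * "vm.blacklist" environment variable or the "ram_blacklist" preloaded
 * file might look like:
 *
 *	vm.blacklist="0x7f654000,0x7f872000 0x7fab5000"
 *
 * Each parsed entry is truncated to a page boundary by the code above.
 */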
337 
338 bool
339 vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
340 {
341 	struct vm_domain *vmd;
342 	vm_page_t m;
343 	int ret;
344 
345 	m = vm_phys_paddr_to_vm_page(pa);
346 	if (m == NULL)
347 		return (true); /* page does not exist, no failure */
348 
349 	vmd = vm_pagequeue_domain(m);
350 	vm_domain_free_lock(vmd);
351 	ret = vm_phys_unfree_page(m);
352 	vm_domain_free_unlock(vmd);
353 	if (ret != 0) {
354 		vm_domain_freecnt_inc(vmd, -1);
355 		TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
356 		if (verbose)
357 			printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
358 	}
359 	return (ret);
360 }
361 
362 /*
363  *	vm_page_blacklist_check:
364  *
365  *	Iterate through the provided string of blacklist addresses, pulling
366  *	each entry out of the physical allocator free list and putting it
367  *	onto a list for reporting via the vm.page_blacklist sysctl.
368  */
369 static void
370 vm_page_blacklist_check(char *list, char *end)
371 {
372 	vm_paddr_t pa;
373 	char *next;
374 
375 	next = list;
376 	while (next != NULL) {
377 		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
378 			continue;
379 		vm_page_blacklist_add(pa, bootverbose);
380 	}
381 }
382 
383 /*
384  *	vm_page_blacklist_load:
385  *
386  *	Search for a special module named "ram_blacklist".  It'll be a
387  *	plain text file provided by the user via the loader directive
388  *	of the same name.
389  */
390 static void
391 vm_page_blacklist_load(char **list, char **end)
392 {
393 	void *mod;
394 	u_char *ptr;
395 	u_int len;
396 
397 	mod = NULL;
398 	ptr = NULL;
399 
400 	mod = preload_search_by_type("ram_blacklist");
401 	if (mod != NULL) {
402 		ptr = preload_fetch_addr(mod);
403 		len = preload_fetch_size(mod);
404 	}
405 	*list = ptr;
406 	if (ptr != NULL)
407 		*end = ptr + len;
408 	else
409 		*end = NULL;
410 	return;
411 }
412 
413 static int
414 sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
415 {
416 	vm_page_t m;
417 	struct sbuf sbuf;
418 	int error, first;
419 
420 	first = 1;
421 	error = sysctl_wire_old_buffer(req, 0);
422 	if (error != 0)
423 		return (error);
424 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
425 	TAILQ_FOREACH(m, &blacklist_head, listq) {
426 		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
427 		    (uintmax_t)m->phys_addr);
428 		first = 0;
429 	}
430 	error = sbuf_finish(&sbuf);
431 	sbuf_delete(&sbuf);
432 	return (error);
433 }
434 
435 /*
436  * Initialize a dummy page for use in scans of the specified paging queue.
437  * In principle, this function only needs to set the flag PG_MARKER.
438  * Nonetheless, it write busies the page as a safety precaution.
439  */
440 static void
441 vm_page_init_marker(vm_page_t marker, int queue, uint8_t aflags)
442 {
443 
444 	bzero(marker, sizeof(*marker));
445 	marker->flags = PG_MARKER;
446 	marker->aflags = aflags;
447 	marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
448 	marker->queue = queue;
449 }
450 
451 static void
452 vm_page_domain_init(int domain)
453 {
454 	struct vm_domain *vmd;
455 	struct vm_pagequeue *pq;
456 	int i;
457 
458 	vmd = VM_DOMAIN(domain);
459 	bzero(vmd, sizeof(*vmd));
460 	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
461 	    "vm inactive pagequeue";
462 	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
463 	    "vm active pagequeue";
464 	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
465 	    "vm laundry pagequeue";
466 	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
467 	    "vm unswappable pagequeue";
468 	vmd->vmd_domain = domain;
469 	vmd->vmd_page_count = 0;
470 	vmd->vmd_free_count = 0;
471 	vmd->vmd_segs = 0;
472 	vmd->vmd_oom = FALSE;
473 	for (i = 0; i < PQ_COUNT; i++) {
474 		pq = &vmd->vmd_pagequeues[i];
475 		TAILQ_INIT(&pq->pq_pl);
476 		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
477 		    MTX_DEF | MTX_DUPOK);
478 		pq->pq_pdpages = 0;
479 		vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
480 	}
481 	mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
482 	mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
483 	snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);
484 
485 	/*
486 	 * inacthead is used to provide FIFO ordering for LRU-bypassing
487 	 * insertions.
488 	 */
489 	vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
490 	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
491 	    &vmd->vmd_inacthead, plinks.q);
492 
493 	/*
494 	 * The clock pages are used to implement active queue scanning without
495 	 * requeues.  Scans start at clock[0], which is advanced after the scan
496 	 * ends.  When the two clock hands meet, they are reset and scanning
497 	 * resumes from the head of the queue.
498 	 */
499 	vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
500 	vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
501 	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
502 	    &vmd->vmd_clock[0], plinks.q);
503 	TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
504 	    &vmd->vmd_clock[1], plinks.q);
505 }
506 
507 /*
508  * Initialize a physical page in preparation for adding it to the free
509  * lists.
510  */
511 static void
512 vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind)
513 {
514 
515 	m->object = NULL;
516 	m->wire_count = 0;
517 	m->busy_lock = VPB_UNBUSIED;
518 	m->flags = m->aflags = 0;
519 	m->phys_addr = pa;
520 	m->queue = PQ_NONE;
521 	m->psind = 0;
522 	m->segind = segind;
523 	m->order = VM_NFREEORDER;
524 	m->pool = VM_FREEPOOL_DEFAULT;
525 	m->valid = m->dirty = 0;
526 	pmap_page_init(m);
527 }
528 
529 #ifndef PMAP_HAS_PAGE_ARRAY
530 static vm_paddr_t
531 vm_page_array_alloc(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t page_range)
532 {
533 	vm_paddr_t new_end;
534 
535 	/*
536 	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
537 	 * However, because this page is allocated from KVM, out-of-bounds
538 	 * accesses using the direct map will not be trapped.
539 	 */
540 	*vaddr += PAGE_SIZE;
541 
542 	/*
543 	 * Allocate physical memory for the page structures, and map it.
544 	 */
545 	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
546 	vm_page_array = (vm_page_t)pmap_map(vaddr, new_end, end,
547 	    VM_PROT_READ | VM_PROT_WRITE);
548 	vm_page_array_size = page_range;
549 
550 	return (new_end);
551 }
552 #endif
553 
554 /*
555  *	vm_page_startup:
556  *
557  *	Initializes the resident memory module.  Allocates physical memory for
558  *	bootstrapping UMA and some data structures that are used to manage
559  *	physical pages.  Initializes these structures, and populates the free
560  *	page queues.
561  */
562 vm_offset_t
563 vm_page_startup(vm_offset_t vaddr)
564 {
565 	struct vm_phys_seg *seg;
566 	vm_page_t m;
567 	char *list, *listend;
568 	vm_offset_t mapped;
569 	vm_paddr_t end, high_avail, low_avail, new_end, page_range, size;
570 	vm_paddr_t last_pa, pa;
571 	u_long pagecount;
572 	int biggestone, i, segind;
573 #ifdef WITNESS
574 	int witness_size;
575 #endif
576 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
577 	long ii;
578 #endif
579 
580 	vaddr = round_page(vaddr);
581 
582 	vm_phys_early_startup();
583 	biggestone = vm_phys_avail_largest();
584 	end = phys_avail[biggestone+1];
585 
586 	/*
587 	 * Initialize the page and queue locks.
588 	 */
589 	mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
590 	for (i = 0; i < PA_LOCK_COUNT; i++)
591 		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
592 	for (i = 0; i < vm_ndomains; i++)
593 		vm_page_domain_init(i);
594 
595 	/*
596 	 * Allocate memory for use when bootstrapping the kernel memory
597 	 * allocator.  Tell UMA how many zones we are going to create
598 	 * before going fully functional.  UMA will add its zones.
599 	 *
600 	 * VM startup zones: vmem, vmem_btag, VM OBJECT, RADIX NODE, MAP,
601 	 * KMAP ENTRY, MAP ENTRY, VMSPACE.
602 	 */
603 	boot_pages = uma_startup_count(8);
604 
605 #ifndef UMA_MD_SMALL_ALLOC
606 	/* vmem_startup() calls uma_prealloc(). */
607 	boot_pages += vmem_startup_count();
608 	/* vm_map_startup() calls uma_prealloc(). */
609 	boot_pages += howmany(MAX_KMAP,
610 	    UMA_SLAB_SPACE / sizeof(struct vm_map));
611 
612 	/*
613 	 * Before going fully functional, kmem_init() does allocation
614 	 * from "KMAP ENTRY" and vmem_create() does allocation from "vmem".
615 	 */
616 	boot_pages += 2;
617 #endif
618 	/*
619 	 * CTLFLAG_RDTUN doesn't work during the early boot process, so we must
620 	 * manually fetch the value.
621 	 */
622 	TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages);
623 	new_end = end - (boot_pages * UMA_SLAB_SIZE);
624 	new_end = trunc_page(new_end);
625 	mapped = pmap_map(&vaddr, new_end, end,
626 	    VM_PROT_READ | VM_PROT_WRITE);
627 	bzero((void *)mapped, end - new_end);
628 	uma_startup((void *)mapped, boot_pages);
629 
630 #ifdef WITNESS
631 	witness_size = round_page(witness_startup_count());
632 	new_end -= witness_size;
633 	mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
634 	    VM_PROT_READ | VM_PROT_WRITE);
635 	bzero((void *)mapped, witness_size);
636 	witness_startup((void *)mapped);
637 #endif
638 
639 #if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
640     defined(__i386__) || defined(__mips__) || defined(__riscv)
641 	/*
642 	 * Allocate a bitmap to indicate which physical pages need to be
643 	 * included in a minidump.
644 	 *
645 	 * The amd64 port needs this to indicate which direct map pages
646 	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
647 	 *
648 	 * i386 does not strictly need this workspace, but the minidump code
649 	 * still uses it internally, and it is kept in case the sf_buf code
650 	 * decides to use it.
651 	 */
652 	last_pa = 0;
653 	for (i = 0; dump_avail[i + 1] != 0; i += 2)
654 		if (dump_avail[i + 1] > last_pa)
655 			last_pa = dump_avail[i + 1];
656 	page_range = last_pa / PAGE_SIZE;
657 	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
658 	new_end -= vm_page_dump_size;
659 	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
660 	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
661 	bzero((void *)vm_page_dump, vm_page_dump_size);
662 #else
663 	(void)last_pa;
664 #endif
665 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
666     defined(__riscv)
667 	/*
668 	 * Include the UMA bootstrap pages, witness pages and vm_page_dump
669 	 * in a crash dump.  When pmap_map() uses the direct map, they are
670 	 * not automatically included.
671 	 */
672 	for (pa = new_end; pa < end; pa += PAGE_SIZE)
673 		dump_add_page(pa);
674 #endif
675 	phys_avail[biggestone + 1] = new_end;
676 #ifdef __amd64__
677 	/*
678 	 * Request that the physical pages underlying the message buffer be
679 	 * included in a crash dump.  Since the message buffer is accessed
680 	 * through the direct map, they are not automatically included.
681 	 */
682 	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
683 	last_pa = pa + round_page(msgbufsize);
684 	while (pa < last_pa) {
685 		dump_add_page(pa);
686 		pa += PAGE_SIZE;
687 	}
688 #endif
689 	/*
690 	 * Compute the number of pages of memory that will be available for
691 	 * use, taking into account the overhead of a page structure per page.
692 	 * In other words, solve
693 	 *	"available physical memory" - round_page(page_range *
694 	 *	    sizeof(struct vm_page)) = page_range * PAGE_SIZE
695 	 * for page_range.
696 	 */
697 	low_avail = phys_avail[0];
698 	high_avail = phys_avail[1];
699 	for (i = 0; i < vm_phys_nsegs; i++) {
700 		if (vm_phys_segs[i].start < low_avail)
701 			low_avail = vm_phys_segs[i].start;
702 		if (vm_phys_segs[i].end > high_avail)
703 			high_avail = vm_phys_segs[i].end;
704 	}
705 	/* Skip the first chunk.  It is already accounted for. */
706 	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
707 		if (phys_avail[i] < low_avail)
708 			low_avail = phys_avail[i];
709 		if (phys_avail[i + 1] > high_avail)
710 			high_avail = phys_avail[i + 1];
711 	}
712 	first_page = low_avail / PAGE_SIZE;
713 #ifdef VM_PHYSSEG_SPARSE
714 	size = 0;
715 	for (i = 0; i < vm_phys_nsegs; i++)
716 		size += vm_phys_segs[i].end - vm_phys_segs[i].start;
717 	for (i = 0; phys_avail[i + 1] != 0; i += 2)
718 		size += phys_avail[i + 1] - phys_avail[i];
719 #elif defined(VM_PHYSSEG_DENSE)
720 	size = high_avail - low_avail;
721 #else
722 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
723 #endif
724 
725 #ifdef PMAP_HAS_PAGE_ARRAY
726 	pmap_page_array_startup(size / PAGE_SIZE);
727 	biggestone = vm_phys_avail_largest();
728 	end = new_end = phys_avail[biggestone + 1];
729 #else
730 #ifdef VM_PHYSSEG_DENSE
731 	/*
732 	 * In the VM_PHYSSEG_DENSE case, the number of pages can account for
733 	 * the overhead of a page structure per page only if vm_page_array is
734 	 * allocated from the last physical memory chunk.  Otherwise, we must
735 	 * allocate page structures representing the physical memory
736 	 * underlying vm_page_array, even though they will not be used.
737 	 */
738 	if (new_end != high_avail)
739 		page_range = size / PAGE_SIZE;
740 	else
741 #endif
742 	{
743 		page_range = size / (PAGE_SIZE + sizeof(struct vm_page));
744 
745 		/*
746 		 * If the partial bytes remaining are large enough for
747 		 * a page (PAGE_SIZE) without a corresponding
748 		 * 'struct vm_page', then new_end will contain an
749 		 * extra page after subtracting the length of the VM
750 		 * page array.  Compensate by subtracting an extra
751 		 * page from new_end.
752 		 */
753 		if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
754 			if (new_end == high_avail)
755 				high_avail -= PAGE_SIZE;
756 			new_end -= PAGE_SIZE;
757 		}
758 	}
759 	end = new_end;
760 	new_end = vm_page_array_alloc(&vaddr, end, page_range);
761 #endif
762 
763 #if VM_NRESERVLEVEL > 0
764 	/*
765 	 * Allocate physical memory for the reservation management system's
766 	 * data structures, and map it.
767 	 */
768 	new_end = vm_reserv_startup(&vaddr, new_end);
769 #endif
770 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
771     defined(__riscv)
772 	/*
773 	 * Include vm_page_array and vm_reserv_array in a crash dump.
774 	 */
775 	for (pa = new_end; pa < end; pa += PAGE_SIZE)
776 		dump_add_page(pa);
777 #endif
778 	phys_avail[biggestone + 1] = new_end;
779 
780 	/*
781 	 * Add physical memory segments corresponding to the available
782 	 * physical pages.
783 	 */
784 	for (i = 0; phys_avail[i + 1] != 0; i += 2)
785 		if (vm_phys_avail_size(i) != 0)
786 			vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
787 
788 	/*
789 	 * Initialize the physical memory allocator.
790 	 */
791 	vm_phys_init();
792 
793 	/*
794 	 * Initialize the page structures and add every available page to the
795 	 * physical memory allocator's free lists.
796 	 */
797 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
798 	for (ii = 0; ii < vm_page_array_size; ii++) {
799 		m = &vm_page_array[ii];
800 		vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0);
801 		m->flags = PG_FICTITIOUS;
802 	}
803 #endif
804 	vm_cnt.v_page_count = 0;
805 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
806 		seg = &vm_phys_segs[segind];
807 		for (m = seg->first_page, pa = seg->start; pa < seg->end;
808 		    m++, pa += PAGE_SIZE)
809 			vm_page_init_page(m, pa, segind);
810 
811 		/*
812 		 * Add the segment to the free lists only if it is covered by
813 		 * one of the ranges in phys_avail.  Because we've added the
814 		 * ranges to the vm_phys_segs array, we can assume that each
815 		 * segment is either entirely contained in one of the ranges,
816 		 * or doesn't overlap any of them.
817 		 */
818 		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
819 			struct vm_domain *vmd;
820 
821 			if (seg->start < phys_avail[i] ||
822 			    seg->end > phys_avail[i + 1])
823 				continue;
824 
825 			m = seg->first_page;
826 			pagecount = (u_long)atop(seg->end - seg->start);
827 
828 			vmd = VM_DOMAIN(seg->domain);
829 			vm_domain_free_lock(vmd);
830 			vm_phys_enqueue_contig(m, pagecount);
831 			vm_domain_free_unlock(vmd);
832 			vm_domain_freecnt_inc(vmd, pagecount);
833 			vm_cnt.v_page_count += (u_int)pagecount;
834 
835 			vmd = VM_DOMAIN(seg->domain);
836 			vmd->vmd_page_count += (u_int)pagecount;
837 			vmd->vmd_segs |= 1UL << m->segind;
838 			break;
839 		}
840 	}
841 
842 	/*
843 	 * Remove blacklisted pages from the physical memory allocator.
844 	 */
845 	TAILQ_INIT(&blacklist_head);
846 	vm_page_blacklist_load(&list, &listend);
847 	vm_page_blacklist_check(list, listend);
848 
849 	list = kern_getenv("vm.blacklist");
850 	vm_page_blacklist_check(list, NULL);
851 
852 	freeenv(list);
853 #if VM_NRESERVLEVEL > 0
854 	/*
855 	 * Initialize the reservation management system.
856 	 */
857 	vm_reserv_init();
858 #endif
859 
860 	return (vaddr);
861 }
862 
863 void
864 vm_page_reference(vm_page_t m)
865 {
866 
867 	vm_page_aflag_set(m, PGA_REFERENCED);
868 }
869 
870 /*
871  *	vm_page_busy_downgrade:
872  *
873  *	Downgrade an exclusive busy page into a single shared busy page.
874  */
875 void
876 vm_page_busy_downgrade(vm_page_t m)
877 {
878 	u_int x;
879 	bool locked;
880 
881 	vm_page_assert_xbusied(m);
882 	locked = mtx_owned(vm_page_lockptr(m));
883 
884 	for (;;) {
885 		x = m->busy_lock;
886 		x &= VPB_BIT_WAITERS;
887 		if (x != 0 && !locked)
888 			vm_page_lock(m);
889 		if (atomic_cmpset_rel_int(&m->busy_lock,
890 		    VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1)))
891 			break;
892 		if (x != 0 && !locked)
893 			vm_page_unlock(m);
894 	}
895 	if (x != 0) {
896 		wakeup(m);
897 		if (!locked)
898 			vm_page_unlock(m);
899 	}
900 }
901 
902 /*
903  *	vm_page_sbusied:
904  *
905  *	Return a positive value if the page is shared busied, 0 otherwise.
906  */
907 int
908 vm_page_sbusied(vm_page_t m)
909 {
910 	u_int x;
911 
912 	x = m->busy_lock;
913 	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
914 }
915 
916 /*
917  *	vm_page_sunbusy:
918  *
919  *	Shared unbusy a page.
920  */
921 void
922 vm_page_sunbusy(vm_page_t m)
923 {
924 	u_int x;
925 
926 	vm_page_lock_assert(m, MA_NOTOWNED);
927 	vm_page_assert_sbusied(m);
928 
929 	for (;;) {
930 		x = m->busy_lock;
931 		if (VPB_SHARERS(x) > 1) {
932 			if (atomic_cmpset_int(&m->busy_lock, x,
933 			    x - VPB_ONE_SHARER))
934 				break;
935 			continue;
936 		}
937 		if ((x & VPB_BIT_WAITERS) == 0) {
938 			KASSERT(x == VPB_SHARERS_WORD(1),
939 			    ("vm_page_sunbusy: invalid lock state"));
940 			if (atomic_cmpset_int(&m->busy_lock,
941 			    VPB_SHARERS_WORD(1), VPB_UNBUSIED))
942 				break;
943 			continue;
944 		}
945 		KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS),
946 		    ("vm_page_sunbusy: invalid lock state for waiters"));
947 
948 		vm_page_lock(m);
949 		if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) {
950 			vm_page_unlock(m);
951 			continue;
952 		}
953 		wakeup(m);
954 		vm_page_unlock(m);
955 		break;
956 	}
957 }
958 
959 /*
960  *	vm_page_busy_sleep:
961  *
962  *	Sleep and release the page lock, using the page pointer as wchan.
963  *	This is used to implement the hard path of the busying mechanism.
964  *
965  *	The given page must be locked.
966  *
967  *	If nonshared is true, sleep only if the page is xbusy.
968  */
969 void
970 vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared)
971 {
972 	u_int x;
973 
974 	vm_page_assert_locked(m);
975 
976 	x = m->busy_lock;
977 	if (x == VPB_UNBUSIED || (nonshared && (x & VPB_BIT_SHARED) != 0) ||
978 	    ((x & VPB_BIT_WAITERS) == 0 &&
979 	    !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS))) {
980 		vm_page_unlock(m);
981 		return;
982 	}
983 	msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0);
984 }
985 
986 /*
987  *	vm_page_trysbusy:
988  *
989  *	Try to shared busy a page.
990  *	If the operation succeeds, 1 is returned; otherwise, 0.
991  *	The operation never sleeps.
992  */
993 int
994 vm_page_trysbusy(vm_page_t m)
995 {
996 	u_int x;
997 
998 	for (;;) {
999 		x = m->busy_lock;
1000 		if ((x & VPB_BIT_SHARED) == 0)
1001 			return (0);
1002 		if (atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER))
1003 			return (1);
1004 	}
1005 }
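/*
 * The busy_lock word manipulated above encodes both the busy state and the
 * waiters flag in a single integer so that the common paths can use a single
 * atomic compare-and-set.  In rough terms (see the VPB_* definitions in
 * vm_page.h for the authoritative encoding):
 *
 *	VPB_UNBUSIED		- no busy holders
 *	VPB_SHARERS_WORD(n)	- n shared (read) busy holders
 *	VPB_SINGLE_EXCLUSIVER	- a single exclusive (write) busy holder
 *	VPB_BIT_WAITERS		- or'ed in when a thread sleeps in
 *				  vm_page_busy_sleep()
 */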
1006 
1007 static void
1008 vm_page_xunbusy_locked(vm_page_t m)
1009 {
1010 
1011 	vm_page_assert_xbusied(m);
1012 	vm_page_assert_locked(m);
1013 
1014 	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
1015 	/* There is a waiter, do wakeup() instead of vm_page_flash(). */
1016 	wakeup(m);
1017 }
1018 
1019 void
1020 vm_page_xunbusy_maybelocked(vm_page_t m)
1021 {
1022 	bool lockacq;
1023 
1024 	vm_page_assert_xbusied(m);
1025 
1026 	/*
1027 	 * Fast path for unbusy.  If it succeeds, we know that there
1028 	 * are no waiters, so we do not need a wakeup.
1029 	 */
1030 	if (atomic_cmpset_rel_int(&m->busy_lock, VPB_SINGLE_EXCLUSIVER,
1031 	    VPB_UNBUSIED))
1032 		return;
1033 
1034 	lockacq = !mtx_owned(vm_page_lockptr(m));
1035 	if (lockacq)
1036 		vm_page_lock(m);
1037 	vm_page_xunbusy_locked(m);
1038 	if (lockacq)
1039 		vm_page_unlock(m);
1040 }
1041 
1042 /*
1043  *	vm_page_xunbusy_hard:
1044  *
1045  *	Called after the first attempt to exclusively unbusy a page has failed.
1046  *	It is assumed that the waiters bit is set.
1047  */
1048 void
1049 vm_page_xunbusy_hard(vm_page_t m)
1050 {
1051 
1052 	vm_page_assert_xbusied(m);
1053 
1054 	vm_page_lock(m);
1055 	vm_page_xunbusy_locked(m);
1056 	vm_page_unlock(m);
1057 }
1058 
1059 /*
1060  *	vm_page_flash:
1061  *
1062  *	Wakeup anyone waiting for the page.
1063  *	The ownership bits do not change.
1064  *
1065  *	The given page must be locked.
1066  */
1067 void
1068 vm_page_flash(vm_page_t m)
1069 {
1070 	u_int x;
1071 
1072 	vm_page_lock_assert(m, MA_OWNED);
1073 
1074 	for (;;) {
1075 		x = m->busy_lock;
1076 		if ((x & VPB_BIT_WAITERS) == 0)
1077 			return;
1078 		if (atomic_cmpset_int(&m->busy_lock, x,
1079 		    x & (~VPB_BIT_WAITERS)))
1080 			break;
1081 	}
1082 	wakeup(m);
1083 }
1084 
1085 /*
1086  * Avoid releasing and reacquiring the same page lock.
1087  */
1088 void
1089 vm_page_change_lock(vm_page_t m, struct mtx **mtx)
1090 {
1091 	struct mtx *mtx1;
1092 
1093 	mtx1 = vm_page_lockptr(m);
1094 	if (*mtx == mtx1)
1095 		return;
1096 	if (*mtx != NULL)
1097 		mtx_unlock(*mtx);
1098 	*mtx = mtx1;
1099 	mtx_lock(mtx1);
1100 }
1101 
1102 /*
1103  *	vm_page_unhold_pages:
1104  *
1105  *	Unhold each of the pages referenced by the given array.
1106  */
1107 void
1108 vm_page_unhold_pages(vm_page_t *ma, int count)
1109 {
1110 	struct mtx *mtx;
1111 
1112 	mtx = NULL;
1113 	for (; count != 0; count--) {
1114 		vm_page_change_lock(*ma, &mtx);
1115 		if (vm_page_unwire(*ma, PQ_ACTIVE) && (*ma)->object == NULL)
1116 			vm_page_free(*ma);
1117 		ma++;
1118 	}
1119 	if (mtx != NULL)
1120 		mtx_unlock(mtx);
1121 }
1122 
1123 vm_page_t
1124 PHYS_TO_VM_PAGE(vm_paddr_t pa)
1125 {
1126 	vm_page_t m;
1127 
1128 #ifdef VM_PHYSSEG_SPARSE
1129 	m = vm_phys_paddr_to_vm_page(pa);
1130 	if (m == NULL)
1131 		m = vm_phys_fictitious_to_vm_page(pa);
1132 	return (m);
1133 #elif defined(VM_PHYSSEG_DENSE)
1134 	long pi;
1135 
1136 	pi = atop(pa);
1137 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
1138 		m = &vm_page_array[pi - first_page];
1139 		return (m);
1140 	}
1141 	return (vm_phys_fictitious_to_vm_page(pa));
1142 #else
1143 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
1144 #endif
1145 }
1146 
1147 /*
1148  *	vm_page_getfake:
1149  *
1150  *	Create a fictitious page with the specified physical address and
1151  *	memory attribute.  The memory attribute is the only machine-
1152  *	dependent aspect of a fictitious page that must be initialized.
1153  */
1154 vm_page_t
1155 vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
1156 {
1157 	vm_page_t m;
1158 
1159 	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
1160 	vm_page_initfake(m, paddr, memattr);
1161 	return (m);
1162 }
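/*
 * A minimal usage sketch (illustrative; the physical address and the memory
 * attribute are assumed values such as a device pager might use):
 *
 *	vm_page_t m;
 *
 *	m = vm_page_getfake(0xd0000000, VM_MEMATTR_UNCACHEABLE);
 *	...hand the fictitious page out, e.g. to describe device memory...
 *	vm_page_putfake(m);
 */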
1163 
1164 void
1165 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
1166 {
1167 
1168 	if ((m->flags & PG_FICTITIOUS) != 0) {
1169 		/*
1170 		 * The page's memattr might have changed since the
1171 		 * previous initialization.  Update the pmap to the
1172 		 * new memattr.
1173 		 */
1174 		goto memattr;
1175 	}
1176 	m->phys_addr = paddr;
1177 	m->queue = PQ_NONE;
1178 	/* Fictitious pages don't use "segind". */
1179 	m->flags = PG_FICTITIOUS;
1180 	/* Fictitious pages don't use "order" or "pool". */
1181 	m->oflags = VPO_UNMANAGED;
1182 	m->busy_lock = VPB_SINGLE_EXCLUSIVER;
1183 	m->wire_count = 1;
1184 	pmap_page_init(m);
1185 memattr:
1186 	pmap_page_set_memattr(m, memattr);
1187 }
1188 
1189 /*
1190  *	vm_page_putfake:
1191  *
1192  *	Release a fictitious page.
1193  */
1194 void
1195 vm_page_putfake(vm_page_t m)
1196 {
1197 
1198 	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
1199 	KASSERT((m->flags & PG_FICTITIOUS) != 0,
1200 	    ("vm_page_putfake: bad page %p", m));
1201 	uma_zfree(fakepg_zone, m);
1202 }
1203 
1204 /*
1205  *	vm_page_updatefake:
1206  *
1207  *	Update the given fictitious page to the specified physical address and
1208  *	memory attribute.
1209  */
1210 void
1211 vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
1212 {
1213 
1214 	KASSERT((m->flags & PG_FICTITIOUS) != 0,
1215 	    ("vm_page_updatefake: bad page %p", m));
1216 	m->phys_addr = paddr;
1217 	pmap_page_set_memattr(m, memattr);
1218 }
1219 
1220 /*
1221  *	vm_page_free:
1222  *
1223  *	Free a page.
1224  */
1225 void
1226 vm_page_free(vm_page_t m)
1227 {
1228 
1229 	m->flags &= ~PG_ZERO;
1230 	vm_page_free_toq(m);
1231 }
1232 
1233 /*
1234  *	vm_page_free_zero:
1235  *
1236  *	Free a page to the zeroed-pages queue
1237  */
1238 void
1239 vm_page_free_zero(vm_page_t m)
1240 {
1241 
1242 	m->flags |= PG_ZERO;
1243 	vm_page_free_toq(m);
1244 }
1245 
1246 /*
1247  * Unbusy and handle the page queueing for a page from a getpages request that
1248  * was optionally read ahead or behind.
1249  */
1250 void
1251 vm_page_readahead_finish(vm_page_t m)
1252 {
1253 
1254 	/* We shouldn't put invalid pages on queues. */
1255 	KASSERT(m->valid != 0, ("%s: %p is invalid", __func__, m));
1256 
1257 	/*
1258 	 * Since the page is not the one actually needed, whether it should
1259 	 * be activated or deactivated is not obvious.  Empirical results
1260 	 * have shown that deactivating the page is usually the best choice,
1261 	 * unless the page is wanted by another thread.
1262 	 */
1263 	vm_page_lock(m);
1264 	if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
1265 		vm_page_activate(m);
1266 	else
1267 		vm_page_deactivate(m);
1268 	vm_page_unlock(m);
1269 	vm_page_xunbusy(m);
1270 }
1271 
1272 /*
1273  *	vm_page_sleep_if_busy:
1274  *
1275  *	Sleep, releasing the object lock, if the page is busied.
1276  *	Returns TRUE if the thread slept.
1277  *
1278  *	The given page must be unlocked and the object containing it
1279  *	must be locked.
1280  */
1281 int
1282 vm_page_sleep_if_busy(vm_page_t m, const char *msg)
1283 {
1284 	vm_object_t obj;
1285 
1286 	vm_page_lock_assert(m, MA_NOTOWNED);
1287 	VM_OBJECT_ASSERT_WLOCKED(m->object);
1288 
1289 	if (vm_page_busied(m)) {
1290 		/*
1291 		 * The page's object must be cached here because the page's
1292 		 * identity can change during the sleep, which would otherwise
1293 		 * cause a different object to be re-locked afterwards.
1294 		 * It is assumed that a reference to the object is already
1295 		 * held by the caller.
1296 		 */
1297 		obj = m->object;
1298 		vm_page_lock(m);
1299 		VM_OBJECT_WUNLOCK(obj);
1300 		vm_page_busy_sleep(m, msg, false);
1301 		VM_OBJECT_WLOCK(obj);
1302 		return (TRUE);
1303 	}
1304 	return (FALSE);
1305 }
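/*
 * A typical calling pattern (illustrative sketch; "object", "pindex", and
 * the wait message are assumed caller state):
 *
 *	VM_OBJECT_WLOCK(object);
 * retry:
 *	m = vm_page_lookup(object, pindex);
 *	if (m != NULL && vm_page_sleep_if_busy(m, "pgwait"))
 *		goto retry;	(the object lock was dropped; look up again)
 *	...
 *	VM_OBJECT_WUNLOCK(object);
 */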
1306 
1307 /*
1308  *	vm_page_dirty_KBI:		[ internal use only ]
1309  *
1310  *	Set all bits in the page's dirty field.
1311  *
1312  *	The object containing the specified page must be locked if the
1313  *	call is made from the machine-independent layer.
1314  *
1315  *	See vm_page_clear_dirty_mask().
1316  *
1317  *	This function should only be called by vm_page_dirty().
1318  */
1319 void
1320 vm_page_dirty_KBI(vm_page_t m)
1321 {
1322 
1323 	/* Refer to this operation by its public name. */
1324 	KASSERT(m->valid == VM_PAGE_BITS_ALL,
1325 	    ("vm_page_dirty: page is invalid!"));
1326 	m->dirty = VM_PAGE_BITS_ALL;
1327 }
1328 
1329 /*
1330  *	vm_page_insert:		[ internal use only ]
1331  *
1332  *	Inserts the given page into the object and the object's page list.
1333  *
1334  *	The object must be locked.
1335  */
1336 int
1337 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
1338 {
1339 	vm_page_t mpred;
1340 
1341 	VM_OBJECT_ASSERT_WLOCKED(object);
1342 	mpred = vm_radix_lookup_le(&object->rtree, pindex);
1343 	return (vm_page_insert_after(m, object, pindex, mpred));
1344 }
1345 
1346 /*
1347  *	vm_page_insert_after:
1348  *
1349  *	Inserts the page "m" into the specified object at offset "pindex".
1350  *
1351  *	The page "mpred" must immediately precede the offset "pindex" within
1352  *	the specified object.
1353  *
1354  *	The object must be locked.
1355  */
1356 static int
1357 vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
1358     vm_page_t mpred)
1359 {
1360 	vm_page_t msucc;
1361 
1362 	VM_OBJECT_ASSERT_WLOCKED(object);
1363 	KASSERT(m->object == NULL,
1364 	    ("vm_page_insert_after: page already inserted"));
1365 	if (mpred != NULL) {
1366 		KASSERT(mpred->object == object,
1367 		    ("vm_page_insert_after: object doesn't contain mpred"));
1368 		KASSERT(mpred->pindex < pindex,
1369 		    ("vm_page_insert_after: mpred doesn't precede pindex"));
1370 		msucc = TAILQ_NEXT(mpred, listq);
1371 	} else
1372 		msucc = TAILQ_FIRST(&object->memq);
1373 	if (msucc != NULL)
1374 		KASSERT(msucc->pindex > pindex,
1375 		    ("vm_page_insert_after: msucc doesn't succeed pindex"));
1376 
1377 	/*
1378 	 * Record the object/offset pair in this page
1379 	 */
1380 	m->object = object;
1381 	m->pindex = pindex;
1382 
1383 	/*
1384 	 * Now link into the object's ordered list of backed pages.
1385 	 */
1386 	if (vm_radix_insert(&object->rtree, m)) {
1387 		m->object = NULL;
1388 		m->pindex = 0;
1389 		return (1);
1390 	}
1391 	vm_page_insert_radixdone(m, object, mpred);
1392 	return (0);
1393 }
1394 
1395 /*
1396  *	vm_page_insert_radixdone:
1397  *
1398  *	Complete page "m" insertion into the specified object after the
1399  *	radix trie hooking.
1400  *
1401  *	The page "mpred" must precede the offset "m->pindex" within the
1402  *	specified object.
1403  *
1404  *	The object must be locked.
1405  */
1406 static void
1407 vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
1408 {
1409 
1410 	VM_OBJECT_ASSERT_WLOCKED(object);
1411 	KASSERT(object != NULL && m->object == object,
1412 	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
1413 	if (mpred != NULL) {
1414 		KASSERT(mpred->object == object,
1415 		    ("vm_page_insert_radixdone: object doesn't contain mpred"));
1416 		KASSERT(mpred->pindex < m->pindex,
1417 		    ("vm_page_insert_radixdone: mpred doesn't precede pindex"));
1418 	}
1419 
1420 	if (mpred != NULL)
1421 		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
1422 	else
1423 		TAILQ_INSERT_HEAD(&object->memq, m, listq);
1424 
1425 	/*
1426 	 * Show that the object has one more resident page.
1427 	 */
1428 	object->resident_page_count++;
1429 
1430 	/*
1431 	 * Hold the vnode until the last page is released.
1432 	 */
1433 	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
1434 		vhold(object->handle);
1435 
1436 	/*
1437 	 * Since we are inserting a new and possibly dirty page,
1438 	 * update the object's OBJ_MIGHTBEDIRTY flag.
1439 	 */
1440 	if (pmap_page_is_write_mapped(m))
1441 		vm_object_set_writeable_dirty(object);
1442 }
1443 
1444 /*
1445  *	vm_page_remove:
1446  *
1447  *	Removes the specified page from its containing object, but does not
1448  *	invalidate any backing storage.  Return true if the page may be safely
1449  *	freed and false otherwise.
1450  *
1451  *	The object must be locked.  The page must be locked if it is managed.
1452  */
1453 bool
1454 vm_page_remove(vm_page_t m)
1455 {
1456 	vm_object_t object;
1457 	vm_page_t mrem;
1458 
1459 	object = m->object;
1460 
1461 	if ((m->oflags & VPO_UNMANAGED) == 0)
1462 		vm_page_assert_locked(m);
1463 	VM_OBJECT_ASSERT_WLOCKED(object);
1464 	if (vm_page_xbusied(m))
1465 		vm_page_xunbusy_maybelocked(m);
1466 	mrem = vm_radix_remove(&object->rtree, m->pindex);
1467 	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));
1468 
1469 	/*
1470 	 * Now remove from the object's list of backed pages.
1471 	 */
1472 	TAILQ_REMOVE(&object->memq, m, listq);
1473 
1474 	/*
1475 	 * And show that the object has one fewer resident page.
1476 	 */
1477 	object->resident_page_count--;
1478 
1479 	/*
1480 	 * The vnode may now be recycled.
1481 	 */
1482 	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
1483 		vdrop(object->handle);
1484 
1485 	m->object = NULL;
1486 	return (!vm_page_wired(m));
1487 }
1488 
1489 /*
1490  *	vm_page_lookup:
1491  *
1492  *	Returns the page associated with the object/offset
1493  *	pair specified; if none is found, NULL is returned.
1494  *
1495  *	The object must be locked.
1496  */
1497 vm_page_t
1498 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
1499 {
1500 
1501 	VM_OBJECT_ASSERT_LOCKED(object);
1502 	return (vm_radix_lookup(&object->rtree, pindex));
1503 }
1504 
1505 /*
1506  *	vm_page_find_least:
1507  *
1508  *	Returns the page associated with the object with least pindex
1509  *	greater than or equal to the parameter pindex, or NULL.
1510  *
1511  *	The object must be locked.
1512  */
1513 vm_page_t
1514 vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
1515 {
1516 	vm_page_t m;
1517 
1518 	VM_OBJECT_ASSERT_LOCKED(object);
1519 	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
1520 		m = vm_radix_lookup_ge(&object->rtree, pindex);
1521 	return (m);
1522 }
1523 
1524 /*
1525  * Returns the given page's successor (by pindex) within the object if it is
1526  * resident; if none is found, NULL is returned.
1527  *
1528  * The object must be locked.
1529  */
1530 vm_page_t
1531 vm_page_next(vm_page_t m)
1532 {
1533 	vm_page_t next;
1534 
1535 	VM_OBJECT_ASSERT_LOCKED(m->object);
1536 	if ((next = TAILQ_NEXT(m, listq)) != NULL) {
1537 		MPASS(next->object == m->object);
1538 		if (next->pindex != m->pindex + 1)
1539 			next = NULL;
1540 	}
1541 	return (next);
1542 }
1543 
1544 /*
1545  * Returns the given page's predecessor (by pindex) within the object if it is
1546  * resident; if none is found, NULL is returned.
1547  *
1548  * The object must be locked.
1549  */
1550 vm_page_t
1551 vm_page_prev(vm_page_t m)
1552 {
1553 	vm_page_t prev;
1554 
1555 	VM_OBJECT_ASSERT_LOCKED(m->object);
1556 	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
1557 		MPASS(prev->object == m->object);
1558 		if (prev->pindex != m->pindex - 1)
1559 			prev = NULL;
1560 	}
1561 	return (prev);
1562 }
1563 
1564 /*
1565  * Uses the page mnew as a replacement for an existing page at index
1566  * pindex, which must already be present in the object.
1567  *
1568  * The existing page must not be on a paging queue.
1569  */
1570 vm_page_t
1571 vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)
1572 {
1573 	vm_page_t mold;
1574 
1575 	VM_OBJECT_ASSERT_WLOCKED(object);
1576 	KASSERT(mnew->object == NULL,
1577 	    ("vm_page_replace: page %p already in object", mnew));
1578 	KASSERT(mnew->queue == PQ_NONE || vm_page_wired(mnew),
1579 	    ("vm_page_replace: new page %p is on a paging queue", mnew));
1580 
1581 	/*
1582 	 * This function mostly follows vm_page_insert() and
1583 	 * vm_page_remove() without the radix, object count and vnode
1584 	 * dance.  See those functions for more comments.
1585 	 */
1586 
1587 	mnew->object = object;
1588 	mnew->pindex = pindex;
1589 	mold = vm_radix_replace(&object->rtree, mnew);
1590 	KASSERT(mold->queue == PQ_NONE,
1591 	    ("vm_page_replace: old page %p is on a paging queue", mold));
1592 
1593 	/* Keep the resident page list in sorted order. */
1594 	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
1595 	TAILQ_REMOVE(&object->memq, mold, listq);
1596 
1597 	mold->object = NULL;
1598 	vm_page_xunbusy_maybelocked(mold);
1599 
1600 	/*
1601 	 * The object's resident_page_count does not change because we have
1602 	 * swapped one page for another, but OBJ_MIGHTBEDIRTY may need updating.
1603 	 */
1604 	if (pmap_page_is_write_mapped(mnew))
1605 		vm_object_set_writeable_dirty(object);
1606 	return (mold);
1607 }
1608 
1609 /*
1610  *	vm_page_rename:
1611  *
1612  *	Move the given memory entry from its
1613  *	current object to the specified target object/offset.
1614  *
1615  *	Note: swap associated with the page must be invalidated by the move.  We
1616  *	      have to do this for several reasons:  (1) we aren't freeing the
1617  *	      page, (2) we are dirtying the page, (3) the VM system is probably
1618  *	      moving the page from object A to B, and will then later move
1619  *	      the backing store from A to B and we can't have a conflict.
1620  *
1621  *	Note: we *always* dirty the page.  It is necessary both for the
1622  *	      fact that we moved it, and because we may be invalidating
1623  *	      swap.
1624  *
1625  *	The objects must be locked.
1626  */
1627 int
1628 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
1629 {
1630 	vm_page_t mpred;
1631 	vm_pindex_t opidx;
1632 
1633 	VM_OBJECT_ASSERT_WLOCKED(new_object);
1634 
1635 	mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
1636 	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
1637 	    ("vm_page_rename: pindex already renamed"));
1638 
1639 	/*
1640 	 * Create a custom version of vm_page_insert() which does not depend
1641 	 * on mpred and can cheat on the implementation aspects of the
1642 	 * function.
1643 	 */
1644 	opidx = m->pindex;
1645 	m->pindex = new_pindex;
1646 	if (vm_radix_insert(&new_object->rtree, m)) {
1647 		m->pindex = opidx;
1648 		return (1);
1649 	}
1650 
1651 	/*
1652 	 * The operation cannot fail anymore.  The removal must happen before
1653 	 * the listq iterator is tainted.
1654 	 */
1655 	m->pindex = opidx;
1656 	vm_page_lock(m);
1657 	(void)vm_page_remove(m);
1658 
1659 	/* Return back to the new pindex to complete vm_page_insert(). */
1660 	m->pindex = new_pindex;
1661 	m->object = new_object;
1662 	vm_page_unlock(m);
1663 	vm_page_insert_radixdone(m, new_object, mpred);
1664 	vm_page_dirty(m);
1665 	return (0);
1666 }
1667 
1668 /*
1669  *	vm_page_alloc:
1670  *
1671  *	Allocate and return a page that is associated with the specified
1672  *	object and offset pair.  By default, this page is exclusive busied.
1673  *
1674  *	The caller must always specify an allocation class.
1675  *
1676  *	allocation classes:
1677  *	VM_ALLOC_NORMAL		normal process request
1678  *	VM_ALLOC_SYSTEM		system *really* needs a page
1679  *	VM_ALLOC_INTERRUPT	interrupt time request
1680  *
1681  *	optional allocation flags:
1682  *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
1683  *				intends to allocate
1684  *	VM_ALLOC_NOBUSY		do not exclusive busy the page
1685  *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
1686  *	VM_ALLOC_NOOBJ		page is not associated with an object and
1687  *				should not be exclusive busy
1688  *	VM_ALLOC_SBUSY		shared busy the allocated page
1689  *	VM_ALLOC_WIRED		wire the allocated page
1690  *	VM_ALLOC_ZERO		prefer a zeroed page
1691  */
1692 vm_page_t
1693 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
1694 {
1695 
1696 	return (vm_page_alloc_after(object, pindex, req, object != NULL ?
1697 	    vm_radix_lookup_le(&object->rtree, pindex) : NULL));
1698 }
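/*
 * An illustrative allocation (sketch only; "object" and "pindex" are assumed
 * to be supplied by a caller holding the object write-locked):
 *
 *	m = vm_page_alloc(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	if (m == NULL)
 *		...back off, wait for free pages, and retry...
 *	else if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 *
 * VM_ALLOC_ZERO only expresses a preference for a page that is already
 * zeroed; when PG_ZERO is not set the caller must zero the page itself.
 */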
1699 
1700 vm_page_t
1701 vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain,
1702     int req)
1703 {
1704 
1705 	return (vm_page_alloc_domain_after(object, pindex, domain, req,
1706 	    object != NULL ? vm_radix_lookup_le(&object->rtree, pindex) :
1707 	    NULL));
1708 }
1709 
1710 /*
1711  * Allocate a page in the specified object with the given page index.  To
1712  * optimize insertion of the page into the object, the caller must also specify
1713  * the resident page in the object with the largest index smaller than the given
1714  * page index, or NULL if no such page exists.
1715  */
1716 vm_page_t
1717 vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
1718     int req, vm_page_t mpred)
1719 {
1720 	struct vm_domainset_iter di;
1721 	vm_page_t m;
1722 	int domain;
1723 
1724 	vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
1725 	do {
1726 		m = vm_page_alloc_domain_after(object, pindex, domain, req,
1727 		    mpred);
1728 		if (m != NULL)
1729 			break;
1730 	} while (vm_domainset_iter_page(&di, object, &domain) == 0);
1731 
1732 	return (m);
1733 }
1734 
1735 /*
1736  * Reserves "npages" pages from the domain's free page count.  Returns true if
1737  * the free count exceeds the minimum for the request class and false otherwise.
1738  */
1739 int
1740 vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
1741 {
1742 	u_int limit, old, new;
1743 
1744 	req = req & VM_ALLOC_CLASS_MASK;
1745 
1746 	/*
1747 	 * The page daemon is allowed to dig deeper into the free page list.
1748 	 */
1749 	if (curproc == pageproc && req != VM_ALLOC_INTERRUPT)
1750 		req = VM_ALLOC_SYSTEM;
1751 	if (req == VM_ALLOC_INTERRUPT)
1752 		limit = 0;
1753 	else if (req == VM_ALLOC_SYSTEM)
1754 		limit = vmd->vmd_interrupt_free_min;
1755 	else
1756 		limit = vmd->vmd_free_reserved;
1757 
1758 	/*
1759 	 * Attempt to reserve the pages.  Fail if we're below the limit.
1760 	 */
1761 	limit += npages;
1762 	old = vmd->vmd_free_count;
1763 	do {
1764 		if (old < limit)
1765 			return (0);
1766 		new = old - npages;
1767 	} while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0);
1768 
1769 	/* Wake the page daemon if we've crossed the threshold. */
1770 	if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old))
1771 		pagedaemon_wakeup(vmd->vmd_domain);
1772 
1773 	/* Only update bitsets on transitions. */
1774 	if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) ||
1775 	    (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe))
1776 		vm_domain_set(vmd);
1777 
1778 	return (1);
1779 }
1780 
1781 vm_page_t
1782 vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
1783     int req, vm_page_t mpred)
1784 {
1785 	struct vm_domain *vmd;
1786 	vm_page_t m;
1787 	int flags, pool;
1788 
1789 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
1790 	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
1791 	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
1792 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
1793 	    ("inconsistent object(%p)/req(%x)", object, req));
1794 	KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
1795 	    ("Can't sleep and retry object insertion."));
1796 	KASSERT(mpred == NULL || mpred->pindex < pindex,
1797 	    ("mpred %p doesn't precede pindex 0x%jx", mpred,
1798 	    (uintmax_t)pindex));
1799 	if (object != NULL)
1800 		VM_OBJECT_ASSERT_WLOCKED(object);
1801 
1802 	flags = 0;
1803 	m = NULL;
1804 	pool = object != NULL ? VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT;
1805 again:
1806 #if VM_NRESERVLEVEL > 0
1807 	/*
1808 	 * Can we allocate the page from a reservation?
1809 	 */
1810 	if (vm_object_reserv(object) &&
1811 	    (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) !=
1812 	    NULL) {
1813 		domain = vm_phys_domain(m);
1814 		vmd = VM_DOMAIN(domain);
1815 		goto found;
1816 	}
1817 #endif
1818 	vmd = VM_DOMAIN(domain);
1819 	if (vmd->vmd_pgcache[pool].zone != NULL) {
1820 		m = uma_zalloc(vmd->vmd_pgcache[pool].zone, M_NOWAIT);
1821 		if (m != NULL) {
1822 			flags |= PG_PCPU_CACHE;
1823 			goto found;
1824 		}
1825 	}
1826 	if (vm_domain_allocate(vmd, req, 1)) {
1827 		/*
1828 		 * Allocate the page from the free page queues.
1829 		 */
1830 		vm_domain_free_lock(vmd);
1831 		m = vm_phys_alloc_pages(domain, pool, 0);
1832 		vm_domain_free_unlock(vmd);
1833 		if (m == NULL) {
1834 			vm_domain_freecnt_inc(vmd, 1);
1835 #if VM_NRESERVLEVEL > 0
1836 			if (vm_reserv_reclaim_inactive(domain))
1837 				goto again;
1838 #endif
1839 		}
1840 	}
1841 	if (m == NULL) {
1842 		/*
1843 		 * Not allocatable, give up.
1844 		 */
1845 		if (vm_domain_alloc_fail(vmd, object, req))
1846 			goto again;
1847 		return (NULL);
1848 	}
1849 
1850 	/*
1851 	 * At this point we had better have found a good page.
1852 	 */
1853 found:
1854 	vm_page_dequeue(m);
1855 	vm_page_alloc_check(m);
1856 
1857 	/*
1858 	 * Initialize the page.  Only the PG_ZERO flag is inherited.
1859 	 */
1860 	if ((req & VM_ALLOC_ZERO) != 0)
1861 		flags |= (m->flags & PG_ZERO);
1862 	if ((req & VM_ALLOC_NODUMP) != 0)
1863 		flags |= PG_NODUMP;
1864 	m->flags = flags;
1865 	m->aflags = 0;
1866 	m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
1867 	    VPO_UNMANAGED : 0;
1868 	m->busy_lock = VPB_UNBUSIED;
1869 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
1870 		m->busy_lock = VPB_SINGLE_EXCLUSIVER;
1871 	if ((req & VM_ALLOC_SBUSY) != 0)
1872 		m->busy_lock = VPB_SHARERS_WORD(1);
1873 	if (req & VM_ALLOC_WIRED) {
1874 		/*
1875 		 * The page lock is not required for wiring a page until that
1876 		 * page is inserted into the object.
1877 		 */
1878 		vm_wire_add(1);
1879 		m->wire_count = 1;
1880 	}
1881 	m->act_count = 0;
1882 
1883 	if (object != NULL) {
1884 		if (vm_page_insert_after(m, object, pindex, mpred)) {
1885 			if (req & VM_ALLOC_WIRED) {
1886 				vm_wire_sub(1);
1887 				m->wire_count = 0;
1888 			}
1889 			KASSERT(m->object == NULL, ("page %p has object", m));
1890 			m->oflags = VPO_UNMANAGED;
1891 			m->busy_lock = VPB_UNBUSIED;
1892 			/* Don't change PG_ZERO. */
1893 			vm_page_free_toq(m);
1894 			if (req & VM_ALLOC_WAITFAIL) {
1895 				VM_OBJECT_WUNLOCK(object);
1896 				vm_radix_wait();
1897 				VM_OBJECT_WLOCK(object);
1898 			}
1899 			return (NULL);
1900 		}
1901 
1902 		/* Ignore device objects; the pager sets "memattr" for them. */
1903 		if (object->memattr != VM_MEMATTR_DEFAULT &&
1904 		    (object->flags & OBJ_FICTITIOUS) == 0)
1905 			pmap_page_set_memattr(m, object->memattr);
1906 	} else
1907 		m->pindex = pindex;
1908 
1909 	return (m);
1910 }
1911 
1912 /*
1913  *	vm_page_alloc_contig:
1914  *
1915  *	Allocate a contiguous set of physical pages of the given size "npages"
1916  *	from the free lists.  All of the physical pages must be at or above
1917  *	the given physical address "low" and below the given physical address
1918  *	"high".  The given value "alignment" determines the alignment of the
1919  *	first physical page in the set.  If the given value "boundary" is
1920  *	non-zero, then the set of physical pages cannot cross any physical
1921  *	address boundary that is a multiple of that value.  Both "alignment"
1922  *	and "boundary" must be a power of two.
1923  *
1924  *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
1925  *	then the memory attribute setting for the physical pages is configured
1926  *	to the object's memory attribute setting.  Otherwise, the memory
1927  *	attribute setting for the physical pages is configured to "memattr",
1928  *	overriding the object's memory attribute setting.  However, if the
1929  *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
1930  *	memory attribute setting for the physical pages cannot be configured
1931  *	to VM_MEMATTR_DEFAULT.
1932  *
1933  *	The specified object may not contain fictitious pages.
1934  *
1935  *	The caller must always specify an allocation class.
1936  *
1937  *	allocation classes:
1938  *	VM_ALLOC_NORMAL		normal process request
1939  *	VM_ALLOC_SYSTEM		system *really* needs a page
1940  *	VM_ALLOC_INTERRUPT	interrupt time request
1941  *
1942  *	optional allocation flags:
1943  *	VM_ALLOC_NOBUSY		do not exclusive busy the page
1944  *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
1945  *	VM_ALLOC_NOOBJ		page is not associated with an object and
1946  *				should not be exclusive busy
1947  *	VM_ALLOC_SBUSY		shared busy the allocated page
1948  *	VM_ALLOC_WIRED		wire the allocated page
1949  *	VM_ALLOC_ZERO		prefer a zeroed page
1950  */
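/*
 *	Illustrative sketch (not code from this file): a caller holding the
 *	write lock on a hypothetical object "obj" might request a wired run
 *	of "npages" pages anywhere in physical memory, preferring pages that
 *	are already zero-filled:
 *
 *		m = vm_page_alloc_contig(obj, pindex, VM_ALLOC_NORMAL |
 *		    VM_ALLOC_WIRED | VM_ALLOC_ZERO, npages, 0,
 *		    ~(vm_paddr_t)0, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 *		if (m == NULL)
 *			(reclaim or wait and retry; see
 *			vm_page_reclaim_contig() below)
 */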
1951 vm_page_t
1952 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
1953     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
1954     vm_paddr_t boundary, vm_memattr_t memattr)
1955 {
1956 	struct vm_domainset_iter di;
1957 	vm_page_t m;
1958 	int domain;
1959 
1960 	vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
1961 	do {
1962 		m = vm_page_alloc_contig_domain(object, pindex, domain, req,
1963 		    npages, low, high, alignment, boundary, memattr);
1964 		if (m != NULL)
1965 			break;
1966 	} while (vm_domainset_iter_page(&di, object, &domain) == 0);
1967 
1968 	return (m);
1969 }
1970 
1971 vm_page_t
1972 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
1973     int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
1974     vm_paddr_t boundary, vm_memattr_t memattr)
1975 {
1976 	struct vm_domain *vmd;
1977 	vm_page_t m, m_ret, mpred;
1978 	u_int busy_lock, flags, oflags;
1979 
1980 	mpred = NULL;	/* XXX: pacify gcc */
1981 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
1982 	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
1983 	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
1984 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
1985 	    ("vm_page_alloc_contig: inconsistent object(%p)/req(%x)", object,
1986 	    req));
1987 	KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
1988 	    ("Can't sleep and retry object insertion."));
1989 	if (object != NULL) {
1990 		VM_OBJECT_ASSERT_WLOCKED(object);
1991 		KASSERT((object->flags & OBJ_FICTITIOUS) == 0,
1992 		    ("vm_page_alloc_contig: object %p has fictitious pages",
1993 		    object));
1994 	}
1995 	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
1996 
1997 	if (object != NULL) {
1998 		mpred = vm_radix_lookup_le(&object->rtree, pindex);
1999 		KASSERT(mpred == NULL || mpred->pindex != pindex,
2000 		    ("vm_page_alloc_contig: pindex already allocated"));
2001 	}
2002 
2003 	/*
2004 	 * Can we allocate the pages without the number of free pages falling
2005 	 * below the lower bound for the allocation class?
2006 	 */
2007 	m_ret = NULL;
2008 again:
2009 #if VM_NRESERVLEVEL > 0
2010 	/*
2011 	 * Can we allocate the pages from a reservation?
2012 	 */
2013 	if (vm_object_reserv(object) &&
2014 	    (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req,
2015 	    mpred, npages, low, high, alignment, boundary)) != NULL) {
2016 		domain = vm_phys_domain(m_ret);
2017 		vmd = VM_DOMAIN(domain);
2018 		goto found;
2019 	}
2020 #endif
2021 	vmd = VM_DOMAIN(domain);
2022 	if (vm_domain_allocate(vmd, req, npages)) {
2023 		/*
2024 		 * Allocate the pages from the free page queues.
2025 		 */
2026 		vm_domain_free_lock(vmd);
2027 		m_ret = vm_phys_alloc_contig(domain, npages, low, high,
2028 		    alignment, boundary);
2029 		vm_domain_free_unlock(vmd);
2030 		if (m_ret == NULL) {
2031 			vm_domain_freecnt_inc(vmd, npages);
2032 #if VM_NRESERVLEVEL > 0
2033 			if (vm_reserv_reclaim_contig(domain, npages, low,
2034 			    high, alignment, boundary))
2035 				goto again;
2036 #endif
2037 		}
2038 	}
2039 	if (m_ret == NULL) {
2040 		if (vm_domain_alloc_fail(vmd, object, req))
2041 			goto again;
2042 		return (NULL);
2043 	}
2044 #if VM_NRESERVLEVEL > 0
2045 found:
2046 #endif
2047 	for (m = m_ret; m < &m_ret[npages]; m++) {
2048 		vm_page_dequeue(m);
2049 		vm_page_alloc_check(m);
2050 	}
2051 
2052 	/*
2053 	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
2054 	 */
2055 	flags = 0;
2056 	if ((req & VM_ALLOC_ZERO) != 0)
2057 		flags = PG_ZERO;
2058 	if ((req & VM_ALLOC_NODUMP) != 0)
2059 		flags |= PG_NODUMP;
2060 	oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
2061 	    VPO_UNMANAGED : 0;
2062 	busy_lock = VPB_UNBUSIED;
2063 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
2064 		busy_lock = VPB_SINGLE_EXCLUSIVER;
2065 	if ((req & VM_ALLOC_SBUSY) != 0)
2066 		busy_lock = VPB_SHARERS_WORD(1);
2067 	if ((req & VM_ALLOC_WIRED) != 0)
2068 		vm_wire_add(npages);
2069 	if (object != NULL) {
2070 		if (object->memattr != VM_MEMATTR_DEFAULT &&
2071 		    memattr == VM_MEMATTR_DEFAULT)
2072 			memattr = object->memattr;
2073 	}
2074 	for (m = m_ret; m < &m_ret[npages]; m++) {
2075 		m->aflags = 0;
2076 		m->flags = (m->flags | PG_NODUMP) & flags;
2077 		m->busy_lock = busy_lock;
2078 		if ((req & VM_ALLOC_WIRED) != 0)
2079 			m->wire_count = 1;
2080 		m->act_count = 0;
2081 		m->oflags = oflags;
2082 		if (object != NULL) {
2083 			if (vm_page_insert_after(m, object, pindex, mpred)) {
2084 				if ((req & VM_ALLOC_WIRED) != 0)
2085 					vm_wire_sub(npages);
2086 				KASSERT(m->object == NULL,
2087 				    ("page %p has object", m));
2088 				mpred = m;
2089 				for (m = m_ret; m < &m_ret[npages]; m++) {
2090 					if (m <= mpred &&
2091 					    (req & VM_ALLOC_WIRED) != 0)
2092 						m->wire_count = 0;
2093 					m->oflags = VPO_UNMANAGED;
2094 					m->busy_lock = VPB_UNBUSIED;
2095 					/* Don't change PG_ZERO. */
2096 					vm_page_free_toq(m);
2097 				}
2098 				if (req & VM_ALLOC_WAITFAIL) {
2099 					VM_OBJECT_WUNLOCK(object);
2100 					vm_radix_wait();
2101 					VM_OBJECT_WLOCK(object);
2102 				}
2103 				return (NULL);
2104 			}
2105 			mpred = m;
2106 		} else
2107 			m->pindex = pindex;
2108 		if (memattr != VM_MEMATTR_DEFAULT)
2109 			pmap_page_set_memattr(m, memattr);
2110 		pindex++;
2111 	}
2112 	return (m_ret);
2113 }
2114 
2115 /*
2116  * Check a page that has been freshly dequeued from a freelist.
2117  */
2118 static void
2119 vm_page_alloc_check(vm_page_t m)
2120 {
2121 
2122 	KASSERT(m->object == NULL, ("page %p has object", m));
2123 	KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
2124 	    ("page %p has unexpected queue %d, flags %#x",
2125 	    m, m->queue, (m->aflags & PGA_QUEUE_STATE_MASK)));
2126 	KASSERT(!vm_page_wired(m), ("page %p is wired", m));
2127 	KASSERT(!vm_page_busied(m), ("page %p is busy", m));
2128 	KASSERT(m->dirty == 0, ("page %p is dirty", m));
2129 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
2130 	    ("page %p has unexpected memattr %d",
2131 	    m, pmap_page_get_memattr(m)));
2132 	KASSERT(m->valid == 0, ("free page %p is valid", m));
2133 }
2134 
2135 /*
2136  * 	vm_page_alloc_freelist:
2137  *
2138  *	Allocate a physical page from the specified free page list.
2139  *
2140  *	The caller must always specify an allocation class.
2141  *
2142  *	allocation classes:
2143  *	VM_ALLOC_NORMAL		normal process request
2144  *	VM_ALLOC_SYSTEM		system *really* needs a page
2145  *	VM_ALLOC_INTERRUPT	interrupt time request
2146  *
2147  *	optional allocation flags:
2148  *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
2149  *				intends to allocate
2150  *	VM_ALLOC_WIRED		wire the allocated page
2151  *	VM_ALLOC_ZERO		prefer a zeroed page
2152  */
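/*
 *	Illustrative sketch (hypothetical caller and freelist name): platform
 *	code might allocate a wired, unmanaged page from a specific freelist,
 *	zeroing it only when PG_ZERO was not inherited:
 *
 *		m = vm_page_alloc_freelist(VM_FREELIST_LOWMEM,
 *		    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *		if (m != NULL && (m->flags & PG_ZERO) == 0)
 *			pmap_zero_page(m);
 */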
2153 vm_page_t
2154 vm_page_alloc_freelist(int freelist, int req)
2155 {
2156 	struct vm_domainset_iter di;
2157 	vm_page_t m;
2158 	int domain;
2159 
2160 	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
2161 	do {
2162 		m = vm_page_alloc_freelist_domain(domain, freelist, req);
2163 		if (m != NULL)
2164 			break;
2165 	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
2166 
2167 	return (m);
2168 }
2169 
2170 vm_page_t
2171 vm_page_alloc_freelist_domain(int domain, int freelist, int req)
2172 {
2173 	struct vm_domain *vmd;
2174 	vm_page_t m;
2175 	u_int flags;
2176 
2177 	m = NULL;
2178 	vmd = VM_DOMAIN(domain);
2179 again:
2180 	if (vm_domain_allocate(vmd, req, 1)) {
2181 		vm_domain_free_lock(vmd);
2182 		m = vm_phys_alloc_freelist_pages(domain, freelist,
2183 		    VM_FREEPOOL_DIRECT, 0);
2184 		vm_domain_free_unlock(vmd);
2185 		if (m == NULL)
2186 			vm_domain_freecnt_inc(vmd, 1);
2187 	}
2188 	if (m == NULL) {
2189 		if (vm_domain_alloc_fail(vmd, NULL, req))
2190 			goto again;
2191 		return (NULL);
2192 	}
2193 	vm_page_dequeue(m);
2194 	vm_page_alloc_check(m);
2195 
2196 	/*
2197 	 * Initialize the page.  Only the PG_ZERO flag is inherited.
2198 	 */
2199 	m->aflags = 0;
2200 	flags = 0;
2201 	if ((req & VM_ALLOC_ZERO) != 0)
2202 		flags = PG_ZERO;
2203 	m->flags &= flags;
2204 	if ((req & VM_ALLOC_WIRED) != 0) {
2205 		/*
2206 		 * The page lock is not required for wiring a page that does
2207 		 * not belong to an object.
2208 		 */
2209 		vm_wire_add(1);
2210 		m->wire_count = 1;
2211 	}
2212 	/* Unmanaged pages don't use "act_count". */
2213 	m->oflags = VPO_UNMANAGED;
2214 	return (m);
2215 }
2216 
2217 static int
2218 vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags)
2219 {
2220 	struct vm_domain *vmd;
2221 	struct vm_pgcache *pgcache;
2222 	int i;
2223 
2224 	pgcache = arg;
2225 	vmd = VM_DOMAIN(pgcache->domain);
2226 	/* Only import if we can bring in a full bucket. */
2227 	if (cnt == 1 || !vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
2228 		return (0);
2229 	domain = vmd->vmd_domain;
2230 	vm_domain_free_lock(vmd);
2231 	i = vm_phys_alloc_npages(domain, pgcache->pool, cnt,
2232 	    (vm_page_t *)store);
2233 	vm_domain_free_unlock(vmd);
2234 	if (cnt != i)
2235 		vm_domain_freecnt_inc(vmd, cnt - i);
2236 
2237 	return (i);
2238 }
2239 
2240 static void
2241 vm_page_zone_release(void *arg, void **store, int cnt)
2242 {
2243 	struct vm_domain *vmd;
2244 	struct vm_pgcache *pgcache;
2245 	vm_page_t m;
2246 	int i;
2247 
2248 	pgcache = arg;
2249 	vmd = VM_DOMAIN(pgcache->domain);
2250 	vm_domain_free_lock(vmd);
2251 	for (i = 0; i < cnt; i++) {
2252 		m = (vm_page_t)store[i];
2253 		vm_phys_free_pages(m, 0);
2254 	}
2255 	vm_domain_free_unlock(vmd);
2256 	vm_domain_freecnt_inc(vmd, cnt);
2257 }
2258 
2259 #define	VPSC_ANY	0	/* No restrictions. */
2260 #define	VPSC_NORESERV	1	/* Skip reservations; implies VPSC_NOSUPER. */
2261 #define	VPSC_NOSUPER	2	/* Skip superpages. */
2262 
2263 /*
2264  *	vm_page_scan_contig:
2265  *
2266  *	Scan vm_page_array[] between the specified entries "m_start" and
2267  *	"m_end" for a run of contiguous physical pages that satisfy the
2268  *	specified conditions, and return the lowest page in the run.  The
2269  *	specified "alignment" determines the alignment of the lowest physical
2270  *	page in the run.  If the specified "boundary" is non-zero, then the
2271  *	run of physical pages cannot span a physical address that is a
2272  *	multiple of "boundary".
2273  *
2274  *	"m_end" is never dereferenced, so it need not point to a vm_page
2275  *	structure within vm_page_array[].
2276  *
2277  *	"npages" must be greater than zero.  "m_start" and "m_end" must not
2278  *	span a hole (or discontiguity) in the physical address space.  Both
2279  *	"alignment" and "boundary" must be a power of two.
2280  */
2281 vm_page_t
2282 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
2283     u_long alignment, vm_paddr_t boundary, int options)
2284 {
2285 	struct mtx *m_mtx;
2286 	vm_object_t object;
2287 	vm_paddr_t pa;
2288 	vm_page_t m, m_run;
2289 #if VM_NRESERVLEVEL > 0
2290 	int level;
2291 #endif
2292 	int m_inc, order, run_ext, run_len;
2293 
2294 	KASSERT(npages > 0, ("npages is 0"));
2295 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
2296 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
2297 	m_run = NULL;
2298 	run_len = 0;
2299 	m_mtx = NULL;
2300 	for (m = m_start; m < m_end && run_len < npages; m += m_inc) {
2301 		KASSERT((m->flags & PG_MARKER) == 0,
2302 		    ("page %p is PG_MARKER", m));
2303 		KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->wire_count == 1,
2304 		    ("fictitious page %p has invalid wire count", m));
2305 
2306 		/*
2307 		 * If the current page would be the start of a run, check its
2308 		 * physical address against the end, alignment, and boundary
2309 		 * conditions.  If it doesn't satisfy these conditions, either
2310 		 * terminate the scan or advance to the next page that
2311 		 * satisfies the failed condition.
2312 		 */
2313 		if (run_len == 0) {
2314 			KASSERT(m_run == NULL, ("m_run != NULL"));
2315 			if (m + npages > m_end)
2316 				break;
2317 			pa = VM_PAGE_TO_PHYS(m);
2318 			if ((pa & (alignment - 1)) != 0) {
2319 				m_inc = atop(roundup2(pa, alignment) - pa);
2320 				continue;
2321 			}
2322 			if (rounddown2(pa ^ (pa + ptoa(npages) - 1),
2323 			    boundary) != 0) {
2324 				m_inc = atop(roundup2(pa, boundary) - pa);
2325 				continue;
2326 			}
2327 		} else
2328 			KASSERT(m_run != NULL, ("m_run == NULL"));
2329 
2330 		vm_page_change_lock(m, &m_mtx);
2331 		m_inc = 1;
2332 retry:
2333 		if (vm_page_wired(m))
2334 			run_ext = 0;
2335 #if VM_NRESERVLEVEL > 0
2336 		else if ((level = vm_reserv_level(m)) >= 0 &&
2337 		    (options & VPSC_NORESERV) != 0) {
2338 			run_ext = 0;
2339 			/* Advance to the end of the reservation. */
2340 			pa = VM_PAGE_TO_PHYS(m);
2341 			m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) -
2342 			    pa);
2343 		}
2344 #endif
2345 		else if ((object = m->object) != NULL) {
2346 			/*
2347 			 * The page is considered eligible for relocation if
2348 			 * and only if it could be laundered or reclaimed by
2349 			 * the page daemon.
2350 			 */
2351 			if (!VM_OBJECT_TRYRLOCK(object)) {
2352 				mtx_unlock(m_mtx);
2353 				VM_OBJECT_RLOCK(object);
2354 				mtx_lock(m_mtx);
2355 				if (m->object != object) {
2356 					/*
2357 					 * The page may have been freed.
2358 					 */
2359 					VM_OBJECT_RUNLOCK(object);
2360 					goto retry;
2361 				} else if (vm_page_wired(m)) {
2362 					run_ext = 0;
2363 					goto unlock;
2364 				}
2365 			}
2366 			/* Don't care: PG_NODUMP, PG_ZERO. */
2367 			if (object->type != OBJT_DEFAULT &&
2368 			    object->type != OBJT_SWAP &&
2369 			    object->type != OBJT_VNODE) {
2370 				run_ext = 0;
2371 #if VM_NRESERVLEVEL > 0
2372 			} else if ((options & VPSC_NOSUPER) != 0 &&
2373 			    (level = vm_reserv_level_iffullpop(m)) >= 0) {
2374 				run_ext = 0;
2375 				/* Advance to the end of the superpage. */
2376 				pa = VM_PAGE_TO_PHYS(m);
2377 				m_inc = atop(roundup2(pa + 1,
2378 				    vm_reserv_size(level)) - pa);
2379 #endif
2380 			} else if (object->memattr == VM_MEMATTR_DEFAULT &&
2381 			    vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) {
2382 				/*
2383 				 * The page is allocated but eligible for
2384 				 * relocation.  Extend the current run by one
2385 				 * page.
2386 				 */
2387 				KASSERT(pmap_page_get_memattr(m) ==
2388 				    VM_MEMATTR_DEFAULT,
2389 				    ("page %p has an unexpected memattr", m));
2390 				KASSERT((m->oflags & (VPO_SWAPINPROG |
2391 				    VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
2392 				    ("page %p has unexpected oflags", m));
2393 				/* Don't care: VPO_NOSYNC. */
2394 				run_ext = 1;
2395 			} else
2396 				run_ext = 0;
2397 unlock:
2398 			VM_OBJECT_RUNLOCK(object);
2399 #if VM_NRESERVLEVEL > 0
2400 		} else if (level >= 0) {
2401 			/*
2402 			 * The page is reserved but not yet allocated.  In
2403 			 * other words, it is still free.  Extend the current
2404 			 * run by one page.
2405 			 */
2406 			run_ext = 1;
2407 #endif
2408 		} else if ((order = m->order) < VM_NFREEORDER) {
2409 			/*
2410 			 * The page is enqueued in the physical memory
2411 			 * allocator's free page queues.  Moreover, it is the
2412 			 * first page in a power-of-two-sized run of
2413 			 * contiguous free pages.  Add these pages to the end
2414 			 * of the current run, and jump ahead.
2415 			 */
2416 			run_ext = 1 << order;
2417 			m_inc = 1 << order;
2418 		} else {
2419 			/*
2420 			 * Skip the page for one of the following reasons: (1)
2421 			 * It is enqueued in the physical memory allocator's
2422 			 * free page queues.  However, it is not the first
2423 			 * page in a run of contiguous free pages.  (This case
2424 			 * rarely occurs because the scan is performed in
2425 			 * ascending order.) (2) It is not reserved, and it is
2426 			 * transitioning from free to allocated.  (Conversely,
2427 			 * the transition from allocated to free for managed
2428 			 * pages is blocked by the page lock.) (3) It is
2429 			 * allocated but not contained by an object and not
2430 			 * wired, e.g., allocated by Xen's balloon driver.
2431 			 */
2432 			run_ext = 0;
2433 		}
2434 
2435 		/*
2436 		 * Extend or reset the current run of pages.
2437 		 */
2438 		if (run_ext > 0) {
2439 			if (run_len == 0)
2440 				m_run = m;
2441 			run_len += run_ext;
2442 		} else {
2443 			if (run_len > 0) {
2444 				m_run = NULL;
2445 				run_len = 0;
2446 			}
2447 		}
2448 	}
2449 	if (m_mtx != NULL)
2450 		mtx_unlock(m_mtx);
2451 	if (run_len >= npages)
2452 		return (m_run);
2453 	return (NULL);
2454 }
2455 
2456 /*
2457  *	vm_page_reclaim_run:
2458  *
2459  *	Try to relocate each of the allocated virtual pages within the
2460  *	specified run of physical pages to a new physical address.  Free the
2461  *	physical pages underlying the relocated virtual pages.  A virtual page
2462  *	is relocatable if and only if it could be laundered or reclaimed by
2463  *	the page daemon.  Whenever possible, a virtual page is relocated to a
2464  *	physical address above "high".
2465  *
2466  *	Returns 0 if every physical page within the run was already free or
2467  *	just freed by a successful relocation.  Otherwise, returns a non-zero
2468  *	value indicating why the last attempt to relocate a virtual page was
2469  *	unsuccessful.
2470  *
2471  *	"req_class" must be an allocation class.
2472  */
2473 static int
2474 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
2475     vm_paddr_t high)
2476 {
2477 	struct vm_domain *vmd;
2478 	struct mtx *m_mtx;
2479 	struct spglist free;
2480 	vm_object_t object;
2481 	vm_paddr_t pa;
2482 	vm_page_t m, m_end, m_new;
2483 	int error, order, req;
2484 
2485 	KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class,
2486 	    ("req_class is not an allocation class"));
2487 	SLIST_INIT(&free);
2488 	error = 0;
2489 	m = m_run;
2490 	m_end = m_run + npages;
2491 	m_mtx = NULL;
2492 	for (; error == 0 && m < m_end; m++) {
2493 		KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
2494 		    ("page %p is PG_FICTITIOUS or PG_MARKER", m));
2495 
2496 		/*
2497 		 * Avoid releasing and reacquiring the same page lock.
2498 		 */
2499 		vm_page_change_lock(m, &m_mtx);
2500 retry:
2501 		if (vm_page_wired(m))
2502 			error = EBUSY;
2503 		else if ((object = m->object) != NULL) {
2504 			/*
2505 			 * The page is relocated if and only if it could be
2506 			 * laundered or reclaimed by the page daemon.
2507 			 */
2508 			if (!VM_OBJECT_TRYWLOCK(object)) {
2509 				mtx_unlock(m_mtx);
2510 				VM_OBJECT_WLOCK(object);
2511 				mtx_lock(m_mtx);
2512 				if (m->object != object) {
2513 					/*
2514 					 * The page may have been freed.
2515 					 */
2516 					VM_OBJECT_WUNLOCK(object);
2517 					goto retry;
2518 				} else if (vm_page_wired(m)) {
2519 					error = EBUSY;
2520 					goto unlock;
2521 				}
2522 			}
2523 			/* Don't care: PG_NODUMP, PG_ZERO. */
2524 			if (object->type != OBJT_DEFAULT &&
2525 			    object->type != OBJT_SWAP &&
2526 			    object->type != OBJT_VNODE)
2527 				error = EINVAL;
2528 			else if (object->memattr != VM_MEMATTR_DEFAULT)
2529 				error = EINVAL;
2530 			else if (vm_page_queue(m) != PQ_NONE &&
2531 			    !vm_page_busied(m)) {
2532 				KASSERT(pmap_page_get_memattr(m) ==
2533 				    VM_MEMATTR_DEFAULT,
2534 				    ("page %p has an unexpected memattr", m));
2535 				KASSERT((m->oflags & (VPO_SWAPINPROG |
2536 				    VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
2537 				    ("page %p has unexpected oflags", m));
2538 				/* Don't care: VPO_NOSYNC. */
2539 				if (m->valid != 0) {
2540 					/*
2541 					 * First, try to allocate a new page
2542 					 * that is above "high".  Failing
2543 					 * that, try to allocate a new page
2544 					 * that is below "m_run".  Allocate
2545 					 * the new page between the end of
2546 					 * "m_run" and "high" only as a last
2547 					 * resort.
2548 					 */
2549 					req = req_class | VM_ALLOC_NOOBJ;
2550 					if ((m->flags & PG_NODUMP) != 0)
2551 						req |= VM_ALLOC_NODUMP;
2552 					if (trunc_page(high) !=
2553 					    ~(vm_paddr_t)PAGE_MASK) {
2554 						m_new = vm_page_alloc_contig(
2555 						    NULL, 0, req, 1,
2556 						    round_page(high),
2557 						    ~(vm_paddr_t)0,
2558 						    PAGE_SIZE, 0,
2559 						    VM_MEMATTR_DEFAULT);
2560 					} else
2561 						m_new = NULL;
2562 					if (m_new == NULL) {
2563 						pa = VM_PAGE_TO_PHYS(m_run);
2564 						m_new = vm_page_alloc_contig(
2565 						    NULL, 0, req, 1,
2566 						    0, pa - 1, PAGE_SIZE, 0,
2567 						    VM_MEMATTR_DEFAULT);
2568 					}
2569 					if (m_new == NULL) {
2570 						pa += ptoa(npages);
2571 						m_new = vm_page_alloc_contig(
2572 						    NULL, 0, req, 1,
2573 						    pa, high, PAGE_SIZE, 0,
2574 						    VM_MEMATTR_DEFAULT);
2575 					}
2576 					if (m_new == NULL) {
2577 						error = ENOMEM;
2578 						goto unlock;
2579 					}
2580 					KASSERT(!vm_page_wired(m_new),
2581 					    ("page %p is wired", m_new));
2582 
2583 					/*
2584 					 * Replace "m" with the new page.  For
2585 					 * vm_page_replace(), "m" must be busy
2586 					 * and dequeued.  Finally, change "m"
2587 					 * as if vm_page_free() was called.
2588 					 */
2589 					if (object->ref_count != 0)
2590 						pmap_remove_all(m);
2591 					m_new->aflags = m->aflags &
2592 					    ~PGA_QUEUE_STATE_MASK;
2593 					KASSERT(m_new->oflags == VPO_UNMANAGED,
2594 					    ("page %p is managed", m_new));
2595 					m_new->oflags = m->oflags & VPO_NOSYNC;
2596 					pmap_copy_page(m, m_new);
2597 					m_new->valid = m->valid;
2598 					m_new->dirty = m->dirty;
2599 					m->flags &= ~PG_ZERO;
2600 					vm_page_xbusy(m);
2601 					vm_page_dequeue(m);
2602 					vm_page_replace_checked(m_new, object,
2603 					    m->pindex, m);
2604 					if (vm_page_free_prep(m))
2605 						SLIST_INSERT_HEAD(&free, m,
2606 						    plinks.s.ss);
2607 
2608 					/*
2609 					 * The new page must be deactivated
2610 					 * before the object is unlocked.
2611 					 */
2612 					vm_page_change_lock(m_new, &m_mtx);
2613 					vm_page_deactivate(m_new);
2614 				} else {
2615 					m->flags &= ~PG_ZERO;
2616 					vm_page_dequeue(m);
2617 					if (vm_page_free_prep(m))
2618 						SLIST_INSERT_HEAD(&free, m,
2619 						    plinks.s.ss);
2620 					KASSERT(m->dirty == 0,
2621 					    ("page %p is dirty", m));
2622 				}
2623 			} else
2624 				error = EBUSY;
2625 unlock:
2626 			VM_OBJECT_WUNLOCK(object);
2627 		} else {
2628 			MPASS(vm_phys_domain(m) == domain);
2629 			vmd = VM_DOMAIN(domain);
2630 			vm_domain_free_lock(vmd);
2631 			order = m->order;
2632 			if (order < VM_NFREEORDER) {
2633 				/*
2634 				 * The page is enqueued in the physical memory
2635 				 * allocator's free page queues.  Moreover, it
2636 				 * is the first page in a power-of-two-sized
2637 				 * run of contiguous free pages.  Jump ahead
2638 				 * to the last page within that run, and
2639 				 * continue from there.
2640 				 */
2641 				m += (1 << order) - 1;
2642 			}
2643 #if VM_NRESERVLEVEL > 0
2644 			else if (vm_reserv_is_page_free(m))
2645 				order = 0;
2646 #endif
2647 			vm_domain_free_unlock(vmd);
2648 			if (order == VM_NFREEORDER)
2649 				error = EINVAL;
2650 		}
2651 	}
2652 	if (m_mtx != NULL)
2653 		mtx_unlock(m_mtx);
2654 	if ((m = SLIST_FIRST(&free)) != NULL) {
2655 		int cnt;
2656 
2657 		vmd = VM_DOMAIN(domain);
2658 		cnt = 0;
2659 		vm_domain_free_lock(vmd);
2660 		do {
2661 			MPASS(vm_phys_domain(m) == domain);
2662 			SLIST_REMOVE_HEAD(&free, plinks.s.ss);
2663 			vm_phys_free_pages(m, 0);
2664 			cnt++;
2665 		} while ((m = SLIST_FIRST(&free)) != NULL);
2666 		vm_domain_free_unlock(vmd);
2667 		vm_domain_freecnt_inc(vmd, cnt);
2668 	}
2669 	return (error);
2670 }
2671 
2672 #define	NRUNS	16
2673 
2674 CTASSERT(powerof2(NRUNS));
2675 
2676 #define	RUN_INDEX(count)	((count) & (NRUNS - 1))
2677 
2678 #define	MIN_RECLAIM	8
2679 
2680 /*
2681  *	vm_page_reclaim_contig:
2682  *
2683  *	Reclaim allocated, contiguous physical memory satisfying the specified
2684  *	conditions by relocating the virtual pages using that physical memory.
2685  *	Returns true if reclamation is successful and false otherwise.  Since
2686  *	relocation requires the allocation of physical pages, reclamation may
2687  *	fail due to a shortage of free pages.  When reclamation fails, callers
2688  *	are expected to perform vm_wait() before retrying a failed allocation
2689  *	operation, e.g., vm_page_alloc_contig().
2690  *
2691  *	The caller must always specify an allocation class through "req".
2692  *
2693  *	allocation classes:
2694  *	VM_ALLOC_NORMAL		normal process request
2695  *	VM_ALLOC_SYSTEM		system *really* needs a page
2696  *	VM_ALLOC_INTERRUPT	interrupt time request
2697  *
2698  *	The optional allocation flags are ignored.
2699  *
2700  *	"npages" must be greater than zero.  Both "alignment" and "boundary"
2701  *	must be a power of two.
2702  */
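/*
 *	Illustrative sketch of the retry protocol described above (not code
 *	from this file; "req", "npages", and the range parameters are the
 *	caller's):
 *
 *		while ((m = vm_page_alloc_contig(NULL, 0,
 *		    req | VM_ALLOC_NOOBJ, npages, low, high, alignment,
 *		    boundary, VM_MEMATTR_DEFAULT)) == NULL) {
 *			if (!vm_page_reclaim_contig(req, npages, low, high,
 *			    alignment, boundary))
 *				vm_wait(NULL);
 *		}
 */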
2703 bool
2704 vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
2705     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
2706 {
2707 	struct vm_domain *vmd;
2708 	vm_paddr_t curr_low;
2709 	vm_page_t m_run, m_runs[NRUNS];
2710 	u_long count, reclaimed;
2711 	int error, i, options, req_class;
2712 
2713 	KASSERT(npages > 0, ("npages is 0"));
2714 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
2715 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
2716 	req_class = req & VM_ALLOC_CLASS_MASK;
2717 
2718 	/*
2719 	 * The page daemon is allowed to dig deeper into the free page list.
2720 	 */
2721 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
2722 		req_class = VM_ALLOC_SYSTEM;
2723 
2724 	/*
2725 	 * Return if the number of free pages cannot satisfy the requested
2726 	 * allocation.
2727 	 */
2728 	vmd = VM_DOMAIN(domain);
2729 	count = vmd->vmd_free_count;
2730 	if (count < npages + vmd->vmd_free_reserved || (count < npages +
2731 	    vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
2732 	    (count < npages && req_class == VM_ALLOC_INTERRUPT))
2733 		return (false);
2734 
2735 	/*
2736 	 * Scan up to three times, relaxing the restrictions ("options") on
2737 	 * the reclamation of reservations and superpages each time.
2738 	 */
2739 	for (options = VPSC_NORESERV;;) {
2740 		/*
2741 		 * Find the highest runs that satisfy the given constraints
2742 		 * and restrictions, and record them in "m_runs".
2743 		 */
2744 		curr_low = low;
2745 		count = 0;
2746 		for (;;) {
2747 			m_run = vm_phys_scan_contig(domain, npages, curr_low,
2748 			    high, alignment, boundary, options);
2749 			if (m_run == NULL)
2750 				break;
2751 			curr_low = VM_PAGE_TO_PHYS(m_run) + ptoa(npages);
2752 			m_runs[RUN_INDEX(count)] = m_run;
2753 			count++;
2754 		}
2755 
2756 		/*
2757 		 * Reclaim the highest runs in LIFO (descending) order until
2758 		 * the number of reclaimed pages, "reclaimed", is at least
2759 		 * MIN_RECLAIM.  Reset "reclaimed" each time because each
2760 		 * reclamation is idempotent, and runs will (likely) recur
2761 		 * from one scan to the next as restrictions are relaxed.
2762 		 */
2763 		reclaimed = 0;
2764 		for (i = 0; count > 0 && i < NRUNS; i++) {
2765 			count--;
2766 			m_run = m_runs[RUN_INDEX(count)];
2767 			error = vm_page_reclaim_run(req_class, domain, npages,
2768 			    m_run, high);
2769 			if (error == 0) {
2770 				reclaimed += npages;
2771 				if (reclaimed >= MIN_RECLAIM)
2772 					return (true);
2773 			}
2774 		}
2775 
2776 		/*
2777 		 * Either relax the restrictions on the next scan or return if
2778 		 * the last scan had no restrictions.
2779 		 */
2780 		if (options == VPSC_NORESERV)
2781 			options = VPSC_NOSUPER;
2782 		else if (options == VPSC_NOSUPER)
2783 			options = VPSC_ANY;
2784 		else if (options == VPSC_ANY)
2785 			return (reclaimed != 0);
2786 	}
2787 }
2788 
2789 bool
2790 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
2791     u_long alignment, vm_paddr_t boundary)
2792 {
2793 	struct vm_domainset_iter di;
2794 	int domain;
2795 	bool ret;
2796 
2797 	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
2798 	do {
2799 		ret = vm_page_reclaim_contig_domain(domain, req, npages, low,
2800 		    high, alignment, boundary);
2801 		if (ret)
2802 			break;
2803 	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
2804 
2805 	return (ret);
2806 }
2807 
2808 /*
2809  * Set the domain in the appropriate page level domainset.
2810  */
2811 void
2812 vm_domain_set(struct vm_domain *vmd)
2813 {
2814 
2815 	mtx_lock(&vm_domainset_lock);
2816 	if (!vmd->vmd_minset && vm_paging_min(vmd)) {
2817 		vmd->vmd_minset = 1;
2818 		DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains);
2819 	}
2820 	if (!vmd->vmd_severeset && vm_paging_severe(vmd)) {
2821 		vmd->vmd_severeset = 1;
2822 		DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains);
2823 	}
2824 	mtx_unlock(&vm_domainset_lock);
2825 }
2826 
2827 /*
2828  * Clear the domain from the appropriate page level domainset.
2829  */
2830 void
2831 vm_domain_clear(struct vm_domain *vmd)
2832 {
2833 
2834 	mtx_lock(&vm_domainset_lock);
2835 	if (vmd->vmd_minset && !vm_paging_min(vmd)) {
2836 		vmd->vmd_minset = 0;
2837 		DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains);
2838 		if (vm_min_waiters != 0) {
2839 			vm_min_waiters = 0;
2840 			wakeup(&vm_min_domains);
2841 		}
2842 	}
2843 	if (vmd->vmd_severeset && !vm_paging_severe(vmd)) {
2844 		vmd->vmd_severeset = 0;
2845 		DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains);
2846 		if (vm_severe_waiters != 0) {
2847 			vm_severe_waiters = 0;
2848 			wakeup(&vm_severe_domains);
2849 		}
2850 	}
2851 
2852 	/*
2853 	 * If the pageout daemon needs pages, then tell it that there are
2854 	 * some free.
2855 	 */
2856 	if (vmd->vmd_pageout_pages_needed &&
2857 	    vmd->vmd_free_count >= vmd->vmd_pageout_free_min) {
2858 		wakeup(&vmd->vmd_pageout_pages_needed);
2859 		vmd->vmd_pageout_pages_needed = 0;
2860 	}
2861 
2862 	/* See comments in vm_wait_doms(). */
2863 	if (vm_pageproc_waiters) {
2864 		vm_pageproc_waiters = 0;
2865 		wakeup(&vm_pageproc_waiters);
2866 	}
2867 	mtx_unlock(&vm_domainset_lock);
2868 }
2869 
2870 /*
2871  * Wait for free pages to exceed the min threshold globally.
2872  */
2873 void
2874 vm_wait_min(void)
2875 {
2876 
2877 	mtx_lock(&vm_domainset_lock);
2878 	while (vm_page_count_min()) {
2879 		vm_min_waiters++;
2880 		msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0);
2881 	}
2882 	mtx_unlock(&vm_domainset_lock);
2883 }
2884 
2885 /*
2886  * Wait for free pages to exceed the severe threshold globally.
2887  */
2888 void
2889 vm_wait_severe(void)
2890 {
2891 
2892 	mtx_lock(&vm_domainset_lock);
2893 	while (vm_page_count_severe()) {
2894 		vm_severe_waiters++;
2895 		msleep(&vm_severe_domains, &vm_domainset_lock, PVM,
2896 		    "vmwait", 0);
2897 	}
2898 	mtx_unlock(&vm_domainset_lock);
2899 }
2900 
2901 u_int
2902 vm_wait_count(void)
2903 {
2904 
2905 	return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters);
2906 }
2907 
2908 void
2909 vm_wait_doms(const domainset_t *wdoms)
2910 {
2911 
2912 	/*
2913 	 * We use racy wakeup synchronization to avoid expensive global
2914 	 * locking for the pageproc when sleeping with a non-specific vm_wait.
2915 	 * To handle this, we only sleep for one tick in this instance.  It
2916 	 * is expected that most allocations for the pageproc will come from
2917 	 * kmem or vm_page_grab* which will use the more specific and
2918 	 * race-free vm_wait_domain().
2919 	 */
2920 	if (curproc == pageproc) {
2921 		mtx_lock(&vm_domainset_lock);
2922 		vm_pageproc_waiters++;
2923 		msleep(&vm_pageproc_waiters, &vm_domainset_lock, PVM | PDROP,
2924 		    "pageprocwait", 1);
2925 	} else {
2926 		/*
2927 		 * XXX Ideally we would wait only until the allocation could
2928 		 * be satisfied.  This condition can cause new allocators to
2929 		 * consume all freed pages while old allocators wait.
2930 		 */
2931 		mtx_lock(&vm_domainset_lock);
2932 		if (vm_page_count_min_set(wdoms)) {
2933 			vm_min_waiters++;
2934 			msleep(&vm_min_domains, &vm_domainset_lock,
2935 			    PVM | PDROP, "vmwait", 0);
2936 		} else
2937 			mtx_unlock(&vm_domainset_lock);
2938 	}
2939 }
2940 
2941 /*
2942  *	vm_wait_domain:
2943  *
2944  *	Sleep until free pages are available for allocation.
2945  *	- Called in various places after failed memory allocations.
2946  */
2947 void
2948 vm_wait_domain(int domain)
2949 {
2950 	struct vm_domain *vmd;
2951 	domainset_t wdom;
2952 
2953 	vmd = VM_DOMAIN(domain);
2954 	vm_domain_free_assert_unlocked(vmd);
2955 
2956 	if (curproc == pageproc) {
2957 		mtx_lock(&vm_domainset_lock);
2958 		if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) {
2959 			vmd->vmd_pageout_pages_needed = 1;
2960 			msleep(&vmd->vmd_pageout_pages_needed,
2961 			    &vm_domainset_lock, PDROP | PSWP, "VMWait", 0);
2962 		} else
2963 			mtx_unlock(&vm_domainset_lock);
2964 	} else {
2965 		if (pageproc == NULL)
2966 			panic("vm_wait in early boot");
2967 		DOMAINSET_ZERO(&wdom);
2968 		DOMAINSET_SET(vmd->vmd_domain, &wdom);
2969 		vm_wait_doms(&wdom);
2970 	}
2971 }
2972 
2973 /*
2974  *	vm_wait:
2975  *
2976  *	Sleep until free pages are available for allocation in the
2977  *	affinity domains of the obj.  If obj is NULL, the domain set
2978  *	for the calling thread is used.
2979  *	Called in various places after failed memory allocations.
2980  */
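/*
 *	For example (an illustrative sketch, not code from this file), a
 *	sleepable allocation path typically drops the object lock around the
 *	wait and then retries:
 *
 *	retry:
 *		m = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);
 *		if (m == NULL) {
 *			VM_OBJECT_WUNLOCK(obj);
 *			vm_wait(obj);
 *			VM_OBJECT_WLOCK(obj);
 *			goto retry;
 *		}
 */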
2981 void
2982 vm_wait(vm_object_t obj)
2983 {
2984 	struct domainset *d;
2985 
2986 	d = NULL;
2987 
2988 	/*
2989 	 * Carefully fetch pointers only once: the struct domainset
2990 	 * itself is immutable but the pointer might change.
2991 	 */
2992 	if (obj != NULL)
2993 		d = obj->domain.dr_policy;
2994 	if (d == NULL)
2995 		d = curthread->td_domain.dr_policy;
2996 
2997 	vm_wait_doms(&d->ds_mask);
2998 }
2999 
3000 /*
3001  *	vm_domain_alloc_fail:
3002  *
3003  *	Called when a page allocation function fails.  Informs the
3004  *	pagedaemon and performs the requested wait.  Requires the
3005  *	domain_free lock to be released and, if an object was given, the
3006  *	object lock to be held on entry.  Returns with the object lock
3007  *	held.  Returns an error when retry is necessary.
3008  *
3009  */
3010 static int
3011 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req)
3012 {
3013 
3014 	vm_domain_free_assert_unlocked(vmd);
3015 
3016 	atomic_add_int(&vmd->vmd_pageout_deficit,
3017 	    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
3018 	if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) {
3019 		if (object != NULL)
3020 			VM_OBJECT_WUNLOCK(object);
3021 		vm_wait_domain(vmd->vmd_domain);
3022 		if (object != NULL)
3023 			VM_OBJECT_WLOCK(object);
3024 		if (req & VM_ALLOC_WAITOK)
3025 			return (EAGAIN);
3026 	}
3027 
3028 	return (0);
3029 }
3030 
3031 /*
3032  *	vm_waitpfault:
3033  *
3034  *	Sleep until free pages are available for allocation.
3035  *	- Called only in vm_fault so that processes page faulting
3036  *	  can be easily tracked.
3037  *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
3038  *	  processes will be able to grab memory first.  Do not change
3039  *	  this balance without careful testing first.
3040  */
3041 void
3042 vm_waitpfault(struct domainset *dset, int timo)
3043 {
3044 
3045 	/*
3046 	 * XXX Ideally we would wait only until the allocation could
3047 	 * be satisfied.  This condition can cause new allocators to
3048 	 * consume all freed pages while old allocators wait.
3049 	 */
3050 	mtx_lock(&vm_domainset_lock);
3051 	if (vm_page_count_min_set(&dset->ds_mask)) {
3052 		vm_min_waiters++;
3053 		msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP,
3054 		    "pfault", timo);
3055 	} else
3056 		mtx_unlock(&vm_domainset_lock);
3057 }
3058 
3059 static struct vm_pagequeue *
3060 vm_page_pagequeue(vm_page_t m)
3061 {
3062 
3063 	uint8_t queue;
3064 
3065 	if ((queue = atomic_load_8(&m->queue)) == PQ_NONE)
3066 		return (NULL);
3067 	return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
3068 }
3069 
3070 static inline void
3071 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m)
3072 {
3073 	struct vm_domain *vmd;
3074 	uint8_t qflags;
3075 
3076 	CRITICAL_ASSERT(curthread);
3077 	vm_pagequeue_assert_locked(pq);
3078 
3079 	/*
3080 	 * The page daemon is allowed to set m->queue = PQ_NONE without
3081 	 * the page queue lock held.  In this case it is about to free the page,
3082 	 * which must not have any queue state.
3083 	 */
3084 	qflags = atomic_load_8(&m->aflags);
3085 	KASSERT(pq == vm_page_pagequeue(m) ||
3086 	    (qflags & PGA_QUEUE_STATE_MASK) == 0,
3087 	    ("page %p doesn't belong to queue %p but has aflags %#x",
3088 	    m, pq, qflags));
3089 
3090 	if ((qflags & PGA_DEQUEUE) != 0) {
3091 		if (__predict_true((qflags & PGA_ENQUEUED) != 0))
3092 			vm_pagequeue_remove(pq, m);
3093 		vm_page_dequeue_complete(m);
3094 	} else if ((qflags & (PGA_REQUEUE | PGA_REQUEUE_HEAD)) != 0) {
3095 		if ((qflags & PGA_ENQUEUED) != 0)
3096 			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
3097 		else {
3098 			vm_pagequeue_cnt_inc(pq);
3099 			vm_page_aflag_set(m, PGA_ENQUEUED);
3100 		}
3101 
3102 		/*
3103 		 * Give PGA_REQUEUE_HEAD precedence over PGA_REQUEUE.
3104 		 * In particular, if both flags are set in close succession,
3105 		 * only PGA_REQUEUE_HEAD will be applied, even if it was set
3106 		 * first.
3107 		 */
3108 		if ((qflags & PGA_REQUEUE_HEAD) != 0) {
3109 			KASSERT(m->queue == PQ_INACTIVE,
3110 			    ("head enqueue not supported for page %p", m));
3111 			vmd = vm_pagequeue_domain(m);
3112 			TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
3113 		} else
3114 			TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
3115 
3116 		vm_page_aflag_clear(m, qflags & (PGA_REQUEUE |
3117 		    PGA_REQUEUE_HEAD));
3118 	}
3119 }
3120 
3121 static void
3122 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
3123     uint8_t queue)
3124 {
3125 	vm_page_t m;
3126 	int i;
3127 
3128 	for (i = 0; i < bq->bq_cnt; i++) {
3129 		m = bq->bq_pa[i];
3130 		if (__predict_false(m->queue != queue))
3131 			continue;
3132 		vm_pqbatch_process_page(pq, m);
3133 	}
3134 	vm_batchqueue_init(bq);
3135 }
3136 
3137 void
3138 vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
3139 {
3140 	struct vm_batchqueue *bq;
3141 	struct vm_pagequeue *pq;
3142 	int domain;
3143 
3144 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3145 	    ("page %p is unmanaged", m));
3146 	KASSERT(mtx_owned(vm_page_lockptr(m)) ||
3147 	    (m->object == NULL && (m->aflags & PGA_DEQUEUE) != 0),
3148 	    ("missing synchronization for page %p", m));
3149 	KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));
3150 
3151 	domain = vm_phys_domain(m);
3152 	pq = &vm_pagequeue_domain(m)->vmd_pagequeues[queue];
3153 
3154 	critical_enter();
3155 	bq = DPCPU_PTR(pqbatch[domain][queue]);
3156 	if (vm_batchqueue_insert(bq, m)) {
3157 		critical_exit();
3158 		return;
3159 	}
3160 	if (!vm_pagequeue_trylock(pq)) {
3161 		critical_exit();
3162 		vm_pagequeue_lock(pq);
3163 		critical_enter();
3164 		bq = DPCPU_PTR(pqbatch[domain][queue]);
3165 	}
3166 	vm_pqbatch_process(pq, bq, queue);
3167 
3168 	/*
3169 	 * The page may have been logically dequeued before we acquired the
3170 	 * page queue lock.  In this case, since we either hold the page lock
3171 	 * or the page is being freed, a different thread cannot be concurrently
3172 	 * enqueuing the page.
3173 	 */
3174 	if (__predict_true(m->queue == queue))
3175 		vm_pqbatch_process_page(pq, m);
3176 	else {
3177 		KASSERT(m->queue == PQ_NONE,
3178 		    ("invalid queue transition for page %p", m));
3179 		KASSERT((m->aflags & PGA_ENQUEUED) == 0,
3180 		    ("page %p is enqueued with invalid queue index", m));
3181 	}
3182 	vm_pagequeue_unlock(pq);
3183 	critical_exit();
3184 }
3185 
3186 /*
3187  *	vm_page_pqbatch_drain:		[ internal use only ]
3188  *
3189  *	Force all per-CPU page queue batch queues to be drained.  This is
3190  *	intended for use in severe memory shortages, to ensure that pages
3191  *	do not remain stuck in the batch queues.
3192  */
3193 void
3194 vm_page_pqbatch_drain(void)
3195 {
3196 	struct thread *td;
3197 	struct vm_domain *vmd;
3198 	struct vm_pagequeue *pq;
3199 	int cpu, domain, queue;
3200 
3201 	td = curthread;
3202 	CPU_FOREACH(cpu) {
3203 		thread_lock(td);
3204 		sched_bind(td, cpu);
3205 		thread_unlock(td);
3206 
3207 		for (domain = 0; domain < vm_ndomains; domain++) {
3208 			vmd = VM_DOMAIN(domain);
3209 			for (queue = 0; queue < PQ_COUNT; queue++) {
3210 				pq = &vmd->vmd_pagequeues[queue];
3211 				vm_pagequeue_lock(pq);
3212 				critical_enter();
3213 				vm_pqbatch_process(pq,
3214 				    DPCPU_PTR(pqbatch[domain][queue]), queue);
3215 				critical_exit();
3216 				vm_pagequeue_unlock(pq);
3217 			}
3218 		}
3219 	}
3220 	thread_lock(td);
3221 	sched_unbind(td);
3222 	thread_unlock(td);
3223 }
3224 
3225 /*
3226  * Complete the logical removal of a page from a page queue.  We must be
3227  * careful to synchronize with the page daemon, which may be concurrently
3228  * examining the page with only the page lock held.  The page must not be
3229  * in a state where it appears to be logically enqueued.
3230  */
3231 static void
3232 vm_page_dequeue_complete(vm_page_t m)
3233 {
3234 
3235 	m->queue = PQ_NONE;
3236 	atomic_thread_fence_rel();
3237 	vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK);
3238 }
3239 
3240 /*
3241  *	vm_page_dequeue_deferred:	[ internal use only ]
3242  *
3243  *	Request removal of the given page from its current page
3244  *	queue.  Physical removal from the queue may be deferred
3245  *	indefinitely.
3246  *
3247  *	The page must be locked.
3248  */
3249 void
3250 vm_page_dequeue_deferred(vm_page_t m)
3251 {
3252 	uint8_t queue;
3253 
3254 	vm_page_assert_locked(m);
3255 
3256 	if ((queue = vm_page_queue(m)) == PQ_NONE)
3257 		return;
3258 	vm_page_aflag_set(m, PGA_DEQUEUE);
3259 	vm_page_pqbatch_submit(m, queue);
3260 }
3261 
3262 /*
3263  * A variant of vm_page_dequeue_deferred() that does not assert the page
3264  * lock and is only to be called from vm_page_free_prep().  It is just an
3265  * open-coded implementation of vm_page_dequeue_deferred().  Because the
3266  * page is being freed, we can assume that nothing else is scheduling queue
3267  * operations on this page, so we get for free the mutual exclusion that
3268  * is otherwise provided by the page lock.
3269  */
3270 static void
3271 vm_page_dequeue_deferred_free(vm_page_t m)
3272 {
3273 	uint8_t queue;
3274 
3275 	KASSERT(m->object == NULL, ("page %p has an object reference", m));
3276 
3277 	if ((m->aflags & PGA_DEQUEUE) != 0)
3278 		return;
3279 	atomic_thread_fence_acq();
3280 	if ((queue = m->queue) == PQ_NONE)
3281 		return;
3282 	vm_page_aflag_set(m, PGA_DEQUEUE);
3283 	vm_page_pqbatch_submit(m, queue);
3284 }
3285 
3286 /*
3287  *	vm_page_dequeue:
3288  *
3289  *	Remove the page from whichever page queue it's in, if any.
3290  *	The page must either be locked or unallocated.  This constraint
3291  *	ensures that the queue state of the page will remain consistent
3292  *	after this function returns.
3293  */
3294 void
3295 vm_page_dequeue(vm_page_t m)
3296 {
3297 	struct vm_pagequeue *pq, *pq1;
3298 	uint8_t aflags;
3299 
3300 	KASSERT(mtx_owned(vm_page_lockptr(m)) || m->object == NULL,
3301 	    ("page %p is allocated and unlocked", m));
3302 
3303 	for (pq = vm_page_pagequeue(m);; pq = pq1) {
3304 		if (pq == NULL) {
3305 			/*
3306 			 * A thread may be concurrently executing
3307 			 * vm_page_dequeue_complete().  Ensure that all queue
3308 			 * state is cleared before we return.
3309 			 */
3310 			aflags = atomic_load_8(&m->aflags);
3311 			if ((aflags & PGA_QUEUE_STATE_MASK) == 0)
3312 				return;
3313 			KASSERT((aflags & PGA_DEQUEUE) != 0,
3314 			    ("page %p has unexpected queue state flags %#x",
3315 			    m, aflags));
3316 
3317 			/*
3318 			 * Busy wait until the thread updating queue state is
3319 			 * finished.  Such a thread must be executing in a
3320 			 * critical section.
3321 			 */
3322 			cpu_spinwait();
3323 			pq1 = vm_page_pagequeue(m);
3324 			continue;
3325 		}
3326 		vm_pagequeue_lock(pq);
3327 		if ((pq1 = vm_page_pagequeue(m)) == pq)
3328 			break;
3329 		vm_pagequeue_unlock(pq);
3330 	}
3331 	KASSERT(pq == vm_page_pagequeue(m),
3332 	    ("%s: page %p migrated directly between queues", __func__, m));
3333 	KASSERT((m->aflags & PGA_DEQUEUE) != 0 ||
3334 	    mtx_owned(vm_page_lockptr(m)),
3335 	    ("%s: queued unlocked page %p", __func__, m));
3336 
3337 	if ((m->aflags & PGA_ENQUEUED) != 0)
3338 		vm_pagequeue_remove(pq, m);
3339 	vm_page_dequeue_complete(m);
3340 	vm_pagequeue_unlock(pq);
3341 }
3342 
3343 /*
3344  * Schedule the given page for insertion into the specified page queue.
3345  * Physical insertion of the page may be deferred indefinitely.
3346  */
3347 static void
3348 vm_page_enqueue(vm_page_t m, uint8_t queue)
3349 {
3350 
3351 	vm_page_assert_locked(m);
3352 	KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
3353 	    ("%s: page %p is already enqueued", __func__, m));
3354 
3355 	m->queue = queue;
3356 	if ((m->aflags & PGA_REQUEUE) == 0)
3357 		vm_page_aflag_set(m, PGA_REQUEUE);
3358 	vm_page_pqbatch_submit(m, queue);
3359 }
3360 
3361 /*
3362  *	vm_page_requeue:		[ internal use only ]
3363  *
3364  *	Schedule a requeue of the given page.
3365  *
3366  *	The page must be locked.
3367  */
3368 void
3369 vm_page_requeue(vm_page_t m)
3370 {
3371 
3372 	vm_page_assert_locked(m);
3373 	KASSERT(vm_page_queue(m) != PQ_NONE,
3374 	    ("%s: page %p is not logically enqueued", __func__, m));
3375 
3376 	if ((m->aflags & PGA_REQUEUE) == 0)
3377 		vm_page_aflag_set(m, PGA_REQUEUE);
3378 	vm_page_pqbatch_submit(m, atomic_load_8(&m->queue));
3379 }
3380 
3381 /*
3382  *	vm_page_free_prep:
3383  *
3384  *	Prepares the given page to be put on the free list,
3385  *	disassociating it from any VM object. The caller may return
3386  *	the page to the free list only if this function returns true.
3387  *
3388  *	The object must be locked.  The page must be locked if it is
3389  *	managed.
3390  */
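/*
 *	The canonical caller is vm_page_free_toq() below.  A minimal sketch
 *	of the contract, assuming "m" was not allocated from a per-CPU cache:
 *
 *		if (vm_page_free_prep(m)) {
 *			vmd = vm_pagequeue_domain(m);
 *			vm_domain_free_lock(vmd);
 *			vm_phys_free_pages(m, 0);
 *			vm_domain_free_unlock(vmd);
 *			vm_domain_freecnt_inc(vmd, 1);
 *		}
 */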
3391 bool
3392 vm_page_free_prep(vm_page_t m)
3393 {
3394 
3395 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP)
3396 	if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) {
3397 		uint64_t *p;
3398 		int i;
3399 		p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3400 		for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++)
3401 			KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx",
3402 			    m, i, (uintmax_t)*p));
3403 	}
3404 #endif
3405 	if ((m->oflags & VPO_UNMANAGED) == 0) {
3406 		vm_page_lock_assert(m, MA_OWNED);
3407 		KASSERT(!pmap_page_is_mapped(m),
3408 		    ("vm_page_free_prep: freeing mapped page %p", m));
3409 	} else
3410 		KASSERT(m->queue == PQ_NONE,
3411 		    ("vm_page_free_prep: unmanaged page %p is queued", m));
3412 	VM_CNT_INC(v_tfree);
3413 
3414 	if (vm_page_sbusied(m))
3415 		panic("vm_page_free_prep: freeing busy page %p", m);
3416 
3417 	if (m->object != NULL)
3418 		(void)vm_page_remove(m);
3419 
3420 	/*
3421 	 * If the page is fictitious, remove the object association and
3422 	 * return.
3423 	 */
3424 	if ((m->flags & PG_FICTITIOUS) != 0) {
3425 		KASSERT(m->wire_count == 1,
3426 		    ("fictitious page %p is not wired", m));
3427 		KASSERT(m->queue == PQ_NONE,
3428 		    ("fictitious page %p is queued", m));
3429 		return (false);
3430 	}
3431 
3432 	/*
3433 	 * Pages need not be dequeued before they are returned to the physical
3434 	 * memory allocator, but they must at least be marked for a deferred
3435 	 * dequeue.
3436 	 */
3437 	if ((m->oflags & VPO_UNMANAGED) == 0)
3438 		vm_page_dequeue_deferred_free(m);
3439 
3440 	m->valid = 0;
3441 	vm_page_undirty(m);
3442 
3443 	if (vm_page_wired(m) != 0)
3444 		panic("vm_page_free_prep: freeing wired page %p", m);
3445 
3446 	/*
3447 	 * Restore the default memory attribute to the page.
3448 	 */
3449 	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
3450 		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
3451 
3452 #if VM_NRESERVLEVEL > 0
3453 	/*
3454 	 * Determine whether the page belongs to a reservation.  If the page was
3455 	 * allocated from a per-CPU cache, it cannot belong to a reservation, so
3456 	 * as an optimization, we avoid the check in that case.
3457 	 */
3458 	if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m))
3459 		return (false);
3460 #endif
3461 
3462 	return (true);
3463 }
3464 
3465 /*
3466  *	vm_page_free_toq:
3467  *
3468  *	Returns the given page to the free list, disassociating it
3469  *	from any VM object.
3470  *
3471  *	The object must be locked.  The page must be locked if it is
3472  *	managed.
3473  */
3474 void
3475 vm_page_free_toq(vm_page_t m)
3476 {
3477 	struct vm_domain *vmd;
3478 	uma_zone_t zone;
3479 
3480 	if (!vm_page_free_prep(m))
3481 		return;
3482 
3483 	vmd = vm_pagequeue_domain(m);
3484 	zone = vmd->vmd_pgcache[m->pool].zone;
3485 	if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) {
3486 		uma_zfree(zone, m);
3487 		return;
3488 	}
3489 	vm_domain_free_lock(vmd);
3490 	vm_phys_free_pages(m, 0);
3491 	vm_domain_free_unlock(vmd);
3492 	vm_domain_freecnt_inc(vmd, 1);
3493 }
3494 
3495 /*
3496  *	vm_page_free_pages_toq:
3497  *
3498  *	Returns a list of pages to the free list, disassociating them
3499  *	from any VM object.  In other words, this is equivalent to
3500  *	calling vm_page_free_toq() for each page of the list.
3501  *
3502  *	The objects must be locked.  The pages must be locked if they are
3503  *	managed.
3504  */
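/*
 *	Illustrative sketch (hypothetical caller): pages are typically
 *	collected on an spglist, e.g. by pmap functions, and then freed in
 *	one call, passing "true" when the collected pages were wired:
 *
 *		struct spglist free;
 *
 *		SLIST_INIT(&free);
 *		(unmap and collect pages onto "free")
 *		vm_page_free_pages_toq(&free, true);
 */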
3505 void
3506 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count)
3507 {
3508 	vm_page_t m;
3509 	int count;
3510 
3511 	if (SLIST_EMPTY(free))
3512 		return;
3513 
3514 	count = 0;
3515 	while ((m = SLIST_FIRST(free)) != NULL) {
3516 		count++;
3517 		SLIST_REMOVE_HEAD(free, plinks.s.ss);
3518 		vm_page_free_toq(m);
3519 	}
3520 
3521 	if (update_wire_count)
3522 		vm_wire_sub(count);
3523 }
3524 
3525 /*
3526  *	vm_page_wire:
3527  *
3528  * Mark this page as wired down.  If the page is fictitious, then
3529  * its wire count must remain one.
3530  *
3531  * The page must be locked.
3532  */
3533 void
3534 vm_page_wire(vm_page_t m)
3535 {
3536 
3537 	vm_page_assert_locked(m);
3538 	if ((m->flags & PG_FICTITIOUS) != 0) {
3539 		KASSERT(m->wire_count == 1,
3540 		    ("vm_page_wire: fictitious page %p's wire count isn't one",
3541 		    m));
3542 		return;
3543 	}
3544 	if (!vm_page_wired(m)) {
3545 		KASSERT((m->oflags & VPO_UNMANAGED) == 0 ||
3546 		    m->queue == PQ_NONE,
3547 		    ("vm_page_wire: unmanaged page %p is queued", m));
3548 		vm_wire_add(1);
3549 	}
3550 	m->wire_count++;
3551 	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
3552 }
3553 
3554 /*
3555  * vm_page_unwire:
3556  *
3557  * Release one wiring of the specified page, potentially allowing it to be
3558  * paged out.  Returns TRUE if the number of wirings transitions to zero and
3559  * FALSE otherwise.
3560  *
3561  * Only managed pages belonging to an object can be paged out.  If the number
3562  * of wirings transitions to zero and the page is eligible for page out, then
3563  * the page is added to the specified paging queue (unless PQ_NONE is
3564  * specified, in which case the page is dequeued if it belongs to a paging
3565  * queue).
3566  *
3567  * If a page is fictitious, then its wire count must always be one.
3568  *
3569  * A managed page must be locked.
3570  */
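/*
 *	For instance (illustrative only), releasing a transient wiring on a
 *	managed page and letting it age out through the inactive queue:
 *
 *		vm_page_lock(m);
 *		(void)vm_page_unwire(m, PQ_INACTIVE);
 *		vm_page_unlock(m);
 */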
3571 bool
3572 vm_page_unwire(vm_page_t m, uint8_t queue)
3573 {
3574 	bool unwired;
3575 
3576 	KASSERT(queue < PQ_COUNT || queue == PQ_NONE,
3577 	    ("vm_page_unwire: invalid queue %u request for page %p",
3578 	    queue, m));
3579 	if ((m->oflags & VPO_UNMANAGED) == 0)
3580 		vm_page_assert_locked(m);
3581 
3582 	unwired = vm_page_unwire_noq(m);
3583 	if (!unwired || (m->oflags & VPO_UNMANAGED) != 0 || m->object == NULL)
3584 		return (unwired);
3585 
3586 	if (vm_page_queue(m) == queue) {
3587 		if (queue == PQ_ACTIVE)
3588 			vm_page_reference(m);
3589 		else if (queue != PQ_NONE)
3590 			vm_page_requeue(m);
3591 	} else {
3592 		vm_page_dequeue(m);
3593 		if (queue != PQ_NONE) {
3594 			vm_page_enqueue(m, queue);
3595 			if (queue == PQ_ACTIVE)
3596 				/* Initialize act_count. */
3597 				vm_page_activate(m);
3598 		}
3599 	}
3600 	return (unwired);
3601 }
3602 
3603 /*
3604  *
3605  * vm_page_unwire_noq:
3606  *
3607  * Unwire a page without (re-)inserting it into a page queue.  It is up
3608  * to the caller to enqueue, requeue, or free the page as appropriate.
3609  * In most cases, vm_page_unwire() should be used instead.
3610  */
3611 bool
3612 vm_page_unwire_noq(vm_page_t m)
3613 {
3614 
3615 	if ((m->oflags & VPO_UNMANAGED) == 0)
3616 		vm_page_assert_locked(m);
3617 	if ((m->flags & PG_FICTITIOUS) != 0) {
3618 		KASSERT(m->wire_count == 1,
3619 	    ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
3620 		return (false);
3621 	}
3622 	if (!vm_page_wired(m))
3623 		panic("vm_page_unwire: page %p's wire count is zero", m);
3624 	m->wire_count--;
3625 	if (m->wire_count == 0) {
3626 		vm_wire_sub(1);
3627 		return (true);
3628 	} else
3629 		return (false);
3630 }
3631 
3632 /*
3633  *	vm_page_activate:
3634  *
3635  *	Put the specified page on the active list (if appropriate).
3636  *	Ensure that act_count is at least ACT_INIT but do not otherwise
3637  *	mess with it.
3638  *
3639  *	The page must be locked.
3640  */
3641 void
3642 vm_page_activate(vm_page_t m)
3643 {
3644 
3645 	vm_page_assert_locked(m);
3646 
3647 	if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0)
3648 		return;
3649 	if (vm_page_queue(m) == PQ_ACTIVE) {
3650 		if (m->act_count < ACT_INIT)
3651 			m->act_count = ACT_INIT;
3652 		return;
3653 	}
3654 
3655 	vm_page_dequeue(m);
3656 	if (m->act_count < ACT_INIT)
3657 		m->act_count = ACT_INIT;
3658 	vm_page_enqueue(m, PQ_ACTIVE);
3659 }
3660 
3661 /*
3662  * Move the specified page to the tail of the inactive queue, or requeue
3663  * the page if it is already in the inactive queue.
3664  *
3665  * The page must be locked.
3666  */
3667 void
3668 vm_page_deactivate(vm_page_t m)
3669 {
3670 
3671 	vm_page_assert_locked(m);
3672 
3673 	if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0)
3674 		return;
3675 
3676 	if (!vm_page_inactive(m)) {
3677 		vm_page_dequeue(m);
3678 		vm_page_enqueue(m, PQ_INACTIVE);
3679 	} else
3680 		vm_page_requeue(m);
3681 }
3682 
3683 /*
3684  * Move the specified page close to the head of the inactive queue,
3685  * bypassing LRU.  A marker page is used to maintain FIFO ordering.
3686  * As with regular enqueues, we use a per-CPU batch queue to reduce
3687  * contention on the page queue lock.
3688  *
3689  * The page must be locked.
3690  */
3691 void
3692 vm_page_deactivate_noreuse(vm_page_t m)
3693 {
3694 
3695 	vm_page_assert_locked(m);
3696 
3697 	if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0)
3698 		return;
3699 
3700 	if (!vm_page_inactive(m)) {
3701 		vm_page_dequeue(m);
3702 		m->queue = PQ_INACTIVE;
3703 	}
3704 	if ((m->aflags & PGA_REQUEUE_HEAD) == 0)
3705 		vm_page_aflag_set(m, PGA_REQUEUE_HEAD);
3706 	vm_page_pqbatch_submit(m, PQ_INACTIVE);
3707 }
3708 
3709 /*
3710  * vm_page_launder
3711  *
3712  * 	Put a page in the laundry, or requeue it if it is already there.
3713  */
3714 void
3715 vm_page_launder(vm_page_t m)
3716 {
3717 
3718 	vm_page_assert_locked(m);
3719 	if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0)
3720 		return;
3721 
3722 	if (vm_page_in_laundry(m))
3723 		vm_page_requeue(m);
3724 	else {
3725 		vm_page_dequeue(m);
3726 		vm_page_enqueue(m, PQ_LAUNDRY);
3727 	}
3728 }
3729 
3730 /*
3731  * vm_page_unswappable
3732  *
3733  *	Put a page in the PQ_UNSWAPPABLE holding queue.
3734  */
3735 void
3736 vm_page_unswappable(vm_page_t m)
3737 {
3738 
3739 	vm_page_assert_locked(m);
3740 	KASSERT(!vm_page_wired(m) && (m->oflags & VPO_UNMANAGED) == 0,
3741 	    ("page %p already unswappable", m));
3742 
3743 	vm_page_dequeue(m);
3744 	vm_page_enqueue(m, PQ_UNSWAPPABLE);
3745 }
3746 
3747 static void
3748 vm_page_release_toq(vm_page_t m, int flags)
3749 {
3750 
3751 	/*
3752 	 * Use a check of the valid bits to determine whether we should
3753 	 * accelerate reclamation of the page.  The object lock might not be
3754 	 * held here, in which case the check is racy.  At worst we will either
3755 	 * accelerate reclamation of a valid page and violate LRU, or
3756 	 * unnecessarily defer reclamation of an invalid page.
3757 	 *
3758 	 * If we were asked to not cache the page, place it near the head
3759 	 * of the inactive queue so that it is reclaimed sooner.
3760 	 */
3761 	if ((flags & (VPR_TRYFREE | VPR_NOREUSE)) != 0 || m->valid == 0)
3762 		vm_page_deactivate_noreuse(m);
3763 	else if (vm_page_active(m))
3764 		vm_page_reference(m);
3765 	else
3766 		vm_page_deactivate(m);
3767 }
3768 
3769 /*
3770  * Unwire a page and either attempt to free it or re-add it to the page queues.
3771  */
3772 void
3773 vm_page_release(vm_page_t m, int flags)
3774 {
3775 	vm_object_t object;
3776 	bool freed;
3777 
3778 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3779 	    ("vm_page_release: page %p is unmanaged", m));
3780 
3781 	vm_page_lock(m);
3782 	if (m->object != NULL)
3783 		VM_OBJECT_ASSERT_UNLOCKED(m->object);
3784 	if (vm_page_unwire_noq(m)) {
3785 		if ((object = m->object) == NULL) {
3786 			vm_page_free(m);
3787 		} else {
3788 			freed = false;
3789 			if ((flags & VPR_TRYFREE) != 0 && !vm_page_busied(m) &&
3790 			    /* Depends on type stability. */
3791 			    VM_OBJECT_TRYWLOCK(object)) {
3792 				/*
3793 				 * Only free unmapped pages.  The busy test from
3794 				 * before the object was locked cannot be relied
3795 				 * upon.
3796 				 */
3797 				if ((object->ref_count == 0 ||
3798 				    !pmap_page_is_mapped(m)) && m->dirty == 0 &&
3799 				    !vm_page_busied(m)) {
3800 					vm_page_free(m);
3801 					freed = true;
3802 				}
3803 				VM_OBJECT_WUNLOCK(object);
3804 			}
3805 
3806 			if (!freed)
3807 				vm_page_release_toq(m, flags);
3808 		}
3809 	}
3810 	vm_page_unlock(m);
3811 }
3812 
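/*
 * Usage sketch (illustrative only): a consumer that transiently wired a
 * managed page "m" for I/O, with the object unlocked, can drop the wiring
 * and hint that the page may be freed outright:
 *
 *	vm_page_release(m, VPR_TRYFREE);
 *
 * Passing VPR_NOREUSE instead keeps the page resident but queues it for
 * early reclamation.
 */
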
3813 /* See vm_page_release(). */
3814 void
3815 vm_page_release_locked(vm_page_t m, int flags)
3816 {
3817 
3818 	VM_OBJECT_ASSERT_WLOCKED(m->object);
3819 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3820 	    ("vm_page_release_locked: page %p is unmanaged", m));
3821 
3822 	vm_page_lock(m);
3823 	if (vm_page_unwire_noq(m)) {
3824 		if ((flags & VPR_TRYFREE) != 0 &&
3825 		    (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
3826 		    m->dirty == 0 && !vm_page_busied(m)) {
3827 			vm_page_free(m);
3828 		} else {
3829 			vm_page_release_toq(m, flags);
3830 		}
3831 	}
3832 	vm_page_unlock(m);
3833 }
3834 
3835 /*
3836  * vm_page_advise
3837  *
3838  * 	Apply the specified advice to the given page.
3839  *
3840  *	The object and page must be locked.
3841  */
3842 void
3843 vm_page_advise(vm_page_t m, int advice)
3844 {
3845 
3846 	vm_page_assert_locked(m);
3847 	VM_OBJECT_ASSERT_WLOCKED(m->object);
3848 	if (advice == MADV_FREE)
3849 		/*
3850 		 * Mark the page clean.  This will allow the page to be freed
3851 		 * without first paging it out.  MADV_FREE pages are often
3852 		 * quickly reused by malloc(3), so we do not do anything that
3853 		 * would result in a page fault on a later access.
3854 		 */
3855 		vm_page_undirty(m);
3856 	else if (advice != MADV_DONTNEED) {
3857 		if (advice == MADV_WILLNEED)
3858 			vm_page_activate(m);
3859 		return;
3860 	}
3861 
3862 	/*
3863 	 * Clear any references to the page.  Otherwise, the page daemon will
3864 	 * immediately reactivate the page.
3865 	 */
3866 	vm_page_aflag_clear(m, PGA_REFERENCED);
3867 
3868 	if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
3869 		vm_page_dirty(m);
3870 
3871 	/*
3872 	 * Place clean pages near the head of the inactive queue rather than
3873 	 * the tail, thus defeating the queue's LRU operation and ensuring that
3874 	 * the page will be reused quickly.  Dirty pages not already in the
3875 	 * laundry are moved there.
3876 	 */
3877 	if (m->dirty == 0)
3878 		vm_page_deactivate_noreuse(m);
3879 	else if (!vm_page_in_laundry(m))
3880 		vm_page_launder(m);
3881 }
3882 
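/*
 * Usage sketch (illustrative only): a caller such as vm_object_madvise()
 * is expected to hold both locks around the call; "object" and "m" stand
 * for the object and one of its resident pages:
 *
 *	VM_OBJECT_WLOCK(object);
 *	vm_page_lock(m);
 *	vm_page_advise(m, MADV_DONTNEED);
 *	vm_page_unlock(m);
 *	VM_OBJECT_WUNLOCK(object);
 */
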
3883 /*
3884  * Grab a page, waiting until we are woken up due to the page
3885  * changing state.  We keep on waiting as long as the page continues
3886  * to exist in the object.  If the page doesn't exist, first allocate
3887  * it and then conditionally zero it.
3888  *
3889  * This routine may sleep.
3890  *
3891  * The object must be locked on entry.  The lock will, however, be released
3892  * and reacquired if the routine sleeps.
3893  */
3894 vm_page_t
3895 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
3896 {
3897 	vm_page_t m;
3898 	int sleep;
3899 	int pflags;
3900 
3901 	VM_OBJECT_ASSERT_WLOCKED(object);
3902 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
3903 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
3904 	    ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
3905 	pflags = allocflags &
3906 	    ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
3907 	if ((allocflags & VM_ALLOC_NOWAIT) == 0)
3908 		pflags |= VM_ALLOC_WAITFAIL;
3909 retrylookup:
3910 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
3911 		sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
3912 		    vm_page_xbusied(m) : vm_page_busied(m);
3913 		if (sleep) {
3914 			if ((allocflags & VM_ALLOC_NOWAIT) != 0)
3915 				return (NULL);
3916 			/*
3917 			 * Reference the page before unlocking and
3918 			 * sleeping so that the page daemon is less
3919 			 * likely to reclaim it.
3920 			 */
3921 			vm_page_aflag_set(m, PGA_REFERENCED);
3922 			vm_page_lock(m);
3923 			VM_OBJECT_WUNLOCK(object);
3924 			vm_page_busy_sleep(m, "pgrbwt", (allocflags &
3925 			    VM_ALLOC_IGN_SBUSY) != 0);
3926 			VM_OBJECT_WLOCK(object);
3927 			goto retrylookup;
3928 		} else {
3929 			if ((allocflags & VM_ALLOC_WIRED) != 0) {
3930 				vm_page_lock(m);
3931 				vm_page_wire(m);
3932 				vm_page_unlock(m);
3933 			}
3934 			if ((allocflags &
3935 			    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
3936 				vm_page_xbusy(m);
3937 			if ((allocflags & VM_ALLOC_SBUSY) != 0)
3938 				vm_page_sbusy(m);
3939 			return (m);
3940 		}
3941 	}
3942 	m = vm_page_alloc(object, pindex, pflags);
3943 	if (m == NULL) {
3944 		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
3945 			return (NULL);
3946 		goto retrylookup;
3947 	}
3948 	if ((allocflags & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
3949 		pmap_zero_page(m);
3950 	return (m);
3951 }
3952 
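/*
 * Usage sketch (illustrative only): grabbing a wired page at index
 * "pindex", allocating and zeroing it if it is not already resident;
 * "object", "pindex" and "m" are hypothetical:
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_grab(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	VM_OBJECT_WUNLOCK(object);
 *
 * Unless VM_ALLOC_NOBUSY or VM_ALLOC_SBUSY is specified, the returned
 * page is exclusive busied.
 */
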
3953 /*
3954  * Return the specified range of pages from the given object.  For each
3955  * page offset within the range, if a page already exists within the object
3956  * at that offset and it is busy, then wait for it to change state.  If,
3957  * instead, the page doesn't exist, then allocate it.
3958  *
3959  * The caller must always specify an allocation class.
3960  *
3961  * allocation classes:
3962  *	VM_ALLOC_NORMAL		normal process request
3963  *	VM_ALLOC_SYSTEM		system *really* needs the pages
3964  *
3965  * The caller must always specify that the pages are to be busied and/or
3966  * wired.
3967  *
3968  * optional allocation flags:
3969  *	VM_ALLOC_IGN_SBUSY	do not sleep on soft busy pages
3970  *	VM_ALLOC_NOBUSY		do not exclusive busy the page
3971  *	VM_ALLOC_NOWAIT		do not sleep
3972  *	VM_ALLOC_SBUSY		set page to sbusy state
3973  *	VM_ALLOC_WIRED		wire the pages
3974  *	VM_ALLOC_ZERO		zero and validate any invalid pages
3975  *
3976  * If VM_ALLOC_NOWAIT is not specified, this routine may sleep.  Otherwise, it
3977  * may return a partial prefix of the requested range.
3978  */
3979 int
3980 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
3981     vm_page_t *ma, int count)
3982 {
3983 	vm_page_t m, mpred;
3984 	int pflags;
3985 	int i;
3986 	bool sleep;
3987 
3988 	VM_OBJECT_ASSERT_WLOCKED(object);
3989 	KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
3990 	    ("vm_page_grab_pages: VM_ALLOC_COUNT() is not allowed"));
3991 	KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
3992 	    (allocflags & VM_ALLOC_WIRED) != 0,
3993 	    ("vm_page_grab_pages: the pages must be busied or wired"));
3994 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
3995 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
3996 	    ("vm_page_grab_pages: VM_ALLOC_SBUSY/IGN_SBUSY mismatch"));
3997 	if (count == 0)
3998 		return (0);
3999 	pflags = allocflags & ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK |
4000 	    VM_ALLOC_WAITFAIL | VM_ALLOC_IGN_SBUSY);
4001 	if ((allocflags & VM_ALLOC_NOWAIT) == 0)
4002 		pflags |= VM_ALLOC_WAITFAIL;
4003 	i = 0;
4004 retrylookup:
4005 	m = vm_radix_lookup_le(&object->rtree, pindex + i);
4006 	if (m == NULL || m->pindex != pindex + i) {
4007 		mpred = m;
4008 		m = NULL;
4009 	} else
4010 		mpred = TAILQ_PREV(m, pglist, listq);
4011 	for (; i < count; i++) {
4012 		if (m != NULL) {
4013 			sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
4014 			    vm_page_xbusied(m) : vm_page_busied(m);
4015 			if (sleep) {
4016 				if ((allocflags & VM_ALLOC_NOWAIT) != 0)
4017 					break;
4018 				/*
4019 				 * Reference the page before unlocking and
4020 				 * sleeping so that the page daemon is less
4021 				 * likely to reclaim it.
4022 				 */
4023 				vm_page_aflag_set(m, PGA_REFERENCED);
4024 				vm_page_lock(m);
4025 				VM_OBJECT_WUNLOCK(object);
4026 				vm_page_busy_sleep(m, "grbmaw", (allocflags &
4027 				    VM_ALLOC_IGN_SBUSY) != 0);
4028 				VM_OBJECT_WLOCK(object);
4029 				goto retrylookup;
4030 			}
4031 			if ((allocflags & VM_ALLOC_WIRED) != 0) {
4032 				vm_page_lock(m);
4033 				vm_page_wire(m);
4034 				vm_page_unlock(m);
4035 			}
4036 			if ((allocflags & (VM_ALLOC_NOBUSY |
4037 			    VM_ALLOC_SBUSY)) == 0)
4038 				vm_page_xbusy(m);
4039 			if ((allocflags & VM_ALLOC_SBUSY) != 0)
4040 				vm_page_sbusy(m);
4041 		} else {
4042 			m = vm_page_alloc_after(object, pindex + i,
4043 			    pflags | VM_ALLOC_COUNT(count - i), mpred);
4044 			if (m == NULL) {
4045 				if ((allocflags & VM_ALLOC_NOWAIT) != 0)
4046 					break;
4047 				goto retrylookup;
4048 			}
4049 		}
4050 		if (m->valid == 0 && (allocflags & VM_ALLOC_ZERO) != 0) {
4051 			if ((m->flags & PG_ZERO) == 0)
4052 				pmap_zero_page(m);
4053 			m->valid = VM_PAGE_BITS_ALL;
4054 		}
4055 		ma[i] = mpred = m;
4056 		m = vm_page_next(m);
4057 	}
4058 	return (i);
4059 }
4060 
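/*
 * Usage sketch (illustrative only): populating a contiguous run of pages,
 * wired and exclusive busied, zero-filling any newly allocated pages; the
 * return value must be checked because VM_ALLOC_NOWAIT permits a short
 * count.  "object", "pindex", "ma", "count" and "n" are hypothetical:
 *
 *	VM_OBJECT_WLOCK(object);
 *	n = vm_page_grab_pages(object, pindex, VM_ALLOC_NORMAL |
 *	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_NOWAIT, ma, count);
 *	VM_OBJECT_WUNLOCK(object);
 */
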
4061 /*
4062  * Mapping function for valid or dirty bits in a page.
4063  *
4064  * Inputs are required to range within a page.
4065  */
4066 vm_page_bits_t
4067 vm_page_bits(int base, int size)
4068 {
4069 	int first_bit;
4070 	int last_bit;
4071 
4072 	KASSERT(
4073 	    base + size <= PAGE_SIZE,
4074 	    ("vm_page_bits: illegal base/size %d/%d", base, size)
4075 	);
4076 
4077 	if (size == 0)		/* handle degenerate case */
4078 		return (0);
4079 
4080 	first_bit = base >> DEV_BSHIFT;
4081 	last_bit = (base + size - 1) >> DEV_BSHIFT;
4082 
4083 	return (((vm_page_bits_t)2 << last_bit) -
4084 	    ((vm_page_bits_t)1 << first_bit));
4085 }
4086 
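/*
 * Worked example (assuming DEV_BSIZE == 512, i.e. DEV_BSHIFT == 9):
 * vm_page_bits(512, 1024) spans blocks 1 and 2, so first_bit == 1,
 * last_bit == 2, and the result is (2 << 2) - (1 << 1) == 0x6, a mask
 * with bits 1 and 2 set.
 */
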
4087 /*
4088  *	vm_page_set_valid_range:
4089  *
4090  *	Sets portions of a page valid.  The arguments are expected
4091  *	Sets portions of a page valid.  The arguments are expected to be
4092  *	DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
4093  *	such chunks will be zeroed.
4094  *
4095  *	(base + size) must be less than or equal to PAGE_SIZE.
4096  */
4097 void
4098 vm_page_set_valid_range(vm_page_t m, int base, int size)
4099 {
4100 	int endoff, frag;
4101 
4102 	VM_OBJECT_ASSERT_WLOCKED(m->object);
4103 	if (size == 0)	/* handle degenerate case */
4104 		return;
4105 
4106 	/*
4107 	 * If the base is not DEV_BSIZE aligned and the valid
4108 	 * bit is clear, we have to zero out a portion of the
4109 	 * first block.
4110 	 */
4111 	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
4112 	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
4113 		pmap_zero_page_area(m, frag, base - frag);
4114 
4115 	/*
4116 	 * If the ending offset is not DEV_BSIZE aligned and the
4117 	 * valid bit is clear, we have to zero out a portion of
4118 	 * the last block.
4119 	 */
4120 	endoff = base + size;
4121 	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
4122 	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
4123 		pmap_zero_page_area(m, endoff,
4124 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
4125 
4126 	/*
4127 	 * Assert that no previously invalid block that is now being validated
4128 	 * is already dirty.
4129 	 */
4130 	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
4131 	    ("vm_page_set_valid_range: page %p is dirty", m));
4132 
4133 	/*
4134 	 * Set valid bits inclusive of any overlap.
4135 	 */
4136 	m->valid |= vm_page_bits(base, size);
4137 }
4138 
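/*
 * Worked example (illustrative only): a pager that has read only the
 * first 100 bytes of an invalid page "m" could record that with
 *
 *	VM_OBJECT_WLOCK(m->object);
 *	vm_page_set_valid_range(m, 0, 100);
 *	VM_OBJECT_WUNLOCK(m->object);
 *
 * which zeroes bytes 100 through 511 of the first block (since that block
 * was previously invalid and the end offset is not DEV_BSIZE aligned,
 * assuming DEV_BSIZE == 512) and then sets bit 0 of m->valid.
 */
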
4139 /*
4140  * Clear the given bits from the specified page's dirty field.
4141  */
4142 static __inline void
4143 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
4144 {
4145 	uintptr_t addr;
4146 #if PAGE_SIZE < 16384
4147 	int shift;
4148 #endif
4149 
4150 	/*
4151 	 * If the object is locked and the page is neither exclusive busy nor
4152 	 * write mapped, then the page's dirty field cannot possibly be
4153 	 * set by a concurrent pmap operation.
4154 	 */
4155 	VM_OBJECT_ASSERT_WLOCKED(m->object);
4156 	if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
4157 		m->dirty &= ~pagebits;
4158 	else {
4159 		/*
4160 		 * The pmap layer can call vm_page_dirty() without
4161 		 * holding a distinguished lock.  The combination of
4162 		 * the object's lock and an atomic operation suffice
4163 		 * to guarantee consistency of the page dirty field.
4164 		 *
4165 		 * For PAGE_SIZE == 32768 case, compiler already
4166 		 * properly aligns the dirty field, so no forcible
4167 		 * alignment is needed. Only require existence of
4168 		 * atomic_clear_64 when page size is 32768.
4169 		 */
4170 		addr = (uintptr_t)&m->dirty;
4171 #if PAGE_SIZE == 32768
4172 		atomic_clear_64((uint64_t *)addr, pagebits);
4173 #elif PAGE_SIZE == 16384
4174 		atomic_clear_32((uint32_t *)addr, pagebits);
4175 #else		/* PAGE_SIZE <= 8192 */
4176 		/*
4177 		 * Use a trick to perform a 32-bit atomic on the
4178 		 * containing aligned word, to not depend on the existence
4179 		 * of atomic_clear_{8, 16}.
4180 		 */
4181 		shift = addr & (sizeof(uint32_t) - 1);
4182 #if BYTE_ORDER == BIG_ENDIAN
4183 		shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
4184 #else
4185 		shift *= NBBY;
4186 #endif
4187 		addr &= ~(sizeof(uint32_t) - 1);
4188 		atomic_clear_32((uint32_t *)addr, pagebits << shift);
4189 #endif		/* PAGE_SIZE */
4190 	}
4191 }
4192 
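/*
 * Worked example for the sub-word atomic path (assuming PAGE_SIZE == 4096,
 * so the dirty field is a single byte): if that byte sits at offset 2
 * within its aligned 32-bit word, then on a little-endian machine
 * shift == 16 and atomic_clear_32() is applied to the containing word
 * with the mask (pagebits << 16), clearing only the requested bits of
 * the dirty field.
 */
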
4193 /*
4194  *	vm_page_set_validclean:
4195  *
4196  *	Sets portions of a page valid and clean.  The arguments are expected
4197  *	Sets portions of a page valid and clean.  The arguments are
4198  *	expected to be DEV_BSIZE aligned, but if they aren't, the bitmap
4199  *	is inclusive of any partial chunks touched by the range.  The
4200  *	invalid portion of such chunks will be zeroed.
4201  *	(base + size) must be less than or equal to PAGE_SIZE.
4202  */
4203 void
4204 vm_page_set_validclean(vm_page_t m, int base, int size)
4205 {
4206 	vm_page_bits_t oldvalid, pagebits;
4207 	int endoff, frag;
4208 
4209 	VM_OBJECT_ASSERT_WLOCKED(m->object);
4210 	if (size == 0)	/* handle degenerate case */
4211 		return;
4212 
4213 	/*
4214 	 * If the base is not DEV_BSIZE aligned and the valid
4215 	 * bit is clear, we have to zero out a portion of the
4216 	 * first block.
4217 	 */
4218 	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
4219 	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
4220 		pmap_zero_page_area(m, frag, base - frag);
4221 
4222 	/*
4223 	 * If the ending offset is not DEV_BSIZE aligned and the
4224 	 * valid bit is clear, we have to zero out a portion of
4225 	 * the last block.
4226 	 */
4227 	endoff = base + size;
4228 	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
4229 	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
4230 		pmap_zero_page_area(m, endoff,
4231 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
4232 
4233 	/*
4234 	 * Set valid, clear dirty bits.  If validating the entire
4235 	 * page we can safely clear the pmap modify bit.  We also
4236 	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
4237 	 * takes a write fault on a MAP_NOSYNC memory area the flag will
4238 	 * be set again.
4239 	 *
4240 	 * We set valid bits inclusive of any overlap, but we can only
4241 	 * clear dirty bits for DEV_BSIZE chunks that are fully within
4242 	 * the range.
4243 	 */
4244 	oldvalid = m->valid;
4245 	pagebits = vm_page_bits(base, size);
4246 	m->valid |= pagebits;
4247 #if 0	/* NOT YET */
4248 	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
4249 		frag = DEV_BSIZE - frag;
4250 		base += frag;
4251 		size -= frag;
4252 		if (size < 0)
4253 			size = 0;
4254 	}
4255 	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
4256 #endif
4257 	if (base == 0 && size == PAGE_SIZE) {
4258 		/*
4259 		 * The page can only be modified within the pmap if it is
4260 		 * mapped, and it can only be mapped if it was previously
4261 		 * fully valid.
4262 		 */
4263 		if (oldvalid == VM_PAGE_BITS_ALL)
4264 			/*
4265 			 * Perform the pmap_clear_modify() first.  Otherwise,
4266 			 * a concurrent pmap operation, such as
4267 			 * pmap_protect(), could clear a modification in the
4268 			 * pmap and set the dirty field on the page before
4269 			 * pmap_clear_modify() had begun and after the dirty
4270 			 * field was cleared here.
4271 			 */
4272 			pmap_clear_modify(m);
4273 		m->dirty = 0;
4274 		m->oflags &= ~VPO_NOSYNC;
4275 	} else if (oldvalid != VM_PAGE_BITS_ALL)
4276 		m->dirty &= ~pagebits;
4277 	else
4278 		vm_page_clear_dirty_mask(m, pagebits);
4279 }
4280 
4281 void
4282 vm_page_clear_dirty(vm_page_t m, int base, int size)
4283 {
4284 
4285 	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
4286 }
4287 
4288 /*
4289  *	vm_page_set_invalid:
4290  *
4291  *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
4292  *	valid and dirty bits for the affected areas are cleared.
4293  */
4294 void
4295 vm_page_set_invalid(vm_page_t m, int base, int size)
4296 {
4297 	vm_page_bits_t bits;
4298 	vm_object_t object;
4299 
4300 	object = m->object;
4301 	VM_OBJECT_ASSERT_WLOCKED(object);
4302 	if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
4303 	    size >= object->un_pager.vnp.vnp_size)
4304 		bits = VM_PAGE_BITS_ALL;
4305 	else
4306 		bits = vm_page_bits(base, size);
4307 	if (object->ref_count != 0 && m->valid == VM_PAGE_BITS_ALL &&
4308 	    bits != 0)
4309 		pmap_remove_all(m);
4310 	KASSERT((bits == 0 && m->valid == VM_PAGE_BITS_ALL) ||
4311 	    !pmap_page_is_mapped(m),
4312 	    ("vm_page_set_invalid: page %p is mapped", m));
4313 	m->valid &= ~bits;
4314 	m->dirty &= ~bits;
4315 }
4316 
4317 /*
4318  * vm_page_zero_invalid()
4319  *
4320  *	The kernel assumes that the invalid portions of a page contain
4321  *	garbage, but such pages can be mapped into memory by user code.
4322  *	When this occurs, we must zero out the non-valid portions of the
4323  *	page so user code sees what it expects.
4324  *
4325  *	Pages are most often semi-valid when the end of a file is mapped
4326  *	into memory and the file's size is not page aligned.
4327  */
4328 void
4329 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
4330 {
4331 	int b;
4332 	int i;
4333 
4334 	VM_OBJECT_ASSERT_WLOCKED(m->object);
4335 	/*
4336 	 * Scan the valid bits looking for invalid sections that
4337 	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
4338 	 * valid bit may be set) have already been zeroed by
4339 	 * vm_page_set_validclean().
4340 	 */
4341 	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
4342 		if (i == (PAGE_SIZE / DEV_BSIZE) ||
4343 		    (m->valid & ((vm_page_bits_t)1 << i))) {
4344 			if (i > b) {
4345 				pmap_zero_page_area(m,
4346 				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
4347 			}
4348 			b = i + 1;
4349 		}
4350 	}
4351 
4352 	/*
4353 	 * setvalid is TRUE when we can safely set the zeroed areas as
4354 	 * being valid.  We can do this if there are no cache consistency
4355 	 * issues, e.g., it is OK to do with UFS, but not with NFS.
4356 	 */
4357 	if (setvalid)
4358 		m->valid = VM_PAGE_BITS_ALL;
4359 }
4360 
4361 /*
4362  *	vm_page_is_valid:
4363  *
4364  *	Is the (partial) page valid?  Note that in the degenerate case
4365  *	where size == 0, this returns FALSE if the page is entirely
4366  *	invalid and TRUE otherwise.
4367  */
4368 int
4369 vm_page_is_valid(vm_page_t m, int base, int size)
4370 {
4371 	vm_page_bits_t bits;
4372 
4373 	VM_OBJECT_ASSERT_LOCKED(m->object);
4374 	bits = vm_page_bits(base, size);
4375 	return (m->valid != 0 && (m->valid & bits) == bits);
4376 }
4377 
4378 /*
4379  * Returns true if all of the specified predicates are true for the entire
4380  * (super)page and false otherwise.
4381  */
4382 bool
4383 vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m)
4384 {
4385 	vm_object_t object;
4386 	int i, npages;
4387 
4388 	object = m->object;
4389 	if (skip_m != NULL && skip_m->object != object)
4390 		return (false);
4391 	VM_OBJECT_ASSERT_LOCKED(object);
4392 	npages = atop(pagesizes[m->psind]);
4393 
4394 	/*
4395 	 * The physically contiguous pages that make up a superpage, i.e., a
4396 	 * page with a page size index ("psind") greater than zero, will
4397 	 * occupy adjacent entries in vm_page_array[].
4398 	 */
4399 	for (i = 0; i < npages; i++) {
4400 		/* Always test object consistency, including "skip_m". */
4401 		if (m[i].object != object)
4402 			return (false);
4403 		if (&m[i] == skip_m)
4404 			continue;
4405 		if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i]))
4406 			return (false);
4407 		if ((flags & PS_ALL_DIRTY) != 0) {
4408 			/*
4409 			 * Calling vm_page_test_dirty() or pmap_is_modified()
4410 			 * might stop this case from spuriously returning
4411 			 * "false".  However, that would require a write lock
4412 			 * on the object containing "m[i]".
4413 			 */
4414 			if (m[i].dirty != VM_PAGE_BITS_ALL)
4415 				return (false);
4416 		}
4417 		if ((flags & PS_ALL_VALID) != 0 &&
4418 		    m[i].valid != VM_PAGE_BITS_ALL)
4419 			return (false);
4420 	}
4421 	return (true);
4422 }
4423 
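/*
 * Usage sketch (illustrative only): a caller considering a superpage
 * mapping might require that every base page be fully valid before
 * proceeding, where "m" is the first base page and the object lock is
 * held as asserted above:
 *
 *	if (vm_page_ps_test(m, PS_ALL_VALID, NULL)) {
 *		... map the superpage ...
 *	}
 */
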
4424 /*
4425  * Set the page's dirty bits if the page is modified.
4426  */
4427 void
4428 vm_page_test_dirty(vm_page_t m)
4429 {
4430 
4431 	VM_OBJECT_ASSERT_WLOCKED(m->object);
4432 	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
4433 		vm_page_dirty(m);
4434 }
4435 
4436 void
4437 vm_page_lock_KBI(vm_page_t m, const char *file, int line)
4438 {
4439 
4440 	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
4441 }
4442 
4443 void
4444 vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
4445 {
4446 
4447 	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
4448 }
4449 
4450 int
4451 vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
4452 {
4453 
4454 	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
4455 }
4456 
4457 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
4458 void
4459 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
4460 {
4461 
4462 	vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
4463 }
4464 
4465 void
4466 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
4467 {
4468 
4469 	mtx_assert_(vm_page_lockptr(m), a, file, line);
4470 }
4471 #endif
4472 
4473 #ifdef INVARIANTS
4474 void
4475 vm_page_object_lock_assert(vm_page_t m)
4476 {
4477 
4478 	/*
4479 	 * Certain of the page's fields may only be modified by the
4480 	 * holder of the containing object's lock or the exclusive busy
4481 	 * holder.  Unfortunately, the holder of the write busy is
4482 	 * not recorded, and thus cannot be checked here.
4483 	 */
4484 	if (m->object != NULL && !vm_page_xbusied(m))
4485 		VM_OBJECT_ASSERT_WLOCKED(m->object);
4486 }
4487 
4488 void
4489 vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits)
4490 {
4491 
4492 	if ((bits & PGA_WRITEABLE) == 0)
4493 		return;
4494 
4495 	/*
4496 	 * The PGA_WRITEABLE flag can only be set if the page is
4497 	 * managed, is exclusively busied or the object is locked.
4498 	 * Currently, this flag is only set by pmap_enter().
4499 	 */
4500 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4501 	    ("PGA_WRITEABLE on unmanaged page"));
4502 	if (!vm_page_xbusied(m))
4503 		VM_OBJECT_ASSERT_LOCKED(m->object);
4504 }
4505 #endif
4506 
4507 #include "opt_ddb.h"
4508 #ifdef DDB
4509 #include <sys/kernel.h>
4510 
4511 #include <ddb/ddb.h>
4512 
4513 DB_SHOW_COMMAND(page, vm_page_print_page_info)
4514 {
4515 
4516 	db_printf("vm_cnt.v_free_count: %d\n", vm_free_count());
4517 	db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count());
4518 	db_printf("vm_cnt.v_active_count: %d\n", vm_active_count());
4519 	db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count());
4520 	db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count());
4521 	db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
4522 	db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
4523 	db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
4524 	db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
4525 }
4526 
4527 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
4528 {
4529 	int dom;
4530 
4531 	db_printf("pq_free %d\n", vm_free_count());
4532 	for (dom = 0; dom < vm_ndomains; dom++) {
4533 		db_printf(
4534     "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n",
4535 		    dom,
4536 		    vm_dom[dom].vmd_page_count,
4537 		    vm_dom[dom].vmd_free_count,
4538 		    vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
4539 		    vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
4540 		    vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt,
4541 		    vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt);
4542 	}
4543 }
4544 
4545 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
4546 {
4547 	vm_page_t m;
4548 	boolean_t phys, virt;
4549 
4550 	if (!have_addr) {
4551 		db_printf("show pginfo addr\n");
4552 		return;
4553 	}
4554 
4555 	phys = strchr(modif, 'p') != NULL;
4556 	virt = strchr(modif, 'v') != NULL;
4557 	if (virt)
4558 		m = PHYS_TO_VM_PAGE(pmap_kextract(addr));
4559 	else if (phys)
4560 		m = PHYS_TO_VM_PAGE(addr);
4561 	else
4562 		m = (vm_page_t)addr;
4563 	db_printf(
4564     "page %p obj %p pidx 0x%jx phys 0x%jx q %d wire %d\n"
4565     "  af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
4566 	    m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
4567 	    m->queue, m->wire_count, m->aflags, m->oflags,
4568 	    m->flags, m->act_count, m->busy_lock, m->valid, m->dirty);
4569 }
4570 #endif /* DDB */
4571