1 /*-
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * The Mach Operating System project at Carnegie-Mellon University.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
34  */
35 
36 /*-
37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38  * All rights reserved.
39  *
40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  */
62 
63 /*
64  *			GENERAL RULES ON VM_PAGE MANIPULATION
65  *
66  *	- A page queue lock is required when adding or removing a page from a
67  *	  page queue (vm_pagequeues[]), regardless of other locks or the
68  *	  busy state of a page.
69  *
70  *		* In general, no thread besides the page daemon can acquire or
71  *		  hold more than one page queue lock at a time.
72  *
73  *		* The page daemon can acquire and hold any pair of page queue
74  *		  locks in any order.
75  *
76  *	- The object lock is required when inserting or removing
77  *	  pages from an object (vm_page_insert() or vm_page_remove()).
78  *
79  */
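
/*
 * Editorial sketch (not part of the original file): a hypothetical caller
 * that obeys the rules above when activating a managed page takes the
 * object lock and the page lock itself and lets the VM code take the page
 * queue lock internally, roughly:
 *
 *	VM_OBJECT_WLOCK(object);	// assumed object-locking macro
 *	vm_page_lock(m);
 *	vm_page_activate(m);		// acquires the page queue lock inside
 *	vm_page_unlock(m);
 *	VM_OBJECT_WUNLOCK(object);
 */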
80 
81 /*
82  *	Resident memory management module.
83  */
84 
85 #include <sys/cdefs.h>
86 __FBSDID("$FreeBSD$");
87 
88 #include "opt_vm.h"
89 
90 #include <sys/param.h>
91 #include <sys/systm.h>
92 #include <sys/lock.h>
93 #include <sys/kernel.h>
94 #include <sys/limits.h>
95 #include <sys/malloc.h>
96 #include <sys/msgbuf.h>
97 #include <sys/mutex.h>
98 #include <sys/proc.h>
99 #include <sys/rwlock.h>
100 #include <sys/sysctl.h>
101 #include <sys/vmmeter.h>
102 #include <sys/vnode.h>
103 
104 #include <vm/vm.h>
105 #include <vm/pmap.h>
106 #include <vm/vm_param.h>
107 #include <vm/vm_kern.h>
108 #include <vm/vm_object.h>
109 #include <vm/vm_page.h>
110 #include <vm/vm_pageout.h>
111 #include <vm/vm_pager.h>
112 #include <vm/vm_phys.h>
113 #include <vm/vm_radix.h>
114 #include <vm/vm_reserv.h>
115 #include <vm/vm_extern.h>
116 #include <vm/uma.h>
117 #include <vm/uma_int.h>
118 
119 #include <machine/md_var.h>
120 
121 /*
122  *	Associated with each page of user-allocatable memory is a
123  *	page structure.
124  */
125 
126 struct vm_pagequeue vm_pagequeues[PQ_COUNT] = {
127 	[PQ_INACTIVE] = {
128 		.pq_pl = TAILQ_HEAD_INITIALIZER(
129 		    vm_pagequeues[PQ_INACTIVE].pq_pl),
130 		.pq_cnt = &cnt.v_inactive_count,
131 		.pq_name = "vm inactive pagequeue"
132 	},
133 	[PQ_ACTIVE] = {
134 		.pq_pl = TAILQ_HEAD_INITIALIZER(
135 		    vm_pagequeues[PQ_ACTIVE].pq_pl),
136 		.pq_cnt = &cnt.v_active_count,
137 		.pq_name = "vm active pagequeue"
138 	}
139 };
140 struct mtx_padalign vm_page_queue_free_mtx;
141 
142 struct mtx_padalign pa_lock[PA_LOCK_COUNT];
143 
144 vm_page_t vm_page_array;
145 long vm_page_array_size;
146 long first_page;
147 int vm_page_zero_count;
148 
149 static int boot_pages = UMA_BOOT_PAGES;
150 TUNABLE_INT("vm.boot_pages", &boot_pages);
151 SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
152 	"number of pages allocated for bootstrapping the VM system");
153 
154 static int pa_tryrelock_restart;
155 SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
156     &pa_tryrelock_restart, 0, "Number of tryrelock restarts");
157 
158 static uma_zone_t fakepg_zone;
159 
160 static struct vnode *vm_page_alloc_init(vm_page_t m);
161 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
162 static void vm_page_enqueue(int queue, vm_page_t m);
163 static void vm_page_init_fakepg(void *dummy);
164 
165 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);
166 
167 static void
168 vm_page_init_fakepg(void *dummy)
169 {
170 
171 	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
172 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
173 }
174 
175 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
176 #if PAGE_SIZE == 32768
177 #ifdef CTASSERT
178 CTASSERT(sizeof(u_long) >= 8);
179 #endif
180 #endif
181 
182 /*
183  * Try to acquire a physical address lock while a pmap is locked.  If we
184  * fail to trylock, we drop the pmap lock, acquire the PA lock outright,
185  * relock the pmap, and cache the locked pa in *locked.  The caller should
186  * then restart its loop in case the virtual-to-physical mapping has changed.
187  */
188 int
189 vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
190 {
191 	vm_paddr_t lockpa;
192 
193 	lockpa = *locked;
194 	*locked = pa;
195 	if (lockpa) {
196 		PA_LOCK_ASSERT(lockpa, MA_OWNED);
197 		if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
198 			return (0);
199 		PA_UNLOCK(lockpa);
200 	}
201 	if (PA_TRYLOCK(pa))
202 		return (0);
203 	PMAP_UNLOCK(pmap);
204 	atomic_add_int(&pa_tryrelock_restart, 1);
205 	PA_LOCK(pa);
206 	PMAP_LOCK(pmap);
207 	return (EAGAIN);
208 }
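
/*
 * Editorial sketch (hypothetical caller, not from the original file): pmap
 * code typically restarts its lookup whenever this function returns EAGAIN,
 * because the pmap lock was dropped and the translation may have changed:
 *
 *	retry:
 *		pa = ...;	// look up the physical address under the pmap lock
 *		if (vm_page_pa_tryrelock(pmap, pa, &locked_pa) != 0)
 *			goto retry;
 *		...		// operate on the page with its PA lock held
 */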
209 
210 /*
211  *	vm_set_page_size:
212  *
213  *	Sets the page size, perhaps based upon the memory
214  *	size.  Must be called before any use of page-size
215  *	dependent functions.
216  */
217 void
218 vm_set_page_size(void)
219 {
220 	if (cnt.v_page_size == 0)
221 		cnt.v_page_size = PAGE_SIZE;
222 	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
223 		panic("vm_set_page_size: page size not a power of two");
224 }
225 
226 /*
227  *	vm_page_blacklist_lookup:
228  *
229  *	See if a physical address in this page has been listed
230  *	in the blacklist tunable.  Entries in the tunable are
231  *	separated by spaces or commas.  If an invalid integer is
232  *	encountered then the rest of the string is skipped.
233  */
234 static int
235 vm_page_blacklist_lookup(char *list, vm_paddr_t pa)
236 {
237 	vm_paddr_t bad;
238 	char *cp, *pos;
239 
240 	for (pos = list; *pos != '\0'; pos = cp) {
241 		bad = strtoq(pos, &cp, 0);
242 		if (*cp != '\0') {
243 			if (*cp == ' ' || *cp == ',') {
244 				cp++;
245 				if (cp == pos)
246 					continue;
247 			} else
248 				break;
249 		}
250 		if (pa == trunc_page(bad))
251 			return (1);
252 	}
253 	return (0);
254 }
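
/*
 * Editorial example (assumed usage, derived from the parser above): the
 * "vm.blacklist" tunable consulted by vm_page_startup() lists physical
 * addresses separated by spaces or commas, e.g. in loader.conf:
 *
 *	vm.blacklist="0x12345000,0x23456000"
 *
 * Any page whose truncated physical address matches an entry is skipped
 * instead of being added to the free lists.
 */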
255 
256 /*
257  *	vm_page_startup:
258  *
259  *	Initializes the resident memory module.
260  *
261  *	Allocates memory for the page cells, and
262  *	for the object/offset-to-page hash table headers.
263  *	Each page cell is initialized and placed on the free list.
264  */
265 vm_offset_t
266 vm_page_startup(vm_offset_t vaddr)
267 {
268 	vm_offset_t mapped;
269 	vm_paddr_t page_range;
270 	vm_paddr_t new_end;
271 	int i;
272 	vm_paddr_t pa;
273 	vm_paddr_t last_pa;
274 	char *list;
275 
276 	/* the biggest memory array is the second group of pages */
277 	vm_paddr_t end;
278 	vm_paddr_t biggestsize;
279 	vm_paddr_t low_water, high_water;
280 	int biggestone;
281 
282 	biggestsize = 0;
283 	biggestone = 0;
284 	vaddr = round_page(vaddr);
285 
286 	for (i = 0; phys_avail[i + 1]; i += 2) {
287 		phys_avail[i] = round_page(phys_avail[i]);
288 		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
289 	}
290 
291 	low_water = phys_avail[0];
292 	high_water = phys_avail[1];
293 
294 	for (i = 0; phys_avail[i + 1]; i += 2) {
295 		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];
296 
297 		if (size > biggestsize) {
298 			biggestone = i;
299 			biggestsize = size;
300 		}
301 		if (phys_avail[i] < low_water)
302 			low_water = phys_avail[i];
303 		if (phys_avail[i + 1] > high_water)
304 			high_water = phys_avail[i + 1];
305 	}
306 
307 #ifdef XEN
308 	low_water = 0;
309 #endif
310 
311 	end = phys_avail[biggestone+1];
312 
313 	/*
314 	 * Initialize the page and queue locks.
315 	 */
316 	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
317 	for (i = 0; i < PA_LOCK_COUNT; i++)
318 		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
319 	for (i = 0; i < PQ_COUNT; i++)
320 		vm_pagequeue_init_lock(&vm_pagequeues[i]);
321 
322 	/*
323 	 * Allocate memory for use when bootstrapping the kernel memory
324 	 * allocator.
325 	 */
326 	new_end = end - (boot_pages * UMA_SLAB_SIZE);
327 	new_end = trunc_page(new_end);
328 	mapped = pmap_map(&vaddr, new_end, end,
329 	    VM_PROT_READ | VM_PROT_WRITE);
330 	bzero((void *)mapped, end - new_end);
331 	uma_startup((void *)mapped, boot_pages);
332 
333 #if defined(__amd64__) || defined(__i386__) || defined(__arm__) || \
334     defined(__mips__)
335 	/*
336 	 * Allocate a bitmap to indicate whether a given physical page
337 	 * needs to be included in a minidump.
338 	 *
339 	 * The amd64 port needs this to indicate which direct map pages
340 	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
341 	 *
342 	 * However, i386 still needs this workspace internally within the
343 	 * However, i386 still needs this workspace internally within the
344 	 * minidump code.  In theory, these pages are not needed on i386, but
345 	 * they are included in case the sf_buf code decides to use them.
346 	last_pa = 0;
347 	for (i = 0; dump_avail[i + 1] != 0; i += 2)
348 		if (dump_avail[i + 1] > last_pa)
349 			last_pa = dump_avail[i + 1];
350 	page_range = last_pa / PAGE_SIZE;
351 	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
352 	new_end -= vm_page_dump_size;
353 	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
354 	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
355 	bzero((void *)vm_page_dump, vm_page_dump_size);
356 #endif
357 #ifdef __amd64__
358 	/*
359 	 * Request that the physical pages underlying the message buffer be
360 	 * included in a crash dump.  Since the message buffer is accessed
361 	 * through the direct map, they are not automatically included.
362 	 */
363 	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
364 	last_pa = pa + round_page(msgbufsize);
365 	while (pa < last_pa) {
366 		dump_add_page(pa);
367 		pa += PAGE_SIZE;
368 	}
369 #endif
370 	/*
371 	 * Compute the number of pages of memory that will be available for
372 	 * use (taking into account the overhead of a page structure per
373 	 * page).
374 	 */
375 	first_page = low_water / PAGE_SIZE;
376 #ifdef VM_PHYSSEG_SPARSE
377 	page_range = 0;
378 	for (i = 0; phys_avail[i + 1] != 0; i += 2)
379 		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
380 #elif defined(VM_PHYSSEG_DENSE)
381 	page_range = high_water / PAGE_SIZE - first_page;
382 #else
383 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
384 #endif
385 	end = new_end;
386 
387 	/*
388 	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
389 	 */
390 	vaddr += PAGE_SIZE;
391 
392 	/*
393 	 * Initialize the mem entry structures now, and put them in the free
394 	 * queue.
395 	 */
396 	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
397 	mapped = pmap_map(&vaddr, new_end, end,
398 	    VM_PROT_READ | VM_PROT_WRITE);
399 	vm_page_array = (vm_page_t) mapped;
400 #if VM_NRESERVLEVEL > 0
401 	/*
402 	 * Allocate memory for the reservation management system's data
403 	 * structures.
404 	 */
405 	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
406 #endif
407 #if defined(__amd64__) || defined(__mips__)
408 	/*
409 	 * pmap_map on amd64 and mips can come out of the direct-map, not kvm
410 	 * like i386, so the pages must be tracked for a crashdump to include
411 	 * this data.  This includes the vm_page_array and the early UMA
412 	 * bootstrap pages.
413 	 */
414 	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
415 		dump_add_page(pa);
416 #endif
417 	phys_avail[biggestone + 1] = new_end;
418 
419 	/*
420 	 * Clear all of the page structures
421 	 */
422 	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
423 	for (i = 0; i < page_range; i++)
424 		vm_page_array[i].order = VM_NFREEORDER;
425 	vm_page_array_size = page_range;
426 
427 	/*
428 	 * Initialize the physical memory allocator.
429 	 */
430 	vm_phys_init();
431 
432 	/*
433 	 * Add every available physical page that is not blacklisted to
434 	 * the free lists.
435 	 */
436 	cnt.v_page_count = 0;
437 	cnt.v_free_count = 0;
438 	list = getenv("vm.blacklist");
439 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
440 		pa = phys_avail[i];
441 		last_pa = phys_avail[i + 1];
442 		while (pa < last_pa) {
443 			if (list != NULL &&
444 			    vm_page_blacklist_lookup(list, pa))
445 				printf("Skipping page with pa 0x%jx\n",
446 				    (uintmax_t)pa);
447 			else
448 				vm_phys_add_page(pa);
449 			pa += PAGE_SIZE;
450 		}
451 	}
452 	freeenv(list);
453 #if VM_NRESERVLEVEL > 0
454 	/*
455 	 * Initialize the reservation management system.
456 	 */
457 	vm_reserv_init();
458 #endif
459 	return (vaddr);
460 }
461 
462 void
463 vm_page_reference(vm_page_t m)
464 {
465 
466 	vm_page_aflag_set(m, PGA_REFERENCED);
467 }
468 
469 void
470 vm_page_busy(vm_page_t m)
471 {
472 
473 	VM_OBJECT_ASSERT_WLOCKED(m->object);
474 	KASSERT((m->oflags & VPO_BUSY) == 0,
475 	    ("vm_page_busy: page already busy!!!"));
476 	m->oflags |= VPO_BUSY;
477 }
478 
479 /*
480  *      vm_page_flash:
481  *
482  *      wakeup anyone waiting for the page.
483  *      Wake up anyone waiting for the page.
484 void
485 vm_page_flash(vm_page_t m)
486 {
487 
488 	VM_OBJECT_ASSERT_WLOCKED(m->object);
489 	if (m->oflags & VPO_WANTED) {
490 		m->oflags &= ~VPO_WANTED;
491 		wakeup(m);
492 	}
493 }
494 
495 /*
496  *      vm_page_wakeup:
497  *
498  *      Clear the VPO_BUSY flag and wake up anyone waiting for the
499  *      page.
500  *
501  */
502 void
503 vm_page_wakeup(vm_page_t m)
504 {
505 
506 	VM_OBJECT_ASSERT_WLOCKED(m->object);
507 	KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
508 	m->oflags &= ~VPO_BUSY;
509 	vm_page_flash(m);
510 }
511 
512 void
513 vm_page_io_start(vm_page_t m)
514 {
515 
516 	VM_OBJECT_ASSERT_WLOCKED(m->object);
517 	m->busy++;
518 }
519 
520 void
521 vm_page_io_finish(vm_page_t m)
522 {
523 
524 	VM_OBJECT_ASSERT_WLOCKED(m->object);
525 	KASSERT(m->busy > 0, ("vm_page_io_finish: page %p is not busy", m));
526 	m->busy--;
527 	if (m->busy == 0)
528 		vm_page_flash(m);
529 }
530 
531 /*
532  * Keep the page from being freed by the page daemon.  This has
533  * much the same effect as wiring, except with much lower
534  * overhead, and it should be used only for *very* temporary
535  * holding ("wiring").
536  */
537 void
538 vm_page_hold(vm_page_t mem)
539 {
540 
541 	vm_page_lock_assert(mem, MA_OWNED);
542         mem->hold_count++;
543 }
544 
545 void
546 vm_page_unhold(vm_page_t mem)
547 {
548 
549 	vm_page_lock_assert(mem, MA_OWNED);
550 	--mem->hold_count;
551 	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
552 	if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0)
553 		vm_page_free_toq(mem);
554 }
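
/*
 * Editorial sketch (hypothetical caller, not from the original file): a
 * short-lived hold is taken and dropped under the page lock, e.g.
 *
 *	vm_page_lock(m);
 *	vm_page_hold(m);
 *	vm_page_unlock(m);
 *	...			// window in which the page must not be freed
 *	vm_page_lock(m);
 *	vm_page_unhold(m);	// may free the page if PG_UNHOLDFREE was set
 *	vm_page_unlock(m);
 */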
555 
556 /*
557  *	vm_page_unhold_pages:
558  *
559  *	Unhold each of the pages that is referenced by the given array.
560  */
561 void
562 vm_page_unhold_pages(vm_page_t *ma, int count)
563 {
564 	struct mtx *mtx, *new_mtx;
565 
566 	mtx = NULL;
567 	for (; count != 0; count--) {
568 		/*
569 		 * Avoid releasing and reacquiring the same page lock.
570 		 */
571 		new_mtx = vm_page_lockptr(*ma);
572 		if (mtx != new_mtx) {
573 			if (mtx != NULL)
574 				mtx_unlock(mtx);
575 			mtx = new_mtx;
576 			mtx_lock(mtx);
577 		}
578 		vm_page_unhold(*ma);
579 		ma++;
580 	}
581 	if (mtx != NULL)
582 		mtx_unlock(mtx);
583 }
584 
585 vm_page_t
586 PHYS_TO_VM_PAGE(vm_paddr_t pa)
587 {
588 	vm_page_t m;
589 
590 #ifdef VM_PHYSSEG_SPARSE
591 	m = vm_phys_paddr_to_vm_page(pa);
592 	if (m == NULL)
593 		m = vm_phys_fictitious_to_vm_page(pa);
594 	return (m);
595 #elif defined(VM_PHYSSEG_DENSE)
596 	long pi;
597 
598 	pi = atop(pa);
599 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
600 		m = &vm_page_array[pi - first_page];
601 		return (m);
602 	}
603 	return (vm_phys_fictitious_to_vm_page(pa));
604 #else
605 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
606 #endif
607 }
608 
609 /*
610  *	vm_page_getfake:
611  *
612  *	Create a fictitious page with the specified physical address and
613  *	memory attribute.  The memory attribute is the only machine-
614  *	dependent aspect of a fictitious page that must be initialized.
615  */
616 vm_page_t
617 vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
618 {
619 	vm_page_t m;
620 
621 	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
622 	vm_page_initfake(m, paddr, memattr);
623 	return (m);
624 }
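
/*
 * Editorial sketch (hypothetical usage, not from the original file): a
 * device-pager-style consumer wraps a device physical address in a
 * fictitious page and releases it when done, e.g.
 *
 *	m = vm_page_getfake(paddr, VM_MEMATTR_UNCACHEABLE);
 *	...			// hand the fictitious page out or map it
 *	vm_page_putfake(m);
 *
 * VM_MEMATTR_UNCACHEABLE is only an illustrative attribute; the correct
 * attribute depends on the device being mapped.
 */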
625 
626 void
627 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
628 {
629 
630 	if ((m->flags & PG_FICTITIOUS) != 0) {
631 		/*
632 		 * The page's memattr might have changed since the
633 		 * previous initialization.  Update the pmap to the
634 		 * new memattr.
635 		 */
636 		goto memattr;
637 	}
638 	m->phys_addr = paddr;
639 	m->queue = PQ_NONE;
640 	/* Fictitious pages don't use "segind". */
641 	m->flags = PG_FICTITIOUS;
642 	/* Fictitious pages don't use "order" or "pool". */
643 	m->oflags = VPO_BUSY | VPO_UNMANAGED;
644 	m->wire_count = 1;
645 memattr:
646 	pmap_page_set_memattr(m, memattr);
647 }
648 
649 /*
650  *	vm_page_putfake:
651  *
652  *	Release a fictitious page.
653  */
654 void
655 vm_page_putfake(vm_page_t m)
656 {
657 
658 	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
659 	KASSERT((m->flags & PG_FICTITIOUS) != 0,
660 	    ("vm_page_putfake: bad page %p", m));
661 	uma_zfree(fakepg_zone, m);
662 }
663 
664 /*
665  *	vm_page_updatefake:
666  *
667  *	Update the given fictitious page to the specified physical address and
668  *	memory attribute.
669  */
670 void
671 vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
672 {
673 
674 	KASSERT((m->flags & PG_FICTITIOUS) != 0,
675 	    ("vm_page_updatefake: bad page %p", m));
676 	m->phys_addr = paddr;
677 	pmap_page_set_memattr(m, memattr);
678 }
679 
680 /*
681  *	vm_page_free:
682  *
683  *	Free a page.
684  */
685 void
686 vm_page_free(vm_page_t m)
687 {
688 
689 	m->flags &= ~PG_ZERO;
690 	vm_page_free_toq(m);
691 }
692 
693 /*
694  *	vm_page_free_zero:
695  *
696  *	Free a page to the zeroed-pages queue.
697  */
698 void
699 vm_page_free_zero(vm_page_t m)
700 {
701 
702 	m->flags |= PG_ZERO;
703 	vm_page_free_toq(m);
704 }
705 
706 /*
707  * Unbusy and handle the page queueing for a page from the VOP_GETPAGES()
708  * array which is not the requested page.
709  */
710 void
711 vm_page_readahead_finish(vm_page_t m)
712 {
713 
714 	if (m->valid != 0) {
715 		/*
716 		 * Since the page is not the requested page, whether
717 		 * it should be activated or deactivated is not
718 		 * obvious.  Empirical results have shown that
719 		 * deactivating the page is usually the best choice,
720 		 * unless the page is wanted by another thread.
721 		 */
722 		if (m->oflags & VPO_WANTED) {
723 			vm_page_lock(m);
724 			vm_page_activate(m);
725 			vm_page_unlock(m);
726 		} else {
727 			vm_page_lock(m);
728 			vm_page_deactivate(m);
729 			vm_page_unlock(m);
730 		}
731 		vm_page_wakeup(m);
732 	} else {
733 		/*
734 		 * Free the completely invalid page.  Such a page state
735 		 * occurs due to a short read operation that did not
736 		 * cover our page at all, or in the case when a read
737 		 * error happens.
738 		 */
739 		vm_page_lock(m);
740 		vm_page_free(m);
741 		vm_page_unlock(m);
742 	}
743 }
744 
745 /*
746  *	vm_page_sleep:
747  *
748  *	Sleep and release the page lock.
749  *
750  *	The object containing the given page must be locked.
751  */
752 void
753 vm_page_sleep(vm_page_t m, const char *msg)
754 {
755 
756 	VM_OBJECT_ASSERT_WLOCKED(m->object);
757 	if (mtx_owned(vm_page_lockptr(m)))
758 		vm_page_unlock(m);
759 
760 	/*
761 	 * It's possible that while we sleep, the page will get
762 	 * unbusied and freed.  If we are holding the object
763 	 * lock, we will assume we hold a reference to the object
764 	 * such that even if m->object changes, we can re-lock
765 	 * it.
766 	 */
767 	m->oflags |= VPO_WANTED;
768 	VM_OBJECT_SLEEP(m->object, m, PVM, msg, 0);
769 }
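
/*
 * Editorial sketch (hypothetical caller, not from the original file):
 * because the page can be unbusied and even freed while the caller sleeps,
 * callers typically re-look up the page after every sleep, e.g.
 *
 *	while ((m = vm_page_lookup(object, pindex)) != NULL &&
 *	    (m->oflags & VPO_BUSY) != 0)
 *		vm_page_sleep(m, "pgwait");
 */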
770 
771 /*
772  *	vm_page_dirty_KBI:		[ internal use only ]
773  *
774  *	Set all bits in the page's dirty field.
775  *
776  *	The object containing the specified page must be locked if the
777  *	call is made from the machine-independent layer.
778  *
779  *	See vm_page_clear_dirty_mask().
780  *
781  *	This function should only be called by vm_page_dirty().
782  */
783 void
784 vm_page_dirty_KBI(vm_page_t m)
785 {
786 
787 	/* These assertions refer to this operation by its public name. */
788 	KASSERT((m->flags & PG_CACHED) == 0,
789 	    ("vm_page_dirty: page in cache!"));
790 	KASSERT(!VM_PAGE_IS_FREE(m),
791 	    ("vm_page_dirty: page is free!"));
792 	KASSERT(m->valid == VM_PAGE_BITS_ALL,
793 	    ("vm_page_dirty: page is invalid!"));
794 	m->dirty = VM_PAGE_BITS_ALL;
795 }
796 
797 /*
798  *	vm_page_insert:		[ internal use only ]
799  *
800  *	Inserts the given mem entry into the object and object list.
801  *
802  *	The pagetables are not updated but will presumably fault the page
803  *	in if necessary, or if a kernel page the caller will at some point
804  *	enter the page into the kernel's pmap.  We are not allowed to sleep
805  *	here so we *can't* do this anyway.
806  *
807  *	The object must be locked.
808  */
809 void
810 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
811 {
812 	vm_page_t neighbor;
813 
814 	VM_OBJECT_ASSERT_WLOCKED(object);
815 	if (m->object != NULL)
816 		panic("vm_page_insert: page already inserted");
817 
818 	/*
819 	 * Record the object/offset pair in this page
820 	 */
821 	m->object = object;
822 	m->pindex = pindex;
823 
824 	/*
825 	 * Now link into the object's ordered list of backed pages.
826 	 */
827 	if (object->resident_page_count == 0) {
828 		TAILQ_INSERT_TAIL(&object->memq, m, listq);
829 	} else {
830 		neighbor = vm_radix_lookup_le(&object->rtree, pindex);
831 		if (neighbor != NULL) {
832 		    	KASSERT(pindex > neighbor->pindex,
833 			    ("vm_page_insert: offset %ju less than %ju",
834 			    (uintmax_t)pindex, (uintmax_t)neighbor->pindex));
835 			TAILQ_INSERT_AFTER(&object->memq, neighbor, m, listq);
836 		} else
837 			TAILQ_INSERT_HEAD(&object->memq, m, listq);
838 	}
839 	vm_radix_insert(&object->rtree, m);
840 
841 	/*
842 	 * Show that the object has one more resident page.
843 	 */
844 	object->resident_page_count++;
845 
846 	/*
847 	 * Hold the vnode until the last page is released.
848 	 */
849 	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
850 		vhold(object->handle);
851 
852 	/*
853 	 * Since we are inserting a new and possibly dirty page,
854 	 * update the object's OBJ_MIGHTBEDIRTY flag.
855 	 */
856 	if (pmap_page_is_write_mapped(m))
857 		vm_object_set_writeable_dirty(object);
858 }
859 
860 /*
861  *	vm_page_remove:
862  *
863  *	Removes the given mem entry from the object/offset-page
864  *	table and the object page list, but does not invalidate/terminate
865  *	the backing store.
866  *
867  *	The underlying pmap entry (if any) is NOT removed here.
868  *
869  *	The object must be locked.  The page must be locked if it is managed.
870  */
871 void
872 vm_page_remove(vm_page_t m)
873 {
874 	vm_object_t object;
875 
876 	if ((m->oflags & VPO_UNMANAGED) == 0)
877 		vm_page_lock_assert(m, MA_OWNED);
878 	if ((object = m->object) == NULL)
879 		return;
880 	VM_OBJECT_ASSERT_WLOCKED(object);
881 	if (m->oflags & VPO_BUSY) {
882 		m->oflags &= ~VPO_BUSY;
883 		vm_page_flash(m);
884 	}
885 
886 	/*
887 	 * Now remove from the object's list of backed pages.
888 	 */
889 	vm_radix_remove(&object->rtree, m->pindex);
890 	TAILQ_REMOVE(&object->memq, m, listq);
891 
892 	/*
893 	 * And show that the object has one fewer resident page.
894 	 */
895 	object->resident_page_count--;
896 
897 	/*
898 	 * The vnode may now be recycled.
899 	 */
900 	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
901 		vdrop(object->handle);
902 
903 	m->object = NULL;
904 }
905 
906 /*
907  *	vm_page_lookup:
908  *
909  *	Returns the page associated with the object/offset
910  *	pair specified; if none is found, NULL is returned.
911  *
912  *	The object must be locked.
913  */
914 vm_page_t
915 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
916 {
917 
918 	VM_OBJECT_ASSERT_WLOCKED(object);
919 	return (vm_radix_lookup(&object->rtree, pindex));
920 }
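
/*
 * Editorial sketch (hypothetical caller, not from the original file): the
 * lookup is performed with the object write-locked, as asserted above:
 *
 *	VM_OBJECT_WLOCK(object);	// assumed object-locking macro
 *	m = vm_page_lookup(object, pindex);
 *	if (m != NULL)
 *		...			// the page is resident
 *	VM_OBJECT_WUNLOCK(object);
 */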
921 
922 /*
923  *	vm_page_find_least:
924  *
925  *	Returns the page associated with the object with least pindex
926  *	greater than or equal to the parameter pindex, or NULL.
927  *
928  *	The object must be locked.
929  */
930 vm_page_t
931 vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
932 {
933 	vm_page_t m;
934 
935 	VM_OBJECT_ASSERT_WLOCKED(object);
936 	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
937 		m = vm_radix_lookup_ge(&object->rtree, pindex);
938 	return (m);
939 }
940 
941 /*
942  * Returns the given page's successor (by pindex) within the object if it is
943  * resident; if none is found, NULL is returned.
944  *
945  * The object must be locked.
946  */
947 vm_page_t
948 vm_page_next(vm_page_t m)
949 {
950 	vm_page_t next;
951 
952 	VM_OBJECT_ASSERT_WLOCKED(m->object);
953 	if ((next = TAILQ_NEXT(m, listq)) != NULL &&
954 	    next->pindex != m->pindex + 1)
955 		next = NULL;
956 	return (next);
957 }
958 
959 /*
960  * Returns the given page's predecessor (by pindex) within the object if it is
961  * resident; if none is found, NULL is returned.
962  *
963  * The object must be locked.
964  */
965 vm_page_t
966 vm_page_prev(vm_page_t m)
967 {
968 	vm_page_t prev;
969 
970 	VM_OBJECT_ASSERT_WLOCKED(m->object);
971 	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
972 	    prev->pindex != m->pindex - 1)
973 		prev = NULL;
974 	return (prev);
975 }
976 
977 /*
978  *	vm_page_rename:
979  *
980  *	Move the given memory entry from its
981  *	current object to the specified target object/offset.
982  *
983  *	Note: swap associated with the page must be invalidated by the move.  We
984  *	      have to do this for several reasons:  (1) we aren't freeing the
985  *	      page, (2) we are dirtying the page, (3) the VM system is probably
986  *	      moving the page from object A to B, and will then later move
987  *	      the backing store from A to B and we can't have a conflict.
988  *
989  *	Note: we *always* dirty the page.  It is necessary both for the
990  *	      fact that we moved it, and because we may be invalidating
991  *	      swap.  If the page is on the cache, we have to deactivate it
992  *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
993  *	      on the cache.
994  *
995  *	The objects must be locked.  The page must be locked if it is managed.
996  */
997 void
998 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
999 {
1000 
1001 	vm_page_remove(m);
1002 	vm_page_insert(m, new_object, new_pindex);
1003 	vm_page_dirty(m);
1004 }
1005 
1006 /*
1007  *	Convert all of the given object's cached pages that have a
1008  *	pindex within the given range into free pages.  If the value
1009  *	zero is given for "end", then the range's upper bound is
1010  *	infinity.  If the given object is backed by a vnode and it
1011  *	transitions from having one or more cached pages to none, the
1012  *	vnode's hold count is reduced.
1013  */
1014 void
1015 vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1016 {
1017 	vm_page_t m;
1018 	boolean_t empty;
1019 
1020 	mtx_lock(&vm_page_queue_free_mtx);
1021 	if (__predict_false(vm_radix_is_empty(&object->cache))) {
1022 		mtx_unlock(&vm_page_queue_free_mtx);
1023 		return;
1024 	}
1025 	while ((m = vm_radix_lookup_ge(&object->cache, start)) != NULL) {
1026 		if (end != 0 && m->pindex >= end)
1027 			break;
1028 		vm_radix_remove(&object->cache, m->pindex);
1029 		m->object = NULL;
1030 		m->valid = 0;
1031 		/* Clear PG_CACHED and set PG_FREE. */
1032 		m->flags ^= PG_CACHED | PG_FREE;
1033 		KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
1034 		    ("vm_page_cache_free: page %p has inconsistent flags", m));
1035 		cnt.v_cache_count--;
1036 		cnt.v_free_count++;
1037 	}
1038 	empty = vm_radix_is_empty(&object->cache);
1039 	mtx_unlock(&vm_page_queue_free_mtx);
1040 	if (object->type == OBJT_VNODE && empty)
1041 		vdrop(object->handle);
1042 }
1043 
1044 /*
1045  *	Returns the cached page that is associated with the given
1046  *	object and offset.  If, however, none exists, returns NULL.
1047  *
1048  *	The free page queue must be locked.
1049  */
1050 static inline vm_page_t
1051 vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
1052 {
1053 
1054 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1055 	return (vm_radix_lookup(&object->cache, pindex));
1056 }
1057 
1058 /*
1059  *	Remove the given cached page from its containing object's
1060  *	collection of cached pages.
1061  *
1062  *	The free page queue must be locked.
1063  */
1064 static void
1065 vm_page_cache_remove(vm_page_t m)
1066 {
1067 
1068 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1069 	KASSERT((m->flags & PG_CACHED) != 0,
1070 	    ("vm_page_cache_remove: page %p is not cached", m));
1071 	vm_radix_remove(&m->object->cache, m->pindex);
1072 	m->object = NULL;
1073 	cnt.v_cache_count--;
1074 }
1075 
1076 /*
1077  *	Transfer all of the cached pages with offset greater than or
1078  *	equal to 'offidxstart' from the original object's cache to the
1079  *	new object's cache.  However, any cached pages with offset
1080  *	greater than or equal to the new object's size are kept in the
1081  *	original object.  Initially, the new object's cache must be
1082  *	empty.  Offset 'offidxstart' in the original object must
1083  *	correspond to offset zero in the new object.
1084  *
1085  *	The new object must be locked.
1086  */
1087 void
1088 vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
1089     vm_object_t new_object)
1090 {
1091 	vm_page_t m;
1092 
1093 	/*
1094 	 * Insertion into an object's collection of cached pages
1095 	 * requires the object to be locked.  In contrast, removal does
1096 	 * not.
1097 	 */
1098 	VM_OBJECT_ASSERT_WLOCKED(new_object);
1099 	KASSERT(vm_radix_is_empty(&new_object->cache),
1100 	    ("vm_page_cache_transfer: object %p has cached pages",
1101 	    new_object));
1102 	mtx_lock(&vm_page_queue_free_mtx);
1103 	while ((m = vm_radix_lookup_ge(&orig_object->cache,
1104 	    offidxstart)) != NULL) {
1105 		/*
1106 		 * Transfer all of the pages with offset greater than or
1107 		 * equal to 'offidxstart' from the original object's
1108 		 * cache to the new object's cache.
1109 		 */
1110 		if ((m->pindex - offidxstart) >= new_object->size)
1111 			break;
1112 		vm_radix_remove(&orig_object->cache, m->pindex);
1113 		/* Update the page's object and offset. */
1114 		m->object = new_object;
1115 		m->pindex -= offidxstart;
1116 		vm_radix_insert(&new_object->cache, m);
1117 	}
1118 	mtx_unlock(&vm_page_queue_free_mtx);
1119 }
1120 
1121 /*
1122  *	Returns TRUE if a cached page is associated with the given object and
1123  *	offset, and FALSE otherwise.
1124  *
1125  *	The object must be locked.
1126  */
1127 boolean_t
1128 vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
1129 {
1130 	vm_page_t m;
1131 
1132 	/*
1133 	 * Insertion into an object's collection of cached pages requires the
1134 	 * object to be locked.  Therefore, if the object is locked and the
1135 	 * object's collection is empty, there is no need to acquire the free
1136 	 * page queues lock in order to prove that the specified page doesn't
1137 	 * exist.
1138 	 */
1139 	VM_OBJECT_ASSERT_WLOCKED(object);
1140 	if (__predict_true(vm_object_cache_is_empty(object)))
1141 		return (FALSE);
1142 	mtx_lock(&vm_page_queue_free_mtx);
1143 	m = vm_page_cache_lookup(object, pindex);
1144 	mtx_unlock(&vm_page_queue_free_mtx);
1145 	return (m != NULL);
1146 }
1147 
1148 /*
1149  *	vm_page_alloc:
1150  *
1151  *	Allocate and return a page that is associated with the specified
1152  *	object and offset pair.  By default, this page has the flag VPO_BUSY
1153  *	set.
1154  *
1155  *	The caller must always specify an allocation class.
1156  *
1157  *	allocation classes:
1158  *	VM_ALLOC_NORMAL		normal process request
1159  *	VM_ALLOC_SYSTEM		system *really* needs a page
1160  *	VM_ALLOC_INTERRUPT	interrupt time request
1161  *
1162  *	optional allocation flags:
1163  *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
1164  *				intends to allocate
1165  *	VM_ALLOC_IFCACHED	return page only if it is cached
1166  *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
1167  *				is cached
1168  *	VM_ALLOC_NOBUSY		do not set the flag VPO_BUSY on the page
1169  *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
1170  *	VM_ALLOC_NOOBJ		page is not associated with an object and
1171  *				should not have the flag VPO_BUSY set
1172  *	VM_ALLOC_WIRED		wire the allocated page
1173  *	VM_ALLOC_ZERO		prefer a zeroed page
1174  *
1175  *	This routine may not sleep.
1176  */
1177 vm_page_t
1178 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
1179 {
1180 	struct vnode *vp = NULL;
1181 	vm_object_t m_object;
1182 	vm_page_t m;
1183 	int flags, req_class;
1184 
1185 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
1186 	    ("vm_page_alloc: inconsistent object/req"));
1187 	if (object != NULL)
1188 		VM_OBJECT_ASSERT_WLOCKED(object);
1189 
1190 	req_class = req & VM_ALLOC_CLASS_MASK;
1191 
1192 	/*
1193 	 * The page daemon is allowed to dig deeper into the free page list.
1194 	 */
1195 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1196 		req_class = VM_ALLOC_SYSTEM;
1197 
1198 	mtx_lock(&vm_page_queue_free_mtx);
1199 	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
1200 	    (req_class == VM_ALLOC_SYSTEM &&
1201 	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
1202 	    (req_class == VM_ALLOC_INTERRUPT &&
1203 	    cnt.v_free_count + cnt.v_cache_count > 0)) {
1204 		/*
1205 		 * Allocate from the free queue if the number of free pages
1206 		 * exceeds the minimum for the request class.
1207 		 */
1208 		if (object != NULL &&
1209 		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
1210 			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
1211 				mtx_unlock(&vm_page_queue_free_mtx);
1212 				return (NULL);
1213 			}
1214 			if (vm_phys_unfree_page(m))
1215 				vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
1216 #if VM_NRESERVLEVEL > 0
1217 			else if (!vm_reserv_reactivate_page(m))
1218 #else
1219 			else
1220 #endif
1221 				panic("vm_page_alloc: cache page %p is missing"
1222 				    " from the free queue", m);
1223 		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
1224 			mtx_unlock(&vm_page_queue_free_mtx);
1225 			return (NULL);
1226 #if VM_NRESERVLEVEL > 0
1227 		} else if (object == NULL || (object->flags & (OBJ_COLORED |
1228 		    OBJ_FICTITIOUS)) != OBJ_COLORED ||
1229 		    (m = vm_reserv_alloc_page(object, pindex)) == NULL) {
1230 #else
1231 		} else {
1232 #endif
1233 			m = vm_phys_alloc_pages(object != NULL ?
1234 			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
1235 #if VM_NRESERVLEVEL > 0
1236 			if (m == NULL && vm_reserv_reclaim_inactive()) {
1237 				m = vm_phys_alloc_pages(object != NULL ?
1238 				    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
1239 				    0);
1240 			}
1241 #endif
1242 		}
1243 	} else {
1244 		/*
1245 		 * Not allocatable, give up.
1246 		 */
1247 		mtx_unlock(&vm_page_queue_free_mtx);
1248 		atomic_add_int(&vm_pageout_deficit,
1249 		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
1250 		pagedaemon_wakeup();
1251 		return (NULL);
1252 	}
1253 
1254 	/*
1255 	 *  At this point we had better have found a good page.
1256 	 */
1257 	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
1258 	KASSERT(m->queue == PQ_NONE,
1259 	    ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
1260 	KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
1261 	KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
1262 	KASSERT(m->busy == 0, ("vm_page_alloc: page %p is busy", m));
1263 	KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
1264 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
1265 	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
1266 	    pmap_page_get_memattr(m)));
1267 	if ((m->flags & PG_CACHED) != 0) {
1268 		KASSERT((m->flags & PG_ZERO) == 0,
1269 		    ("vm_page_alloc: cached page %p is PG_ZERO", m));
1270 		KASSERT(m->valid != 0,
1271 		    ("vm_page_alloc: cached page %p is invalid", m));
1272 		if (m->object == object && m->pindex == pindex)
1273 	  		cnt.v_reactivated++;
1274 		else
1275 			m->valid = 0;
1276 		m_object = m->object;
1277 		vm_page_cache_remove(m);
1278 		if (m_object->type == OBJT_VNODE &&
1279 		    vm_object_cache_is_empty(m_object))
1280 			vp = m_object->handle;
1281 	} else {
1282 		KASSERT(VM_PAGE_IS_FREE(m),
1283 		    ("vm_page_alloc: page %p is not free", m));
1284 		KASSERT(m->valid == 0,
1285 		    ("vm_page_alloc: free page %p is valid", m));
1286 		cnt.v_free_count--;
1287 	}
1288 
1289 	/*
1290 	 * Only the PG_ZERO flag is inherited.  The PG_CACHED or PG_FREE flag
1291 	 * must be cleared before the free page queues lock is released.
1292 	 */
1293 	flags = 0;
1294 	if (m->flags & PG_ZERO) {
1295 		vm_page_zero_count--;
1296 		if (req & VM_ALLOC_ZERO)
1297 			flags = PG_ZERO;
1298 	}
1299 	if (req & VM_ALLOC_NODUMP)
1300 		flags |= PG_NODUMP;
1301 	m->flags = flags;
1302 	mtx_unlock(&vm_page_queue_free_mtx);
1303 	m->aflags = 0;
1304 	m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
1305 	    VPO_UNMANAGED : 0;
1306 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) == 0)
1307 		m->oflags |= VPO_BUSY;
1308 	if (req & VM_ALLOC_WIRED) {
1309 		/*
1310 		 * The page lock is not required for wiring a page until that
1311 		 * page is inserted into the object.
1312 		 */
1313 		atomic_add_int(&cnt.v_wire_count, 1);
1314 		m->wire_count = 1;
1315 	}
1316 	m->act_count = 0;
1317 
1318 	if (object != NULL) {
1319 		/* Ignore device objects; the pager sets "memattr" for them. */
1320 		if (object->memattr != VM_MEMATTR_DEFAULT &&
1321 		    (object->flags & OBJ_FICTITIOUS) == 0)
1322 			pmap_page_set_memattr(m, object->memattr);
1323 		vm_page_insert(m, object, pindex);
1324 	} else
1325 		m->pindex = pindex;
1326 
1327 	/*
1328 	 * The following call to vdrop() must come after the above call
1329 	 * to vm_page_insert() in case both affect the same object and
1330 	 * vnode.  Otherwise, the affected vnode's hold count could
1331 	 * temporarily become zero.
1332 	 */
1333 	if (vp != NULL)
1334 		vdrop(vp);
1335 
1336 	/*
1337 	 * Don't wake up the pageout daemon too often - only wake it up
1338 	 * when we would be nearly out of memory.
1339 	 */
1340 	if (vm_paging_needed())
1341 		pagedaemon_wakeup();
1342 
1343 	return (m);
1344 }
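
/*
 * Editorial sketch (hypothetical caller, not from the original file): a
 * common pattern retries the allocation after sleeping in VM_WAIT when no
 * page is available, dropping the object lock across the sleep:
 *
 *	retry:
 *		m = vm_page_alloc(object, pindex,
 *		    VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *		if (m == NULL) {
 *			VM_OBJECT_WUNLOCK(object);	// assumed macros
 *			VM_WAIT;
 *			VM_OBJECT_WLOCK(object);
 *			goto retry;
 *		}
 *		if ((m->flags & PG_ZERO) == 0)
 *			pmap_zero_page(m);		// zero only if needed
 */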
1345 
1346 /*
1347  *	vm_page_alloc_contig:
1348  *
1349  *	Allocate a contiguous set of physical pages of the given size "npages"
1350  *	from the free lists.  All of the physical pages must be at or above
1351  *	the given physical address "low" and below the given physical address
1352  *	"high".  The given value "alignment" determines the alignment of the
1353  *	first physical page in the set.  If the given value "boundary" is
1354  *	non-zero, then the set of physical pages cannot cross any physical
1355  *	address boundary that is a multiple of that value.  Both "alignment"
1356  *	and "boundary" must be a power of two.
1357  *
1358  *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
1359  *	then the memory attribute setting for the physical pages is configured
1360  *	to the object's memory attribute setting.  Otherwise, the memory
1361  *	attribute setting for the physical pages is configured to "memattr",
1362  *	overriding the object's memory attribute setting.  However, if the
1363  *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
1364  *	memory attribute setting for the physical pages cannot be configured
1365  *	to VM_MEMATTR_DEFAULT.
1366  *
1367  *	The caller must always specify an allocation class.
1368  *
1369  *	allocation classes:
1370  *	VM_ALLOC_NORMAL		normal process request
1371  *	VM_ALLOC_SYSTEM		system *really* needs a page
1372  *	VM_ALLOC_INTERRUPT	interrupt time request
1373  *
1374  *	optional allocation flags:
1375  *	VM_ALLOC_NOBUSY		do not set the flag VPO_BUSY on the page
1376  *	VM_ALLOC_NOOBJ		page is not associated with an object and
1377  *				should not have the flag VPO_BUSY set
1378  *	VM_ALLOC_WIRED		wire the allocated page
1379  *	VM_ALLOC_ZERO		prefer a zeroed page
1380  *
1381  *	This routine may not sleep.
1382  */
1383 vm_page_t
1384 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
1385     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
1386     vm_paddr_t boundary, vm_memattr_t memattr)
1387 {
1388 	struct vnode *drop;
1389 	vm_page_t deferred_vdrop_list, m, m_ret;
1390 	u_int flags, oflags;
1391 	int req_class;
1392 
1393 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
1394 	    ("vm_page_alloc_contig: inconsistent object/req"));
1395 	if (object != NULL) {
1396 		VM_OBJECT_ASSERT_WLOCKED(object);
1397 		KASSERT(object->type == OBJT_PHYS,
1398 		    ("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
1399 		    object));
1400 	}
1401 	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
1402 	req_class = req & VM_ALLOC_CLASS_MASK;
1403 
1404 	/*
1405 	 * The page daemon is allowed to dig deeper into the free page list.
1406 	 */
1407 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1408 		req_class = VM_ALLOC_SYSTEM;
1409 
1410 	deferred_vdrop_list = NULL;
1411 	mtx_lock(&vm_page_queue_free_mtx);
1412 	if (cnt.v_free_count + cnt.v_cache_count >= npages +
1413 	    cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
1414 	    cnt.v_free_count + cnt.v_cache_count >= npages +
1415 	    cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
1416 	    cnt.v_free_count + cnt.v_cache_count >= npages)) {
1417 #if VM_NRESERVLEVEL > 0
1418 retry:
1419 		if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
1420 		    (m_ret = vm_reserv_alloc_contig(object, pindex, npages,
1421 		    low, high, alignment, boundary)) == NULL)
1422 #endif
1423 			m_ret = vm_phys_alloc_contig(npages, low, high,
1424 			    alignment, boundary);
1425 	} else {
1426 		mtx_unlock(&vm_page_queue_free_mtx);
1427 		atomic_add_int(&vm_pageout_deficit, npages);
1428 		pagedaemon_wakeup();
1429 		return (NULL);
1430 	}
1431 	if (m_ret != NULL)
1432 		for (m = m_ret; m < &m_ret[npages]; m++) {
1433 			drop = vm_page_alloc_init(m);
1434 			if (drop != NULL) {
1435 				/*
1436 				 * Enqueue the vnode for deferred vdrop().
1437 				 *
1438 				 * Once the pages are removed from the free
1439 				 * page list, "pageq" can be safely abused to
1440 				 * construct a short-lived list of vnodes.
1441 				 */
1442 				m->pageq.tqe_prev = (void *)drop;
1443 				m->pageq.tqe_next = deferred_vdrop_list;
1444 				deferred_vdrop_list = m;
1445 			}
1446 		}
1447 	else {
1448 #if VM_NRESERVLEVEL > 0
1449 		if (vm_reserv_reclaim_contig(npages, low, high, alignment,
1450 		    boundary))
1451 			goto retry;
1452 #endif
1453 	}
1454 	mtx_unlock(&vm_page_queue_free_mtx);
1455 	if (m_ret == NULL)
1456 		return (NULL);
1457 
1458 	/*
1459 	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
1460 	 */
1461 	flags = 0;
1462 	if ((req & VM_ALLOC_ZERO) != 0)
1463 		flags = PG_ZERO;
1464 	if ((req & VM_ALLOC_NODUMP) != 0)
1465 		flags |= PG_NODUMP;
1466 	if ((req & VM_ALLOC_WIRED) != 0)
1467 		atomic_add_int(&cnt.v_wire_count, npages);
1468 	oflags = VPO_UNMANAGED;
1469 	if (object != NULL) {
1470 		if ((req & VM_ALLOC_NOBUSY) == 0)
1471 			oflags |= VPO_BUSY;
1472 		if (object->memattr != VM_MEMATTR_DEFAULT &&
1473 		    memattr == VM_MEMATTR_DEFAULT)
1474 			memattr = object->memattr;
1475 	}
1476 	for (m = m_ret; m < &m_ret[npages]; m++) {
1477 		m->aflags = 0;
1478 		m->flags = (m->flags | PG_NODUMP) & flags;
1479 		if ((req & VM_ALLOC_WIRED) != 0)
1480 			m->wire_count = 1;
1481 		/* Unmanaged pages don't use "act_count". */
1482 		m->oflags = oflags;
1483 		if (memattr != VM_MEMATTR_DEFAULT)
1484 			pmap_page_set_memattr(m, memattr);
1485 		if (object != NULL)
1486 			vm_page_insert(m, object, pindex);
1487 		else
1488 			m->pindex = pindex;
1489 		pindex++;
1490 	}
1491 	while (deferred_vdrop_list != NULL) {
1492 		vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
1493 		deferred_vdrop_list = deferred_vdrop_list->pageq.tqe_next;
1494 	}
1495 	if (vm_paging_needed())
1496 		pagedaemon_wakeup();
1497 	return (m_ret);
1498 }
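
/*
 * Editorial sketch (hypothetical caller, not from the original file):
 * allocating a wired, physically contiguous buffer below 4GB with page
 * alignment and no boundary restriction might look like:
 *
 *	m = vm_page_alloc_contig(NULL, 0,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED,
 *	    npages, 0, (vm_paddr_t)0xffffffff, PAGE_SIZE, 0,
 *	    VM_MEMATTR_DEFAULT);
 *	if (m == NULL)
 *		...	// retry after VM_WAIT or fall back to smaller requests
 */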
1499 
1500 /*
1501  * Initialize a page that has been freshly dequeued from a freelist.
1502  * The caller has to drop the vnode returned, if it is not NULL.
1503  *
1504  * This function may only be used to initialize unmanaged pages.
1505  *
1506  * To be called with vm_page_queue_free_mtx held.
1507  */
1508 static struct vnode *
1509 vm_page_alloc_init(vm_page_t m)
1510 {
1511 	struct vnode *drop;
1512 	vm_object_t m_object;
1513 
1514 	KASSERT(m->queue == PQ_NONE,
1515 	    ("vm_page_alloc_init: page %p has unexpected queue %d",
1516 	    m, m->queue));
1517 	KASSERT(m->wire_count == 0,
1518 	    ("vm_page_alloc_init: page %p is wired", m));
1519 	KASSERT(m->hold_count == 0,
1520 	    ("vm_page_alloc_init: page %p is held", m));
1521 	KASSERT(m->busy == 0,
1522 	    ("vm_page_alloc_init: page %p is busy", m));
1523 	KASSERT(m->dirty == 0,
1524 	    ("vm_page_alloc_init: page %p is dirty", m));
1525 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
1526 	    ("vm_page_alloc_init: page %p has unexpected memattr %d",
1527 	    m, pmap_page_get_memattr(m)));
1528 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1529 	drop = NULL;
1530 	if ((m->flags & PG_CACHED) != 0) {
1531 		KASSERT((m->flags & PG_ZERO) == 0,
1532 		    ("vm_page_alloc_init: cached page %p is PG_ZERO", m));
1533 		m->valid = 0;
1534 		m_object = m->object;
1535 		vm_page_cache_remove(m);
1536 		if (m_object->type == OBJT_VNODE &&
1537 		    vm_object_cache_is_empty(m_object))
1538 			drop = m_object->handle;
1539 	} else {
1540 		KASSERT(VM_PAGE_IS_FREE(m),
1541 		    ("vm_page_alloc_init: page %p is not free", m));
1542 		KASSERT(m->valid == 0,
1543 		    ("vm_page_alloc_init: free page %p is valid", m));
1544 		cnt.v_free_count--;
1545 		if ((m->flags & PG_ZERO) != 0)
1546 			vm_page_zero_count--;
1547 	}
1548 	/* Don't clear the PG_ZERO flag; we'll need it later. */
1549 	m->flags &= PG_ZERO;
1550 	return (drop);
1551 }
1552 
1553 /*
1554  * 	vm_page_alloc_freelist:
1555  *
1556  *	Allocate a physical page from the specified free page list.
1557  *
1558  *	The caller must always specify an allocation class.
1559  *
1560  *	allocation classes:
1561  *	VM_ALLOC_NORMAL		normal process request
1562  *	VM_ALLOC_SYSTEM		system *really* needs a page
1563  *	VM_ALLOC_INTERRUPT	interrupt time request
1564  *
1565  *	optional allocation flags:
1566  *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
1567  *				intends to allocate
1568  *	VM_ALLOC_WIRED		wire the allocated page
1569  *	VM_ALLOC_ZERO		prefer a zeroed page
1570  *
1571  *	This routine may not sleep.
1572  */
1573 vm_page_t
1574 vm_page_alloc_freelist(int flind, int req)
1575 {
1576 	struct vnode *drop;
1577 	vm_page_t m;
1578 	u_int flags;
1579 	int req_class;
1580 
1581 	req_class = req & VM_ALLOC_CLASS_MASK;
1582 
1583 	/*
1584 	 * The page daemon is allowed to dig deeper into the free page list.
1585 	 */
1586 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1587 		req_class = VM_ALLOC_SYSTEM;
1588 
1589 	/*
1590 	 * Do not allocate reserved pages unless the req has asked for it.
1591 	 */
1592 	mtx_lock(&vm_page_queue_free_mtx);
1593 	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
1594 	    (req_class == VM_ALLOC_SYSTEM &&
1595 	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
1596 	    (req_class == VM_ALLOC_INTERRUPT &&
1597 	    cnt.v_free_count + cnt.v_cache_count > 0))
1598 		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
1599 	else {
1600 		mtx_unlock(&vm_page_queue_free_mtx);
1601 		atomic_add_int(&vm_pageout_deficit,
1602 		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
1603 		pagedaemon_wakeup();
1604 		return (NULL);
1605 	}
1606 	if (m == NULL) {
1607 		mtx_unlock(&vm_page_queue_free_mtx);
1608 		return (NULL);
1609 	}
1610 	drop = vm_page_alloc_init(m);
1611 	mtx_unlock(&vm_page_queue_free_mtx);
1612 
1613 	/*
1614 	 * Initialize the page.  Only the PG_ZERO flag is inherited.
1615 	 */
1616 	m->aflags = 0;
1617 	flags = 0;
1618 	if ((req & VM_ALLOC_ZERO) != 0)
1619 		flags = PG_ZERO;
1620 	m->flags &= flags;
1621 	if ((req & VM_ALLOC_WIRED) != 0) {
1622 		/*
1623 		 * The page lock is not required for wiring a page that does
1624 		 * not belong to an object.
1625 		 */
1626 		atomic_add_int(&cnt.v_wire_count, 1);
1627 		m->wire_count = 1;
1628 	}
1629 	/* Unmanaged pages don't use "act_count". */
1630 	m->oflags = VPO_UNMANAGED;
1631 	if (drop != NULL)
1632 		vdrop(drop);
1633 	if (vm_paging_needed())
1634 		pagedaemon_wakeup();
1635 	return (m);
1636 }
1637 
1638 /*
1639  *	vm_wait:	(also see VM_WAIT macro)
1640  *
1641  *	Sleep until free pages are available for allocation.
1642  *	- Called in various places before memory allocations.
1643  */
1644 void
1645 vm_wait(void)
1646 {
1647 
1648 	mtx_lock(&vm_page_queue_free_mtx);
1649 	if (curproc == pageproc) {
1650 		vm_pageout_pages_needed = 1;
1651 		msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
1652 		    PDROP | PSWP, "VMWait", 0);
1653 	} else {
1654 		if (!vm_pages_needed) {
1655 			vm_pages_needed = 1;
1656 			wakeup(&vm_pages_needed);
1657 		}
1658 		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
1659 		    "vmwait", 0);
1660 	}
1661 }
1662 
1663 /*
1664  *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
1665  *
1666  *	Sleep until free pages are available for allocation.
1667  *	- Called only in vm_fault so that processes page faulting
1668  *	  can be easily tracked.
1669  *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
1670  *	  processes will be able to grab memory first.  Do not change
1671  *	  this balance without careful testing first.
1672  */
1673 void
1674 vm_waitpfault(void)
1675 {
1676 
1677 	mtx_lock(&vm_page_queue_free_mtx);
1678 	if (!vm_pages_needed) {
1679 		vm_pages_needed = 1;
1680 		wakeup(&vm_pages_needed);
1681 	}
1682 	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
1683 	    "pfault", 0);
1684 }
1685 
1686 /*
1687  *	vm_page_dequeue:
1688  *
1689  *	Remove the given page from its current page queue.
1690  *
1691  *	The page must be locked.
1692  */
1693 void
1694 vm_page_dequeue(vm_page_t m)
1695 {
1696 	struct vm_pagequeue *pq;
1697 
1698 	vm_page_lock_assert(m, MA_OWNED);
1699 	KASSERT(m->queue != PQ_NONE,
1700 	    ("vm_page_dequeue: page %p is not queued", m));
1701 	pq = &vm_pagequeues[m->queue];
1702 	vm_pagequeue_lock(pq);
1703 	m->queue = PQ_NONE;
1704 	TAILQ_REMOVE(&pq->pq_pl, m, pageq);
1705 	(*pq->pq_cnt)--;
1706 	vm_pagequeue_unlock(pq);
1707 }
1708 
1709 /*
1710  *	vm_page_dequeue_locked:
1711  *
1712  *	Remove the given page from its current page queue.
1713  *
1714  *	The page and page queue must be locked.
1715  */
1716 void
1717 vm_page_dequeue_locked(vm_page_t m)
1718 {
1719 	struct vm_pagequeue *pq;
1720 
1721 	vm_page_lock_assert(m, MA_OWNED);
1722 	pq = &vm_pagequeues[m->queue];
1723 	vm_pagequeue_assert_locked(pq);
1724 	m->queue = PQ_NONE;
1725 	TAILQ_REMOVE(&pq->pq_pl, m, pageq);
1726 	(*pq->pq_cnt)--;
1727 }
1728 
1729 /*
1730  *	vm_page_enqueue:
1731  *
1732  *	Add the given page to the specified page queue.
1733  *
1734  *	The page must be locked.
1735  */
1736 static void
1737 vm_page_enqueue(int queue, vm_page_t m)
1738 {
1739 	struct vm_pagequeue *pq;
1740 
1741 	vm_page_lock_assert(m, MA_OWNED);
1742 	pq = &vm_pagequeues[queue];
1743 	vm_pagequeue_lock(pq);
1744 	m->queue = queue;
1745 	TAILQ_INSERT_TAIL(&pq->pq_pl, m, pageq);
1746 	++*pq->pq_cnt;
1747 	vm_pagequeue_unlock(pq);
1748 }
1749 
1750 /*
1751  *	vm_page_requeue:
1752  *
1753  *	Move the given page to the tail of its current page queue.
1754  *
1755  *	The page must be locked.
1756  */
1757 void
1758 vm_page_requeue(vm_page_t m)
1759 {
1760 	struct vm_pagequeue *pq;
1761 
1762 	vm_page_lock_assert(m, MA_OWNED);
1763 	KASSERT(m->queue != PQ_NONE,
1764 	    ("vm_page_requeue: page %p is not queued", m));
1765 	pq = &vm_pagequeues[m->queue];
1766 	vm_pagequeue_lock(pq);
1767 	TAILQ_REMOVE(&pq->pq_pl, m, pageq);
1768 	TAILQ_INSERT_TAIL(&pq->pq_pl, m, pageq);
1769 	vm_pagequeue_unlock(pq);
1770 }
1771 
1772 /*
1773  *	vm_page_requeue_locked:
1774  *
1775  *	Move the given page to the tail of its current page queue.
1776  *
1777  *	The page queue must be locked.
1778  */
1779 void
1780 vm_page_requeue_locked(vm_page_t m)
1781 {
1782 	struct vm_pagequeue *pq;
1783 
1784 	KASSERT(m->queue != PQ_NONE,
1785 	    ("vm_page_requeue_locked: page %p is not queued", m));
1786 	pq = &vm_pagequeues[m->queue];
1787 	vm_pagequeue_assert_locked(pq);
1788 	TAILQ_REMOVE(&pq->pq_pl, m, pageq);
1789 	TAILQ_INSERT_TAIL(&pq->pq_pl, m, pageq);
1790 }
1791 
1792 /*
1793  *	vm_page_activate:
1794  *
1795  *	Put the specified page on the active list (if appropriate).
1796  *	Ensure that act_count is at least ACT_INIT but do not otherwise
1797  *	mess with it.
1798  *
1799  *	The page must be locked.
1800  */
1801 void
1802 vm_page_activate(vm_page_t m)
1803 {
1804 	int queue;
1805 
1806 	vm_page_lock_assert(m, MA_OWNED);
1807 	VM_OBJECT_ASSERT_WLOCKED(m->object);
1808 	if ((queue = m->queue) != PQ_ACTIVE) {
1809 		if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
1810 			if (m->act_count < ACT_INIT)
1811 				m->act_count = ACT_INIT;
1812 			if (queue != PQ_NONE)
1813 				vm_page_dequeue(m);
1814 			vm_page_enqueue(PQ_ACTIVE, m);
1815 		} else
1816 			KASSERT(queue == PQ_NONE,
1817 			    ("vm_page_activate: wired page %p is queued", m));
1818 	} else {
1819 		if (m->act_count < ACT_INIT)
1820 			m->act_count = ACT_INIT;
1821 	}
1822 }
1823 
1824 /*
1825  *	vm_page_free_wakeup:
1826  *
1827  *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
1828  *	routine is called when a page has been added to the cache or free
1829  *	queues.
1830  *
1831  *	The page queues must be locked.
1832  */
1833 static inline void
1834 vm_page_free_wakeup(void)
1835 {
1836 
1837 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1838 	/*
1839 	 * If the pageout daemon needs pages, then tell it that there
1840 	 * are some free.
1841 	 */
1842 	if (vm_pageout_pages_needed &&
1843 	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
1844 		wakeup(&vm_pageout_pages_needed);
1845 		vm_pageout_pages_needed = 0;
1846 	}
1847 	/*
1848 	 * Wake up processes that are waiting on memory if we hit a
1849 	 * high water mark, and wake up the scheduler process if we have
1850 	 * lots of memory; that process will swap in other processes.
1851 	 */
1852 	if (vm_pages_needed && !vm_page_count_min()) {
1853 		vm_pages_needed = 0;
1854 		wakeup(&cnt.v_free_count);
1855 	}
1856 }
1857 
1858 /*
1859  *	vm_page_free_toq:
1860  *
1861  *	Returns the given page to the free list,
1862  *	disassociating it with any VM object.
1863  *
1864  *	The object must be locked.  The page must be locked if it is managed.
1865  */
1866 void
1867 vm_page_free_toq(vm_page_t m)
1868 {
1869 
1870 	if ((m->oflags & VPO_UNMANAGED) == 0) {
1871 		vm_page_lock_assert(m, MA_OWNED);
1872 		KASSERT(!pmap_page_is_mapped(m),
1873 		    ("vm_page_free_toq: freeing mapped page %p", m));
1874 	} else
1875 		KASSERT(m->queue == PQ_NONE,
1876 		    ("vm_page_free_toq: unmanaged page %p is queued", m));
1877 	PCPU_INC(cnt.v_tfree);
1878 
1879 	if (VM_PAGE_IS_FREE(m))
1880 		panic("vm_page_free: freeing free page %p", m);
1881 	else if (m->busy != 0)
1882 		panic("vm_page_free: freeing busy page %p", m);
1883 
1884 	/*
1885 	 * Unqueue, then remove page.  Note that we cannot destroy
1886 	 * the page here because we do not want to call the pager's
1887 	 * callback routine until after we've put the page on the
1888 	 * appropriate free queue.
1889 	 */
1890 	vm_page_remque(m);
1891 	vm_page_remove(m);
1892 
1893 	/*
1894 	 * If the page is fictitious, there is nothing more to do:
1895 	 * fictitious pages are never placed on the free queues.
1896 	 */
1897 	if ((m->flags & PG_FICTITIOUS) != 0) {
1898 		return;
1899 	}
1900 
1901 	m->valid = 0;
1902 	vm_page_undirty(m);
1903 
1904 	if (m->wire_count != 0)
1905 		panic("vm_page_free: freeing wired page %p", m);
1906 	if (m->hold_count != 0) {
1907 		m->flags &= ~PG_ZERO;
1908 		KASSERT((m->flags & PG_UNHOLDFREE) == 0,
1909 		    ("vm_page_free: freeing PG_UNHOLDFREE page %p", m));
1910 		m->flags |= PG_UNHOLDFREE;
1911 	} else {
1912 		/*
1913 		 * Restore the default memory attribute to the page.
1914 		 */
1915 		if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
1916 			pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
1917 
1918 		/*
1919 		 * Insert the page into the physical memory allocator's
1920 		 * cache/free page queues.
1921 		 */
1922 		mtx_lock(&vm_page_queue_free_mtx);
1923 		m->flags |= PG_FREE;
1924 		cnt.v_free_count++;
1925 #if VM_NRESERVLEVEL > 0
1926 		if (!vm_reserv_free_page(m))
1927 #else
1928 		if (TRUE)
1929 #endif
1930 			vm_phys_free_pages(m, 0);
1931 		if ((m->flags & PG_ZERO) != 0)
1932 			++vm_page_zero_count;
1933 		else
1934 			vm_page_zero_idle_wakeup();
1935 		vm_page_free_wakeup();
1936 		mtx_unlock(&vm_page_queue_free_mtx);
1937 	}
1938 }
1939 
1940 /*
1941  *	vm_page_wire:
1942  *
1943  *	Mark this page as wired down by yet
1944  *	another map, removing it from paging queues
1945  *	as necessary.
1946  *
1947  *	If the page is fictitious, then its wire count must remain one.
1948  *
1949  *	The page must be locked.
1950  */
1951 void
1952 vm_page_wire(vm_page_t m)
1953 {
1954 
1955 	/*
1956 	 * Only bump the wire statistics if the page is not already wired,
1957 	 * and only unqueue the page if it is on some queue (if it is unmanaged
1958 	 * it is already off the queues).
1959 	 */
1960 	vm_page_lock_assert(m, MA_OWNED);
1961 	if ((m->flags & PG_FICTITIOUS) != 0) {
1962 		KASSERT(m->wire_count == 1,
1963 		    ("vm_page_wire: fictitious page %p's wire count isn't one",
1964 		    m));
1965 		return;
1966 	}
1967 	if (m->wire_count == 0) {
1968 		KASSERT((m->oflags & VPO_UNMANAGED) == 0 ||
1969 		    m->queue == PQ_NONE,
1970 		    ("vm_page_wire: unmanaged page %p is queued", m));
1971 		vm_page_remque(m);
1972 		atomic_add_int(&cnt.v_wire_count, 1);
1973 	}
1974 	m->wire_count++;
1975 	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
1976 }
1977 
1978 /*
1979  * vm_page_unwire:
1980  *
1981  * Release one wiring of the specified page, potentially enabling it to be
1982  * paged again.  If paging is enabled, then the value of the parameter
1983  * "activate" determines to which queue the page is added.  If "activate" is
1984  * non-zero, then the page is added to the active queue.  Otherwise, it is
1985  * added to the inactive queue.
1986  *
1987  * However, unless the page belongs to an object, it is not enqueued because
1988  * it cannot be paged out.
1989  *
1990  * If a page is fictitious, then its wire count must alway be one.
1991  *
1992  * A managed page must be locked.
1993  */
1994 void
1995 vm_page_unwire(vm_page_t m, int activate)
1996 {
1997 
1998 	if ((m->oflags & VPO_UNMANAGED) == 0)
1999 		vm_page_lock_assert(m, MA_OWNED);
2000 	if ((m->flags & PG_FICTITIOUS) != 0) {
2001 		KASSERT(m->wire_count == 1,
2002 	    ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
2003 		return;
2004 	}
2005 	if (m->wire_count > 0) {
2006 		m->wire_count--;
2007 		if (m->wire_count == 0) {
2008 			atomic_subtract_int(&cnt.v_wire_count, 1);
2009 			if ((m->oflags & VPO_UNMANAGED) != 0 ||
2010 			    m->object == NULL)
2011 				return;
2012 			if (!activate)
2013 				m->flags &= ~PG_WINATCFLS;
2014 			vm_page_enqueue(activate ? PQ_ACTIVE : PQ_INACTIVE, m);
2015 		}
2016 	} else
2017 		panic("vm_page_unwire: page %p's wire count is zero", m);
2018 }
2019 
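/*
 * Illustrative sketch, not part of this file: the common pairing of
 * vm_page_wire() and vm_page_unwire().  The page is wired so that it
 * stays resident across an operation and is then released to the
 * inactive queue ("activate" == 0).  The helper name is hypothetical.
 */
static void
example_wire_for_io(vm_page_t m)
{

	vm_page_lock(m);
	vm_page_wire(m);
	vm_page_unlock(m);

	/* ... operate on the page while it cannot be paged out ... */

	vm_page_lock(m);
	vm_page_unwire(m, 0);
	vm_page_unlock(m);
}
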
2020 /*
2021  * Move the specified page to the inactive queue.
2022  *
2023  * Many pages placed on the inactive queue should actually go
2024  * into the cache, but it is difficult to figure out which.  What
2025  * we do instead, if the inactive target is well met, is to put
2026  * clean pages at the head of the inactive queue instead of the tail.
2027  * This will cause them to be moved to the cache more quickly and
2028  * if not actively re-referenced, reclaimed more quickly.  If we just
2029  * stick these pages at the end of the inactive queue, heavy filesystem
2030  * meta-data accesses can cause an unnecessary paging load on memory bound
2031  * processes.  This optimization causes one-time-use metadata to be
2032  * reused more quickly.
2033  *
2034  * Normally athead is 0 resulting in LRU operation.  athead is set
2035  * to 1 if we want this page to be 'as if it were placed in the cache',
2036  * except without unmapping it from the process address space.
2037  *
2038  * The page must be locked.
2039  */
2040 static inline void
2041 _vm_page_deactivate(vm_page_t m, int athead)
2042 {
2043 	struct vm_pagequeue *pq;
2044 	int queue;
2045 
2046 	vm_page_lock_assert(m, MA_OWNED);
2047 
2048 	/*
2049 	 * Ignore if already inactive.
2050 	 */
2051 	if ((queue = m->queue) == PQ_INACTIVE)
2052 		return;
2053 	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
2054 		if (queue != PQ_NONE)
2055 			vm_page_dequeue(m);
2056 		m->flags &= ~PG_WINATCFLS;
2057 		pq = &vm_pagequeues[PQ_INACTIVE];
2058 		vm_pagequeue_lock(pq);
2059 		m->queue = PQ_INACTIVE;
2060 		if (athead)
2061 			TAILQ_INSERT_HEAD(&pq->pq_pl, m, pageq);
2062 		else
2063 			TAILQ_INSERT_TAIL(&pq->pq_pl, m, pageq);
2064 		cnt.v_inactive_count++;
2065 		vm_pagequeue_unlock(pq);
2066 	}
2067 }
2068 
2069 /*
2070  * Move the specified page to the inactive queue.
2071  *
2072  * The page must be locked.
2073  */
2074 void
2075 vm_page_deactivate(vm_page_t m)
2076 {
2077 
2078 	_vm_page_deactivate(m, 0);
2079 }
2080 
2081 /*
2082  * vm_page_try_to_cache:
2083  *
2084  * Returns 0 on failure, 1 on success
2085  */
2086 int
2087 vm_page_try_to_cache(vm_page_t m)
2088 {
2089 
2090 	vm_page_lock_assert(m, MA_OWNED);
2091 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2092 	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
2093 	    (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
2094 		return (0);
2095 	pmap_remove_all(m);
2096 	if (m->dirty)
2097 		return (0);
2098 	vm_page_cache(m);
2099 	return (1);
2100 }
2101 
2102 /*
2103  * vm_page_try_to_free()
2104  *
2105  *	Attempt to free the page.  If we cannot free it, we do nothing.
2106  *	1 is returned on success, 0 on failure.
2107  */
2108 int
2109 vm_page_try_to_free(vm_page_t m)
2110 {
2111 
2112 	vm_page_lock_assert(m, MA_OWNED);
2113 	if (m->object != NULL)
2114 		VM_OBJECT_ASSERT_WLOCKED(m->object);
2115 	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
2116 	    (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
2117 		return (0);
2118 	pmap_remove_all(m);
2119 	if (m->dirty)
2120 		return (0);
2121 	vm_page_free(m);
2122 	return (1);
2123 }
2124 
2125 /*
2126  * vm_page_cache
2127  *
2128  * Put the specified page onto the page cache queue (if appropriate).
2129  *
2130  * The object and page must be locked.
2131  */
2132 void
2133 vm_page_cache(vm_page_t m)
2134 {
2135 	vm_object_t object;
2136 	boolean_t cache_was_empty;
2137 
2138 	vm_page_lock_assert(m, MA_OWNED);
2139 	object = m->object;
2140 	VM_OBJECT_ASSERT_WLOCKED(object);
2141 	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy ||
2142 	    m->hold_count || m->wire_count)
2143 		panic("vm_page_cache: attempting to cache busy page");
2144 	KASSERT(!pmap_page_is_mapped(m),
2145 	    ("vm_page_cache: page %p is mapped", m));
2146 	KASSERT(m->dirty == 0, ("vm_page_cache: page %p is dirty", m));
2147 	if (m->valid == 0 || object->type == OBJT_DEFAULT ||
2148 	    (object->type == OBJT_SWAP &&
2149 	    !vm_pager_has_page(object, m->pindex, NULL, NULL))) {
2150 		/*
2151 		 * Hypothesis: A cache-eligible page belonging to a
2152 		 * default object or swap object but without a backing
2153 		 * store must be zero filled.
2154 		 */
2155 		vm_page_free(m);
2156 		return;
2157 	}
2158 	KASSERT((m->flags & PG_CACHED) == 0,
2159 	    ("vm_page_cache: page %p is already cached", m));
2160 	PCPU_INC(cnt.v_tcached);
2161 
2162 	/*
2163 	 * Remove the page from the paging queues.
2164 	 */
2165 	vm_page_remque(m);
2166 
2167 	/*
2168 	 * Remove the page from the object's collection of resident
2169 	 * pages.
2170 	 */
2171 	vm_radix_remove(&object->rtree, m->pindex);
2172 	TAILQ_REMOVE(&object->memq, m, listq);
2173 	object->resident_page_count--;
2174 
2175 	/*
2176 	 * Restore the default memory attribute to the page.
2177 	 */
2178 	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
2179 		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
2180 
2181 	/*
2182 	 * Insert the page into the object's collection of cached pages
2183 	 * and the physical memory allocator's cache/free page queues.
2184 	 */
2185 	m->flags &= ~PG_ZERO;
2186 	mtx_lock(&vm_page_queue_free_mtx);
2187 	m->flags |= PG_CACHED;
2188 	cnt.v_cache_count++;
2189 	cache_was_empty = vm_radix_is_empty(&object->cache);
2190 	vm_radix_insert(&object->cache, m);
2191 #if VM_NRESERVLEVEL > 0
2192 	if (!vm_reserv_free_page(m)) {
2193 #else
2194 	if (TRUE) {
2195 #endif
2196 		vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0);
2197 		vm_phys_free_pages(m, 0);
2198 	}
2199 	vm_page_free_wakeup();
2200 	mtx_unlock(&vm_page_queue_free_mtx);
2201 
2202 	/*
2203 	 * Increment the vnode's hold count if this is the object's only
2204 	 * cached page.  Decrement the vnode's hold count if this was
2205 	 * the object's only resident page.
2206 	 */
2207 	if (object->type == OBJT_VNODE) {
2208 		if (cache_was_empty && object->resident_page_count != 0)
2209 			vhold(object->handle);
2210 		else if (!cache_was_empty && object->resident_page_count == 0)
2211 			vdrop(object->handle);
2212 	}
2213 }
2214 
2215 /*
2216  * vm_page_dontneed
2217  *
2218  *	Cache, deactivate, or do nothing as appropriate.  This routine
2219  *	is typically used by madvise() MADV_DONTNEED.
2220  *
2221  *	Generally speaking we want to move the page into the cache so
2222  *	it gets reused quickly.  However, this can result in a silly syndrome
2223  *	due to the page recycling too quickly.  Small objects will not be
2224  *	fully cached.  On the otherhand, if we move the page to the inactive
2225  *	queue we wind up with a problem whereby very large objects
2226  *	unnecessarily blow away our inactive and cache queues.
2227  *
2228  *	The solution is to move the pages based on a fixed weighting.  We
2229  *	either leave them alone, deactivate them, or move them to the cache,
2230  *	where moving them to the cache has the highest weighting.
2231  *	By forcing some pages into other queues we eventually force the
2232  *	system to balance the queues, potentially recovering other unrelated
2233  *	space from active.  The idea is to not force this to happen too
2234  *	often.
2235  *
2236  *	The object and page must be locked.
2237  */
2238 void
2239 vm_page_dontneed(vm_page_t m)
2240 {
2241 	int dnw;
2242 	int head;
2243 
2244 	vm_page_lock_assert(m, MA_OWNED);
2245 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2246 	dnw = PCPU_GET(dnweight);
2247 	PCPU_INC(dnweight);
2248 
2249 	/*
2250 	 * Occasionally leave the page alone.
2251 	 */
2252 	if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) {
2253 		if (m->act_count >= ACT_INIT)
2254 			--m->act_count;
2255 		return;
2256 	}
2257 
2258 	/*
2259 	 * Clear any references to the page.  Otherwise, the page daemon will
2260 	 * immediately reactivate the page.
2261 	 *
2262 	 * Perform the pmap_clear_reference() first.  Otherwise, a concurrent
2263 	 * pmap operation, such as pmap_remove(), could clear a reference in
2264 	 * the pmap and set PGA_REFERENCED on the page before the
2265 	 * pmap_clear_reference() had completed.  Consequently, the page would
2266 	 * appear referenced based upon an old reference that occurred before
2267 	 * this function ran.
2268 	 */
2269 	pmap_clear_reference(m);
2270 	vm_page_aflag_clear(m, PGA_REFERENCED);
2271 
2272 	if (m->dirty == 0 && pmap_is_modified(m))
2273 		vm_page_dirty(m);
2274 
2275 	if (m->dirty || (dnw & 0x0070) == 0) {
2276 		/*
2277 		 * Deactivate the page 3 times out of 32.
2278 		 */
2279 		head = 0;
2280 	} else {
2281 		/*
2282 		 * Cache the page 28 times out of every 32.  Note that
2283 		 * the page is deactivated instead of cached, but placed
2284 		 * at the head of the queue instead of the tail.
2285 		 */
2286 		head = 1;
2287 	}
2288 	_vm_page_deactivate(m, head);
2289 }
2290 
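/*
 * Worked arithmetic for the weighting above, assuming the per-CPU
 * "dnweight" counter simply increments on every call: (dnw & 0x01F0)
 * == 0 holds for 16 of every 512 calls (1 in 32, leave the page
 * alone), and (dnw & 0x0070) == 0 holds for 16 of every 128 calls
 * (4 in 32).  After subtracting the leave-alone cases, a clean page
 * is deactivated about 3 times in 32 and "cached" (deactivated at the
 * head of the inactive queue) the remaining 28 times.
 */
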
2291 /*
2292  * Grab a page, waiting until we are woken up due to the page
2293  * changing state.  We keep on waiting as long as the page remains
2294  * in the object.  If the page doesn't exist, first allocate it
2295  * and then conditionally zero it.
2296  *
2297  * The caller must always specify the VM_ALLOC_RETRY flag.  This is intended
2298  * to facilitate its eventual removal.
2299  *
2300  * This routine may sleep.
2301  *
2302  * The object must be locked on entry.  The lock will, however, be released
2303  * and reacquired if the routine sleeps.
2304  */
2305 vm_page_t
2306 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
2307 {
2308 	vm_page_t m;
2309 
2310 	VM_OBJECT_ASSERT_WLOCKED(object);
2311 	KASSERT((allocflags & VM_ALLOC_RETRY) != 0,
2312 	    ("vm_page_grab: VM_ALLOC_RETRY is required"));
2313 retrylookup:
2314 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
2315 		if ((m->oflags & VPO_BUSY) != 0 ||
2316 		    ((allocflags & VM_ALLOC_IGN_SBUSY) == 0 && m->busy != 0)) {
2317 			/*
2318 			 * Reference the page before unlocking and
2319 			 * sleeping so that the page daemon is less
2320 			 * likely to reclaim it.
2321 			 */
2322 			vm_page_aflag_set(m, PGA_REFERENCED);
2323 			vm_page_sleep(m, "pgrbwt");
2324 			goto retrylookup;
2325 		} else {
2326 			if ((allocflags & VM_ALLOC_WIRED) != 0) {
2327 				vm_page_lock(m);
2328 				vm_page_wire(m);
2329 				vm_page_unlock(m);
2330 			}
2331 			if ((allocflags & VM_ALLOC_NOBUSY) == 0)
2332 				vm_page_busy(m);
2333 			return (m);
2334 		}
2335 	}
2336 	m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY |
2337 	    VM_ALLOC_IGN_SBUSY));
2338 	if (m == NULL) {
2339 		VM_OBJECT_WUNLOCK(object);
2340 		VM_WAIT;
2341 		VM_OBJECT_WLOCK(object);
2342 		goto retrylookup;
2343 	} else if (m->valid != 0)
2344 		return (m);
2345 	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
2346 		pmap_zero_page(m);
2347 	return (m);
2348 }
2349 
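/*
 * Illustrative sketch, not part of this file: obtaining a busied page
 * at a given index with vm_page_grab(), wiring it and zeroing it if it
 * had to be allocated.  VM_ALLOC_RETRY must always be specified.  The
 * helper name is hypothetical.
 */
static vm_page_t
example_grab_page(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(object);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	VM_OBJECT_WUNLOCK(object);
	return (m);
}
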
2350 /*
2351  * Mapping function for valid or dirty bits in a page.
2352  *
2353  * Inputs are required to range within a page.
2354  */
2355 vm_page_bits_t
2356 vm_page_bits(int base, int size)
2357 {
2358 	int first_bit;
2359 	int last_bit;
2360 
2361 	KASSERT(
2362 	    base + size <= PAGE_SIZE,
2363 	    ("vm_page_bits: illegal base/size %d/%d", base, size)
2364 	);
2365 
2366 	if (size == 0)		/* handle degenerate case */
2367 		return (0);
2368 
2369 	first_bit = base >> DEV_BSHIFT;
2370 	last_bit = (base + size - 1) >> DEV_BSHIFT;
2371 
2372 	return (((vm_page_bits_t)2 << last_bit) -
2373 	    ((vm_page_bits_t)1 << first_bit));
2374 }
2375 
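/*
 * Worked example, assuming DEV_BSIZE == 512 (DEV_BSHIFT == 9):
 * vm_page_bits(512, 1024) spans the second and third 512-byte blocks,
 * so first_bit == 1 and last_bit == (512 + 1024 - 1) >> 9 == 2, giving
 * ((vm_page_bits_t)2 << 2) - ((vm_page_bits_t)1 << 1) == 0x6, i.e.
 * bits 1 and 2 set.
 */
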
2376 /*
2377  *	vm_page_set_valid_range:
2378  *
2379  *	Sets portions of a page valid.  The arguments are expected
2380  *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
2381  *	of any partial chunks touched by the range.  The invalid portion of
2382  *	such chunks will be zeroed.
2383  *
2384  *	(base + size) must be less then or equal to PAGE_SIZE.
2385  */
2386 void
2387 vm_page_set_valid_range(vm_page_t m, int base, int size)
2388 {
2389 	int endoff, frag;
2390 
2391 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2392 	if (size == 0)	/* handle degenerate case */
2393 		return;
2394 
2395 	/*
2396 	 * If the base is not DEV_BSIZE aligned and the valid
2397 	 * bit is clear, we have to zero out a portion of the
2398 	 * first block.
2399 	 */
2400 	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2401 	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
2402 		pmap_zero_page_area(m, frag, base - frag);
2403 
2404 	/*
2405 	 * If the ending offset is not DEV_BSIZE aligned and the
2406 	 * valid bit is clear, we have to zero out a portion of
2407 	 * the last block.
2408 	 */
2409 	endoff = base + size;
2410 	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2411 	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
2412 		pmap_zero_page_area(m, endoff,
2413 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2414 
2415 	/*
2416 	 * Assert that no previously invalid block that is now being validated
2417 	 * is already dirty.
2418 	 */
2419 	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
2420 	    ("vm_page_set_valid_range: page %p is dirty", m));
2421 
2422 	/*
2423 	 * Set valid bits inclusive of any overlap.
2424 	 */
2425 	m->valid |= vm_page_bits(base, size);
2426 }
2427 
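/*
 * Worked example, assuming DEV_BSIZE == 512 and an entirely invalid
 * page: vm_page_set_valid_range(m, 100, 900) zeroes bytes 0-99 of the
 * first block and bytes 1000-1023 of the second block, then sets valid
 * bits 0 and 1 (vm_page_bits(100, 900) == 0x3).
 */
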
2428 /*
2429  * Clear the given bits from the specified page's dirty field.
2430  */
2431 static __inline void
2432 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
2433 {
2434 	uintptr_t addr;
2435 #if PAGE_SIZE < 16384
2436 	int shift;
2437 #endif
2438 
2439 	/*
2440 	 * If the object is locked and the page is neither VPO_BUSY nor
2441 	 * write mapped, then the page's dirty field cannot possibly be
2442 	 * set by a concurrent pmap operation.
2443 	 */
2444 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2445 	if ((m->oflags & VPO_BUSY) == 0 && !pmap_page_is_write_mapped(m))
2446 		m->dirty &= ~pagebits;
2447 	else {
2448 		/*
2449 		 * The pmap layer can call vm_page_dirty() without
2450 		 * holding a distinguished lock.  The combination of
2451 		 * the object's lock and an atomic operation suffice
2452 		 * to guarantee consistency of the page dirty field.
2453 		 *
2454 		 * For PAGE_SIZE == 32768 case, compiler already
2455 		 * properly aligns the dirty field, so no forcible
2456 		 * alignment is needed. Only require existence of
2457 		 * atomic_clear_64 when page size is 32768.
2458 		 */
2459 		addr = (uintptr_t)&m->dirty;
2460 #if PAGE_SIZE == 32768
2461 		atomic_clear_64((uint64_t *)addr, pagebits);
2462 #elif PAGE_SIZE == 16384
2463 		atomic_clear_32((uint32_t *)addr, pagebits);
2464 #else		/* PAGE_SIZE <= 8192 */
2465 		/*
2466 		 * Use a trick to perform a 32-bit atomic on the
2467 		 * containing aligned word, to not depend on the existence
2468 		 * of atomic_clear_{8, 16}.
2469 		 */
2470 		shift = addr & (sizeof(uint32_t) - 1);
2471 #if BYTE_ORDER == BIG_ENDIAN
2472 		shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
2473 #else
2474 		shift *= NBBY;
2475 #endif
2476 		addr &= ~(sizeof(uint32_t) - 1);
2477 		atomic_clear_32((uint32_t *)addr, pagebits << shift);
2478 #endif		/* PAGE_SIZE */
2479 	}
2480 }
2481 
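/*
 * Worked example of the sub-word atomic above, assuming a little-endian
 * machine and PAGE_SIZE == 4096 (so vm_page_bits_t is a single byte):
 * if &m->dirty lies at offset 2 within its aligned 32-bit word, then
 * shift == 2 * NBBY == 16 and clearing pagebits 0x0f is done as
 * atomic_clear_32(word, 0x0f << 16), leaving the word's other bytes
 * untouched.
 */
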
2482 /*
2483  *	vm_page_set_validclean:
2484  *
2485  *	Sets portions of a page valid and clean.  The arguments are expected
2486  *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
2487  *	of any partial chunks touched by the range.  The invalid portion of
2488  *	such chunks will be zero'd.
2489  *
2490  *	(base + size) must be less then or equal to PAGE_SIZE.
2491  */
2492 void
2493 vm_page_set_validclean(vm_page_t m, int base, int size)
2494 {
2495 	vm_page_bits_t oldvalid, pagebits;
2496 	int endoff, frag;
2497 
2498 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2499 	if (size == 0)	/* handle degenerate case */
2500 		return;
2501 
2502 	/*
2503 	 * If the base is not DEV_BSIZE aligned and the valid
2504 	 * bit is clear, we have to zero out a portion of the
2505 	 * first block.
2506 	 */
2507 	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2508 	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
2509 		pmap_zero_page_area(m, frag, base - frag);
2510 
2511 	/*
2512 	 * If the ending offset is not DEV_BSIZE aligned and the
2513 	 * valid bit is clear, we have to zero out a portion of
2514 	 * the last block.
2515 	 */
2516 	endoff = base + size;
2517 	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2518 	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
2519 		pmap_zero_page_area(m, endoff,
2520 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2521 
2522 	/*
2523 	 * Set valid, clear dirty bits.  If validating the entire
2524 	 * page we can safely clear the pmap modify bit.  We also
2525 	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
2526 	 * takes a write fault on a MAP_NOSYNC memory area the flag will
2527 	 * be set again.
2528 	 *
2529 	 * We set valid bits inclusive of any overlap, but we can only
2530 	 * clear dirty bits for DEV_BSIZE chunks that are fully within
2531 	 * the range.
2532 	 */
2533 	oldvalid = m->valid;
2534 	pagebits = vm_page_bits(base, size);
2535 	m->valid |= pagebits;
2536 #if 0	/* NOT YET */
2537 	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
2538 		frag = DEV_BSIZE - frag;
2539 		base += frag;
2540 		size -= frag;
2541 		if (size < 0)
2542 			size = 0;
2543 	}
2544 	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
2545 #endif
2546 	if (base == 0 && size == PAGE_SIZE) {
2547 		/*
2548 		 * The page can only be modified within the pmap if it is
2549 		 * mapped, and it can only be mapped if it was previously
2550 		 * fully valid.
2551 		 */
2552 		if (oldvalid == VM_PAGE_BITS_ALL)
2553 			/*
2554 			 * Perform the pmap_clear_modify() first.  Otherwise,
2555 			 * a concurrent pmap operation, such as
2556 			 * pmap_protect(), could clear a modification in the
2557 			 * pmap and set the dirty field on the page before
2558 			 * pmap_clear_modify() had begun and after the dirty
2559 			 * field was cleared here.
2560 			 */
2561 			pmap_clear_modify(m);
2562 		m->dirty = 0;
2563 		m->oflags &= ~VPO_NOSYNC;
2564 	} else if (oldvalid != VM_PAGE_BITS_ALL)
2565 		m->dirty &= ~pagebits;
2566 	else
2567 		vm_page_clear_dirty_mask(m, pagebits);
2568 }
2569 
2570 void
2571 vm_page_clear_dirty(vm_page_t m, int base, int size)
2572 {
2573 
2574 	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
2575 }
2576 
2577 /*
2578  *	vm_page_set_invalid:
2579  *
2580  *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
2581  *	valid and dirty bits for the effected areas are cleared.
2582  */
2583 void
2584 vm_page_set_invalid(vm_page_t m, int base, int size)
2585 {
2586 	vm_page_bits_t bits;
2587 
2588 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2589 	KASSERT((m->oflags & VPO_BUSY) == 0,
2590 	    ("vm_page_set_invalid: page %p is busy", m));
2591 	bits = vm_page_bits(base, size);
2592 	if (m->valid == VM_PAGE_BITS_ALL && bits != 0)
2593 		pmap_remove_all(m);
2594 	KASSERT(!pmap_page_is_mapped(m),
2595 	    ("vm_page_set_invalid: page %p is mapped", m));
2596 	m->valid &= ~bits;
2597 	m->dirty &= ~bits;
2598 }
2599 
2600 /*
2601  * vm_page_zero_invalid()
2602  *
2603  *	The kernel assumes that the invalid portions of a page contain
2604  *	garbage, but such pages can be mapped into memory by user code.
2605  *	When this occurs, we must zero out the non-valid portions of the
2606  *	page so user code sees what it expects.
2607  *
2608  *	Pages are most often semi-valid when the end of a file is mapped
2609  *	into memory and the file's size is not page aligned.
2610  */
2611 void
2612 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
2613 {
2614 	int b;
2615 	int i;
2616 
2617 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2618 	/*
2619 	 * Scan the valid bits looking for invalid sections that
2620 	 * must be zerod.  Invalid sub-DEV_BSIZE'd areas ( where the
2621 	 * valid bit may be set ) have already been zerod by
2622 	 * vm_page_set_validclean().
2623 	 */
2624 	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
2625 		if (i == (PAGE_SIZE / DEV_BSIZE) ||
2626 		    (m->valid & ((vm_page_bits_t)1 << i))) {
2627 			if (i > b) {
2628 				pmap_zero_page_area(m,
2629 				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
2630 			}
2631 			b = i + 1;
2632 		}
2633 	}
2634 
2635 	/*
2636 	 * setvalid is TRUE when we can safely set the zero'd areas
2637 	 * as being valid.  We can do this if there are no cache consistancy
2638 	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
2639 	 */
2640 	if (setvalid)
2641 		m->valid = VM_PAGE_BITS_ALL;
2642 }
2643 
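/*
 * Worked example, assuming PAGE_SIZE == 4096, DEV_BSIZE == 512 and
 * m->valid == 0x03 (only the first two blocks valid): the loop above
 * finds the invalid run covering blocks 2 through 7 and zeroes bytes
 * 1024 through 4095 in a single pmap_zero_page_area() call.
 */
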
2644 /*
2645  *	vm_page_is_valid:
2646  *
2647  *	Is (partial) page valid?  Note that the case where size == 0
2648  *	will return FALSE in the degenerate case where the page is
2649  *	entirely invalid, and TRUE otherwise.
2650  */
2651 int
2652 vm_page_is_valid(vm_page_t m, int base, int size)
2653 {
2654 	vm_page_bits_t bits;
2655 
2656 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2657 	bits = vm_page_bits(base, size);
2658 	return (m->valid != 0 && (m->valid & bits) == bits);
2659 }
2660 
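/*
 * For example, vm_page_is_valid(m, 0, PAGE_SIZE) is TRUE only when
 * every DEV_BSIZE block of the page is valid, while
 * vm_page_is_valid(m, 0, 0) degenerates to testing whether any block
 * at all is valid.
 */
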
2661 /*
2662  * Set the page's dirty bits if the page is modified.
2663  */
2664 void
2665 vm_page_test_dirty(vm_page_t m)
2666 {
2667 
2668 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2669 	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
2670 		vm_page_dirty(m);
2671 }
2672 
2673 void
2674 vm_page_lock_KBI(vm_page_t m, const char *file, int line)
2675 {
2676 
2677 	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
2678 }
2679 
2680 void
2681 vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
2682 {
2683 
2684 	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
2685 }
2686 
2687 int
2688 vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
2689 {
2690 
2691 	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
2692 }
2693 
2694 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
2695 void
2696 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
2697 {
2698 
2699 	mtx_assert_(vm_page_lockptr(m), a, file, line);
2700 }
2701 #endif
2702 
2703 int so_zerocp_fullpage = 0;
2704 
2705 /*
2706  *	Replace the given page with a copy.  The copied page assumes
2707  *	the portion of the given page's "wire_count" that is not the
2708  *	responsibility of this copy-on-write mechanism.
2709  *
2710  *	The object containing the given page must have a non-zero
2711  *	paging-in-progress count and be locked.
2712  */
2713 void
2714 vm_page_cowfault(vm_page_t m)
2715 {
2716 	vm_page_t mnew;
2717 	vm_object_t object;
2718 	vm_pindex_t pindex;
2719 
2720 	vm_page_lock_assert(m, MA_OWNED);
2721 	object = m->object;
2722 	VM_OBJECT_ASSERT_WLOCKED(object);
2723 	KASSERT(object->paging_in_progress != 0,
2724 	    ("vm_page_cowfault: object %p's paging-in-progress count is zero.",
2725 	    object));
2726 	pindex = m->pindex;
2727 
2728  retry_alloc:
2729 	pmap_remove_all(m);
2730 	vm_page_remove(m);
2731 	mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
2732 	if (mnew == NULL) {
2733 		vm_page_insert(m, object, pindex);
2734 		vm_page_unlock(m);
2735 		VM_OBJECT_WUNLOCK(object);
2736 		VM_WAIT;
2737 		VM_OBJECT_WLOCK(object);
2738 		if (m == vm_page_lookup(object, pindex)) {
2739 			vm_page_lock(m);
2740 			goto retry_alloc;
2741 		} else {
2742 			/*
2743 			 * Page disappeared during the wait.
2744 			 */
2745 			return;
2746 		}
2747 	}
2748 
2749 	if (m->cow == 0) {
2750 		/*
2751 		 * Check to see if we raced with an xmit completion while
2752 		 * waiting to allocate a page.  If so, put things back
2753 		 * the way they were.
2754 		 */
2755 		vm_page_unlock(m);
2756 		vm_page_lock(mnew);
2757 		vm_page_free(mnew);
2758 		vm_page_unlock(mnew);
2759 		vm_page_insert(m, object, pindex);
2760 	} else { /* clear COW & copy page */
2761 		if (!so_zerocp_fullpage)
2762 			pmap_copy_page(m, mnew);
2763 		mnew->valid = VM_PAGE_BITS_ALL;
2764 		vm_page_dirty(mnew);
2765 		mnew->wire_count = m->wire_count - m->cow;
2766 		m->wire_count = m->cow;
2767 		vm_page_unlock(m);
2768 	}
2769 }
2770 
2771 void
2772 vm_page_cowclear(vm_page_t m)
2773 {
2774 
2775 	vm_page_lock_assert(m, MA_OWNED);
2776 	if (m->cow) {
2777 		m->cow--;
2778 		/*
2779 		 * let vm_fault add back write permission  lazily
2780 		 */
2781 	}
2782 	/*
2783 	 *  sf_buf_free() will free the page, so we needn't do it here
2784 	 */
2785 }
2786 
2787 int
2788 vm_page_cowsetup(vm_page_t m)
2789 {
2790 
2791 	vm_page_lock_assert(m, MA_OWNED);
2792 	if ((m->flags & PG_FICTITIOUS) != 0 ||
2793 	    (m->oflags & VPO_UNMANAGED) != 0 ||
2794 	    m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYWLOCK(m->object))
2795 		return (EBUSY);
2796 	m->cow++;
2797 	pmap_remove_write(m);
2798 	VM_OBJECT_WUNLOCK(m->object);
2799 	return (0);
2800 }
2801 
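/*
 * Illustrative sketch, not part of this file: bracketing a zero-copy
 * transmit with the copy-on-write hooks above.  vm_page_cowsetup() may
 * fail with EBUSY, in which case the caller must fall back to copying.
 * The helper names are hypothetical.
 */
static int
example_cow_start(vm_page_t m)
{
	int error;

	vm_page_lock(m);
	error = vm_page_cowsetup(m);
	vm_page_unlock(m);
	return (error);
}

static void
example_cow_done(vm_page_t m)
{

	vm_page_lock(m);
	vm_page_cowclear(m);
	vm_page_unlock(m);
}
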
2802 #ifdef INVARIANTS
2803 void
2804 vm_page_object_lock_assert(vm_page_t m)
2805 {
2806 
2807 	/*
2808 	 * Certain of the page's fields may only be modified by the
2809 	 * holder of the containing object's lock or the setter of the
2810 	 * page's VPO_BUSY flag.  Unfortunately, the setter of the
2811 	 * VPO_BUSY flag is not recorded, and thus cannot be checked
2812 	 * here.
2813 	 */
2814 	if (m->object != NULL && (m->oflags & VPO_BUSY) == 0)
2815 		VM_OBJECT_ASSERT_WLOCKED(m->object);
2816 }
2817 #endif
2818 
2819 #include "opt_ddb.h"
2820 #ifdef DDB
2821 #include <sys/kernel.h>
2822 
2823 #include <ddb/ddb.h>
2824 
2825 DB_SHOW_COMMAND(page, vm_page_print_page_info)
2826 {
2827 	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
2828 	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
2829 	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
2830 	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
2831 	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
2832 	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
2833 	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
2834 	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
2835 	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
2836 	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
2837 }
2838 
2839 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
2840 {
2841 
2842 	db_printf("PQ_FREE:");
2843 	db_printf(" %d", cnt.v_free_count);
2844 	db_printf("\n");
2845 
2846 	db_printf("PQ_CACHE:");
2847 	db_printf(" %d", cnt.v_cache_count);
2848 	db_printf("\n");
2849 
2850 	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
2851 		*vm_pagequeues[PQ_ACTIVE].pq_cnt,
2852 		*vm_pagequeues[PQ_INACTIVE].pq_cnt);
2853 }
2854 #endif /* DDB */
2855