xref: /freebsd/sys/vm/vm_page.c (revision aca5021d5f7dcab1e11692923266373e35322d9a)
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *			GENERAL RULES ON VM_PAGE MANIPULATION
 *
 *	- A page queue lock is required when adding or removing a page from a
 *	  page queue regardless of other locks or the busy state of a page.
 *
 *		* In general, no thread besides the page daemon can acquire or
 *		  hold more than one page queue lock at a time.
 *
 *		* The page daemon can acquire and hold any pair of page queue
 *		  locks in any order.
 *
 *	- The object lock is required when inserting or removing
 *	  pages from an object (vm_page_insert() or vm_page_remove()).
 *
 */
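
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): a typical insertion path honoring the rules above.  "object",
 * "pindex" and "m" are assumptions for the example, and error handling
 * is elided.
 *
 *	VM_OBJECT_WLOCK(object);		// object lock for the insert
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 *	if (m != NULL) {
 *		vm_page_lock(m);		// page lock before queue ops
 *		vm_page_activate(m);		// takes a page queue lock
 *		vm_page_unlock(m);
 *	}
 *	VM_OBJECT_WUNLOCK(object);
 */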

/*
 *	Resident memory management module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct vm_domain vm_dom[MAXMEMDOM];
struct mtx_padalign vm_page_queue_free_mtx;

struct mtx_padalign pa_lock[PA_LOCK_COUNT];

vm_page_t vm_page_array;
long vm_page_array_size;
long first_page;
int vm_page_zero_count;

static int boot_pages = UMA_BOOT_PAGES;
TUNABLE_INT("vm.boot_pages", &boot_pages);
SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
	"number of pages allocated for bootstrapping the VM system");

static int pa_tryrelock_restart;
SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
    &pa_tryrelock_restart, 0, "Number of tryrelock restarts");

static uma_zone_t fakepg_zone;

static struct vnode *vm_page_alloc_init(vm_page_t m);
static void vm_page_cache_turn_free(vm_page_t m);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_enqueue(int queue, vm_page_t m);
static void vm_page_init_fakepg(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mpred);
static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
    vm_page_t mpred);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);

static void
vm_page_init_fakepg(void *dummy)
{

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
}

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

/*
 * Try to acquire a physical address lock while a pmap is locked.  If we
 * fail to trylock we unlock and lock the pmap directly and cache the
 * locked pa in *locked.  The caller should then restart their loop in case
 * the virtual to physical mapping has changed.
 */
int
vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
{
	vm_paddr_t lockpa;

	lockpa = *locked;
	*locked = pa;
	if (lockpa) {
		PA_LOCK_ASSERT(lockpa, MA_OWNED);
		if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
			return (0);
		PA_UNLOCK(lockpa);
	}
	if (PA_TRYLOCK(pa))
		return (0);
	PMAP_UNLOCK(pmap);
	atomic_add_int(&pa_tryrelock_restart, 1);
	PA_LOCK(pa);
	PMAP_LOCK(pmap);
	return (EAGAIN);
}
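
/*
 * Illustrative sketch (editor's addition): how a caller is expected to
 * use the restart protocol above.  On EAGAIN the virtual-to-physical
 * translation may have changed while the pmap lock was dropped, so the
 * lookup must be redone.  "pmap" and "va" are assumptions for the
 * example; the translation step is pseudo-code.
 *
 *	vm_paddr_t locked_pa = 0, pa;
 * retry:
 *	pa = ...;	// translate va to a physical address, pmap locked
 *	if (vm_page_pa_tryrelock(pmap, pa, &locked_pa))
 *		goto retry;
 *	// ... use PHYS_TO_VM_PAGE(pa) with its "pa" lock held ...
 *	PA_UNLOCK_COND(locked_pa);
 */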

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (cnt.v_page_size == 0)
		cnt.v_page_size = PAGE_SIZE;
	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 *	vm_page_blacklist_lookup:
 *
 *	See if a physical address in this page has been listed
 *	in the blacklist tunable.  Entries in the tunable are
 *	separated by spaces or commas.  If an invalid integer is
 *	encountered then the rest of the string is skipped.
 */
static int
vm_page_blacklist_lookup(char *list, vm_paddr_t pa)
{
	vm_paddr_t bad;
	char *cp, *pos;

	for (pos = list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp != '\0') {
			if (*cp == ' ' || *cp == ',') {
				cp++;
				if (cp == pos)
					continue;
			} else
				break;
		}
		if (pa == trunc_page(bad))
			return (1);
	}
	return (0);
}
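
/*
 * Example (editor's addition): the "vm.blacklist" tunable holds physical
 * addresses separated by spaces or commas, e.g. in /boot/loader.conf:
 *
 *	vm.blacklist="0x7d4f1000 0x7d4f2000"
 *
 * Any page whose address matches an entry (after truncation to a page
 * boundary) is withheld from the free lists at startup.  The addresses
 * shown here are made up.
 */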

static void
vm_page_domain_init(struct vm_domain *vmd)
{
	struct vm_pagequeue *pq;
	int i;

	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
	    "vm inactive pagequeue";
	*__DECONST(int **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_vcnt) =
	    &cnt.v_inactive_count;
	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
	    "vm active pagequeue";
	*__DECONST(int **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_vcnt) =
	    &cnt.v_active_count;
	vmd->vmd_fullintervalcount = 0;
	vmd->vmd_page_count = 0;
	vmd->vmd_free_count = 0;
	vmd->vmd_segs = 0;
	vmd->vmd_oom = FALSE;
	vmd->vmd_pass = 0;
	for (i = 0; i < PQ_COUNT; i++) {
		pq = &vmd->vmd_pagequeues[i];
		TAILQ_INIT(&pq->pq_pl);
		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
		    MTX_DEF | MTX_DUPOK);
	}
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	vm_offset_t mapped;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	vm_paddr_t last_pa;
	char *list;

	/* the biggest memory array is the second group of pages */
	vm_paddr_t end;
	vm_paddr_t biggestsize;
	vm_paddr_t low_water, high_water;
	int biggestone;

	biggestsize = 0;
	biggestone = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	low_water = phys_avail[0];
	high_water = phys_avail[1];

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		if (phys_avail[i] < low_water)
			low_water = phys_avail[i];
		if (phys_avail[i + 1] > high_water)
			high_water = phys_avail[i + 1];
	}

#ifdef XEN
	low_water = 0;
#endif

	end = phys_avail[biggestone+1];

	/*
	 * Initialize the page and queue locks.
	 */
	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
	for (i = 0; i < vm_ndomains; i++)
		vm_page_domain_init(&vm_dom[i]);

	/*
	 * Allocate memory for use when boot strapping the kernel memory
	 * allocator.
	 */
	new_end = end - (boot_pages * UMA_SLAB_SIZE);
	new_end = trunc_page(new_end);
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, end - new_end);
	uma_startup((void *)mapped, boot_pages);

#if defined(__amd64__) || defined(__i386__) || defined(__arm__) || \
    defined(__mips__)
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	last_pa = 0;
	for (i = 0; dump_avail[i + 1] != 0; i += 2)
		if (dump_avail[i + 1] > last_pa)
			last_pa = dump_avail[i + 1];
	page_range = last_pa / PAGE_SIZE;
	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#endif
#ifdef __amd64__
	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = low_water / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	page_range = 0;
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
#elif defined(VM_PHYSSEG_DENSE)
	page_range = high_water / PAGE_SIZE - first_page;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
	end = new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 */
	vaddr += PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array = (vm_page_t) mapped;
#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate memory for the reservation management system's data
	 * structures.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
#endif
#if defined(__amd64__) || defined(__mips__)
	/*
	 * pmap_map on amd64 and mips can come out of the direct-map, not kvm
	 * like i386, so the pages must be tracked for a crashdump to include
	 * this data.  This includes the vm_page_array and the early UMA
	 * bootstrap pages.
	 */
	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	for (i = 0; i < page_range; i++)
		vm_page_array[i].order = VM_NFREEORDER;
	vm_page_array_size = page_range;

	/*
	 * Initialize the physical memory allocator.
	 */
	vm_phys_init();

	/*
	 * Add every available physical page that is not blacklisted to
	 * the free lists.
	 */
	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	list = getenv("vm.blacklist");
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		pa = phys_avail[i];
		last_pa = phys_avail[i + 1];
		while (pa < last_pa) {
			if (list != NULL &&
			    vm_page_blacklist_lookup(list, pa))
				printf("Skipping page with pa 0x%jx\n",
				    (uintmax_t)pa);
			else
				vm_phys_add_page(pa);
			pa += PAGE_SIZE;
		}
	}
	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif
	return (vaddr);
}

void
vm_page_reference(vm_page_t m)
{

	vm_page_aflag_set(m, PGA_REFERENCED);
}

/*
 *	vm_page_busy_downgrade:
 *
 *	Downgrade an exclusive busy page into a single shared busy page.
 */
void
vm_page_busy_downgrade(vm_page_t m)
{
	u_int x;

	vm_page_assert_xbusied(m);

	for (;;) {
		x = m->busy_lock;
		x &= VPB_BIT_WAITERS;
		if (atomic_cmpset_rel_int(&m->busy_lock,
		    VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1) | x))
			break;
	}
}
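
/*
 * Editor's note on the transition above, assuming the VPB_* encoding in
 * vm_page.h (shared/exclusive/waiters flag bits in the low bits of
 * busy_lock, with the sharer count stored above them):
 *
 *	VPB_SINGLE_EXCLUSIVER [| VPB_BIT_WAITERS]
 *		--> VPB_SHARERS_WORD(1) [| VPB_BIT_WAITERS]
 *
 * i.e. the downgrade swaps "exclusively busied" for "one sharer" in a
 * single release-ordered compare-and-set, preserving the waiters bit so
 * that sleeping threads are not lost.
 */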

/*
 *	vm_page_sbusied:
 *
 *	Return a positive value if the page is shared busied, 0 otherwise.
 */
int
vm_page_sbusied(vm_page_t m)
{
	u_int x;

	x = m->busy_lock;
	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
}

/*
 *	vm_page_sunbusy:
 *
 *	Shared unbusy a page.
 */
void
vm_page_sunbusy(vm_page_t m)
{
	u_int x;

	vm_page_assert_sbusied(m);

	for (;;) {
		x = m->busy_lock;
		if (VPB_SHARERS(x) > 1) {
			if (atomic_cmpset_int(&m->busy_lock, x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		if ((x & VPB_BIT_WAITERS) == 0) {
			KASSERT(x == VPB_SHARERS_WORD(1),
			    ("vm_page_sunbusy: invalid lock state"));
			if (atomic_cmpset_int(&m->busy_lock,
			    VPB_SHARERS_WORD(1), VPB_UNBUSIED))
				break;
			continue;
		}
		KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS),
		    ("vm_page_sunbusy: invalid lock state for waiters"));

		vm_page_lock(m);
		if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) {
			vm_page_unlock(m);
			continue;
		}
		wakeup(m);
		vm_page_unlock(m);
		break;
	}
}

/*
 *	vm_page_busy_sleep:
 *
 *	Sleep and release the page lock, using the page pointer as wchan.
 *	This is used to implement the hard path of the busying mechanism.
 *
 *	The given page must be locked.
 */
void
vm_page_busy_sleep(vm_page_t m, const char *wmesg)
{
	u_int x;

	vm_page_lock_assert(m, MA_OWNED);

	x = m->busy_lock;
	if (x == VPB_UNBUSIED) {
		vm_page_unlock(m);
		return;
	}
	if ((x & VPB_BIT_WAITERS) == 0 &&
	    !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS)) {
		vm_page_unlock(m);
		return;
	}
	msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0);
}

/*
 *	vm_page_trysbusy:
 *
 *	Try to shared busy a page.
 *	If the operation succeeds, 1 is returned; otherwise, 0.
 *	The operation never sleeps.
 */
int
vm_page_trysbusy(vm_page_t m)
{
	u_int x;

	x = m->busy_lock;
	return ((x & VPB_BIT_SHARED) != 0 &&
	    atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER));
}

/*
 *	vm_page_xunbusy_hard:
 *
 *	Called after the first attempt to exclusively unbusy a page failed.
 *	It is assumed that the waiters bit is on.
 */
void
vm_page_xunbusy_hard(vm_page_t m)
{

	vm_page_assert_xbusied(m);

	vm_page_lock(m);
	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
	wakeup(m);
	vm_page_unlock(m);
}

/*
 *	vm_page_flash:
 *
 *	Wakeup anyone waiting for the page.
 *	The ownership bits do not change.
 *
 *	The given page must be locked.
 */
void
vm_page_flash(vm_page_t m)
{
	u_int x;

	vm_page_lock_assert(m, MA_OWNED);

	for (;;) {
		x = m->busy_lock;
		if ((x & VPB_BIT_WAITERS) == 0)
			return;
		if (atomic_cmpset_int(&m->busy_lock, x,
		    x & (~VPB_BIT_WAITERS)))
			break;
	}
	wakeup(m);
}

/*
 * Keep the page from being freed by the page daemon.  This has much the
 * same effect as wiring, except at much lower overhead, and it should be
 * used only for *very* temporary holding ("wiring").
 */
void
vm_page_hold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	mem->hold_count++;
}

void
vm_page_unhold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0)
		vm_page_free_toq(mem);
}
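
/*
 * Illustrative sketch (editor's addition): a hold is typically taken
 * around a short window in which the page must not be freed, for
 * example while the object lock is dropped.  "m" is an assumption for
 * the example.
 *
 *	vm_page_lock(m);
 *	vm_page_hold(m);
 *	vm_page_unlock(m);
 *	// ... brief window during which m must stay allocated ...
 *	vm_page_lock(m);
 *	vm_page_unhold(m);	// may free the page if PG_UNHOLDFREE is set
 *	vm_page_unlock(m);
 */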

/*
 *	vm_page_unhold_pages:
 *
 *	Unhold each of the pages that is referenced by the given array.
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{
	struct mtx *mtx, *new_mtx;

	mtx = NULL;
	for (; count != 0; count--) {
		/*
		 * Avoid releasing and reacquiring the same page lock.
		 */
		new_mtx = vm_page_lockptr(*ma);
		if (mtx != new_mtx) {
			if (mtx != NULL)
				mtx_unlock(mtx);
			mtx = new_mtx;
			mtx_lock(mtx);
		}
		vm_page_unhold(*ma);
		ma++;
	}
	if (mtx != NULL)
		mtx_unlock(mtx);
}

vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
	vm_page_t m;

#ifdef VM_PHYSSEG_SPARSE
	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		m = vm_phys_fictitious_to_vm_page(pa);
	return (m);
#elif defined(VM_PHYSSEG_DENSE)
	long pi;

	pi = atop(pa);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		m = &vm_page_array[pi - first_page];
		return (m);
	}
	return (vm_phys_fictitious_to_vm_page(pa));
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}

/*
 *	vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

void
vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	if ((m->flags & PG_FICTITIOUS) != 0) {
		/*
		 * The page's memattr might have changed since the
		 * previous initialization.  Update the pmap to the
		 * new memattr.
		 */
		goto memattr;
	}
	m->phys_addr = paddr;
	m->queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_UNMANAGED;
	m->busy_lock = VPB_SINGLE_EXCLUSIVER;
	m->wire_count = 1;
	pmap_page_init(m);
memattr:
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_putfake:
 *
 *	Release a fictitious page.
 */
void
vm_page_putfake(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	uma_zfree(fakepg_zone, m);
}
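
/*
 * Illustrative sketch (editor's addition): a device pager might wrap a
 * device's physical address in a fictitious page and release it when
 * done.  "dev_pa" is an assumption for the example, and the memory
 * attribute is machine-dependent; uncacheable is typical for device
 * memory.
 *
 *	vm_page_t fp;
 *
 *	fp = vm_page_getfake(dev_pa, VM_MEMATTR_UNCACHEABLE);
 *	// ... map fp / hand it to the fault handler ...
 *	vm_page_putfake(fp);
 */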

/*
 *	vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_free:
 *
 *	Free a page.
 */
void
vm_page_free(vm_page_t m)
{

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue.
 */
void
vm_page_free_zero(vm_page_t m)
{

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);
}

/*
 * Unbusy and handle the page queueing for a page from the VOP_GETPAGES()
 * array which is not the requested page.
 */
void
vm_page_readahead_finish(vm_page_t m)
{

	if (m->valid != 0) {
		/*
		 * Since the page is not the requested page, whether
		 * it should be activated or deactivated is not
		 * obvious.  Empirical results have shown that
		 * deactivating the page is usually the best choice,
		 * unless the page is wanted by another thread.
		 */
		vm_page_lock(m);
		if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
			vm_page_activate(m);
		else
			vm_page_deactivate(m);
		vm_page_unlock(m);
		vm_page_xunbusy(m);
	} else {
		/*
		 * Free the completely invalid page.  Such a page state
		 * occurs due to a short read operation which did not
		 * cover our page at all, or when a read error happens.
		 */
		vm_page_lock(m);
		vm_page_free(m);
		vm_page_unlock(m);
	}
}

/*
 *	vm_page_sleep_if_busy:
 *
 *	Sleep and release the page queues lock if the page is busied.
 *	Returns TRUE if the thread slept.
 *
 *	The given page must be unlocked and the object containing it must
 *	be locked.
 */
int
vm_page_sleep_if_busy(vm_page_t m, const char *msg)
{
	vm_object_t obj;

	vm_page_lock_assert(m, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(m->object);

	if (vm_page_busied(m)) {
		/*
		 * The page-specific object must be cached because page
		 * identity can change during the sleep, causing a
		 * re-lock of a different object.  It is assumed that a
		 * reference to the object is already held by the caller.
		 */
		obj = m->object;
		vm_page_lock(m);
		VM_OBJECT_WUNLOCK(obj);
		vm_page_busy_sleep(m, msg);
		VM_OBJECT_WLOCK(obj);
		return (TRUE);
	}
	return (FALSE);
}
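
/*
 * Illustrative sketch (editor's addition): the usual caller pattern is a
 * lookup-retry loop, because the page's identity may have changed by the
 * time the thread wakes up.  "object", "pindex" and the wait message are
 * assumptions for the example.
 *
 *	VM_OBJECT_WLOCK(object);
 * retry:
 *	m = vm_page_lookup(object, pindex);
 *	if (m != NULL && vm_page_sleep_if_busy(m, "pgwait"))
 *		goto retry;		// slept; look the page up again
 *	// ... proceed under the object lock with m unbusied or absent ...
 */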

/*
 *	vm_page_dirty_KBI:		[ internal use only ]
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 *
 *	This function should only be called by vm_page_dirty().
 */
void
vm_page_dirty_KBI(vm_page_t m)
{

	/* These assertions refer to this operation by its public name. */
	KASSERT((m->flags & PG_CACHED) == 0,
	    ("vm_page_dirty: page in cache!"));
	KASSERT(!VM_PAGE_IS_FREE(m),
	    ("vm_page_dirty: page is free!"));
	KASSERT(m->valid == VM_PAGE_BITS_ALL,
	    ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;
}
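
/*
 * Editor's note: vm_page_dirty() itself is the public front-end, defined
 * in vm_page.h.  In this vintage of the tree it is an inline that simply
 * sets m->dirty, except under INVARIANTS (or in module builds), where it
 * calls vm_page_dirty_KBI() above so the assertions stay behind a stable
 * kernel binary interface.  A call site looks like, with the object
 * locked:
 *
 *	vm_page_dirty(m);
 */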

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The object must be locked.
 */
int
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t mpred;

	VM_OBJECT_ASSERT_WLOCKED(object);
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	return (vm_page_insert_after(m, object, pindex, mpred));
}

/*
 *	vm_page_insert_after:
 *
 *	Inserts the page "m" into the specified object at offset "pindex".
 *
 *	The page "mpred" must immediately precede the offset "pindex" within
 *	the specified object.
 *
 *	The object must be locked.
 */
static int
vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred)
{
	vm_pindex_t sidx;
	vm_object_t sobj;
	vm_page_t msucc;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(m->object == NULL,
	    ("vm_page_insert_after: page already inserted"));
	if (mpred != NULL) {
		KASSERT(mpred->object == object ||
		    (mpred->flags & PG_SLAB) != 0,
		    ("vm_page_insert_after: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_page_insert_after: mpred doesn't precede pindex"));
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL)
		KASSERT(msucc->pindex > pindex,
		    ("vm_page_insert_after: msucc doesn't succeed pindex"));

	/*
	 * Record the object/offset pair in this page
	 */
	sobj = m->object;
	sidx = m->pindex;
	m->object = object;
	m->pindex = pindex;

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	if (vm_radix_insert(&object->rtree, m)) {
		m->object = sobj;
		m->pindex = sidx;
		return (1);
	}
	vm_page_insert_radixdone(m, object, mpred);
	return (0);
}

/*
 *	vm_page_insert_radixdone:
 *
 *	Complete page "m" insertion into the specified object after the
 *	radix trie hooking.
 *
 *	The page "mpred" must precede the offset "m->pindex" within the
 *	specified object.
 *
 *	The object must be locked.
 */
static void
vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object != NULL && m->object == object,
	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
	if (mpred != NULL) {
		KASSERT(mpred->object == object ||
		    (mpred->flags & PG_SLAB) != 0,
		    ("vm_page_insert_after: object doesn't contain mpred"));
		KASSERT(mpred->pindex < m->pindex,
		    ("vm_page_insert_after: mpred doesn't precede pindex"));
	}

	if (mpred != NULL)
		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
	else
		TAILQ_INSERT_HEAD(&object->memq, m, listq);

	/*
	 * Show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold(object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_MIGHTBEDIRTY flag.
	 */
	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);
}

/*
 *	vm_page_remove:
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list, but does not invalidate/terminate
 *	the backing store.
 *
 *	The object must be locked.  The page must be locked if it is managed.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;
	boolean_t lockacq;

	if ((m->oflags & VPO_UNMANAGED) == 0)
		vm_page_lock_assert(m, MA_OWNED);
	if ((object = m->object) == NULL)
		return;
	VM_OBJECT_ASSERT_WLOCKED(object);
	if (vm_page_xbusied(m)) {
		lockacq = FALSE;
		if ((m->oflags & VPO_UNMANAGED) != 0 &&
		    !mtx_owned(vm_page_lockptr(m))) {
			lockacq = TRUE;
			vm_page_lock(m);
		}
		vm_page_flash(m);
		atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
		if (lockacq)
			vm_page_unlock(m);
	}

	/*
	 * Now remove from the object's list of backed pages.
	 */
	vm_radix_remove(&object->rtree, m->pindex);
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;

	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop(object->handle);

	m->object = NULL;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	return (vm_radix_lookup(&object->rtree, pindex));
}

/*
 *	vm_page_find_least:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_LOCKED(object);
	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
		m = vm_radix_lookup_ge(&object->rtree, pindex);
	return (m);
}

/*
 * Returns the given page's successor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if ((next = TAILQ_NEXT(m, listq)) != NULL &&
	    next->pindex != m->pindex + 1)
		next = NULL;
	return (next);
}

/*
 * Returns the given page's predecessor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_prev(vm_page_t m)
{
	vm_page_t prev;

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
	    prev->pindex != m->pindex - 1)
		prev = NULL;
	return (prev);
}

/*
 * Uses the page mnew as a replacement for an existing page at index
 * pindex, which must already be present in the object.
 *
 * The existing page must not be on a paging queue.
 */
vm_page_t
vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t mold, mpred;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * This function mostly follows vm_page_insert() and
	 * vm_page_remove() without the radix, object count and vnode
	 * dance.  Double check such functions for more comments.
	 */
	mpred = vm_radix_lookup(&object->rtree, pindex);
	KASSERT(mpred != NULL,
	    ("vm_page_replace: replacing page not present with pindex"));
	mpred = TAILQ_PREV(mpred, respgs, listq);
	if (mpred != NULL)
		KASSERT(mpred->pindex < pindex,
		    ("vm_page_insert_after: mpred doesn't precede pindex"));

	mnew->object = object;
	mnew->pindex = pindex;
	mold = vm_radix_replace(&object->rtree, mnew, pindex);
	KASSERT(mold->queue == PQ_NONE,
	    ("vm_page_replace: mold is on a paging queue"));

	/* Detach the old page from the resident tailq. */
	TAILQ_REMOVE(&object->memq, mold, listq);

	mold->object = NULL;
	vm_page_xunbusy(mold);

	/* Insert the new page in the resident tailq. */
	if (mpred != NULL)
		TAILQ_INSERT_AFTER(&object->memq, mpred, mnew, listq);
	else
		TAILQ_INSERT_HEAD(&object->memq, mnew, listq);
	if (pmap_page_is_write_mapped(mnew))
		vm_object_set_writeable_dirty(object);
	return (mold);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons:  (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.  If the page is on the cache, we have to deactivate it
 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	      on the cache.
 *
 *	The objects must be locked.
 */
int
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	vm_page_t mpred;
	vm_pindex_t opidx;

	VM_OBJECT_ASSERT_WLOCKED(new_object);

	mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
	    ("vm_page_rename: pindex already renamed"));

	/*
	 * Create a custom version of vm_page_insert() which does not depend
	 * on m_prev and can cheat on the implementation aspects of the
	 * function.
	 */
	opidx = m->pindex;
	m->pindex = new_pindex;
	if (vm_radix_insert(&new_object->rtree, m)) {
		m->pindex = opidx;
		return (1);
	}

	/*
	 * The operation cannot fail anymore.  The removal must happen before
	 * the listq iterator is tainted.
	 */
	m->pindex = opidx;
	vm_page_lock(m);
	vm_page_remove(m);

	/* Return back to the new pindex to complete vm_page_insert(). */
	m->pindex = new_pindex;
	m->object = new_object;
	vm_page_unlock(m);
	vm_page_insert_radixdone(m, new_object, mpred);
	vm_page_dirty(m);
	return (0);
}

/*
 *	Convert all of the given object's cached pages that have a
 *	pindex within the given range into free pages.  If the value
 *	zero is given for "end", then the range's upper bound is
 *	infinity.  If the given object is backed by a vnode and it
 *	transitions from having one or more cached pages to none, the
 *	vnode's hold count is reduced.
 */
void
vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t m;
	boolean_t empty;

	mtx_lock(&vm_page_queue_free_mtx);
	if (__predict_false(vm_radix_is_empty(&object->cache))) {
		mtx_unlock(&vm_page_queue_free_mtx);
		return;
	}
	while ((m = vm_radix_lookup_ge(&object->cache, start)) != NULL) {
		if (end != 0 && m->pindex >= end)
			break;
		vm_radix_remove(&object->cache, m->pindex);
		vm_page_cache_turn_free(m);
	}
	empty = vm_radix_is_empty(&object->cache);
	mtx_unlock(&vm_page_queue_free_mtx);
	if (object->type == OBJT_VNODE && empty)
		vdrop(object->handle);
}

/*
 *	Returns the cached page that is associated with the given
 *	object and offset.  If, however, none exists, returns NULL.
 *
 *	The free page queue must be locked.
 */
static inline vm_page_t
vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	return (vm_radix_lookup(&object->cache, pindex));
}

/*
 *	Remove the given cached page from its containing object's
 *	collection of cached pages.
 *
 *	The free page queue must be locked.
 */
static void
vm_page_cache_remove(vm_page_t m)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT((m->flags & PG_CACHED) != 0,
	    ("vm_page_cache_remove: page %p is not cached", m));
	vm_radix_remove(&m->object->cache, m->pindex);
	m->object = NULL;
	cnt.v_cache_count--;
}

/*
 *	Transfer all of the cached pages with offset greater than or
 *	equal to 'offidxstart' from the original object's cache to the
 *	new object's cache.  However, any cached pages with offset
 *	greater than or equal to the new object's size are kept in the
 *	original object.  Initially, the new object's cache must be
 *	empty.  Offset 'offidxstart' in the original object must
 *	correspond to offset zero in the new object.
 *
 *	The new object must be locked.
 */
void
vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
    vm_object_t new_object)
{
	vm_page_t m;

	/*
	 * Insertion into an object's collection of cached pages
	 * requires the object to be locked.  In contrast, removal does
	 * not.
	 */
	VM_OBJECT_ASSERT_WLOCKED(new_object);
	KASSERT(vm_radix_is_empty(&new_object->cache),
	    ("vm_page_cache_transfer: object %p has cached pages",
	    new_object));
	mtx_lock(&vm_page_queue_free_mtx);
	while ((m = vm_radix_lookup_ge(&orig_object->cache,
	    offidxstart)) != NULL) {
		/*
		 * Transfer all of the pages with offset greater than or
		 * equal to 'offidxstart' from the original object's
		 * cache to the new object's cache.
		 */
		if ((m->pindex - offidxstart) >= new_object->size)
			break;
		vm_radix_remove(&orig_object->cache, m->pindex);
		/* Update the page's object and offset. */
		m->object = new_object;
		m->pindex -= offidxstart;
		if (vm_radix_insert(&new_object->cache, m))
			vm_page_cache_turn_free(m);
	}
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 *	Returns TRUE if a cached page is associated with the given object and
 *	offset, and FALSE otherwise.
 *
 *	The object must be locked.
 */
boolean_t
vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	/*
	 * Insertion into an object's collection of cached pages requires the
	 * object to be locked.  Therefore, if the object is locked and the
	 * object's collection is empty, there is no need to acquire the free
	 * page queues lock in order to prove that the specified page doesn't
	 * exist.
	 */
	VM_OBJECT_ASSERT_WLOCKED(object);
	if (__predict_true(vm_object_cache_is_empty(object)))
		return (FALSE);
	mtx_lock(&vm_page_queue_free_mtx);
	m = vm_page_cache_lookup(object, pindex);
	mtx_unlock(&vm_page_queue_free_mtx);
	return (m != NULL);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a page that is associated with the specified
 *	object and offset pair.  By default, this page is exclusive busied.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_IFCACHED	return page only if it is cached
 *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
 *				is cached
 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not be exclusive busy
 *	VM_ALLOC_SBUSY		shared busy the allocated page
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
	struct vnode *vp = NULL;
	vm_object_t m_object;
	vm_page_t m, mpred;
	int flags, req_class;

	mpred = 0;	/* XXX: pacify gcc */
	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", (void *)object,
	    req));
	if (object != NULL)
		VM_OBJECT_ASSERT_WLOCKED(object);

	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	if (object != NULL) {
		mpred = vm_radix_lookup_le(&object->rtree, pindex);
		KASSERT(mpred == NULL || mpred->pindex != pindex,
		   ("vm_page_alloc: pindex already allocated"));
	}

	/*
	 * The page allocation request can come from consumers which already
	 * hold the free page queue mutex, like vm_page_insert() in
	 * vm_page_cache().
	 */
	mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
	    (req_class == VM_ALLOC_SYSTEM &&
	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
	    (req_class == VM_ALLOC_INTERRUPT &&
	    cnt.v_free_count + cnt.v_cache_count > 0)) {
		/*
		 * Allocate from the free queue if the number of free pages
		 * exceeds the minimum for the request class.
		 */
		if (object != NULL &&
		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
				mtx_unlock(&vm_page_queue_free_mtx);
				return (NULL);
			}
			if (vm_phys_unfree_page(m))
				vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
#if VM_NRESERVLEVEL > 0
			else if (!vm_reserv_reactivate_page(m))
#else
			else
#endif
				panic("vm_page_alloc: cache page %p is missing"
				    " from the free queue", m);
		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
			mtx_unlock(&vm_page_queue_free_mtx);
			return (NULL);
#if VM_NRESERVLEVEL > 0
		} else if (object == NULL || (object->flags & (OBJ_COLORED |
		    OBJ_FICTITIOUS)) != OBJ_COLORED || (m =
		    vm_reserv_alloc_page(object, pindex, mpred)) == NULL) {
#else
		} else {
#endif
			m = vm_phys_alloc_pages(object != NULL ?
			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
#if VM_NRESERVLEVEL > 0
			if (m == NULL && vm_reserv_reclaim_inactive()) {
				m = vm_phys_alloc_pages(object != NULL ?
				    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
				    0);
			}
#endif
		}
	} else {
		/*
		 * Not allocatable, give up.
		 */
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit,
		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 *  At this point we had better have found a good page.
	 */
	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
	KASSERT(m->queue == PQ_NONE,
	    ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
	KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
	KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
	KASSERT(!vm_page_sbusied(m),
	    ("vm_page_alloc: page %p is busy", m));
	KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
	    pmap_page_get_memattr(m)));
	if ((m->flags & PG_CACHED) != 0) {
		KASSERT((m->flags & PG_ZERO) == 0,
		    ("vm_page_alloc: cached page %p is PG_ZERO", m));
		KASSERT(m->valid != 0,
		    ("vm_page_alloc: cached page %p is invalid", m));
		if (m->object == object && m->pindex == pindex)
			cnt.v_reactivated++;
		else
			m->valid = 0;
		m_object = m->object;
		vm_page_cache_remove(m);
		if (m_object->type == OBJT_VNODE &&
		    vm_object_cache_is_empty(m_object))
			vp = m_object->handle;
	} else {
		KASSERT(VM_PAGE_IS_FREE(m),
		    ("vm_page_alloc: page %p is not free", m));
		KASSERT(m->valid == 0,
		    ("vm_page_alloc: free page %p is valid", m));
		vm_phys_freecnt_adj(m, -1);
	}

	/*
	 * Only the PG_ZERO flag is inherited.  The PG_CACHED or PG_FREE flag
	 * must be cleared before the free page queues lock is released.
	 */
	flags = 0;
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		if (req & VM_ALLOC_ZERO)
			flags = PG_ZERO;
	}
	if (req & VM_ALLOC_NODUMP)
		flags |= PG_NODUMP;
	m->flags = flags;
	mtx_unlock(&vm_page_queue_free_mtx);
	m->aflags = 0;
	m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
	    VPO_UNMANAGED : 0;
	m->busy_lock = VPB_UNBUSIED;
	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
		m->busy_lock = VPB_SINGLE_EXCLUSIVER;
	if ((req & VM_ALLOC_SBUSY) != 0)
		m->busy_lock = VPB_SHARERS_WORD(1);
	if (req & VM_ALLOC_WIRED) {
		/*
		 * The page lock is not required for wiring a page until that
		 * page is inserted into the object.
		 */
		atomic_add_int(&cnt.v_wire_count, 1);
		m->wire_count = 1;
	}
	m->act_count = 0;

	if (object != NULL) {
		if (vm_page_insert_after(m, object, pindex, mpred)) {
			/* See the comment below about hold count. */
			if (vp != NULL)
				vdrop(vp);
			pagedaemon_wakeup();
			m->object = NULL;
			vm_page_free(m);
			return (NULL);
		}

		/* Ignore device objects; the pager sets "memattr" for them. */
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    (object->flags & OBJ_FICTITIOUS) == 0)
			pmap_page_set_memattr(m, object->memattr);
	} else
		m->pindex = pindex;

	/*
	 * The following call to vdrop() must come after the above call
	 * to vm_page_insert() in case both affect the same object and
	 * vnode.  Otherwise, the affected vnode's hold count could
	 * temporarily become zero.
	 */
	if (vp != NULL)
		vdrop(vp);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	return (m);
}
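
/*
 * Illustrative sketch (editor's addition): a typical consumer allocates
 * a wired, zero-filled page for an object, sleeping and retrying on
 * shortage since vm_page_alloc() itself never sleeps.  "object" and
 * "pindex" are assumptions for the example.
 *
 *	vm_page_t m;
 *
 *	VM_OBJECT_WLOCK(object);
 *	while ((m = vm_page_alloc(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 *		VM_OBJECT_WUNLOCK(object);
 *		VM_WAIT;			// wait for the page daemon
 *		VM_OBJECT_WLOCK(object);
 *	}
 *	if ((m->flags & PG_ZERO) == 0)		// ZERO is a preference only
 *		pmap_zero_page(m);
 *	VM_OBJECT_WUNLOCK(object);
 */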

static void
vm_page_alloc_contig_vdrop(struct spglist *lst)
{

	while (!SLIST_EMPTY(lst)) {
		vdrop((struct vnode *)SLIST_FIRST(lst)->plinks.s.pv);
		SLIST_REMOVE_HEAD(lst, plinks.s.ss);
	}
}

/*
 *	vm_page_alloc_contig:
 *
 *	Allocate a contiguous set of physical pages of the given size "npages"
 *	from the free lists.  All of the physical pages must be at or above
 *	the given physical address "low" and below the given physical address
 *	"high".  The given value "alignment" determines the alignment of the
 *	first physical page in the set.  If the given value "boundary" is
 *	non-zero, then the set of physical pages cannot cross any physical
 *	address boundary that is a multiple of that value.  Both "alignment"
 *	and "boundary" must be a power of two.
 *
 *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
 *	then the memory attribute setting for the physical pages is configured
 *	to the object's memory attribute setting.  Otherwise, the memory
 *	attribute setting for the physical pages is configured to "memattr",
 *	overriding the object's memory attribute setting.  However, if the
 *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
 *	memory attribute setting for the physical pages cannot be configured
 *	to VM_MEMATTR_DEFAULT.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not be exclusive busy
 *	VM_ALLOC_SBUSY		shared busy the allocated page
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr)
{
	struct vnode *drop;
	struct spglist deferred_vdrop_list;
	vm_page_t m, m_tmp, m_ret;
	u_int flags, oflags;
	int req_class;

	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", (void *)object,
	    req));
	if (object != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(object);
		KASSERT(object->type == OBJT_PHYS,
		    ("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
		    object));
	}
	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	SLIST_INIT(&deferred_vdrop_list);
	mtx_lock(&vm_page_queue_free_mtx);
	if (cnt.v_free_count + cnt.v_cache_count >= npages +
	    cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
	    cnt.v_free_count + cnt.v_cache_count >= npages +
	    cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
	    cnt.v_free_count + cnt.v_cache_count >= npages)) {
#if VM_NRESERVLEVEL > 0
retry:
		if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
		    (m_ret = vm_reserv_alloc_contig(object, pindex, npages,
		    low, high, alignment, boundary)) == NULL)
#endif
			m_ret = vm_phys_alloc_contig(npages, low, high,
			    alignment, boundary);
	} else {
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit, npages);
		pagedaemon_wakeup();
		return (NULL);
	}
	if (m_ret != NULL)
		for (m = m_ret; m < &m_ret[npages]; m++) {
			drop = vm_page_alloc_init(m);
			if (drop != NULL) {
				/*
				 * Enqueue the vnode for deferred vdrop().
				 *
				 * Once the pages are removed from the free
				 * page list, "pageq" can be safely abused to
				 * construct a short-lived list of vnodes.
				 */
				m->plinks.s.pv = drop;
				SLIST_INSERT_HEAD(&deferred_vdrop_list, m,
				    plinks.s.ss);
			}
		}
	else {
#if VM_NRESERVLEVEL > 0
		if (vm_reserv_reclaim_contig(npages, low, high, alignment,
		    boundary))
			goto retry;
#endif
	}
	mtx_unlock(&vm_page_queue_free_mtx);
	if (m_ret == NULL)
		return (NULL);

	/*
	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
	 */
	flags = 0;
	if ((req & VM_ALLOC_ZERO) != 0)
		flags = PG_ZERO;
	if ((req & VM_ALLOC_NODUMP) != 0)
		flags |= PG_NODUMP;
	if ((req & VM_ALLOC_WIRED) != 0)
		atomic_add_int(&cnt.v_wire_count, npages);
	oflags = VPO_UNMANAGED;
	if (object != NULL) {
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    memattr == VM_MEMATTR_DEFAULT)
			memattr = object->memattr;
	}
	for (m = m_ret; m < &m_ret[npages]; m++) {
		m->aflags = 0;
		m->flags = (m->flags | PG_NODUMP) & flags;
		m->busy_lock = VPB_UNBUSIED;
		if (object != NULL) {
			if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
				m->busy_lock = VPB_SINGLE_EXCLUSIVER;
			if ((req & VM_ALLOC_SBUSY) != 0)
				m->busy_lock = VPB_SHARERS_WORD(1);
		}
		if ((req & VM_ALLOC_WIRED) != 0)
			m->wire_count = 1;
		/* Unmanaged pages don't use "act_count". */
		m->oflags = oflags;
		if (object != NULL) {
			if (vm_page_insert(m, object, pindex)) {
				vm_page_alloc_contig_vdrop(
				    &deferred_vdrop_list);
				if (vm_paging_needed())
					pagedaemon_wakeup();
				for (m = m_ret, m_tmp = m_ret;
				    m < &m_ret[npages]; m++) {
					if (m_tmp < m)
						m_tmp++;
					else
						m->object = NULL;
					vm_page_free(m);
				}
				return (NULL);
			}
		} else
			m->pindex = pindex;
		if (memattr != VM_MEMATTR_DEFAULT)
			pmap_page_set_memattr(m, memattr);
		pindex++;
	}
	vm_page_alloc_contig_vdrop(&deferred_vdrop_list);
	if (vm_paging_needed())
		pagedaemon_wakeup();
	return (m_ret);
}
1831 
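/*
 * Example (an illustrative sketch, not part of the original source): a
 * driver needing a physically contiguous, wired, zeroed buffer below 4GB
 * might call vm_page_alloc_contig() as follows.  The variable names are
 * hypothetical.
 *
 *	vm_page_t m;
 *	u_long npages = 16;
 *
 *	m = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
 *	    VM_ALLOC_WIRED | VM_ALLOC_ZERO, npages, 0, 0xffffffff,
 *	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 *	if (m == NULL)
 *		return (ENOMEM);	/* deficit noted; page daemon woken */
 */
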
1832 /*
1833  * Initialize a page that has been freshly dequeued from a freelist.
1834  * The caller must vdrop() the returned vnode, if it is not NULL.
1835  *
1836  * This function may only be used to initialize unmanaged pages.
1837  *
1838  * To be called with vm_page_queue_free_mtx held.
1839  */
1840 static struct vnode *
1841 vm_page_alloc_init(vm_page_t m)
1842 {
1843 	struct vnode *drop;
1844 	vm_object_t m_object;
1845 
1846 	KASSERT(m->queue == PQ_NONE,
1847 	    ("vm_page_alloc_init: page %p has unexpected queue %d",
1848 	    m, m->queue));
1849 	KASSERT(m->wire_count == 0,
1850 	    ("vm_page_alloc_init: page %p is wired", m));
1851 	KASSERT(m->hold_count == 0,
1852 	    ("vm_page_alloc_init: page %p is held", m));
1853 	KASSERT(!vm_page_sbusied(m),
1854 	    ("vm_page_alloc_init: page %p is busy", m));
1855 	KASSERT(m->dirty == 0,
1856 	    ("vm_page_alloc_init: page %p is dirty", m));
1857 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
1858 	    ("vm_page_alloc_init: page %p has unexpected memattr %d",
1859 	    m, pmap_page_get_memattr(m)));
1860 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1861 	drop = NULL;
1862 	if ((m->flags & PG_CACHED) != 0) {
1863 		KASSERT((m->flags & PG_ZERO) == 0,
1864 		    ("vm_page_alloc_init: cached page %p is PG_ZERO", m));
1865 		m->valid = 0;
1866 		m_object = m->object;
1867 		vm_page_cache_remove(m);
1868 		if (m_object->type == OBJT_VNODE &&
1869 		    vm_object_cache_is_empty(m_object))
1870 			drop = m_object->handle;
1871 	} else {
1872 		KASSERT(VM_PAGE_IS_FREE(m),
1873 		    ("vm_page_alloc_init: page %p is not free", m));
1874 		KASSERT(m->valid == 0,
1875 		    ("vm_page_alloc_init: free page %p is valid", m));
1876 		vm_phys_freecnt_adj(m, -1);
1877 		if ((m->flags & PG_ZERO) != 0)
1878 			vm_page_zero_count--;
1879 	}
1880 	/* Don't clear the PG_ZERO flag; we'll need it later. */
1881 	m->flags &= PG_ZERO;
1882 	return (drop);
1883 }
1884 
1885 /*
1886  * 	vm_page_alloc_freelist:
1887  *
1888  *	Allocate a physical page from the specified free page list.
1889  *
1890  *	The caller must always specify an allocation class.
1891  *
1892  *	allocation classes:
1893  *	VM_ALLOC_NORMAL		normal process request
1894  *	VM_ALLOC_SYSTEM		system *really* needs a page
1895  *	VM_ALLOC_INTERRUPT	interrupt time request
1896  *
1897  *	optional allocation flags:
1898  *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
1899  *				intends to allocate
1900  *	VM_ALLOC_WIRED		wire the allocated page
1901  *	VM_ALLOC_ZERO		prefer a zeroed page
1902  *
1903  *	This routine may not sleep.
1904  */
1905 vm_page_t
1906 vm_page_alloc_freelist(int flind, int req)
1907 {
1908 	struct vnode *drop;
1909 	vm_page_t m;
1910 	u_int flags;
1911 	int req_class;
1912 
1913 	req_class = req & VM_ALLOC_CLASS_MASK;
1914 
1915 	/*
1916 	 * The page daemon is allowed to dig deeper into the free page list.
1917 	 */
1918 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1919 		req_class = VM_ALLOC_SYSTEM;
1920 
1921 	/*
1922 	 * Do not allocate reserved pages unless the request class allows it.
1923 	 */
1924 	mtx_lock(&vm_page_queue_free_mtx);
1925 	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
1926 	    (req_class == VM_ALLOC_SYSTEM &&
1927 	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
1928 	    (req_class == VM_ALLOC_INTERRUPT &&
1929 	    cnt.v_free_count + cnt.v_cache_count > 0))
1930 		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
1931 	else {
1932 		mtx_unlock(&vm_page_queue_free_mtx);
1933 		atomic_add_int(&vm_pageout_deficit,
1934 		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
1935 		pagedaemon_wakeup();
1936 		return (NULL);
1937 	}
1938 	if (m == NULL) {
1939 		mtx_unlock(&vm_page_queue_free_mtx);
1940 		return (NULL);
1941 	}
1942 	drop = vm_page_alloc_init(m);
1943 	mtx_unlock(&vm_page_queue_free_mtx);
1944 
1945 	/*
1946 	 * Initialize the page.  Only the PG_ZERO flag is inherited.
1947 	 */
1948 	m->aflags = 0;
1949 	flags = 0;
1950 	if ((req & VM_ALLOC_ZERO) != 0)
1951 		flags = PG_ZERO;
1952 	m->flags &= flags;
1953 	if ((req & VM_ALLOC_WIRED) != 0) {
1954 		/*
1955 		 * The page lock is not required for wiring a page that does
1956 		 * not belong to an object.
1957 		 */
1958 		atomic_add_int(&cnt.v_wire_count, 1);
1959 		m->wire_count = 1;
1960 	}
1961 	/* Unmanaged pages don't use "act_count". */
1962 	m->oflags = VPO_UNMANAGED;
1963 	if (drop != NULL)
1964 		vdrop(drop);
1965 	if (vm_paging_needed())
1966 		pagedaemon_wakeup();
1967 	return (m);
1968 }
1969 
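/*
 * Example (an illustrative sketch, not part of the original source):
 * early pmap code on some platforms allocates page table pages from a
 * specific freelist so that they are directly mappable.  Assuming the
 * platform defines VM_FREELIST_DEFAULT, a typical call looks like:
 *
 *	vm_page_t m;
 *
 *	m = vm_page_alloc_freelist(VM_FREELIST_DEFAULT,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	if (m != NULL && (m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);	/* VM_ALLOC_ZERO only prefers zeroed */
 */
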
1970 /*
1971  *	vm_wait:	(also see VM_WAIT macro)
1972  *
1973  *	Sleep until free pages are available for allocation.
1974  *	- Called in various places before memory allocations.
1975  */
1976 void
1977 vm_wait(void)
1978 {
1979 
1980 	mtx_lock(&vm_page_queue_free_mtx);
1981 	if (curproc == pageproc) {
1982 		vm_pageout_pages_needed = 1;
1983 		msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
1984 		    PDROP | PSWP, "VMWait", 0);
1985 	} else {
1986 		if (!vm_pages_needed) {
1987 			vm_pages_needed = 1;
1988 			wakeup(&vm_pages_needed);
1989 		}
1990 		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
1991 		    "vmwait", 0);
1992 	}
1993 }
1994 
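/*
 * Example (an illustrative sketch, not part of the original source): the
 * canonical caller-side pattern is to drop the object lock, wait for the
 * page daemon to reclaim memory, and then retry the allocation:
 *
 *	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL) {
 *		VM_OBJECT_WUNLOCK(object);
 *		VM_WAIT;
 *		VM_OBJECT_WLOCK(object);
 *	}
 */
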
1995 /*
1996  *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
1997  *
1998  *	Sleep until free pages are available for allocation.
1999  *	- Called only in vm_fault so that processes page faulting
2000  *	  can be easily tracked.
2001  *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
2002  *	  processes will be able to grab memory first.  Do not change
2003  *	  this balance without careful testing first.
2004  */
2005 void
2006 vm_waitpfault(void)
2007 {
2008 
2009 	mtx_lock(&vm_page_queue_free_mtx);
2010 	if (!vm_pages_needed) {
2011 		vm_pages_needed = 1;
2012 		wakeup(&vm_pages_needed);
2013 	}
2014 	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
2015 	    "pfault", 0);
2016 }
2017 
2018 struct vm_pagequeue *
2019 vm_page_pagequeue(vm_page_t m)
2020 {
2021 
2022 	return (&vm_phys_domain(m)->vmd_pagequeues[m->queue]);
2023 }
2024 
2025 /*
2026  *	vm_page_dequeue:
2027  *
2028  *	Remove the given page from its current page queue.
2029  *
2030  *	The page must be locked.
2031  */
2032 void
2033 vm_page_dequeue(vm_page_t m)
2034 {
2035 	struct vm_pagequeue *pq;
2036 
2037 	vm_page_lock_assert(m, MA_OWNED);
2038 	KASSERT(m->queue != PQ_NONE,
2039 	    ("vm_page_dequeue: page %p is not queued", m));
2040 	pq = vm_page_pagequeue(m);
2041 	vm_pagequeue_lock(pq);
2042 	m->queue = PQ_NONE;
2043 	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2044 	vm_pagequeue_cnt_dec(pq);
2045 	vm_pagequeue_unlock(pq);
2046 }
2047 
2048 /*
2049  *	vm_page_dequeue_locked:
2050  *
2051  *	Remove the given page from its current page queue.
2052  *
2053  *	The page and page queue must be locked.
2054  */
2055 void
2056 vm_page_dequeue_locked(vm_page_t m)
2057 {
2058 	struct vm_pagequeue *pq;
2059 
2060 	vm_page_lock_assert(m, MA_OWNED);
2061 	pq = vm_page_pagequeue(m);
2062 	vm_pagequeue_assert_locked(pq);
2063 	m->queue = PQ_NONE;
2064 	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2065 	vm_pagequeue_cnt_dec(pq);
2066 }
2067 
2068 /*
2069  *	vm_page_enqueue:
2070  *
2071  *	Add the given page to the specified page queue.
2072  *
2073  *	The page must be locked.
2074  */
2075 static void
2076 vm_page_enqueue(int queue, vm_page_t m)
2077 {
2078 	struct vm_pagequeue *pq;
2079 
2080 	vm_page_lock_assert(m, MA_OWNED);
2081 	pq = &vm_phys_domain(m)->vmd_pagequeues[queue];
2082 	vm_pagequeue_lock(pq);
2083 	m->queue = queue;
2084 	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
2085 	vm_pagequeue_cnt_inc(pq);
2086 	vm_pagequeue_unlock(pq);
2087 }
2088 
2089 /*
2090  *	vm_page_requeue:
2091  *
2092  *	Move the given page to the tail of its current page queue.
2093  *
2094  *	The page must be locked.
2095  */
2096 void
2097 vm_page_requeue(vm_page_t m)
2098 {
2099 	struct vm_pagequeue *pq;
2100 
2101 	vm_page_lock_assert(m, MA_OWNED);
2102 	KASSERT(m->queue != PQ_NONE,
2103 	    ("vm_page_requeue: page %p is not queued", m));
2104 	pq = vm_page_pagequeue(m);
2105 	vm_pagequeue_lock(pq);
2106 	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2107 	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
2108 	vm_pagequeue_unlock(pq);
2109 }
2110 
2111 /*
2112  *	vm_page_requeue_locked:
2113  *
2114  *	Move the given page to the tail of its current page queue.
2115  *
2116  *	The page queue must be locked.
2117  */
2118 void
2119 vm_page_requeue_locked(vm_page_t m)
2120 {
2121 	struct vm_pagequeue *pq;
2122 
2123 	KASSERT(m->queue != PQ_NONE,
2124 	    ("vm_page_requeue_locked: page %p is not queued", m));
2125 	pq = vm_page_pagequeue(m);
2126 	vm_pagequeue_assert_locked(pq);
2127 	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2128 	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
2129 }
2130 
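/*
 * Example (an illustrative sketch, not part of the original source): the
 * "_locked" queue operations exist so that a caller such as the pageout
 * scan, which already holds the page queue lock while walking a queue,
 * can avoid relocking for every page:
 *
 *	pq = vm_page_pagequeue(m);
 *	vm_pagequeue_lock(pq);
 *	vm_page_requeue_locked(m);	// pq lock is already held
 *	vm_pagequeue_unlock(pq);
 *
 * which, with the page lock held, is equivalent to a single
 * vm_page_requeue(m) call.
 */
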
2131 /*
2132  *	vm_page_activate:
2133  *
2134  *	Put the specified page on the active list (if appropriate).
2135  *	Ensure that act_count is at least ACT_INIT but do not otherwise
2136  *	mess with it.
2137  *
2138  *	The page must be locked.
2139  */
2140 void
2141 vm_page_activate(vm_page_t m)
2142 {
2143 	int queue;
2144 
2145 	vm_page_lock_assert(m, MA_OWNED);
2146 	if ((queue = m->queue) != PQ_ACTIVE) {
2147 		if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
2148 			if (m->act_count < ACT_INIT)
2149 				m->act_count = ACT_INIT;
2150 			if (queue != PQ_NONE)
2151 				vm_page_dequeue(m);
2152 			vm_page_enqueue(PQ_ACTIVE, m);
2153 		} else
2154 			KASSERT(queue == PQ_NONE,
2155 			    ("vm_page_activate: wired page %p is queued", m));
2156 	} else {
2157 		if (m->act_count < ACT_INIT)
2158 			m->act_count = ACT_INIT;
2159 	}
2160 }
2161 
2162 /*
2163  *	vm_page_free_wakeup:
2164  *
2165  *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
2166  *	routine is called when a page has been added to the cache or free
2167  *	queues.
2168  *
2169  *	The page queues must be locked.
2170  */
2171 static inline void
2172 vm_page_free_wakeup(void)
2173 {
2174 
2175 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
2176 	/*
2177 	 * If the pageout daemon needs pages, then tell it that there
2178 	 * are some free.
2179 	 */
2180 	if (vm_pageout_pages_needed &&
2181 	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
2182 		wakeup(&vm_pageout_pages_needed);
2183 		vm_pageout_pages_needed = 0;
2184 	}
2185 	/*
2186 	 * Wake up processes that are waiting on memory if we hit a
2187 	 * high water mark, and wake up the scheduler process if we
2188 	 * have lots of memory; that process will swap in processes.
2189 	 */
2190 	if (vm_pages_needed && !vm_page_count_min()) {
2191 		vm_pages_needed = 0;
2192 		wakeup(&cnt.v_free_count);
2193 	}
2194 }
2195 
2196 /*
2197  *	Turn a cached page into a free page by changing its attributes.
2198  *	Keep the statistics up-to-date.
2199  *
2200  *	The free page queue must be locked.
2201  */
2202 static void
2203 vm_page_cache_turn_free(vm_page_t m)
2204 {
2205 
2206 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
2207 
2208 	m->object = NULL;
2209 	m->valid = 0;
2210 	/* Clear PG_CACHED and set PG_FREE. */
2211 	m->flags ^= PG_CACHED | PG_FREE;
2212 	KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
2213 	    ("vm_page_cache_free: page %p has inconsistent flags", m));
2214 	cnt.v_cache_count--;
2215 	vm_phys_freecnt_adj(m, 1);
2216 }
2217 
2218 /*
2219  *	vm_page_free_toq:
2220  *
2221  *	Returns the given page to the free list,
2222  *	disassociating it with any VM object.
2223  *
2224  *	The object must be locked.  The page must be locked if it is managed.
2225  */
2226 void
2227 vm_page_free_toq(vm_page_t m)
2228 {
2229 
2230 	if ((m->oflags & VPO_UNMANAGED) == 0) {
2231 		vm_page_lock_assert(m, MA_OWNED);
2232 		KASSERT(!pmap_page_is_mapped(m),
2233 		    ("vm_page_free_toq: freeing mapped page %p", m));
2234 	} else
2235 		KASSERT(m->queue == PQ_NONE,
2236 		    ("vm_page_free_toq: unmanaged page %p is queued", m));
2237 	PCPU_INC(cnt.v_tfree);
2238 
2239 	if (VM_PAGE_IS_FREE(m))
2240 		panic("vm_page_free: freeing free page %p", m);
2241 	else if (vm_page_sbusied(m))
2242 		panic("vm_page_free: freeing busy page %p", m);
2243 
2244 	/*
2245 	 * Unqueue, then remove page.  Note that we cannot destroy
2246 	 * the page here because we do not want to call the pager's
2247 	 * callback routine until after we've put the page on the
2248 	 * appropriate free queue.
2249 	 */
2250 	vm_page_remque(m);
2251 	vm_page_remove(m);
2252 
2253 	/*
2254 	 * If the page is fictitious, remove the object association and
2255 	 * return; otherwise, delay removal of the object association.
2256 	 */
2257 	if ((m->flags & PG_FICTITIOUS) != 0) {
2258 		return;
2259 	}
2260 
2261 	m->valid = 0;
2262 	vm_page_undirty(m);
2263 
2264 	if (m->wire_count != 0)
2265 		panic("vm_page_free: freeing wired page %p", m);
2266 	if (m->hold_count != 0) {
2267 		m->flags &= ~PG_ZERO;
2268 		KASSERT((m->flags & PG_UNHOLDFREE) == 0,
2269 		    ("vm_page_free: freeing PG_UNHOLDFREE page %p", m));
2270 		m->flags |= PG_UNHOLDFREE;
2271 	} else {
2272 		/*
2273 		 * Restore the default memory attribute to the page.
2274 		 */
2275 		if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
2276 			pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
2277 
2278 		/*
2279 		 * Insert the page into the physical memory allocator's
2280 		 * cache/free page queues.
2281 		 */
2282 		mtx_lock(&vm_page_queue_free_mtx);
2283 		m->flags |= PG_FREE;
2284 		vm_phys_freecnt_adj(m, 1);
2285 #if VM_NRESERVLEVEL > 0
2286 		if (!vm_reserv_free_page(m))
2287 #else
2288 		if (TRUE)
2289 #endif
2290 			vm_phys_free_pages(m, 0);
2291 		if ((m->flags & PG_ZERO) != 0)
2292 			++vm_page_zero_count;
2293 		else
2294 			vm_page_zero_idle_wakeup();
2295 		vm_page_free_wakeup();
2296 		mtx_unlock(&vm_page_queue_free_mtx);
2297 	}
2298 }
2299 
2300 /*
2301  *	vm_page_wire:
2302  *
2303  *	Mark this page as wired down by yet
2304  *	another map, removing it from paging queues
2305  *	as necessary.
2306  *
2307  *	If the page is fictitious, then its wire count must remain one.
2308  *
2309  *	The page must be locked.
2310  */
2311 void
2312 vm_page_wire(vm_page_t m)
2313 {
2314 
2315 	/*
2316 	 * Only bump the wire statistics if the page is not already wired,
2317 	 * and only unqueue the page if it is on some queue (if it is unmanaged
2318 	 * it is already off the queues).
2319 	 */
2320 	vm_page_lock_assert(m, MA_OWNED);
2321 	if ((m->flags & PG_FICTITIOUS) != 0) {
2322 		KASSERT(m->wire_count == 1,
2323 		    ("vm_page_wire: fictitious page %p's wire count isn't one",
2324 		    m));
2325 		return;
2326 	}
2327 	if (m->wire_count == 0) {
2328 		KASSERT((m->oflags & VPO_UNMANAGED) == 0 ||
2329 		    m->queue == PQ_NONE,
2330 		    ("vm_page_wire: unmanaged page %p is queued", m));
2331 		vm_page_remque(m);
2332 		atomic_add_int(&cnt.v_wire_count, 1);
2333 	}
2334 	m->wire_count++;
2335 	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
2336 }
2337 
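/*
 * Example (an illustrative sketch, not part of the original source): a
 * typical caller pins a managed page for I/O and later releases it,
 * letting it re-enter the paging queues:
 *
 *	vm_page_lock(m);
 *	vm_page_wire(m);
 *	vm_page_unlock(m);
 *
 *	... perform I/O on the page ...
 *
 *	vm_page_lock(m);
 *	vm_page_unwire(m, 0);	// 0: enqueue on the inactive queue
 *	vm_page_unlock(m);
 */
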
2338 /*
2339  * vm_page_unwire:
2340  *
2341  * Release one wiring of the specified page, potentially enabling it to be
2342  * paged again.  If paging is enabled, then the value of the parameter
2343  * "activate" determines to which queue the page is added.  If "activate" is
2344  * non-zero, then the page is added to the active queue.  Otherwise, it is
2345  * added to the inactive queue.
2346  *
2347  * However, unless the page belongs to an object, it is not enqueued because
2348  * it cannot be paged out.
2349  *
2350  * If a page is fictitious, then its wire count must always be one.
2351  *
2352  * A managed page must be locked.
2353  */
2354 void
2355 vm_page_unwire(vm_page_t m, int activate)
2356 {
2357 
2358 	if ((m->oflags & VPO_UNMANAGED) == 0)
2359 		vm_page_lock_assert(m, MA_OWNED);
2360 	if ((m->flags & PG_FICTITIOUS) != 0) {
2361 		KASSERT(m->wire_count == 1,
2362 	    ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
2363 		return;
2364 	}
2365 	if (m->wire_count > 0) {
2366 		m->wire_count--;
2367 		if (m->wire_count == 0) {
2368 			atomic_subtract_int(&cnt.v_wire_count, 1);
2369 			if ((m->oflags & VPO_UNMANAGED) != 0 ||
2370 			    m->object == NULL)
2371 				return;
2372 			if (!activate)
2373 				m->flags &= ~PG_WINATCFLS;
2374 			vm_page_enqueue(activate ? PQ_ACTIVE : PQ_INACTIVE, m);
2375 		}
2376 	} else
2377 		panic("vm_page_unwire: page %p's wire count is zero", m);
2378 }
2379 
2380 /*
2381  * Move the specified page to the inactive queue.
2382  *
2383  * Many pages placed on the inactive queue should actually go
2384  * into the cache, but it is difficult to figure out which.  What
2385  * we do instead, if the inactive target is well met, is to put
2386  * clean pages at the head of the inactive queue instead of the tail.
2387  * This will cause them to be moved to the cache more quickly and
2388  * if not actively re-referenced, reclaimed more quickly.  If we just
2389  * stick these pages at the end of the inactive queue, heavy filesystem
2390  * meta-data accesses can cause an unnecessary paging load on memory bound
2391  * processes.  This optimization causes one-time-use metadata to be
2392  * reused more quickly.
2393  *
2394  * Normally athead is 0 resulting in LRU operation.  athead is set
2395  * to 1 if we want this page to be 'as if it were placed in the cache',
2396  * except without unmapping it from the process address space.
2397  *
2398  * The page must be locked.
2399  */
2400 static inline void
2401 _vm_page_deactivate(vm_page_t m, int athead)
2402 {
2403 	struct vm_pagequeue *pq;
2404 	int queue;
2405 
2406 	vm_page_lock_assert(m, MA_OWNED);
2407 
2408 	/*
2409 	 * Ignore if already inactive.
2410 	 */
2411 	if ((queue = m->queue) == PQ_INACTIVE)
2412 		return;
2413 	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
2414 		if (queue != PQ_NONE)
2415 			vm_page_dequeue(m);
2416 		m->flags &= ~PG_WINATCFLS;
2417 		pq = &vm_phys_domain(m)->vmd_pagequeues[PQ_INACTIVE];
2418 		vm_pagequeue_lock(pq);
2419 		m->queue = PQ_INACTIVE;
2420 		if (athead)
2421 			TAILQ_INSERT_HEAD(&pq->pq_pl, m, plinks.q);
2422 		else
2423 			TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
2424 		vm_pagequeue_cnt_inc(pq);
2425 		vm_pagequeue_unlock(pq);
2426 	}
2427 }
2428 
2429 /*
2430  * Move the specified page to the inactive queue.
2431  *
2432  * The page must be locked.
2433  */
2434 void
2435 vm_page_deactivate(vm_page_t m)
2436 {
2437 
2438 	_vm_page_deactivate(m, 0);
2439 }
2440 
2441 /*
2442  * vm_page_try_to_cache:
2443  *
2444  * Returns 0 on failure, 1 on success
2445  */
2446 int
2447 vm_page_try_to_cache(vm_page_t m)
2448 {
2449 
2450 	vm_page_lock_assert(m, MA_OWNED);
2451 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2452 	if (m->dirty || m->hold_count || m->wire_count ||
2453 	    (m->oflags & VPO_UNMANAGED) != 0 || vm_page_busied(m))
2454 		return (0);
2455 	pmap_remove_all(m);
2456 	if (m->dirty)
2457 		return (0);
2458 	vm_page_cache(m);
2459 	return (1);
2460 }
2461 
2462 /*
2463  * vm_page_try_to_free()
2464  *
2465  *	Attempt to free the page.  If we cannot free it, we do nothing.
2466  *	1 is returned on success, 0 on failure.
2467  */
2468 int
2469 vm_page_try_to_free(vm_page_t m)
2470 {
2471 
2472 	vm_page_lock_assert(m, MA_OWNED);
2473 	if (m->object != NULL)
2474 		VM_OBJECT_ASSERT_WLOCKED(m->object);
2475 	if (m->dirty || m->hold_count || m->wire_count ||
2476 	    (m->oflags & VPO_UNMANAGED) != 0 || vm_page_busied(m))
2477 		return (0);
2478 	pmap_remove_all(m);
2479 	if (m->dirty)
2480 		return (0);
2481 	vm_page_free(m);
2482 	return (1);
2483 }
2484 
2485 /*
2486  * vm_page_cache
2487  *
2488  * Put the specified page onto the page cache queue (if appropriate).
2489  *
2490  * The object and page must be locked.
2491  */
2492 void
2493 vm_page_cache(vm_page_t m)
2494 {
2495 	vm_object_t object;
2496 	boolean_t cache_was_empty;
2497 
2498 	vm_page_lock_assert(m, MA_OWNED);
2499 	object = m->object;
2500 	VM_OBJECT_ASSERT_WLOCKED(object);
2501 	if (vm_page_busied(m) || (m->oflags & VPO_UNMANAGED) ||
2502 	    m->hold_count || m->wire_count)
2503 		panic("vm_page_cache: attempting to cache busy page");
2504 	KASSERT(!pmap_page_is_mapped(m),
2505 	    ("vm_page_cache: page %p is mapped", m));
2506 	KASSERT(m->dirty == 0, ("vm_page_cache: page %p is dirty", m));
2507 	if (m->valid == 0 || object->type == OBJT_DEFAULT ||
2508 	    (object->type == OBJT_SWAP &&
2509 	    !vm_pager_has_page(object, m->pindex, NULL, NULL))) {
2510 		/*
2511 		 * Hypothesis: A cache-eligible page belonging to a
2512 		 * default object or swap object but without a backing
2513 		 * store must be zero filled.
2514 		 */
2515 		vm_page_free(m);
2516 		return;
2517 	}
2518 	KASSERT((m->flags & PG_CACHED) == 0,
2519 	    ("vm_page_cache: page %p is already cached", m));
2520 
2521 	/*
2522 	 * Remove the page from the paging queues.
2523 	 */
2524 	vm_page_remque(m);
2525 
2526 	/*
2527 	 * Remove the page from the object's collection of resident
2528 	 * pages.
2529 	 */
2530 	vm_radix_remove(&object->rtree, m->pindex);
2531 	TAILQ_REMOVE(&object->memq, m, listq);
2532 	object->resident_page_count--;
2533 
2534 	/*
2535 	 * Restore the default memory attribute to the page.
2536 	 */
2537 	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
2538 		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
2539 
2540 	/*
2541 	 * Insert the page into the object's collection of cached pages
2542 	 * and the physical memory allocator's cache/free page queues.
2543 	 */
2544 	m->flags &= ~PG_ZERO;
2545 	mtx_lock(&vm_page_queue_free_mtx);
2546 	cache_was_empty = vm_radix_is_empty(&object->cache);
2547 	if (vm_radix_insert(&object->cache, m)) {
2548 		mtx_unlock(&vm_page_queue_free_mtx);
2549 		if (object->resident_page_count == 0)
2550 			vdrop(object->handle);
2551 		m->object = NULL;
2552 		vm_page_free(m);
2553 		return;
2554 	}
2555 	m->flags |= PG_CACHED;
2556 	cnt.v_cache_count++;
2557 	PCPU_INC(cnt.v_tcached);
2558 #if VM_NRESERVLEVEL > 0
2559 	if (!vm_reserv_free_page(m)) {
2560 #else
2561 	if (TRUE) {
2562 #endif
2563 		vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0);
2564 		vm_phys_free_pages(m, 0);
2565 	}
2566 	vm_page_free_wakeup();
2567 	mtx_unlock(&vm_page_queue_free_mtx);
2568 
2569 	/*
2570 	 * Increment the vnode's hold count if this is the object's only
2571 	 * cached page.  Decrement the vnode's hold count if this was
2572 	 * the object's only resident page.
2573 	 */
2574 	if (object->type == OBJT_VNODE) {
2575 		if (cache_was_empty && object->resident_page_count != 0)
2576 			vhold(object->handle);
2577 		else if (!cache_was_empty && object->resident_page_count == 0)
2578 			vdrop(object->handle);
2579 	}
2580 }
2581 
2582 /*
2583  * vm_page_advise
2584  *
2585  *	Cache, deactivate, or do nothing as appropriate.  This routine
2586  *	is used by madvise().
2587  *
2588  *	Generally speaking we want to move the page into the cache so
2589  *	it gets reused quickly.  However, this can result in a silly syndrome
2590  *	due to the page recycling too quickly.  Small objects will not be
2591  *	fully cached.  On the other hand, if we move the page to the inactive
2592  *	queue we wind up with a problem whereby very large objects
2593  *	unnecessarily blow away our inactive and cache queues.
2594  *
2595  *	The solution is to move the pages based on a fixed weighting.  We
2596  *	either leave them alone, deactivate them, or move them to the cache,
2597  *	where moving them to the cache has the highest weighting.
2598  *	By forcing some pages into other queues we eventually force the
2599  *	system to balance the queues, potentially recovering other unrelated
2600  *	space from active.  The idea is to not force this to happen too
2601  *	often.
2602  *
2603  *	The object and page must be locked.
2604  */
2605 void
2606 vm_page_advise(vm_page_t m, int advice)
2607 {
2608 	int dnw, head;
2609 
2610 	vm_page_assert_locked(m);
2611 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2612 	if (advice == MADV_FREE) {
2613 		/*
2614 		 * Mark the page clean.  This will allow the page to be freed
2615 		 * up by the system.  However, such pages are often reused
2616 		 * quickly by malloc() so we do not do anything that would
2617 		 * cause a page fault if we can help it.
2618 		 *
2619 		 * Specifically, we do not try to actually free the page now
2620 		 * nor do we try to put it in the cache (which would cause a
2621 		 * page fault on reuse).
2622 		 *
2623 		 * But we do make the page as freeable as we can without
2624 		 * actually taking the step of unmapping it.
2625 		 */
2626 		pmap_clear_modify(m);
2627 		m->dirty = 0;
2628 		m->act_count = 0;
2629 	} else if (advice != MADV_DONTNEED)
2630 		return;
2631 	dnw = PCPU_GET(dnweight);
2632 	PCPU_INC(dnweight);
2633 
2634 	/*
2635 	 * Occasionally leave the page alone.
2636 	 */
2637 	if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) {
2638 		if (m->act_count >= ACT_INIT)
2639 			--m->act_count;
2640 		return;
2641 	}
2642 
2643 	/*
2644 	 * Clear any references to the page.  Otherwise, the page daemon will
2645 	 * immediately reactivate the page.
2646 	 *
2647 	 * Perform the pmap_clear_reference() first.  Otherwise, a concurrent
2648 	 * pmap operation, such as pmap_remove(), could clear a reference in
2649 	 * the pmap and set PGA_REFERENCED on the page before the
2650 	 * pmap_clear_reference() had completed.  Consequently, the page would
2651 	 * appear referenced based upon an old reference that occurred before
2652 	 * this function ran.
2653 	 */
2654 	pmap_clear_reference(m);
2655 	vm_page_aflag_clear(m, PGA_REFERENCED);
2656 
2657 	if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
2658 		vm_page_dirty(m);
2659 
2660 	if (m->dirty || (dnw & 0x0070) == 0) {
2661 		/*
2662 		 * Deactivate the page 3 times out of 32.
2663 		 */
2664 		head = 0;
2665 	} else {
2666 		/*
2667 		 * Cache the page 28 times out of every 32.  Note that
2668 		 * the page is deactivated instead of cached, but placed
2669 		 * at the head of the queue instead of the tail.
2670 		 */
2671 		head = 1;
2672 	}
2673 	_vm_page_deactivate(m, head);
2674 }
2675 
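/*
 * Worked arithmetic for the weighting above (not part of the original
 * source): "dnweight" is a free-running per-CPU counter.  (dnw & 0x01F0)
 * == 0 holds for 16 of every 512 values, i.e. 1 time in 32, which is the
 * "leave the page alone" case.  (dnw & 0x0070) == 0 holds for 1 value in
 * 8; excluding the 1-in-32 case already handled, a clean page is
 * deactivated at the tail (head = 0) for 3 of every 32 calls and placed
 * at the head of the inactive queue (head = 1) for the remaining 28.
 */
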
2676 /*
2677  * Grab a page, waiting until we are woken up due to the page
2678  * changing state.  We keep on waiting if the page continues
2679  * to be in the object.  If the page doesn't exist, first allocate it
2680  * and then conditionally zero it.
2681  *
2682  * The caller must always specify the VM_ALLOC_RETRY flag.  This is intended
2683  * to facilitate its eventual removal.
2684  *
2685  * This routine may sleep.
2686  *
2687  * The object must be locked on entry.  The lock will, however, be released
2688  * and reacquired if the routine sleeps.
2689  */
2690 vm_page_t
2691 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
2692 {
2693 	vm_page_t m;
2694 	int sleep;
2695 
2696 	VM_OBJECT_ASSERT_WLOCKED(object);
2697 	KASSERT((allocflags & VM_ALLOC_RETRY) != 0,
2698 	    ("vm_page_grab: VM_ALLOC_RETRY is required"));
2699 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
2700 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
2701 	    ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
2702 retrylookup:
2703 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
2704 		sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
2705 		    vm_page_xbusied(m) : vm_page_busied(m);
2706 		if (sleep) {
2707 			/*
2708 			 * Reference the page before unlocking and
2709 			 * sleeping so that the page daemon is less
2710 			 * likely to reclaim it.
2711 			 */
2712 			vm_page_aflag_set(m, PGA_REFERENCED);
2713 			vm_page_lock(m);
2714 			VM_OBJECT_WUNLOCK(object);
2715 			vm_page_busy_sleep(m, "pgrbwt");
2716 			VM_OBJECT_WLOCK(object);
2717 			goto retrylookup;
2718 		} else {
2719 			if ((allocflags & VM_ALLOC_WIRED) != 0) {
2720 				vm_page_lock(m);
2721 				vm_page_wire(m);
2722 				vm_page_unlock(m);
2723 			}
2724 			if ((allocflags &
2725 			    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
2726 				vm_page_xbusy(m);
2727 			if ((allocflags & VM_ALLOC_SBUSY) != 0)
2728 				vm_page_sbusy(m);
2729 			return (m);
2730 		}
2731 	}
2732 	m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY |
2733 	    VM_ALLOC_IGN_SBUSY));
2734 	if (m == NULL) {
2735 		VM_OBJECT_WUNLOCK(object);
2736 		VM_WAIT;
2737 		VM_OBJECT_WLOCK(object);
2738 		goto retrylookup;
2739 	} else if (m->valid != 0)
2740 		return (m);
2741 	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
2742 		pmap_zero_page(m);
2743 	return (m);
2744 }
2745 
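/*
 * Example (an illustrative sketch, not part of the original source):
 * a grabbed page is returned exclusive busied unless VM_ALLOC_NOBUSY or
 * VM_ALLOC_SBUSY was passed, so the caller must unbusy it when done:
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
 *	    VM_ALLOC_WIRED);
 *	... initialize or inspect the page ...
 *	vm_page_xunbusy(m);
 *	VM_OBJECT_WUNLOCK(object);
 */
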
2746 /*
2747  * Mapping function for valid or dirty bits in a page.
2748  *
2749  * Inputs are required to range within a page.
2750  */
2751 vm_page_bits_t
2752 vm_page_bits(int base, int size)
2753 {
2754 	int first_bit;
2755 	int last_bit;
2756 
2757 	KASSERT(
2758 	    base + size <= PAGE_SIZE,
2759 	    ("vm_page_bits: illegal base/size %d/%d", base, size)
2760 	);
2761 
2762 	if (size == 0)		/* handle degenerate case */
2763 		return (0);
2764 
2765 	first_bit = base >> DEV_BSHIFT;
2766 	last_bit = (base + size - 1) >> DEV_BSHIFT;
2767 
2768 	return (((vm_page_bits_t)2 << last_bit) -
2769 	    ((vm_page_bits_t)1 << first_bit));
2770 }
2771 
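/*
 * Worked example (not part of the original source), assuming DEV_BSIZE
 * == 512 so DEV_BSHIFT == 9: vm_page_bits(0, 1024) computes first_bit =
 * 0 and last_bit = 1023 >> 9 = 1, giving ((2 << 1) - (1 << 0)) = 0x3,
 * i.e. the first two DEV_BSIZE blocks.  vm_page_bits(512, 512) gives
 * ((2 << 1) - (1 << 1)) = 0x2, the second block only.
 */
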
2772 /*
2773  *	vm_page_set_valid_range:
2774  *
2775  *	Sets portions of a page valid.  The arguments are expected
2776  *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
2777  *	of any partial chunks touched by the range.  The invalid portion of
2778  *	such chunks will be zeroed.
2779  *
2780  *	(base + size) must be less than or equal to PAGE_SIZE.
2781  */
2782 void
2783 vm_page_set_valid_range(vm_page_t m, int base, int size)
2784 {
2785 	int endoff, frag;
2786 
2787 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2788 	if (size == 0)	/* handle degenerate case */
2789 		return;
2790 
2791 	/*
2792 	 * If the base is not DEV_BSIZE aligned and the valid
2793 	 * bit is clear, we have to zero out a portion of the
2794 	 * first block.
2795 	 */
2796 	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2797 	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
2798 		pmap_zero_page_area(m, frag, base - frag);
2799 
2800 	/*
2801 	 * If the ending offset is not DEV_BSIZE aligned and the
2802 	 * valid bit is clear, we have to zero out a portion of
2803 	 * the last block.
2804 	 */
2805 	endoff = base + size;
2806 	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2807 	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
2808 		pmap_zero_page_area(m, endoff,
2809 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2810 
2811 	/*
2812 	 * Assert that no previously invalid block that is now being validated
2813 	 * is already dirty.
2814 	 */
2815 	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
2816 	    ("vm_page_set_valid_range: page %p is dirty", m));
2817 
2818 	/*
2819 	 * Set valid bits inclusive of any overlap.
2820 	 */
2821 	m->valid |= vm_page_bits(base, size);
2822 }
2823 
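/*
 * Worked example (not part of the original source), assuming DEV_BSIZE
 * == 512: for vm_page_set_valid_range(m, 100, 900), frag = 0, so bytes
 * 0-99 are zeroed if block 0 is invalid; endoff = 1000 with frag = 512,
 * so bytes 1000-1023 are zeroed if block 1 is invalid; finally m->valid
 * |= vm_page_bits(100, 900) sets bits 0 and 1 (mask 0x3).
 */
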
2824 /*
2825  * Clear the given bits from the specified page's dirty field.
2826  */
2827 static __inline void
2828 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
2829 {
2830 	uintptr_t addr;
2831 #if PAGE_SIZE < 16384
2832 	int shift;
2833 #endif
2834 
2835 	/*
2836 	 * If the object is locked and the page is neither exclusive busy nor
2837 	 * write mapped, then the page's dirty field cannot possibly be
2838 	 * set by a concurrent pmap operation.
2839 	 */
2840 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2841 	if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
2842 		m->dirty &= ~pagebits;
2843 	else {
2844 		/*
2845 		 * The pmap layer can call vm_page_dirty() without
2846 		 * holding a distinguished lock.  The combination of
2847 		 * the object's lock and an atomic operation suffice
2848 		 * to guarantee consistency of the page dirty field.
2849 		 *
2850 		 * In the PAGE_SIZE == 32768 case, the compiler already
2851 		 * properly aligns the dirty field, so no forcible
2852 		 * alignment is needed.  We only require the existence of
2853 		 * atomic_clear_64() when the page size is 32768.
2854 		 */
2855 		addr = (uintptr_t)&m->dirty;
2856 #if PAGE_SIZE == 32768
2857 		atomic_clear_64((uint64_t *)addr, pagebits);
2858 #elif PAGE_SIZE == 16384
2859 		atomic_clear_32((uint32_t *)addr, pagebits);
2860 #else		/* PAGE_SIZE <= 8192 */
2861 		/*
2862 		 * Use a trick to perform a 32-bit atomic on the
2863 		 * containing aligned word, to not depend on the existence
2864 		 * of atomic_clear_{8, 16}.
2865 		 */
2866 		shift = addr & (sizeof(uint32_t) - 1);
2867 #if BYTE_ORDER == BIG_ENDIAN
2868 		shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
2869 #else
2870 		shift *= NBBY;
2871 #endif
2872 		addr &= ~(sizeof(uint32_t) - 1);
2873 		atomic_clear_32((uint32_t *)addr, pagebits << shift);
2874 #endif		/* PAGE_SIZE */
2875 	}
2876 }
2877 
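/*
 * Worked example (not part of the original source), assuming PAGE_SIZE
 * == 4096 so that vm_page_bits_t is a single byte: if &m->dirty lands at
 * an address with (addr & 3) == 3 on a little-endian machine, then shift
 * = 3 * NBBY = 24, addr is rounded down to the containing 32-bit word,
 * and atomic_clear_32(addr, pagebits << 24) clears exactly the dirty
 * byte without touching the neighboring fields in that word.
 */
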
2878 /*
2879  *	vm_page_set_validclean:
2880  *
2881  *	Sets portions of a page valid and clean.  The arguments are expected
2882  *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
2883  *	of any partial chunks touched by the range.  The invalid portion of
2884  *	such chunks will be zero'd.
2885  *
2886  *	(base + size) must be less than or equal to PAGE_SIZE.
2887  */
2888 void
2889 vm_page_set_validclean(vm_page_t m, int base, int size)
2890 {
2891 	vm_page_bits_t oldvalid, pagebits;
2892 	int endoff, frag;
2893 
2894 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2895 	if (size == 0)	/* handle degenerate case */
2896 		return;
2897 
2898 	/*
2899 	 * If the base is not DEV_BSIZE aligned and the valid
2900 	 * bit is clear, we have to zero out a portion of the
2901 	 * first block.
2902 	 */
2903 	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2904 	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
2905 		pmap_zero_page_area(m, frag, base - frag);
2906 
2907 	/*
2908 	 * If the ending offset is not DEV_BSIZE aligned and the
2909 	 * valid bit is clear, we have to zero out a portion of
2910 	 * the last block.
2911 	 */
2912 	endoff = base + size;
2913 	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2914 	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
2915 		pmap_zero_page_area(m, endoff,
2916 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2917 
2918 	/*
2919 	 * Set valid, clear dirty bits.  If validating the entire
2920 	 * page we can safely clear the pmap modify bit.  We also
2921 	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
2922 	 * takes a write fault on a MAP_NOSYNC memory area the flag will
2923 	 * be set again.
2924 	 *
2925 	 * We set valid bits inclusive of any overlap, but we can only
2926 	 * clear dirty bits for DEV_BSIZE chunks that are fully within
2927 	 * the range.
2928 	 */
2929 	oldvalid = m->valid;
2930 	pagebits = vm_page_bits(base, size);
2931 	m->valid |= pagebits;
2932 #if 0	/* NOT YET */
2933 	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
2934 		frag = DEV_BSIZE - frag;
2935 		base += frag;
2936 		size -= frag;
2937 		if (size < 0)
2938 			size = 0;
2939 	}
2940 	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
2941 #endif
2942 	if (base == 0 && size == PAGE_SIZE) {
2943 		/*
2944 		 * The page can only be modified within the pmap if it is
2945 		 * mapped, and it can only be mapped if it was previously
2946 		 * fully valid.
2947 		 */
2948 		if (oldvalid == VM_PAGE_BITS_ALL)
2949 			/*
2950 			 * Perform the pmap_clear_modify() first.  Otherwise,
2951 			 * a concurrent pmap operation, such as
2952 			 * pmap_protect(), could clear a modification in the
2953 			 * pmap and set the dirty field on the page before
2954 			 * pmap_clear_modify() had begun and after the dirty
2955 			 * field was cleared here.
2956 			 */
2957 			pmap_clear_modify(m);
2958 		m->dirty = 0;
2959 		m->oflags &= ~VPO_NOSYNC;
2960 	} else if (oldvalid != VM_PAGE_BITS_ALL)
2961 		m->dirty &= ~pagebits;
2962 	else
2963 		vm_page_clear_dirty_mask(m, pagebits);
2964 }
2965 
2966 void
2967 vm_page_clear_dirty(vm_page_t m, int base, int size)
2968 {
2969 
2970 	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
2971 }
2972 
2973 /*
2974  *	vm_page_set_invalid:
2975  *
2976  *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
2977  *	valid and dirty bits for the affected areas are cleared.
2978  */
2979 void
2980 vm_page_set_invalid(vm_page_t m, int base, int size)
2981 {
2982 	vm_page_bits_t bits;
2983 
2984 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2985 	bits = vm_page_bits(base, size);
2986 	if (m->valid == VM_PAGE_BITS_ALL && bits != 0)
2987 		pmap_remove_all(m);
2988 	KASSERT(!pmap_page_is_mapped(m),
2989 	    ("vm_page_set_invalid: page %p is mapped", m));
2990 	m->valid &= ~bits;
2991 	m->dirty &= ~bits;
2992 }
2993 
2994 /*
2995  * vm_page_zero_invalid()
2996  *
2997  *	The kernel assumes that the invalid portions of a page contain
2998  *	garbage, but such pages can be mapped into memory by user code.
2999  *	When this occurs, we must zero out the non-valid portions of the
3000  *	page so user code sees what it expects.
3001  *
3002  *	Pages are most often semi-valid when the end of a file is mapped
3003  *	into memory and the file's size is not page aligned.
3004  */
3005 void
3006 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
3007 {
3008 	int b;
3009 	int i;
3010 
3011 	VM_OBJECT_ASSERT_WLOCKED(m->object);
3012 	/*
3013 	 * Scan the valid bits looking for invalid sections that
3014 	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
3015 	 * valid bit may be set) have already been zeroed by
3016 	 * vm_page_set_validclean().
3017 	 */
3018 	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
3019 		if (i == (PAGE_SIZE / DEV_BSIZE) ||
3020 		    (m->valid & ((vm_page_bits_t)1 << i))) {
3021 			if (i > b) {
3022 				pmap_zero_page_area(m,
3023 				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
3024 			}
3025 			b = i + 1;
3026 		}
3027 	}
3028 
3029 	/*
3030 	 * setvalid is TRUE when we can safely set the zero'd areas
3031 	 * as being valid.  We can do this if there are no cache consistency
3032 	 * issues, e.g., it is OK to do so with UFS, but not with NFS.
3033 	 */
3034 	if (setvalid)
3035 		m->valid = VM_PAGE_BITS_ALL;
3036 }
3037 
3038 /*
3039  *	vm_page_is_valid:
3040  *
3041  *	Is the (partial) page valid?  Note that in the degenerate case
3042  *	where size == 0, this returns FALSE if the page is entirely
3043  *	invalid, and TRUE otherwise.
3044  */
3045 int
3046 vm_page_is_valid(vm_page_t m, int base, int size)
3047 {
3048 	vm_page_bits_t bits;
3049 
3050 	VM_OBJECT_ASSERT_LOCKED(m->object);
3051 	bits = vm_page_bits(base, size);
3052 	return (m->valid != 0 && (m->valid & bits) == bits);
3053 }
3054 
3055 /*
3056  * Set the page's dirty bits if the page is modified.
3057  */
3058 void
3059 vm_page_test_dirty(vm_page_t m)
3060 {
3061 
3062 	VM_OBJECT_ASSERT_WLOCKED(m->object);
3063 	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
3064 		vm_page_dirty(m);
3065 }
3066 
3067 void
3068 vm_page_lock_KBI(vm_page_t m, const char *file, int line)
3069 {
3070 
3071 	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
3072 }
3073 
3074 void
3075 vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
3076 {
3077 
3078 	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
3079 }
3080 
3081 int
3082 vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
3083 {
3084 
3085 	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
3086 }
3087 
3088 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
3089 void
3090 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
3091 {
3092 
3093 	vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
3094 }
3095 
3096 void
3097 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
3098 {
3099 
3100 	mtx_assert_(vm_page_lockptr(m), a, file, line);
3101 }
3102 #endif
3103 
3104 int so_zerocp_fullpage = 0;
3105 
3106 /*
3107  *	Replace the given page with a copy.  The copied page assumes
3108  *	the portion of the given page's "wire_count" that is not the
3109  *	responsibility of this copy-on-write mechanism.
3110  *
3111  *	The object containing the given page must have a non-zero
3112  *	paging-in-progress count and be locked.
3113  */
3114 void
3115 vm_page_cowfault(vm_page_t m)
3116 {
3117 	vm_page_t mnew;
3118 	vm_object_t object;
3119 	vm_pindex_t pindex;
3120 
3121 	vm_page_lock_assert(m, MA_OWNED);
3122 	object = m->object;
3123 	VM_OBJECT_ASSERT_WLOCKED(object);
3124 	KASSERT(object->paging_in_progress != 0,
3125 	    ("vm_page_cowfault: object %p's paging-in-progress count is zero.",
3126 	    object));
3127 	pindex = m->pindex;
3128 
3129  retry_alloc:
3130 	mnew = vm_page_alloc(NULL, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
3131 	if (mnew == NULL) {
3132 		vm_page_unlock(m);
3133 		VM_OBJECT_WUNLOCK(object);
3134 		VM_WAIT;
3135 		VM_OBJECT_WLOCK(object);
3136 		if (m == vm_page_lookup(object, pindex)) {
3137 			vm_page_lock(m);
3138 			goto retry_alloc;
3139 		} else {
3140 			/*
3141 			 * Page disappeared during the wait.
3142 			 */
3143 			return;
3144 		}
3145 	}
3146 
3147 	if (m->cow == 0) {
3148 		/*
3149 		 * check to see if we raced with an xmit complete when
3150 		 * waiting to allocate a page.  If so, put things back
3151 		 * the way they were
3152 		 */
3153 		vm_page_unlock(m);
3154 		vm_page_lock(mnew);
3155 		vm_page_free(mnew);
3156 		vm_page_unlock(mnew);
3157 	} else { /* clear COW & copy page */
3158 		pmap_remove_all(m);
3159 		mnew->object = object;
3160 		if (object->memattr != VM_MEMATTR_DEFAULT &&
3161 		    (object->flags & OBJ_FICTITIOUS) == 0)
3162 			pmap_page_set_memattr(mnew, object->memattr);
3163 		if (vm_page_replace(mnew, object, pindex) != m)
3164 			panic("vm_page_cowfault: invalid page replacement");
3165 		if (!so_zerocp_fullpage)
3166 			pmap_copy_page(m, mnew);
3167 		mnew->valid = VM_PAGE_BITS_ALL;
3168 		vm_page_dirty(mnew);
3169 		mnew->wire_count = m->wire_count - m->cow;
3170 		m->wire_count = m->cow;
3171 		vm_page_unlock(m);
3172 	}
3173 }
3174 
3175 void
3176 vm_page_cowclear(vm_page_t m)
3177 {
3178 
3179 	vm_page_lock_assert(m, MA_OWNED);
3180 	if (m->cow) {
3181 		m->cow--;
3182 		/*
3183 		 * let vm_fault add back write permission lazily
3184 		 */
3185 	}
3186 	/*
3187 	 *  sf_buf_free() will free the page, so we needn't do it here
3188 	 */
3189 }
3190 
3191 int
3192 vm_page_cowsetup(vm_page_t m)
3193 {
3194 
3195 	vm_page_lock_assert(m, MA_OWNED);
3196 	if ((m->flags & PG_FICTITIOUS) != 0 ||
3197 	    (m->oflags & VPO_UNMANAGED) != 0 ||
3198 	    m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYWLOCK(m->object))
3199 		return (EBUSY);
3200 	m->cow++;
3201 	pmap_remove_write(m);
3202 	VM_OBJECT_WUNLOCK(m->object);
3203 	return (0);
3204 }
3205 
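/*
 * Example (an illustrative sketch, not part of the original source): the
 * zero-copy socket send path uses this mechanism roughly as follows.
 * Before handing a page to the network stack:
 *
 *	vm_page_lock(m);
 *	error = vm_page_cowsetup(m);
 *	vm_page_unlock(m);
 *	if (error != 0)
 *		... fall back to copying the data ...
 *
 * and when the transmit completes:
 *
 *	vm_page_lock(m);
 *	vm_page_cowclear(m);
 *	vm_page_unlock(m);
 *
 * A write fault on the page in the meantime goes through
 * vm_page_cowfault().
 */
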
3206 #ifdef INVARIANTS
3207 void
3208 vm_page_object_lock_assert(vm_page_t m)
3209 {
3210 
3211 	/*
3212 	 * Certain of the page's fields may only be modified by the
3213 	 * holder of the containing object's lock or the exclusive busy
3214 	 * holder.  Unfortunately, the holder of the write busy is
3215 	 * not recorded, and thus cannot be checked here.
3216 	 */
3217 	if (m->object != NULL && !vm_page_xbusied(m))
3218 		VM_OBJECT_ASSERT_WLOCKED(m->object);
3219 }
3220 #endif
3221 
3222 #include "opt_ddb.h"
3223 #ifdef DDB
3224 #include <sys/kernel.h>
3225 
3226 #include <ddb/ddb.h>
3227 
3228 DB_SHOW_COMMAND(page, vm_page_print_page_info)
3229 {
3230 	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
3231 	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
3232 	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
3233 	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
3234 	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
3235 	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
3236 	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
3237 	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
3238 	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
3239 	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
3240 }
3241 
3242 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
3243 {
3244 	int dom;
3245 
3246 	db_printf("pq_free %d pq_cache %d\n",
3247 	    cnt.v_free_count, cnt.v_cache_count);
3248 	for (dom = 0; dom < vm_ndomains; dom++) {
3249 		db_printf(
3250 	"dom %d page_cnt %d free %d pq_act %d pq_inact %d pass %d\n",
3251 		    dom,
3252 		    vm_dom[dom].vmd_page_count,
3253 		    vm_dom[dom].vmd_free_count,
3254 		    vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
3255 		    vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
3256 		    vm_dom[dom].vmd_pass);
3257 	}
3258 }
3259 
3260 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
3261 {
3262 	vm_page_t m;
3263 	boolean_t phys;
3264 
3265 	if (!have_addr) {
3266 		db_printf("show pginfo addr\n");
3267 		return;
3268 	}
3269 
3270 	phys = strchr(modif, 'p') != NULL;
3271 	if (phys)
3272 		m = PHYS_TO_VM_PAGE(addr);
3273 	else
3274 		m = (vm_page_t)addr;
3275 	db_printf(
3276     "page %p obj %p pidx 0x%jx phys 0x%jx q %d hold %d wire %d\n"
3277     "  af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
3278 	    m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
3279 	    m->queue, m->hold_count, m->wire_count, m->aflags, m->oflags,
3280 	    m->flags, m->act_count, m->busy_lock, m->valid, m->dirty);
3281 }
3282 #endif /* DDB */
3283