xref: /freebsd/sys/vm/vm_page.c (revision 56abdbc5f709fc0e18624b3b7586647459922a41)
1 /*-
2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * The Mach Operating System project at Carnegie-Mellon University.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 /*-
37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38  * All rights reserved.
39  *
40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  */
62 
63 /*
64  *	Resident memory management module.
65  */
66 
67 #include <sys/cdefs.h>
68 #include "opt_vm.h"
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/counter.h>
73 #include <sys/domainset.h>
74 #include <sys/kernel.h>
75 #include <sys/limits.h>
76 #include <sys/linker.h>
77 #include <sys/lock.h>
78 #include <sys/malloc.h>
79 #include <sys/mman.h>
80 #include <sys/msgbuf.h>
81 #include <sys/mutex.h>
82 #include <sys/proc.h>
83 #include <sys/rwlock.h>
84 #include <sys/sleepqueue.h>
85 #include <sys/sbuf.h>
86 #include <sys/sched.h>
87 #include <sys/sf_buf.h>
88 #include <sys/smp.h>
89 #include <sys/sysctl.h>
90 #include <sys/vmmeter.h>
91 #include <sys/vnode.h>
92 
93 #include <vm/vm.h>
94 #include <vm/pmap.h>
95 #include <vm/vm_param.h>
96 #include <vm/vm_domainset.h>
97 #include <vm/vm_kern.h>
98 #include <vm/vm_map.h>
99 #include <vm/vm_object.h>
100 #include <vm/vm_page.h>
101 #include <vm/vm_pageout.h>
102 #include <vm/vm_phys.h>
103 #include <vm/vm_pagequeue.h>
104 #include <vm/vm_pager.h>
105 #include <vm/vm_radix.h>
106 #include <vm/vm_reserv.h>
107 #include <vm/vm_extern.h>
108 #include <vm/vm_dumpset.h>
109 #include <vm/uma.h>
110 #include <vm/uma_int.h>
111 
112 #include <machine/md_var.h>
113 #if defined(__aarch64__)
114 #include <machine/pmap.h>
115 #include <machine/rsi.h>
116 #endif
117 
118 struct vm_domain vm_dom[MAXMEMDOM];
119 
120 DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);
121 
122 struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
123 /* The following fields are protected by the domainset lock. */
124 domainset_t __exclusive_cache_line vm_min_domains;
125 domainset_t __exclusive_cache_line vm_severe_domains;
126 static int vm_min_waiters;
127 static int vm_severe_waiters;
128 static int vm_pageproc_waiters;
129 
130 static SYSCTL_NODE(_vm_stats, OID_AUTO, page, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
131     "VM page statistics");
132 
133 static COUNTER_U64_DEFINE_EARLY(pqstate_commit_retries);
134 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, pqstate_commit_retries,
135     CTLFLAG_RD, &pqstate_commit_retries,
136     "Number of failed per-page atomic queue state updates");
137 
138 static COUNTER_U64_DEFINE_EARLY(queue_ops);
139 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_ops,
140     CTLFLAG_RD, &queue_ops,
141     "Number of batched queue operations");
142 
143 static COUNTER_U64_DEFINE_EARLY(queue_nops);
144 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_nops,
145     CTLFLAG_RD, &queue_nops,
146     "Number of batched queue operations with no effects");
147 
148 static unsigned long nofreeq_size;
149 SYSCTL_ULONG(_vm_stats_page, OID_AUTO, nofreeq_size, CTLFLAG_RD,
150     &nofreeq_size, 0,
151     "Size of the nofree queue");
152 
153 #ifdef INVARIANTS
154 bool vm_check_pg_zero = false;
155 SYSCTL_BOOL(_debug, OID_AUTO, vm_check_pg_zero, CTLFLAG_RWTUN,
156     &vm_check_pg_zero, 0,
157     "verify content of freed zero-filled pages");
158 #endif
159 
160 /*
161  * bogus page -- for I/O to/from partially complete buffers,
162  * or for paging into sparsely invalid regions.
163  */
164 vm_page_t bogus_page;
165 
166 vm_page_t vm_page_array;
167 long vm_page_array_size;
168 long first_page;
169 
170 struct bitset *vm_page_dump;
171 long vm_page_dump_pages;
172 
173 static TAILQ_HEAD(, vm_page) blacklist_head;
174 static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
175 SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
176     CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");
177 
178 static uma_zone_t fakepg_zone;
179 
180 static void vm_page_alloc_check(vm_page_t m);
181 static vm_page_t vm_page_alloc_nofree_domain(int domain, int req);
182 static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
183     vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
184 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
185 static void vm_page_enqueue(vm_page_t m, uint8_t queue);
186 static bool vm_page_free_prep(vm_page_t m);
187 static void vm_page_free_toq(vm_page_t m);
188 static void vm_page_init(void *dummy);
189 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object);
190 static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
191     const uint16_t nflag);
192 static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
193     vm_page_t m_run, vm_paddr_t high);
194 static void vm_page_release_toq(vm_page_t m, uint8_t nqueue, bool noreuse);
195 static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
196     int req);
197 static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
198     int flags);
199 static void vm_page_zone_release(void *arg, void **store, int cnt);
200 
201 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
202 
203 static void
204 vm_page_init(void *dummy)
205 {
206 
207 	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
208 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
209 	bogus_page = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_NOFREE);
210 }
211 
212 static int pgcache_zone_max_pcpu;
213 SYSCTL_INT(_vm, OID_AUTO, pgcache_zone_max_pcpu,
214     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pgcache_zone_max_pcpu, 0,
215     "Per-CPU page cache size");
216 
217 /*
218  * The cache page zone is initialized later since we need to be able to allocate
219  * pages before UMA is fully initialized.
220  */
221 static void
222 vm_page_init_cache_zones(void *dummy __unused)
223 {
224 	struct vm_domain *vmd;
225 	struct vm_pgcache *pgcache;
226 	int cache, domain, maxcache, pool;
227 
228 	TUNABLE_INT_FETCH("vm.pgcache_zone_max_pcpu", &pgcache_zone_max_pcpu);
229 	maxcache = pgcache_zone_max_pcpu * mp_ncpus;
230 	for (domain = 0; domain < vm_ndomains; domain++) {
231 		vmd = VM_DOMAIN(domain);
232 		for (pool = 0; pool < VM_NFREEPOOL; pool++) {
233 #ifdef VM_FREEPOOL_LAZYINIT
234 			if (pool == VM_FREEPOOL_LAZYINIT)
235 				continue;
236 #endif
237 			pgcache = &vmd->vmd_pgcache[pool];
238 			pgcache->domain = domain;
239 			pgcache->pool = pool;
240 			pgcache->zone = uma_zcache_create("vm pgcache",
241 			    PAGE_SIZE, NULL, NULL, NULL, NULL,
242 			    vm_page_zone_import, vm_page_zone_release, pgcache,
243 			    UMA_ZONE_VM);
244 
245 			/*
246 			 * Limit each pool's zone to 0.1% of the pages in the
247 			 * domain.
248 			 */
249 			cache = maxcache != 0 ? maxcache :
250 			    vmd->vmd_page_count / 1000;
251 			uma_zone_set_maxcache(pgcache->zone, cache);
252 		}
253 	}
254 }
255 SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
256 
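/*
 * Illustrative tuning example (not part of the source): setting the loader
 * tunable
 *
 *	vm.pgcache_zone_max_pcpu=64
 *
 * makes maxcache = 64 * mp_ncpus above, capping each pool's per-domain
 * page cache zone at that many cached pages; leaving it at the default of
 * 0 falls back to the 0.1% limit computed from vmd_page_count.
 */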
257 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
258 #if PAGE_SIZE == 32768
259 #ifdef CTASSERT
260 CTASSERT(sizeof(u_long) >= 8);
261 #endif
262 #endif
263 
264 /*
265  *	vm_set_page_size:
266  *
267  *	Sets the page size, perhaps based upon the memory
268  *	size.  Must be called before any use of page-size
269  *	dependent functions.
270  */
271 void
272 vm_set_page_size(void)
273 {
274 	if (vm_cnt.v_page_size == 0)
275 		vm_cnt.v_page_size = PAGE_SIZE;
276 	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
277 		panic("vm_set_page_size: page size not a power of two");
278 }
279 
280 /*
281  *	vm_page_blacklist_next:
282  *
283  *	Find the next entry in the provided string of blacklist
284  *	addresses.  Entries are separated by space, comma, or newline.
285  *	If an invalid integer is encountered then the rest of the
286  *	string is skipped.  Updates the list pointer to the next
287  *	character, or NULL if the string is exhausted or invalid.
288  */
289 static vm_paddr_t
290 vm_page_blacklist_next(char **list, char *end)
291 {
292 	vm_paddr_t bad;
293 	char *cp, *pos;
294 
295 	if (list == NULL || *list == NULL)
296 		return (0);
297 	if (**list =='\0') {
298 		*list = NULL;
299 		return (0);
300 	}
301 
302 	/*
303 	 * If there's no end pointer then the buffer is coming from
304 	 * the kenv and we know it's null-terminated.
305 	 */
306 	if (end == NULL)
307 		end = *list + strlen(*list);
308 
309 	/* Ensure that strtoq() won't walk off the end */
310 	if (*end != '\0') {
311 		if (*end == '\n' || *end == ' ' || *end  == ',')
312 			*end = '\0';
313 		else {
314 			printf("Blacklist not terminated, skipping\n");
315 			*list = NULL;
316 			return (0);
317 		}
318 	}
319 
320 	for (pos = *list; *pos != '\0'; pos = cp) {
321 		bad = strtoq(pos, &cp, 0);
322 		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
323 			if (bad == 0) {
324 				if (++cp < end)
325 					continue;
326 				else
327 					break;
328 			}
329 		} else
330 			break;
331 		if (*cp == '\0' || ++cp >= end)
332 			*list = NULL;
333 		else
334 			*list = cp;
335 		return (trunc_page(bad));
336 	}
337 	printf("Garbage in RAM blacklist, skipping\n");
338 	*list = NULL;
339 	return (0);
340 }
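/*
 * Illustrative example (not from the source): a loader.conf entry such as
 *
 *	vm.blacklist="0x7f654000,0x7f655000 0x12340000"
 *
 * is consumed one entry at a time by vm_page_blacklist_next(); each value
 * may be written in any strtoq() base-0 form and is truncated to a page
 * boundary before the page is withheld from the free lists.
 */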
341 
342 bool
343 vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
344 {
345 	struct vm_domain *vmd;
346 	vm_page_t m;
347 	bool found;
348 
349 	m = vm_phys_paddr_to_vm_page(pa);
350 	if (m == NULL)
351 		return (true); /* page does not exist, no failure */
352 
353 	vmd = VM_DOMAIN(vm_phys_domain(pa));
354 	vm_domain_free_lock(vmd);
355 	found = vm_phys_unfree_page(pa);
356 	vm_domain_free_unlock(vmd);
357 	if (found) {
358 		vm_domain_freecnt_inc(vmd, -1);
359 		TAILQ_INSERT_TAIL(&blacklist_head, m, plinks.q);
360 		if (verbose)
361 			printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
362 	}
363 	return (found);
364 }
365 
366 /*
367  *	vm_page_blacklist_check:
368  *
369  *	Iterate through the provided string of blacklist addresses, pulling
370  *	each entry out of the physical allocator free list and putting it
371  *	onto a list for reporting via the vm.page_blacklist sysctl.
372  */
373 static void
374 vm_page_blacklist_check(char *list, char *end)
375 {
376 	vm_paddr_t pa;
377 	char *next;
378 
379 	next = list;
380 	while (next != NULL) {
381 		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
382 			continue;
383 		vm_page_blacklist_add(pa, bootverbose);
384 	}
385 }
386 
387 /*
388  *	vm_page_blacklist_load:
389  *
390  *	Search for a special module named "ram_blacklist".  It'll be a
391  *	plain text file provided by the user via the loader directive
392  *	of the same name.
393  */
394 static void
395 vm_page_blacklist_load(char **list, char **end)
396 {
397 	void *mod;
398 	u_char *ptr;
399 	u_int len;
400 
401 	mod = NULL;
402 	ptr = NULL;
403 
404 	mod = preload_search_by_type("ram_blacklist");
405 	if (mod != NULL) {
406 		ptr = preload_fetch_addr(mod);
407 		len = preload_fetch_size(mod);
408         }
409 
410 	if (ptr != NULL && len > 0) {
411 		*list = ptr;
412 		*end = ptr + len - 1;
413 	} else {
414 		*list = NULL;
415 		*end = NULL;
416 	}
417 
418 	return;
419 }
420 
421 static int
422 sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
423 {
424 	vm_page_t m;
425 	struct sbuf sbuf;
426 	int error, first;
427 
428 	first = 1;
429 	error = sysctl_wire_old_buffer(req, 0);
430 	if (error != 0)
431 		return (error);
432 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
433 	TAILQ_FOREACH(m, &blacklist_head, plinks.q) {
434 		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
435 		    (uintmax_t)m->phys_addr);
436 		first = 0;
437 	}
438 	error = sbuf_finish(&sbuf);
439 	sbuf_delete(&sbuf);
440 	return (error);
441 }
442 
443 /*
444  * Initialize a dummy page for use in scans of the specified paging queue.
445  * In principle, this function only needs to set the flag PG_MARKER.
446  * Nonetheless, it write busies the page as a safety precaution.
447  */
448 void
449 vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags)
450 {
451 
452 	bzero(marker, sizeof(*marker));
453 	marker->flags = PG_MARKER;
454 	marker->a.flags = aflags;
455 	marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
456 	marker->a.queue = queue;
457 }
458 
459 static void
460 vm_page_domain_init(int domain)
461 {
462 	struct vm_domain *vmd;
463 	struct vm_pagequeue *pq;
464 	int i;
465 
466 	vmd = VM_DOMAIN(domain);
467 	bzero(vmd, sizeof(*vmd));
468 	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
469 	    "vm inactive pagequeue";
470 	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
471 	    "vm active pagequeue";
472 	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
473 	    "vm laundry pagequeue";
474 	*__DECONST(const char **,
475 	    &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
476 	    "vm unswappable pagequeue";
477 	vmd->vmd_domain = domain;
478 	vmd->vmd_page_count = 0;
479 	vmd->vmd_free_count = 0;
480 	vmd->vmd_segs = 0;
481 	vmd->vmd_oom = false;
482 	vmd->vmd_helper_threads_enabled = true;
483 	for (i = 0; i < PQ_COUNT; i++) {
484 		pq = &vmd->vmd_pagequeues[i];
485 		TAILQ_INIT(&pq->pq_pl);
486 		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
487 		    MTX_DEF | MTX_DUPOK);
488 		pq->pq_pdpages = 0;
489 		vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
490 	}
491 	mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
492 	mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
493 	snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);
494 
495 	/*
496 	 * inacthead is used to provide FIFO ordering for LRU-bypassing
497 	 * insertions.
498 	 */
499 	vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
500 	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
501 	    &vmd->vmd_inacthead, plinks.q);
502 
503 	/*
504 	 * The clock pages are used to implement active queue scanning without
505 	 * requeues.  Scans start at clock[0], which is advanced after the scan
506 	 * ends.  When the two clock hands meet, they are reset and scanning
507 	 * resumes from the head of the queue.
508 	 */
509 	vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
510 	vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
511 	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
512 	    &vmd->vmd_clock[0], plinks.q);
513 	TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
514 	    &vmd->vmd_clock[1], plinks.q);
515 }
516 
517 /*
518  * Initialize a physical page in preparation for adding it to the free
519  * lists.
520  */
521 void
522 vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind, int pool)
523 {
524 	m->object = NULL;
525 	m->ref_count = 0;
526 	m->busy_lock = VPB_FREED;
527 	m->flags = m->a.flags = 0;
528 	m->phys_addr = pa;
529 	m->a.queue = PQ_NONE;
530 	m->psind = 0;
531 	m->segind = segind;
532 	m->order = VM_NFREEORDER;
533 	m->pool = pool;
534 	m->valid = m->dirty = 0;
535 	pmap_page_init(m);
536 }
537 
538 #ifndef PMAP_HAS_PAGE_ARRAY
539 static vm_paddr_t
540 vm_page_array_alloc(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t page_range)
541 {
542 	vm_paddr_t new_end;
543 
544 	/*
545 	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
546 	 * However, because this page is allocated from KVM, out-of-bounds
547 	 * accesses using the direct map will not be trapped.
548 	 */
549 	*vaddr += PAGE_SIZE;
550 
551 	/*
552 	 * Allocate physical memory for the page structures, and map it.
553 	 */
554 	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
555 	vm_page_array = (vm_page_t)pmap_map(vaddr, new_end, end,
556 	    VM_PROT_READ | VM_PROT_WRITE);
557 	vm_page_array_size = page_range;
558 
559 	return (new_end);
560 }
561 #endif
562 
563 /*
564  *	vm_page_startup:
565  *
566  *	Initializes the resident memory module.  Allocates physical memory for
567  *	bootstrapping UMA and some data structures that are used to manage
568  *	physical pages.  Initializes these structures, and populates the free
569  *	page queues.
570  */
571 vm_offset_t
572 vm_page_startup(vm_offset_t vaddr)
573 {
574 	struct vm_phys_seg *seg;
575 	struct vm_domain *vmd;
576 	vm_page_t m;
577 	char *list, *listend;
578 	vm_paddr_t end, high_avail, low_avail, new_end, size;
579 	vm_paddr_t page_range __unused;
580 	vm_paddr_t last_pa, pa, startp, endp;
581 	u_long pagecount;
582 #if MINIDUMP_PAGE_TRACKING
583 	u_long vm_page_dump_size;
584 #endif
585 	int biggestone, i, segind;
586 #ifdef WITNESS
587 	void *mapped;
588 	int witness_size;
589 #endif
590 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
591 	long ii;
592 #endif
593 	int pool;
594 #ifdef VM_FREEPOOL_LAZYINIT
595 	int lazyinit;
596 #endif
597 
598 	vaddr = round_page(vaddr);
599 
600 	vm_phys_early_startup();
601 	biggestone = vm_phys_avail_largest();
602 	end = phys_avail[biggestone+1];
603 
604 	/*
605 	 * Initialize the page and queue locks.
606 	 */
607 	mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
608 	for (i = 0; i < vm_ndomains; i++)
609 		vm_page_domain_init(i);
610 
611 	new_end = end;
612 #ifdef WITNESS
613 	witness_size = round_page(witness_startup_count());
614 	new_end -= witness_size;
615 	mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
616 	    VM_PROT_READ | VM_PROT_WRITE);
617 	bzero(mapped, witness_size);
618 	witness_startup(mapped);
619 #endif
620 
621 #if MINIDUMP_PAGE_TRACKING
622 	/*
623 	 * Allocate a bitmap to indicate that a random physical page
624 	 * needs to be included in a minidump.
625 	 *
626 	 * The amd64 port needs this to indicate which direct map pages
627 	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
628 	 *
629 	 * However, i386 still needs this workspace internally within the
630 	 * minidump code.  In theory, they are not needed on i386, but are
631 	 * included should the sf_buf code decide to use them.
632 	 */
633 	last_pa = 0;
634 	vm_page_dump_pages = 0;
635 	for (i = 0; dump_avail[i + 1] != 0; i += 2) {
636 		vm_page_dump_pages += howmany(dump_avail[i + 1], PAGE_SIZE) -
637 		    dump_avail[i] / PAGE_SIZE;
638 		if (dump_avail[i + 1] > last_pa)
639 			last_pa = dump_avail[i + 1];
640 	}
641 	vm_page_dump_size = round_page(BITSET_SIZE(vm_page_dump_pages));
642 	new_end -= vm_page_dump_size;
643 	vm_page_dump = pmap_map(&vaddr, new_end,
644 	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
645 	bzero((void *)vm_page_dump, vm_page_dump_size);
646 #if MINIDUMP_STARTUP_PAGE_TRACKING
647 	/*
648 	 * Include the UMA bootstrap pages, witness pages and vm_page_dump
649 	 * in a crash dump.  When pmap_map() uses the direct map, they are
650 	 * not automatically included.
651 	 */
652 	for (pa = new_end; pa < end; pa += PAGE_SIZE)
653 		dump_add_page(pa);
654 #endif
655 #else
656 	(void)last_pa;
657 #endif
658 	phys_avail[biggestone + 1] = new_end;
659 #ifdef __amd64__
660 	/*
661 	 * Request that the physical pages underlying the message buffer be
662 	 * included in a crash dump.  Since the message buffer is accessed
663 	 * through the direct map, they are not automatically included.
664 	 */
665 	pa = DMAP_TO_PHYS(msgbufp->msg_ptr);
666 	last_pa = pa + round_page(msgbufsize);
667 	while (pa < last_pa) {
668 		dump_add_page(pa);
669 		pa += PAGE_SIZE;
670 	}
671 #else
672 	(void)pa;
673 #endif
674 
675 	/*
676 	 * Determine the lowest and highest physical addresses and, in the case
677 	 * of VM_PHYSSEG_SPARSE, the exact size of the available physical
678 	 * memory.  vm_phys_early_startup() already checked that phys_avail[]
679 	 * has at least one element.
680 	 */
681 #ifdef VM_PHYSSEG_SPARSE
682 	size = phys_avail[1] - phys_avail[0];
683 #endif
684 	low_avail = phys_avail[0];
685 	high_avail = phys_avail[1];
686 	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
687 #ifdef VM_PHYSSEG_SPARSE
688 		size += phys_avail[i + 1] - phys_avail[i];
689 #endif
690 		if (phys_avail[i] < low_avail)
691 			low_avail = phys_avail[i];
692 		if (phys_avail[i + 1] > high_avail)
693 			high_avail = phys_avail[i + 1];
694 	}
695 	for (i = 0; i < vm_phys_nsegs; i++) {
696 #ifdef VM_PHYSSEG_SPARSE
697 		size += vm_phys_segs[i].end - vm_phys_segs[i].start;
698 #endif
699 		if (vm_phys_segs[i].start < low_avail)
700 			low_avail = vm_phys_segs[i].start;
701 		if (vm_phys_segs[i].end > high_avail)
702 			high_avail = vm_phys_segs[i].end;
703 	}
704 	first_page = low_avail / PAGE_SIZE;
705 #ifdef VM_PHYSSEG_DENSE
706 	size = high_avail - low_avail;
707 #endif
708 
709 #ifdef PMAP_HAS_PAGE_ARRAY
710 	pmap_page_array_startup(size / PAGE_SIZE);
711 	biggestone = vm_phys_avail_largest();
712 	end = new_end = phys_avail[biggestone + 1];
713 #else
714 #ifdef VM_PHYSSEG_DENSE
715 	/*
716 	 * In the VM_PHYSSEG_DENSE case, the number of pages can account for
717 	 * the overhead of a page structure per page only if vm_page_array is
718 	 * allocated from the last physical memory chunk.  Otherwise, we must
719 	 * allocate page structures representing the physical memory
720 	 * underlying vm_page_array, even though they will not be used.
721 	 */
722 	if (new_end != high_avail)
723 		page_range = size / PAGE_SIZE;
724 	else
725 #endif
726 	{
727 		page_range = size / (PAGE_SIZE + sizeof(struct vm_page));
728 
729 		/*
730 		 * If the partial bytes remaining are large enough for
731 		 * a page (PAGE_SIZE) without a corresponding
732 		 * 'struct vm_page', then new_end will contain an
733 		 * extra page after subtracting the length of the VM
734 		 * page array.  Compensate by subtracting an extra
735 		 * page from new_end.
736 		 */
737 		if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
738 			if (new_end == high_avail)
739 				high_avail -= PAGE_SIZE;
740 			new_end -= PAGE_SIZE;
741 		}
742 	}
743 	end = new_end;
744 	new_end = vm_page_array_alloc(&vaddr, end, page_range);
745 #endif
746 
747 #if VM_NRESERVLEVEL > 0
748 	/*
749 	 * Allocate physical memory for the reservation management system's
750 	 * data structures, and map it.
751 	 */
752 	new_end = vm_reserv_startup(&vaddr, new_end);
753 #endif
754 #if MINIDUMP_PAGE_TRACKING && MINIDUMP_STARTUP_PAGE_TRACKING
755 	/*
756 	 * Include vm_page_array and vm_reserv_array in a crash dump.
757 	 */
758 	for (pa = new_end; pa < end; pa += PAGE_SIZE)
759 		dump_add_page(pa);
760 #endif
761 	phys_avail[biggestone + 1] = new_end;
762 
763 	/*
764 	 * Add physical memory segments corresponding to the available
765 	 * physical pages.
766 	 */
767 	for (i = 0; phys_avail[i + 1] != 0; i += 2)
768 		vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
769 
770 	/*
771 	 * Initialize the physical memory allocator.
772 	 */
773 	vm_phys_init();
774 
775 	pool = VM_FREEPOOL_DEFAULT;
776 #ifdef VM_FREEPOOL_LAZYINIT
777 	lazyinit = 1;
778 	TUNABLE_INT_FETCH("debug.vm.lazy_page_init", &lazyinit);
779 	if (lazyinit)
780 		pool = VM_FREEPOOL_LAZYINIT;
781 #endif
782 
783 	/*
784 	 * Initialize the page structures and add every available page to the
785 	 * physical memory allocator's free lists.
786 	 */
787 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
788 	for (ii = 0; ii < vm_page_array_size; ii++) {
789 		m = &vm_page_array[ii];
790 		vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0,
791 		    VM_FREEPOOL_DEFAULT);
792 		m->flags = PG_FICTITIOUS;
793 	}
794 #endif
795 	vm_cnt.v_page_count = 0;
796 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
797 		seg = &vm_phys_segs[segind];
798 
799 		/*
800 		 * Initialize pages not covered by phys_avail[], since they
801 		 * might be freed to the allocator at some future point, e.g.,
802 		 * by kmem_bootstrap_free().
803 		 */
804 		startp = seg->start;
805 		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
806 			if (startp >= seg->end)
807 				break;
808 			if (phys_avail[i + 1] < startp)
809 				continue;
810 			if (phys_avail[i] <= startp) {
811 				startp = phys_avail[i + 1];
812 				continue;
813 			}
814 			m = vm_phys_seg_paddr_to_vm_page(seg, startp);
815 			for (endp = MIN(phys_avail[i], seg->end);
816 			    startp < endp; startp += PAGE_SIZE, m++) {
817 				vm_page_init_page(m, startp, segind,
818 				    VM_FREEPOOL_DEFAULT);
819 			}
820 		}
821 
822 		/*
823 		 * Add the segment's pages that are covered by one of
824 		 * phys_avail's ranges to the free lists.
825 		 */
826 		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
827 			if (seg->end <= phys_avail[i] ||
828 			    seg->start >= phys_avail[i + 1])
829 				continue;
830 
831 			startp = MAX(seg->start, phys_avail[i]);
832 			endp = MIN(seg->end, phys_avail[i + 1]);
833 			pagecount = (u_long)atop(endp - startp);
834 			if (pagecount == 0)
835 				continue;
836 
837 			/*
838 			 * If lazy vm_page initialization is not enabled, simply
839 			 * initialize all of the pages in the segment covered by
840 			 * phys_avail.  Otherwise, initialize only the first
841 			 * page of each run of free pages handed to the vm_phys
842 			 * allocator, which in turn defers initialization of
843 			 * pages until they are needed.
844 			 *
845 			 * This avoids blocking the boot process for long
846 			 * periods, which may be relevant for VMs (which ought
847 			 * to boot as quickly as possible) and/or systems with
848 			 * large amounts of physical memory.
849 			 */
850 			m = vm_phys_seg_paddr_to_vm_page(seg, startp);
851 			vm_page_init_page(m, startp, segind, pool);
852 			if (pool == VM_FREEPOOL_DEFAULT) {
853 				for (u_long j = 1; j < pagecount; j++) {
854 					vm_page_init_page(&m[j],
855 					    startp + ptoa((vm_paddr_t)j),
856 					    segind, pool);
857 				}
858 			}
859 			vmd = VM_DOMAIN(seg->domain);
860 			vm_domain_free_lock(vmd);
861 			vm_phys_enqueue_contig(m, pool, pagecount);
862 			vm_domain_free_unlock(vmd);
863 			vm_domain_freecnt_inc(vmd, pagecount);
864 			vm_cnt.v_page_count += (u_int)pagecount;
865 			vmd->vmd_page_count += (u_int)pagecount;
866 			vmd->vmd_segs |= 1UL << segind;
867 		}
868 	}
869 
870 	/*
871 	 * Remove blacklisted pages from the physical memory allocator.
872 	 */
873 	TAILQ_INIT(&blacklist_head);
874 	vm_page_blacklist_load(&list, &listend);
875 	vm_page_blacklist_check(list, listend);
876 
877 	list = kern_getenv("vm.blacklist");
878 	vm_page_blacklist_check(list, NULL);
879 
880 	freeenv(list);
881 #if VM_NRESERVLEVEL > 0
882 	/*
883 	 * Initialize the reservation management system.
884 	 */
885 	vm_reserv_init();
886 #endif
887 
888 	return (vaddr);
889 }
890 
891 void
892 vm_page_reference(vm_page_t m)
893 {
894 
895 	vm_page_aflag_set(m, PGA_REFERENCED);
896 }
897 
898 /*
899  *	vm_page_trybusy
900  *
901  *	Helper routine for grab functions to trylock busy.
902  *
903  *	Returns true on success and false on failure.
904  */
905 static bool
906 vm_page_trybusy(vm_page_t m, int allocflags)
907 {
908 
909 	if ((allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0)
910 		return (vm_page_trysbusy(m));
911 	else
912 		return (vm_page_tryxbusy(m));
913 }
914 
915 /*
916  *	vm_page_tryacquire
917  *
918  *	Helper routine for grab functions to trylock busy and wire.
919  *
920  *	Returns true on success and false on failure.
921  */
922 static inline bool
923 vm_page_tryacquire(vm_page_t m, int allocflags)
924 {
925 	bool locked;
926 
927 	locked = vm_page_trybusy(m, allocflags);
928 	if (locked && (allocflags & VM_ALLOC_WIRED) != 0)
929 		vm_page_wire(m);
930 	return (locked);
931 }
932 
933 /*
934  *	vm_page_busy_acquire:
935  *
936  *	Acquire the busy lock as described by VM_ALLOC_* flags.  Will loop
937  *	and drop the object lock if necessary.
938  */
939 bool
940 vm_page_busy_acquire(vm_page_t m, int allocflags)
941 {
942 	vm_object_t obj;
943 	bool locked;
944 
945 	/*
946 	 * The page-specific object must be cached because page
947 	 * identity can change during the sleep, causing the
948 	 * re-lock of a different object.
949 	 * It is assumed that a reference to the object is already
950 	 * held by the callers.
951 	 */
952 	obj = atomic_load_ptr(&m->object);
953 	for (;;) {
954 		if (vm_page_tryacquire(m, allocflags))
955 			return (true);
956 		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
957 			return (false);
958 		if (obj != NULL)
959 			locked = VM_OBJECT_WOWNED(obj);
960 		else
961 			locked = false;
962 		MPASS(locked || vm_page_wired(m));
963 		if (_vm_page_busy_sleep(obj, m, m->pindex, "vmpba", allocflags,
964 		    locked) && locked)
965 			VM_OBJECT_WLOCK(obj);
966 		if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
967 			return (false);
968 		KASSERT(m->object == obj || m->object == NULL,
969 		    ("vm_page_busy_acquire: page %p does not belong to %p",
970 		    m, obj));
971 	}
972 }
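/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_lookup(object, pindex);
 *	if (m != NULL && vm_page_busy_acquire(m, VM_ALLOC_SBUSY)) {
 *		... examine the page ...
 *		vm_page_sunbusy(m);
 *	}
 *	VM_OBJECT_WUNLOCK(object);
 *
 * With VM_ALLOC_SBUSY the busy lock is taken shared; note that the object
 * lock may be dropped and reacquired while the caller sleeps.
 */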
973 
974 /*
975  *	vm_page_busy_downgrade:
976  *
977  *	Downgrade an exclusive busy page into a single shared busy page.
978  */
979 void
980 vm_page_busy_downgrade(vm_page_t m)
981 {
982 	u_int x;
983 
984 	vm_page_assert_xbusied(m);
985 
986 	x = vm_page_busy_fetch(m);
987 	for (;;) {
988 		if (atomic_fcmpset_rel_int(&m->busy_lock,
989 		    &x, VPB_SHARERS_WORD(1)))
990 			break;
991 	}
992 	if ((x & VPB_BIT_WAITERS) != 0)
993 		wakeup(m);
994 }
995 
996 /*
997  *
998  *	vm_page_busy_tryupgrade:
999  *
1000  *	Attempt to upgrade a single shared busy into an exclusive busy.
1001  */
1002 int
1003 vm_page_busy_tryupgrade(vm_page_t m)
1004 {
1005 	u_int ce, x;
1006 
1007 	vm_page_assert_sbusied(m);
1008 
1009 	x = vm_page_busy_fetch(m);
1010 	ce = VPB_CURTHREAD_EXCLUSIVE;
1011 	for (;;) {
1012 		if (VPB_SHARERS(x) > 1)
1013 			return (0);
1014 		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
1015 		    ("vm_page_busy_tryupgrade: invalid lock state"));
1016 		if (!atomic_fcmpset_acq_int(&m->busy_lock, &x,
1017 		    ce | (x & VPB_BIT_WAITERS)))
1018 			continue;
1019 		return (1);
1020 	}
1021 }
1022 
1023 /*
1024  *	vm_page_sbusied:
1025  *
1026  *	Return a positive value if the page is shared busied, 0 otherwise.
1027  */
1028 int
1029 vm_page_sbusied(vm_page_t m)
1030 {
1031 	u_int x;
1032 
1033 	x = vm_page_busy_fetch(m);
1034 	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
1035 }
1036 
1037 /*
1038  *	vm_page_sunbusy:
1039  *
1040  *	Shared unbusy a page.
1041  */
1042 void
1043 vm_page_sunbusy(vm_page_t m)
1044 {
1045 	u_int x;
1046 
1047 	vm_page_assert_sbusied(m);
1048 
1049 	x = vm_page_busy_fetch(m);
1050 	for (;;) {
1051 		KASSERT(x != VPB_FREED,
1052 		    ("vm_page_sunbusy: Unlocking freed page."));
1053 		if (VPB_SHARERS(x) > 1) {
1054 			if (atomic_fcmpset_int(&m->busy_lock, &x,
1055 			    x - VPB_ONE_SHARER))
1056 				break;
1057 			continue;
1058 		}
1059 		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
1060 		    ("vm_page_sunbusy: invalid lock state"));
1061 		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
1062 			continue;
1063 		if ((x & VPB_BIT_WAITERS) == 0)
1064 			break;
1065 		wakeup(m);
1066 		break;
1067 	}
1068 }
1069 
1070 /*
1071  *	vm_page_busy_sleep:
1072  *
1073  *	Sleep if the page is busy, using the page pointer as wchan.
1074  *	This is used to implement the hard-path of the busying mechanism.
1075  *
1076  *	If VM_ALLOC_IGN_SBUSY is specified in allocflags, the function
1077  *	will not sleep if the page is shared-busy.
1078  *
1079  *	The object lock must be held on entry.
1080  *
1081  *	Returns true if it slept and dropped the object lock, or false
1082  *	if there was no sleep and the lock is still held.
1083  */
1084 bool
1085 vm_page_busy_sleep(vm_page_t m, const char *wmesg, int allocflags)
1086 {
1087 	vm_object_t obj;
1088 
1089 	obj = m->object;
1090 	VM_OBJECT_ASSERT_LOCKED(obj);
1091 
1092 	return (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, allocflags,
1093 	    true));
1094 }
1095 
1096 /*
1097  *	vm_page_busy_sleep_unlocked:
1098  *
1099  *	Sleep if the page is busy, using the page pointer as wchan.
1100  *	This is used to implement the hard-path of the busying mechanism.
1101  *
1102  *	If VM_ALLOC_IGN_SBUSY is specified in allocflags, the function
1103  *	will not sleep if the page is shared-busy.
1104  *
1105  *	The object lock must not be held on entry.  The operation will
1106  *	return if the page changes identity.
1107  */
1108 void
1109 vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
1110     const char *wmesg, int allocflags)
1111 {
1112 	VM_OBJECT_ASSERT_UNLOCKED(obj);
1113 
1114 	(void)_vm_page_busy_sleep(obj, m, pindex, wmesg, allocflags, false);
1115 }
1116 
1117 /*
1118  *	_vm_page_busy_sleep:
1119  *
1120  *	Internal busy sleep function.  Verifies the page identity and
1121  *	lockstate against parameters.  Returns true if it sleeps and
1122  *	false otherwise.
1123  *
1124  *	allocflags uses VM_ALLOC_* flags to specify the lock required.
1125  *
1126  *	If locked is true the lock will be dropped for any true returns
1127  *	and held for any false returns.
1128  */
1129 static bool
1130 _vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
1131     const char *wmesg, int allocflags, bool locked)
1132 {
1133 	bool xsleep;
1134 	u_int x;
1135 
1136 	/*
1137 	 * If the object is busy we must wait for that to drain to zero
1138 	 * before trying the page again.
1139 	 */
1140 	if (obj != NULL && vm_object_busied(obj)) {
1141 		if (locked)
1142 			VM_OBJECT_DROP(obj);
1143 		vm_object_busy_wait(obj, wmesg);
1144 		return (true);
1145 	}
1146 
1147 	if (!vm_page_busied(m))
1148 		return (false);
1149 
1150 	xsleep = (allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0;
1151 	sleepq_lock(m);
1152 	x = vm_page_busy_fetch(m);
1153 	do {
1154 		/*
1155 		 * If the page changes objects or becomes unlocked we can
1156 		 * simply return.
1157 		 */
1158 		if (x == VPB_UNBUSIED ||
1159 		    (xsleep && (x & VPB_BIT_SHARED) != 0) ||
1160 		    m->object != obj || m->pindex != pindex) {
1161 			sleepq_release(m);
1162 			return (false);
1163 		}
1164 		if ((x & VPB_BIT_WAITERS) != 0)
1165 			break;
1166 	} while (!atomic_fcmpset_int(&m->busy_lock, &x, x | VPB_BIT_WAITERS));
1167 	if (locked)
1168 		VM_OBJECT_DROP(obj);
1169 	DROP_GIANT();
1170 	sleepq_add(m, NULL, wmesg, 0, 0);
1171 	sleepq_wait(m, PVM);
1172 	PICKUP_GIANT();
1173 	return (true);
1174 }
1175 
1176 /*
1177  *	vm_page_trysbusy:
1178  *
1179  *	Try to shared busy a page.
1180  *	If the operation succeeds 1 is returned otherwise 0.
1181  *	The operation never sleeps.
1182  */
1183 int
1184 vm_page_trysbusy(vm_page_t m)
1185 {
1186 	vm_object_t obj;
1187 	u_int x;
1188 
1189 	obj = m->object;
1190 	x = vm_page_busy_fetch(m);
1191 	for (;;) {
1192 		if ((x & VPB_BIT_SHARED) == 0)
1193 			return (0);
1194 		/*
1195 		 * Reduce the window for transient busies that will trigger
1196 		 * false negatives in vm_page_ps_test().
1197 		 */
1198 		if (obj != NULL && vm_object_busied(obj))
1199 			return (0);
1200 		if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
1201 		    x + VPB_ONE_SHARER))
1202 			break;
1203 	}
1204 
1205 	/* Refetch the object now that we're guaranteed that it is stable. */
1206 	obj = m->object;
1207 	if (obj != NULL && vm_object_busied(obj)) {
1208 		vm_page_sunbusy(m);
1209 		return (0);
1210 	}
1211 	return (1);
1212 }
1213 
1214 /*
1215  *	vm_page_tryxbusy:
1216  *
1217  *	Try to exclusive busy a page.
1218  *	If the operation succeeds 1 is returned otherwise 0.
1219  *	The operation never sleeps.
1220  */
1221 int
1222 vm_page_tryxbusy(vm_page_t m)
1223 {
1224 	vm_object_t obj;
1225 
1226         if (atomic_cmpset_acq_int(&m->busy_lock, VPB_UNBUSIED,
1227             VPB_CURTHREAD_EXCLUSIVE) == 0)
1228 		return (0);
1229 
1230 	obj = m->object;
1231 	if (obj != NULL && vm_object_busied(obj)) {
1232 		vm_page_xunbusy(m);
1233 		return (0);
1234 	}
1235 	return (1);
1236 }
1237 
1238 static void
1239 vm_page_xunbusy_hard_tail(vm_page_t m)
1240 {
1241 	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
1242 	/* Wake the waiter. */
1243 	wakeup(m);
1244 }
1245 
1246 /*
1247  *	vm_page_xunbusy_hard:
1248  *
1249  *	Called when unbusy has failed because there is a waiter.
1250  */
1251 void
1252 vm_page_xunbusy_hard(vm_page_t m)
1253 {
1254 	vm_page_assert_xbusied(m);
1255 	vm_page_xunbusy_hard_tail(m);
1256 }
1257 
1258 void
1259 vm_page_xunbusy_hard_unchecked(vm_page_t m)
1260 {
1261 	vm_page_assert_xbusied_unchecked(m);
1262 	vm_page_xunbusy_hard_tail(m);
1263 }
1264 
1265 static void
1266 vm_page_busy_free(vm_page_t m)
1267 {
1268 	u_int x;
1269 
1270 	atomic_thread_fence_rel();
1271 	x = atomic_swap_int(&m->busy_lock, VPB_FREED);
1272 	if ((x & VPB_BIT_WAITERS) != 0)
1273 		wakeup(m);
1274 }
1275 
1276 /*
1277  *	vm_page_unhold_pages:
1278  *
1279  *	Unhold each of the pages that is referenced by the given array.
1280  */
1281 void
1282 vm_page_unhold_pages(vm_page_t *ma, int count)
1283 {
1284 
1285 	for (; count != 0; count--) {
1286 		vm_page_unwire(*ma, PQ_ACTIVE);
1287 		ma++;
1288 	}
1289 }
1290 
1291 vm_page_t
1292 PHYS_TO_VM_PAGE(vm_paddr_t pa)
1293 {
1294 	vm_page_t m;
1295 
1296 #ifdef VM_PHYSSEG_SPARSE
1297 #if defined(__aarch64__)
1298 	if (in_realm())
1299 		pa &= ~prot_ns_shared_pa; /* Mask off secure bit */
1300 #endif
1301 	m = vm_phys_paddr_to_vm_page(pa);
1302 	if (m == NULL)
1303 		m = vm_phys_fictitious_to_vm_page(pa);
1304 	return (m);
1305 #elif defined(VM_PHYSSEG_DENSE)
1306 	long pi;
1307 
1308 	pi = atop(pa);
1309 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
1310 		m = &vm_page_array[pi - first_page];
1311 		return (m);
1312 	}
1313 	return (vm_phys_fictitious_to_vm_page(pa));
1314 #else
1315 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
1316 #endif
1317 }
1318 
1319 /*
1320  *	vm_page_getfake:
1321  *
1322  *	Create a fictitious page with the specified physical address and
1323  *	memory attribute.  The memory attribute is the only machine-
1324  *	dependent aspect of a fictitious page that must be initialized.
1325  */
1326 vm_page_t
1327 vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
1328 {
1329 	vm_page_t m;
1330 
1331 	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
1332 	vm_page_initfake(m, paddr, memattr);
1333 	return (m);
1334 }
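/*
 * Illustrative use (e.g., by a device pager; not part of this file):
 *
 *	m = vm_page_getfake(paddr, VM_MEMATTR_UNCACHEABLE);
 *	...
 *	vm_page_putfake(m);
 */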
1335 
1336 void
1337 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
1338 {
1339 
1340 	if ((m->flags & PG_FICTITIOUS) != 0) {
1341 		/*
1342 		 * The page's memattr might have changed since the
1343 		 * previous initialization.  Update the pmap to the
1344 		 * new memattr.
1345 		 */
1346 		goto memattr;
1347 	}
1348 	m->phys_addr = paddr;
1349 	m->a.queue = PQ_NONE;
1350 	/* Fictitious pages don't use "segind". */
1351 	m->flags = PG_FICTITIOUS;
1352 	/* Fictitious pages don't use "order" or "pool". */
1353 	m->oflags = VPO_UNMANAGED;
1354 	m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
1355 	/* Fictitious pages are unevictable. */
1356 	m->ref_count = 1;
1357 	pmap_page_init(m);
1358 memattr:
1359 	pmap_page_set_memattr(m, memattr);
1360 }
1361 
1362 /*
1363  *	vm_page_putfake:
1364  *
1365  *	Release a fictitious page.
1366  */
1367 void
1368 vm_page_putfake(vm_page_t m)
1369 {
1370 
1371 	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
1372 	KASSERT((m->flags & PG_FICTITIOUS) != 0,
1373 	    ("vm_page_putfake: bad page %p", m));
1374 	vm_page_assert_xbusied(m);
1375 	vm_page_busy_free(m);
1376 	uma_zfree(fakepg_zone, m);
1377 }
1378 
1379 /*
1380  *	vm_page_updatefake:
1381  *
1382  *	Update the given fictitious page to the specified physical address and
1383  *	memory attribute.
1384  */
1385 void
1386 vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
1387 {
1388 
1389 	KASSERT((m->flags & PG_FICTITIOUS) != 0,
1390 	    ("vm_page_updatefake: bad page %p", m));
1391 	m->phys_addr = paddr;
1392 	pmap_page_set_memattr(m, memattr);
1393 }
1394 
1395 /*
1396  *	vm_page_free:
1397  *
1398  *	Free a page.
1399  */
1400 void
1401 vm_page_free(vm_page_t m)
1402 {
1403 
1404 	m->flags &= ~PG_ZERO;
1405 	vm_page_free_toq(m);
1406 }
1407 
1408 /*
1409  *	vm_page_free_zero:
1410  *
1411  *	Free a page to the zeroed-pages queue.
1412  */
1413 void
1414 vm_page_free_zero(vm_page_t m)
1415 {
1416 
1417 	m->flags |= PG_ZERO;
1418 	vm_page_free_toq(m);
1419 }
1420 
1421 /*
1422  * Unbusy and handle the page queueing for a page from a getpages request that
1423  * was optionally read ahead or behind.
1424  */
1425 void
1426 vm_page_readahead_finish(vm_page_t m)
1427 {
1428 
1429 	/* We shouldn't put invalid pages on queues. */
1430 	KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m));
1431 
1432 	/*
1433 	 * Since the page is not the one actually needed, whether it should
1434 	 * be activated or deactivated is not obvious.  Empirical results
1435 	 * have shown that deactivating the page is usually the best choice,
1436 	 * unless the page is wanted by another thread.
1437 	 */
1438 	if ((vm_page_busy_fetch(m) & VPB_BIT_WAITERS) != 0)
1439 		vm_page_activate(m);
1440 	else
1441 		vm_page_deactivate(m);
1442 	vm_page_xunbusy_unchecked(m);
1443 }
1444 
1445 /*
1446  * Destroy the identity of an invalid page and free it if possible.
1447  * This is intended to be used when reading a page from backing store fails.
1448  */
1449 void
1450 vm_page_free_invalid(vm_page_t m)
1451 {
1452 
1453 	KASSERT(vm_page_none_valid(m), ("page %p is valid", m));
1454 	KASSERT(!pmap_page_is_mapped(m), ("page %p is mapped", m));
1455 	KASSERT(m->object != NULL, ("page %p has no object", m));
1456 	VM_OBJECT_ASSERT_WLOCKED(m->object);
1457 
1458 	/*
1459 	 * We may be attempting to free the page as part of the handling for an
1460 	 * I/O error, in which case the page was xbusied by a different thread.
1461 	 */
1462 	vm_page_xbusy_claim(m);
1463 
1464 	/*
1465 	 * If someone has wired this page while the object lock
1466 	 * was not held, then the thread that unwires is responsible
1467 	 * for freeing the page.  Otherwise just free the page now.
1468 	 * The wire count of this unmapped page cannot change while
1469 	 * we have the page xbusy and the page's object wlocked.
1470 	 */
1471 	if (vm_page_remove(m))
1472 		vm_page_free(m);
1473 }
1474 
1475 /*
1476  *	vm_page_dirty_KBI:		[ internal use only ]
1477  *
1478  *	Set all bits in the page's dirty field.
1479  *
1480  *	The object containing the specified page must be locked if the
1481  *	call is made from the machine-independent layer.
1482  *
1483  *	See vm_page_clear_dirty_mask().
1484  *
1485  *	This function should only be called by vm_page_dirty().
1486  */
1487 void
1488 vm_page_dirty_KBI(vm_page_t m)
1489 {
1490 
1491 	/* Refer to this operation by its public name. */
1492 	KASSERT(vm_page_all_valid(m), ("vm_page_dirty: page is invalid!"));
1493 	m->dirty = VM_PAGE_BITS_ALL;
1494 }
1495 
1496 /*
1497  * Insert the given page into the given object at the given pindex.
1498  *
1499  * The procedure is marked __always_inline to suggest to the compiler to
1500  * eliminate the iter parameter and the associated alternate branch.
1501  */
1502 static __always_inline int
1503 vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
1504     bool iter, struct pctrie_iter *pages)
1505 {
1506 	int error;
1507 
1508 	VM_OBJECT_ASSERT_WLOCKED(object);
1509 	KASSERT(m->object == NULL,
1510 	    ("vm_page_insert: page %p already inserted", m));
1511 
1512 	/*
1513 	 * Record the object/offset pair in this page.
1514 	 */
1515 	m->object = object;
1516 	m->pindex = pindex;
1517 	m->ref_count |= VPRC_OBJREF;
1518 
1519 	/*
1520 	 * Add this page to the object's radix tree.
1521 	 */
1522 	if (iter)
1523 		error = vm_radix_iter_insert(pages, m);
1524 	else
1525 		error = vm_radix_insert(&object->rtree, m);
1526 	if (__predict_false(error != 0)) {
1527 		m->object = NULL;
1528 		m->pindex = 0;
1529 		m->ref_count &= ~VPRC_OBJREF;
1530 		return (1);
1531 	}
1532 
1533 	vm_page_insert_radixdone(m, object);
1534 	vm_pager_page_inserted(object, m);
1535 	return (0);
1536 }
1537 
1538 /*
1539  *	vm_page_insert:		[ internal use only ]
1540  *
1541  *	Inserts the given mem entry into the object and object list.
1542  *
1543  *	The object must be locked.
1544  */
1545 int
1546 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
1547 {
1548 	return (vm_page_insert_lookup(m, object, pindex, false, NULL));
1549 }
1550 
1551 /*
1552  *	vm_page_iter_insert:
1553  *
1554  *	Tries to insert the page "m" into the specified object at offset
1555  *	"pindex" using the iterator "pages".  Returns 0 if the insertion was
1556  *	successful.
1557  *
1558  *	The object must be locked.
1559  */
1560 int
1561 vm_page_iter_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
1562     struct pctrie_iter *pages)
1563 {
1564 	return (vm_page_insert_lookup(m, object, pindex, true, pages));
1565 }
1566 
1567 /*
1568  *	vm_page_insert_radixdone:
1569  *
1570  *	Complete page "m" insertion into the specified object after the
1571  *	radix trie hooking.
1572  *
1573  *	The object must be locked.
1574  */
1575 static void
1576 vm_page_insert_radixdone(vm_page_t m, vm_object_t object)
1577 {
1578 
1579 	VM_OBJECT_ASSERT_WLOCKED(object);
1580 	KASSERT(object != NULL && m->object == object,
1581 	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
1582 	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
1583 	    ("vm_page_insert_radixdone: page %p is missing object ref", m));
1584 
1585 	/*
1586 	 * Show that the object has one more resident page.
1587 	 */
1588 	object->resident_page_count++;
1589 
1590 	/*
1591 	 * Hold the vnode until the last page is released.
1592 	 */
1593 	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
1594 		vhold(object->handle);
1595 
1596 	/*
1597 	 * Since we are inserting a new and possibly dirty page,
1598 	 * update the object's generation count.
1599 	 */
1600 	if (pmap_page_is_write_mapped(m))
1601 		vm_object_set_writeable_dirty(object);
1602 }
1603 
1604 /*
1605  *	vm_page_remove_radixdone
1606  *
1607  *	Complete page "m" removal from the specified object after the radix trie
1608  *	unhooking.
1609  *
1610  *	The caller is responsible for updating the page's fields to reflect this
1611  *	removal.
1612  */
1613 static void
1614 vm_page_remove_radixdone(vm_page_t m)
1615 {
1616 	vm_object_t object;
1617 
1618 	vm_page_assert_xbusied(m);
1619 	object = m->object;
1620 	VM_OBJECT_ASSERT_WLOCKED(object);
1621 	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
1622 	    ("page %p is missing its object ref", m));
1623 
1624 	/* Deferred free of swap space. */
1625 	if ((m->a.flags & PGA_SWAP_FREE) != 0)
1626 		vm_pager_page_unswapped(m);
1627 
1628 	vm_pager_page_removed(object, m);
1629 	m->object = NULL;
1630 
1631 	/*
1632 	 * And show that the object has one fewer resident page.
1633 	 */
1634 	object->resident_page_count--;
1635 
1636 	/*
1637 	 * The vnode may now be recycled.
1638 	 */
1639 	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
1640 		vdrop(object->handle);
1641 }
1642 
1643 /*
1644  *	vm_page_free_object_prep:
1645  *
1646  *	Disassociates the given page from its VM object.
1647  *
1648  *	The object must be locked, and the page must be xbusy.
1649  */
1650 static void
1651 vm_page_free_object_prep(vm_page_t m)
1652 {
1653 	KASSERT(((m->oflags & VPO_UNMANAGED) != 0) ==
1654 	    ((m->object->flags & OBJ_UNMANAGED) != 0),
1655 	    ("%s: managed flag mismatch for page %p",
1656 	     __func__, m));
1657 	vm_page_assert_xbusied(m);
1658 
1659 	/*
1660 	 * The object reference can be released without an atomic
1661 	 * operation.
1662 	 */
1663 	KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
1664 	    m->ref_count == VPRC_OBJREF,
1665 	    ("%s: page %p has unexpected ref_count %u",
1666 	    __func__, m, m->ref_count));
1667 	vm_page_remove_radixdone(m);
1668 	m->ref_count -= VPRC_OBJREF;
1669 }
1670 
1671 /*
1672  *	vm_page_iter_free:
1673  *
1674  *	Free the given page, and use the iterator to remove it from the radix
1675  *	tree.
1676  */
1677 void
1678 vm_page_iter_free(struct pctrie_iter *pages, vm_page_t m)
1679 {
1680 	vm_radix_iter_remove(pages);
1681 	vm_page_free_object_prep(m);
1682 	vm_page_xunbusy(m);
1683 	m->flags &= ~PG_ZERO;
1684 	vm_page_free_toq(m);
1685 }
1686 
1687 /*
1688  *	vm_page_remove:
1689  *
1690  *	Removes the specified page from its containing object, but does not
1691  *	invalidate any backing storage.  Returns true if the object's reference
1692  *	was the last reference to the page, and false otherwise.
1693  *
1694  *	The object must be locked and the page must be exclusively busied.
1695  *	The exclusive busy will be released on return.  If this is not the
1696  *	final ref and the caller does not hold a wire reference it may not
1697  *	continue to access the page.
1698  */
1699 bool
1700 vm_page_remove(vm_page_t m)
1701 {
1702 	bool dropped;
1703 
1704 	dropped = vm_page_remove_xbusy(m);
1705 	vm_page_xunbusy(m);
1706 
1707 	return (dropped);
1708 }
1709 
1710 /*
1711  *	vm_page_iter_remove:
1712  *
1713  *	Remove the current page, and use the iterator to remove it from the
1714  *	radix tree.
1715  */
1716 bool
1717 vm_page_iter_remove(struct pctrie_iter *pages, vm_page_t m)
1718 {
1719 	bool dropped;
1720 
1721 	vm_radix_iter_remove(pages);
1722 	vm_page_remove_radixdone(m);
1723 	dropped = (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
1724 	vm_page_xunbusy(m);
1725 
1726 	return (dropped);
1727 }
1728 
1729 /*
1730  *	vm_page_radix_remove
1731  *
1732  *	Removes the specified page from the radix tree.
1733  */
1734 static void
1735 vm_page_radix_remove(vm_page_t m)
1736 {
1737 	vm_page_t mrem __diagused;
1738 
1739 	mrem = vm_radix_remove(&m->object->rtree, m->pindex);
1740 	KASSERT(mrem == m,
1741 	    ("removed page %p, expected page %p", mrem, m));
1742 }
1743 
1744 /*
1745  *	vm_page_remove_xbusy
1746  *
1747  *	Removes the page but leaves the xbusy held.  Returns true if this
1748  *	removed the final ref and false otherwise.
1749  */
1750 bool
1751 vm_page_remove_xbusy(vm_page_t m)
1752 {
1753 
1754 	vm_page_radix_remove(m);
1755 	vm_page_remove_radixdone(m);
1756 	return (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
1757 }
1758 
1759 /*
1760  *	vm_page_lookup:
1761  *
1762  *	Returns the page associated with the object/offset
1763  *	pair specified; if none is found, NULL is returned.
1764  *
1765  *	The object must be locked.
1766  */
1767 vm_page_t
1768 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
1769 {
1770 
1771 	VM_OBJECT_ASSERT_LOCKED(object);
1772 	return (vm_radix_lookup(&object->rtree, pindex));
1773 }
1774 
1775 /*
1776  *	vm_page_iter_init:
1777  *
1778  *	Initialize iterator for vm pages.
1779  */
1780 void
1781 vm_page_iter_init(struct pctrie_iter *pages, vm_object_t object)
1782 {
1783 
1784 	vm_radix_iter_init(pages, &object->rtree);
1785 }
1786 
1787 /*
1788  *	vm_page_iter_limit_init:
1789  *
1790  *	Initialize iterator for vm pages, bounded by the given index limit.
1791  */
1792 void
1793 vm_page_iter_limit_init(struct pctrie_iter *pages, vm_object_t object,
1794     vm_pindex_t limit)
1795 {
1796 
1797 	vm_radix_iter_limit_init(pages, &object->rtree, limit);
1798 }
1799 
1800 /*
1801  *	vm_page_lookup_unlocked:
1802  *
1803  *	Returns the page associated with the object/offset pair specified;
1804  *	if none is found, NULL is returned.  The page may no longer be
1805  *	present in the object at the time that this function returns.  Only
1806  *	useful for opportunistic checks such as inmem().
1807  */
1808 vm_page_t
1809 vm_page_lookup_unlocked(vm_object_t object, vm_pindex_t pindex)
1810 {
1811 
1812 	return (vm_radix_lookup_unlocked(&object->rtree, pindex));
1813 }
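
/*
 * Example (editorial sketch, not part of the original source): a minimal
 * caller-side illustration of the two lookup flavors above.  The helper name
 * "example_page_resident" and its use are assumptions for illustration only.
 */
static bool __unused
example_page_resident(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;
	bool resident;

	/* Authoritative answer: hold the object lock across the lookup. */
	VM_OBJECT_RLOCK(object);
	m = vm_page_lookup(object, pindex);
	resident = (m != NULL);
	VM_OBJECT_RUNLOCK(object);

	/*
	 * Opportunistic answer: no lock is held, so the result may already
	 * be stale by the time it is used, as noted above for inmem().
	 */
	if (!resident)
		resident = (vm_page_lookup_unlocked(object, pindex) != NULL);
	return (resident);
}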
1814 
1815 /*
1816  *	vm_page_relookup:
1817  *
1818  *	Returns a page that must already have been busied by
1819  *	the caller.  Used for bogus page replacement.
1820  */
1821 vm_page_t
1822 vm_page_relookup(vm_object_t object, vm_pindex_t pindex)
1823 {
1824 	vm_page_t m;
1825 
1826 	m = vm_page_lookup_unlocked(object, pindex);
1827 	KASSERT(m != NULL && (vm_page_busied(m) || vm_page_wired(m)) &&
1828 	    m->object == object && m->pindex == pindex,
1829 	    ("vm_page_relookup: Invalid page %p", m));
1830 	return (m);
1831 }
1832 
1833 /*
1834  * This should only be used by lockless functions for releasing transient
1835  * incorrect acquires.  The page may have been freed after we acquired a
1836  * busy lock.  In this case busy_lock == VPB_FREED and we have nothing
1837  * further to do.
1838  */
1839 static void
1840 vm_page_busy_release(vm_page_t m)
1841 {
1842 	u_int x;
1843 
1844 	x = vm_page_busy_fetch(m);
1845 	for (;;) {
1846 		if (x == VPB_FREED)
1847 			break;
1848 		if ((x & VPB_BIT_SHARED) != 0 && VPB_SHARERS(x) > 1) {
1849 			if (atomic_fcmpset_int(&m->busy_lock, &x,
1850 			    x - VPB_ONE_SHARER))
1851 				break;
1852 			continue;
1853 		}
1854 		KASSERT((x & VPB_BIT_SHARED) != 0 ||
1855 		    (x & ~VPB_BIT_WAITERS) == VPB_CURTHREAD_EXCLUSIVE,
1856 		    ("vm_page_busy_release: %p xbusy not owned.", m));
1857 		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
1858 			continue;
1859 		if ((x & VPB_BIT_WAITERS) != 0)
1860 			wakeup(m);
1861 		break;
1862 	}
1863 }
1864 
1865 /*
1866  * Uses the page mnew as a replacement for an existing page at index
1867  * pindex, which must already be present in the object.
1868  *
1869  * Both pages must be exclusively busied on enter.  The old page is
1870  * unbusied on exit.
1871  *
1872  * A return value of true means mold is now free.  If this is not the
1873  * final ref and the caller does not hold a wire reference it may not
1874  * continue to access the page.
1875  */
1876 static bool
1877 vm_page_replace_hold(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
1878     vm_page_t mold)
1879 {
1880 	vm_page_t mret __diagused;
1881 	bool dropped;
1882 
1883 	VM_OBJECT_ASSERT_WLOCKED(object);
1884 	vm_page_assert_xbusied(mold);
1885 	KASSERT(mnew->object == NULL && (mnew->ref_count & VPRC_OBJREF) == 0,
1886 	    ("vm_page_replace: page %p already in object", mnew));
1887 
1888 	/*
1889 	 * This function mostly follows vm_page_insert() and
1890 	 * vm_page_remove() without the radix, object count and vnode
1891 	 * dance.  Consult those functions for additional comments.
1892 	 */
1893 
1894 	mnew->object = object;
1895 	mnew->pindex = pindex;
1896 	atomic_set_int(&mnew->ref_count, VPRC_OBJREF);
1897 	mret = vm_radix_replace(&object->rtree, mnew);
1898 	KASSERT(mret == mold,
1899 	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));
1900 	KASSERT((mold->oflags & VPO_UNMANAGED) ==
1901 	    (mnew->oflags & VPO_UNMANAGED),
1902 	    ("vm_page_replace: mismatched VPO_UNMANAGED"));
1903 
1904 	mold->object = NULL;
1905 
1906 	/*
1907 	 * The object's resident_page_count does not change because we have
1908 	 * swapped one page for another, but the generation count should
1909 	 * change if the page is dirty.
1910 	 */
1911 	if (pmap_page_is_write_mapped(mnew))
1912 		vm_object_set_writeable_dirty(object);
1913 	dropped = vm_page_drop(mold, VPRC_OBJREF) == VPRC_OBJREF;
1914 	vm_page_xunbusy(mold);
1915 
1916 	return (dropped);
1917 }
1918 
1919 void
1920 vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
1921     vm_page_t mold)
1922 {
1923 
1924 	vm_page_assert_xbusied(mnew);
1925 
1926 	if (vm_page_replace_hold(mnew, object, pindex, mold))
1927 		vm_page_free(mold);
1928 }
1929 
1930 /*
1931  *	vm_page_iter_rename:
1932  *
1933  *	Tries to move the specified page from its current object to a new object
1934  *	and pindex, using the given iterator to remove the page from its current
1935  *	object.  Returns true if the move was successful, and false if the move
1936  *	was aborted due to a failed memory allocation.
1937  *
1938  *	Panics if a page already resides in the new object at the new pindex.
1939  *
1940  *	This routine dirties the page if it is valid, as callers are expected to
1941  *	transfer backing storage only after moving the page.  Dirtying the page
1942  *	ensures that the destination object retains the most recent copy of the
1943  *	page.
1944  *
1945  *	The objects must be locked.
1946  */
1947 bool
1948 vm_page_iter_rename(struct pctrie_iter *old_pages, vm_page_t m,
1949     vm_object_t new_object, vm_pindex_t new_pindex)
1950 {
1951 	vm_pindex_t opidx;
1952 
1953 	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
1954 	    ("%s: page %p is missing object ref", __func__, m));
1955 	VM_OBJECT_ASSERT_WLOCKED(m->object);
1956 	VM_OBJECT_ASSERT_WLOCKED(new_object);
1957 
1958 	/*
1959 	 * Create a custom version of vm_page_insert() which does not depend
1960 	 * on m_prev and can cheat on the implementation aspects of the
1961 	 * function.
1962 	 */
1963 	opidx = m->pindex;
1964 	m->pindex = new_pindex;
1965 	if (vm_radix_insert(&new_object->rtree, m) != 0) {
1966 		m->pindex = opidx;
1967 		return (false);
1968 	}
1969 
1970 	/*
1971 	 * The operation cannot fail anymore.
1972 	 */
1973 	m->pindex = opidx;
1974 	vm_radix_iter_remove(old_pages);
1975 	vm_page_remove_radixdone(m);
1976 
1977 	/* Return back to the new pindex to complete vm_page_insert(). */
1978 	m->pindex = new_pindex;
1979 	m->object = new_object;
1980 
1981 	vm_page_insert_radixdone(m, new_object);
1982 	if (vm_page_any_valid(m))
1983 		vm_page_dirty(m);
1984 	vm_pager_page_inserted(new_object, m);
1985 	return (true);
1986 }
1987 
1988 /*
1989  *	vm_page_alloc:
1990  *
1991  *	Allocate and return a page that is associated with the specified
1992  *	object and offset pair.  By default, this page is exclusive busied.
1993  *
1994  *	The caller must always specify an allocation class.
1995  *
1996  *	allocation classes:
1997  *	VM_ALLOC_NORMAL		normal process request
1998  *	VM_ALLOC_SYSTEM		system *really* needs a page
1999  *	VM_ALLOC_INTERRUPT	interrupt time request
2000  *
2001  *	optional allocation flags:
2002  *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
2003  *				intends to allocate
2004  *	VM_ALLOC_NOBUSY		do not exclusive busy the page
2005  *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
2006  *	VM_ALLOC_NOFREE		page will never be freed
2007  *	VM_ALLOC_NOWAIT		ignored (default behavior)
2008  *	VM_ALLOC_SBUSY		shared busy the allocated page
2009  *	VM_ALLOC_WAITFAIL	in case of failure, sleep before returning
2010  *	VM_ALLOC_WIRED		wire the allocated page
2011  *	VM_ALLOC_ZERO		prefer a zeroed page
2012  */
2013 vm_page_t
2014 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
2015 {
2016 	struct pctrie_iter pages;
2017 
2018 	vm_page_iter_init(&pages, object);
2019 	return (vm_page_alloc_iter(object, pindex, req, &pages));
2020 }
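
/*
 * Example (editorial sketch, not part of the original source): a typical
 * consumer of vm_page_alloc() using the allocation class and flags documented
 * above.  The helper name and the retry-on-NULL policy (unlock, vm_wait(),
 * relock) are assumptions chosen for illustration.
 */
static vm_page_t __unused
example_alloc_zeroed_page(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(object);
	while ((m = vm_page_alloc(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
		/* The object lock may not be held across the sleep. */
		VM_OBJECT_WUNLOCK(object);
		vm_wait(object);
		VM_OBJECT_WLOCK(object);
	}
	VM_OBJECT_WUNLOCK(object);

	/* VM_ALLOC_ZERO only expresses a preference; zero if necessary. */
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

	/* The page is returned wired and exclusive busied to the caller. */
	return (m);
}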
2021 
2022 /*
2023  * Allocate a page in the specified object with the given page index.  If the
2024  * object lock is dropped and regained, the pages iter is reset.
2025  */
2026 vm_page_t
2027 vm_page_alloc_iter(vm_object_t object, vm_pindex_t pindex, int req,
2028     struct pctrie_iter *pages)
2029 {
2030 	struct vm_domainset_iter di;
2031 	vm_page_t m;
2032 	int domain;
2033 
2034 	if (vm_domainset_iter_page_init(&di, object, pindex, &domain, &req) != 0)
2035 		return (NULL);
2036 
2037 	do {
2038 		m = vm_page_alloc_domain_iter(object, pindex, domain, req,
2039 		    pages);
2040 		if (m != NULL)
2041 			break;
2042 	} while (vm_domainset_iter_page(&di, object, &domain, pages) == 0);
2043 
2044 	return (m);
2045 }
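
/*
 * Example (editorial sketch, not part of the original source): reusing a
 * single iterator across a run of consecutive insertions, which avoids
 * re-walking the radix tree from the root for every page as the plain
 * vm_page_alloc() wrapper above does.  Names are illustrative assumptions.
 */
static int __unused
example_alloc_run(vm_object_t object, vm_pindex_t pindex, int npages)
{
	struct pctrie_iter pages;
	vm_page_t m;
	int i;

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_page_iter_init(&pages, object);
	for (i = 0; i < npages; i++) {
		m = vm_page_alloc_iter(object, pindex + i, VM_ALLOC_NORMAL,
		    &pages);
		if (m == NULL)
			return (ENOMEM);
		/* ... fill and validate the page here ... */
		vm_page_xunbusy(m);
	}
	return (0);
}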
2046 
2047 /*
2048  * Returns true if "npages" free pages can be reserved without the free
2049  * page count falling below the limit for the request class, false otherwise.
2050  */
2051 static int
2052 _vm_domain_allocate(struct vm_domain *vmd, int req_class, int npages)
2053 {
2054 	u_int limit, old, new;
2055 
2056 	if (req_class == VM_ALLOC_INTERRUPT)
2057 		limit = 0;
2058 	else if (req_class == VM_ALLOC_SYSTEM)
2059 		limit = vmd->vmd_interrupt_free_min;
2060 	else
2061 		limit = vmd->vmd_free_reserved;
2062 
2063 	/*
2064 	 * Attempt to reserve the pages.  Fail if we're below the limit.
2065 	 */
2066 	limit += npages;
2067 	old = atomic_load_int(&vmd->vmd_free_count);
2068 	do {
2069 		if (old < limit)
2070 			return (0);
2071 		new = old - npages;
2072 	} while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0);
2073 
2074 	/* Wake the page daemon if we've crossed the threshold. */
2075 	if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old))
2076 		pagedaemon_wakeup(vmd->vmd_domain);
2077 
2078 	/* Only update bitsets on transitions. */
2079 	if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) ||
2080 	    (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe))
2081 		vm_domain_set(vmd);
2082 
2083 	return (1);
2084 }
2085 
2086 int
2087 vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
2088 {
2089 	int req_class;
2090 
2091 	/*
2092 	 * The page daemon is allowed to dig deeper into the free page list.
2093 	 */
2094 	req_class = req & VM_ALLOC_CLASS_MASK;
2095 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
2096 		req_class = VM_ALLOC_SYSTEM;
2097 	return (_vm_domain_allocate(vmd, req_class, npages));
2098 }
2099 
2100 vm_page_t
2101 vm_page_alloc_domain_iter(vm_object_t object, vm_pindex_t pindex, int domain,
2102     int req, struct pctrie_iter *pages)
2103 {
2104 	struct vm_domain *vmd;
2105 	vm_page_t m;
2106 	int flags;
2107 
2108 #define	VM_ALLOC_COMMON	(VM_ALLOC_CLASS_MASK | VM_ALLOC_NODUMP |	\
2109 			 VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL |		\
2110 			 VM_ALLOC_WIRED | VM_ALLOC_ZERO)
2111 #define	VPA_FLAGS	(VM_ALLOC_COMMON | VM_ALLOC_COUNT_MASK |	\
2112 			 VM_ALLOC_NOBUSY | VM_ALLOC_NOFREE |		\
2113 			 VM_ALLOC_SBUSY)
2114 	KASSERT((req & ~VPA_FLAGS) == 0,
2115 	    ("invalid request %#x", req));
2116 	KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
2117 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
2118 	    ("invalid request %#x", req));
2119 	VM_OBJECT_ASSERT_WLOCKED(object);
2120 
2121 	flags = 0;
2122 	m = NULL;
2123 	if (!vm_pager_can_alloc_page(object, pindex))
2124 		return (NULL);
2125 #if VM_NRESERVLEVEL > 0
2126 again:
2127 #endif
2128 	if (__predict_false((req & VM_ALLOC_NOFREE) != 0)) {
2129 		m = vm_page_alloc_nofree_domain(domain, req);
2130 		if (m != NULL)
2131 			goto found;
2132 	}
2133 #if VM_NRESERVLEVEL > 0
2134 	/*
2135 	 * Can we allocate the page from a reservation?
2136 	 */
2137 	if (vm_object_reserv(object) &&
2138 	    (m = vm_reserv_alloc_page(object, pindex, domain, req, pages)) !=
2139 	    NULL) {
2140 		goto found;
2141 	}
2142 #endif
2143 	vmd = VM_DOMAIN(domain);
2144 	if (vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone != NULL) {
2145 		m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone,
2146 		    M_NOWAIT | M_NOVM);
2147 		if (m != NULL) {
2148 			flags |= PG_PCPU_CACHE;
2149 			goto found;
2150 		}
2151 	}
2152 	if (vm_domain_allocate(vmd, req, 1)) {
2153 		/*
2154 		 * If not, allocate it from the free page queues.
2155 		 */
2156 		vm_domain_free_lock(vmd);
2157 		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 0);
2158 		vm_domain_free_unlock(vmd);
2159 		if (m == NULL) {
2160 			vm_domain_freecnt_inc(vmd, 1);
2161 #if VM_NRESERVLEVEL > 0
2162 			if (vm_reserv_reclaim_inactive(domain))
2163 				goto again;
2164 #endif
2165 		}
2166 	}
2167 	if (m == NULL) {
2168 		/*
2169 		 * Not allocatable, give up.
2170 		 */
2171 		(void)vm_domain_alloc_fail(vmd, object, req);
2172 		if ((req & VM_ALLOC_WAITFAIL) != 0)
2173 			pctrie_iter_reset(pages);
2174 		return (NULL);
2175 	}
2176 
2177 	/*
2178 	 * At this point we had better have found a good page.
2179 	 */
2180 found:
2181 	vm_page_dequeue(m);
2182 	vm_page_alloc_check(m);
2183 
2184 	/*
2185 	 * Initialize the page.  Only the PG_ZERO flag is inherited.
2186 	 */
2187 	flags |= m->flags & PG_ZERO;
2188 	if ((req & VM_ALLOC_NODUMP) != 0)
2189 		flags |= PG_NODUMP;
2190 	if ((req & VM_ALLOC_NOFREE) != 0)
2191 		flags |= PG_NOFREE;
2192 	m->flags = flags;
2193 	m->a.flags = 0;
2194 	m->oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0;
2195 	m->pool = VM_FREEPOOL_DEFAULT;
2196 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
2197 		m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
2198 	else if ((req & VM_ALLOC_SBUSY) != 0)
2199 		m->busy_lock = VPB_SHARERS_WORD(1);
2200 	else
2201 		m->busy_lock = VPB_UNBUSIED;
2202 	if (req & VM_ALLOC_WIRED) {
2203 		vm_wire_add(1);
2204 		m->ref_count = 1;
2205 	}
2206 	m->a.act_count = 0;
2207 
2208 	if (vm_page_iter_insert(m, object, pindex, pages)) {
2209 		if (req & VM_ALLOC_WIRED) {
2210 			vm_wire_sub(1);
2211 			m->ref_count = 0;
2212 		}
2213 		KASSERT(m->object == NULL, ("page %p has object", m));
2214 		m->oflags = VPO_UNMANAGED;
2215 		m->busy_lock = VPB_UNBUSIED;
2216 		/* Don't change PG_ZERO. */
2217 		vm_page_free_toq(m);
2218 		if (req & VM_ALLOC_WAITFAIL) {
2219 			VM_OBJECT_WUNLOCK(object);
2220 			vm_radix_wait();
2221 			pctrie_iter_reset(pages);
2222 			VM_OBJECT_WLOCK(object);
2223 		}
2224 		return (NULL);
2225 	}
2226 
2227 	/* Ignore device objects; the pager sets "memattr" for them. */
2228 	if (object->memattr != VM_MEMATTR_DEFAULT &&
2229 	    (object->flags & OBJ_FICTITIOUS) == 0)
2230 		pmap_page_set_memattr(m, object->memattr);
2231 
2232 	return (m);
2233 }
2234 
2235 /*
2236  *	vm_page_alloc_contig:
2237  *
2238  *	Allocate a contiguous set of physical pages of the given size "npages"
2239  *	from the free lists.  All of the physical pages must be at or above
2240  *	the given physical address "low" and below the given physical address
2241  *	"high".  The given value "alignment" determines the alignment of the
2242  *	first physical page in the set.  If the given value "boundary" is
2243  *	non-zero, then the set of physical pages cannot cross any physical
2244  *	address boundary that is a multiple of that value.  Both "alignment"
2245  *	and "boundary" must be a power of two.
2246  *
2247  *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
2248  *	then the memory attribute setting for the physical pages is configured
2249  *	to the object's memory attribute setting.  Otherwise, the memory
2250  *	attribute setting for the physical pages is configured to "memattr",
2251  *	overriding the object's memory attribute setting.  However, if the
2252  *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
2253  *	memory attribute setting for the physical pages cannot be configured
2254  *	to VM_MEMATTR_DEFAULT.
2255  *
2256  *	The specified object may not contain fictitious pages.
2257  *
2258  *	The caller must always specify an allocation class.
2259  *
2260  *	allocation classes:
2261  *	VM_ALLOC_NORMAL		normal process request
2262  *	VM_ALLOC_SYSTEM		system *really* needs the pages
2263  *	VM_ALLOC_INTERRUPT	interrupt time request
2264  *
2265  *	optional allocation flags:
2266  *	VM_ALLOC_NOBUSY		do not exclusive busy the pages
2267  *	VM_ALLOC_NODUMP		do not include the pages in a kernel core dump
2268  *	VM_ALLOC_NORECLAIM	do not reclaim after initial failure
2269  *	VM_ALLOC_NOWAIT		ignored (default behavior)
2270  *	VM_ALLOC_SBUSY		shared busy the allocated pages
2271  *	VM_ALLOC_WAITFAIL	in case of failure, sleep before returning
2272  *	VM_ALLOC_WIRED		wire the allocated pages
2273  *	VM_ALLOC_ZERO		prefer zeroed pages
2274  */
2275 vm_page_t
2276 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
2277     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
2278     vm_paddr_t boundary, vm_memattr_t memattr)
2279 {
2280 	struct vm_domainset_iter di;
2281 	vm_page_t bounds[2];
2282 	vm_page_t m;
2283 	int domain;
2284 	int start_segind;
2285 
2286 	start_segind = -1;
2287 
2288 	if (vm_domainset_iter_page_init(&di, object, pindex, &domain, &req) != 0)
2289 		return (NULL);
2290 
2291 	do {
2292 		m = vm_page_alloc_contig_domain(object, pindex, domain, req,
2293 		    npages, low, high, alignment, boundary, memattr);
2294 		if (m != NULL)
2295 			break;
2296 		if (start_segind == -1)
2297 			start_segind = vm_phys_lookup_segind(low);
2298 		if (vm_phys_find_range(bounds, start_segind, domain,
2299 		    npages, low, high) == -1) {
2300 			vm_domainset_iter_ignore(&di, domain);
2301 		}
2302 	} while (vm_domainset_iter_page(&di, object, &domain, NULL) == 0);
2303 
2304 	return (m);
2305 }
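
/*
 * Example (editorial sketch, not part of the original source): an
 * object-backed contiguous allocation using the constraints described above.
 * The unconstrained physical bounds, PAGE_SIZE alignment, and the helper name
 * are assumptions; VM_MEMATTR_DEFAULT keeps the object's own memory
 * attribute, as documented above.
 */
static vm_page_t __unused
example_alloc_contig_run(vm_object_t object, vm_pindex_t pindex, u_long npages)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(object);
	m = vm_page_alloc_contig(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO, npages,
	    0, ~(vm_paddr_t)0, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	VM_OBJECT_WUNLOCK(object);

	/* NULL means no run could be assembled; see vm_page_reclaim_contig(). */
	return (m);
}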
2306 
2307 static vm_page_t
2308 vm_page_find_contig_domain(int domain, int req, u_long npages, vm_paddr_t low,
2309     vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
2310 {
2311 	struct vm_domain *vmd;
2312 	vm_page_t m_ret;
2313 
2314 	/*
2315 	 * Can we allocate the pages without the number of free pages falling
2316 	 * below the lower bound for the allocation class?
2317 	 */
2318 	vmd = VM_DOMAIN(domain);
2319 	if (!vm_domain_allocate(vmd, req, npages))
2320 		return (NULL);
2321 	/*
2322 	 * Try to allocate the pages from the free page queues.
2323 	 */
2324 	vm_domain_free_lock(vmd);
2325 	m_ret = vm_phys_alloc_contig(domain, npages, low, high,
2326 	    alignment, boundary);
2327 	vm_domain_free_unlock(vmd);
2328 	if (m_ret != NULL)
2329 		return (m_ret);
2330 #if VM_NRESERVLEVEL > 0
2331 	/*
2332 	 * Try to break a reservation to allocate the pages.
2333 	 */
2334 	if ((req & VM_ALLOC_NORECLAIM) == 0) {
2335 		m_ret = vm_reserv_reclaim_contig(domain, npages, low,
2336 	            high, alignment, boundary);
2337 		if (m_ret != NULL)
2338 			return (m_ret);
2339 	}
2340 #endif
2341 	vm_domain_freecnt_inc(vmd, npages);
2342 	return (NULL);
2343 }
2344 
2345 vm_page_t
2346 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
2347     int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
2348     vm_paddr_t boundary, vm_memattr_t memattr)
2349 {
2350 	struct pctrie_iter pages;
2351 	vm_page_t m, m_ret, mpred;
2352 	u_int busy_lock, flags, oflags;
2353 
2354 #define	VPAC_FLAGS	(VM_ALLOC_COMMON | VM_ALLOC_COUNT_MASK |	\
2355 			 VM_ALLOC_NOBUSY | VM_ALLOC_NORECLAIM |		\
2356 			 VM_ALLOC_SBUSY)
2357 	KASSERT((req & ~VPAC_FLAGS) == 0,
2358 	    ("invalid request %#x", req));
2359 	KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
2360 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
2361 	    ("invalid request %#x", req));
2362 	VM_OBJECT_ASSERT_WLOCKED(object);
2363 	KASSERT((object->flags & OBJ_FICTITIOUS) == 0,
2364 	    ("vm_page_alloc_contig: object %p has fictitious pages",
2365 	    object));
2366 	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
2367 
2368 	vm_page_iter_init(&pages, object);
2369 	m_ret = NULL;
2370 #if VM_NRESERVLEVEL > 0
2371 	/*
2372 	 * Can we allocate the pages from a reservation?
2373 	 */
2374 	if (vm_object_reserv(object)) {
2375 		m_ret = vm_reserv_alloc_contig(object, pindex, domain,
2376 		    req, npages, low, high, alignment, boundary, &pages);
2377 	}
2378 #endif
2379 	if (m_ret == NULL) {
2380 		m_ret = vm_page_find_contig_domain(domain, req, npages,
2381 		    low, high, alignment, boundary);
2382 	}
2383 	if (m_ret == NULL) {
2384 		(void)vm_domain_alloc_fail(VM_DOMAIN(domain), object, req);
2385 		return (NULL);
2386 	}
2387 
2388 	/*
2389 	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
2390 	 */
2391 	flags = PG_ZERO;
2392 	if ((req & VM_ALLOC_NODUMP) != 0)
2393 		flags |= PG_NODUMP;
2394 	oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0;
2395 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
2396 		busy_lock = VPB_CURTHREAD_EXCLUSIVE;
2397 	else if ((req & VM_ALLOC_SBUSY) != 0)
2398 		busy_lock = VPB_SHARERS_WORD(1);
2399 	else
2400 		busy_lock = VPB_UNBUSIED;
2401 	if ((req & VM_ALLOC_WIRED) != 0)
2402 		vm_wire_add(npages);
2403 	if (object->memattr != VM_MEMATTR_DEFAULT &&
2404 	    memattr == VM_MEMATTR_DEFAULT)
2405 		memattr = object->memattr;
2406 	for (m = m_ret; m < &m_ret[npages]; m++) {
2407 		vm_page_dequeue(m);
2408 		vm_page_alloc_check(m);
2409 		m->a.flags = 0;
2410 		m->flags = (m->flags | PG_NODUMP) & flags;
2411 		m->busy_lock = busy_lock;
2412 		if ((req & VM_ALLOC_WIRED) != 0)
2413 			m->ref_count = 1;
2414 		m->a.act_count = 0;
2415 		m->oflags = oflags;
2416 		m->pool = VM_FREEPOOL_DEFAULT;
2417 		if (vm_page_iter_insert(m, object, pindex, &pages)) {
2418 			if ((req & VM_ALLOC_WIRED) != 0)
2419 				vm_wire_sub(npages);
2420 			KASSERT(m->object == NULL,
2421 			    ("page %p has object", m));
2422 			mpred = m;
2423 			for (m = m_ret; m < &m_ret[npages]; m++) {
2424 				if (m <= mpred &&
2425 				    (req & VM_ALLOC_WIRED) != 0)
2426 					m->ref_count = 0;
2427 				m->oflags = VPO_UNMANAGED;
2428 				m->busy_lock = VPB_UNBUSIED;
2429 				/* Don't change PG_ZERO. */
2430 				vm_page_free_toq(m);
2431 			}
2432 			if (req & VM_ALLOC_WAITFAIL) {
2433 				VM_OBJECT_WUNLOCK(object);
2434 				vm_radix_wait();
2435 				VM_OBJECT_WLOCK(object);
2436 			}
2437 			return (NULL);
2438 		}
2439 		if (memattr != VM_MEMATTR_DEFAULT)
2440 			pmap_page_set_memattr(m, memattr);
2441 		pindex++;
2442 	}
2443 	return (m_ret);
2444 }
2445 
2446 /*
2447  * Allocate a physical page that is not intended to be inserted into a VM
2448  * object.
2449  */
2450 vm_page_t
2451 vm_page_alloc_noobj_domain(int domain, int req)
2452 {
2453 	struct vm_domain *vmd;
2454 	vm_page_t m;
2455 	int flags;
2456 
2457 #define	VPAN_FLAGS	(VM_ALLOC_COMMON | VM_ALLOC_COUNT_MASK |	\
2458 			 VM_ALLOC_NOFREE | VM_ALLOC_WAITOK)
2459 	KASSERT((req & ~VPAN_FLAGS) == 0,
2460 	    ("invalid request %#x", req));
2461 
2462 	flags = ((req & VM_ALLOC_NODUMP) != 0 ? PG_NODUMP : 0) |
2463 	    ((req & VM_ALLOC_NOFREE) != 0 ? PG_NOFREE : 0);
2464 	vmd = VM_DOMAIN(domain);
2465 again:
2466 	if (__predict_false((req & VM_ALLOC_NOFREE) != 0)) {
2467 		m = vm_page_alloc_nofree_domain(domain, req);
2468 		if (m != NULL)
2469 			goto found;
2470 	}
2471 
2472 	if (vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone != NULL) {
2473 		m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone,
2474 		    M_NOWAIT | M_NOVM);
2475 		if (m != NULL) {
2476 			flags |= PG_PCPU_CACHE;
2477 			goto found;
2478 		}
2479 	}
2480 
2481 	if (vm_domain_allocate(vmd, req, 1)) {
2482 		vm_domain_free_lock(vmd);
2483 		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DIRECT, 0);
2484 		vm_domain_free_unlock(vmd);
2485 		if (m == NULL) {
2486 			vm_domain_freecnt_inc(vmd, 1);
2487 #if VM_NRESERVLEVEL > 0
2488 			if (vm_reserv_reclaim_inactive(domain))
2489 				goto again;
2490 #endif
2491 		}
2492 	}
2493 	if (m == NULL) {
2494 		if (!vm_domain_alloc_fail(vmd, NULL, req))
2495 			return (NULL);
2496 		goto again;
2497 	}
2498 
2499 found:
2500 	/*
2501 	 * If the page comes from the free page cache, then it might still
2502 	 * have a pending deferred dequeue.  Specifically, when the page is
2503 	 * imported from a different pool by vm_phys_alloc_npages(), the
2504 	 * second, third, etc. pages in a non-zero order set could have
2505 	 * pending deferred dequeues.
2506 	 */
2507 	vm_page_dequeue(m);
2508 	vm_page_alloc_check(m);
2509 
2510 	/*
2511 	 * Consumers should not rely on a useful default pindex value.
2512 	 */
2513 	m->pindex = 0xdeadc0dedeadc0de;
2514 	m->flags = (m->flags & PG_ZERO) | flags;
2515 	m->a.flags = 0;
2516 	m->oflags = VPO_UNMANAGED;
2517 	m->pool = VM_FREEPOOL_DIRECT;
2518 	m->busy_lock = VPB_UNBUSIED;
2519 	if ((req & VM_ALLOC_WIRED) != 0) {
2520 		vm_wire_add(1);
2521 		m->ref_count = 1;
2522 	}
2523 
2524 	if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
2525 		pmap_zero_page(m);
2526 
2527 	return (m);
2528 }
2529 
2530 #if VM_NRESERVLEVEL > 1
2531 #define	VM_NOFREE_IMPORT_ORDER	(VM_LEVEL_1_ORDER + VM_LEVEL_0_ORDER)
2532 #elif VM_NRESERVLEVEL > 0
2533 #define	VM_NOFREE_IMPORT_ORDER	VM_LEVEL_0_ORDER
2534 #else
2535 #define	VM_NOFREE_IMPORT_ORDER	8
2536 #endif
2537 
2538 /*
2539  * Allocate a single NOFREE page.
2540  *
2541  * This routine hands out NOFREE pages from higher-order
2542  * physical memory blocks in order to reduce memory fragmentation.
2543  * When a NOFREE chunk for a given domain is used up,
2544  * the routine will try to fetch a new one from the freelists
2545  * and discard the old one.
2546  */
2547 static vm_page_t __noinline
2548 vm_page_alloc_nofree_domain(int domain, int req)
2549 {
2550 	vm_page_t m;
2551 	struct vm_domain *vmd;
2552 
2553 	KASSERT((req & VM_ALLOC_NOFREE) != 0, ("invalid request %#x", req));
2554 
2555 	vmd = VM_DOMAIN(domain);
2556 	vm_domain_free_lock(vmd);
2557 	if (TAILQ_EMPTY(&vmd->vmd_nofreeq)) {
2558 		int count;
2559 
2560 		count = 1 << VM_NOFREE_IMPORT_ORDER;
2561 		if (!vm_domain_allocate(vmd, req, count)) {
2562 			vm_domain_free_unlock(vmd);
2563 			return (NULL);
2564 		}
2565 		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
2566 		    VM_NOFREE_IMPORT_ORDER);
2567 		if (m == NULL) {
2568 			vm_domain_freecnt_inc(vmd, count);
2569 			vm_domain_free_unlock(vmd);
2570 			return (NULL);
2571 		}
2572 		m->ref_count = count - 1;
2573 		TAILQ_INSERT_HEAD(&vmd->vmd_nofreeq, m, plinks.q);
2574 		atomic_add_long(&nofreeq_size, count);
2575 	}
2576 	m = TAILQ_FIRST(&vmd->vmd_nofreeq);
2577 	TAILQ_REMOVE(&vmd->vmd_nofreeq, m, plinks.q);
2578 	if (m->ref_count > 0) {
2579 		vm_page_t m_next;
2580 
2581 		m_next = &m[1];
2582 		vm_page_dequeue(m_next);
2583 		m_next->ref_count = m->ref_count - 1;
2584 		TAILQ_INSERT_HEAD(&vmd->vmd_nofreeq, m_next, plinks.q);
2585 		m->ref_count = 0;
2586 	}
2587 	vm_domain_free_unlock(vmd);
2588 	atomic_add_long(&nofreeq_size, -1);
2589 	VM_CNT_INC(v_nofree_count);
2590 
2591 	return (m);
2592 }
2593 
2594 /*
2595  * Though NOFREE pages by definition should not be freed, we support putting
2596  * them aside for future NOFREE allocations.  This enables code which allocates
2597  * NOFREE pages for some purpose but then encounters an error and releases
2598  * resources.
2599  */
2600 static void __noinline
2601 vm_page_free_nofree(struct vm_domain *vmd, vm_page_t m)
2602 {
2603 	VM_CNT_ADD(v_nofree_count, -1);
2604 	atomic_add_long(&nofreeq_size, 1);
2605 	vm_domain_free_lock(vmd);
2606 	MPASS(m->ref_count == 0);
2607 	TAILQ_INSERT_HEAD(&vmd->vmd_nofreeq, m, plinks.q);
2608 	vm_domain_free_unlock(vmd);
2609 }
2610 
2611 vm_page_t
2612 vm_page_alloc_noobj(int req)
2613 {
2614 	struct vm_domainset_iter di;
2615 	vm_page_t m;
2616 	int domain;
2617 
2618 	if (vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req) != 0)
2619 		return (NULL);
2620 
2621 	do {
2622 		m = vm_page_alloc_noobj_domain(domain, req);
2623 		if (m != NULL)
2624 			break;
2625 	} while (vm_domainset_iter_page(&di, NULL, &domain, NULL) == 0);
2626 
2627 	return (m);
2628 }
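
/*
 * Example (editorial sketch, not part of the original source): a caller that
 * needs a page purely for kernel use, with no backing VM object.  The page
 * comes back unmanaged and unbusied; with VM_ALLOC_ZERO the zeroing is done
 * by vm_page_alloc_noobj_domain() above when the page was not already clean.
 * The helper name is an assumption for illustration.
 */
static vm_paddr_t __unused
example_alloc_kernel_page(void)
{
	vm_page_t m;

	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO |
	    VM_ALLOC_WAITOK);
	if (m == NULL)
		return (0);
	return (VM_PAGE_TO_PHYS(m));
}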
2629 
2630 vm_page_t
2631 vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
2632     vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
2633     vm_memattr_t memattr)
2634 {
2635 	struct vm_domainset_iter di;
2636 	vm_page_t m;
2637 	int domain;
2638 
2639 	if (vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req) != 0)
2640 		return (NULL);
2641 
2642 	do {
2643 		m = vm_page_alloc_noobj_contig_domain(domain, req, npages, low,
2644 		    high, alignment, boundary, memattr);
2645 		if (m != NULL)
2646 			break;
2647 	} while (vm_domainset_iter_page(&di, NULL, &domain, NULL) == 0);
2648 
2649 	return (m);
2650 }
2651 
2652 vm_page_t
2653 vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
2654     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
2655     vm_memattr_t memattr)
2656 {
2657 	vm_page_t m, m_ret;
2658 	u_int flags;
2659 
2660 #define	VPANC_FLAGS	(VM_ALLOC_COMMON | VM_ALLOC_COUNT_MASK |	\
2661 			 VM_ALLOC_NORECLAIM | VM_ALLOC_WAITOK)
2662 	KASSERT((req & ~VPANC_FLAGS) == 0,
2663 	    ("invalid request %#x", req));
2664 	KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) !=
2665 	    (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM),
2666 	    ("invalid request %#x", req));
2667 	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
2668 
2669 	while ((m_ret = vm_page_find_contig_domain(domain, req, npages,
2670 	    low, high, alignment, boundary)) == NULL) {
2671 		if (!vm_domain_alloc_fail(VM_DOMAIN(domain), NULL, req))
2672 			return (NULL);
2673 	}
2674 
2675 	/*
2676 	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
2677 	 */
2678 	flags = PG_ZERO;
2679 	if ((req & VM_ALLOC_NODUMP) != 0)
2680 		flags |= PG_NODUMP;
2681 	if ((req & VM_ALLOC_WIRED) != 0)
2682 		vm_wire_add(npages);
2683 	for (m = m_ret; m < &m_ret[npages]; m++) {
2684 		vm_page_dequeue(m);
2685 		vm_page_alloc_check(m);
2686 
2687 		/*
2688 		 * Consumers should not rely on a useful default pindex value.
2689 		 */
2690 		m->pindex = 0xdeadc0dedeadc0de;
2691 		m->a.flags = 0;
2692 		m->flags = (m->flags | PG_NODUMP) & flags;
2693 		m->busy_lock = VPB_UNBUSIED;
2694 		if ((req & VM_ALLOC_WIRED) != 0)
2695 			m->ref_count = 1;
2696 		m->a.act_count = 0;
2697 		m->oflags = VPO_UNMANAGED;
2698 		m->pool = VM_FREEPOOL_DIRECT;
2699 
2700 		/*
2701 		 * Zero the page before updating any mappings since the page is
2702 		 * not yet shared with any devices which might require the
2703 		 * non-default memory attribute.  pmap_page_set_memattr()
2704 		 * flushes data caches before returning.
2705 		 */
2706 		if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
2707 			pmap_zero_page(m);
2708 		if (memattr != VM_MEMATTR_DEFAULT)
2709 			pmap_page_set_memattr(m, memattr);
2710 	}
2711 	return (m_ret);
2712 }
2713 
2714 /*
2715  * Check a page that has been freshly dequeued from a freelist.
2716  */
2717 static void
2718 vm_page_alloc_check(vm_page_t m)
2719 {
2720 
2721 	KASSERT(m->object == NULL, ("page %p has object", m));
2722 	KASSERT(m->a.queue == PQ_NONE &&
2723 	    (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
2724 	    ("page %p has unexpected queue %d, flags %#x",
2725 	    m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK)));
2726 	KASSERT(m->ref_count == 0, ("page %p has references", m));
2727 	KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m));
2728 	KASSERT(m->dirty == 0, ("page %p is dirty", m));
2729 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
2730 	    ("page %p has unexpected memattr %d",
2731 	    m, pmap_page_get_memattr(m)));
2732 	KASSERT(vm_page_none_valid(m), ("free page %p is valid", m));
2733 	pmap_vm_page_alloc_check(m);
2734 }
2735 
2736 static int
2737 vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags)
2738 {
2739 	struct vm_domain *vmd;
2740 	struct vm_pgcache *pgcache;
2741 	int i;
2742 
2743 	pgcache = arg;
2744 	vmd = VM_DOMAIN(pgcache->domain);
2745 
2746 	/*
2747 	 * The page daemon should avoid creating extra memory pressure since its
2748 	 * main purpose is to replenish the store of free pages.
2749 	 */
2750 	if (vmd->vmd_severeset || curproc == pageproc ||
2751 	    !_vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
2752 		return (0);
2753 	domain = vmd->vmd_domain;
2754 	vm_domain_free_lock(vmd);
2755 	i = vm_phys_alloc_npages(domain, pgcache->pool, cnt,
2756 	    (vm_page_t *)store);
2757 	vm_domain_free_unlock(vmd);
2758 	if (cnt != i)
2759 		vm_domain_freecnt_inc(vmd, cnt - i);
2760 
2761 	return (i);
2762 }
2763 
2764 static void
2765 vm_page_zone_release(void *arg, void **store, int cnt)
2766 {
2767 	struct vm_domain *vmd;
2768 	struct vm_pgcache *pgcache;
2769 	vm_page_t m;
2770 	int i;
2771 
2772 	pgcache = arg;
2773 	vmd = VM_DOMAIN(pgcache->domain);
2774 	vm_domain_free_lock(vmd);
2775 	for (i = 0; i < cnt; i++) {
2776 		m = (vm_page_t)store[i];
2777 		vm_phys_free_pages(m, pgcache->pool, 0);
2778 	}
2779 	vm_domain_free_unlock(vmd);
2780 	vm_domain_freecnt_inc(vmd, cnt);
2781 }
2782 
2783 #define	VPSC_ANY	0	/* No restrictions. */
2784 #define	VPSC_NORESERV	1	/* Skip reservations; implies VPSC_NOSUPER. */
2785 #define	VPSC_NOSUPER	2	/* Skip superpages. */
2786 
2787 /*
2788  *	vm_page_scan_contig:
2789  *
2790  *	Scan vm_page_array[] between the specified entries "m_start" and
2791  *	"m_end" for a run of contiguous physical pages that satisfy the
2792  *	specified conditions, and return the lowest page in the run.  The
2793  *	specified "alignment" determines the alignment of the lowest physical
2794  *	page in the run.  If the specified "boundary" is non-zero, then the
2795  *	run of physical pages cannot span a physical address that is a
2796  *	multiple of "boundary".
2797  *
2798  *	"m_end" is never dereferenced, so it need not point to a vm_page
2799  *	structure within vm_page_array[].
2800  *
2801  *	"npages" must be greater than zero.  "m_start" and "m_end" must not
2802  *	span a hole (or discontiguity) in the physical address space.  Both
2803  *	"alignment" and "boundary" must be a power of two.
2804  */
2805 static vm_page_t
2806 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
2807     u_long alignment, vm_paddr_t boundary, int options)
2808 {
2809 	vm_object_t object;
2810 	vm_paddr_t pa;
2811 	vm_page_t m, m_run;
2812 #if VM_NRESERVLEVEL > 0
2813 	int level;
2814 #endif
2815 	int m_inc, order, run_ext, run_len;
2816 
2817 	KASSERT(npages > 0, ("npages is 0"));
2818 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
2819 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
2820 	m_run = NULL;
2821 	run_len = 0;
2822 	for (m = m_start; m < m_end && run_len < npages; m += m_inc) {
2823 		KASSERT((m->flags & PG_MARKER) == 0,
2824 		    ("page %p is PG_MARKER", m));
2825 		KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1,
2826 		    ("fictitious page %p has invalid ref count", m));
2827 
2828 		/*
2829 		 * If the current page would be the start of a run, check its
2830 		 * physical address against the end, alignment, and boundary
2831 		 * conditions.  If it doesn't satisfy these conditions, either
2832 		 * terminate the scan or advance to the next page that
2833 		 * satisfies the failed condition.
2834 		 */
2835 		if (run_len == 0) {
2836 			KASSERT(m_run == NULL, ("m_run != NULL"));
2837 			if (m + npages > m_end)
2838 				break;
2839 			pa = VM_PAGE_TO_PHYS(m);
2840 			if (!vm_addr_align_ok(pa, alignment)) {
2841 				m_inc = atop(roundup2(pa, alignment) - pa);
2842 				continue;
2843 			}
2844 			if (!vm_addr_bound_ok(pa, ptoa(npages), boundary)) {
2845 				m_inc = atop(roundup2(pa, boundary) - pa);
2846 				continue;
2847 			}
2848 		} else
2849 			KASSERT(m_run != NULL, ("m_run == NULL"));
2850 
2851 retry:
2852 		m_inc = 1;
2853 		if (vm_page_wired(m))
2854 			run_ext = 0;
2855 #if VM_NRESERVLEVEL > 0
2856 		else if ((level = vm_reserv_level(m)) >= 0 &&
2857 		    (options & VPSC_NORESERV) != 0) {
2858 			run_ext = 0;
2859 			/* Advance to the end of the reservation. */
2860 			pa = VM_PAGE_TO_PHYS(m);
2861 			m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) -
2862 			    pa);
2863 		}
2864 #endif
2865 		else if ((object = atomic_load_ptr(&m->object)) != NULL) {
2866 			/*
2867 			 * The page is considered eligible for relocation if
2868 			 * and only if it could be laundered or reclaimed by
2869 			 * the page daemon.
2870 			 */
2871 			VM_OBJECT_RLOCK(object);
2872 			if (object != m->object) {
2873 				VM_OBJECT_RUNLOCK(object);
2874 				goto retry;
2875 			}
2876 			/* Don't care: PG_NODUMP, PG_ZERO. */
2877 			if ((object->flags & OBJ_SWAP) == 0 &&
2878 			    object->type != OBJT_VNODE) {
2879 				run_ext = 0;
2880 #if VM_NRESERVLEVEL > 0
2881 			} else if ((options & VPSC_NOSUPER) != 0 &&
2882 			    (level = vm_reserv_level_iffullpop(m)) >= 0) {
2883 				run_ext = 0;
2884 				/* Advance to the end of the superpage. */
2885 				pa = VM_PAGE_TO_PHYS(m);
2886 				m_inc = atop(roundup2(pa + 1,
2887 				    vm_reserv_size(level)) - pa);
2888 #endif
2889 			} else if (object->memattr == VM_MEMATTR_DEFAULT &&
2890 			    vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) {
2891 				/*
2892 				 * The page is allocated but eligible for
2893 				 * relocation.  Extend the current run by one
2894 				 * page.
2895 				 */
2896 				KASSERT(pmap_page_get_memattr(m) ==
2897 				    VM_MEMATTR_DEFAULT,
2898 				    ("page %p has an unexpected memattr", m));
2899 				KASSERT((m->oflags & (VPO_SWAPINPROG |
2900 				    VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
2901 				    ("page %p has unexpected oflags", m));
2902 				/* Don't care: PGA_NOSYNC. */
2903 				run_ext = 1;
2904 			} else
2905 				run_ext = 0;
2906 			VM_OBJECT_RUNLOCK(object);
2907 #if VM_NRESERVLEVEL > 0
2908 		} else if (level >= 0) {
2909 			/*
2910 			 * The page is reserved but not yet allocated.  In
2911 			 * other words, it is still free.  Extend the current
2912 			 * run by one page.
2913 			 */
2914 			run_ext = 1;
2915 #endif
2916 		} else if ((order = m->order) < VM_NFREEORDER) {
2917 			/*
2918 			 * The page is enqueued in the physical memory
2919 			 * allocator's free page queues.  Moreover, it is the
2920 			 * first page in a power-of-two-sized run of
2921 			 * contiguous free pages.  Add these pages to the end
2922 			 * of the current run, and jump ahead.
2923 			 */
2924 			run_ext = 1 << order;
2925 			m_inc = 1 << order;
2926 		} else {
2927 			/*
2928 			 * Skip the page for one of the following reasons: (1)
2929 			 * It is enqueued in the physical memory allocator's
2930 			 * free page queues.  However, it is not the first
2931 			 * page in a run of contiguous free pages.  (This case
2932 			 * rarely occurs because the scan is performed in
2933 			 * ascending order.) (2) It is not reserved, and it is
2934 			 * transitioning from free to allocated.  (Conversely,
2935 			 * the transition from allocated to free for managed
2936 			 * pages is blocked by the page busy lock.) (3) It is
2937 			 * allocated but not contained by an object and not
2938 			 * wired, e.g., allocated by Xen's balloon driver.
2939 			 */
2940 			run_ext = 0;
2941 		}
2942 
2943 		/*
2944 		 * Extend or reset the current run of pages.
2945 		 */
2946 		if (run_ext > 0) {
2947 			if (run_len == 0)
2948 				m_run = m;
2949 			run_len += run_ext;
2950 		} else {
2951 			if (run_len > 0) {
2952 				m_run = NULL;
2953 				run_len = 0;
2954 			}
2955 		}
2956 	}
2957 	if (run_len >= npages)
2958 		return (m_run);
2959 	return (NULL);
2960 }
2961 
2962 /*
2963  *	vm_page_reclaim_run:
2964  *
2965  *	Try to relocate each of the allocated virtual pages within the
2966  *	specified run of physical pages to a new physical address.  Free the
2967  *	physical pages underlying the relocated virtual pages.  A virtual page
2968  *	is relocatable if and only if it could be laundered or reclaimed by
2969  *	the page daemon.  Whenever possible, a virtual page is relocated to a
2970  *	physical address above "high".
2971  *
2972  *	Returns 0 if every physical page within the run was already free or
2973  *	just freed by a successful relocation.  Otherwise, returns a non-zero
2974  *	value indicating why the last attempt to relocate a virtual page was
2975  *	unsuccessful.
2976  *
2977  *	"req_class" must be an allocation class.
2978  */
2979 static int
2980 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
2981     vm_paddr_t high)
2982 {
2983 	struct vm_domain *vmd;
2984 	struct spglist free;
2985 	vm_object_t object;
2986 	vm_paddr_t pa;
2987 	vm_page_t m, m_end, m_new;
2988 	int error, order, req;
2989 
2990 	KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class,
2991 	    ("req_class is not an allocation class"));
2992 	SLIST_INIT(&free);
2993 	error = 0;
2994 	m = m_run;
2995 	m_end = m_run + npages;
2996 	for (; error == 0 && m < m_end; m++) {
2997 		KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
2998 		    ("page %p is PG_FICTITIOUS or PG_MARKER", m));
2999 
3000 		/*
3001 		 * Racily check for wirings.  Races are handled once the object
3002 		 * lock is held and the page is unmapped.
3003 		 */
3004 		if (vm_page_wired(m))
3005 			error = EBUSY;
3006 		else if ((object = atomic_load_ptr(&m->object)) != NULL) {
3007 			/*
3008 			 * The page is relocated if and only if it could be
3009 			 * laundered or reclaimed by the page daemon.
3010 			 */
3011 			VM_OBJECT_WLOCK(object);
3012 			/* Don't care: PG_NODUMP, PG_ZERO. */
3013 			if (m->object != object ||
3014 			    ((object->flags & OBJ_SWAP) == 0 &&
3015 			    object->type != OBJT_VNODE))
3016 				error = EINVAL;
3017 			else if (object->memattr != VM_MEMATTR_DEFAULT)
3018 				error = EINVAL;
3019 			else if (vm_page_queue(m) != PQ_NONE &&
3020 			    vm_page_tryxbusy(m) != 0) {
3021 				if (vm_page_wired(m)) {
3022 					vm_page_xunbusy(m);
3023 					error = EBUSY;
3024 					goto unlock;
3025 				}
3026 				KASSERT(pmap_page_get_memattr(m) ==
3027 				    VM_MEMATTR_DEFAULT,
3028 				    ("page %p has an unexpected memattr", m));
3029 				KASSERT(m->oflags == 0,
3030 				    ("page %p has unexpected oflags", m));
3031 				/* Don't care: PGA_NOSYNC. */
3032 				if (!vm_page_none_valid(m)) {
3033 					/*
3034 					 * First, try to allocate a new page
3035 					 * that is above "high".  Failing
3036 					 * that, try to allocate a new page
3037 					 * that is below "m_run".  Allocate
3038 					 * the new page between the end of
3039 					 * "m_run" and "high" only as a last
3040 					 * resort.
3041 					 */
3042 					req = req_class;
3043 					if ((m->flags & PG_NODUMP) != 0)
3044 						req |= VM_ALLOC_NODUMP;
3045 					if (trunc_page(high) !=
3046 					    ~(vm_paddr_t)PAGE_MASK) {
3047 						m_new =
3048 						    vm_page_alloc_noobj_contig(
3049 						    req, 1, round_page(high),
3050 						    ~(vm_paddr_t)0, PAGE_SIZE,
3051 						    0, VM_MEMATTR_DEFAULT);
3052 					} else
3053 						m_new = NULL;
3054 					if (m_new == NULL) {
3055 						pa = VM_PAGE_TO_PHYS(m_run);
3056 						m_new =
3057 						    vm_page_alloc_noobj_contig(
3058 						    req, 1, 0, pa - 1,
3059 						    PAGE_SIZE, 0,
3060 						    VM_MEMATTR_DEFAULT);
3061 					}
3062 					if (m_new == NULL) {
3063 						pa += ptoa(npages);
3064 						m_new =
3065 						    vm_page_alloc_noobj_contig(
3066 						    req, 1, pa, high, PAGE_SIZE,
3067 						    0, VM_MEMATTR_DEFAULT);
3068 					}
3069 					if (m_new == NULL) {
3070 						vm_page_xunbusy(m);
3071 						error = ENOMEM;
3072 						goto unlock;
3073 					}
3074 
3075 					/*
3076 					 * Unmap the page and check for new
3077 					 * wirings that may have been acquired
3078 					 * through a pmap lookup.
3079 					 */
3080 					if (object->ref_count != 0 &&
3081 					    !vm_page_try_remove_all(m)) {
3082 						vm_page_xunbusy(m);
3083 						vm_page_free(m_new);
3084 						error = EBUSY;
3085 						goto unlock;
3086 					}
3087 
3088 					/*
3089 					 * Replace "m" with the new page.  For
3090 					 * vm_page_replace(), "m" must be busy
3091 					 * and dequeued.  Finally, change "m"
3092 					 * as if vm_page_free() was called.
3093 					 */
3094 					m_new->a.flags = m->a.flags &
3095 					    ~PGA_QUEUE_STATE_MASK;
3096 					KASSERT(m_new->oflags == VPO_UNMANAGED,
3097 					    ("page %p is managed", m_new));
3098 					m_new->oflags = 0;
3099 					pmap_copy_page(m, m_new);
3100 					m_new->valid = m->valid;
3101 					m_new->dirty = m->dirty;
3102 					m->flags &= ~PG_ZERO;
3103 					vm_page_dequeue(m);
3104 					if (vm_page_replace_hold(m_new, object,
3105 					    m->pindex, m) &&
3106 					    vm_page_free_prep(m))
3107 						SLIST_INSERT_HEAD(&free, m,
3108 						    plinks.s.ss);
3109 
3110 					/*
3111 					 * The new page must be deactivated
3112 					 * before the object is unlocked.
3113 					 */
3114 					vm_page_deactivate(m_new);
3115 				} else {
3116 					m->flags &= ~PG_ZERO;
3117 					vm_page_dequeue(m);
3118 					if (vm_page_free_prep(m))
3119 						SLIST_INSERT_HEAD(&free, m,
3120 						    plinks.s.ss);
3121 					KASSERT(m->dirty == 0,
3122 					    ("page %p is dirty", m));
3123 				}
3124 			} else
3125 				error = EBUSY;
3126 unlock:
3127 			VM_OBJECT_WUNLOCK(object);
3128 		} else {
3129 			MPASS(vm_page_domain(m) == domain);
3130 			vmd = VM_DOMAIN(domain);
3131 			vm_domain_free_lock(vmd);
3132 			order = m->order;
3133 			if (order < VM_NFREEORDER) {
3134 				/*
3135 				 * The page is enqueued in the physical memory
3136 				 * allocator's free page queues.  Moreover, it
3137 				 * is the first page in a power-of-two-sized
3138 				 * run of contiguous free pages.  Jump ahead
3139 				 * to the last page within that run, and
3140 				 * continue from there.
3141 				 */
3142 				m += (1 << order) - 1;
3143 			}
3144 #if VM_NRESERVLEVEL > 0
3145 			else if (vm_reserv_is_page_free(m))
3146 				order = 0;
3147 #endif
3148 			vm_domain_free_unlock(vmd);
3149 			if (order == VM_NFREEORDER)
3150 				error = EINVAL;
3151 		}
3152 	}
3153 	if ((m = SLIST_FIRST(&free)) != NULL) {
3154 		int cnt;
3155 
3156 		vmd = VM_DOMAIN(domain);
3157 		cnt = 0;
3158 		vm_domain_free_lock(vmd);
3159 		do {
3160 			MPASS(vm_page_domain(m) == domain);
3161 			SLIST_REMOVE_HEAD(&free, plinks.s.ss);
3162 			vm_phys_free_pages(m, m->pool, 0);
3163 			cnt++;
3164 		} while ((m = SLIST_FIRST(&free)) != NULL);
3165 		vm_domain_free_unlock(vmd);
3166 		vm_domain_freecnt_inc(vmd, cnt);
3167 	}
3168 	return (error);
3169 }
3170 
3171 #define	NRUNS	16
3172 
3173 #define	RUN_INDEX(count, nruns)	((count) % (nruns))
3174 
3175 #define	MIN_RECLAIM	8
3176 
3177 /*
3178  *	vm_page_reclaim_contig:
3179  *
3180  *	Reclaim allocated, contiguous physical memory satisfying the specified
3181  *	conditions by relocating the virtual pages using that physical memory.
3182  *	Returns 0 if reclamation is successful, ERANGE if the specified domain
3183  *	can't possibly satisfy the reclamation request, or ENOMEM if not
3184  *	currently able to reclaim the requested number of pages.  Since
3185  *	relocation requires the allocation of physical pages, reclamation may
3186  *	fail with ENOMEM due to a shortage of free pages.  When reclamation
3187  *	fails in this manner, callers are expected to perform vm_wait() before
3188  *	retrying a failed allocation operation, e.g., vm_page_alloc_contig().
3189  *
3190  *	The caller must always specify an allocation class through "req".
3191  *
3192  *	allocation classes:
3193  *	VM_ALLOC_NORMAL		normal process request
3194  *	VM_ALLOC_SYSTEM		system *really* needs a page
3195  *	VM_ALLOC_INTERRUPT	interrupt time request
3196  *
3197  *	The optional allocation flags are ignored.
3198  *
3199  *	"npages" must be greater than zero.  Both "alignment" and "boundary"
3200  *	must be a power of two.
3201  */
3202 int
3203 vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages,
3204     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
3205     int desired_runs)
3206 {
3207 	struct vm_domain *vmd;
3208 	vm_page_t bounds[2], m_run, _m_runs[NRUNS], *m_runs;
3209 	u_long count, minalign, reclaimed;
3210 	int error, i, min_reclaim, nruns, options, req_class;
3211 	int segind, start_segind;
3212 	int ret;
3213 
3214 	KASSERT(npages > 0, ("npages is 0"));
3215 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
3216 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
3217 
3218 	ret = ENOMEM;
3219 
3220 	/*
3221 	 * If the caller wants to reclaim multiple runs, try to allocate
3222 	 * space to store the runs.  If that fails, fall back to the old
3223 	 * behavior of just reclaiming MIN_RECLAIM pages.
3224 	 */
3225 	if (desired_runs > 1)
3226 		m_runs = malloc((NRUNS + desired_runs) * sizeof(*m_runs),
3227 		    M_TEMP, M_NOWAIT);
3228 	else
3229 		m_runs = NULL;
3230 
3231 	if (m_runs == NULL) {
3232 		m_runs = _m_runs;
3233 		nruns = NRUNS;
3234 	} else {
3235 		nruns = NRUNS + desired_runs - 1;
3236 	}
3237 	min_reclaim = MAX(desired_runs * npages, MIN_RECLAIM);
3238 
3239 	/*
3240 	 * The caller will attempt an allocation after some runs have been
3241 	 * reclaimed and added to the vm_phys buddy lists.  Due to limitations
3242 	 * of vm_phys_alloc_contig(), round up the requested length to the next
3243 	 * power of two or maximum chunk size, and ensure that each run is
3244 	 * suitably aligned.
3245 	 */
3246 	minalign = 1ul << imin(flsl(npages - 1), VM_NFREEORDER - 1);
3247 	npages = roundup2(npages, minalign);
3248 	if (alignment < ptoa(minalign))
3249 		alignment = ptoa(minalign);
3250 
3251 	/*
3252 	 * The page daemon is allowed to dig deeper into the free page list.
3253 	 */
3254 	req_class = req & VM_ALLOC_CLASS_MASK;
3255 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
3256 		req_class = VM_ALLOC_SYSTEM;
3257 
3258 	start_segind = vm_phys_lookup_segind(low);
3259 
3260 	/*
3261 	 * Return if the number of free pages cannot satisfy the requested
3262 	 * allocation.
3263 	 */
3264 	vmd = VM_DOMAIN(domain);
3265 	count = vmd->vmd_free_count;
3266 	if (count < npages + vmd->vmd_free_reserved || (count < npages +
3267 	    vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
3268 	    (count < npages && req_class == VM_ALLOC_INTERRUPT))
3269 		goto done;
3270 
3271 	/*
3272 	 * Scan up to three times, relaxing the restrictions ("options") on
3273 	 * the reclamation of reservations and superpages each time.
3274 	 */
3275 	for (options = VPSC_NORESERV;;) {
3276 		bool phys_range_exists = false;
3277 
3278 		/*
3279 		 * Find the highest runs that satisfy the given constraints
3280 		 * and restrictions, and record them in "m_runs".
3281 		 */
3282 		count = 0;
3283 		segind = start_segind;
3284 		while ((segind = vm_phys_find_range(bounds, segind, domain,
3285 		    npages, low, high)) != -1) {
3286 			phys_range_exists = true;
3287 			while ((m_run = vm_page_scan_contig(npages, bounds[0],
3288 			    bounds[1], alignment, boundary, options))) {
3289 				bounds[0] = m_run + npages;
3290 				m_runs[RUN_INDEX(count, nruns)] = m_run;
3291 				count++;
3292 			}
3293 			segind++;
3294 		}
3295 
3296 		if (!phys_range_exists) {
3297 			ret = ERANGE;
3298 			goto done;
3299 		}
3300 
3301 		/*
3302 		 * Reclaim the highest runs in LIFO (descending) order until
3303 		 * the number of reclaimed pages, "reclaimed", is at least
3304 		 * "min_reclaim".  Reset "reclaimed" each time because each
3305 		 * reclamation is idempotent, and runs will (likely) recur
3306 		 * from one scan to the next as restrictions are relaxed.
3307 		 */
3308 		reclaimed = 0;
3309 		for (i = 0; count > 0 && i < nruns; i++) {
3310 			count--;
3311 			m_run = m_runs[RUN_INDEX(count, nruns)];
3312 			error = vm_page_reclaim_run(req_class, domain, npages,
3313 			    m_run, high);
3314 			if (error == 0) {
3315 				reclaimed += npages;
3316 				if (reclaimed >= min_reclaim) {
3317 					ret = 0;
3318 					goto done;
3319 				}
3320 			}
3321 		}
3322 
3323 		/*
3324 		 * Either relax the restrictions on the next scan or return if
3325 		 * the last scan had no restrictions.
3326 		 */
3327 		if (options == VPSC_NORESERV)
3328 			options = VPSC_NOSUPER;
3329 		else if (options == VPSC_NOSUPER)
3330 			options = VPSC_ANY;
3331 		else if (options == VPSC_ANY) {
3332 			if (reclaimed != 0)
3333 				ret = 0;
3334 			goto done;
3335 		}
3336 	}
3337 done:
3338 	if (m_runs != _m_runs)
3339 		free(m_runs, M_TEMP);
3340 	return (ret);
3341 }
3342 
3343 int
3344 vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
3345     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
3346 {
3347 	return (vm_page_reclaim_contig_domain_ext(domain, req, npages, low,
3348 	    high, alignment, boundary, 1));
3349 }
3350 
3351 int
3352 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
3353     u_long alignment, vm_paddr_t boundary)
3354 {
3355 	struct vm_domainset_iter di;
3356 	int domain, ret, status;
3357 
3358 	ret = ERANGE;
3359 
3360 	if (vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req) != 0)
3361 		return (ret);
3362 
3363 	do {
3364 		status = vm_page_reclaim_contig_domain(domain, req, npages, low,
3365 		    high, alignment, boundary);
3366 		if (status == 0)
3367 			return (0);
3368 		else if (status == ERANGE)
3369 			vm_domainset_iter_ignore(&di, domain);
3370 		else {
3371 			KASSERT(status == ENOMEM, ("Unrecognized error %d "
3372 			    "from vm_page_reclaim_contig_domain()", status));
3373 			ret = ENOMEM;
3374 		}
3375 	} while (vm_domainset_iter_page(&di, NULL, &domain, NULL) == 0);
3376 
3377 	return (ret);
3378 }
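
/*
 * Example (editorial sketch, not part of the original source): the caller-side
 * retry loop described above vm_page_reclaim_contig_domain_ext().  A failed
 * contiguous allocation is followed by an attempted reclamation; ENOMEM from
 * the reclaim indicates a plain shortage of free pages, so the caller waits
 * and retries, while ERANGE means the request can never be satisfied.  The
 * helper name and the give-up-on-ERANGE policy are assumptions.
 */
static vm_page_t __unused
example_alloc_contig_retry(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_page_t m;
	int error;

	for (;;) {
		m = vm_page_alloc_noobj_contig(VM_ALLOC_NORMAL |
		    VM_ALLOC_WIRED, npages, low, high, alignment, boundary,
		    VM_MEMATTR_DEFAULT);
		if (m != NULL)
			return (m);
		error = vm_page_reclaim_contig(VM_ALLOC_NORMAL, npages, low,
		    high, alignment, boundary);
		if (error == ERANGE)
			return (NULL);
		if (error == ENOMEM)
			vm_wait(NULL);
	}
}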
3379 
3380 /*
3381  * Set the domain in the appropriate page level domainset.
3382  */
3383 void
3384 vm_domain_set(struct vm_domain *vmd)
3385 {
3386 
3387 	mtx_lock(&vm_domainset_lock);
3388 	if (!vmd->vmd_minset && vm_paging_min(vmd)) {
3389 		vmd->vmd_minset = 1;
3390 		DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains);
3391 	}
3392 	if (!vmd->vmd_severeset && vm_paging_severe(vmd)) {
3393 		vmd->vmd_severeset = 1;
3394 		DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains);
3395 	}
3396 	mtx_unlock(&vm_domainset_lock);
3397 }
3398 
3399 /*
3400  * Clear the domain from the appropriate page level domainset.
3401  */
3402 void
3403 vm_domain_clear(struct vm_domain *vmd)
3404 {
3405 
3406 	mtx_lock(&vm_domainset_lock);
3407 	if (vmd->vmd_minset && !vm_paging_min(vmd)) {
3408 		vmd->vmd_minset = 0;
3409 		DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains);
3410 		if (vm_min_waiters != 0) {
3411 			vm_min_waiters = 0;
3412 			wakeup(&vm_min_domains);
3413 		}
3414 	}
3415 	if (vmd->vmd_severeset && !vm_paging_severe(vmd)) {
3416 		vmd->vmd_severeset = 0;
3417 		DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains);
3418 		if (vm_severe_waiters != 0) {
3419 			vm_severe_waiters = 0;
3420 			wakeup(&vm_severe_domains);
3421 		}
3422 	}
3423 
3424 	/*
3425 	 * If the pageout daemon needs pages, then tell it that there are
3426 	 * some free.
3427 	 */
3428 	if (vmd->vmd_pageout_pages_needed &&
3429 	    vmd->vmd_free_count >= vmd->vmd_pageout_free_min) {
3430 		wakeup(&vmd->vmd_pageout_pages_needed);
3431 		vmd->vmd_pageout_pages_needed = 0;
3432 	}
3433 
3434 	/* See comments in vm_wait_doms(). */
3435 	if (vm_pageproc_waiters) {
3436 		vm_pageproc_waiters = 0;
3437 		wakeup(&vm_pageproc_waiters);
3438 	}
3439 	mtx_unlock(&vm_domainset_lock);
3440 }
3441 
3442 /*
3443  * Wait for free pages to exceed the min threshold globally.
3444  */
3445 void
3446 vm_wait_min(void)
3447 {
3448 
3449 	mtx_lock(&vm_domainset_lock);
3450 	while (vm_page_count_min()) {
3451 		vm_min_waiters++;
3452 		msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0);
3453 	}
3454 	mtx_unlock(&vm_domainset_lock);
3455 }
3456 
3457 /*
3458  * Wait for free pages to exceed the severe threshold globally.
3459  */
3460 void
3461 vm_wait_severe(void)
3462 {
3463 
3464 	mtx_lock(&vm_domainset_lock);
3465 	while (vm_page_count_severe()) {
3466 		vm_severe_waiters++;
3467 		msleep(&vm_severe_domains, &vm_domainset_lock, PVM,
3468 		    "vmwait", 0);
3469 	}
3470 	mtx_unlock(&vm_domainset_lock);
3471 }
3472 
3473 u_int
3474 vm_wait_count(void)
3475 {
3476 
3477 	return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters);
3478 }
3479 
3480 int
3481 vm_wait_doms(const domainset_t *wdoms, int mflags)
3482 {
3483 	int error;
3484 
3485 	error = 0;
3486 
3487 	/*
3488 	 * We use racy wakeup synchronization to avoid expensive global
3489 	 * locking for the pageproc when sleeping with a non-specific vm_wait.
3490 	 * To handle this, we only sleep for one tick in this instance.  It
3491 	 * is expected that most allocations for the pageproc will come from
3492 	 * kmem or vm_page_grab* which will use the more specific and
3493 	 * race-free vm_wait_domain().
3494 	 */
3495 	if (curproc == pageproc) {
3496 		mtx_lock(&vm_domainset_lock);
3497 		vm_pageproc_waiters++;
3498 		error = msleep(&vm_pageproc_waiters, &vm_domainset_lock,
3499 		    PVM | PDROP | mflags, "pageprocwait", 1);
3500 	} else {
3501 		/*
3502 		 * XXX Ideally we would wait only until the allocation could
3503 		 * be satisfied.  This condition can cause new allocators to
3504 		 * consume all freed pages while old allocators wait.
3505 		 */
3506 		mtx_lock(&vm_domainset_lock);
3507 		if (vm_page_count_min_set(wdoms)) {
3508 			if (pageproc == NULL)
3509 				panic("vm_wait in early boot");
3510 			vm_min_waiters++;
3511 			error = msleep(&vm_min_domains, &vm_domainset_lock,
3512 			    PVM | PDROP | mflags, "vmwait", 0);
3513 		} else
3514 			mtx_unlock(&vm_domainset_lock);
3515 	}
3516 	return (error);
3517 }
3518 
3519 /*
3520  *	vm_wait_domain:
3521  *
3522  *	Sleep until free pages are available for allocation.
3523  *	- Called in various places after failed memory allocations.
3524  */
3525 void
3526 vm_wait_domain(int domain)
3527 {
3528 	struct vm_domain *vmd;
3529 	domainset_t wdom;
3530 
3531 	vmd = VM_DOMAIN(domain);
3532 	vm_domain_free_assert_unlocked(vmd);
3533 
3534 	if (curproc == pageproc) {
3535 		mtx_lock(&vm_domainset_lock);
3536 		if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) {
3537 			vmd->vmd_pageout_pages_needed = 1;
3538 			msleep(&vmd->vmd_pageout_pages_needed,
3539 			    &vm_domainset_lock, PDROP | PSWP, "VMWait", 0);
3540 		} else
3541 			mtx_unlock(&vm_domainset_lock);
3542 	} else {
3543 		DOMAINSET_ZERO(&wdom);
3544 		DOMAINSET_SET(vmd->vmd_domain, &wdom);
3545 		vm_wait_doms(&wdom, 0);
3546 	}
3547 }
3548 
3549 static int
3550 vm_wait_flags(vm_object_t obj, int mflags)
3551 {
3552 	struct domainset *d;
3553 
3554 	d = NULL;
3555 
3556 	/*
3557 	 * Carefully fetch pointers only once: the struct domainset
3558 	 * itself is immutable but the pointer might change.
3559 	 */
3560 	if (obj != NULL)
3561 		d = obj->domain.dr_policy;
3562 	if (d == NULL)
3563 		d = curthread->td_domain.dr_policy;
3564 
3565 	return (vm_wait_doms(&d->ds_mask, mflags));
3566 }
3567 
3568 /*
3569  *	vm_wait:
3570  *
3571  *	Sleep until free pages are available for allocation in the
3572  *	affinity domains of the obj.  If obj is NULL, the domain set
3573  *	for the calling thread is used.
3574  *	Called in various places after failed memory allocations.
3575  */
3576 void
3577 vm_wait(vm_object_t obj)
3578 {
3579 	(void)vm_wait_flags(obj, 0);
3580 }
3581 
3582 int
3583 vm_wait_intr(vm_object_t obj)
3584 {
3585 	return (vm_wait_flags(obj, PCATCH));
3586 }
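
/*
 * Illustrative usage sketch (not part of the original source; "object",
 * "pindex" and "m" are assumed caller variables): after a failed
 * allocation, a caller typically drops the object lock, sleeps in
 * vm_wait(), and retries:
 *
 *	retry:
 *		m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 *		if (m == NULL) {
 *			VM_OBJECT_WUNLOCK(object);
 *			vm_wait(object);
 *			VM_OBJECT_WLOCK(object);
 *			goto retry;
 *		}
 */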
3587 
3588 /*
3589  *	vm_domain_alloc_fail:
3590  *
3591  *	Called when a page allocation function fails.  Informs the
3592  *	pagedaemon and performs the requested wait.  Requires the
3593  *	domain_free and object lock on entry.  Returns with the
3594  *	object lock held and free lock released.  Returns an error when
3595  *	retry is necessary.
3596  *
3597  */
3598 static int
3599 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req)
3600 {
3601 
3602 	vm_domain_free_assert_unlocked(vmd);
3603 
3604 	atomic_add_int(&vmd->vmd_pageout_deficit,
3605 	    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
3606 	if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) {
3607 		if (object != NULL)
3608 			VM_OBJECT_WUNLOCK(object);
3609 		vm_wait_domain(vmd->vmd_domain);
3610 		if (object != NULL)
3611 			VM_OBJECT_WLOCK(object);
3612 		if (req & VM_ALLOC_WAITOK)
3613 			return (EAGAIN);
3614 	}
3615 
3616 	return (0);
3617 }
3618 
3619 /*
3620  *	vm_waitpfault:
3621  *
3622  *	Sleep until free pages are available for allocation.
3623  *	- Called only in vm_fault so that processes page faulting
3624  *	  can be easily tracked.
3625  *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
3626  *	  processes will be able to grab memory first.  Do not change
3627  *	  this balance without careful testing first.
3628  */
3629 void
3630 vm_waitpfault(struct domainset *dset, int timo)
3631 {
3632 
3633 	/*
3634 	 * XXX Ideally we would wait only until the allocation could
3635 	 * be satisfied.  This condition can cause new allocators to
3636 	 * consume all freed pages while old allocators wait.
3637 	 */
3638 	mtx_lock(&vm_domainset_lock);
3639 	if (vm_page_count_min_set(&dset->ds_mask)) {
3640 		vm_min_waiters++;
3641 		msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP,
3642 		    "pfault", timo);
3643 	} else
3644 		mtx_unlock(&vm_domainset_lock);
3645 }
3646 
3647 static struct vm_pagequeue *
3648 _vm_page_pagequeue(vm_page_t m, uint8_t queue)
3649 {
3650 
3651 	return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
3652 }
3653 
3654 #ifdef INVARIANTS
3655 static struct vm_pagequeue *
3656 vm_page_pagequeue(vm_page_t m)
3657 {
3658 
3659 	return (_vm_page_pagequeue(m, vm_page_astate_load(m).queue));
3660 }
3661 #endif
3662 
3663 static __always_inline bool
3664 vm_page_pqstate_fcmpset(vm_page_t m, vm_page_astate_t *old,
3665     vm_page_astate_t new)
3666 {
3667 	vm_page_astate_t tmp;
3668 
3669 	tmp = *old;
3670 	do {
3671 		if (__predict_true(vm_page_astate_fcmpset(m, old, new)))
3672 			return (true);
3673 		counter_u64_add(pqstate_commit_retries, 1);
3674 	} while (old->_bits == tmp._bits);
3675 
3676 	return (false);
3677 }
3678 
3679 /*
3680  * Do the work of committing a queue state update that moves the page out of
3681  * its current queue.
3682  */
3683 static bool
3684 _vm_page_pqstate_commit_dequeue(struct vm_pagequeue *pq, vm_page_t m,
3685     vm_page_astate_t *old, vm_page_astate_t new)
3686 {
3687 	vm_page_t next;
3688 
3689 	vm_pagequeue_assert_locked(pq);
3690 	KASSERT(vm_page_pagequeue(m) == pq,
3691 	    ("%s: queue %p does not match page %p", __func__, pq, m));
3692 	KASSERT(old->queue != PQ_NONE && new.queue != old->queue,
3693 	    ("%s: invalid queue indices %d %d",
3694 	    __func__, old->queue, new.queue));
3695 
3696 	/*
3697 	 * Once the queue index of the page changes there is nothing
3698 	 * synchronizing with further updates to the page's physical
3699 	 * queue state.  Therefore we must speculatively remove the page
3700 	 * from the queue now and be prepared to roll back if the queue
3701 	 * state update fails.  If the page is not physically enqueued then
3702 	 * we just update its queue index.
3703 	 */
3704 	if ((old->flags & PGA_ENQUEUED) != 0) {
3705 		new.flags &= ~PGA_ENQUEUED;
3706 		next = TAILQ_NEXT(m, plinks.q);
3707 		TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
3708 		vm_pagequeue_cnt_dec(pq);
3709 		if (!vm_page_pqstate_fcmpset(m, old, new)) {
3710 			if (next == NULL)
3711 				TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
3712 			else
3713 				TAILQ_INSERT_BEFORE(next, m, plinks.q);
3714 			vm_pagequeue_cnt_inc(pq);
3715 			return (false);
3716 		} else {
3717 			return (true);
3718 		}
3719 	} else {
3720 		return (vm_page_pqstate_fcmpset(m, old, new));
3721 	}
3722 }
3723 
3724 static bool
3725 vm_page_pqstate_commit_dequeue(vm_page_t m, vm_page_astate_t *old,
3726     vm_page_astate_t new)
3727 {
3728 	struct vm_pagequeue *pq;
3729 	vm_page_astate_t as;
3730 	bool ret;
3731 
3732 	pq = _vm_page_pagequeue(m, old->queue);
3733 
3734 	/*
3735 	 * The queue field and PGA_ENQUEUED flag are stable only so long as the
3736 	 * corresponding page queue lock is held.
3737 	 */
3738 	vm_pagequeue_lock(pq);
3739 	as = vm_page_astate_load(m);
3740 	if (__predict_false(as._bits != old->_bits)) {
3741 		*old = as;
3742 		ret = false;
3743 	} else {
3744 		ret = _vm_page_pqstate_commit_dequeue(pq, m, old, new);
3745 	}
3746 	vm_pagequeue_unlock(pq);
3747 	return (ret);
3748 }
3749 
3750 /*
3751  * Commit a queue state update that enqueues or requeues a page.
3752  */
3753 static bool
3754 _vm_page_pqstate_commit_requeue(struct vm_pagequeue *pq, vm_page_t m,
3755     vm_page_astate_t *old, vm_page_astate_t new)
3756 {
3757 	struct vm_domain *vmd;
3758 
3759 	vm_pagequeue_assert_locked(pq);
3760 	KASSERT(old->queue != PQ_NONE && new.queue == old->queue,
3761 	    ("%s: invalid queue indices %d %d",
3762 	    __func__, old->queue, new.queue));
3763 
3764 	new.flags |= PGA_ENQUEUED;
3765 	if (!vm_page_pqstate_fcmpset(m, old, new))
3766 		return (false);
3767 
3768 	if ((old->flags & PGA_ENQUEUED) != 0)
3769 		TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
3770 	else
3771 		vm_pagequeue_cnt_inc(pq);
3772 
3773 	/*
3774 	 * Give PGA_REQUEUE_HEAD precedence over PGA_REQUEUE.  In particular, if
3775 	 * both flags are set in close succession, only PGA_REQUEUE_HEAD will be
3776 	 * applied, even if it was set first.
3777 	 */
3778 	if ((old->flags & PGA_REQUEUE_HEAD) != 0) {
3779 		vmd = vm_pagequeue_domain(m);
3780 		KASSERT(pq == &vmd->vmd_pagequeues[PQ_INACTIVE],
3781 		    ("%s: invalid page queue for page %p", __func__, m));
3782 		TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
3783 	} else {
3784 		TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
3785 	}
3786 	return (true);
3787 }
3788 
3789 /*
3790  * Commit a queue state update that encodes a request for a deferred queue
3791  * operation.
3792  */
3793 static bool
3794 vm_page_pqstate_commit_request(vm_page_t m, vm_page_astate_t *old,
3795     vm_page_astate_t new)
3796 {
3797 
3798 	KASSERT(old->queue == new.queue || new.queue != PQ_NONE,
3799 	    ("%s: invalid state, queue %d flags %x",
3800 	    __func__, new.queue, new.flags));
3801 
3802 	if (old->_bits != new._bits &&
3803 	    !vm_page_pqstate_fcmpset(m, old, new))
3804 		return (false);
3805 	vm_page_pqbatch_submit(m, new.queue);
3806 	return (true);
3807 }
3808 
3809 /*
3810  * A generic queue state update function.  This handles more cases than the
3811  * specialized functions above.
3812  */
3813 bool
3814 vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
3815 {
3816 
3817 	if (old->_bits == new._bits)
3818 		return (true);
3819 
3820 	if (old->queue != PQ_NONE && new.queue != old->queue) {
3821 		if (!vm_page_pqstate_commit_dequeue(m, old, new))
3822 			return (false);
3823 		if (new.queue != PQ_NONE)
3824 			vm_page_pqbatch_submit(m, new.queue);
3825 	} else {
3826 		if (!vm_page_pqstate_fcmpset(m, old, new))
3827 			return (false);
3828 		if (new.queue != PQ_NONE &&
3829 		    ((new.flags & ~old->flags) & PGA_QUEUE_OP_MASK) != 0)
3830 			vm_page_pqbatch_submit(m, new.queue);
3831 	}
3832 	return (true);
3833 }
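
/*
 * Illustrative usage sketch (not part of the original source): callers
 * drive vm_page_pqstate_commit() from a load/modify/retry loop, for
 * example to request that page "m" be requeued within its current queue:
 *
 *	vm_page_astate_t old, new;
 *
 *	old = vm_page_astate_load(m);
 *	do {
 *		if (old.queue == PQ_NONE)
 *			break;
 *		new = old;
 *		new.flags |= PGA_REQUEUE;
 *	} while (!vm_page_pqstate_commit(m, &old, new));
 */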
3834 
3835 /*
3836  * Apply deferred queue state updates to a page.
3837  */
3838 static inline void
3839 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue)
3840 {
3841 	vm_page_astate_t new, old;
3842 
3843 	CRITICAL_ASSERT(curthread);
3844 	vm_pagequeue_assert_locked(pq);
3845 	KASSERT(queue < PQ_COUNT,
3846 	    ("%s: invalid queue index %d", __func__, queue));
3847 	KASSERT(pq == _vm_page_pagequeue(m, queue),
3848 	    ("%s: page %p does not belong to queue %p", __func__, m, pq));
3849 
3850 	for (old = vm_page_astate_load(m);;) {
3851 		if (__predict_false(old.queue != queue ||
3852 		    (old.flags & PGA_QUEUE_OP_MASK) == 0)) {
3853 			counter_u64_add(queue_nops, 1);
3854 			break;
3855 		}
3856 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3857 		    ("%s: page %p is unmanaged", __func__, m));
3858 
3859 		new = old;
3860 		if ((old.flags & PGA_DEQUEUE) != 0) {
3861 			new.flags &= ~PGA_QUEUE_OP_MASK;
3862 			new.queue = PQ_NONE;
3863 			if (__predict_true(_vm_page_pqstate_commit_dequeue(pq,
3864 			    m, &old, new))) {
3865 				counter_u64_add(queue_ops, 1);
3866 				break;
3867 			}
3868 		} else {
3869 			new.flags &= ~(PGA_REQUEUE | PGA_REQUEUE_HEAD);
3870 			if (__predict_true(_vm_page_pqstate_commit_requeue(pq,
3871 			    m, &old, new))) {
3872 				counter_u64_add(queue_ops, 1);
3873 				break;
3874 			}
3875 		}
3876 	}
3877 }
3878 
3879 static void
3880 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
3881     uint8_t queue)
3882 {
3883 	int i;
3884 
3885 	for (i = 0; i < bq->bq_cnt; i++)
3886 		vm_pqbatch_process_page(pq, bq->bq_pa[i], queue);
3887 	vm_batchqueue_init(bq);
3888 }
3889 
3890 /*
3891  *	vm_page_pqbatch_submit:		[ internal use only ]
3892  *
3893  *	Enqueue a page in the specified page queue's batched work queue.
3894  *	The caller must have encoded the requested operation in the page
3895  *	structure's a.flags field.
3896  */
3897 void
3898 vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
3899 {
3900 	struct vm_batchqueue *bq;
3901 	struct vm_pagequeue *pq;
3902 	int domain, slots_remaining;
3903 
3904 	KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));
3905 
3906 	domain = vm_page_domain(m);
3907 	critical_enter();
3908 	bq = DPCPU_PTR(pqbatch[domain][queue]);
3909 	slots_remaining = vm_batchqueue_insert(bq, m);
3910 	if (slots_remaining > (VM_BATCHQUEUE_SIZE >> 1)) {
3911 		/* keep building the bq */
3912 		critical_exit();
3913 		return;
3914 	} else if (slots_remaining > 0) {
3915 		/* Try to process the bq if we can get the lock */
3916 		pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue];
3917 		if (vm_pagequeue_trylock(pq)) {
3918 			vm_pqbatch_process(pq, bq, queue);
3919 			vm_pagequeue_unlock(pq);
3920 		}
3921 		critical_exit();
3922 		return;
3923 	}
3924 	critical_exit();
3925 
3926 	/* if we make it here, the bq is full so wait for the lock */
3927 
3928 	pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue];
3929 	vm_pagequeue_lock(pq);
3930 	critical_enter();
3931 	bq = DPCPU_PTR(pqbatch[domain][queue]);
3932 	vm_pqbatch_process(pq, bq, queue);
3933 	vm_pqbatch_process_page(pq, m, queue);
3934 	vm_pagequeue_unlock(pq);
3935 	critical_exit();
3936 }
3937 
3938 /*
3939  *	vm_page_pqbatch_drain:		[ internal use only ]
3940  *
3941  *	Force all per-CPU page queue batch queues to be drained.  This is
3942  *	intended for use in severe memory shortages, to ensure that pages
3943  *	do not remain stuck in the batch queues.
3944  */
3945 void
3946 vm_page_pqbatch_drain(void)
3947 {
3948 	struct thread *td;
3949 	struct vm_domain *vmd;
3950 	struct vm_pagequeue *pq;
3951 	int cpu, domain, queue;
3952 
3953 	td = curthread;
3954 	CPU_FOREACH(cpu) {
3955 		thread_lock(td);
3956 		sched_bind(td, cpu);
3957 		thread_unlock(td);
3958 
3959 		for (domain = 0; domain < vm_ndomains; domain++) {
3960 			vmd = VM_DOMAIN(domain);
3961 			for (queue = 0; queue < PQ_COUNT; queue++) {
3962 				pq = &vmd->vmd_pagequeues[queue];
3963 				vm_pagequeue_lock(pq);
3964 				critical_enter();
3965 				vm_pqbatch_process(pq,
3966 				    DPCPU_PTR(pqbatch[domain][queue]), queue);
3967 				critical_exit();
3968 				vm_pagequeue_unlock(pq);
3969 			}
3970 		}
3971 	}
3972 	thread_lock(td);
3973 	sched_unbind(td);
3974 	thread_unlock(td);
3975 }
3976 
3977 /*
3978  *	vm_page_dequeue_deferred:	[ internal use only ]
3979  *
3980  *	Request removal of the given page from its current page
3981  *	queue.  Physical removal from the queue may be deferred
3982  *	indefinitely.
3983  */
3984 void
3985 vm_page_dequeue_deferred(vm_page_t m)
3986 {
3987 	vm_page_astate_t new, old;
3988 
3989 	old = vm_page_astate_load(m);
3990 	do {
3991 		if (old.queue == PQ_NONE) {
3992 			KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0,
3993 			    ("%s: page %p has unexpected queue state",
3994 			    __func__, m));
3995 			break;
3996 		}
3997 		new = old;
3998 		new.flags |= PGA_DEQUEUE;
3999 	} while (!vm_page_pqstate_commit_request(m, &old, new));
4000 }
4001 
4002 /*
4003  *	vm_page_dequeue:
4004  *
4005  *	Remove the page from whichever page queue it's in, if any, before
4006  *	returning.
4007  */
4008 void
4009 vm_page_dequeue(vm_page_t m)
4010 {
4011 	vm_page_astate_t new, old;
4012 
4013 	old = vm_page_astate_load(m);
4014 	do {
4015 		if (__predict_true(old.queue == PQ_NONE)) {
4016 			KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0,
4017 			    ("%s: page %p has unexpected queue state",
4018 			    __func__, m));
4019 			break;
4020 		}
4021 		new = old;
4022 		new.flags &= ~PGA_QUEUE_OP_MASK;
4023 		new.queue = PQ_NONE;
4024 	} while (!vm_page_pqstate_commit_dequeue(m, &old, new));
4025 
4026 }
4027 
4028 /*
4029  * Schedule the given page for insertion into the specified page queue.
4030  * Physical insertion of the page may be deferred indefinitely.
4031  */
4032 static void
4033 vm_page_enqueue(vm_page_t m, uint8_t queue)
4034 {
4035 
4036 	KASSERT(m->a.queue == PQ_NONE &&
4037 	    (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
4038 	    ("%s: page %p is already enqueued", __func__, m));
4039 	KASSERT(m->ref_count > 0,
4040 	    ("%s: page %p does not carry any references", __func__, m));
4041 
4042 	m->a.queue = queue;
4043 	if ((m->a.flags & PGA_REQUEUE) == 0)
4044 		vm_page_aflag_set(m, PGA_REQUEUE);
4045 	vm_page_pqbatch_submit(m, queue);
4046 }
4047 
4048 /*
4049  *	vm_page_free_prep:
4050  *
4051  *	Prepares the given page to be put on the free list,
4052  *	disassociating it from any VM object. The caller may return
4053  *	the page to the free list only if this function returns true.
4054  *
4055  *	The object, if it exists, must be locked, and then the page must
4056  *	be xbusy.  Otherwise the page must not be busied.  A managed
4057  *	page must be unmapped.
4058  */
4059 static bool
4060 vm_page_free_prep(vm_page_t m)
4061 {
4062 
4063 	/*
4064 	 * Synchronize with threads that have dropped a reference to this
4065 	 * page.
4066 	 */
4067 	atomic_thread_fence_acq();
4068 
4069 #ifdef INVARIANTS
4070 	if (vm_check_pg_zero && (m->flags & PG_ZERO) != 0) {
4071 		struct sf_buf *sf;
4072 		unsigned long *p;
4073 		int i;
4074 
4075 		sched_pin();
4076 		sf = sf_buf_alloc(m, SFB_CPUPRIVATE | SFB_NOWAIT);
4077 		if (sf != NULL) {
4078 			p = (unsigned long *)sf_buf_kva(sf);
4079 			for (i = 0; i < PAGE_SIZE / sizeof(*p); i++, p++) {
4080 				KASSERT(*p == 0,
4081 				    ("zerocheck failed page %p PG_ZERO %d %jx",
4082 				    m, i, (uintmax_t)*p));
4083 			}
4084 			sf_buf_free(sf);
4085 		}
4086 		sched_unpin();
4087 	}
4088 #endif
4089 	if ((m->oflags & VPO_UNMANAGED) == 0) {
4090 		KASSERT(!pmap_page_is_mapped(m),
4091 		    ("vm_page_free_prep: freeing mapped page %p", m));
4092 		KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0,
4093 		    ("vm_page_free_prep: mapping flags set in page %p", m));
4094 	} else {
4095 		KASSERT(m->a.queue == PQ_NONE,
4096 		    ("vm_page_free_prep: unmanaged page %p is queued", m));
4097 	}
4098 	VM_CNT_INC(v_tfree);
4099 
4100 	if (m->object != NULL) {
4101 		vm_page_radix_remove(m);
4102 		vm_page_free_object_prep(m);
4103 	} else
4104 		vm_page_assert_unbusied(m);
4105 
4106 	vm_page_busy_free(m);
4107 
4108 	/*
4109 	 * If the page is fictitious, remove the object association and
4110 	 * return.
4111 	 */
4112 	if ((m->flags & PG_FICTITIOUS) != 0) {
4113 		KASSERT(m->ref_count == 1,
4114 		    ("fictitious page %p is referenced", m));
4115 		KASSERT(m->a.queue == PQ_NONE,
4116 		    ("fictitious page %p is queued", m));
4117 		return (false);
4118 	}
4119 
4120 	/*
4121 	 * Pages need not be dequeued before they are returned to the physical
4122 	 * memory allocator, but they must at least be marked for a deferred
4123 	 * dequeue.
4124 	 */
4125 	if ((m->oflags & VPO_UNMANAGED) == 0)
4126 		vm_page_dequeue_deferred(m);
4127 
4128 	m->valid = 0;
4129 	vm_page_undirty(m);
4130 
4131 	if (m->ref_count != 0)
4132 		panic("vm_page_free_prep: page %p has references", m);
4133 
4134 	/*
4135 	 * Restore the default memory attribute to the page.
4136 	 */
4137 	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
4138 		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
4139 
4140 #if VM_NRESERVLEVEL > 0
4141 	/*
4142 	 * Determine whether the page belongs to a reservation.  If the page was
4143 	 * allocated from a per-CPU cache, it cannot belong to a reservation, so
4144 	 * as an optimization, we avoid the check in that case.
4145 	 */
4146 	if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m))
4147 		return (false);
4148 #endif
4149 
4150 	return (true);
4151 }
4152 
4153 /*
4154  *	vm_page_free_toq:
4155  *
4156  *	Returns the given page to the free list, disassociating it
4157  *	from any VM object.
4158  *
4159  *	The object must be locked.  The page must be exclusively busied if it
4160  *	belongs to an object.
4161  */
4162 static void
4163 vm_page_free_toq(vm_page_t m)
4164 {
4165 	struct vm_domain *vmd;
4166 	uma_zone_t zone;
4167 
4168 	if (!vm_page_free_prep(m))
4169 		return;
4170 
4171 	vmd = vm_pagequeue_domain(m);
4172 	if (__predict_false((m->flags & PG_NOFREE) != 0)) {
4173 		vm_page_free_nofree(vmd, m);
4174 		return;
4175 	}
4176 	zone = vmd->vmd_pgcache[m->pool].zone;
4177 	if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) {
4178 		uma_zfree(zone, m);
4179 		return;
4180 	}
4181 	vm_domain_free_lock(vmd);
4182 	vm_phys_free_pages(m, m->pool, 0);
4183 	vm_domain_free_unlock(vmd);
4184 	vm_domain_freecnt_inc(vmd, 1);
4185 }
4186 
4187 /*
4188  *	vm_page_free_pages_toq:
4189  *
4190  *	Returns a list of pages to the free list, disassociating each
4191  *	page from any VM object.  In other words, this is equivalent to
4192  *	calling vm_page_free_toq() for each page in the list.
4193  */
4194 int
4195 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count)
4196 {
4197 	vm_page_t m;
4198 	int count;
4199 
4200 	if (SLIST_EMPTY(free))
4201 		return (0);
4202 
4203 	count = 0;
4204 	while ((m = SLIST_FIRST(free)) != NULL) {
4205 		count++;
4206 		SLIST_REMOVE_HEAD(free, plinks.s.ss);
4207 		vm_page_free_toq(m);
4208 	}
4209 
4210 	if (update_wire_count)
4211 		vm_wire_sub(count);
4212 	return (count);
4213 }
4214 
4215 /*
4216  * Mark this page as wired down.  For managed pages, this prevents reclamation
4217  * by the page daemon, or when the containing object, if any, is destroyed.
4218  */
4219 void
4220 vm_page_wire(vm_page_t m)
4221 {
4222 	u_int old;
4223 
4224 #ifdef INVARIANTS
4225 	if (m->object != NULL && !vm_page_busied(m) &&
4226 	    !vm_object_busied(m->object))
4227 		VM_OBJECT_ASSERT_LOCKED(m->object);
4228 #endif
4229 	KASSERT((m->flags & PG_FICTITIOUS) == 0 ||
4230 	    VPRC_WIRE_COUNT(m->ref_count) >= 1,
4231 	    ("vm_page_wire: fictitious page %p has zero wirings", m));
4232 
4233 	old = atomic_fetchadd_int(&m->ref_count, 1);
4234 	KASSERT(VPRC_WIRE_COUNT(old) != VPRC_WIRE_COUNT_MAX,
4235 	    ("vm_page_wire: counter overflow for page %p", m));
4236 	if (VPRC_WIRE_COUNT(old) == 0) {
4237 		if ((m->oflags & VPO_UNMANAGED) == 0)
4238 			vm_page_aflag_set(m, PGA_DEQUEUE);
4239 		vm_wire_add(1);
4240 	}
4241 }
4242 
4243 /*
4244  * Attempt to wire a mapped page following a pmap lookup of that page.
4245  * This may fail if a thread is concurrently tearing down mappings of the page.
4246  * The transient failure is acceptable because it translates to the
4247  * failure of the caller pmap_extract_and_hold(), which should then be
4248  * followed by the vm_fault() fallback, see e.g. vm_fault_quick_hold_pages().
4249  */
4250 bool
4251 vm_page_wire_mapped(vm_page_t m)
4252 {
4253 	u_int old;
4254 
4255 	old = atomic_load_int(&m->ref_count);
4256 	do {
4257 		KASSERT(old > 0,
4258 		    ("vm_page_wire_mapped: wiring unreferenced page %p", m));
4259 		if ((old & VPRC_BLOCKED) != 0)
4260 			return (false);
4261 	} while (!atomic_fcmpset_int(&m->ref_count, &old, old + 1));
4262 
4263 	if (VPRC_WIRE_COUNT(old) == 0) {
4264 		if ((m->oflags & VPO_UNMANAGED) == 0)
4265 			vm_page_aflag_set(m, PGA_DEQUEUE);
4266 		vm_wire_add(1);
4267 	}
4268 	return (true);
4269 }
4270 
4271 /*
4272  * Release a wiring reference to a managed page.  If the page still belongs to
4273  * an object, update its position in the page queues to reflect the reference.
4274  * If the wiring was the last reference to the page, free the page.
4275  */
4276 static void
4277 vm_page_unwire_managed(vm_page_t m, uint8_t nqueue, bool noreuse)
4278 {
4279 	u_int old;
4280 
4281 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4282 	    ("%s: page %p is unmanaged", __func__, m));
4283 
4284 	/*
4285 	 * Update LRU state before releasing the wiring reference.
4286 	 * Use a release store when updating the reference count to
4287 	 * synchronize with vm_page_free_prep().
4288 	 */
4289 	old = atomic_load_int(&m->ref_count);
4290 	do {
4291 		u_int count;
4292 
4293 		KASSERT(VPRC_WIRE_COUNT(old) > 0,
4294 		    ("vm_page_unwire: wire count underflow for page %p", m));
4295 
4296 		count = old & ~VPRC_BLOCKED;
4297 		if (count > VPRC_OBJREF + 1) {
4298 			/*
4299 			 * The page has at least one other wiring reference.  An
4300 			 * earlier iteration of this loop may have called
4301 			 * vm_page_release_toq() and cleared PGA_DEQUEUE, so
4302 			 * re-set it if necessary.
4303 			 */
4304 			if ((vm_page_astate_load(m).flags & PGA_DEQUEUE) == 0)
4305 				vm_page_aflag_set(m, PGA_DEQUEUE);
4306 		} else if (count == VPRC_OBJREF + 1) {
4307 			/*
4308 			 * This is the last wiring.  Clear PGA_DEQUEUE and
4309 			 * update the page's queue state to reflect the
4310 			 * reference.  If the page does not belong to an object
4311 			 * (i.e., the VPRC_OBJREF bit is clear), we only need to
4312 			 * clear leftover queue state.
4313 			 */
4314 			vm_page_release_toq(m, nqueue, noreuse);
4315 		} else if (count == 1) {
4316 			vm_page_aflag_clear(m, PGA_DEQUEUE);
4317 		}
4318 	} while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1));
4319 
4320 	if (VPRC_WIRE_COUNT(old) == 1) {
4321 		vm_wire_sub(1);
4322 		if (old == 1)
4323 			vm_page_free(m);
4324 	}
4325 }
4326 
4327 /*
4328  * Release one wiring of the specified page, potentially allowing it to be
4329  * paged out.
4330  *
4331  * Only managed pages belonging to an object can be paged out.  If the number
4332  * of wirings transitions to zero and the page is eligible for page out, then
4333  * the page is added to the specified paging queue.  If the released wiring
4334  * represented the last reference to the page, the page is freed.
4335  */
4336 void
4337 vm_page_unwire(vm_page_t m, uint8_t nqueue)
4338 {
4339 
4340 	KASSERT(nqueue < PQ_COUNT,
4341 	    ("vm_page_unwire: invalid queue %u request for page %p",
4342 	    nqueue, m));
4343 
4344 	if ((m->oflags & VPO_UNMANAGED) != 0) {
4345 		if (vm_page_unwire_noq(m) && m->ref_count == 0)
4346 			vm_page_free(m);
4347 		return;
4348 	}
4349 	vm_page_unwire_managed(m, nqueue, false);
4350 }
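
/*
 * Illustrative usage sketch (not part of the original source): a caller
 * holding the object lock or a busy reference on page "m" pairs the
 * wiring calls so that the page stays resident only while it is needed:
 *
 *	vm_page_wire(m);
 *	... access the page without fear of it being reclaimed ...
 *	vm_page_unwire(m, PQ_ACTIVE);
 */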
4351 
4352 /*
4353  * Unwire a page without (re-)inserting it into a page queue.  It is up
4354  * to the caller to enqueue, requeue, or free the page as appropriate.
4355  * In most cases involving managed pages, vm_page_unwire() should be used
4356  * instead.
4357  */
4358 bool
4359 vm_page_unwire_noq(vm_page_t m)
4360 {
4361 	u_int old;
4362 
4363 	old = vm_page_drop(m, 1);
4364 	KASSERT(VPRC_WIRE_COUNT(old) != 0,
4365 	    ("%s: counter underflow for page %p", __func__,  m));
4366 	KASSERT((m->flags & PG_FICTITIOUS) == 0 || VPRC_WIRE_COUNT(old) > 1,
4367 	    ("%s: missing ref on fictitious page %p", __func__, m));
4368 
4369 	if (VPRC_WIRE_COUNT(old) > 1)
4370 		return (false);
4371 	if ((m->oflags & VPO_UNMANAGED) == 0)
4372 		vm_page_aflag_clear(m, PGA_DEQUEUE);
4373 	vm_wire_sub(1);
4374 	return (true);
4375 }
4376 
4377 /*
4378  * Ensure that the page ends up in the specified page queue.  If the page is
4379  * active or being moved to the active queue, ensure that its act_count is
4380  * at least ACT_INIT but do not otherwise mess with it.
4381  */
4382 static __always_inline void
4383 vm_page_mvqueue(vm_page_t m, const uint8_t nqueue, const uint16_t nflag)
4384 {
4385 	vm_page_astate_t old, new;
4386 
4387 	KASSERT(m->ref_count > 0,
4388 	    ("%s: page %p does not carry any references", __func__, m));
4389 	KASSERT(nflag == PGA_REQUEUE || nflag == PGA_REQUEUE_HEAD,
4390 	    ("%s: invalid flags %x", __func__, nflag));
4391 
4392 	if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m))
4393 		return;
4394 
4395 	old = vm_page_astate_load(m);
4396 	do {
4397 		if ((old.flags & PGA_DEQUEUE) != 0)
4398 			break;
4399 		new = old;
4400 		new.flags &= ~PGA_QUEUE_OP_MASK;
4401 		if (nqueue == PQ_ACTIVE)
4402 			new.act_count = max(old.act_count, ACT_INIT);
4403 		if (old.queue == nqueue) {
4404 			/*
4405 			 * There is no need to requeue pages already in the
4406 			 * active queue.
4407 			 */
4408 			if (nqueue != PQ_ACTIVE ||
4409 			    (old.flags & PGA_ENQUEUED) == 0)
4410 				new.flags |= nflag;
4411 		} else {
4412 			new.flags |= nflag;
4413 			new.queue = nqueue;
4414 		}
4415 	} while (!vm_page_pqstate_commit(m, &old, new));
4416 }
4417 
4418 /*
4419  * Put the specified page on the active list (if appropriate).
4420  */
4421 void
4422 vm_page_activate(vm_page_t m)
4423 {
4424 
4425 	vm_page_mvqueue(m, PQ_ACTIVE, PGA_REQUEUE);
4426 }
4427 
4428 /*
4429  * Move the specified page to the tail of the inactive queue, or requeue
4430  * the page if it is already in the inactive queue.
4431  */
4432 void
4433 vm_page_deactivate(vm_page_t m)
4434 {
4435 
4436 	vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE);
4437 }
4438 
4439 void
4440 vm_page_deactivate_noreuse(vm_page_t m)
4441 {
4442 
4443 	vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE_HEAD);
4444 }
4445 
4446 /*
4447  * Put a page in the laundry, or requeue it if it is already there.
4448  */
4449 void
4450 vm_page_launder(vm_page_t m)
4451 {
4452 
4453 	vm_page_mvqueue(m, PQ_LAUNDRY, PGA_REQUEUE);
4454 }
4455 
4456 /*
4457  * Put a page in the PQ_UNSWAPPABLE holding queue.
4458  */
4459 void
4460 vm_page_unswappable(vm_page_t m)
4461 {
4462 
4463 	VM_OBJECT_ASSERT_LOCKED(m->object);
4464 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4465 	    ("page %p already unswappable", m));
4466 
4467 	vm_page_dequeue(m);
4468 	vm_page_enqueue(m, PQ_UNSWAPPABLE);
4469 }
4470 
4471 /*
4472  * Release a page back to the page queues in preparation for unwiring.
4473  */
4474 static void
4475 vm_page_release_toq(vm_page_t m, uint8_t nqueue, const bool noreuse)
4476 {
4477 	vm_page_astate_t old, new;
4478 	uint16_t nflag;
4479 
4480 	/*
4481 	 * Use a check of the valid bits to determine whether we should
4482 	 * accelerate reclamation of the page.  The object lock might not be
4483 	 * held here, in which case the check is racy.  At worst we will either
4484 	 * accelerate reclamation of a valid page and violate LRU, or
4485 	 * unnecessarily defer reclamation of an invalid page.
4486 	 *
4487 	 * If we were asked to not cache the page, place it near the head of the
4488 	 * inactive queue so that it is reclaimed sooner.
4489 	 */
4490 	if (noreuse || vm_page_none_valid(m)) {
4491 		nqueue = PQ_INACTIVE;
4492 		nflag = PGA_REQUEUE_HEAD;
4493 	} else {
4494 		nflag = PGA_REQUEUE;
4495 	}
4496 
4497 	old = vm_page_astate_load(m);
4498 	do {
4499 		new = old;
4500 
4501 		/*
4502 		 * If the page is already in the active queue and we are not
4503 		 * trying to accelerate reclamation, simply mark it as
4504 		 * referenced and avoid any queue operations.
4505 		 */
4506 		new.flags &= ~PGA_QUEUE_OP_MASK;
4507 		if (nflag != PGA_REQUEUE_HEAD && old.queue == PQ_ACTIVE &&
4508 		    (old.flags & PGA_ENQUEUED) != 0)
4509 			new.flags |= PGA_REFERENCED;
4510 		else {
4511 			new.flags |= nflag;
4512 			new.queue = nqueue;
4513 		}
4514 	} while (!vm_page_pqstate_commit(m, &old, new));
4515 }
4516 
4517 /*
4518  * Unwire a page and either attempt to free it or re-add it to the page queues.
4519  */
4520 void
4521 vm_page_release(vm_page_t m, int flags)
4522 {
4523 	vm_object_t object;
4524 
4525 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4526 	    ("vm_page_release: page %p is unmanaged", m));
4527 
4528 	if ((flags & VPR_TRYFREE) != 0) {
4529 		for (;;) {
4530 			object = atomic_load_ptr(&m->object);
4531 			if (object == NULL)
4532 				break;
4533 			/* Depends on type-stability. */
4534 			if (vm_page_busied(m) || !VM_OBJECT_TRYWLOCK(object))
4535 				break;
4536 			if (object == m->object) {
4537 				vm_page_release_locked(m, flags);
4538 				VM_OBJECT_WUNLOCK(object);
4539 				return;
4540 			}
4541 			VM_OBJECT_WUNLOCK(object);
4542 		}
4543 	}
4544 	vm_page_unwire_managed(m, PQ_INACTIVE, flags != 0);
4545 }
4546 
4547 /* See vm_page_release(). */
4548 void
4549 vm_page_release_locked(vm_page_t m, int flags)
4550 {
4551 
4552 	VM_OBJECT_ASSERT_WLOCKED(m->object);
4553 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4554 	    ("vm_page_release_locked: page %p is unmanaged", m));
4555 
4556 	if (vm_page_unwire_noq(m)) {
4557 		if ((flags & VPR_TRYFREE) != 0 &&
4558 		    (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
4559 		    m->dirty == 0 && vm_page_tryxbusy(m)) {
4560 			/*
4561 			 * An unlocked lookup may have wired the page before the
4562 			 * busy lock was acquired, in which case the page must
4563 			 * not be freed.
4564 			 */
4565 			if (__predict_true(!vm_page_wired(m))) {
4566 				vm_page_free(m);
4567 				return;
4568 			}
4569 			vm_page_xunbusy(m);
4570 		} else {
4571 			vm_page_release_toq(m, PQ_INACTIVE, flags != 0);
4572 		}
4573 	}
4574 }
4575 
4576 static bool
4577 vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t))
4578 {
4579 	u_int old;
4580 
4581 	KASSERT(m->object != NULL && (m->oflags & VPO_UNMANAGED) == 0,
4582 	    ("vm_page_try_blocked_op: page %p has no object", m));
4583 	KASSERT(vm_page_busied(m),
4584 	    ("vm_page_try_blocked_op: page %p is not busy", m));
4585 	VM_OBJECT_ASSERT_LOCKED(m->object);
4586 
4587 	old = atomic_load_int(&m->ref_count);
4588 	do {
4589 		KASSERT(old != 0,
4590 		    ("vm_page_try_blocked_op: page %p has no references", m));
4591 		KASSERT((old & VPRC_BLOCKED) == 0,
4592 		    ("vm_page_try_blocked_op: page %p blocks wirings", m));
4593 		if (VPRC_WIRE_COUNT(old) != 0)
4594 			return (false);
4595 	} while (!atomic_fcmpset_int(&m->ref_count, &old, old | VPRC_BLOCKED));
4596 
4597 	(op)(m);
4598 
4599 	/*
4600 	 * If the object is read-locked, new wirings may be created via an
4601 	 * object lookup.
4602 	 */
4603 	old = vm_page_drop(m, VPRC_BLOCKED);
4604 	KASSERT(!VM_OBJECT_WOWNED(m->object) ||
4605 	    old == (VPRC_BLOCKED | VPRC_OBJREF),
4606 	    ("vm_page_try_blocked_op: unexpected refcount value %u for %p",
4607 	    old, m));
4608 	return (true);
4609 }
4610 
4611 /*
4612  * Atomically check for wirings and remove all mappings of the page.
4613  */
4614 bool
4615 vm_page_try_remove_all(vm_page_t m)
4616 {
4617 
4618 	return (vm_page_try_blocked_op(m, pmap_remove_all));
4619 }
4620 
4621 /*
4622  * Atomically check for wirings and remove all writeable mappings of the page.
4623  */
4624 bool
4625 vm_page_try_remove_write(vm_page_t m)
4626 {
4627 
4628 	return (vm_page_try_blocked_op(m, pmap_remove_write));
4629 }
4630 
4631 /*
4632  * vm_page_advise
4633  *
4634  * 	Apply the specified advice to the given page.
4635  */
4636 void
4637 vm_page_advise(vm_page_t m, int advice)
4638 {
4639 
4640 	VM_OBJECT_ASSERT_WLOCKED(m->object);
4641 	vm_page_assert_xbusied(m);
4642 
4643 	if (advice == MADV_FREE)
4644 		/*
4645 		 * Mark the page clean.  This will allow the page to be freed
4646 		 * without first paging it out.  MADV_FREE pages are often
4647 		 * quickly reused by malloc(3), so we do not do anything that
4648 		 * would result in a page fault on a later access.
4649 		 */
4650 		vm_page_undirty(m);
4651 	else if (advice != MADV_DONTNEED) {
4652 		if (advice == MADV_WILLNEED)
4653 			vm_page_activate(m);
4654 		return;
4655 	}
4656 
4657 	if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
4658 		vm_page_dirty(m);
4659 
4660 	/*
4661 	 * Clear any references to the page.  Otherwise, the page daemon will
4662 	 * immediately reactivate the page.
4663 	 */
4664 	vm_page_aflag_clear(m, PGA_REFERENCED);
4665 
4666 	/*
4667 	 * Place clean pages near the head of the inactive queue rather than
4668 	 * the tail, thus defeating the queue's LRU operation and ensuring that
4669 	 * the page will be reused quickly.  Dirty pages not already in the
4670 	 * laundry are moved there.
4671 	 */
4672 	if (m->dirty == 0)
4673 		vm_page_deactivate_noreuse(m);
4674 	else if (!vm_page_in_laundry(m))
4675 		vm_page_launder(m);
4676 }
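
/*
 * Illustrative usage sketch (not part of the original source; "object",
 * "pindex" and "m" are assumed caller variables): per the assertions
 * above, vm_page_advise() is called with the object write-locked and the
 * page exclusively busied:
 *
 *	VM_OBJECT_WLOCK(object);
 *	if (vm_page_tryxbusy(m)) {
 *		vm_page_advise(m, MADV_FREE);
 *		vm_page_xunbusy(m);
 *	}
 *	VM_OBJECT_WUNLOCK(object);
 */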
4677 
4678 /*
4679  *	vm_page_grab_release
4680  *
4681  *	Helper routine for grab functions to release busy on return.
4682  */
4683 static inline void
4684 vm_page_grab_release(vm_page_t m, int allocflags)
4685 {
4686 
4687 	if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
4688 		if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
4689 			vm_page_sunbusy(m);
4690 		else
4691 			vm_page_xunbusy(m);
4692 	}
4693 }
4694 
4695 /*
4696  *	vm_page_grab_sleep
4697  *
4698  *	Sleep for busy according to VM_ALLOC_ parameters.  Returns true
4699  *	if the caller should retry and false otherwise.
4700  *
4701  *	If the object is locked on entry the object will be unlocked with
4702  *	false returns and still locked but possibly having been dropped
4703  *	with true returns.
4704  */
4705 static bool
4706 vm_page_grab_sleep(vm_object_t object, vm_page_t m, vm_pindex_t pindex,
4707     const char *wmesg, int allocflags, bool locked)
4708 {
4709 
4710 	if ((allocflags & VM_ALLOC_NOWAIT) != 0)
4711 		return (false);
4712 
4713 	/*
4714 	 * Reference the page before unlocking and sleeping so that
4715 	 * the page daemon is less likely to reclaim it.
4716 	 */
4717 	if (locked && (allocflags & VM_ALLOC_NOCREAT) == 0)
4718 		vm_page_reference(m);
4719 
4720 	if (_vm_page_busy_sleep(object, m, pindex, wmesg, allocflags, locked) &&
4721 	    locked)
4722 		VM_OBJECT_WLOCK(object);
4723 	if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
4724 		return (false);
4725 
4726 	return (true);
4727 }
4728 
4729 /*
4730  * Assert that the grab flags are valid.
4731  */
4732 static inline void
4733 vm_page_grab_check(int allocflags)
4734 {
4735 
4736 	KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
4737 	    (allocflags & VM_ALLOC_WIRED) != 0,
4738 	    ("vm_page_grab*: the pages must be busied or wired"));
4739 
4740 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
4741 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
4742 	    ("vm_page_grab*: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
4743 }
4744 
4745 /*
4746  * Calculate the page allocation flags for grab.
4747  */
4748 static inline int
4749 vm_page_grab_pflags(int allocflags)
4750 {
4751 	int pflags;
4752 
4753 	pflags = allocflags &
4754 	    ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL |
4755 	    VM_ALLOC_NOBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
4756 	if ((allocflags & VM_ALLOC_NOWAIT) == 0)
4757 		pflags |= VM_ALLOC_WAITFAIL;
4758 	if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
4759 		pflags |= VM_ALLOC_SBUSY;
4760 
4761 	return (pflags);
4762 }
4763 
4764 /*
4765  * Grab a page, waiting until we are woken up due to the page changing state.
4766  * We keep on waiting as long as the page continues to be in the object, unless
4767  * allocflags forbid waiting.
4768  *
4769  * The object must be locked on entry.  This routine may sleep.  The lock will,
4770  * however, be released and reacquired if the routine sleeps.
4771  *
4772  *  Return a grabbed page, or NULL.  Set *found if a page was found, whether or
4773  *  not it was grabbed.
4774  */
4775 static inline vm_page_t
4776 vm_page_grab_lookup(vm_object_t object, vm_pindex_t pindex, int allocflags,
4777     bool *found, struct pctrie_iter *pages)
4778 {
4779 	vm_page_t m;
4780 
4781 	while ((*found = (m = vm_radix_iter_lookup(pages, pindex)) != NULL) &&
4782 	    !vm_page_tryacquire(m, allocflags)) {
4783 		if (!vm_page_grab_sleep(object, m, pindex, "pgrbwt",
4784 		    allocflags, true))
4785 			return (NULL);
4786 		pctrie_iter_reset(pages);
4787 	}
4788 	return (m);
4789 }
4790 
4791 /*
4792  * Grab a page.  Use an iterator parameter. Keep on waiting, as long as the page
4793  * exists in the object.  If the page doesn't exist, first allocate it and then
4794  * conditionally zero it.
4795  *
4796  * The object must be locked on entry.  This routine may sleep.  The lock will,
4797  * however, be released and reacquired if the routine sleeps.
4798  */
4799 vm_page_t
4800 vm_page_grab_iter(vm_object_t object, vm_pindex_t pindex, int allocflags,
4801     struct pctrie_iter *pages)
4802 {
4803 	vm_page_t m;
4804 	bool found;
4805 
4806 	VM_OBJECT_ASSERT_WLOCKED(object);
4807 	vm_page_grab_check(allocflags);
4808 
4809 	while ((m = vm_page_grab_lookup(
4810 	    object, pindex, allocflags, &found, pages)) == NULL) {
4811 		if ((allocflags & VM_ALLOC_NOCREAT) != 0)
4812 			return (NULL);
4813 		if (found &&
4814 		    (allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
4815 			return (NULL);
4816 		m = vm_page_alloc_iter(object, pindex,
4817 		    vm_page_grab_pflags(allocflags), pages);
4818 		if (m != NULL) {
4819 			if ((allocflags & VM_ALLOC_ZERO) != 0 &&
4820 			    (m->flags & PG_ZERO) == 0)
4821 				pmap_zero_page(m);
4822 			break;
4823 		}
4824 		if ((allocflags &
4825 		    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
4826 			return (NULL);
4827 	}
4828 	vm_page_grab_release(m, allocflags);
4829 
4830 	return (m);
4831 }
4832 
4833 /*
4834  * Grab a page.  Keep on waiting, as long as the page exists in the object.  If
4835  * the page doesn't exist, first allocate it and then conditionally zero it.
4836  *
4837  * The object must be locked on entry.  This routine may sleep.  The lock will,
4838  * however, be released and reacquired if the routine sleeps.
4839  */
4840 vm_page_t
4841 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
4842 {
4843 	struct pctrie_iter pages;
4844 
4845 	VM_OBJECT_ASSERT_WLOCKED(object);
4846 	vm_page_iter_init(&pages, object);
4847 	return (vm_page_grab_iter(object, pindex, allocflags, &pages));
4848 }
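
/*
 * Illustrative usage sketch (not part of the original source; "object",
 * "pindex" and "m" are assumed caller variables): with the object
 * write-locked, vm_page_grab() returns an exclusively busied page,
 * allocating and optionally zeroing it when it does not yet exist:
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *	if (m != NULL) {
 *		... use the xbusied page ...
 *		vm_page_xunbusy(m);
 *	}
 *	VM_OBJECT_WUNLOCK(object);
 */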
4849 
4850 /*
4851  * Attempt to validate a page, locklessly acquiring it if necessary, given a
4852  * (object, pindex) tuple and either an invalidated page or NULL.  The resulting
4853  * page will be validated against the identity tuple, and busied or wired as
4854  * requested.  A NULL page returned guarantees that the page was not in radix at
4855  * the time of the call but callers must perform higher level synchronization or
4856  * retry the operation under a lock if they require an atomic answer.  This is
4857  * the only lock-free validation routine; other routines can depend on the
4858  * resulting page state.
4859  *
4860  * The return value PAGE_NOT_ACQUIRED indicates that the operation failed due to
4861  * caller flags.
4862  */
4863 #define PAGE_NOT_ACQUIRED ((vm_page_t)1)
4864 static vm_page_t
4865 vm_page_acquire_unlocked(vm_object_t object, vm_pindex_t pindex, vm_page_t m,
4866     int allocflags)
4867 {
4868 	if (m == NULL)
4869 		m = vm_page_lookup_unlocked(object, pindex);
4870 	for (; m != NULL; m = vm_page_lookup_unlocked(object, pindex)) {
4871 		if (vm_page_trybusy(m, allocflags)) {
4872 			if (m->object == object && m->pindex == pindex) {
4873 				if ((allocflags & VM_ALLOC_WIRED) != 0)
4874 					vm_page_wire(m);
4875 				vm_page_grab_release(m, allocflags);
4876 				break;
4877 			}
4878 			/* relookup. */
4879 			vm_page_busy_release(m);
4880 			cpu_spinwait();
4881 			continue;
4882 		}
4883 		if (!vm_page_grab_sleep(object, m, pindex, "pgnslp",
4884 		    allocflags, false))
4885 			return (PAGE_NOT_ACQUIRED);
4886 	}
4887 	return (m);
4888 }
4889 
4890 /*
4891  * Try to locklessly grab a page and fall back to the object lock if NOCREAT
4892  * is not set.
4893  */
4894 vm_page_t
4895 vm_page_grab_unlocked(vm_object_t object, vm_pindex_t pindex, int allocflags)
4896 {
4897 	vm_page_t m;
4898 
4899 	vm_page_grab_check(allocflags);
4900 	m = vm_page_acquire_unlocked(object, pindex, NULL, allocflags);
4901 	if (m == PAGE_NOT_ACQUIRED)
4902 		return (NULL);
4903 	if (m != NULL)
4904 		return (m);
4905 
4906 	/*
4907 	 * The lockless radix lookup should never return false negatives.
4908 	 * If the user specifies NOCREAT, they are guaranteed there
4909 	 * was no page present at the instant of the call.  A NOCREAT caller
4910 	 * must handle create races gracefully.
4911 	 */
4912 	if ((allocflags & VM_ALLOC_NOCREAT) != 0)
4913 		return (NULL);
4914 
4915 	VM_OBJECT_WLOCK(object);
4916 	m = vm_page_grab(object, pindex, allocflags);
4917 	VM_OBJECT_WUNLOCK(object);
4918 
4919 	return (m);
4920 }
4921 
4922 /*
4923  * Grab a page and make it valid, paging in if necessary.  Use an iterator
4924  * parameter. Pages missing from their pager are zero filled and validated.  If
4925  * a VM_ALLOC_COUNT is supplied and the page is not valid as many as
4926  * a VM_ALLOC_COUNT is supplied and the page is not valid, as many as
4927  * will be left on a paging queue but will neither be wired nor busy regardless
4928  * of allocflags.
4929  */
4930 int
4931 vm_page_grab_valid_iter(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
4932     int allocflags, struct pctrie_iter *pages)
4933 {
4934 	vm_page_t m;
4935 	vm_page_t ma[VM_INITIAL_PAGEIN];
4936 	int after, ahead, i, pflags, rv;
4937 
4938 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
4939 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
4940 	    ("vm_page_grab_valid: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
4941 	KASSERT((allocflags &
4942 	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
4943 	    ("vm_page_grab_valid: Invalid flags 0x%X", allocflags));
4944 	VM_OBJECT_ASSERT_WLOCKED(object);
4945 	pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY |
4946 	    VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY);
4947 	pflags |= VM_ALLOC_WAITFAIL;
4948 
4949 retrylookup:
4950 	if ((m = vm_radix_iter_lookup(pages, pindex)) != NULL) {
4951 		/*
4952 		 * If the page is fully valid it can only become invalid
4953 		 * with the object lock held.  If it is not valid it can
4954 		 * become valid with the busy lock held.  Therefore, we
4955 		 * may unnecessarily lock the exclusive busy here if we
4956 		 * race with I/O completion not using the object lock.
4957 		 * However, we will not end up with an invalid page and a
4958 		 * shared lock.
4959 		 */
4960 		if (!vm_page_trybusy(m,
4961 		    vm_page_all_valid(m) ? allocflags : 0)) {
4962 			(void)vm_page_grab_sleep(object, m, pindex, "pgrbwt",
4963 			    allocflags, true);
4964 			pctrie_iter_reset(pages);
4965 			goto retrylookup;
4966 		}
4967 		if (vm_page_all_valid(m))
4968 			goto out;
4969 		if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
4970 			vm_page_busy_release(m);
4971 			*mp = NULL;
4972 			return (VM_PAGER_FAIL);
4973 		}
4974 	} else if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
4975 		*mp = NULL;
4976 		return (VM_PAGER_FAIL);
4977 	} else {
4978 		m = vm_page_alloc_iter(object, pindex, pflags, pages);
4979 		if (m == NULL) {
4980 			if (!vm_pager_can_alloc_page(object, pindex)) {
4981 				*mp = NULL;
4982 				return (VM_PAGER_AGAIN);
4983 			}
4984 			goto retrylookup;
4985 		}
4986 	}
4987 
4988 	vm_page_assert_xbusied(m);
4989 	if (vm_pager_has_page(object, pindex, NULL, &after)) {
4990 		after = MIN(after, VM_INITIAL_PAGEIN);
4991 		after = MIN(after, allocflags >> VM_ALLOC_COUNT_SHIFT);
4992 		after = MAX(after, 1);
4993 		ma[0] = m;
4994 		pctrie_iter_reset(pages);
4995 		for (i = 1; i < after; i++) {
4996 			m = vm_radix_iter_lookup_ge(pages, pindex + i);
4997 			ahead = after;
4998 			if (m != NULL)
4999 				ahead = MIN(ahead, m->pindex - pindex);
5000 			for (; i < ahead; i++) {
5001 				ma[i] = vm_page_alloc_iter(object, pindex + i,
5002 				    VM_ALLOC_NORMAL, pages);
5003 				if (ma[i] == NULL)
5004 					break;
5005 			}
5006 			if (m == NULL || m->pindex != pindex + i ||
5007 			    vm_page_any_valid(m) || !vm_page_tryxbusy(m))
5008 				break;
5009 			ma[i] = m;
5010 		}
5011 		after = i;
5012 		vm_object_pip_add(object, after);
5013 		VM_OBJECT_WUNLOCK(object);
5014 		rv = vm_pager_get_pages(object, ma, after, NULL, NULL);
5015 		pctrie_iter_reset(pages);
5016 		VM_OBJECT_WLOCK(object);
5017 		vm_object_pip_wakeupn(object, after);
5018 		/* Pager may have replaced a page. */
5019 		m = ma[0];
5020 		if (rv != VM_PAGER_OK) {
5021 			for (i = 0; i < after; i++) {
5022 				if (!vm_page_wired(ma[i]))
5023 					vm_page_free(ma[i]);
5024 				else
5025 					vm_page_xunbusy(ma[i]);
5026 			}
5027 			*mp = NULL;
5028 			return (rv);
5029 		}
5030 		for (i = 1; i < after; i++)
5031 			vm_page_readahead_finish(ma[i]);
5032 		MPASS(vm_page_all_valid(m));
5033 	} else {
5034 		vm_page_zero_invalid(m, TRUE);
5035 		pctrie_iter_reset(pages);
5036 	}
5037 out:
5038 	if ((allocflags & VM_ALLOC_WIRED) != 0)
5039 		vm_page_wire(m);
5040 	if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m))
5041 		vm_page_busy_downgrade(m);
5042 	else if ((allocflags & VM_ALLOC_NOBUSY) != 0)
5043 		vm_page_busy_release(m);
5044 	*mp = m;
5045 	return (VM_PAGER_OK);
5046 }
5047 
5048 /*
5049  * Grab a page and make it valid, paging in if necessary.  Pages missing from
5050  * their pager are zero filled and validated.  If a VM_ALLOC_COUNT is supplied
5051  * and the page is not valid, as many as VM_INITIAL_PAGEIN pages can be brought
5052  * in simultaneously.  Additional pages will be left on a paging queue but
5053  * will neither be wired nor busy regardless of allocflags.
5054  */
5055 int
5056 vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
5057     int allocflags)
5058 {
5059 	struct pctrie_iter pages;
5060 
5061 	VM_OBJECT_ASSERT_WLOCKED(object);
5062 	vm_page_iter_init(&pages, object);
5063 	return (vm_page_grab_valid_iter(mp, object, pindex, allocflags,
5064 	    &pages));
5065 }
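
/*
 * Illustrative usage sketch (not part of the original source; "object" and
 * "pindex" are assumed caller variables): with the object write-locked, a
 * caller can obtain a fully valid page, paging it in from the backing
 * store if necessary:
 *
 *	vm_page_t m;
 *
 *	VM_OBJECT_WLOCK(object);
 *	if (vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL |
 *	    VM_ALLOC_WIRED | VM_ALLOC_NOBUSY) == VM_PAGER_OK) {
 *		... m is valid and wired but not busied ...
 *		vm_page_unwire(m, PQ_ACTIVE);
 *	}
 *	VM_OBJECT_WUNLOCK(object);
 */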
5066 
5067 /*
5068  * Grab a page.  Keep on waiting, as long as the page exists in the object.  If
5069  * the page doesn't exist, and the pager has it, allocate it and zero part of
5070  * it.
5071  *
5072  * The object must be locked on entry.  This routine may sleep.  The lock will,
5073  * however, be released and reacquired if the routine sleeps.
5074  */
5075 int
5076 vm_page_grab_zero_partial(vm_object_t object, vm_pindex_t pindex, int base,
5077     int end)
5078 {
5079 	struct pctrie_iter pages;
5080 	vm_page_t m;
5081 	int allocflags, rv;
5082 	bool found;
5083 
5084 	VM_OBJECT_ASSERT_WLOCKED(object);
5085 	KASSERT(base >= 0, ("%s: base %d", __func__, base));
5086 	KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
5087 	    end));
5088 
5089 	allocflags = VM_ALLOC_NOCREAT | VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL;
5090 	vm_page_iter_init(&pages, object);
5091 	while ((m = vm_page_grab_lookup(
5092 	    object, pindex, allocflags, &found, &pages)) == NULL) {
5093 		if (!vm_pager_has_page(object, pindex, NULL, NULL))
5094 			return (0);
5095 		m = vm_page_alloc_iter(object, pindex,
5096 		    vm_page_grab_pflags(allocflags), &pages);
5097 		if (m != NULL) {
5098 			vm_object_pip_add(object, 1);
5099 			VM_OBJECT_WUNLOCK(object);
5100 			rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
5101 			VM_OBJECT_WLOCK(object);
5102 			vm_object_pip_wakeup(object);
5103 			if (rv != VM_PAGER_OK) {
5104 				vm_page_free(m);
5105 				return (EIO);
5106 			}
5107 
5108 			/*
5109 			 * Since the page was not resident, and therefore not
5110 			 * recently accessed, immediately enqueue it for
5111 			 * asynchronous laundering.  The current operation is
5112 			 * not regarded as an access.
5113 			 */
5114 			vm_page_launder(m);
5115 			break;
5116 		}
5117 	}
5118 
5119 	pmap_zero_page_area(m, base, end - base);
5120 	KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid", __func__, m));
5121 	vm_page_set_dirty(m);
5122 	vm_page_xunbusy(m);
5123 	return (0);
5124 }
5125 
5126 /*
5127  * Locklessly grab a valid page.  If the page is not valid or not yet
5128  * allocated this will fall back to the object lock method.
5129  */
5130 int
5131 vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
5132     vm_pindex_t pindex, int allocflags)
5133 {
5134 	vm_page_t m;
5135 	int flags;
5136 	int error;
5137 
5138 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
5139 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
5140 	    ("vm_page_grab_valid_unlocked: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY "
5141 	    "mismatch"));
5142 	KASSERT((allocflags &
5143 	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
5144 	    ("vm_page_grab_valid_unlocked: Invalid flags 0x%X", allocflags));
5145 
5146 	/*
5147 	 * Attempt a lockless lookup and busy.  We need at least an sbusy
5148 	 * before we can inspect the valid field and return a wired page.
5149 	 */
5150 	flags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
5151 	vm_page_grab_check(flags);
5152 	m = vm_page_acquire_unlocked(object, pindex, NULL, flags);
5153 	if (m == PAGE_NOT_ACQUIRED)
5154 		return (VM_PAGER_FAIL);
5155 	if (m != NULL) {
5156 		if (vm_page_all_valid(m)) {
5157 			if ((allocflags & VM_ALLOC_WIRED) != 0)
5158 				vm_page_wire(m);
5159 			vm_page_grab_release(m, allocflags);
5160 			*mp = m;
5161 			return (VM_PAGER_OK);
5162 		}
5163 		vm_page_busy_release(m);
5164 	}
5165 	if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
5166 		*mp = NULL;
5167 		return (VM_PAGER_FAIL);
5168 	}
5169 	VM_OBJECT_WLOCK(object);
5170 	error = vm_page_grab_valid(mp, object, pindex, allocflags);
5171 	VM_OBJECT_WUNLOCK(object);
5172 
5173 	return (error);
5174 }
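
/*
 * Illustrative sketch (hypothetical caller, not taken from this file): fetch a
 * valid, wired page without taking the object lock up front.  The lockless
 * path above handles resident, already-valid pages; anything else falls back
 * to the locked vm_page_grab_valid().  "obj" and "pindex" are assumed names.
 *
 *	vm_page_t m;
 *
 *	if (vm_page_grab_valid_unlocked(&m, obj, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY) != VM_PAGER_OK)
 *		return (EIO);
 *	... use the wired page, then vm_page_unwire(m, PQ_ACTIVE) ...
 *
 * VM_ALLOC_NOWAIT, VM_ALLOC_WAITFAIL and VM_ALLOC_ZERO are rejected by the
 * KASSERT above, so such a call may sleep.
 */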
5175 
5176 /*
5177  * Return the specified range of pages from the given object.  For each
5178  * page offset within the range, if a page already exists within the object
5179  * at that offset and it is busy, then wait for it to change state.  If,
5180  * instead, the page doesn't exist, then allocate it.
5181  *
5182  * The caller must always specify an allocation class.
5183  *
5184  * allocation classes:
5185  *	VM_ALLOC_NORMAL		normal process request
5186  *	VM_ALLOC_SYSTEM		system *really* needs the pages
5187  *	VM_ALLOC_INTERRUPT	interrupt time request
5188  *
5189  * The caller must always specify that the pages are to be busied and/or
5190  * wired.
5191  *
5192  * optional allocation flags:
5193  *	VM_ALLOC_IGN_SBUSY	do not sleep on soft busy pages
5194  *	VM_ALLOC_NOBUSY		do not exclusive busy the pages
5195  *	VM_ALLOC_NODUMP		do not include the pages in a kernel core dump
5196  *	VM_ALLOC_NOFREE		pages will never be freed
5197  *	VM_ALLOC_NOWAIT		do not sleep
5198  *	VM_ALLOC_SBUSY		set pages to sbusy state
5199  *	VM_ALLOC_WAITFAIL	in case of failure, sleep before returning
5200  *	VM_ALLOC_WAITOK		ignored (default behavior)
5201  *	VM_ALLOC_WIRED		wire the pages
5202  *	VM_ALLOC_ZERO		zero and validate any invalid pages
5203  *
5204  * If VM_ALLOC_NOWAIT is not specified, this routine may sleep.  Otherwise, it
5205  * may return a partial prefix of the requested range.
5206  */
5207 int
5208 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
5209     vm_page_t *ma, int count)
5210 {
5211 	struct pctrie_iter pages;
5212 	vm_page_t m;
5213 	int pflags;
5214 	int ahead, i;
5215 
5216 	VM_OBJECT_ASSERT_WLOCKED(object);
5217 	KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
5218 	    ("vm_page_grab_pages: VM_ALLOC_COUNT() is not allowed"));
5219 	KASSERT(count > 0,
5220 	    ("vm_page_grab_pages: invalid page count %d", count));
5221 	vm_page_grab_check(allocflags);
5222 
5223 	pflags = vm_page_grab_pflags(allocflags);
5224 	i = 0;
5225 	vm_page_iter_init(&pages, object);
5226 retrylookup:
5227 	ahead = -1;
5228 	for (; i < count; i++) {
5229 		if (ahead < 0) {
5230 			ahead = vm_radix_iter_lookup_range(
5231 			    &pages, pindex + i, &ma[i], count - i);
5232 		}
5233 		if (ahead-- > 0) {
5234 			m = ma[i];
5235 			if (!vm_page_tryacquire(m, allocflags)) {
5236 				if (vm_page_grab_sleep(object, m, pindex + i,
5237 				    "grbmaw", allocflags, true)) {
5238 					pctrie_iter_reset(&pages);
5239 					goto retrylookup;
5240 				}
5241 				break;
5242 			}
5243 		} else {
5244 			if ((allocflags & VM_ALLOC_NOCREAT) != 0)
5245 				break;
5246 			m = vm_page_alloc_iter(object, pindex + i,
5247 			    pflags | VM_ALLOC_COUNT(count - i), &pages);
5248 			/* pages was reset if alloc_iter lost the lock. */
5249 			if (m == NULL) {
5250 				if ((allocflags & (VM_ALLOC_NOWAIT |
5251 				    VM_ALLOC_WAITFAIL)) != 0)
5252 					break;
5253 				goto retrylookup;
5254 			}
5255 			ma[i] = m;
5256 		}
5257 		if (vm_page_none_valid(m) &&
5258 		    (allocflags & VM_ALLOC_ZERO) != 0) {
5259 			if ((m->flags & PG_ZERO) == 0)
5260 				pmap_zero_page(m);
5261 			vm_page_valid(m);
5262 		}
5263 		vm_page_grab_release(m, allocflags);
5264 	}
5265 	return (i);
5266 }
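
/*
 * Illustrative sketch (hypothetical caller, not taken from this file): grab a
 * short run of busied, wired, zero-filled pages without sleeping.  "obj" and
 * "pindex" are assumed names.
 *
 *	vm_page_t ma[4];
 *	int got;
 *
 *	VM_OBJECT_WLOCK(obj);
 *	got = vm_page_grab_pages(obj, pindex, VM_ALLOC_NORMAL |
 *	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_NOWAIT, ma, 4);
 *	VM_OBJECT_WUNLOCK(obj);
 *
 * Because VM_ALLOC_NOWAIT is given, the routine never sleeps and "got" may be
 * any prefix length from 0 to 4; ma[0 .. got - 1] are exclusive busied, wired,
 * and valid (previously invalid pages were zeroed).
 */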
5267 
5268 /*
5269  * Unlocked variant of vm_page_grab_pages().  This accepts the same flags
5270  * and will fall back to the locked variant to handle allocation.
5271  */
5272 int
5273 vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
5274     int allocflags, vm_page_t *ma, int count)
5275 {
5276 	vm_page_t m;
5277 	int flags;
5278 	int i, num_fetched;
5279 
5280 	KASSERT(count > 0,
5281 	    ("vm_page_grab_pages_unlocked: invalid page count %d", count));
5282 	vm_page_grab_check(allocflags);
5283 
5284 	/*
5285 	 * Modify flags for lockless acquire to hold the page until we
5286 	 * set it valid if necessary.
5287 	 */
5288 	flags = allocflags & ~VM_ALLOC_NOBUSY;
5289 	vm_page_grab_check(flags);
5290 	num_fetched = vm_radix_lookup_range_unlocked(&object->rtree, pindex,
5291 	    ma, count);
5292 	for (i = 0; i < num_fetched; i++, pindex++) {
5293 		m = vm_page_acquire_unlocked(object, pindex, ma[i], flags);
5294 		if (m == PAGE_NOT_ACQUIRED)
5295 			return (i);
5296 		if (m == NULL)
5297 			break;
5298 		if ((flags & VM_ALLOC_ZERO) != 0 && vm_page_none_valid(m)) {
5299 			if ((m->flags & PG_ZERO) == 0)
5300 				pmap_zero_page(m);
5301 			vm_page_valid(m);
5302 		}
5303 		/* m will still be wired or busy according to flags. */
5304 		vm_page_grab_release(m, allocflags);
5305 		/* vm_page_acquire_unlocked() may not return ma[i]. */
5306 		ma[i] = m;
5307 	}
5308 	if (i == count || (allocflags & VM_ALLOC_NOCREAT) != 0)
5309 		return (i);
5310 	count -= i;
5311 	VM_OBJECT_WLOCK(object);
5312 	i += vm_page_grab_pages(object, pindex, allocflags, &ma[i], count);
5313 	VM_OBJECT_WUNLOCK(object);
5314 
5315 	return (i);
5316 }
5317 
5318 /*
5319  * Mapping function for valid or dirty bits in a page.
5320  *
5321  * Inputs are required to range within a page.
5322  */
5323 vm_page_bits_t
5324 vm_page_bits(int base, int size)
5325 {
5326 	int first_bit;
5327 	int last_bit;
5328 
5329 	KASSERT(
5330 	    base + size <= PAGE_SIZE,
5331 	    ("vm_page_bits: illegal base/size %d/%d", base, size)
5332 	);
5333 
5334 	if (size == 0)		/* handle degenerate case */
5335 		return (0);
5336 
5337 	first_bit = base >> DEV_BSHIFT;
5338 	last_bit = (base + size - 1) >> DEV_BSHIFT;
5339 
5340 	return (((vm_page_bits_t)2 << last_bit) -
5341 	    ((vm_page_bits_t)1 << first_bit));
5342 }
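
/*
 * Worked examples of the mask computation above, assuming 4 KB pages and
 * 512-byte DEV_BSIZE blocks, so a page holds eight blocks (bits 0-7):
 *
 *	vm_page_bits(0, PAGE_SIZE) == 0xff	(blocks 0-7)
 *	vm_page_bits(512, 1024)    == 0x06	(blocks 1-2)
 *	vm_page_bits(100, 1)       == 0x01	(block 0 only)
 *
 * The subtraction (2 << last_bit) - (1 << first_bit) yields a contiguous run
 * of one bits from first_bit through last_bit inclusive.
 */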
5343 
5344 void
5345 vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set)
5346 {
5347 
5348 #if PAGE_SIZE == 32768
5349 	atomic_set_64((uint64_t *)bits, set);
5350 #elif PAGE_SIZE == 16384
5351 	atomic_set_32((uint32_t *)bits, set);
5352 #elif (PAGE_SIZE == 8192) && defined(atomic_set_16)
5353 	atomic_set_16((uint16_t *)bits, set);
5354 #elif (PAGE_SIZE == 4096) && defined(atomic_set_8)
5355 	atomic_set_8((uint8_t *)bits, set);
5356 #else		/* PAGE_SIZE <= 8192 */
5357 	uintptr_t addr;
5358 	int shift;
5359 
5360 	addr = (uintptr_t)bits;
5361 	/*
5362 	 * Use a trick to perform a 32-bit atomic on the
5363 	 * containing aligned word, to not depend on the existence
5364 	 * of atomic_{set, clear}_{8, 16}.
5365 	 */
5366 	shift = addr & (sizeof(uint32_t) - 1);
5367 #if BYTE_ORDER == BIG_ENDIAN
5368 	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
5369 #else
5370 	shift *= NBBY;
5371 #endif
5372 	addr &= ~(sizeof(uint32_t) - 1);
5373 	atomic_set_32((uint32_t *)addr, set << shift);
5374 #endif		/* PAGE_SIZE */
5375 }
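
/*
 * Example of the containing-word trick used above (and in the clear and swap
 * variants below), assuming a one-byte vm_page_bits_t, little-endian byte
 * order, and a hypothetical field at address 0x...f6: shift becomes
 * (0xf6 & 3) * NBBY == 16 and addr is rounded down to 0x...f4, so
 * atomic_set_32((uint32_t *)addr, set << 16) touches only bits 16-23 of the
 * aligned word, i.e. exactly the byte that holds the field.
 */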
5376 
5377 static inline void
5378 vm_page_bits_clear(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t clear)
5379 {
5380 
5381 #if PAGE_SIZE == 32768
5382 	atomic_clear_64((uint64_t *)bits, clear);
5383 #elif PAGE_SIZE == 16384
5384 	atomic_clear_32((uint32_t *)bits, clear);
5385 #elif (PAGE_SIZE == 8192) && defined(atomic_clear_16)
5386 	atomic_clear_16((uint16_t *)bits, clear);
5387 #elif (PAGE_SIZE == 4096) && defined(atomic_clear_8)
5388 	atomic_clear_8((uint8_t *)bits, clear);
5389 #else		/* PAGE_SIZE <= 8192 */
5390 	uintptr_t addr;
5391 	int shift;
5392 
5393 	addr = (uintptr_t)bits;
5394 	/*
5395 	 * Use a trick to perform a 32-bit atomic on the
5396 	 * containing aligned word, to not depend on the existence
5397 	 * of atomic_{set, clear}_{8, 16}.
5398 	 */
5399 	shift = addr & (sizeof(uint32_t) - 1);
5400 #if BYTE_ORDER == BIG_ENDIAN
5401 	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
5402 #else
5403 	shift *= NBBY;
5404 #endif
5405 	addr &= ~(sizeof(uint32_t) - 1);
5406 	atomic_clear_32((uint32_t *)addr, clear << shift);
5407 #endif		/* PAGE_SIZE */
5408 }
5409 
5410 static inline vm_page_bits_t
5411 vm_page_bits_swap(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t newbits)
5412 {
5413 #if PAGE_SIZE == 32768
5414 	uint64_t old;
5415 
5416 	old = *bits;
5417 	while (atomic_fcmpset_64(bits, &old, newbits) == 0);
5418 	return (old);
5419 #elif PAGE_SIZE == 16384
5420 	uint32_t old;
5421 
5422 	old = *bits;
5423 	while (atomic_fcmpset_32(bits, &old, newbits) == 0);
5424 	return (old);
5425 #elif (PAGE_SIZE == 8192) && defined(atomic_fcmpset_16)
5426 	uint16_t old;
5427 
5428 	old = *bits;
5429 	while (atomic_fcmpset_16(bits, &old, newbits) == 0);
5430 	return (old);
5431 #elif (PAGE_SIZE == 4096) && defined(atomic_fcmpset_8)
5432 	uint8_t old;
5433 
5434 	old = *bits;
5435 	while (atomic_fcmpset_8(bits, &old, newbits) == 0);
5436 	return (old);
5437 #else		/* PAGE_SIZE <= 4096 */
5438 	uintptr_t addr;
5439 	uint32_t old, new, mask;
5440 	int shift;
5441 
5442 	addr = (uintptr_t)bits;
5443 	/*
5444 	 * Use a trick to perform a 32-bit atomic on the
5445 	 * containing aligned word, to not depend on the existence
5446 	 * of atomic_{set, swap, clear}_{8, 16}.
5447 	 */
5448 	shift = addr & (sizeof(uint32_t) - 1);
5449 #if BYTE_ORDER == BIG_ENDIAN
5450 	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
5451 #else
5452 	shift *= NBBY;
5453 #endif
5454 	addr &= ~(sizeof(uint32_t) - 1);
5455 	mask = VM_PAGE_BITS_ALL << shift;
5456 
5457 	old = *bits;
5458 	do {
5459 		new = old & ~mask;
5460 		new |= newbits << shift;
5461 	} while (atomic_fcmpset_32((uint32_t *)addr, &old, new) == 0);
5462 	return (old >> shift);
5463 #endif		/* PAGE_SIZE */
5464 }
5465 
5466 /*
5467  *	vm_page_set_valid_range:
5468  *
5469  *	Sets portions of a page valid.  The arguments are expected
5470  *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
5471  *	of any partial chunks touched by the range.  The invalid portion of
5472  *	such chunks will be zeroed.
5473  *
5474  *	(base + size) must be less than or equal to PAGE_SIZE.
5475  */
5476 void
5477 vm_page_set_valid_range(vm_page_t m, int base, int size)
5478 {
5479 	int endoff, frag;
5480 	vm_page_bits_t pagebits;
5481 
5482 	vm_page_assert_busied(m);
5483 	if (size == 0)	/* handle degenerate case */
5484 		return;
5485 
5486 	/*
5487 	 * If the base is not DEV_BSIZE aligned and the valid
5488 	 * bit is clear, we have to zero out a portion of the
5489 	 * first block.
5490 	 */
5491 	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
5492 	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
5493 		pmap_zero_page_area(m, frag, base - frag);
5494 
5495 	/*
5496 	 * If the ending offset is not DEV_BSIZE aligned and the
5497 	 * valid bit is clear, we have to zero out a portion of
5498 	 * the last block.
5499 	 */
5500 	endoff = base + size;
5501 	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
5502 	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
5503 		pmap_zero_page_area(m, endoff,
5504 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
5505 
5506 	/*
5507 	 * Assert that no previously invalid block that is now being validated
5508 	 * is already dirty.
5509 	 */
5510 	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
5511 	    ("vm_page_set_valid_range: page %p is dirty", m));
5512 
5513 	/*
5514 	 * Set valid bits inclusive of any overlap.
5515 	 */
5516 	pagebits = vm_page_bits(base, size);
5517 	if (vm_page_xbusied(m))
5518 		m->valid |= pagebits;
5519 	else
5520 		vm_page_bits_set(m, &m->valid, pagebits);
5521 }
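
/*
 * Worked example, assuming 512-byte DEV_BSIZE blocks: a call with base 100 and
 * size 1000 covers bytes [100, 1100).  If blocks 0 and 2 were invalid, bytes
 * [0, 100) and [1100, 1536) are zeroed first, and then
 * vm_page_bits(100, 1000) == 0x07 marks blocks 0-2 valid, so every byte of the
 * newly valid blocks holds either caller data or zeros.
 */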
5522 
5523 /*
5524  * Set the page dirty bits and free the invalid swap space if
5525  * present.  Returns the previous dirty bits.
5526  */
5527 vm_page_bits_t
5528 vm_page_set_dirty(vm_page_t m)
5529 {
5530 	vm_page_bits_t old;
5531 
5532 	VM_PAGE_OBJECT_BUSY_ASSERT(m);
5533 
5534 	if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) {
5535 		old = m->dirty;
5536 		m->dirty = VM_PAGE_BITS_ALL;
5537 	} else
5538 		old = vm_page_bits_swap(m, &m->dirty, VM_PAGE_BITS_ALL);
5539 	if (old == 0 && (m->a.flags & PGA_SWAP_SPACE) != 0)
5540 		vm_pager_page_unswapped(m);
5541 
5542 	return (old);
5543 }
5544 
5545 /*
5546  * Clear the given bits from the specified page's dirty field.
5547  */
5548 static __inline void
5549 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
5550 {
5551 
5552 	vm_page_assert_busied(m);
5553 
5554 	/*
5555 	 * If the page is xbusied and not write mapped, we are the
5556 	 * only thread that can modify dirty bits.  Otherwise, the pmap
5557 	 * layer can call vm_page_dirty() without holding a distinguished
5558 	 * lock.  The combination of page busy and atomic operations
5559 	 * suffices to guarantee consistency of the page dirty field.
5560 	 */
5561 	if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
5562 		m->dirty &= ~pagebits;
5563 	else
5564 		vm_page_bits_clear(m, &m->dirty, pagebits);
5565 }
5566 
5567 /*
5568  *	vm_page_set_validclean:
5569  *
5570  *	Sets portions of a page valid and clean.  The arguments are expected
5571  *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
5572  *	of any partial chunks touched by the range.  The invalid portion of
5573  *	such chunks will be zero'd.
5574  *
5575  *	(base + size) must be less than or equal to PAGE_SIZE.
5576  */
5577 void
5578 vm_page_set_validclean(vm_page_t m, int base, int size)
5579 {
5580 	vm_page_bits_t oldvalid, pagebits;
5581 	int endoff, frag;
5582 
5583 	vm_page_assert_busied(m);
5584 	if (size == 0)	/* handle degenerate case */
5585 		return;
5586 
5587 	/*
5588 	 * If the base is not DEV_BSIZE aligned and the valid
5589 	 * bit is clear, we have to zero out a portion of the
5590 	 * first block.
5591 	 */
5592 	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
5593 	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
5594 		pmap_zero_page_area(m, frag, base - frag);
5595 
5596 	/*
5597 	 * If the ending offset is not DEV_BSIZE aligned and the
5598 	 * valid bit is clear, we have to zero out a portion of
5599 	 * the last block.
5600 	 */
5601 	endoff = base + size;
5602 	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
5603 	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
5604 		pmap_zero_page_area(m, endoff,
5605 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
5606 
5607 	/*
5608 	 * Set valid, clear dirty bits.  If validating the entire
5609 	 * page we can safely clear the pmap modify bit.  We also
5610 	 * use this opportunity to clear the PGA_NOSYNC flag.  If a process
5611 	 * takes a write fault on a MAP_NOSYNC memory area the flag will
5612 	 * be set again.
5613 	 *
5614 	 * We set valid bits inclusive of any overlap, but we can only
5615 	 * clear dirty bits for DEV_BSIZE chunks that are fully within
5616 	 * the range.
5617 	 */
5618 	oldvalid = m->valid;
5619 	pagebits = vm_page_bits(base, size);
5620 	if (vm_page_xbusied(m))
5621 		m->valid |= pagebits;
5622 	else
5623 		vm_page_bits_set(m, &m->valid, pagebits);
5624 #if 0	/* NOT YET */
5625 	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
5626 		frag = DEV_BSIZE - frag;
5627 		base += frag;
5628 		size -= frag;
5629 		if (size < 0)
5630 			size = 0;
5631 	}
5632 	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
5633 #endif
5634 	if (base == 0 && size == PAGE_SIZE) {
5635 		/*
5636 		 * The page can only be modified within the pmap if it is
5637 		 * mapped, and it can only be mapped if it was previously
5638 		 * fully valid.
5639 		 */
5640 		if (oldvalid == VM_PAGE_BITS_ALL)
5641 			/*
5642 			 * Perform the pmap_clear_modify() first.  Otherwise,
5643 			 * a concurrent pmap operation, such as
5644 			 * pmap_protect(), could clear a modification in the
5645 			 * pmap and set the dirty field on the page before
5646 			 * pmap_clear_modify() had begun and after the dirty
5647 			 * field was cleared here.
5648 			 */
5649 			pmap_clear_modify(m);
5650 		m->dirty = 0;
5651 		vm_page_aflag_clear(m, PGA_NOSYNC);
5652 	} else if (oldvalid != VM_PAGE_BITS_ALL && vm_page_xbusied(m))
5653 		m->dirty &= ~pagebits;
5654 	else
5655 		vm_page_clear_dirty_mask(m, pagebits);
5656 }
5657 
5658 void
5659 vm_page_clear_dirty(vm_page_t m, int base, int size)
5660 {
5661 
5662 	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
5663 }
5664 
5665 /*
5666  *	vm_page_set_invalid:
5667  *
5668  *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
5669  *	valid and dirty bits for the effected areas are cleared.
5670  */
5671 void
5672 vm_page_set_invalid(vm_page_t m, int base, int size)
5673 {
5674 	vm_page_bits_t bits;
5675 	vm_object_t object;
5676 
5677 	/*
5678 	 * The object lock is required so that pages can't be mapped
5679 	 * read-only while we're in the process of invalidating them.
5680 	 */
5681 	object = m->object;
5682 	VM_OBJECT_ASSERT_WLOCKED(object);
5683 	vm_page_assert_busied(m);
5684 
5685 	if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
5686 	    size >= object->un_pager.vnp.vnp_size)
5687 		bits = VM_PAGE_BITS_ALL;
5688 	else
5689 		bits = vm_page_bits(base, size);
5690 	if (object->ref_count != 0 && vm_page_all_valid(m) && bits != 0)
5691 		pmap_remove_all(m);
5692 	KASSERT((bits == 0 && vm_page_all_valid(m)) ||
5693 	    !pmap_page_is_mapped(m),
5694 	    ("vm_page_set_invalid: page %p is mapped", m));
5695 	if (vm_page_xbusied(m)) {
5696 		m->valid &= ~bits;
5697 		m->dirty &= ~bits;
5698 	} else {
5699 		vm_page_bits_clear(m, &m->valid, bits);
5700 		vm_page_bits_clear(m, &m->dirty, bits);
5701 	}
5702 }
5703 
5704 /*
5705  *	vm_page_invalid:
5706  *
5707  *	Invalidates the entire page.  The page must be busy, unmapped, and
5708  *	the enclosing object must be locked.  The object lock protects
5709  *	against a concurrent read-only pmap enter, which is done without
5710  *	busying the page.
5711  */
5712 void
5713 vm_page_invalid(vm_page_t m)
5714 {
5715 
5716 	vm_page_assert_busied(m);
5717 	VM_OBJECT_ASSERT_WLOCKED(m->object);
5718 	MPASS(!pmap_page_is_mapped(m));
5719 
5720 	if (vm_page_xbusied(m))
5721 		m->valid = 0;
5722 	else
5723 		vm_page_bits_clear(m, &m->valid, VM_PAGE_BITS_ALL);
5724 }
5725 
5726 /*
5727  * vm_page_zero_invalid()
5728  *
5729  *	The kernel assumes that the invalid portions of a page contain
5730  *	garbage, but such pages can be mapped into memory by user code.
5731  *	When this occurs, we must zero out the non-valid portions of the
5732  *	page so user code sees what it expects.
5733  *
5734  *	Pages are most often semi-valid when the end of a file is mapped
5735  *	into memory and the file's size is not page aligned.
5736  */
5737 void
5738 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
5739 {
5740 	int b;
5741 	int i;
5742 
5743 	/*
5744 	 * Scan the valid bits looking for invalid sections that
5745 	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
5746 	 * valid bit may be set) have already been zeroed by
5747 	 * vm_page_set_validclean().
5748 	 */
5749 	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
5750 		if (i == (PAGE_SIZE / DEV_BSIZE) ||
5751 		    (m->valid & ((vm_page_bits_t)1 << i))) {
5752 			if (i > b) {
5753 				pmap_zero_page_area(m,
5754 				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
5755 			}
5756 			b = i + 1;
5757 		}
5758 	}
5759 
5760 	/*
5761 	 * setvalid is TRUE when we can safely set the zero'd areas
5762 	 * as being valid.  We can do this if there are no cache consistency
5763 	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
5764 	 */
5765 	if (setvalid)
5766 		vm_page_valid(m);
5767 }
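
/*
 * Example of the semi-valid case described above, assuming 4 KB pages: for a
 * 5000-byte file, the page at pindex 1 holds only 904 valid bytes, so its
 * valid mask covers blocks 0-1 (0x03).  The scan above then zeroes blocks 2-7,
 * i.e. bytes [1024, 4096), before the page is exposed to a user mapping.
 */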
5768 
5769 /*
5770  *	vm_page_is_valid:
5771  *
5772  *	Is (partial) page valid?  Note that in the degenerate case where
5773  *	size == 0, this routine returns FALSE if the page is entirely
5774  *	invalid and TRUE otherwise.
5775  *
5776  *	Some callers invoke this routine without the busy lock held and
5777  *	handle races via higher level locks.  Typical callers should
5778  *	hold a busy lock to prevent invalidation.
5779  */
5780 int
5781 vm_page_is_valid(vm_page_t m, int base, int size)
5782 {
5783 	vm_page_bits_t bits;
5784 
5785 	bits = vm_page_bits(base, size);
5786 	return (vm_page_any_valid(m) && (m->valid & bits) == bits);
5787 }
5788 
5789 /*
5790  * Returns true if all of the specified predicates are true for the entire
5791  * (super)page and false otherwise.
5792  */
5793 bool
5794 vm_page_ps_test(vm_page_t m, int psind, int flags, vm_page_t skip_m)
5795 {
5796 	vm_object_t object;
5797 	int i, npages;
5798 
5799 	object = m->object;
5800 	if (skip_m != NULL && skip_m->object != object)
5801 		return (false);
5802 	VM_OBJECT_ASSERT_LOCKED(object);
5803 	KASSERT(psind <= m->psind,
5804 	    ("psind %d > psind %d of m %p", psind, m->psind, m));
5805 	npages = atop(pagesizes[psind]);
5806 
5807 	/*
5808 	 * The physically contiguous pages that make up a superpage, i.e., a
5809 	 * page with a page size index ("psind") greater than zero, will
5810 	 * occupy adjacent entries in vm_page_array[].
5811 	 */
5812 	for (i = 0; i < npages; i++) {
5813 		/* Always test object consistency, including "skip_m". */
5814 		if (m[i].object != object)
5815 			return (false);
5816 		if (&m[i] == skip_m)
5817 			continue;
5818 		if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i]))
5819 			return (false);
5820 		if ((flags & PS_ALL_DIRTY) != 0) {
5821 			/*
5822 			 * Calling vm_page_test_dirty() or pmap_is_modified()
5823 			 * might stop this case from spuriously returning
5824 			 * "false".  However, that would require a write lock
5825 			 * on the object containing "m[i]".
5826 			 */
5827 			if (m[i].dirty != VM_PAGE_BITS_ALL)
5828 				return (false);
5829 		}
5830 		if ((flags & PS_ALL_VALID) != 0 &&
5831 		    m[i].valid != VM_PAGE_BITS_ALL)
5832 			return (false);
5833 	}
5834 	return (true);
5835 }
5836 
5837 /*
5838  * Set the page's dirty bits if the page is modified.
5839  */
5840 void
5841 vm_page_test_dirty(vm_page_t m)
5842 {
5843 
5844 	vm_page_assert_busied(m);
5845 	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
5846 		vm_page_dirty(m);
5847 }
5848 
5849 void
5850 vm_page_valid(vm_page_t m)
5851 {
5852 
5853 	vm_page_assert_busied(m);
5854 	if (vm_page_xbusied(m))
5855 		m->valid = VM_PAGE_BITS_ALL;
5856 	else
5857 		vm_page_bits_set(m, &m->valid, VM_PAGE_BITS_ALL);
5858 }
5859 
5860 #ifdef INVARIANTS
5861 void
5862 vm_page_object_busy_assert(vm_page_t m)
5863 {
5864 
5865 	/*
5866 	 * Certain of the page's fields may only be modified by the
5867 	 * holder of a page or object busy.
5868 	 */
5869 	if (m->object != NULL && !vm_page_busied(m))
5870 		VM_OBJECT_ASSERT_BUSY(m->object);
5871 }
5872 
5873 void
5874 vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits)
5875 {
5876 
5877 	if ((bits & PGA_WRITEABLE) == 0)
5878 		return;
5879 
5880 	/*
5881 	 * The PGA_WRITEABLE flag can only be set if the page is
5882 	 * managed, is exclusively busied or the object is locked.
5883 	 * Currently, this flag is only set by pmap_enter().
5884 	 */
5885 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5886 	    ("PGA_WRITEABLE on unmanaged page"));
5887 	if (!vm_page_xbusied(m))
5888 		VM_OBJECT_ASSERT_BUSY(m->object);
5889 }
5890 #endif
5891 
5892 #include "opt_ddb.h"
5893 #ifdef DDB
5894 #include <sys/kernel.h>
5895 
5896 #include <ddb/ddb.h>
5897 
5898 DB_SHOW_COMMAND_FLAGS(page, vm_page_print_page_info, DB_CMD_MEMSAFE)
5899 {
5900 
5901 	db_printf("vm_cnt.v_free_count: %d\n", vm_free_count());
5902 	db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count());
5903 	db_printf("vm_cnt.v_active_count: %d\n", vm_active_count());
5904 	db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count());
5905 	db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count());
5906 	db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
5907 	db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
5908 	db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
5909 	db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
5910 }
5911 
5912 DB_SHOW_COMMAND_FLAGS(pageq, vm_page_print_pageq_info, DB_CMD_MEMSAFE)
5913 {
5914 	int dom;
5915 
5916 	db_printf("pq_free %d\n", vm_free_count());
5917 	for (dom = 0; dom < vm_ndomains; dom++) {
5918 		db_printf(
5919     "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n",
5920 		    dom,
5921 		    vm_dom[dom].vmd_page_count,
5922 		    vm_dom[dom].vmd_free_count,
5923 		    vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
5924 		    vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
5925 		    vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt,
5926 		    vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt);
5927 	}
5928 }
5929 
5930 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
5931 {
5932 	vm_page_t m;
5933 	boolean_t phys, virt;
5934 
5935 	if (!have_addr) {
5936 		db_printf("show pginfo addr\n");
5937 		return;
5938 	}
5939 
5940 	phys = strchr(modif, 'p') != NULL;
5941 	virt = strchr(modif, 'v') != NULL;
5942 	if (virt)
5943 		m = PHYS_TO_VM_PAGE(pmap_kextract(addr));
5944 	else if (phys)
5945 		m = PHYS_TO_VM_PAGE(addr);
5946 	else
5947 		m = (vm_page_t)addr;
5948 	db_printf(
5949     "page %p obj %p pidx 0x%jx phys 0x%jx q %d ref 0x%x\n"
5950     "  af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
5951 	    m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
5952 	    m->a.queue, m->ref_count, m->a.flags, m->oflags,
5953 	    m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty);
5954 }
5955 #endif /* DDB */
5956