/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

/*
 * VM_FREELIST_DEFAULT is split into VM_NDOMAIN lists, one for each
 * domain.  These extra lists are stored at the end of the regular
 * free lists starting with VM_NFREELIST.
 */
#define VM_RAW_NFREELIST	(VM_NFREELIST + VM_NDOMAIN - 1)
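
/*
 * For example, with VM_NFREELIST == 2 and VM_NDOMAIN == 2 (purely
 * illustrative values; the actual constants are platform-dependent),
 * one extra list is appended for the second domain and
 * VM_RAW_NFREELIST evaluates to 3.
 */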

struct vm_freelist {
	struct pglist pl;
	int lcnt;
};

struct vm_phys_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
	int		domain;
	struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER];
};

struct mem_affinity *mem_affinity;

static struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];

static int vm_phys_nsegs;

#define VM_PHYS_FICTITIOUS_NSEGS	8
static struct vm_phys_fictitious_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
static struct mtx vm_phys_fictitious_reg_mtx;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist
    vm_phys_free_queues[VM_RAW_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
static struct vm_freelist
(*vm_phys_lookup_lists[VM_NDOMAIN][VM_RAW_NFREELIST])[VM_NFREEPOOL][VM_NFREEORDER];
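
/*
 * The free queues form a three-level structure: each free list has one
 * queue per (pool, order) pair, and the queue at order "oind" links the
 * first page of each free, contiguous, 2^oind-page block.  Each row of
 * vm_phys_lookup_lists orders the raw free lists so that a given
 * domain's allocations prefer memory that is local to that domain.
 */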

static int vm_nfreelists = VM_FREELIST_DEFAULT + 1;

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

#if VM_NDOMAIN > 1
static int sysctl_vm_phys_lookup_lists(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_lookup_lists, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_lookup_lists, "A", "Phys Lookup Lists");
#endif

static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind,
    int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (flind = 0; flind < vm_nfreelists; flind++) {
		sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
		    "\n  ORDER (SIZE)  |  NUMBER"
		    "\n              ", flind);
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			sbuf_printf(&sbuf, "  |  POOL %d", pind);
		sbuf_printf(&sbuf, "\n--            ");
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			sbuf_printf(&sbuf, "-- --      ");
		sbuf_printf(&sbuf, "--\n");
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
			    1 << (PAGE_SHIFT - 10 + oind));
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[flind][pind];
				sbuf_printf(&sbuf, "  |  %6d", fl[oind].lcnt);
			}
			sbuf_printf(&sbuf, "\n");
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

#if VM_NDOMAIN > 1
/*
 * Outputs the set of free list lookup lists.
 */
static int
sysctl_vm_phys_lookup_lists(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int domain, error, flind, ndomains;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	ndomains = vm_nfreelists - VM_NFREELIST + 1;
	for (domain = 0; domain < ndomains; domain++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n\n", domain);
		for (flind = 0; flind < vm_nfreelists; flind++)
			sbuf_printf(&sbuf, "  [%d]:\t%p\n", flind,
			    vm_phys_lookup_lists[domain][flind]);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain)
{
	struct vm_phys_seg *seg;
#ifdef VM_PHYSSEG_SPARSE
	long pages;
	int segind;

	pages = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		pages += atop(seg->end - seg->start);
	}
#endif
	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
#ifdef VM_PHYSSEG_SPARSE
	seg->first_page = &vm_page_array[pages];
#else
	seg->first_page = PHYS_TO_VM_PAGE(start);
#endif
#if VM_NDOMAIN > 1
	if (flind == VM_FREELIST_DEFAULT && domain != 0) {
		flind = VM_NFREELIST + (domain - 1);
		if (flind >= vm_nfreelists)
			vm_nfreelists = flind + 1;
	}
#endif
	seg->free_queues = &vm_phys_free_queues[flind];
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
{
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, flind, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end, flind,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end, flind,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
}

/*
 * Initialize the physical memory allocator.
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	int flind, i, oind, pind;
#if VM_NDOMAIN > 1
	int ndomains, j;
#endif

	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
#ifdef	VM_FREELIST_ISADMA
		if (phys_avail[i] < 16777216) {
			if (phys_avail[i + 1] > 16777216) {
				vm_phys_create_seg(phys_avail[i], 16777216,
				    VM_FREELIST_ISADMA);
				vm_phys_create_seg(16777216, phys_avail[i + 1],
				    VM_FREELIST_DEFAULT);
			} else {
				vm_phys_create_seg(phys_avail[i],
				    phys_avail[i + 1], VM_FREELIST_ISADMA);
			}
			if (VM_FREELIST_ISADMA >= vm_nfreelists)
				vm_nfreelists = VM_FREELIST_ISADMA + 1;
		} else
#endif
#ifdef	VM_FREELIST_HIGHMEM
		if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) {
			if (phys_avail[i] < VM_HIGHMEM_ADDRESS) {
				vm_phys_create_seg(phys_avail[i],
				    VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT);
				vm_phys_create_seg(VM_HIGHMEM_ADDRESS,
				    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
			} else {
				vm_phys_create_seg(phys_avail[i],
				    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
			}
			if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
				vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
		} else
#endif
		vm_phys_create_seg(phys_avail[i], phys_avail[i + 1],
		    VM_FREELIST_DEFAULT);
	}
	for (flind = 0; flind < vm_nfreelists; flind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = vm_phys_free_queues[flind][pind];
			for (oind = 0; oind < VM_NFREEORDER; oind++)
				TAILQ_INIT(&fl[oind].pl);
		}
	}
#if VM_NDOMAIN > 1
	/*
	 * Build a free list lookup list for each domain.  All of the
	 * memory domain lists are inserted at the VM_FREELIST_DEFAULT
	 * index in a round-robin order starting with the current
	 * domain.
	 */
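	/*
	 * For instance, with two domains (an illustrative
	 * configuration), domain 0's lookup list visits domain 0's
	 * default list before domain 1's, while domain 1's lookup list
	 * reverses that order; the remaining free lists keep their
	 * relative positions in both cases.
	 */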
	ndomains = vm_nfreelists - VM_NFREELIST + 1;
	for (flind = 0; flind < VM_FREELIST_DEFAULT; flind++)
		for (i = 0; i < ndomains; i++)
			vm_phys_lookup_lists[i][flind] =
			    &vm_phys_free_queues[flind];
	for (i = 0; i < ndomains; i++)
		for (j = 0; j < ndomains; j++) {
			flind = (i + j) % ndomains;
			if (flind == 0)
				flind = VM_FREELIST_DEFAULT;
			else
				flind += VM_NFREELIST - 1;
			vm_phys_lookup_lists[i][VM_FREELIST_DEFAULT + j] =
			    &vm_phys_free_queues[flind];
		}
	for (flind = VM_FREELIST_DEFAULT + 1; flind < VM_NFREELIST;
	     flind++)
		for (i = 0; i < ndomains; i++)
			vm_phys_lookup_lists[i][flind + ndomains - 1] =
			    &vm_phys_free_queues[flind];
#else
	for (flind = 0; flind < vm_nfreelists; flind++)
		vm_phys_lookup_lists[0][flind] = &vm_phys_free_queues[flind];
#endif

	mtx_init(&vm_phys_fictitious_reg_mtx, "vmfctr", NULL, MTX_DEF);
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
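/*
 * For example, splitting an order 3 block to satisfy an order 1
 * request frees the block's upper order 2 and order 1 buddies and
 * leaves the caller holding the lowest order 1 subblock.
 */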
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		m_buddy->order = oind;
		TAILQ_INSERT_HEAD(&fl[oind].pl, m_buddy, pageq);
		fl[oind].lcnt++;
	}
}

/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
	vm_page_t m;

	cnt.v_page_count++;
	m = vm_phys_paddr_to_vm_page(pa);
	m->phys_addr = pa;
	m->queue = PQ_NONE;
	m->segind = vm_phys_paddr_to_segind(pa);
	m->flags = PG_FREE;
	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_add_page: page %p has unexpected order %d",
	    m, m->order));
	m->pool = VM_FREEPOOL_DEFAULT;
	pmap_page_init(m);
	mtx_lock(&vm_page_queue_free_mtx);
	cnt.v_free_count++;
	vm_phys_free_pages(m, 0);
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
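/*
 * For example, vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, 0) returns a
 * single page, while an order of 3 returns the first page of 8
 * physically contiguous pages.
 */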
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
	vm_page_t m;
	int flind;

	for (flind = 0; flind < vm_nfreelists; flind++) {
		m = vm_phys_alloc_freelist_pages(flind, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

/*
 * Find and dequeue a free page on the given free list, with the
 * specified pool and order.
 */
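/*
 * If the requested pool is empty at every sufficient order, a block is
 * taken from another pool on the same free list, retyped with
 * vm_phys_set_pool(), and split down to the requested order.
 */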
vm_page_t
vm_phys_alloc_freelist_pages(int flind, int pool, int order)
{
	struct vm_freelist *fl;
	struct vm_freelist *alt;
	int domain, oind, pind;
	vm_page_t m;

	KASSERT(flind < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    flind));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

#if VM_NDOMAIN > 1
	domain = PCPU_GET(domain);
#else
	domain = 0;
#endif
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = (*vm_phys_lookup_lists[domain][flind])[pool];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			TAILQ_REMOVE(&fl[oind].pl, m, pageq);
			fl[oind].lcnt--;
			m->order = VM_NFREEORDER;
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = (*vm_phys_lookup_lists[domain][flind])[pind];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				TAILQ_REMOVE(&alt[oind].pl, m, pageq);
				alt[oind].lcnt--;
				m->order = VM_NFREEORDER;
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t m;
	int segind;

	m = NULL;
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (pa >= seg->start && pa < seg->end) {
			m = &seg->first_page[atop(pa - seg->start)];
			KASSERT((m->flags & PG_FICTITIOUS) != 0,
			    ("%p not fictitious", m));
			break;
		}
	}
	return (m);
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long i, page_count;
	int segind;
#ifdef VM_PHYSSEG_DENSE
	long pi;
	boolean_t malloced;
#endif

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	if (pi >= first_page && atop(end) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		malloced = FALSE;
	} else
#endif
	{
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK | M_ZERO);
#ifdef VM_PHYSSEG_DENSE
		malloced = TRUE;
#endif
	}
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
		pmap_page_init(&fp[i]);
		fp[i].oflags &= ~(VPO_BUSY | VPO_UNMANAGED);
	}
	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == 0 && seg->end == 0) {
			seg->start = start;
			seg->end = end;
			seg->first_page = fp;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			return (0);
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
#ifdef VM_PHYSSEG_DENSE
	if (malloced)
#endif
		free(fp, M_FICT_PAGES);
	return (EBUSY);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	int segind;
#ifdef VM_PHYSSEG_DENSE
	long pi;
#endif

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
#endif

	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == start && seg->end == end) {
			seg->start = seg->end = 0;
			fp = seg->first_page;
			seg->first_page = NULL;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
#ifdef VM_PHYSSEG_DENSE
			if (pi < first_page || atop(end) >= vm_page_array_size)
#endif
				free(fp, M_FICT_PAGES);
			return;
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	KASSERT(0, ("Unregistering an unregistered fictitious range"));
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
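/*
 * Coalescing relies on the buddy system's address arithmetic: the
 * buddy of the 2^order-page block at physical address "pa" lies at
 * pa ^ (1 << (PAGE_SHIFT + order)).  For example, assuming 4KB pages,
 * the order 0 buddy of the page at 0x2000 is at 0x3000; if that buddy
 * is free, the pair merges into an order 1 block at 0x2000, and
 * merging continues upward while each successive buddy is free.
 */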
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			TAILQ_REMOVE(&fl[order].pl, m_buddy, pageq);
			fl[order].lcnt--;
			m_buddy->order = VM_NFREEORDER;
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	m->order = order;
	fl = (*seg->free_queues)[m->pool];
	TAILQ_INSERT_TAIL(&fl[order].pl, m, pageq);
	fl[order].lcnt++;
}

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
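/*
 * For example, freeing 13 pages starting at page frame 3 frees an
 * order 0 block at frame 3, an order 2 block at frame 4, and an
 * order 3 block at frame 8: each piece is the largest
 * power-of-two-sized run that its starting address's alignment
 * permits.
 */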
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
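/*
 * For example, if "m" is the last page of a free order 2 block, the
 * first split frees the block's lower order 1 half and the second
 * frees the order 0 buddy of "m", so that only "m" itself ends up
 * removed from the free lists.
 */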
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	TAILQ_REMOVE(&fl[order].pl, m_set, pageq);
	fl[order].lcnt--;
	m_set->order = VM_NFREEORDER;
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^
		    ((vm_paddr_t)1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		m_tmp->order = order;
		TAILQ_INSERT_HEAD(&fl[order].pl, m_tmp, pageq);
		fl[order].lcnt++;
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}

/*
 * Try to zero one physical page.  Used by an idle priority thread.
 */
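/*
 * The static cursor variables below preserve the scan position across
 * calls, so successive invocations walk the (freelist, pool, order)
 * space round-robin instead of rescanning from the beginning.
 */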
boolean_t
vm_phys_zero_pages_idle(void)
{
	static struct vm_freelist *fl = vm_phys_free_queues[0][0];
	static int flind, oind, pind;
	vm_page_t m, m_tmp;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;;) {
		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, pageq) {
			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
					vm_phys_unfree_page(m_tmp);
					cnt.v_free_count--;
					mtx_unlock(&vm_page_queue_free_mtx);
					pmap_zero_page_idle(m_tmp);
					m_tmp->flags |= PG_ZERO;
					mtx_lock(&vm_page_queue_free_mtx);
					cnt.v_free_count++;
					vm_phys_free_pages(m_tmp, 0);
					vm_page_zero_count++;
					cnt_prezero++;
					return (TRUE);
				}
			}
		}
		oind++;
		if (oind == VM_NFREEORDER) {
			oind = 0;
			pind++;
			if (pind == VM_NFREEPOOL) {
				pind = 0;
				flind++;
				if (flind == vm_nfreelists)
					flind = 0;
			}
			fl = vm_phys_free_queues[flind][pind];
		}
	}
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
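/*
 * For example (illustrative values, assuming 4KB pages), a 64KB buffer
 * below 16MB with 64KB alignment and no boundary restriction could be
 * requested as:
 *
 *	m = vm_phys_alloc_contig(16, 0, 0x1000000, 0x10000, 0);
 */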
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_last, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int domain, flind, oind, order, pind;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
#if VM_NDOMAIN > 1
	domain = PCPU_GET(domain);
#else
	domain = 0;
#endif
	size = npages << PAGE_SHIFT;
	KASSERT(size != 0,
	    ("vm_phys_alloc_contig: size must not be 0"));
	KASSERT((alignment & (alignment - 1)) == 0,
	    ("vm_phys_alloc_contig: alignment must be a power of 2"));
	KASSERT((boundary & (boundary - 1)) == 0,
	    ("vm_phys_alloc_contig: boundary must be a power of 2"));
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	for (flind = 0; flind < vm_nfreelists; flind++) {
		for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = (*vm_phys_lookup_lists[domain][flind])
				    [pind];
				TAILQ_FOREACH(m_ret, &fl[oind].pl, pageq) {
					/*
					 * A free list may contain physical pages
					 * from one or more segments.
					 */
					seg = &vm_phys_segs[m_ret->segind];
					if (seg->start > high ||
					    low >= seg->end)
						continue;

					/*
					 * Is the size of this allocation request
					 * larger than the largest block size?
					 */
					if (order >= VM_NFREEORDER) {
						/*
						 * Determine if a sufficient number
						 * of subsequent blocks to satisfy
						 * the allocation request are free.
						 */
						pa = VM_PAGE_TO_PHYS(m_ret);
						pa_last = pa + size;
						for (;;) {
							pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
							if (pa >= pa_last)
								break;
							if (pa < seg->start ||
							    pa >= seg->end)
								break;
							m = &seg->first_page[atop(pa - seg->start)];
							if (m->order != VM_NFREEORDER - 1)
								break;
						}
						/* If not, continue to the next block. */
						if (pa < pa_last)
							continue;
					}

					/*
					 * Determine if the blocks are within the given range,
					 * satisfy the given alignment, and do not cross the
					 * given boundary.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					if (pa >= low &&
					    pa + size <= high &&
					    (pa & (alignment - 1)) == 0 &&
					    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
						goto done;
				}
			}
		}
	}
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		TAILQ_REMOVE(&fl[m->order].pl, m, pageq);
		fl[m->order].lcnt--;
		m->order = VM_NFREEORDER;
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind;

	for (flind = 0; flind < vm_nfreelists; flind++) {
		db_printf("FREE LIST %d:\n"
		    "\n  ORDER (SIZE)  |  NUMBER"
		    "\n              ", flind);
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			db_printf("  |  POOL %d", pind);
		db_printf("\n--            ");
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			db_printf("-- --      ");
		db_printf("--\n");
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			db_printf("  %2.2d (%6.6dK)", oind,
			    1 << (PAGE_SHIFT - 10 + oind));
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[flind][pind];
				db_printf("  |  %6.6d", fl[oind].lcnt);
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif