/* xref: /freebsd/sys/vm/vm_phys.c (revision 586f63035fbe5e45cfc971037fd76375661ece26) */
/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

/*
 * VM_FREELIST_DEFAULT is split into VM_NDOMAIN lists, one for each
 * domain.  These extra lists are stored at the end of the regular
 * free lists starting with VM_NFREELIST.
 */
#define VM_RAW_NFREELIST	(VM_NFREELIST + VM_NDOMAIN - 1)
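/*
 * Illustrative example (values assumed, not from the source): with
 * VM_NFREELIST == 3 and VM_NDOMAIN == 2, VM_RAW_NFREELIST is 4; raw
 * indices 0 through 2 are the regular free lists and raw index 3
 * holds domain 1's share of VM_FREELIST_DEFAULT, since domain 0
 * keeps the original default list.
 */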

struct vm_freelist {
	struct pglist pl;
	int lcnt;
};

struct vm_phys_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
	int		domain;
	struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER];
};

struct mem_affinity *mem_affinity;

static struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];

static int vm_phys_nsegs;

static struct vm_freelist
    vm_phys_free_queues[VM_RAW_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
static struct vm_freelist
(*vm_phys_lookup_lists[VM_NDOMAIN][VM_RAW_NFREELIST])[VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists = VM_FREELIST_DEFAULT + 1;

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

#if VM_NDOMAIN > 1
static int sysctl_vm_phys_lookup_lists(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_lookup_lists, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_lookup_lists, "A", "Phys Lookup Lists");
#endif

static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind,
    int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (flind = 0; flind < vm_nfreelists; flind++) {
		sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
		    "\n  ORDER (SIZE)  |  NUMBER"
		    "\n              ", flind);
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			sbuf_printf(&sbuf, "  |  POOL %d", pind);
		sbuf_printf(&sbuf, "\n--            ");
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			sbuf_printf(&sbuf, "-- --      ");
		sbuf_printf(&sbuf, "--\n");
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
			    1 << (PAGE_SHIFT - 10 + oind));
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[flind][pind];
				sbuf_printf(&sbuf, "  |  %6d", fl[oind].lcnt);
			}
			sbuf_printf(&sbuf, "\n");
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

#if VM_NDOMAIN > 1
/*
 * Outputs the set of free list lookup lists.
 */
static int
sysctl_vm_phys_lookup_lists(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int domain, error, flind, ndomains;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	ndomains = vm_nfreelists - VM_NFREELIST + 1;
	for (domain = 0; domain < ndomains; domain++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n\n", domain);
		for (flind = 0; flind < vm_nfreelists; flind++)
			sbuf_printf(&sbuf, "  [%d]:\t%p\n", flind,
			    vm_phys_lookup_lists[domain][flind]);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain)
{
	struct vm_phys_seg *seg;
#ifdef VM_PHYSSEG_SPARSE
	long pages;
	int segind;

	pages = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		pages += atop(seg->end - seg->start);
	}
#endif
	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
#ifdef VM_PHYSSEG_SPARSE
	seg->first_page = &vm_page_array[pages];
#else
	seg->first_page = PHYS_TO_VM_PAGE(start);
#endif
#if VM_NDOMAIN > 1
	if (flind == VM_FREELIST_DEFAULT && domain != 0) {
		flind = VM_NFREELIST + (domain - 1);
		if (flind >= vm_nfreelists)
			vm_nfreelists = flind + 1;
	}
#endif
	seg->free_queues = &vm_phys_free_queues[flind];
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
{
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, flind, 0);
		return;
	}

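	/*
	 * Walk the mem_affinity table and split [start, end) at each
	 * domain boundary it crosses, so that every resulting segment
	 * lies entirely within a single domain.
	 */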
	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end, flind,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end, flind,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
}

/*
 * Initialize the physical memory allocator.
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	int flind, i, oind, pind;
#if VM_NDOMAIN > 1
	int ndomains, j;
#endif

	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
#ifdef	VM_FREELIST_ISADMA
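		/* 16777216 is 16 MB, the upper bound of the ISA DMA range. */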
		if (phys_avail[i] < 16777216) {
			if (phys_avail[i + 1] > 16777216) {
				vm_phys_create_seg(phys_avail[i], 16777216,
				    VM_FREELIST_ISADMA);
				vm_phys_create_seg(16777216, phys_avail[i + 1],
				    VM_FREELIST_DEFAULT);
			} else {
				vm_phys_create_seg(phys_avail[i],
				    phys_avail[i + 1], VM_FREELIST_ISADMA);
			}
			if (VM_FREELIST_ISADMA >= vm_nfreelists)
				vm_nfreelists = VM_FREELIST_ISADMA + 1;
		} else
#endif
#ifdef	VM_FREELIST_HIGHMEM
		if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) {
			if (phys_avail[i] < VM_HIGHMEM_ADDRESS) {
				vm_phys_create_seg(phys_avail[i],
				    VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT);
				vm_phys_create_seg(VM_HIGHMEM_ADDRESS,
				    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
			} else {
				vm_phys_create_seg(phys_avail[i],
				    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
			}
			if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
				vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
		} else
#endif
		vm_phys_create_seg(phys_avail[i], phys_avail[i + 1],
		    VM_FREELIST_DEFAULT);
	}
	for (flind = 0; flind < vm_nfreelists; flind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = vm_phys_free_queues[flind][pind];
			for (oind = 0; oind < VM_NFREEORDER; oind++)
				TAILQ_INIT(&fl[oind].pl);
		}
	}
#if VM_NDOMAIN > 1
	/*
	 * Build a free list lookup list for each domain.  All of the
	 * memory domain lists are inserted at the VM_FREELIST_DEFAULT
	 * index in a round-robin order starting with the current
	 * domain.
	 */
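	/*
	 * Illustrative example (values assumed, not from the source):
	 * with two domains and VM_FREELIST_DEFAULT == 0, domain 0's
	 * lookup list visits the default queues in the order
	 * {domain 0, domain 1}, while domain 1's visits them in the
	 * order {domain 1, domain 0}; any lists after the default one
	 * follow, shifted up by ndomains - 1 raw indices.
	 */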
	ndomains = vm_nfreelists - VM_NFREELIST + 1;
	for (flind = 0; flind < VM_FREELIST_DEFAULT; flind++)
		for (i = 0; i < ndomains; i++)
			vm_phys_lookup_lists[i][flind] =
			    &vm_phys_free_queues[flind];
	for (i = 0; i < ndomains; i++)
		for (j = 0; j < ndomains; j++) {
			flind = (i + j) % ndomains;
			if (flind == 0)
				flind = VM_FREELIST_DEFAULT;
			else
				flind += VM_NFREELIST - 1;
			vm_phys_lookup_lists[i][VM_FREELIST_DEFAULT + j] =
			    &vm_phys_free_queues[flind];
		}
	for (flind = VM_FREELIST_DEFAULT + 1; flind < VM_NFREELIST;
	     flind++)
		for (i = 0; i < ndomains; i++)
			vm_phys_lookup_lists[i][flind + ndomains - 1] =
			    &vm_phys_free_queues[flind];
#else
	for (flind = 0; flind < vm_nfreelists; flind++)
		vm_phys_lookup_lists[0][flind] = &vm_phys_free_queues[flind];
#endif
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
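/*
 * Worked example (illustrative): splitting an order 3 block down to
 * order 0 returns its upper order 2, order 1, and order 0 halves to
 * the free lists, leaving the first page of the original block to
 * satisfy the request.
 */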
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		m_buddy->order = oind;
		TAILQ_INSERT_HEAD(&fl[oind].pl, m_buddy, pageq);
		fl[oind].lcnt++;
	}
}

/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
	vm_page_t m;

	cnt.v_page_count++;
	m = vm_phys_paddr_to_vm_page(pa);
	m->phys_addr = pa;
	m->queue = PQ_NONE;
	m->segind = vm_phys_paddr_to_segind(pa);
	m->flags = PG_FREE;
	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_add_page: page %p has unexpected order %d",
	    m, m->order));
	m->pool = VM_FREEPOOL_DEFAULT;
	pmap_page_init(m);
	mtx_lock(&vm_page_queue_free_mtx);
	cnt.v_free_count++;
	vm_phys_free_pages(m, 0);
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
	vm_page_t m;
	int flind;

	for (flind = 0; flind < vm_nfreelists; flind++) {
		m = vm_phys_alloc_freelist_pages(flind, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

/*
 * Find and dequeue a free page on the given free list, with the
 * specified pool and order.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int flind, int pool, int order)
{
	struct vm_freelist *fl;
	struct vm_freelist *alt;
	int domain, oind, pind;
	vm_page_t m;

	KASSERT(flind < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    flind));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

#if VM_NDOMAIN > 1
	domain = PCPU_GET(domain);
#else
	domain = 0;
#endif
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = (*vm_phys_lookup_lists[domain][flind])[pool];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			TAILQ_REMOVE(&fl[oind].pl, m, pageq);
			fl[oind].lcnt--;
			m->order = VM_NFREEORDER;
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = (*vm_phys_lookup_lists[domain][flind])[pind];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				TAILQ_REMOVE(&alt[oind].pl, m, pageq);
				alt[oind].lcnt--;
				m->order = VM_NFREEORDER;
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
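		/*
		 * Coalesce with free buddies.  The buddy of a block of
		 * size 2^order pages is found by flipping bit
		 * (PAGE_SHIFT + order) of its physical address.  For
		 * example (illustrative, assuming 4 KB pages), an
		 * order 0 page at 0x5000 has its buddy at 0x4000; if
		 * the buddy is also a free order 0 block, the pair
		 * merges into an order 1 block at 0x4000 and the scan
		 * repeats at the next order.
		 */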
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			TAILQ_REMOVE(&fl[order].pl, m_buddy, pageq);
			fl[order].lcnt--;
			m_buddy->order = VM_NFREEORDER;
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	m->order = order;
	fl = (*seg->free_queues)[m->pool];
	TAILQ_INSERT_TAIL(&fl[order].pl, m, pageq);
	fl[order].lcnt++;
}

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
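	/*
	 * Worked example (illustrative): freeing 13 pages starting at
	 * page frame 4 frees an order 2 block at frame 4 (the start
	 * address's alignment permits no more) and an order 3 block at
	 * frame 8, leaving one page for the second loop below to free
	 * at order 0.
	 */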
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
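	/*
	 * Worked example (illustrative): if "m_set" is an order 2 block
	 * at frame 8 and "m" is frame 10, the first halving returns
	 * frames 8-9 as an order 1 block and keeps frames 10-11; the
	 * second returns frame 11 at order 0, leaving "m_set" == "m".
	 */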
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	TAILQ_REMOVE(&fl[order].pl, m_set, pageq);
	fl[order].lcnt--;
	m_set->order = VM_NFREEORDER;
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ ((vm_paddr_t)1 <<
		    (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		m_tmp->order = order;
		TAILQ_INSERT_HEAD(&fl[order].pl, m_tmp, pageq);
		fl[order].lcnt++;
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}

/*
 * Try to zero one physical page.  Used by an idle priority thread.
 */
boolean_t
vm_phys_zero_pages_idle(void)
{
	static struct vm_freelist *fl = vm_phys_free_queues[0][0];
	static int flind, oind, pind;
	vm_page_t m, m_tmp;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
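	/*
	 * The static "fl", "flind", "oind", and "pind" preserve the
	 * scan position across calls, so successive invocations resume
	 * where the previous one left off rather than rescanning from
	 * the first queue.
	 */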
	for (;;) {
		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, pageq) {
			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
					vm_phys_unfree_page(m_tmp);
					cnt.v_free_count--;
					mtx_unlock(&vm_page_queue_free_mtx);
					pmap_zero_page_idle(m_tmp);
					m_tmp->flags |= PG_ZERO;
					mtx_lock(&vm_page_queue_free_mtx);
					cnt.v_free_count++;
					vm_phys_free_pages(m_tmp, 0);
					vm_page_zero_count++;
					cnt_prezero++;
					return (TRUE);
				}
			}
		}
		oind++;
		if (oind == VM_NFREEORDER) {
			oind = 0;
			pind++;
			if (pind == VM_NFREEPOOL) {
				pind = 0;
				flind++;
				if (flind == vm_nfreelists)
					flind = 0;
			}
			fl = vm_phys_free_queues[flind][pind];
		}
	}
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
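/*
 * Example call (illustrative, not from the source): with 4 KB pages,
 * vm_phys_alloc_contig(16, 0, 0xffffffff, 65536, 0) requests sixteen
 * contiguous pages (64 KB) below 4 GB, aligned to 64 KB, with no
 * boundary restriction.
 */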
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_last, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int domain, flind, oind, order, pind;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
#if VM_NDOMAIN > 1
	domain = PCPU_GET(domain);
#else
	domain = 0;
#endif
	size = npages << PAGE_SHIFT;
	KASSERT(size != 0,
	    ("vm_phys_alloc_contig: size must not be 0"));
	KASSERT((alignment & (alignment - 1)) == 0,
	    ("vm_phys_alloc_contig: alignment must be a power of 2"));
	KASSERT((boundary & (boundary - 1)) == 0,
	    ("vm_phys_alloc_contig: boundary must be a power of 2"));
	/* Compute the queue that is the best fit for npages. */
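	/* E.g., npages == 5 yields order == 3, since 1 << 3 >= 5. */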
	for (order = 0; (1 << order) < npages; order++);
	for (flind = 0; flind < vm_nfreelists; flind++) {
		for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = (*vm_phys_lookup_lists[domain][flind])
				    [pind];
				TAILQ_FOREACH(m_ret, &fl[oind].pl, pageq) {
					/*
					 * A free list may contain physical pages
					 * from one or more segments.
					 */
					seg = &vm_phys_segs[m_ret->segind];
					if (seg->start > high ||
					    low >= seg->end)
						continue;

					/*
					 * Is the size of this allocation request
					 * larger than the largest block size?
					 */
					if (order >= VM_NFREEORDER) {
						/*
						 * Determine if a sufficient number
						 * of subsequent blocks to satisfy
						 * the allocation request are free.
						 */
						pa = VM_PAGE_TO_PHYS(m_ret);
						pa_last = pa + size;
						for (;;) {
							pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
							if (pa >= pa_last)
								break;
							if (pa < seg->start ||
							    pa >= seg->end)
								break;
							m = &seg->first_page[atop(pa - seg->start)];
							if (m->order != VM_NFREEORDER - 1)
								break;
						}
						/* If not, continue to the next block. */
						if (pa < pa_last)
							continue;
					}

					/*
					 * Determine if the blocks are within the given range,
					 * satisfy the given alignment, and do not cross the
					 * given boundary.
					 */
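					/*
					 * The XOR test below passes when
					 * the first and last byte of the
					 * run agree in every bit at or
					 * above the boundary, i.e. both
					 * lie in the same boundary-sized
					 * window.  Example (illustrative):
					 * pa = 0x1f000 with size = 0x2000
					 * and boundary = 0x10000 fails,
					 * because 0x1f000 and 0x20fff
					 * straddle the 0x20000 line.
					 */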
					pa = VM_PAGE_TO_PHYS(m_ret);
					if (pa >= low &&
					    pa + size <= high &&
					    (pa & (alignment - 1)) == 0 &&
					    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
						goto done;
				}
			}
		}
	}
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		TAILQ_REMOVE(&fl[m->order].pl, m, pageq);
		fl[m->order].lcnt--;
		m->order = VM_NFREEORDER;
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind;

	for (flind = 0; flind < vm_nfreelists; flind++) {
		db_printf("FREE LIST %d:\n"
		    "\n  ORDER (SIZE)  |  NUMBER"
		    "\n              ", flind);
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			db_printf("  |  POOL %d", pind);
		db_printf("\n--            ");
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			db_printf("-- --      ");
		db_printf("--\n");
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			db_printf("  %2.2d (%6.6dK)", oind,
			    1 << (PAGE_SHIFT - 10 + oind));
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[flind][pind];
				db_printf("  |  %6.6d", fl[oind].lcnt);
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif