/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>
#include <sys/seq.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

#ifdef NUMA
struct mem_affinity __read_mostly *mem_affinity;
int __read_mostly *mem_locality;
#endif

int __read_mostly vm_ndomains = 1;

struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
int __read_mostly vm_phys_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(_vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock_padalign vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist __aligned(CACHE_LINE_SIZE)
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int __read_mostly vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_ISADMA
#define	VM_ISADMA_BOUNDARY	16777216
#endif
#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
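 *
 * For example (sketch of the concrete values), with all three optional
 * lists configured, the assertions below require VM_ISADMA_BOUNDARY
 * (16777216, i.e. 16 MB) to lie below VM_LOWMEM_BOUNDARY, which in turn
 * must lie below VM_DMA32_BOUNDARY (1 << 32, i.e. 4 GB);
 * VM_LOWMEM_BOUNDARY itself is platform-defined in <machine/vmparam.h>.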
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

#ifdef NUMA
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_locality, "A", "Phys Locality Info");
#endif

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
	    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}

int
vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
{
#ifdef NUMA
	domainset_t mask;
	int i;

	if (vm_ndomains == 1 || mem_affinity == NULL)
		return (0);

	DOMAINSET_ZERO(&mask);
	/*
	 * Check for any memory that overlaps low, high.
	 */
	for (i = 0; mem_affinity[i].end != 0; i++)
		if (mem_affinity[i].start <= high &&
		    mem_affinity[i].end >= low)
			DOMAINSET_SET(mem_affinity[i].domain, &mask);
	if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
		return (prefer);
	if (DOMAINSET_EMPTY(&mask))
		panic("vm_phys_domain_match: Impossible constraint");
	return (DOMAINSET_FFS(&mask) - 1);
#else
	return (0);
#endif
}

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
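 *
 * Example (illustrative only; the exact table layout comes from the
 * sbuf_printf() calls below, and the counts are hypothetical):
 *
 *	$ sysctl vm.phys_free
 *	DOMAIN 0:
 *	FREE LIST 0:
 *	  ORDER (SIZE)  |  NUMBER
 *	                |  POOL 0  |  POOL 1
 *	  12 ( 16384K)  |      10  |       0
 *	  ...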
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{

#ifdef NUMA
	if (mem_locality == NULL)
		return (-1);
	if (f >= vm_ndomains || t >= vm_ndomains)
		return (-1);
	return (mem_locality[f * vm_ndomains + t]);
#else
	return (-1);
#endif
}

#ifdef NUMA
/*
 * Outputs the VM locality table.
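 *
 * The output is one row per domain, where entry (i, j) is
 * vm_phys_mem_affinity(i, j), e.g. (sketch for two domains with
 * hypothetical SLIT-style distances):
 *
 *	0: 10 21
 *	1: 21 10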
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error, i, j;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	sbuf_printf(&sbuf, "\n");

	for (i = 0; i < vm_ndomains; i++) {
		sbuf_printf(&sbuf, "%d: ", i);
		for (j = 0; j < vm_ndomains; j++) {
			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, listq);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef NUMA
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
#else
	_vm_phys_create_seg(start, end, 0);
#endif
}

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_add_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_add_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
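	 *
	 * For example (sketch, assuming only the DMA32 boundary is
	 * configured): a segment [3 GB, 5 GB) is recorded as two segments,
	 * [3 GB, 4 GB) and [4 GB, 5 GB), so that no single segment spans
	 * a free list boundary.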
	 */
	paddr = start;
#ifdef VM_FREELIST_ISADMA
	if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
		paddr = VM_ISADMA_BOUNDARY;
	}
#endif
#ifdef VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
		else
#endif
#ifdef VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef VM_FREELIST_DMA32
		if (
#ifdef VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
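	 *
	 * A segment's "free_queues" ends up pointing at its domain's 2-D
	 * array of pools and orders for the chosen flind; later code indexes
	 * it as, e.g. (sketch):
	 *
	 *	fl = (*seg->free_queues)[pool];
	 *	... fl[order].pl ...
	 *
	 * (see vm_phys_free_pages() for a real use).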
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
			KASSERT(flind >= 0,
			    ("vm_phys_init: ISADMA flind < 0"));
		} else
#endif
#ifdef VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, 0);
	}
}

/*
 * Tries to allocate the specified number of pages from the specified pool
 * within the specified domain.  Returns the actual number of allocated pages
 * and a pointer to each page through the array ma[].
 *
 * The returned pages may not be physically contiguous.  However, in contrast
 * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
 * calling this function once to allocate the desired number of pages will
 * avoid wasted time in vm_phys_split_pages().
 *
 * The free page queues for the specified domain must be locked.
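 *
 * Sketch of a typical call ("dom" is a domain index; names other than the
 * functions and macros below are hypothetical):
 *
 *	vm_page_t ma[16];
 *	int got;
 *
 *	vm_domain_free_lock(VM_DOMAIN(dom));
 *	got = vm_phys_alloc_npages(dom, VM_FREEPOOL_DEFAULT, 16, ma);
 *	vm_domain_free_unlock(VM_DOMAIN(dom));
 *
 * "got" may be less than 16 when free memory is scarce.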
 */
int
vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int avail, end, flind, freelist, i, need, oind, pind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_npages: pool %d is out of range", pool));
	KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
	    ("vm_phys_alloc_npages: npages %d is out of range", npages));
	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	i = 0;
	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		flind = vm_freelist_to_flind[freelist];
		if (flind < 0)
			continue;
		fl = vm_phys_free_queues[domain][flind][pool];
		for (oind = 0; oind < VM_NFREEORDER; oind++) {
			while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
				vm_freelist_rem(fl, m, oind);
				avail = 1 << oind;
				need = imin(npages - i, avail);
				for (end = i + need; i < end;)
					ma[i++] = m++;
				if (need < avail) {
					vm_phys_free_contig(m, avail - need);
					return (npages);
				} else if (i == npages)
					return (npages);
			}
		}
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				alt = vm_phys_free_queues[domain][flind][pind];
				while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
				    NULL) {
					vm_freelist_rem(alt, m, oind);
					vm_phys_set_pool(pool, m, oind);
					avail = 1 << oind;
					need = imin(npages - i, avail);
					for (end = i + need; i < end;)
						ma[i++] = m++;
					if (need < avail) {
						vm_phys_free_contig(m, avail -
						    need);
						return (npages);
					} else if (i == npages)
						return (npages);
				}
			}
		}
	}
	return (i);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int domain, int pool, int order)
{
	vm_page_t m;
	int freelist;

	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
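 *
 * For example (sketch), an order-2 request (four contiguous pages) from
 * domain 0's default list and default pool:
 *
 *	m = vm_phys_alloc_freelist_pages(0, VM_FREELIST_DEFAULT,
 *	    VM_FREEPOOL_DEFAULT, 2);
 *
 * NULL is returned when no block of order >= 2 is free in that list.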
 */
vm_page_t
vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int oind, pind, flind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
	    domain));
	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

	flind = vm_freelist_to_flind[freelist];
	/* Check if freelist is present */
	if (flind < 0)
		return (NULL);

	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
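 *
 * For example (sketch), for any page "m" belonging to some segment,
 *
 *	vm_phys_paddr_to_vm_page(VM_PAGE_TO_PHYS(m)) == m
 *
 * holds; addresses outside every segment yield NULL.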
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg tmp, *seg;
	vm_page_t m;

	m = NULL;
	tmp.start = pa;
	tmp.end = 0;

	rw_rlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	rw_runlock(&vm_phys_fictitious_reg_lock);
	if (seg == NULL)
		return (NULL);

	m = &seg->first_page[atop(pa - seg->start)];
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

	return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
	long i;

	bzero(range, page_count * sizeof(*range));
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
		range[i].oflags &= ~VPO_UNMANAGED;
		range[i].busy_lock = VPB_UNBUSIED;
	}
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long page_count;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
	long dpage_count;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		if ((pe - first_page) > vm_page_array_size) {
			/*
			 * We have a segment that starts inside
			 * of vm_page_array, but ends outside of it.
			 *
			 * Use vm_page_array pages for those that are
			 * inside of the vm_page_array range, and
			 * allocate the remaining ones.
			 */
			dpage_count = vm_page_array_size - (pi - first_page);
			vm_phys_fictitious_init_range(fp, start, dpage_count,
			    memattr);
			page_count -= dpage_count;
			start += ptoa(dpage_count);
			goto alloc;
		}
		/*
		 * We can allocate the full range from vm_page_array,
		 * so there's no need to register the range in the tree.
		 */
		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
		return (0);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		fp = &vm_page_array[0];
		dpage_count = pe - first_page;
		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
		    memattr);
		end -= ptoa(dpage_count);
		page_count -= dpage_count;
		goto alloc;
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/*
		 * Trying to register a fictitious range that expands before
		 * and after vm_page_array.
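		 *
		 * For example (sketch): if vm_page_array covers
		 * [4 GB, 8 GB), registering [2 GB, 10 GB) would need
		 * allocated pages both before and after the array within
		 * a single segment, which is unsupported, so fail.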
		 */
		return (EINVAL);
	} else {
alloc:
#endif
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK);
#ifdef VM_PHYSSEG_DENSE
	}
#endif
	vm_phys_fictitious_init_range(fp, start, page_count, memattr);

	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
	seg->start = start;
	seg->end = end;
	seg->first_page = fp;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);

	return (0);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		if ((pe - first_page) <= vm_page_array_size) {
			/*
			 * This segment was allocated using vm_page_array
			 * only, there's nothing to do since those pages
			 * were never added to the tree.
			 */
			return;
		}
		/*
		 * We have a segment that starts inside
		 * of vm_page_array, but ends outside of it.
		 *
		 * Calculate how many pages were added to the
		 * tree and free them.
		 */
		start = ptoa(first_page + vm_page_array_size);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		end = ptoa(first_page);
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/* Since it's not possible to register such a range, panic. */
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
#endif
	tmp.start = start;
	tmp.end = 0;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	if (seg->start != start || seg->end != end) {
		rw_wunlock(&vm_phys_fictitious_reg_lock);
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);
	free(seg->first_page, M_FICT_PAGES);
	free(seg, M_FICT_PAGES);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
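 *
 * Example (sketch): freeing an order-0 page whose buddy is already free
 * coalesces the pair into an order-1 block, and so on, until a buddy is
 * unavailable or order VM_NFREEORDER - 1 is reached:
 *
 *	vm_phys_free_pages(m, 0);
 *
 * The final, possibly larger, block is queued at its resulting order.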
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	seg = &vm_phys_segs[m->segind];
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}

/*
 * Scan physical memory between the specified addresses "low" and "high" for a
 * run of contiguous physical pages that satisfy the specified conditions, and
 * return the lowest page in the run.  The specified "alignment" determines
 * the alignment of the lowest physical page in the run.  If the specified
 * "boundary" is non-zero, then the run of physical pages cannot span a
 * physical address that is a multiple of "boundary".
 *
 * "npages" must be greater than zero.  Both "alignment" and "boundary" must
 * be a power of two.
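 *
 * For example (sketch), on 4 KB pages with npages = 16 (64 KB) and
 * boundary = 65536, any run found must start exactly on a 64 KB line:
 * a candidate starting at 0x1c000 would cross the boundary at 0x20000
 * and is skipped.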
 */
vm_page_t
vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, int options)
{
	vm_paddr_t pa_end;
	vm_page_t m_end, m_run, m_start;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (low >= high)
		return (NULL);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (seg->domain != domain)
			continue;
		if (seg->start >= high)
			break;
		if (low >= seg->end)
			continue;
		if (low <= seg->start)
			m_start = seg->first_page;
		else
			m_start = &seg->first_page[atop(low - seg->start)];
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
			continue;
		m_end = &seg->first_page[atop(pa_end - seg->start)];
		m_run = vm_page_scan_contig(npages, m_start, m_end,
		    alignment, boundary, options);
		if (m_run != NULL)
			return (m_run);
	}
	return (NULL);
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
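	 *
	 * For example (sketch): if "m" is the third page of a free
	 * order-2 block, the block's lower order-1 half is freed first,
	 * then "m"'s order-0 buddy, leaving only "m" removed from the
	 * free lists.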
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa_end, pa_start;
	vm_page_t m_run;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	if (low >= high)
		return (NULL);
	m_run = NULL;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high || seg->domain != domain)
			continue;
		if (low >= seg->end)
			break;
		if (low <= seg->start)
			pa_start = seg->start;
		else
			pa_start = low;
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - pa_start < ptoa(npages))
			continue;
		m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
		    alignment, boundary);
		if (m_run != NULL)
			break;
	}
	return (m_run);
}

/*
 * Allocate a run of contiguous physical pages from the free list for the
 * specified segment.
 */
static vm_page_t
vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	vm_paddr_t pa, pa_end, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int oind, order, pind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	/* Compute the queue that is the best fit for npages. */
	order = flsl(npages - 1);
	/* Search for a run satisfying the specified conditions. */
	size = npages << PAGE_SHIFT;
	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
	    oind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = (*seg->free_queues)[pind];
			TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
				/*
				 * Is the size of this allocation request
				 * larger than the largest block size?
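				 * If so, a single free block cannot
				 * satisfy it, so the run must be pieced
				 * together from physically consecutive
				 * max-order blocks, as checked below.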
				 */
				if (order >= VM_NFREEORDER) {
					/*
					 * Determine if a sufficient number of
					 * subsequent blocks to satisfy the
					 * allocation request are free.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					pa_end = pa + size;
					if (pa_end < pa)
						continue;
					for (;;) {
						pa += 1 << (PAGE_SHIFT +
						    VM_NFREEORDER - 1);
						if (pa >= pa_end ||
						    pa < seg->start ||
						    pa >= seg->end)
							break;
						m = &seg->first_page[atop(pa -
						    seg->start)];
						if (m->order != VM_NFREEORDER -
						    1)
							break;
					}
					/* If not, go to the next block. */
					if (pa < pa_end)
						continue;
				}

				/*
				 * Determine if the blocks are within the
				 * given range, satisfy the given alignment,
				 * and do not cross the given boundary.
				 */
				pa = VM_PAGE_TO_PHYS(m_ret);
				pa_end = pa + size;
				if (pa >= low && pa_end <= high &&
				    (pa & (alignment - 1)) == 0 &&
				    rounddown2(pa ^ (pa_end - 1), boundary) == 0)
					goto done;
			}
		}
	}
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, oind);
		if (m->pool != VM_FREEPOOL_DEFAULT)
			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
	}
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << oind);
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif