/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");
_Static_assert(sizeof(long long) >= sizeof(vm_paddr_t),
    "vm_paddr_t too big for ffsll, flsll.");

#ifdef NUMA
struct mem_affinity __read_mostly *mem_affinity;
int __read_mostly *mem_locality;

static int numa_disabled;
static SYSCTL_NODE(_vm, OID_AUTO, numa, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "NUMA options");
SYSCTL_INT(_vm_numa, OID_AUTO, disabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numa_disabled, 0, "NUMA-awareness in the allocators is disabled");
#endif

int __read_mostly vm_ndomains = 1;
domainset_t __read_mostly all_domains = DOMAINSET_T_INITIALIZER(0x1);

struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
int __read_mostly vm_phys_nsegs;
static struct vm_phys_seg vm_phys_early_segs[8];
static int vm_phys_early_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(&vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock_padalign vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist __aligned(CACHE_LINE_SIZE)
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL]
    [VM_NFREEORDER_MAX];

static int __read_mostly vm_nfreelists;

/*
 * These "avail lists" are globals used to communicate boot-time physical
 * memory layout to other parts of the kernel.  Each physically contiguous
 * region of memory is defined by a start address at an even index and an
 * end address at the following odd index.  Each list is terminated by a
 * pair of zero entries.
 *
 * dump_avail tells the dump code what regions to include in a crash dump, and
 * phys_avail is all of the remaining physical memory that is available for
 * the vm system.
 *
 * Initially dump_avail and phys_avail are identical.  Boot time memory
 * allocations remove extents from phys_avail that may still be included
 * in dumps.
 */
vm_paddr_t phys_avail[PHYS_AVAIL_COUNT];
vm_paddr_t dump_avail[PHYS_AVAIL_COUNT];
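
/*
 * Illustrative sketch (all values hypothetical): a machine with two usable
 * RAM regions, [0x1000, 0x9f000) and [0x100000, 0x7ffdf000), would be
 * described as
 *
 *	phys_avail[] = { 0x1000, 0x9f000, 0x100000, 0x7ffdf000, 0, 0 };
 *
 * where each even/odd pair encodes one region and the pair of zeroes
 * terminates the list.
 */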

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_free, "A",
    "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_segs, "A",
    "Phys Seg Info");

#ifdef NUMA
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_locality, "A",
    "Phys Locality Info");
#endif

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order, int tail);

/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
	    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}

int
vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
{
#ifdef NUMA
	domainset_t mask;
	int i;

	if (vm_ndomains == 1 || mem_affinity == NULL)
		return (0);

	DOMAINSET_ZERO(&mask);
	/*
	 * Check for any memory that overlaps low, high.
	 */
	for (i = 0; mem_affinity[i].end != 0; i++)
		if (mem_affinity[i].start <= high &&
		    mem_affinity[i].end >= low)
			DOMAINSET_SET(mem_affinity[i].domain, &mask);
	if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
		return (prefer);
	if (DOMAINSET_EMPTY(&mask))
		panic("vm_phys_domain_match: Impossible constraint");
	return (DOMAINSET_FFS(&mask) - 1);
#else
	return (0);
#endif
}

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{

#ifdef NUMA
	if (mem_locality == NULL)
		return (-1);
	if (f >= vm_ndomains || t >= vm_ndomains)
		return (-1);
	return (mem_locality[f * vm_ndomains + t]);
#else
	return (-1);
#endif
}

#ifdef NUMA
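/*
 * Illustrative note: mem_locality is a flattened vm_ndomains x vm_ndomains
 * matrix of SLIT-style relative distances, so on a hypothetical two-domain
 * machine the sysctl below might render it as
 *
 *	0: 10 21
 *	1: 21 10
 *
 * (lower is closer; the values shown are made up).
 */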
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error, i, j;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	sbuf_printf(&sbuf, "\n");

	for (i = 0; i < vm_ndomains; i++) {
		sbuf_printf(&sbuf, "%d: ", i);
		for (j = 0; j < vm_ndomains; j++) {
			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, listq);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef NUMA
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
#else
	_vm_phys_create_seg(start, end, 0);
#endif
}

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef	VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}
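
/*
 * Illustrative sketch (addresses hypothetical): with VM_LOWMEM_BOUNDARY at
 * 16MB and VM_DMA32_BOUNDARY at 4GB, a single segment [1MB, 8GB) passed to
 * vm_phys_add_seg() above is recorded as three segments, [1MB, 16MB),
 * [16MB, 4GB) and [4GB, 8GB), so that no segment straddles a free list
 * boundary.
 */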

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg;
#if defined(VM_DMA32_NPAGES_THRESHOLD) || defined(VM_PHYSSEG_SPARSE)
	u_long npages;
#endif
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
#ifdef	VM_DMA32_NPAGES_THRESHOLD
	npages = 0;
#endif
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef	VM_FREELIST_DMA32
		if (
#ifdef	VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
#ifdef	VM_DMA32_NPAGES_THRESHOLD
			npages += atop(seg->end - seg->start);
#endif
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Coalesce physical memory segments that are contiguous and share the
	 * same per-domain free queues.
	 */
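	/*
	 * Illustrative sketch (hypothetical layout): if the mapping pass
	 * above produced flind 0 for LOWMEM and flind 1 for DEFAULT, then
	 * adjacent segments [16MB, 1GB) and [1GB, 4GB) in the same domain
	 * both map to flind 1 and are merged here into [16MB, 4GB), while
	 * a segment ending at 16MB maps to flind 0 and is left alone.
	 */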
	prev_seg = vm_phys_segs;
	seg = &vm_phys_segs[1];
	end_seg = &vm_phys_segs[vm_phys_nsegs];
	while (seg < end_seg) {
		if (prev_seg->end == seg->start &&
		    prev_seg->free_queues == seg->free_queues) {
			prev_seg->end = seg->end;
			KASSERT(prev_seg->domain == seg->domain,
			    ("vm_phys_init: free queues cannot span domains"));
			vm_phys_nsegs--;
			end_seg--;
			for (tmp_seg = seg; tmp_seg < end_seg; tmp_seg++)
				*tmp_seg = *(tmp_seg + 1);
		} else {
			prev_seg = seg;
			seg++;
		}
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}

/*
 * Register info about the NUMA topology of the system.
 *
 * Invoked by platform-dependent code prior to vm_phys_init().
 */
void
vm_phys_register_domains(int ndomains, struct mem_affinity *affinity,
    int *locality)
{
#ifdef NUMA
	int i;

	/*
	 * For now the only override value that we support is 1, which
	 * effectively disables NUMA-awareness in the allocators.
	 */
	TUNABLE_INT_FETCH("vm.numa.disabled", &numa_disabled);
	if (numa_disabled)
		ndomains = 1;

	if (ndomains > 1) {
		vm_ndomains = ndomains;
		mem_affinity = affinity;
		mem_locality = locality;
	}

	for (i = 0; i < vm_ndomains; i++)
		DOMAINSET_SET(i, &all_domains);
#else
	(void)ndomains;
	(void)affinity;
	(void)locality;
#endif
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the order [order, oind) queues
 * are known to be empty.  The objective is to reduce the likelihood of
 * long-term fragmentation by promoting contemporaneous allocation and
 * (hopefully) deallocation.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
    int tail)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, tail);
	}
}
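
/*
 * Illustrative sketch: splitting an order-5 block (32 pages) down to a
 * requested order of 2 frees the upper halves as buddies of decreasing
 * order: pages [16, 32) go to order 4, [8, 16) to order 3, and [4, 8) to
 * order 2, leaving the order-2 block at pages [0, 4) for the caller.
 */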

/*
 * Add the physical pages [m, m + npages) at the beginning of a power-of-two
 * aligned and sized set to the specified free list.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the lower-order queues are
 * known to be empty.  The objective is to reduce the likelihood of long-
 * term fragmentation by promoting contemporaneous allocation and (hopefully)
 * deallocation.
 *
 * The physical page m's buddy must not be free.
 */
static void
vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
{
	int order;

	KASSERT(npages == 0 ||
	    (VM_PAGE_TO_PHYS(m) &
	    ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
	    ("%s: page %p and npages %u are misaligned",
	    __func__, m, npages));
	while (npages > 0) {
		KASSERT(m->order == VM_NFREEORDER,
		    ("%s: page %p has unexpected order %d",
		    __func__, m, m->order));
		order = fls(npages) - 1;
		KASSERT(order < VM_NFREEORDER,
		    ("%s: order %d is out of range", __func__, order));
		vm_freelist_add(fl, m, order, tail);
		m += 1 << order;
		npages -= 1 << order;
	}
}

/*
 * Add the physical pages [m, m + npages) at the end of a power-of-two aligned
 * and sized set to the specified free list.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the lower-order queues are
 * known to be empty.  The objective is to reduce the likelihood of long-
 * term fragmentation by promoting contemporaneous allocation and (hopefully)
 * deallocation.
 *
 * If npages is zero, this function does nothing and ignores the physical page
 * parameter m.  Otherwise, the physical page m's buddy must not be free.
 */
static vm_page_t
vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
{
	int order;

	KASSERT(npages == 0 ||
	    ((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
	    ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
	    ("vm_phys_enq_range: page %p and npages %u are misaligned",
	    m, npages));
	while (npages > 0) {
		KASSERT(m->order == VM_NFREEORDER,
		    ("vm_phys_enq_range: page %p has unexpected order %d",
		    m, m->order));
		order = ffs(npages) - 1;
		KASSERT(order < VM_NFREEORDER,
		    ("vm_phys_enq_range: order %d is out of range", order));
		vm_freelist_add(fl, m, order, tail);
		m += 1 << order;
		npages -= 1 << order;
	}
	return (m);
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
static void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}
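
/*
 * Illustrative sketch: for npages = 11 (binary 1011), vm_phys_enq_beg()
 * peels blocks from the high bits first (fls), freeing blocks of 8, 2,
 * and 1 pages from the front, while vm_phys_enq_range() peels from the
 * low bits (ffs), freeing blocks of 1, 2, and 8 pages.  Either way each
 * freed block stays naturally aligned within its power-of-two set.
 */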

/*
 * Tries to allocate the specified number of pages from the specified pool
 * within the specified domain.  Returns the actual number of allocated pages
 * and a pointer to each page through the array ma[].
 *
 * The returned pages may not be physically contiguous.  However, in contrast
 * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
 * calling this function once to allocate the desired number of pages will
 * avoid wasted time in vm_phys_split_pages().
 *
 * The free page queues for the specified domain must be locked.
 */
int
vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int avail, end, flind, freelist, i, oind, pind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_npages: pool %d is out of range", pool));
	KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
	    ("vm_phys_alloc_npages: npages %d is out of range", npages));
	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	i = 0;
	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		flind = vm_freelist_to_flind[freelist];
		if (flind < 0)
			continue;
		fl = vm_phys_free_queues[domain][flind][pool];
		for (oind = 0; oind < VM_NFREEORDER; oind++) {
			while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
				vm_freelist_rem(fl, m, oind);
				avail = i + (1 << oind);
				end = imin(npages, avail);
				while (i < end)
					ma[i++] = m++;
				if (i == npages) {
					/*
					 * Return excess pages to fl.  Its order
					 * [0, oind) queues are empty.
					 */
					vm_phys_enq_range(m, avail - i, fl, 1);
					return (npages);
				}
			}
		}
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				alt = vm_phys_free_queues[domain][flind][pind];
				while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
				    NULL) {
					vm_freelist_rem(alt, m, oind);
					vm_phys_set_pool(pool, m, oind);
					avail = i + (1 << oind);
					end = imin(npages, avail);
					while (i < end)
						ma[i++] = m++;
					if (i == npages) {
						/*
						 * Return excess pages to fl.
						 * Its order [0, oind) queues
						 * are empty.
						 */
						vm_phys_enq_range(m, avail - i,
						    fl, 1);
						return (npages);
					}
				}
			}
		}
	}
	return (i);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int domain, int pool, int order)
{
	vm_page_t m;
	int freelist;

	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int oind, pind, flind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
	    domain));
	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

	flind = vm_freelist_to_flind[freelist];
	/* Check if freelist is present */
	if (flind < 0)
		return (NULL);

	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			/* The order [order, oind) queues are empty. */
			vm_phys_split_pages(m, oind, fl, order, 1);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				/* The order [order, oind) queues are empty. */
				vm_phys_split_pages(m, oind, fl, order, 1);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;

	if ((seg = vm_phys_paddr_to_seg(pa)) != NULL)
		return (&seg->first_page[atop(pa - seg->start)]);
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg tmp, *seg;
	vm_page_t m;

	m = NULL;
	tmp.start = pa;
	tmp.end = 0;

	rw_rlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	rw_runlock(&vm_phys_fictitious_reg_lock);
	if (seg == NULL)
		return (NULL);

	m = &seg->first_page[atop(pa - seg->start)];
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

	return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
	long i;

	bzero(range, page_count * sizeof(*range));
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
		range[i].oflags &= ~VPO_UNMANAGED;
		range[i].busy_lock = VPB_UNBUSIED;
	}
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long page_count;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
	long dpage_count;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		if ((pe - first_page) > vm_page_array_size) {
			/*
			 * We have a segment that starts inside
			 * of vm_page_array, but ends outside of it.
			 *
			 * Use vm_page_array pages for those that are
			 * inside of the vm_page_array range, and
			 * allocate the remaining ones.
			 */
			dpage_count = vm_page_array_size - (pi - first_page);
			vm_phys_fictitious_init_range(fp, start, dpage_count,
			    memattr);
			page_count -= dpage_count;
			start += ptoa(dpage_count);
			goto alloc;
		}
		/*
		 * We can allocate the full range from vm_page_array,
		 * so there's no need to register the range in the tree.
		 */
		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
		return (0);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		fp = &vm_page_array[0];
		dpage_count = pe - first_page;
		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
		    memattr);
		end -= ptoa(dpage_count);
		page_count -= dpage_count;
		goto alloc;
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/*
		 * Trying to register a fictitious range that extends before
		 * and after vm_page_array.
		 */
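		/*
		 * Illustrative layout of this rejected case:
		 *
		 *   requested:   [start ............................. end)
		 *   page array:        [first_page ... +array_size)
		 *
		 * Supporting it would require stitching malloc'ed pages
		 * around both sides of vm_page_array, so it is rejected.
		 */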
		return (EINVAL);
	} else {
alloc:
#endif
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK);
#ifdef VM_PHYSSEG_DENSE
	}
#endif
	vm_phys_fictitious_init_range(fp, start, page_count, memattr);

	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
	seg->start = start;
	seg->end = end;
	seg->first_page = fp;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);

	return (0);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		if ((pe - first_page) <= vm_page_array_size) {
			/*
			 * This segment was allocated using vm_page_array
			 * only, there's nothing to do since those pages
			 * were never added to the tree.
			 */
			return;
		}
		/*
		 * We have a segment that starts inside
		 * of vm_page_array, but ends outside of it.
		 *
		 * Calculate how many pages were added to the
		 * tree and free them.
		 */
		start = ptoa(first_page + vm_page_array_size);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		end = ptoa(first_page);
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/* Since it's not possible to register such a range, panic. */
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
#endif
	tmp.start = start;
	tmp.end = 0;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	/* A failed lookup returns NULL; treat it like a mismatched range. */
	if (seg == NULL || seg->start != start || seg->end != end) {
		rw_wunlock(&vm_phys_fictitious_reg_lock);
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);
	free(seg->first_page, M_FICT_PAGES);
	free(seg, M_FICT_PAGES);
}
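
/*
 * Illustrative sketch of the buddy arithmetic used below: the buddy of the
 * block at physical address pa with order k lies at
 * pa ^ (1 << (PAGE_SHIFT + k)).  With 4KB pages, the order-0 buddy of
 * 0x3000 is 0x2000; if that buddy is free, the pair merges into an order-1
 * block at 0x2000, whose own buddy is then 0x0000, and so on while each
 * successive buddy remains free.
 */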

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	seg = &vm_phys_segs[m->segind];
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}

/*
 * Free a contiguous, arbitrarily sized set of physical pages, without
 * merging across set boundaries.
 *
 * The free page queues must be locked.
 */
void
vm_phys_enqueue_contig(vm_page_t m, u_long npages)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_page_t m_end;
	vm_paddr_t diff, lo;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
	seg = &vm_phys_segs[m->segind];
	fl = (*seg->free_queues)[m->pool];
	m_end = m + npages;
	/* Free blocks of increasing size. */
	lo = atop(VM_PAGE_TO_PHYS(m));
	if (m < m_end &&
	    (diff = lo ^ (lo + npages - 1)) != 0) {
		order = min(flsll(diff) - 1, VM_NFREEORDER - 1);
		m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl, 1);
	}

	/* Free blocks of maximum size. */
	order = VM_NFREEORDER - 1;
	while (m + (1 << order) <= m_end) {
		KASSERT(seg == &vm_phys_segs[m->segind],
		    ("%s: page range [%p,%p) spans multiple segments",
		    __func__, m_end - npages, m));
		vm_freelist_add(fl, m, order, 1);
		m += 1 << order;
	}
	/* Free blocks of diminishing size. */
	vm_phys_enq_beg(m, m_end - m, fl, 1);
}

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	vm_paddr_t lo;
	vm_page_t m_start, m_end;
	unsigned max_order, order_start, order_end;

	vm_domain_free_assert_locked(vm_pagequeue_domain(m));

	lo = atop(VM_PAGE_TO_PHYS(m));
	max_order = min(flsll(lo ^ (lo + npages)) - 1, VM_NFREEORDER - 1);

	m_start = m;
	order_start = ffsll(lo) - 1;
	if (order_start < max_order)
		m_start += 1 << order_start;
	m_end = m + npages;
	order_end = ffsll(lo + npages) - 1;
	if (order_end < max_order)
		m_end -= 1 << order_end;
	/*
	 * Avoid unnecessary coalescing by freeing the pages at the start and
	 * end of the range last.
	 */
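	/*
	 * Illustrative sketch (page frame numbers hypothetical): freeing
	 * lo = 12, npages = 9, i.e. frames [12, 21), gives max_order =
	 * flsll(12 ^ 21) - 1 = 4.  order_start = ffsll(12) - 1 = 2 and
	 * order_end = ffsll(21) - 1 = 0, so the middle [16, 20) is enqueued
	 * first, and the order-2 head [12, 16) and order-0 tail [20, 21)
	 * are freed afterwards, where they may coalesce outward.
	 */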
	if (m_start < m_end)
		vm_phys_enqueue_contig(m_start, m_end - m_start);
	if (order_start < max_order)
		vm_phys_free_pages(m, order_start);
	if (order_end < max_order)
		vm_phys_free_pages(m_end, order_end);
}

/*
 * Identify the first address range within segment segind or greater
 * that matches the domain, lies within the low/high range, and has
 * enough pages.  Return -1 if there is none.
 */
int
vm_phys_find_range(vm_page_t bounds[], int segind, int domain,
    u_long npages, vm_paddr_t low, vm_paddr_t high)
{
	vm_paddr_t pa_end, pa_start;
	struct vm_phys_seg *end_seg, *seg;

	KASSERT(npages > 0, ("npages is zero"));
	KASSERT(domain >= 0 && domain < vm_ndomains, ("domain out of range"));
	end_seg = &vm_phys_segs[vm_phys_nsegs];
	for (seg = &vm_phys_segs[segind]; seg < end_seg; seg++) {
		if (seg->domain != domain)
			continue;
		if (seg->start >= high)
			return (-1);
		pa_start = MAX(low, seg->start);
		pa_end = MIN(high, seg->end);
		if (pa_end - pa_start < ptoa(npages))
			continue;
		bounds[0] = &seg->first_page[atop(pa_start - seg->start)];
		bounds[1] = &seg->first_page[atop(pa_end - seg->start)];
		return (seg - vm_phys_segs);
	}
	return (-1);
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return true.  Otherwise, return
 * false, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
bool
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (false);
	}
	if (m_set->order < order)
		return (false);
	if (m_set->order == VM_NFREEORDER)
		return (false);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
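	/*
	 * Illustrative sketch: extracting frame 5 from a free order-3 block
	 * covering frames [0, 8) returns [0, 4) at order 2 (5 lies in the
	 * upper half), then [6, 8) at order 1, then frame 4 at order 0,
	 * leaving only frame 5 removed from the free lists.
	 */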
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (true);
}

/*
 * Find a run of contiguous physical pages, meeting alignment requirements, from
 * a list of max-sized page blocks, where we need at least two consecutive
 * blocks to satisfy the (large) page request.
 */
static vm_page_t
vm_phys_find_freelist_contig(struct vm_freelist *fl, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_phys_seg *seg;
	vm_page_t m, m_iter, m_ret;
	vm_paddr_t max_size, size;
	int max_order;

	max_order = VM_NFREEORDER - 1;
	size = npages << PAGE_SHIFT;
	max_size = (vm_paddr_t)1 << (PAGE_SHIFT + max_order);
	KASSERT(size > max_size, ("size is too small"));

	/*
	 * In order to avoid examining any free max-sized page block more than
	 * twice, identify the ones that are first in a physically-contiguous
	 * sequence of such blocks, and only for those walk the sequence to
	 * check if there are enough free blocks starting at a properly aligned
	 * block.  Thus, no block is checked for free-ness more than twice.
	 */
	TAILQ_FOREACH(m, &fl[max_order].pl, listq) {
		/*
		 * Skip m unless it is first in a sequence of free max page
		 * blocks >= low in its segment.
		 */
		seg = &vm_phys_segs[m->segind];
		if (VM_PAGE_TO_PHYS(m) < MAX(low, seg->start))
			continue;
		if (VM_PAGE_TO_PHYS(m) >= max_size &&
		    VM_PAGE_TO_PHYS(m) - max_size >= MAX(low, seg->start) &&
		    max_order == m[-1 << max_order].order)
			continue;

		/*
		 * Advance m_ret from m to the first of the sequence, if any,
		 * that satisfies alignment conditions and might leave enough
		 * space.
		 */
		m_ret = m;
		while (!vm_addr_ok(VM_PAGE_TO_PHYS(m_ret),
		    size, alignment, boundary) &&
		    VM_PAGE_TO_PHYS(m_ret) + size <= MIN(high, seg->end) &&
		    max_order == m_ret[1 << max_order].order)
			m_ret += 1 << max_order;

		/*
		 * Skip m unless some block m_ret in the sequence is properly
		 * aligned, and begins a sequence of enough pages less than
		 * high, and in the same segment.
		 */
		if (VM_PAGE_TO_PHYS(m_ret) + size > MIN(high, seg->end))
			continue;

		/*
		 * Skip m unless the blocks to allocate starting at m_ret are
		 * all free.
		 */
		for (m_iter = m_ret;
		    m_iter < m_ret + npages && max_order == m_iter->order;
		    m_iter += 1 << max_order) {
		}
		if (m_iter < m_ret + npages)
			continue;
		return (m_ret);
	}
	return (NULL);
}
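
/*
 * Illustrative sketch: on a configuration where VM_NFREEORDER - 1 = 12 and
 * pages are 4KB, a max-order block covers 16MB, so a hypothetical request
 * for npages = 8192 (32MB) can only be satisfied by two physically
 * consecutive free max-order blocks; the scan above walks the max-order
 * queue looking for such a consecutive, suitably aligned run.
 */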

/*
 * Find a run of contiguous physical pages from the specified free list
 * table.
 */
static vm_page_t
vm_phys_find_queues_contig(
    struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX],
    u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	vm_page_t m_ret;
	vm_paddr_t pa, pa_end, size;
	int oind, order, pind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	/* Compute the queue that is the best fit for npages. */
	order = flsl(npages - 1);
	/* Search for a large enough free block. */
	size = npages << PAGE_SHIFT;
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = (*queues)[pind];
			TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
				/*
				 * Determine if the address range starting at pa
				 * is within the given range, satisfies the
				 * given alignment, and does not cross the given
				 * boundary.
				 */
				pa = VM_PAGE_TO_PHYS(m_ret);
				pa_end = pa + size;
				if (low <= pa && pa_end <= high &&
				    vm_addr_ok(pa, size, alignment, boundary))
					return (m_ret);
			}
		}
	}
	if (order < VM_NFREEORDER)
		return (NULL);
	/* Search for a long-enough sequence of max-order blocks. */
	for (pind = 0; pind < VM_NFREEPOOL; pind++) {
		fl = (*queues)[pind];
		m_ret = vm_phys_find_freelist_contig(fl, npages,
		    low, high, alignment, boundary);
		if (m_ret != NULL)
			return (m_ret);
	}
	return (NULL);
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa_end, pa_start;
	struct vm_freelist *fl;
	vm_page_t m, m_run;
	struct vm_phys_seg *seg;
	struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
	int oind, segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	if (low >= high)
		return (NULL);
	queues = NULL;
	m_run = NULL;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high || seg->domain != domain)
			continue;
		if (low >= seg->end)
			break;
		if (low <= seg->start)
			pa_start = seg->start;
		else
			pa_start = low;
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - pa_start < ptoa(npages))
			continue;
		/*
		 * If a previous segment led to a search using
		 * the same free lists as would this segment, then
		 * we've actually already searched within this
		 * too.  So skip it.
		 */
		if (seg->free_queues == queues)
			continue;
		queues = seg->free_queues;
		m_run = vm_phys_find_queues_contig(queues, npages,
		    low, high, alignment, boundary);
		if (m_run != NULL)
			break;
	}
	if (m_run == NULL)
		return (NULL);

	/* Allocate pages from the page-range found. */
	for (m = m_run; m < &m_run[npages]; m = &m[1 << oind]) {
		fl = (*queues)[m->pool];
		oind = m->order;
		vm_freelist_rem(fl, m, oind);
		if (m->pool != VM_FREEPOOL_DEFAULT)
			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
	}
	/* Return excess pages to the free lists. */
	fl = (*queues)[VM_FREEPOOL_DEFAULT];
	vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);

	/* Return page verified to satisfy conditions of request. */
	pa_start = VM_PAGE_TO_PHYS(m_run);
	KASSERT(low <= pa_start,
	    ("memory allocated below minimum requested range"));
	KASSERT(pa_start + ptoa(npages) <= high,
	    ("memory allocated above maximum requested range"));
	seg = &vm_phys_segs[m_run->segind];
	KASSERT(seg->domain == domain,
	    ("memory not allocated from specified domain"));
	KASSERT(vm_addr_ok(pa_start, ptoa(npages), alignment, boundary),
	    ("memory alignment/boundary constraints not satisfied"));
	return (m_run);
}

/*
 * Return the index of the first unused slot which may be the terminating
 * entry.
 */
static int
vm_phys_avail_count(void)
{
	int i;

	for (i = 0; phys_avail[i + 1]; i += 2)
		continue;
	if (i > PHYS_AVAIL_ENTRIES)
		panic("Improperly terminated phys_avail %d entries", i);

	return (i);
}

/*
 * Assert that a phys_avail entry is valid.
 */
static void
vm_phys_avail_check(int i)
{
	if (phys_avail[i] & PAGE_MASK)
		panic("Unaligned phys_avail[%d]: %#jx", i,
		    (intmax_t)phys_avail[i]);
	if (phys_avail[i + 1] & PAGE_MASK)
		panic("Unaligned phys_avail[%d + 1]: %#jx", i,
		    (intmax_t)phys_avail[i + 1]);
	if (phys_avail[i + 1] < phys_avail[i])
		panic("phys_avail[%d] start %#jx < end %#jx", i,
		    (intmax_t)phys_avail[i], (intmax_t)phys_avail[i + 1]);
}

/*
 * Return the index of an overlapping phys_avail entry or -1.
 */
#ifdef NUMA
static int
vm_phys_avail_find(vm_paddr_t pa)
{
	int i;

	for (i = 0; phys_avail[i + 1]; i += 2)
		if (phys_avail[i] <= pa && phys_avail[i + 1] > pa)
			return (i);
	return (-1);
}
#endif

/*
 * Return the index of the largest entry.
 */
int
vm_phys_avail_largest(void)
{
	vm_paddr_t sz, largesz;
	int largest;
	int i;

	largest = 0;
	largesz = 0;
	for (i = 0; phys_avail[i + 1]; i += 2) {
		sz = vm_phys_avail_size(i);
		if (sz > largesz) {
			largesz = sz;
			largest = i;
		}
	}

	return (largest);
}

vm_paddr_t
vm_phys_avail_size(int i)
{

	return (phys_avail[i + 1] - phys_avail[i]);
}
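
/*
 * Illustrative sketch (addresses hypothetical): splitting the entry
 * { 0x100000, 0x800000 } at pa = 0x400000 shifts the tail of the array up
 * by one pair and yields the two entries
 *
 *	{ 0x100000, 0x400000 }, { 0x400000, 0x800000 }
 *
 * in its place.
 */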
/*
 * Split an entry at the address 'pa'.  Return zero on success or errno.
 */
static int
vm_phys_avail_split(vm_paddr_t pa, int i)
{
	int cnt;

	vm_phys_avail_check(i);
	if (pa <= phys_avail[i] || pa >= phys_avail[i + 1])
		panic("vm_phys_avail_split: invalid address");
	cnt = vm_phys_avail_count();
	if (cnt >= PHYS_AVAIL_ENTRIES)
		return (ENOSPC);
	memmove(&phys_avail[i + 2], &phys_avail[i],
	    (cnt - i) * sizeof(phys_avail[0]));
	phys_avail[i + 1] = pa;
	phys_avail[i + 2] = pa;
	vm_phys_avail_check(i);
	vm_phys_avail_check(i + 2);

	return (0);
}

/*
 * Check if a given physical address can be included as part of a crash dump.
 */
bool
vm_phys_is_dumpable(vm_paddr_t pa)
{
	vm_page_t m;
	int i;

	if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
		return ((m->flags & PG_NODUMP) == 0);

	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			return (true);
	}
	return (false);
}

void
vm_phys_early_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_seg *seg;

	if (vm_phys_early_nsegs == -1)
		panic("%s: called after initialization", __func__);
	if (vm_phys_early_nsegs == nitems(vm_phys_early_segs))
		panic("%s: ran out of early segments", __func__);

	seg = &vm_phys_early_segs[vm_phys_early_nsegs++];
	seg->start = start;
	seg->end = end;
}

/*
 * This routine allocates NUMA node specific memory before the page
 * allocator is bootstrapped.
 */
vm_paddr_t
vm_phys_early_alloc(int domain, size_t alloc_size)
{
#ifdef NUMA
	int mem_index;
#endif
	int i, biggestone;
	vm_paddr_t pa, mem_start, mem_end, size, biggestsize, align;

	KASSERT(domain == -1 || (domain >= 0 && domain < vm_ndomains),
	    ("%s: invalid domain index %d", __func__, domain));

	/*
	 * Search the mem_affinity array for the biggest address
	 * range in the desired domain.  This is used to constrain
	 * the phys_avail selection below.
	 */
	biggestsize = 0;
	mem_start = 0;
	mem_end = -1;
#ifdef NUMA
	mem_index = 0;
	if (mem_affinity != NULL) {
		for (i = 0;; i++) {
			size = mem_affinity[i].end - mem_affinity[i].start;
			if (size == 0)
				break;
			if (domain != -1 && mem_affinity[i].domain != domain)
				continue;
			if (size > biggestsize) {
				mem_index = i;
				biggestsize = size;
			}
		}
		mem_start = mem_affinity[mem_index].start;
		mem_end = mem_affinity[mem_index].end;
	}
#endif

	/*
	 * Now find the biggest physical segment within the desired
	 * NUMA domain.
	 */
	biggestsize = 0;
	biggestone = 0;
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		/* skip regions that are out of range */
		if (phys_avail[i + 1] - alloc_size < mem_start ||
		    phys_avail[i + 1] > mem_end)
			continue;
		size = vm_phys_avail_size(i);
		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
	}
	alloc_size = round_page(alloc_size);

	/*
	 * Grab single pages from the front to reduce fragmentation.
	 */
	if (alloc_size == PAGE_SIZE) {
		pa = phys_avail[biggestone];
		phys_avail[biggestone] += PAGE_SIZE;
		vm_phys_avail_check(biggestone);
		return (pa);
	}

	/*
	 * Naturally align large allocations.
	 */
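	/*
	 * Illustrative sketch (values hypothetical): carving a 2MB
	 * allocation from an entry ending at 0x7ff40000 first trims
	 * align = 0x7ff40000 & (0x200000 - 1) = 0x140000 bytes off the
	 * end, then takes the naturally aligned 2MB ending at the new
	 * end, 0x7fe00000, returning pa = 0x7fc00000.
	 */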
	align = phys_avail[biggestone + 1] & (alloc_size - 1);
	if (alloc_size + align > biggestsize)
		panic("cannot find a large enough size\n");
	if (align != 0 &&
	    vm_phys_avail_split(phys_avail[biggestone + 1] - align,
	    biggestone) != 0)
		/* Wasting memory. */
		phys_avail[biggestone + 1] -= align;

	phys_avail[biggestone + 1] -= alloc_size;
	vm_phys_avail_check(biggestone);
	pa = phys_avail[biggestone + 1];
	return (pa);
}

void
vm_phys_early_startup(void)
{
	struct vm_phys_seg *seg;
	int i;

	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; i < vm_phys_early_nsegs; i++) {
		seg = &vm_phys_early_segs[i];
		vm_phys_add_seg(seg->start, seg->end);
	}
	vm_phys_early_nsegs = -1;

#ifdef NUMA
	/* Force phys_avail to be split by domain. */
	if (mem_affinity != NULL) {
		int idx;

		for (i = 0; mem_affinity[i].end != 0; i++) {
			idx = vm_phys_avail_find(mem_affinity[i].start);
			if (idx != -1 &&
			    phys_avail[idx] != mem_affinity[i].start)
				vm_phys_avail_split(mem_affinity[i].start, idx);
			idx = vm_phys_avail_find(mem_affinity[i].end);
			if (idx != -1 &&
			    phys_avail[idx] != mem_affinity[i].end)
				vm_phys_avail_split(mem_affinity[i].end, idx);
		}
	}
#endif
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND_FLAGS(freepages, db_show_freepages, DB_CMD_MEMSAFE)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif