/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>
#include <sys/seq.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <vm/vm_domain.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

#ifdef VM_NUMA_ALLOC
struct mem_affinity *mem_affinity;
int *mem_locality;
#endif

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(_vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_ISADMA
#define	VM_ISADMA_BOUNDARY	16777216
#endif
#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

#ifdef VM_NUMA_ALLOC
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_locality, "A", "Phys Locality Info");
#endif

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

/*
 * Default to first-touch + round-robin.
 */
static struct mtx vm_default_policy_mtx;
MTX_SYSINIT(vm_default_policy, &vm_default_policy_mtx, "default policy mutex",
    MTX_DEF);
#ifdef VM_NUMA_ALLOC
static struct vm_domain_policy vm_default_policy =
    VM_DOMAIN_POLICY_STATIC_INITIALISER(VM_POLICY_FIRST_TOUCH_ROUND_ROBIN, 0);
#else
/* Use round-robin so the domain policy code will only try once per allocation */
static struct vm_domain_policy vm_default_policy =
    VM_DOMAIN_POLICY_STATIC_INITIALISER(VM_POLICY_ROUND_ROBIN, 0);
#endif

static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
    int order);
static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

static int
sysctl_vm_default_policy(SYSCTL_HANDLER_ARGS)
{
	char policy_name[32];
	int error;

	mtx_lock(&vm_default_policy_mtx);

	/* Map policy to output string */
	switch (vm_default_policy.p.policy) {
	case VM_POLICY_FIRST_TOUCH:
		strcpy(policy_name, "first-touch");
		break;
	case VM_POLICY_FIRST_TOUCH_ROUND_ROBIN:
		strcpy(policy_name, "first-touch-rr");
		break;
	case VM_POLICY_ROUND_ROBIN:
	default:
		strcpy(policy_name, "rr");
		break;
	}
	mtx_unlock(&vm_default_policy_mtx);

	error = sysctl_handle_string(oidp, &policy_name[0],
	    sizeof(policy_name), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&vm_default_policy_mtx);
	/* Set: match on the subset of policies that make sense as a default */
	if (strcmp("first-touch-rr", policy_name) == 0) {
		vm_domain_policy_set(&vm_default_policy,
		    VM_POLICY_FIRST_TOUCH_ROUND_ROBIN, 0);
	} else if (strcmp("first-touch", policy_name) == 0) {
		vm_domain_policy_set(&vm_default_policy,
		    VM_POLICY_FIRST_TOUCH, 0);
	} else if (strcmp("rr", policy_name) == 0) {
		vm_domain_policy_set(&vm_default_policy,
		    VM_POLICY_ROUND_ROBIN, 0);
	} else {
		error = EINVAL;
		goto finish;
	}

	error = 0;
finish:
	mtx_unlock(&vm_default_policy_mtx);
	return (error);
}

SYSCTL_PROC(_vm, OID_AUTO, default_policy, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_vm_default_policy, "A",
    "Default policy (rr, first-touch, first-touch-rr)");

/*
 * Red-black tree helpers for vm fictitious range management.
 */
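/*
 * The comparison routine below doubles as both an insertion comparator and
 * a point-lookup predicate.  By convention (see
 * vm_phys_fictitious_to_vm_page() below), a lookup key is a
 * vm_phys_fictitious_seg whose "end" is zero and whose "start" holds the
 * physical address being searched for; a range being inserted always has a
 * non-zero "end".
 */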
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
	    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}

#ifdef notyet
static __inline int
vm_rr_selectdomain(void)
{
#ifdef VM_NUMA_ALLOC
	struct thread *td;

	td = curthread;

	td->td_dom_rr_idx++;
	td->td_dom_rr_idx %= vm_ndomains;
	return (td->td_dom_rr_idx);
#else
	return (0);
#endif
}
#endif /* notyet */

/*
 * Initialise a VM domain iterator.
 *
 * Check the thread policy, then the proc policy,
 * then default to the system policy.
 *
 * Later on the various layers will have this logic
 * plumbed into them and the phys code will be explicitly
 * handed a VM domain policy to use.
 */
static void
vm_policy_iterator_init(struct vm_domain_iterator *vi)
{
#ifdef VM_NUMA_ALLOC
	struct vm_domain_policy lcl;
#endif

	vm_domain_iterator_init(vi);

#ifdef VM_NUMA_ALLOC
	/* Copy out the thread policy */
	vm_domain_policy_localcopy(&lcl, &curthread->td_vm_dom_policy);
	if (lcl.p.policy != VM_POLICY_NONE) {
		/* Thread policy is present; use it */
		vm_domain_iterator_set_policy(vi, &lcl);
		return;
	}

	vm_domain_policy_localcopy(&lcl,
	    &curthread->td_proc->p_vm_dom_policy);
	if (lcl.p.policy != VM_POLICY_NONE) {
		/* Process policy is present; use it */
		vm_domain_iterator_set_policy(vi, &lcl);
		return;
	}
#endif
	/* Use system default policy */
	vm_domain_iterator_set_policy(vi, &vm_default_policy);
}

static void
vm_policy_iterator_finish(struct vm_domain_iterator *vi)
{

	vm_domain_iterator_cleanup(vi);
}

boolean_t
vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high)
{
	struct vm_phys_seg *s;
	int idx;

	while ((idx = ffsl(mask)) != 0) {
		idx--;	/* ffsl counts from 1 */
		mask &= ~(1UL << idx);
		s = &vm_phys_segs[idx];
		if (low < s->end && high > s->start)
			return (TRUE);
	}
	return (FALSE);
}

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
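/*
 * For illustration, "sysctl vm.phys_free" renders one table per (domain,
 * free list) pair, roughly of the form below (the figures are invented,
 * not taken from a real system):
 *
 *   ORDER (SIZE)  |  NUMBER
 *                 |  POOL 0  |  POOL 1
 *   --            -- --      -- --
 *   12 (16384K)   |       3  |       0
 *   11 ( 8192K)   |       7  |       1
 *   ...
 */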
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{

#ifdef VM_NUMA_ALLOC
	if (mem_locality == NULL)
		return (-1);
	if (f >= vm_ndomains || t >= vm_ndomains)
		return (-1);
	return (mem_locality[f * vm_ndomains + t]);
#else
	return (-1);
#endif
}

#ifdef VM_NUMA_ALLOC
/*
 * Outputs the VM locality table.
 */
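/*
 * Hypothetical example for a two-domain system: the output would read
 * "0: 10 21" and "1: 21 10", where entry (i, j) is
 * mem_locality[i * vm_ndomains + j], the relative cost of accessing
 * domain j's memory from domain i (-1 if no affinity information is
 * available).
 */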
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error, i, j;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	sbuf_printf(&sbuf, "\n");

	for (i = 0; i < vm_ndomains; i++) {
		sbuf_printf(&sbuf, "%d: ", i);
		for (j = 0; j < vm_ndomains; j++) {
			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef VM_NUMA_ALLOC
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
#else
	_vm_phys_create_seg(start, end, 0);
#endif
}

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
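	/*
	 * Illustrative example (assuming a configuration where only
	 * VM_FREELIST_DMA32 is defined, as on amd64): a segment spanning
	 * [2 MB, 5 GB) is recorded as two segments, [2 MB, 4 GB) and
	 * [4 GB, 5 GB), so that no single segment straddles a free list
	 * boundary.
	 */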
	paddr = start;
#ifdef VM_FREELIST_ISADMA
	if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
		paddr = VM_ISADMA_BOUNDARY;
	}
#endif
#ifdef VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
		else
#endif
#ifdef VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef VM_FREELIST_DMA32
		if (
#ifdef VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
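	/*
	 * At this point vm_freelist_to_flind[] maps each created
	 * VM_FREELIST_* constant to a dense free list index in
	 * [0, vm_nfreelists).  For example, on a configuration where only
	 * the DEFAULT and DMA32 lists exist, VM_FREELIST_DEFAULT maps to
	 * flind 0 and VM_FREELIST_DMA32 to flind 1.
	 */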
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
			KASSERT(flind >= 0,
			    ("vm_phys_init: ISADMA flind < 0"));
		} else
#endif
#ifdef VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, 0);
	}
}

/*
 * Initialize a physical page in preparation for adding it to the free
 * lists.
 */
void
vm_phys_init_page(vm_paddr_t pa)
{
	vm_page_t m;

	m = vm_phys_paddr_to_vm_page(pa);
	m->object = NULL;
	m->wire_count = 0;
	m->busy_lock = VPB_UNBUSIED;
	m->hold_count = 0;
	m->flags = m->aflags = m->oflags = 0;
	m->phys_addr = pa;
	m->queue = PQ_NONE;
	m->psind = 0;
	m->segind = vm_phys_paddr_to_segind(pa);
	m->order = VM_NFREEORDER;
	m->pool = VM_FREEPOOL_DEFAULT;
	m->valid = m->dirty = 0;
	pmap_page_init(m);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
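/*
 * For example, vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, 3) returns the
 * first page of a run of 2^3 = 8 physically contiguous, naturally aligned
 * pages, or NULL if no domain's free lists can satisfy the request.
 */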
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
	vm_page_t m;
	int domain, flind;
	struct vm_domain_iterator vi;

	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_pages: order %d is out of range", order));

	vm_policy_iterator_init(&vi);

	while ((vm_domain_iterator_run(&vi, &domain)) == 0) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			m = vm_phys_alloc_domain_pages(domain, flind, pool,
			    order);
			if (m != NULL)
				return (m);
		}
	}

	vm_policy_iterator_finish(&vi);
	return (NULL);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int freelist, int pool, int order)
{
	vm_page_t m;
	struct vm_domain_iterator vi;
	int domain;

	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

	vm_policy_iterator_init(&vi);

	while ((vm_domain_iterator_run(&vi, &domain)) == 0) {
		m = vm_phys_alloc_domain_pages(domain,
		    vm_freelist_to_flind[freelist], pool, order);
		if (m != NULL)
			return (m);
	}

	vm_policy_iterator_finish(&vi);
	return (NULL);
}

static vm_page_t
vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order)
{
	struct vm_freelist *fl;
	struct vm_freelist *alt;
	int oind, pind;
	vm_page_t m;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg tmp, *seg;
	vm_page_t m;

	m = NULL;
	tmp.start = pa;
	tmp.end = 0;

	rw_rlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	rw_runlock(&vm_phys_fictitious_reg_lock);
	if (seg == NULL)
		return (NULL);

	m = &seg->first_page[atop(pa - seg->start)];
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

	return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
	long i;

	bzero(range, page_count * sizeof(*range));
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
		range[i].oflags &= ~VPO_UNMANAGED;
		range[i].busy_lock = VPB_UNBUSIED;
	}
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long page_count;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
	long dpage_count;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		if ((pe - first_page) > vm_page_array_size) {
			/*
			 * We have a segment that starts inside
			 * of vm_page_array, but ends outside of it.
			 *
			 * Use vm_page_array pages for those that are
			 * inside of the vm_page_array range, and
			 * allocate the remaining ones.
			 */
			dpage_count = vm_page_array_size - (pi - first_page);
			vm_phys_fictitious_init_range(fp, start, dpage_count,
			    memattr);
			page_count -= dpage_count;
			start += ptoa(dpage_count);
			goto alloc;
		}
		/*
		 * We can allocate the full range from vm_page_array,
		 * so there's no need to register the range in the tree.
		 */
		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
		return (0);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		fp = &vm_page_array[0];
		dpage_count = pe - first_page;
		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
		    memattr);
		end -= ptoa(dpage_count);
		page_count -= dpage_count;
		goto alloc;
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/*
		 * Trying to register a fictitious range that expands before
		 * and after vm_page_array.
		 */
		return (EINVAL);
	} else {
alloc:
#endif
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK);
#ifdef VM_PHYSSEG_DENSE
	}
#endif
	vm_phys_fictitious_init_range(fp, start, page_count, memattr);

	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
	seg->start = start;
	seg->end = end;
	seg->first_page = fp;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);

	return (0);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		if ((pe - first_page) <= vm_page_array_size) {
			/*
			 * This segment was allocated using vm_page_array
			 * only, there's nothing to do since those pages
			 * were never added to the tree.
			 */
			return;
		}
		/*
		 * We have a segment that starts inside
		 * of vm_page_array, but ends outside of it.
		 *
		 * Calculate how many pages were added to the
		 * tree and free them.
		 */
		start = ptoa(first_page + vm_page_array_size);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		end = ptoa(first_page);
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/* Since it's not possible to register such a range, panic. */
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
#endif
	tmp.start = start;
	tmp.end = 0;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	if (seg == NULL || seg->start != start || seg->end != end) {
		rw_wunlock(&vm_phys_fictitious_reg_lock);
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);
	free(seg->first_page, M_FICT_PAGES);
	free(seg, M_FICT_PAGES);
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
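/*
 * The buddy of a 2^order-page block at physical address "pa" is found by
 * toggling a single address bit: pa ^ (1 << (PAGE_SHIFT + order)).  For
 * example, with 4 KB pages, the order-0 buddy of the page at 0x3000 is at
 * 0x2000; if both are free, they merge into an order-1 block at 0x2000,
 * and the loop below repeats the test at the next order.
 */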
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}

/*
 * Scan physical memory between the specified addresses "low" and "high" for a
 * run of contiguous physical pages that satisfy the specified conditions, and
 * return the lowest page in the run.  The specified "alignment" determines
 * the alignment of the lowest physical page in the run.  If the specified
 * "boundary" is non-zero, then the run of physical pages cannot span a
 * physical address that is a multiple of "boundary".
 *
 * "npages" must be greater than zero.  Both "alignment" and "boundary" must
 * be a power of two.
 */
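/*
 * For example, a call with npages = 16, alignment = 1 << 16, and
 * boundary = 1 << 20 searches for a run of 16 contiguous pages whose first
 * page is 64 KB aligned and which does not cross a 1 MB boundary.
 */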
vm_page_t
vm_phys_scan_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, int options)
{
	vm_paddr_t pa_end;
	vm_page_t m_end, m_run, m_start;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (low >= high)
		return (NULL);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high)
			break;
		if (low >= seg->end)
			continue;
		if (low <= seg->start)
			m_start = seg->first_page;
		else
			m_start = &seg->first_page[atop(low - seg->start)];
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
			continue;
		m_end = &seg->first_page[atop(pa_end - seg->start)];
		m_run = vm_page_scan_contig(npages, m_start, m_end,
		    alignment, boundary, options);
		if (m_run != NULL)
			return (m_run);
	}
	return (NULL);
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
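	/*
	 * For example, if "m" is the third page of a free order-2 block,
	 * the first split returns the lower order-1 half (which does not
	 * contain "m") to the free lists, and the second split returns
	 * "m"'s order-0 buddy, leaving exactly "m" extracted.
	 */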
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa_end, pa_start;
	vm_page_t m_run;
	struct vm_domain_iterator vi;
	struct vm_phys_seg *seg;
	int domain, segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if (low >= high)
		return (NULL);
	vm_policy_iterator_init(&vi);
restartdom:
	if (vm_domain_iterator_run(&vi, &domain) != 0) {
		vm_policy_iterator_finish(&vi);
		return (NULL);
	}
	m_run = NULL;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high || seg->domain != domain)
			continue;
		if (low >= seg->end)
			break;
		if (low <= seg->start)
			pa_start = seg->start;
		else
			pa_start = low;
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - pa_start < ptoa(npages))
			continue;
		m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
		    alignment, boundary);
		if (m_run != NULL)
			break;
	}
	if (m_run == NULL && !vm_domain_iterator_isdone(&vi))
		goto restartdom;
	vm_policy_iterator_finish(&vi);
	return (m_run);
}

/*
 * Allocate a run of contiguous physical pages from the free list for the
 * specified segment.
 */
static vm_page_t
vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	vm_paddr_t pa, pa_end, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int oind, order, pind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	/* Search for a run satisfying the specified conditions. */
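	/*
	 * Note: the search starts at the best-fit order computed above and
	 * walks upward.  A request for, e.g., 3 pages (order 2, the
	 * smallest power of two >= 3) may be satisfied from an order-2 or
	 * larger free block, with the excess beyond 3 pages returned to
	 * the free lists at the end of this function.
	 */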
	size = npages << PAGE_SHIFT;
	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
	    oind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = (*seg->free_queues)[pind];
			TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
				/*
				 * Is the size of this allocation request
				 * larger than the largest block size?
				 */
				if (order >= VM_NFREEORDER) {
					/*
					 * Determine if a sufficient number of
					 * subsequent blocks to satisfy the
					 * allocation request are free.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					pa_end = pa + size;
					for (;;) {
						pa += 1 << (PAGE_SHIFT +
						    VM_NFREEORDER - 1);
						if (pa >= pa_end ||
						    pa < seg->start ||
						    pa >= seg->end)
							break;
						m = &seg->first_page[atop(pa -
						    seg->start)];
						if (m->order != VM_NFREEORDER -
						    1)
							break;
					}
					/* If not, go to the next block. */
					if (pa < pa_end)
						continue;
				}

				/*
				 * Determine if the blocks are within the
				 * given range, satisfy the given alignment,
				 * and do not cross the given boundary.
				 */
				pa = VM_PAGE_TO_PHYS(m_ret);
				pa_end = pa + size;
				if (pa >= low && pa_end <= high &&
				    (pa & (alignment - 1)) == 0 &&
				    rounddown2(pa ^ (pa_end - 1), boundary) == 0)
					goto done;
			}
		}
	}
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, m->order);
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif