1 /*- 2 * Copyright (c) 2002-2006 Rice University 3 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu> 4 * All rights reserved. 5 * 6 * This software was developed for the FreeBSD Project by Alan L. Cox, 7 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 25 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 26 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY 28 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * Physical memory system implementation 34 * 35 * Any external functions defined by this module are only to be used by the 36 * virtual memory system. 37 */ 38 39 #include <sys/cdefs.h> 40 __FBSDID("$FreeBSD$"); 41 42 #include "opt_ddb.h" 43 #include "opt_vm.h" 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/lock.h> 48 #include <sys/kernel.h> 49 #include <sys/malloc.h> 50 #include <sys/mutex.h> 51 #if MAXMEMDOM > 1 52 #include <sys/proc.h> 53 #endif 54 #include <sys/queue.h> 55 #include <sys/sbuf.h> 56 #include <sys/sysctl.h> 57 #include <sys/vmmeter.h> 58 59 #include <ddb/ddb.h> 60 61 #include <vm/vm.h> 62 #include <vm/vm_param.h> 63 #include <vm/vm_kern.h> 64 #include <vm/vm_object.h> 65 #include <vm/vm_page.h> 66 #include <vm/vm_phys.h> 67 68 _Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX, 69 "Too many physsegs."); 70 71 struct mem_affinity *mem_affinity; 72 73 int vm_ndomains = 1; 74 75 struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX]; 76 int vm_phys_nsegs; 77 78 #define VM_PHYS_FICTITIOUS_NSEGS 8 79 static struct vm_phys_fictitious_seg { 80 vm_paddr_t start; 81 vm_paddr_t end; 82 vm_page_t first_page; 83 } vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS]; 84 static struct mtx vm_phys_fictitious_reg_mtx; 85 MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages"); 86 87 static struct vm_freelist 88 vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER]; 89 90 static int vm_nfreelists = VM_FREELIST_DEFAULT + 1; 91 92 static int cnt_prezero; 93 SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD, 94 &cnt_prezero, 0, "The number of physical pages prezeroed at idle time"); 95 96 static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS); 97 SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD, 98 NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info"); 99 100 static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS); 101 SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD, 102 NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info"); 103 104 SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD, 105 &vm_ndomains, 0, "Number of physical memory domains available."); 106 107 static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool, 108 int order); 109 static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, 110 int domain); 111 static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind); 112 static int vm_phys_paddr_to_segind(vm_paddr_t pa); 113 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, 114 int order); 115 116 static __inline int 117 vm_rr_selectdomain(void) 118 { 119 #if MAXMEMDOM > 1 120 struct thread *td; 121 122 td = curthread; 123 124 td->td_dom_rr_idx++; 125 td->td_dom_rr_idx %= vm_ndomains; 126 return (td->td_dom_rr_idx); 127 #else 128 return (0); 129 #endif 130 } 131 132 boolean_t 133 vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high) 134 { 135 struct vm_phys_seg *s; 136 int idx; 137 138 while ((idx = ffsl(mask)) != 0) { 139 idx--; /* ffsl counts from 1 */ 140 mask &= ~(1UL << idx); 141 s = &vm_phys_segs[idx]; 142 if (low < s->end && high > s->start) 143 return (TRUE); 144 } 145 return (FALSE); 146 } 147 148 /* 149 * Outputs the state of the physical memory allocator, specifically, 150 * the amount of physical memory in each free list. 151 */ 152 static int 153 sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS) 154 { 155 struct sbuf sbuf; 156 struct vm_freelist *fl; 157 int dom, error, flind, oind, pind; 158 159 error = sysctl_wire_old_buffer(req, 0); 160 if (error != 0) 161 return (error); 162 sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req); 163 for (dom = 0; dom < vm_ndomains; dom++) { 164 sbuf_printf(&sbuf,"DOMAIN: %d\n", dom); 165 for (flind = 0; flind < vm_nfreelists; flind++) { 166 sbuf_printf(&sbuf, "FREE LIST %d:\n" 167 "\n ORDER (SIZE) | NUMBER" 168 "\n ", flind); 169 for (pind = 0; pind < VM_NFREEPOOL; pind++) 170 sbuf_printf(&sbuf, " | POOL %d", pind); 171 sbuf_printf(&sbuf, "\n-- "); 172 for (pind = 0; pind < VM_NFREEPOOL; pind++) 173 sbuf_printf(&sbuf, "-- -- "); 174 sbuf_printf(&sbuf, "--\n"); 175 for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) { 176 sbuf_printf(&sbuf, " %2d (%6dK)", oind, 177 1 << (PAGE_SHIFT - 10 + oind)); 178 for (pind = 0; pind < VM_NFREEPOOL; pind++) { 179 fl = vm_phys_free_queues[dom][flind][pind]; 180 sbuf_printf(&sbuf, " | %6.6d", 181 fl[oind].lcnt); 182 } 183 sbuf_printf(&sbuf, "\n"); 184 } 185 sbuf_printf(&sbuf, "\n"); 186 } 187 sbuf_printf(&sbuf, "\n"); 188 } 189 error = sbuf_finish(&sbuf); 190 sbuf_delete(&sbuf); 191 return (error); 192 } 193 194 /* 195 * Outputs the set of physical memory segments. 196 */ 197 static int 198 sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS) 199 { 200 struct sbuf sbuf; 201 struct vm_phys_seg *seg; 202 int error, segind; 203 204 error = sysctl_wire_old_buffer(req, 0); 205 if (error != 0) 206 return (error); 207 sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 208 for (segind = 0; segind < vm_phys_nsegs; segind++) { 209 sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind); 210 seg = &vm_phys_segs[segind]; 211 sbuf_printf(&sbuf, "start: %#jx\n", 212 (uintmax_t)seg->start); 213 sbuf_printf(&sbuf, "end: %#jx\n", 214 (uintmax_t)seg->end); 215 sbuf_printf(&sbuf, "domain: %d\n", seg->domain); 216 sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues); 217 } 218 error = sbuf_finish(&sbuf); 219 sbuf_delete(&sbuf); 220 return (error); 221 } 222 223 static void 224 vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail) 225 { 226 227 m->order = order; 228 if (tail) 229 TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q); 230 else 231 TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q); 232 fl[order].lcnt++; 233 } 234 235 static void 236 vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order) 237 { 238 239 TAILQ_REMOVE(&fl[order].pl, m, plinks.q); 240 fl[order].lcnt--; 241 m->order = VM_NFREEORDER; 242 } 243 244 /* 245 * Create a physical memory segment. 246 */ 247 static void 248 _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain) 249 { 250 struct vm_phys_seg *seg; 251 #ifdef VM_PHYSSEG_SPARSE 252 long pages; 253 int segind; 254 255 pages = 0; 256 for (segind = 0; segind < vm_phys_nsegs; segind++) { 257 seg = &vm_phys_segs[segind]; 258 pages += atop(seg->end - seg->start); 259 } 260 #endif 261 KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX, 262 ("vm_phys_create_seg: increase VM_PHYSSEG_MAX")); 263 KASSERT(domain < vm_ndomains, 264 ("vm_phys_create_seg: invalid domain provided")); 265 seg = &vm_phys_segs[vm_phys_nsegs++]; 266 seg->start = start; 267 seg->end = end; 268 seg->domain = domain; 269 #ifdef VM_PHYSSEG_SPARSE 270 seg->first_page = &vm_page_array[pages]; 271 #else 272 seg->first_page = PHYS_TO_VM_PAGE(start); 273 #endif 274 seg->free_queues = &vm_phys_free_queues[domain][flind]; 275 } 276 277 static void 278 vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind) 279 { 280 int i; 281 282 if (mem_affinity == NULL) { 283 _vm_phys_create_seg(start, end, flind, 0); 284 return; 285 } 286 287 for (i = 0;; i++) { 288 if (mem_affinity[i].end == 0) 289 panic("Reached end of affinity info"); 290 if (mem_affinity[i].end <= start) 291 continue; 292 if (mem_affinity[i].start > start) 293 panic("No affinity info for start %jx", 294 (uintmax_t)start); 295 if (mem_affinity[i].end >= end) { 296 _vm_phys_create_seg(start, end, flind, 297 mem_affinity[i].domain); 298 break; 299 } 300 _vm_phys_create_seg(start, mem_affinity[i].end, flind, 301 mem_affinity[i].domain); 302 start = mem_affinity[i].end; 303 } 304 } 305 306 /* 307 * Initialize the physical memory allocator. 308 */ 309 void 310 vm_phys_init(void) 311 { 312 struct vm_freelist *fl; 313 int dom, flind, i, oind, pind; 314 315 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 316 #ifdef VM_FREELIST_ISADMA 317 if (phys_avail[i] < 16777216) { 318 if (phys_avail[i + 1] > 16777216) { 319 vm_phys_create_seg(phys_avail[i], 16777216, 320 VM_FREELIST_ISADMA); 321 vm_phys_create_seg(16777216, phys_avail[i + 1], 322 VM_FREELIST_DEFAULT); 323 } else { 324 vm_phys_create_seg(phys_avail[i], 325 phys_avail[i + 1], VM_FREELIST_ISADMA); 326 } 327 if (VM_FREELIST_ISADMA >= vm_nfreelists) 328 vm_nfreelists = VM_FREELIST_ISADMA + 1; 329 } else 330 #endif 331 #ifdef VM_FREELIST_HIGHMEM 332 if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) { 333 if (phys_avail[i] < VM_HIGHMEM_ADDRESS) { 334 vm_phys_create_seg(phys_avail[i], 335 VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT); 336 vm_phys_create_seg(VM_HIGHMEM_ADDRESS, 337 phys_avail[i + 1], VM_FREELIST_HIGHMEM); 338 } else { 339 vm_phys_create_seg(phys_avail[i], 340 phys_avail[i + 1], VM_FREELIST_HIGHMEM); 341 } 342 if (VM_FREELIST_HIGHMEM >= vm_nfreelists) 343 vm_nfreelists = VM_FREELIST_HIGHMEM + 1; 344 } else 345 #endif 346 vm_phys_create_seg(phys_avail[i], phys_avail[i + 1], 347 VM_FREELIST_DEFAULT); 348 } 349 for (dom = 0; dom < vm_ndomains; dom++) { 350 for (flind = 0; flind < vm_nfreelists; flind++) { 351 for (pind = 0; pind < VM_NFREEPOOL; pind++) { 352 fl = vm_phys_free_queues[dom][flind][pind]; 353 for (oind = 0; oind < VM_NFREEORDER; oind++) 354 TAILQ_INIT(&fl[oind].pl); 355 } 356 } 357 } 358 mtx_init(&vm_phys_fictitious_reg_mtx, "vmfctr", NULL, MTX_DEF); 359 } 360 361 /* 362 * Split a contiguous, power of two-sized set of physical pages. 363 */ 364 static __inline void 365 vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order) 366 { 367 vm_page_t m_buddy; 368 369 while (oind > order) { 370 oind--; 371 m_buddy = &m[1 << oind]; 372 KASSERT(m_buddy->order == VM_NFREEORDER, 373 ("vm_phys_split_pages: page %p has unexpected order %d", 374 m_buddy, m_buddy->order)); 375 vm_freelist_add(fl, m_buddy, oind, 0); 376 } 377 } 378 379 /* 380 * Initialize a physical page and add it to the free lists. 381 */ 382 void 383 vm_phys_add_page(vm_paddr_t pa) 384 { 385 vm_page_t m; 386 struct vm_domain *vmd; 387 388 cnt.v_page_count++; 389 m = vm_phys_paddr_to_vm_page(pa); 390 m->phys_addr = pa; 391 m->queue = PQ_NONE; 392 m->segind = vm_phys_paddr_to_segind(pa); 393 vmd = vm_phys_domain(m); 394 vmd->vmd_page_count++; 395 vmd->vmd_segs |= 1UL << m->segind; 396 m->flags = PG_FREE; 397 KASSERT(m->order == VM_NFREEORDER, 398 ("vm_phys_add_page: page %p has unexpected order %d", 399 m, m->order)); 400 m->pool = VM_FREEPOOL_DEFAULT; 401 pmap_page_init(m); 402 mtx_lock(&vm_page_queue_free_mtx); 403 vm_phys_freecnt_adj(m, 1); 404 vm_phys_free_pages(m, 0); 405 mtx_unlock(&vm_page_queue_free_mtx); 406 } 407 408 /* 409 * Allocate a contiguous, power of two-sized set of physical pages 410 * from the free lists. 411 * 412 * The free page queues must be locked. 413 */ 414 vm_page_t 415 vm_phys_alloc_pages(int pool, int order) 416 { 417 vm_page_t m; 418 int dom, domain, flind; 419 420 KASSERT(pool < VM_NFREEPOOL, 421 ("vm_phys_alloc_pages: pool %d is out of range", pool)); 422 KASSERT(order < VM_NFREEORDER, 423 ("vm_phys_alloc_pages: order %d is out of range", order)); 424 425 for (dom = 0; dom < vm_ndomains; dom++) { 426 domain = vm_rr_selectdomain(); 427 for (flind = 0; flind < vm_nfreelists; flind++) { 428 m = vm_phys_alloc_domain_pages(domain, flind, pool, 429 order); 430 if (m != NULL) 431 return (m); 432 } 433 } 434 return (NULL); 435 } 436 437 /* 438 * Find and dequeue a free page on the given free list, with the 439 * specified pool and order 440 */ 441 vm_page_t 442 vm_phys_alloc_freelist_pages(int flind, int pool, int order) 443 { 444 vm_page_t m; 445 int dom, domain; 446 447 KASSERT(flind < VM_NFREELIST, 448 ("vm_phys_alloc_freelist_pages: freelist %d is out of range", flind)); 449 KASSERT(pool < VM_NFREEPOOL, 450 ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool)); 451 KASSERT(order < VM_NFREEORDER, 452 ("vm_phys_alloc_freelist_pages: order %d is out of range", order)); 453 454 for (dom = 0; dom < vm_ndomains; dom++) { 455 domain = vm_rr_selectdomain(); 456 m = vm_phys_alloc_domain_pages(domain, flind, pool, order); 457 if (m != NULL) 458 return (m); 459 } 460 return (NULL); 461 } 462 463 static vm_page_t 464 vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order) 465 { 466 struct vm_freelist *fl; 467 struct vm_freelist *alt; 468 int oind, pind; 469 vm_page_t m; 470 471 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 472 fl = &vm_phys_free_queues[domain][flind][pool][0]; 473 for (oind = order; oind < VM_NFREEORDER; oind++) { 474 m = TAILQ_FIRST(&fl[oind].pl); 475 if (m != NULL) { 476 vm_freelist_rem(fl, m, oind); 477 vm_phys_split_pages(m, oind, fl, order); 478 return (m); 479 } 480 } 481 482 /* 483 * The given pool was empty. Find the largest 484 * contiguous, power-of-two-sized set of pages in any 485 * pool. Transfer these pages to the given pool, and 486 * use them to satisfy the allocation. 487 */ 488 for (oind = VM_NFREEORDER - 1; oind >= order; oind--) { 489 for (pind = 0; pind < VM_NFREEPOOL; pind++) { 490 alt = &vm_phys_free_queues[domain][flind][pind][0]; 491 m = TAILQ_FIRST(&alt[oind].pl); 492 if (m != NULL) { 493 vm_freelist_rem(alt, m, oind); 494 vm_phys_set_pool(pool, m, oind); 495 vm_phys_split_pages(m, oind, fl, order); 496 return (m); 497 } 498 } 499 } 500 return (NULL); 501 } 502 503 /* 504 * Find the vm_page corresponding to the given physical address. 505 */ 506 vm_page_t 507 vm_phys_paddr_to_vm_page(vm_paddr_t pa) 508 { 509 struct vm_phys_seg *seg; 510 int segind; 511 512 for (segind = 0; segind < vm_phys_nsegs; segind++) { 513 seg = &vm_phys_segs[segind]; 514 if (pa >= seg->start && pa < seg->end) 515 return (&seg->first_page[atop(pa - seg->start)]); 516 } 517 return (NULL); 518 } 519 520 vm_page_t 521 vm_phys_fictitious_to_vm_page(vm_paddr_t pa) 522 { 523 struct vm_phys_fictitious_seg *seg; 524 vm_page_t m; 525 int segind; 526 527 m = NULL; 528 for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) { 529 seg = &vm_phys_fictitious_segs[segind]; 530 if (pa >= seg->start && pa < seg->end) { 531 m = &seg->first_page[atop(pa - seg->start)]; 532 KASSERT((m->flags & PG_FICTITIOUS) != 0, 533 ("%p not fictitious", m)); 534 break; 535 } 536 } 537 return (m); 538 } 539 540 int 541 vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end, 542 vm_memattr_t memattr) 543 { 544 struct vm_phys_fictitious_seg *seg; 545 vm_page_t fp; 546 long i, page_count; 547 int segind; 548 #ifdef VM_PHYSSEG_DENSE 549 long pi; 550 boolean_t malloced; 551 #endif 552 553 page_count = (end - start) / PAGE_SIZE; 554 555 #ifdef VM_PHYSSEG_DENSE 556 pi = atop(start); 557 if (pi >= first_page && atop(end) < vm_page_array_size) { 558 fp = &vm_page_array[pi - first_page]; 559 malloced = FALSE; 560 } else 561 #endif 562 { 563 fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES, 564 M_WAITOK | M_ZERO); 565 #ifdef VM_PHYSSEG_DENSE 566 malloced = TRUE; 567 #endif 568 } 569 for (i = 0; i < page_count; i++) { 570 vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr); 571 fp[i].oflags &= ~VPO_UNMANAGED; 572 fp[i].busy_lock = VPB_UNBUSIED; 573 } 574 mtx_lock(&vm_phys_fictitious_reg_mtx); 575 for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) { 576 seg = &vm_phys_fictitious_segs[segind]; 577 if (seg->start == 0 && seg->end == 0) { 578 seg->start = start; 579 seg->end = end; 580 seg->first_page = fp; 581 mtx_unlock(&vm_phys_fictitious_reg_mtx); 582 return (0); 583 } 584 } 585 mtx_unlock(&vm_phys_fictitious_reg_mtx); 586 #ifdef VM_PHYSSEG_DENSE 587 if (malloced) 588 #endif 589 free(fp, M_FICT_PAGES); 590 return (EBUSY); 591 } 592 593 void 594 vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end) 595 { 596 struct vm_phys_fictitious_seg *seg; 597 vm_page_t fp; 598 int segind; 599 #ifdef VM_PHYSSEG_DENSE 600 long pi; 601 #endif 602 603 #ifdef VM_PHYSSEG_DENSE 604 pi = atop(start); 605 #endif 606 607 mtx_lock(&vm_phys_fictitious_reg_mtx); 608 for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) { 609 seg = &vm_phys_fictitious_segs[segind]; 610 if (seg->start == start && seg->end == end) { 611 seg->start = seg->end = 0; 612 fp = seg->first_page; 613 seg->first_page = NULL; 614 mtx_unlock(&vm_phys_fictitious_reg_mtx); 615 #ifdef VM_PHYSSEG_DENSE 616 if (pi < first_page || atop(end) >= vm_page_array_size) 617 #endif 618 free(fp, M_FICT_PAGES); 619 return; 620 } 621 } 622 mtx_unlock(&vm_phys_fictitious_reg_mtx); 623 KASSERT(0, ("Unregistering not registered fictitious range")); 624 } 625 626 /* 627 * Find the segment containing the given physical address. 628 */ 629 static int 630 vm_phys_paddr_to_segind(vm_paddr_t pa) 631 { 632 struct vm_phys_seg *seg; 633 int segind; 634 635 for (segind = 0; segind < vm_phys_nsegs; segind++) { 636 seg = &vm_phys_segs[segind]; 637 if (pa >= seg->start && pa < seg->end) 638 return (segind); 639 } 640 panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment" , 641 (uintmax_t)pa); 642 } 643 644 /* 645 * Free a contiguous, power of two-sized set of physical pages. 646 * 647 * The free page queues must be locked. 648 */ 649 void 650 vm_phys_free_pages(vm_page_t m, int order) 651 { 652 struct vm_freelist *fl; 653 struct vm_phys_seg *seg; 654 vm_paddr_t pa; 655 vm_page_t m_buddy; 656 657 KASSERT(m->order == VM_NFREEORDER, 658 ("vm_phys_free_pages: page %p has unexpected order %d", 659 m, m->order)); 660 KASSERT(m->pool < VM_NFREEPOOL, 661 ("vm_phys_free_pages: page %p has unexpected pool %d", 662 m, m->pool)); 663 KASSERT(order < VM_NFREEORDER, 664 ("vm_phys_free_pages: order %d is out of range", order)); 665 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 666 seg = &vm_phys_segs[m->segind]; 667 if (order < VM_NFREEORDER - 1) { 668 pa = VM_PAGE_TO_PHYS(m); 669 do { 670 pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order)); 671 if (pa < seg->start || pa >= seg->end) 672 break; 673 m_buddy = &seg->first_page[atop(pa - seg->start)]; 674 if (m_buddy->order != order) 675 break; 676 fl = (*seg->free_queues)[m_buddy->pool]; 677 vm_freelist_rem(fl, m_buddy, order); 678 if (m_buddy->pool != m->pool) 679 vm_phys_set_pool(m->pool, m_buddy, order); 680 order++; 681 pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1); 682 m = &seg->first_page[atop(pa - seg->start)]; 683 } while (order < VM_NFREEORDER - 1); 684 } 685 fl = (*seg->free_queues)[m->pool]; 686 vm_freelist_add(fl, m, order, 1); 687 } 688 689 /* 690 * Free a contiguous, arbitrarily sized set of physical pages. 691 * 692 * The free page queues must be locked. 693 */ 694 void 695 vm_phys_free_contig(vm_page_t m, u_long npages) 696 { 697 u_int n; 698 int order; 699 700 /* 701 * Avoid unnecessary coalescing by freeing the pages in the largest 702 * possible power-of-two-sized subsets. 703 */ 704 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 705 for (;; npages -= n) { 706 /* 707 * Unsigned "min" is used here so that "order" is assigned 708 * "VM_NFREEORDER - 1" when "m"'s physical address is zero 709 * or the low-order bits of its physical address are zero 710 * because the size of a physical address exceeds the size of 711 * a long. 712 */ 713 order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1, 714 VM_NFREEORDER - 1); 715 n = 1 << order; 716 if (npages < n) 717 break; 718 vm_phys_free_pages(m, order); 719 m += n; 720 } 721 /* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */ 722 for (; npages > 0; npages -= n) { 723 order = flsl(npages) - 1; 724 n = 1 << order; 725 vm_phys_free_pages(m, order); 726 m += n; 727 } 728 } 729 730 /* 731 * Set the pool for a contiguous, power of two-sized set of physical pages. 732 */ 733 void 734 vm_phys_set_pool(int pool, vm_page_t m, int order) 735 { 736 vm_page_t m_tmp; 737 738 for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++) 739 m_tmp->pool = pool; 740 } 741 742 /* 743 * Search for the given physical page "m" in the free lists. If the search 744 * succeeds, remove "m" from the free lists and return TRUE. Otherwise, return 745 * FALSE, indicating that "m" is not in the free lists. 746 * 747 * The free page queues must be locked. 748 */ 749 boolean_t 750 vm_phys_unfree_page(vm_page_t m) 751 { 752 struct vm_freelist *fl; 753 struct vm_phys_seg *seg; 754 vm_paddr_t pa, pa_half; 755 vm_page_t m_set, m_tmp; 756 int order; 757 758 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 759 760 /* 761 * First, find the contiguous, power of two-sized set of free 762 * physical pages containing the given physical page "m" and 763 * assign it to "m_set". 764 */ 765 seg = &vm_phys_segs[m->segind]; 766 for (m_set = m, order = 0; m_set->order == VM_NFREEORDER && 767 order < VM_NFREEORDER - 1; ) { 768 order++; 769 pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order)); 770 if (pa >= seg->start) 771 m_set = &seg->first_page[atop(pa - seg->start)]; 772 else 773 return (FALSE); 774 } 775 if (m_set->order < order) 776 return (FALSE); 777 if (m_set->order == VM_NFREEORDER) 778 return (FALSE); 779 KASSERT(m_set->order < VM_NFREEORDER, 780 ("vm_phys_unfree_page: page %p has unexpected order %d", 781 m_set, m_set->order)); 782 783 /* 784 * Next, remove "m_set" from the free lists. Finally, extract 785 * "m" from "m_set" using an iterative algorithm: While "m_set" 786 * is larger than a page, shrink "m_set" by returning the half 787 * of "m_set" that does not contain "m" to the free lists. 788 */ 789 fl = (*seg->free_queues)[m_set->pool]; 790 order = m_set->order; 791 vm_freelist_rem(fl, m_set, order); 792 while (order > 0) { 793 order--; 794 pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order)); 795 if (m->phys_addr < pa_half) 796 m_tmp = &seg->first_page[atop(pa_half - seg->start)]; 797 else { 798 m_tmp = m_set; 799 m_set = &seg->first_page[atop(pa_half - seg->start)]; 800 } 801 vm_freelist_add(fl, m_tmp, order, 0); 802 } 803 KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency")); 804 return (TRUE); 805 } 806 807 /* 808 * Try to zero one physical page. Used by an idle priority thread. 809 */ 810 boolean_t 811 vm_phys_zero_pages_idle(void) 812 { 813 static struct vm_freelist *fl; 814 static int flind, oind, pind; 815 vm_page_t m, m_tmp; 816 int domain; 817 818 domain = vm_rr_selectdomain(); 819 fl = vm_phys_free_queues[domain][0][0]; 820 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 821 for (;;) { 822 TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) { 823 for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) { 824 if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) { 825 vm_phys_unfree_page(m_tmp); 826 vm_phys_freecnt_adj(m, -1); 827 mtx_unlock(&vm_page_queue_free_mtx); 828 pmap_zero_page_idle(m_tmp); 829 m_tmp->flags |= PG_ZERO; 830 mtx_lock(&vm_page_queue_free_mtx); 831 vm_phys_freecnt_adj(m, 1); 832 vm_phys_free_pages(m_tmp, 0); 833 vm_page_zero_count++; 834 cnt_prezero++; 835 return (TRUE); 836 } 837 } 838 } 839 oind++; 840 if (oind == VM_NFREEORDER) { 841 oind = 0; 842 pind++; 843 if (pind == VM_NFREEPOOL) { 844 pind = 0; 845 flind++; 846 if (flind == vm_nfreelists) 847 flind = 0; 848 } 849 fl = vm_phys_free_queues[domain][flind][pind]; 850 } 851 } 852 } 853 854 /* 855 * Allocate a contiguous set of physical pages of the given size 856 * "npages" from the free lists. All of the physical pages must be at 857 * or above the given physical address "low" and below the given 858 * physical address "high". The given value "alignment" determines the 859 * alignment of the first physical page in the set. If the given value 860 * "boundary" is non-zero, then the set of physical pages cannot cross 861 * any physical address boundary that is a multiple of that value. Both 862 * "alignment" and "boundary" must be a power of two. 863 */ 864 vm_page_t 865 vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high, 866 u_long alignment, vm_paddr_t boundary) 867 { 868 struct vm_freelist *fl; 869 struct vm_phys_seg *seg; 870 vm_paddr_t pa, pa_last, size; 871 vm_page_t m, m_ret; 872 u_long npages_end; 873 int dom, domain, flind, oind, order, pind; 874 875 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 876 size = npages << PAGE_SHIFT; 877 KASSERT(size != 0, 878 ("vm_phys_alloc_contig: size must not be 0")); 879 KASSERT((alignment & (alignment - 1)) == 0, 880 ("vm_phys_alloc_contig: alignment must be a power of 2")); 881 KASSERT((boundary & (boundary - 1)) == 0, 882 ("vm_phys_alloc_contig: boundary must be a power of 2")); 883 /* Compute the queue that is the best fit for npages. */ 884 for (order = 0; (1 << order) < npages; order++); 885 dom = 0; 886 restartdom: 887 domain = vm_rr_selectdomain(); 888 for (flind = 0; flind < vm_nfreelists; flind++) { 889 for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) { 890 for (pind = 0; pind < VM_NFREEPOOL; pind++) { 891 fl = &vm_phys_free_queues[domain][flind][pind][0]; 892 TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) { 893 /* 894 * A free list may contain physical pages 895 * from one or more segments. 896 */ 897 seg = &vm_phys_segs[m_ret->segind]; 898 if (seg->start > high || 899 low >= seg->end) 900 continue; 901 902 /* 903 * Is the size of this allocation request 904 * larger than the largest block size? 905 */ 906 if (order >= VM_NFREEORDER) { 907 /* 908 * Determine if a sufficient number 909 * of subsequent blocks to satisfy 910 * the allocation request are free. 911 */ 912 pa = VM_PAGE_TO_PHYS(m_ret); 913 pa_last = pa + size; 914 for (;;) { 915 pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1); 916 if (pa >= pa_last) 917 break; 918 if (pa < seg->start || 919 pa >= seg->end) 920 break; 921 m = &seg->first_page[atop(pa - seg->start)]; 922 if (m->order != VM_NFREEORDER - 1) 923 break; 924 } 925 /* If not, continue to the next block. */ 926 if (pa < pa_last) 927 continue; 928 } 929 930 /* 931 * Determine if the blocks are within the given range, 932 * satisfy the given alignment, and do not cross the 933 * given boundary. 934 */ 935 pa = VM_PAGE_TO_PHYS(m_ret); 936 if (pa >= low && 937 pa + size <= high && 938 (pa & (alignment - 1)) == 0 && 939 ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0) 940 goto done; 941 } 942 } 943 } 944 } 945 if (++dom < vm_ndomains) 946 goto restartdom; 947 return (NULL); 948 done: 949 for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) { 950 fl = (*seg->free_queues)[m->pool]; 951 vm_freelist_rem(fl, m, m->order); 952 } 953 if (m_ret->pool != VM_FREEPOOL_DEFAULT) 954 vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind); 955 fl = (*seg->free_queues)[m_ret->pool]; 956 vm_phys_split_pages(m_ret, oind, fl, order); 957 /* Return excess pages to the free lists. */ 958 npages_end = roundup2(npages, 1 << imin(oind, order)); 959 if (npages < npages_end) 960 vm_phys_free_contig(&m_ret[npages], npages_end - npages); 961 return (m_ret); 962 } 963 964 #ifdef DDB 965 /* 966 * Show the number of physical pages in each of the free lists. 967 */ 968 DB_SHOW_COMMAND(freepages, db_show_freepages) 969 { 970 struct vm_freelist *fl; 971 int flind, oind, pind, dom; 972 973 for (dom = 0; dom < vm_ndomains; dom++) { 974 db_printf("DOMAIN: %d\n", dom); 975 for (flind = 0; flind < vm_nfreelists; flind++) { 976 db_printf("FREE LIST %d:\n" 977 "\n ORDER (SIZE) | NUMBER" 978 "\n ", flind); 979 for (pind = 0; pind < VM_NFREEPOOL; pind++) 980 db_printf(" | POOL %d", pind); 981 db_printf("\n-- "); 982 for (pind = 0; pind < VM_NFREEPOOL; pind++) 983 db_printf("-- -- "); 984 db_printf("--\n"); 985 for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) { 986 db_printf(" %2.2d (%6.6dK)", oind, 987 1 << (PAGE_SHIFT - 10 + oind)); 988 for (pind = 0; pind < VM_NFREEPOOL; pind++) { 989 fl = vm_phys_free_queues[dom][flind][pind]; 990 db_printf(" | %6.6d", fl[oind].lcnt); 991 } 992 db_printf("\n"); 993 } 994 db_printf("\n"); 995 } 996 db_printf("\n"); 997 } 998 } 999 #endif 1000