/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */
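
/*
 * Physical memory is described by an array of segments (vm_phys_segs).  Each
 * segment maps a contiguous range of physical addresses to its vm_page
 * structures and to the free queues of the domain that it belongs to.
 */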

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#if MAXMEMDOM > 1
#include <sys/proc.h>
#endif
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

struct vm_freelist {
	struct pglist pl;
	int lcnt;
};

struct vm_phys_seg {
	vm_paddr_t start;
	vm_paddr_t end;
	vm_page_t first_page;
	int domain;
	struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER];
};

struct mem_affinity *mem_affinity;

int vm_ndomains = 1;

static struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];

static int vm_phys_nsegs;

#define VM_PHYS_FICTITIOUS_NSEGS 8
static struct vm_phys_fictitious_seg {
	vm_paddr_t start;
	vm_paddr_t end;
	vm_page_t first_page;
} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
static struct mtx vm_phys_fictitious_reg_mtx;
MALLOC_DEFINE(M_FICT_PAGES, "", "");
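
/*
 * The free page queues are organized by domain, free list, pool, and order.
 * A queue entry of order "k" holds blocks of 2^k physically contiguous
 * pages; with 4KB pages, order 0 describes 4KB blocks and order 9 describes
 * 2MB blocks.
 */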
static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists = VM_FREELIST_DEFAULT + 1;

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
    int order);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind,
    int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

static __inline int
vm_rr_selectdomain(void)
{
#if MAXMEMDOM > 1
	struct thread *td;

	td = curthread;

	td->td_dom_rr_idx++;
	td->td_dom_rr_idx %= vm_ndomains;
	return (td->td_dom_rr_idx);
#else
	return (0);
#endif
}

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "FREE LIST %d:\n"
			    "\n ORDER (SIZE) | NUMBER"
			    "\n ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, " | POOL %d", pind);
			sbuf_printf(&sbuf, "\n-- ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- -- ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, " %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, " | %6.6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
			sbuf_printf(&sbuf, "\n");
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start: %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end: %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain: %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, pageq);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, pageq);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, pageq);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain)
{
	struct vm_phys_seg *seg;
#ifdef VM_PHYSSEG_SPARSE
	long pages;
	int segind;

	pages = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		pages += atop(seg->end - seg->start);
	}
#endif
	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
#ifdef VM_PHYSSEG_SPARSE
	seg->first_page = &vm_page_array[pages];
#else
	seg->first_page = PHYS_TO_VM_PAGE(start);
#endif
	seg->free_queues = &vm_phys_free_queues[domain][flind];
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
{
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, flind, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end, flind,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end, flind,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
}

/*
 * Initialize the physical memory allocator.
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	int dom, flind, i, oind, pind;

	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
#ifdef VM_FREELIST_ISADMA
		if (phys_avail[i] < 16777216) {
			if (phys_avail[i + 1] > 16777216) {
				vm_phys_create_seg(phys_avail[i], 16777216,
				    VM_FREELIST_ISADMA);
				vm_phys_create_seg(16777216, phys_avail[i + 1],
				    VM_FREELIST_DEFAULT);
			} else {
				vm_phys_create_seg(phys_avail[i],
				    phys_avail[i + 1], VM_FREELIST_ISADMA);
			}
			if (VM_FREELIST_ISADMA >= vm_nfreelists)
				vm_nfreelists = VM_FREELIST_ISADMA + 1;
		} else
#endif
#ifdef VM_FREELIST_HIGHMEM
		if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) {
			if (phys_avail[i] < VM_HIGHMEM_ADDRESS) {
				vm_phys_create_seg(phys_avail[i],
				    VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT);
				vm_phys_create_seg(VM_HIGHMEM_ADDRESS,
				    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
			} else {
				vm_phys_create_seg(phys_avail[i],
				    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
			}
			if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
				vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
		} else
#endif
		vm_phys_create_seg(phys_avail[i], phys_avail[i + 1],
		    VM_FREELIST_DEFAULT);
	}
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}
	mtx_init(&vm_phys_fictitious_reg_mtx, "vmfctr", NULL, MTX_DEF);
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
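 *
 * For example, splitting an order-3 block to satisfy an order-1 request
 * returns the upper order-2 half and the order-1 buddy of the remaining
 * block to the free list "fl", leaving an order-1 block for the caller.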
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, 0);
	}
}

/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
	vm_page_t m;

	cnt.v_page_count++;
	m = vm_phys_paddr_to_vm_page(pa);
	m->phys_addr = pa;
	m->queue = PQ_NONE;
	m->segind = vm_phys_paddr_to_segind(pa);
	m->flags = PG_FREE;
	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_add_page: page %p has unexpected order %d",
	    m, m->order));
	m->pool = VM_FREEPOOL_DEFAULT;
	pmap_page_init(m);
	mtx_lock(&vm_page_queue_free_mtx);
	cnt.v_free_count++;
	vm_phys_free_pages(m, 0);
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
	vm_page_t m;
	int dom, domain, flind;

	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_pages: order %d is out of range", order));

	for (dom = 0; dom < vm_ndomains; dom++) {
		domain = vm_rr_selectdomain();
		for (flind = 0; flind < vm_nfreelists; flind++) {
			m = vm_phys_alloc_domain_pages(domain, flind, pool,
			    order);
			if (m != NULL)
				return (m);
		}
	}
	return (NULL);
}

/*
 * Find and dequeue a free page on the given free list, with the
 * specified pool and order.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int flind, int pool, int order)
{
	vm_page_t m;
	int dom, domain;

	KASSERT(flind < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range", flind));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

	for (dom = 0; dom < vm_ndomains; dom++) {
		domain = vm_rr_selectdomain();
		m = vm_phys_alloc_domain_pages(domain, flind, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

static vm_page_t
vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order)
{
	struct vm_freelist *fl;
	struct vm_freelist *alt;
	int oind, pind;
	vm_page_t m;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t m;
	int segind;

	m = NULL;
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (pa >= seg->start && pa < seg->end) {
			m = &seg->first_page[atop(pa - seg->start)];
			KASSERT((m->flags & PG_FICTITIOUS) != 0,
			    ("%p not fictitious", m));
			break;
		}
	}
	return (m);
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long i, page_count;
	int segind;
#ifdef VM_PHYSSEG_DENSE
	long pi;
	boolean_t malloced;
#endif

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	if (pi >= first_page && atop(end) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		malloced = FALSE;
	} else
#endif
	{
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK | M_ZERO);
#ifdef VM_PHYSSEG_DENSE
		malloced = TRUE;
#endif
	}
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
		pmap_page_init(&fp[i]);
		fp[i].oflags &= ~(VPO_BUSY | VPO_UNMANAGED);
	}
	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == 0 && seg->end == 0) {
			seg->start = start;
			seg->end = end;
			seg->first_page = fp;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			return (0);
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
#ifdef VM_PHYSSEG_DENSE
	if (malloced)
#endif
		free(fp, M_FICT_PAGES);
	return (EBUSY);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	int segind;
#ifdef VM_PHYSSEG_DENSE
	long pi;
#endif

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
#endif

	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == start && seg->end == end) {
			seg->start = seg->end = 0;
			fp = seg->first_page;
			seg->first_page = NULL;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
#ifdef VM_PHYSSEG_DENSE
			if (pi < first_page || atop(end) >= vm_page_array_size)
#endif
				free(fp, M_FICT_PAGES);
			return;
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	KASSERT(0, ("Unregistering not registered fictitious range"));
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
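 *
 * Freed blocks are coalesced with their buddies when possible: for a block
 * of order "k" at physical address "pa", the buddy begins at address
 * pa ^ (1 << (PAGE_SHIFT + k)).  If that buddy is free and has the same
 * order, the two are merged into a single block of order "k" + 1, and the
 * process repeats with the merged block.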
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
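	 *
	 * For example, freeing 7 pages that begin at page frame 12 frees an
	 * order-2 block (4 pages), then an order-1 block (2 pages), and
	 * finally an order-0 block (1 page).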
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}

/*
 * Try to zero one physical page.  Used by an idle priority thread.
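 *
 * The scan position ("flind", "oind", and "pind") is kept in static
 * variables, so it is retained across calls.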
 */
boolean_t
vm_phys_zero_pages_idle(void)
{
	static struct vm_freelist *fl;
	static int flind, oind, pind;
	vm_page_t m, m_tmp;
	int domain;

	domain = vm_rr_selectdomain();
	fl = vm_phys_free_queues[domain][0][0];
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;;) {
		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, pageq) {
			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
					vm_phys_unfree_page(m_tmp);
					cnt.v_free_count--;
					mtx_unlock(&vm_page_queue_free_mtx);
					pmap_zero_page_idle(m_tmp);
					m_tmp->flags |= PG_ZERO;
					mtx_lock(&vm_page_queue_free_mtx);
					cnt.v_free_count++;
					vm_phys_free_pages(m_tmp, 0);
					vm_page_zero_count++;
					cnt_prezero++;
					return (TRUE);
				}
			}
		}
		oind++;
		if (oind == VM_NFREEORDER) {
			oind = 0;
			pind++;
			if (pind == VM_NFREEPOOL) {
				pind = 0;
				flind++;
				if (flind == vm_nfreelists)
					flind = 0;
			}
			fl = vm_phys_free_queues[domain][flind][pind];
		}
	}
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
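 *
 * For example, with 4KB pages, a call with npages = 16, alignment = 1 << 16,
 * and boundary = 0 asks for 64KB of physically contiguous memory that begins
 * on a 64KB-aligned address between "low" and "high".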
 */
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_last, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int dom, domain, flind, oind, order, pind;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	size = npages << PAGE_SHIFT;
	KASSERT(size != 0,
	    ("vm_phys_alloc_contig: size must not be 0"));
	KASSERT((alignment & (alignment - 1)) == 0,
	    ("vm_phys_alloc_contig: alignment must be a power of 2"));
	KASSERT((boundary & (boundary - 1)) == 0,
	    ("vm_phys_alloc_contig: boundary must be a power of 2"));
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	dom = 0;
restartdom:
	domain = vm_rr_selectdomain();
	for (flind = 0; flind < vm_nfreelists; flind++) {
		for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = &vm_phys_free_queues[domain][flind][pind][0];
				TAILQ_FOREACH(m_ret, &fl[oind].pl, pageq) {
					/*
					 * A free list may contain physical pages
					 * from one or more segments.
					 */
					seg = &vm_phys_segs[m_ret->segind];
					if (seg->start > high ||
					    low >= seg->end)
						continue;

					/*
					 * Is the size of this allocation request
					 * larger than the largest block size?
					 */
					if (order >= VM_NFREEORDER) {
						/*
						 * Determine if a sufficient number
						 * of subsequent blocks to satisfy
						 * the allocation request are free.
						 */
						pa = VM_PAGE_TO_PHYS(m_ret);
						pa_last = pa + size;
						for (;;) {
							pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
							if (pa >= pa_last)
								break;
							if (pa < seg->start ||
							    pa >= seg->end)
								break;
							m = &seg->first_page[atop(pa - seg->start)];
							if (m->order != VM_NFREEORDER - 1)
								break;
						}
						/* If not, continue to the next block. */
						if (pa < pa_last)
							continue;
					}

					/*
					 * Determine if the blocks are within the given range,
					 * satisfy the given alignment, and do not cross the
					 * given boundary.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					if (pa >= low &&
					    pa + size <= high &&
					    (pa & (alignment - 1)) == 0 &&
					    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
						goto done;
				}
			}
		}
	}
	if (++dom < vm_ndomains)
		goto restartdom;
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, m->order);
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n ORDER (SIZE) | NUMBER"
			    "\n ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf(" | POOL %d", pind);
			db_printf("\n-- ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- -- ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf(" %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf(" | %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif