/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */
37 */ 38 39 #include <sys/cdefs.h> 40 __FBSDID("$FreeBSD$"); 41 42 #include "opt_ddb.h" 43 #include "opt_vm.h" 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/lock.h> 48 #include <sys/kernel.h> 49 #include <sys/malloc.h> 50 #include <sys/mutex.h> 51 #if MAXMEMDOM > 1 52 #include <sys/proc.h> 53 #endif 54 #include <sys/queue.h> 55 #include <sys/sbuf.h> 56 #include <sys/sysctl.h> 57 #include <sys/vmmeter.h> 58 59 #include <ddb/ddb.h> 60 61 #include <vm/vm.h> 62 #include <vm/vm_param.h> 63 #include <vm/vm_kern.h> 64 #include <vm/vm_object.h> 65 #include <vm/vm_page.h> 66 #include <vm/vm_phys.h> 67 68 struct vm_freelist { 69 struct pglist pl; 70 int lcnt; 71 }; 72 73 struct vm_phys_seg { 74 vm_paddr_t start; 75 vm_paddr_t end; 76 vm_page_t first_page; 77 int domain; 78 struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER]; 79 }; 80 81 struct mem_affinity *mem_affinity; 82 83 int vm_ndomains = 1; 84 85 static struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX]; 86 87 static int vm_phys_nsegs; 88 89 #define VM_PHYS_FICTITIOUS_NSEGS 8 90 static struct vm_phys_fictitious_seg { 91 vm_paddr_t start; 92 vm_paddr_t end; 93 vm_page_t first_page; 94 } vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS]; 95 static struct mtx vm_phys_fictitious_reg_mtx; 96 MALLOC_DEFINE(M_FICT_PAGES, "", ""); 97 98 static struct vm_freelist 99 vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER]; 100 101 static int vm_nfreelists = VM_FREELIST_DEFAULT + 1; 102 103 static int cnt_prezero; 104 SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD, 105 &cnt_prezero, 0, "The number of physical pages prezeroed at idle time"); 106 107 static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS); 108 SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD, 109 NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info"); 110 111 static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS); 112 SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD, 113 NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info"); 114 115 SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD, 116 &vm_ndomains, 0, "Number of physical memory domains available."); 117 118 static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool, 119 int order); 120 static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, 121 int domain); 122 static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind); 123 static int vm_phys_paddr_to_segind(vm_paddr_t pa); 124 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, 125 int order); 126 127 static __inline int 128 vm_rr_selectdomain(void) 129 { 130 #if MAXMEMDOM > 1 131 struct thread *td; 132 133 td = curthread; 134 135 td->td_dom_rr_idx++; 136 td->td_dom_rr_idx %= vm_ndomains; 137 return (td->td_dom_rr_idx); 138 #else 139 return (0); 140 #endif 141 } 142 143 /* 144 * Outputs the state of the physical memory allocator, specifically, 145 * the amount of physical memory in each free list. 
146 */ 147 static int 148 sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS) 149 { 150 struct sbuf sbuf; 151 struct vm_freelist *fl; 152 int dom, error, flind, oind, pind; 153 154 error = sysctl_wire_old_buffer(req, 0); 155 if (error != 0) 156 return (error); 157 sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req); 158 for (dom = 0; dom < vm_ndomains; dom++) { 159 sbuf_printf(&sbuf,"DOMAIN: %d\n", dom); 160 for (flind = 0; flind < vm_nfreelists; flind++) { 161 sbuf_printf(&sbuf, "FREE LIST %d:\n" 162 "\n ORDER (SIZE) | NUMBER" 163 "\n ", flind); 164 for (pind = 0; pind < VM_NFREEPOOL; pind++) 165 sbuf_printf(&sbuf, " | POOL %d", pind); 166 sbuf_printf(&sbuf, "\n-- "); 167 for (pind = 0; pind < VM_NFREEPOOL; pind++) 168 sbuf_printf(&sbuf, "-- -- "); 169 sbuf_printf(&sbuf, "--\n"); 170 for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) { 171 sbuf_printf(&sbuf, " %2d (%6dK)", oind, 172 1 << (PAGE_SHIFT - 10 + oind)); 173 for (pind = 0; pind < VM_NFREEPOOL; pind++) { 174 fl = vm_phys_free_queues[dom][flind][pind]; 175 sbuf_printf(&sbuf, " | %6.6d", 176 fl[oind].lcnt); 177 } 178 sbuf_printf(&sbuf, "\n"); 179 } 180 sbuf_printf(&sbuf, "\n"); 181 } 182 sbuf_printf(&sbuf, "\n"); 183 } 184 error = sbuf_finish(&sbuf); 185 sbuf_delete(&sbuf); 186 return (error); 187 } 188 189 /* 190 * Outputs the set of physical memory segments. 191 */ 192 static int 193 sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS) 194 { 195 struct sbuf sbuf; 196 struct vm_phys_seg *seg; 197 int error, segind; 198 199 error = sysctl_wire_old_buffer(req, 0); 200 if (error != 0) 201 return (error); 202 sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 203 for (segind = 0; segind < vm_phys_nsegs; segind++) { 204 sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind); 205 seg = &vm_phys_segs[segind]; 206 sbuf_printf(&sbuf, "start: %#jx\n", 207 (uintmax_t)seg->start); 208 sbuf_printf(&sbuf, "end: %#jx\n", 209 (uintmax_t)seg->end); 210 sbuf_printf(&sbuf, "domain: %d\n", seg->domain); 211 sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues); 212 } 213 error = sbuf_finish(&sbuf); 214 sbuf_delete(&sbuf); 215 return (error); 216 } 217 218 static void 219 vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail) 220 { 221 222 m->order = order; 223 if (tail) 224 TAILQ_INSERT_TAIL(&fl[order].pl, m, pageq); 225 else 226 TAILQ_INSERT_HEAD(&fl[order].pl, m, pageq); 227 fl[order].lcnt++; 228 } 229 230 static void 231 vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order) 232 { 233 234 TAILQ_REMOVE(&fl[order].pl, m, pageq); 235 fl[order].lcnt--; 236 m->order = VM_NFREEORDER; 237 } 238 239 /* 240 * Create a physical memory segment. 
241 */ 242 static void 243 _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain) 244 { 245 struct vm_phys_seg *seg; 246 #ifdef VM_PHYSSEG_SPARSE 247 long pages; 248 int segind; 249 250 pages = 0; 251 for (segind = 0; segind < vm_phys_nsegs; segind++) { 252 seg = &vm_phys_segs[segind]; 253 pages += atop(seg->end - seg->start); 254 } 255 #endif 256 KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX, 257 ("vm_phys_create_seg: increase VM_PHYSSEG_MAX")); 258 KASSERT(domain < vm_ndomains, 259 ("vm_phys_create_seg: invalid domain provided")); 260 seg = &vm_phys_segs[vm_phys_nsegs++]; 261 seg->start = start; 262 seg->end = end; 263 seg->domain = domain; 264 #ifdef VM_PHYSSEG_SPARSE 265 seg->first_page = &vm_page_array[pages]; 266 #else 267 seg->first_page = PHYS_TO_VM_PAGE(start); 268 #endif 269 seg->free_queues = &vm_phys_free_queues[domain][flind]; 270 } 271 272 static void 273 vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind) 274 { 275 int i; 276 277 if (mem_affinity == NULL) { 278 _vm_phys_create_seg(start, end, flind, 0); 279 return; 280 } 281 282 for (i = 0;; i++) { 283 if (mem_affinity[i].end == 0) 284 panic("Reached end of affinity info"); 285 if (mem_affinity[i].end <= start) 286 continue; 287 if (mem_affinity[i].start > start) 288 panic("No affinity info for start %jx", 289 (uintmax_t)start); 290 if (mem_affinity[i].end >= end) { 291 _vm_phys_create_seg(start, end, flind, 292 mem_affinity[i].domain); 293 break; 294 } 295 _vm_phys_create_seg(start, mem_affinity[i].end, flind, 296 mem_affinity[i].domain); 297 start = mem_affinity[i].end; 298 } 299 } 300 301 /* 302 * Initialize the physical memory allocator. 303 */ 304 void 305 vm_phys_init(void) 306 { 307 struct vm_freelist *fl; 308 int dom, flind, i, oind, pind; 309 310 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 311 #ifdef VM_FREELIST_ISADMA 312 if (phys_avail[i] < 16777216) { 313 if (phys_avail[i + 1] > 16777216) { 314 vm_phys_create_seg(phys_avail[i], 16777216, 315 VM_FREELIST_ISADMA); 316 vm_phys_create_seg(16777216, phys_avail[i + 1], 317 VM_FREELIST_DEFAULT); 318 } else { 319 vm_phys_create_seg(phys_avail[i], 320 phys_avail[i + 1], VM_FREELIST_ISADMA); 321 } 322 if (VM_FREELIST_ISADMA >= vm_nfreelists) 323 vm_nfreelists = VM_FREELIST_ISADMA + 1; 324 } else 325 #endif 326 #ifdef VM_FREELIST_HIGHMEM 327 if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) { 328 if (phys_avail[i] < VM_HIGHMEM_ADDRESS) { 329 vm_phys_create_seg(phys_avail[i], 330 VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT); 331 vm_phys_create_seg(VM_HIGHMEM_ADDRESS, 332 phys_avail[i + 1], VM_FREELIST_HIGHMEM); 333 } else { 334 vm_phys_create_seg(phys_avail[i], 335 phys_avail[i + 1], VM_FREELIST_HIGHMEM); 336 } 337 if (VM_FREELIST_HIGHMEM >= vm_nfreelists) 338 vm_nfreelists = VM_FREELIST_HIGHMEM + 1; 339 } else 340 #endif 341 vm_phys_create_seg(phys_avail[i], phys_avail[i + 1], 342 VM_FREELIST_DEFAULT); 343 } 344 for (dom = 0; dom < vm_ndomains; dom++) { 345 for (flind = 0; flind < vm_nfreelists; flind++) { 346 for (pind = 0; pind < VM_NFREEPOOL; pind++) { 347 fl = vm_phys_free_queues[dom][flind][pind]; 348 for (oind = 0; oind < VM_NFREEORDER; oind++) 349 TAILQ_INIT(&fl[oind].pl); 350 } 351 } 352 } 353 mtx_init(&vm_phys_fictitious_reg_mtx, "vmfctr", NULL, MTX_DEF); 354 } 355 356 /* 357 * Split a contiguous, power of two-sized set of physical pages. 
358 */ 359 static __inline void 360 vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order) 361 { 362 vm_page_t m_buddy; 363 364 while (oind > order) { 365 oind--; 366 m_buddy = &m[1 << oind]; 367 KASSERT(m_buddy->order == VM_NFREEORDER, 368 ("vm_phys_split_pages: page %p has unexpected order %d", 369 m_buddy, m_buddy->order)); 370 vm_freelist_add(fl, m_buddy, oind, 0); 371 } 372 } 373 374 /* 375 * Initialize a physical page and add it to the free lists. 376 */ 377 void 378 vm_phys_add_page(vm_paddr_t pa) 379 { 380 vm_page_t m; 381 382 cnt.v_page_count++; 383 m = vm_phys_paddr_to_vm_page(pa); 384 m->phys_addr = pa; 385 m->queue = PQ_NONE; 386 m->segind = vm_phys_paddr_to_segind(pa); 387 m->flags = PG_FREE; 388 KASSERT(m->order == VM_NFREEORDER, 389 ("vm_phys_add_page: page %p has unexpected order %d", 390 m, m->order)); 391 m->pool = VM_FREEPOOL_DEFAULT; 392 pmap_page_init(m); 393 mtx_lock(&vm_page_queue_free_mtx); 394 cnt.v_free_count++; 395 vm_phys_free_pages(m, 0); 396 mtx_unlock(&vm_page_queue_free_mtx); 397 } 398 399 /* 400 * Allocate a contiguous, power of two-sized set of physical pages 401 * from the free lists. 402 * 403 * The free page queues must be locked. 404 */ 405 vm_page_t 406 vm_phys_alloc_pages(int pool, int order) 407 { 408 vm_page_t m; 409 int dom, domain, flind; 410 411 KASSERT(pool < VM_NFREEPOOL, 412 ("vm_phys_alloc_pages: pool %d is out of range", pool)); 413 KASSERT(order < VM_NFREEORDER, 414 ("vm_phys_alloc_pages: order %d is out of range", order)); 415 416 for (dom = 0; dom < vm_ndomains; dom++) { 417 domain = vm_rr_selectdomain(); 418 for (flind = 0; flind < vm_nfreelists; flind++) { 419 m = vm_phys_alloc_domain_pages(domain, flind, pool, 420 order); 421 if (m != NULL) 422 return (m); 423 } 424 } 425 return (NULL); 426 } 427 428 /* 429 * Find and dequeue a free page on the given free list, with the 430 * specified pool and order 431 */ 432 vm_page_t 433 vm_phys_alloc_freelist_pages(int flind, int pool, int order) 434 { 435 vm_page_t m; 436 int dom, domain; 437 438 KASSERT(flind < VM_NFREELIST, 439 ("vm_phys_alloc_freelist_pages: freelist %d is out of range", flind)); 440 KASSERT(pool < VM_NFREEPOOL, 441 ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool)); 442 KASSERT(order < VM_NFREEORDER, 443 ("vm_phys_alloc_freelist_pages: order %d is out of range", order)); 444 445 for (dom = 0; dom < vm_ndomains; dom++) { 446 domain = vm_rr_selectdomain(); 447 m = vm_phys_alloc_domain_pages(domain, flind, pool, order); 448 if (m != NULL) 449 return (m); 450 } 451 return (NULL); 452 } 453 454 static vm_page_t 455 vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order) 456 { 457 struct vm_freelist *fl; 458 struct vm_freelist *alt; 459 int oind, pind; 460 vm_page_t m; 461 462 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 463 fl = &vm_phys_free_queues[domain][flind][pool][0]; 464 for (oind = order; oind < VM_NFREEORDER; oind++) { 465 m = TAILQ_FIRST(&fl[oind].pl); 466 if (m != NULL) { 467 vm_freelist_rem(fl, m, oind); 468 vm_phys_split_pages(m, oind, fl, order); 469 return (m); 470 } 471 } 472 473 /* 474 * The given pool was empty. Find the largest 475 * contiguous, power-of-two-sized set of pages in any 476 * pool. Transfer these pages to the given pool, and 477 * use them to satisfy the allocation. 
478 */ 479 for (oind = VM_NFREEORDER - 1; oind >= order; oind--) { 480 for (pind = 0; pind < VM_NFREEPOOL; pind++) { 481 alt = &vm_phys_free_queues[domain][flind][pind][0]; 482 m = TAILQ_FIRST(&alt[oind].pl); 483 if (m != NULL) { 484 vm_freelist_rem(alt, m, oind); 485 vm_phys_set_pool(pool, m, oind); 486 vm_phys_split_pages(m, oind, fl, order); 487 return (m); 488 } 489 } 490 } 491 return (NULL); 492 } 493 494 /* 495 * Find the vm_page corresponding to the given physical address. 496 */ 497 vm_page_t 498 vm_phys_paddr_to_vm_page(vm_paddr_t pa) 499 { 500 struct vm_phys_seg *seg; 501 int segind; 502 503 for (segind = 0; segind < vm_phys_nsegs; segind++) { 504 seg = &vm_phys_segs[segind]; 505 if (pa >= seg->start && pa < seg->end) 506 return (&seg->first_page[atop(pa - seg->start)]); 507 } 508 return (NULL); 509 } 510 511 vm_page_t 512 vm_phys_fictitious_to_vm_page(vm_paddr_t pa) 513 { 514 struct vm_phys_fictitious_seg *seg; 515 vm_page_t m; 516 int segind; 517 518 m = NULL; 519 for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) { 520 seg = &vm_phys_fictitious_segs[segind]; 521 if (pa >= seg->start && pa < seg->end) { 522 m = &seg->first_page[atop(pa - seg->start)]; 523 KASSERT((m->flags & PG_FICTITIOUS) != 0, 524 ("%p not fictitious", m)); 525 break; 526 } 527 } 528 return (m); 529 } 530 531 int 532 vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end, 533 vm_memattr_t memattr) 534 { 535 struct vm_phys_fictitious_seg *seg; 536 vm_page_t fp; 537 long i, page_count; 538 int segind; 539 #ifdef VM_PHYSSEG_DENSE 540 long pi; 541 boolean_t malloced; 542 #endif 543 544 page_count = (end - start) / PAGE_SIZE; 545 546 #ifdef VM_PHYSSEG_DENSE 547 pi = atop(start); 548 if (pi >= first_page && atop(end) < vm_page_array_size) { 549 fp = &vm_page_array[pi - first_page]; 550 malloced = FALSE; 551 } else 552 #endif 553 { 554 fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES, 555 M_WAITOK | M_ZERO); 556 #ifdef VM_PHYSSEG_DENSE 557 malloced = TRUE; 558 #endif 559 } 560 for (i = 0; i < page_count; i++) { 561 vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr); 562 fp[i].oflags &= ~(VPO_BUSY | VPO_UNMANAGED); 563 } 564 mtx_lock(&vm_phys_fictitious_reg_mtx); 565 for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) { 566 seg = &vm_phys_fictitious_segs[segind]; 567 if (seg->start == 0 && seg->end == 0) { 568 seg->start = start; 569 seg->end = end; 570 seg->first_page = fp; 571 mtx_unlock(&vm_phys_fictitious_reg_mtx); 572 return (0); 573 } 574 } 575 mtx_unlock(&vm_phys_fictitious_reg_mtx); 576 #ifdef VM_PHYSSEG_DENSE 577 if (malloced) 578 #endif 579 free(fp, M_FICT_PAGES); 580 return (EBUSY); 581 } 582 583 void 584 vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end) 585 { 586 struct vm_phys_fictitious_seg *seg; 587 vm_page_t fp; 588 int segind; 589 #ifdef VM_PHYSSEG_DENSE 590 long pi; 591 #endif 592 593 #ifdef VM_PHYSSEG_DENSE 594 pi = atop(start); 595 #endif 596 597 mtx_lock(&vm_phys_fictitious_reg_mtx); 598 for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) { 599 seg = &vm_phys_fictitious_segs[segind]; 600 if (seg->start == start && seg->end == end) { 601 seg->start = seg->end = 0; 602 fp = seg->first_page; 603 seg->first_page = NULL; 604 mtx_unlock(&vm_phys_fictitious_reg_mtx); 605 #ifdef VM_PHYSSEG_DENSE 606 if (pi < first_page || atop(end) >= vm_page_array_size) 607 #endif 608 free(fp, M_FICT_PAGES); 609 return; 610 } 611 } 612 mtx_unlock(&vm_phys_fictitious_reg_mtx); 613 KASSERT(0, ("Unregistering not registered fictitious range")); 614 } 

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
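/*
 * Worked example (illustrative): if "m" is the third page of a free order-2
 * block, the loop below removes that order-2 block from its free list,
 * returns its lower order-1 half and then the order-0 buddy of "m" to the
 * free lists, and leaves "m" itself dequeued.
 */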
738 */ 739 boolean_t 740 vm_phys_unfree_page(vm_page_t m) 741 { 742 struct vm_freelist *fl; 743 struct vm_phys_seg *seg; 744 vm_paddr_t pa, pa_half; 745 vm_page_t m_set, m_tmp; 746 int order; 747 748 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 749 750 /* 751 * First, find the contiguous, power of two-sized set of free 752 * physical pages containing the given physical page "m" and 753 * assign it to "m_set". 754 */ 755 seg = &vm_phys_segs[m->segind]; 756 for (m_set = m, order = 0; m_set->order == VM_NFREEORDER && 757 order < VM_NFREEORDER - 1; ) { 758 order++; 759 pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order)); 760 if (pa >= seg->start) 761 m_set = &seg->first_page[atop(pa - seg->start)]; 762 else 763 return (FALSE); 764 } 765 if (m_set->order < order) 766 return (FALSE); 767 if (m_set->order == VM_NFREEORDER) 768 return (FALSE); 769 KASSERT(m_set->order < VM_NFREEORDER, 770 ("vm_phys_unfree_page: page %p has unexpected order %d", 771 m_set, m_set->order)); 772 773 /* 774 * Next, remove "m_set" from the free lists. Finally, extract 775 * "m" from "m_set" using an iterative algorithm: While "m_set" 776 * is larger than a page, shrink "m_set" by returning the half 777 * of "m_set" that does not contain "m" to the free lists. 778 */ 779 fl = (*seg->free_queues)[m_set->pool]; 780 order = m_set->order; 781 vm_freelist_rem(fl, m_set, order); 782 while (order > 0) { 783 order--; 784 pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order)); 785 if (m->phys_addr < pa_half) 786 m_tmp = &seg->first_page[atop(pa_half - seg->start)]; 787 else { 788 m_tmp = m_set; 789 m_set = &seg->first_page[atop(pa_half - seg->start)]; 790 } 791 vm_freelist_add(fl, m_tmp, order, 0); 792 } 793 KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency")); 794 return (TRUE); 795 } 796 797 /* 798 * Try to zero one physical page. Used by an idle priority thread. 799 */ 800 boolean_t 801 vm_phys_zero_pages_idle(void) 802 { 803 static struct vm_freelist *fl; 804 static int flind, oind, pind; 805 vm_page_t m, m_tmp; 806 int domain; 807 808 domain = vm_rr_selectdomain(); 809 fl = vm_phys_free_queues[domain][0][0]; 810 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 811 for (;;) { 812 TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, pageq) { 813 for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) { 814 if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) { 815 vm_phys_unfree_page(m_tmp); 816 cnt.v_free_count--; 817 mtx_unlock(&vm_page_queue_free_mtx); 818 pmap_zero_page_idle(m_tmp); 819 m_tmp->flags |= PG_ZERO; 820 mtx_lock(&vm_page_queue_free_mtx); 821 cnt.v_free_count++; 822 vm_phys_free_pages(m_tmp, 0); 823 vm_page_zero_count++; 824 cnt_prezero++; 825 return (TRUE); 826 } 827 } 828 } 829 oind++; 830 if (oind == VM_NFREEORDER) { 831 oind = 0; 832 pind++; 833 if (pind == VM_NFREEPOOL) { 834 pind = 0; 835 flind++; 836 if (flind == vm_nfreelists) 837 flind = 0; 838 } 839 fl = vm_phys_free_queues[domain][flind][pind]; 840 } 841 } 842 } 843 844 /* 845 * Allocate a contiguous set of physical pages of the given size 846 * "npages" from the free lists. All of the physical pages must be at 847 * or above the given physical address "low" and below the given 848 * physical address "high". The given value "alignment" determines the 849 * alignment of the first physical page in the set. If the given value 850 * "boundary" is non-zero, then the set of physical pages cannot cross 851 * any physical address boundary that is a multiple of that value. Both 852 * "alignment" and "boundary" must be a power of two. 
853 */ 854 vm_page_t 855 vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high, 856 u_long alignment, vm_paddr_t boundary) 857 { 858 struct vm_freelist *fl; 859 struct vm_phys_seg *seg; 860 vm_paddr_t pa, pa_last, size; 861 vm_page_t m, m_ret; 862 u_long npages_end; 863 int dom, domain, flind, oind, order, pind; 864 865 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 866 size = npages << PAGE_SHIFT; 867 KASSERT(size != 0, 868 ("vm_phys_alloc_contig: size must not be 0")); 869 KASSERT((alignment & (alignment - 1)) == 0, 870 ("vm_phys_alloc_contig: alignment must be a power of 2")); 871 KASSERT((boundary & (boundary - 1)) == 0, 872 ("vm_phys_alloc_contig: boundary must be a power of 2")); 873 /* Compute the queue that is the best fit for npages. */ 874 for (order = 0; (1 << order) < npages; order++); 875 dom = 0; 876 restartdom: 877 domain = vm_rr_selectdomain(); 878 for (flind = 0; flind < vm_nfreelists; flind++) { 879 for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) { 880 for (pind = 0; pind < VM_NFREEPOOL; pind++) { 881 fl = &vm_phys_free_queues[domain][flind][pind][0]; 882 TAILQ_FOREACH(m_ret, &fl[oind].pl, pageq) { 883 /* 884 * A free list may contain physical pages 885 * from one or more segments. 886 */ 887 seg = &vm_phys_segs[m_ret->segind]; 888 if (seg->start > high || 889 low >= seg->end) 890 continue; 891 892 /* 893 * Is the size of this allocation request 894 * larger than the largest block size? 895 */ 896 if (order >= VM_NFREEORDER) { 897 /* 898 * Determine if a sufficient number 899 * of subsequent blocks to satisfy 900 * the allocation request are free. 901 */ 902 pa = VM_PAGE_TO_PHYS(m_ret); 903 pa_last = pa + size; 904 for (;;) { 905 pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1); 906 if (pa >= pa_last) 907 break; 908 if (pa < seg->start || 909 pa >= seg->end) 910 break; 911 m = &seg->first_page[atop(pa - seg->start)]; 912 if (m->order != VM_NFREEORDER - 1) 913 break; 914 } 915 /* If not, continue to the next block. */ 916 if (pa < pa_last) 917 continue; 918 } 919 920 /* 921 * Determine if the blocks are within the given range, 922 * satisfy the given alignment, and do not cross the 923 * given boundary. 924 */ 925 pa = VM_PAGE_TO_PHYS(m_ret); 926 if (pa >= low && 927 pa + size <= high && 928 (pa & (alignment - 1)) == 0 && 929 ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0) 930 goto done; 931 } 932 } 933 } 934 } 935 if (++dom < vm_ndomains) 936 goto restartdom; 937 return (NULL); 938 done: 939 for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) { 940 fl = (*seg->free_queues)[m->pool]; 941 vm_freelist_rem(fl, m, m->order); 942 } 943 if (m_ret->pool != VM_FREEPOOL_DEFAULT) 944 vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind); 945 fl = (*seg->free_queues)[m_ret->pool]; 946 vm_phys_split_pages(m_ret, oind, fl, order); 947 /* Return excess pages to the free lists. */ 948 npages_end = roundup2(npages, 1 << imin(oind, order)); 949 if (npages < npages_end) 950 vm_phys_free_contig(&m_ret[npages], npages_end - npages); 951 return (m_ret); 952 } 953 954 #ifdef DDB 955 /* 956 * Show the number of physical pages in each of the free lists. 
957 */ 958 DB_SHOW_COMMAND(freepages, db_show_freepages) 959 { 960 struct vm_freelist *fl; 961 int flind, oind, pind, dom; 962 963 for (dom = 0; dom < vm_ndomains; dom++) { 964 db_printf("DOMAIN: %d\n", dom); 965 for (flind = 0; flind < vm_nfreelists; flind++) { 966 db_printf("FREE LIST %d:\n" 967 "\n ORDER (SIZE) | NUMBER" 968 "\n ", flind); 969 for (pind = 0; pind < VM_NFREEPOOL; pind++) 970 db_printf(" | POOL %d", pind); 971 db_printf("\n-- "); 972 for (pind = 0; pind < VM_NFREEPOOL; pind++) 973 db_printf("-- -- "); 974 db_printf("--\n"); 975 for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) { 976 db_printf(" %2.2d (%6.6dK)", oind, 977 1 << (PAGE_SHIFT - 10 + oind)); 978 for (pind = 0; pind < VM_NFREEPOOL; pind++) { 979 fl = vm_phys_free_queues[dom][flind][pind]; 980 db_printf(" | %6.6d", fl[oind].lcnt); 981 } 982 db_printf("\n"); 983 } 984 db_printf("\n"); 985 } 986 db_printf("\n"); 987 } 988 } 989 #endif 990