/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.69 1996/10/15 03:16:45 dyson Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

static void vm_page_queue_init __P((void));
static vm_page_t vm_page_select_free __P((vm_object_t object,
			vm_pindex_t pindex, int prefqueue));

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

static struct pglist *vm_page_buckets;	/* Array of buckets */
static int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */

struct pglist vm_page_queue_free[PQ_L2_SIZE];
struct pglist vm_page_queue_zero[PQ_L2_SIZE];
struct pglist vm_page_queue_active;
struct pglist vm_page_queue_inactive;
struct pglist vm_page_queue_cache[PQ_L2_SIZE];

int no_queue;

struct vpgqueues vm_page_queues[PQ_COUNT];
int pqcnt[PQ_COUNT];

static void
vm_page_queue_init(void) {
	int i;

	vm_page_queues[PQ_NONE].pl = NULL;
	vm_page_queues[PQ_NONE].cnt = &no_queue;
	for(i=0;i<PQ_L2_SIZE;i++) {
		vm_page_queues[PQ_FREE+i].pl = &vm_page_queue_free[i];
		vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
	}
	for(i=0;i<PQ_L2_SIZE;i++) {
		vm_page_queues[PQ_ZERO+i].pl = &vm_page_queue_zero[i];
		vm_page_queues[PQ_ZERO+i].cnt = &cnt.v_free_count;
	}
	vm_page_queues[PQ_INACTIVE].pl = &vm_page_queue_inactive;
	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;

	vm_page_queues[PQ_ACTIVE].pl = &vm_page_queue_active;
	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	for(i=0;i<PQ_L2_SIZE;i++) {
		vm_page_queues[PQ_CACHE+i].pl = &vm_page_queue_cache[i];
		vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
	}
	for(i=0;i<PQ_COUNT;i++) {
		if (vm_page_queues[i].pl) {
			TAILQ_INIT(vm_page_queues[i].pl);
		} else if (i != 0) {
			panic("vm_page_queue_init: queue %d is null", i);
		}
		vm_page_queues[i].lcnt = &pqcnt[i];
	}
}

vm_page_t vm_page_array;
int vm_page_array_size;
long first_page;
static long last_page;
static vm_size_t page_mask;
static int page_shift;
int vm_page_zero_count;

/*
 * map of contiguous valid DEV_BSIZE chunks in a page
 * (this list is valid for page sizes up to 16*DEV_BSIZE)
 */
static u_short vm_page_dev_bsize_chunks[] = {
	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};
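
/*
 * Worked example (informational only): entry k of the map above has its
 * low k bits set, so it describes k contiguous DEV_BSIZE chunks starting
 * at the beginning of the covered range.  Assuming the usual DEV_BSIZE of
 * 512, vm_page_dev_bsize_chunks[2] == 0x3 covers the first two 512-byte
 * chunks (1024 bytes), and with a 4096-byte page,
 * vm_page_dev_bsize_chunks[8] == 0xff covers the whole page.  The table
 * is consumed by vm_page_bits() later in this file.
 */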

static inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex));
static int vm_page_freechk_and_unqueue __P((vm_page_t m));
static void vm_page_free_wakeup __P((void));

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void
vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0;; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t starta;
	vm_offset_t enda;
	register vm_offset_t vaddr;
{
	register vm_offset_t mapped;
	register vm_page_t m;
	register struct pglist *bucket;
	vm_size_t npages, page_range;
	register vm_offset_t new_start;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t first_managed_page;

	/* the biggest memory array is the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */

	vm_page_queue_init();

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the smallest power of 2 greater than or equal to the number of
	 * physical pages in the system.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct pglist *) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
	new_start = round_page(new_start);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = 0; i < vm_page_bucket_count; i++) {
		TAILQ_INIT(bucket);
		bucket++;
	}
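
	/*
	 * Worked example (informational only): on a machine with, say,
	 * 5000 physical pages, the bucket-count loop above settles on the
	 * smallest power of two not less than atop(total), giving
	 * vm_page_bucket_count == 8192 and vm_page_hash_mask == 0x1fff.
	 */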

	/*
	 * round (or truncate) the addresses to our page size.
	 */

	/*
	 * Pre-allocate maps and map entries that cannot be dynamically
	 * allocated via malloc().  The maps include the kernel_map and
	 * kmem_map which must be initialized before malloc() will work
	 * (obviously).  Also could include pager maps which would be
	 * allocated before kmeminit.
	 *
	 * Allow some kernel map entries... this should be plenty since people
	 * shouldn't be cluttering up the kernel map (they should use their
	 * own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
	    MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 * Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	start = round_page(new_start);

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */

	first_page = phys_avail[0] / PAGE_SIZE;
	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;

	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (start - phys_avail[biggestone])) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */

	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */

	new_start = round_page(start + page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->phys_addr = pa;
			m->flags = 0;
			m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
			m->queue = PQ_FREE + m->pc;
			TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
			++(*vm_page_queues[m->queue].lcnt);
			pa += PAGE_SIZE;
		}
	}

	return (mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This function depends on vm_page_bucket_count being a power of 2.
 */
static inline int
vm_page_hash(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	return ((((unsigned) object) >> 5) + (pindex >> 1)) & vm_page_hash_mask;
}
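
/*
 * Worked example (informational only, with made-up values): for an object
 * whose pointer value is 0xf0123400 and a pindex of 10, the bucket index
 * is ((0xf0123400 >> 5) + (10 >> 1)) & vm_page_hash_mask.  The right
 * shift discards the low bits of the object pointer, which carry little
 * information because objects are allocated on aligned boundaries, and
 * folding in pindex >> 1 spreads the pages of a single object across
 * several buckets.
 */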

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object and page must be locked, and must be at splhigh.
 */

void
vm_page_insert(m, object, pindex)
	register vm_page_t m;
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register struct pglist *bucket;

	if (m->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */

	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	TAILQ_INSERT_TAIL(bucket, m, hashq);

	/*
	 * Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, m, listq);
	m->flags |= PG_TABLED;
	m->object->page_hint = m;

	/*
	 * And show that the object has one more resident page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked, and at splhigh.
 */

void
vm_page_remove(m)
	register vm_page_t m;
{
	register struct pglist *bucket;

	if (!(m->flags & PG_TABLED))
		return;

	if (m->object->page_hint == m)
		m->object->page_hint = NULL;

	/*
	 * Remove from the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
	TAILQ_REMOVE(bucket, m, hashq);

	/*
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&m->object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	m->object->resident_page_count--;

	m->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t
vm_page_lookup(object, pindex)
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register vm_page_t m;
	register struct pglist *bucket;
	int s;

	/*
	 * Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];

	s = splvm();
	for (m = TAILQ_FIRST(bucket); m != NULL; m = TAILQ_NEXT(m, hashq)) {
		if ((m->object == object) && (m->pindex == pindex)) {
			splx(s);
			m->object->page_hint = m;
			return (m);
		}
	}
	splx(s);
	return (NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(m, new_object, new_pindex)
	register vm_page_t m;
	register vm_object_t new_object;
	vm_pindex_t new_pindex;
{
	int s;

	s = splvm();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	splx(s);
}

/*
 * vm_page_unqueue without any wakeup
 */
void
vm_page_unqueue_nowakeup(m)
	vm_page_t m;
{
	int queue = m->queue;
	struct vpgqueues *pq;
	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(pq->pl, m, pageq);
		--(*pq->cnt);
		--(*pq->lcnt);
	}
}

/*
 * vm_page_unqueue must be called at splhigh();
 */
void
vm_page_unqueue(m)
	vm_page_t m;
{
	int queue = m->queue;
	struct vpgqueues *pq;
	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(pq->pl, m, pageq);
		--(*pq->cnt);
		--(*pq->lcnt);
		if ((queue - m->pc) == PQ_CACHE) {
			if ((cnt.v_cache_count + cnt.v_free_count) <
			    (cnt.v_free_reserved + cnt.v_cache_min))
				pagedaemon_wakeup();
		}
	}
}
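
/*
 * Page coloring note (informational only): a page's color, m->pc, is
 * derived from its physical address in vm_page_startup() as
 * (pa >> PAGE_SHIFT) & PQ_L2_MASK, and the free, zero and cache queues
 * are split into PQ_L2_SIZE per-color lists indexed as PQ_FREE + color,
 * PQ_ZERO + color and PQ_CACHE + color.  The lookup routines below try
 * to hand back a page whose color matches (pindex + object->pg_color),
 * which tends to spread an object's pages across the secondary cache.
 */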

/*
 * Find a page on the specified queue with color optimization.
 */
vm_page_t
vm_page_list_find(basequeue, index)
	int basequeue, index;
{
#if PQ_L2_SIZE > 1

	int i,j;
	vm_page_t m;
	int hindex;

	for(j = 0; j < PQ_L1_SIZE; j++) {
		for(i = (PQ_L2_SIZE/2) - (PQ_L1_SIZE - 1);
			i >= 0;
			i -= PQ_L1_SIZE) {
			hindex = (index + (i+j)) & PQ_L2_MASK;
			m = TAILQ_FIRST(vm_page_queues[basequeue + hindex].pl);
			if (m)
				return m;

			hindex = (index - (i+j)) & PQ_L2_MASK;
			m = TAILQ_FIRST(vm_page_queues[basequeue + hindex].pl);
			if (m)
				return m;
		}
	}
	return NULL;
#else
	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
#endif

}

/*
 * Find a page on the specified queue, preferring the color derived
 * from the object/pindex pair.
 */
vm_page_t
vm_page_select(object, pindex, basequeue)
	vm_object_t object;
	vm_pindex_t pindex;
	int basequeue;
{

#if PQ_L2_SIZE > 1
	int index;
	index = (pindex + object->pg_color) & PQ_L2_MASK;
	return vm_page_list_find(basequeue, index);

#else
	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
#endif

}

/*
 * Find a free or zero page, with specified preference.
 */
static vm_page_t
vm_page_select_free(object, pindex, prefqueue)
	vm_object_t object;
	vm_pindex_t pindex;
	int prefqueue;
{
#if PQ_L2_SIZE > 1
	int i,j;
	int index, hindex;
#endif
	vm_page_t m;
	int oqueuediff;

	if (prefqueue == PQ_ZERO)
		oqueuediff = PQ_FREE - PQ_ZERO;
	else
		oqueuediff = PQ_ZERO - PQ_FREE;

	if (object->page_hint) {
		if (object->page_hint->pindex == (pindex - 1)) {
			vm_offset_t last_phys;
			if ((object->page_hint->flags & PG_FICTITIOUS) == 0) {
				if ((object->page_hint < &vm_page_array[cnt.v_page_count-1]) &&
				    (object->page_hint >= &vm_page_array[0])) {
					int queue;
					last_phys = VM_PAGE_TO_PHYS(object->page_hint);
					m = PHYS_TO_VM_PAGE(last_phys + PAGE_SIZE);
					queue = m->queue - m->pc;
					if (queue == PQ_FREE || queue == PQ_ZERO) {
						return m;
					}
				}
			}
		}
	}

#if PQ_L2_SIZE > 1

	index = pindex + object->pg_color;
	for(j = 0; j < PQ_L1_SIZE; j++) {
		for(i = (PQ_L2_SIZE/2) - (PQ_L1_SIZE - 1);
			(i + j) >= 0;
			i -= PQ_L1_SIZE) {

			hindex = prefqueue + ((index + (i+j)) & PQ_L2_MASK);
			if ((m = TAILQ_FIRST(vm_page_queues[hindex].pl)) != NULL)
				return m;
			if ((m = TAILQ_FIRST(vm_page_queues[hindex + oqueuediff].pl)) != NULL)
				return m;

			hindex = prefqueue + ((index - (i+j)) & PQ_L2_MASK);
			if ((m = TAILQ_FIRST(vm_page_queues[hindex].pl)) != NULL)
				return m;
			if ((m = TAILQ_FIRST(vm_page_queues[hindex + oqueuediff].pl)) != NULL)
				return m;
		}
	}
#else
	if ((m = TAILQ_FIRST(vm_page_queues[prefqueue].pl)) != NULL)
		return m;
	else
		return TAILQ_FIRST(vm_page_queues[prefqueue + oqueuediff].pl);
#endif

	return NULL;
}
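
/*
 * Note (informational only): vm_page_select_free() searches the preferred
 * queue and its companion queue in the same pass, so a VM_ALLOC_ZERO
 * request (prefqueue == PQ_ZERO) falls back to an ordinary PQ_FREE page
 * of the same color when no pre-zeroed page is available, and a plain
 * request falls back to PQ_ZERO pages the same way; oqueuediff is simply
 * the constant offset between the two queue bases.
 */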

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	VM_ALLOC_ZERO		zero page
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, pindex, page_req)
	vm_object_t object;
	vm_pindex_t pindex;
	int page_req;
{
	register vm_page_t m;
	struct vpgqueues *pq;
	int queue;
	int s;

#ifdef DIAGNOSTIC
	m = vm_page_lookup(object, pindex);
	if (m)
		panic("vm_page_alloc: page already allocated");
#endif

	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	s = splvm();

	switch (page_req) {

	case VM_ALLOC_NORMAL:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(NORMAL): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select(object, pindex, PQ_CACHE);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_ZERO:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			m = vm_page_select_free(object, pindex, PQ_ZERO);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(ZERO): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select(object, pindex, PQ_CACHE);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(ZERO): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_SYSTEM:
		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
		    ((cnt.v_cache_count == 0) &&
		     (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(SYSTEM): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select(object, pindex, PQ_CACHE);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(SYSTEM): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_INTERRUPT:
		if (cnt.v_free_count > 0) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(INTERRUPT): missing page on free queue\n");
#endif
		} else {
			splx(s);
			pagedaemon_wakeup();
			return (NULL);
		}
		break;

	default:
		panic("vm_page_alloc: invalid allocation class");
	}

	queue = m->queue;
	if ((queue - m->pc) == PQ_ZERO)
		--vm_page_zero_count;
	pq = &vm_page_queues[queue];
	TAILQ_REMOVE(pq->pl, m, pageq);
	--(*pq->cnt);
	--(*pq->lcnt);
	if ((m->queue - m->pc) == PQ_ZERO) {
		m->flags = PG_ZERO|PG_BUSY;
	} else if ((m->queue - m->pc) == PQ_CACHE) {
		vm_page_remove(m);
		m->flags = PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}
	m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;
	m->dirty = 0;
	m->queue = PQ_NONE;

	/* XXX before splx until vm_page_insert is safe */
	vm_page_insert(m, object, pindex);

	splx(s);

	/*
	 * Don't wake up the pageout daemon too often - only wake it
	 * when we would be nearly out of memory.
	 */
	if (((cnt.v_free_count + cnt.v_cache_count) <
	    (cnt.v_free_reserved + cnt.v_cache_min)) ||
	    (cnt.v_free_count < cnt.v_pageout_free_min))
		pagedaemon_wakeup();

	return (m);
}
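
/*
 * Usage sketch (informational only; the caller shown is hypothetical):
 * a pager or fault handler typically allocates a page for an object with
 * the object locked, and retries after sleeping when the request fails
 * because memory is tight, e.g.
 *
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 *	if (m == NULL) {
 *		VM_WAIT;		(sleep until memory recovers)
 *		... retry the allocation ...
 *	}
 *
 * A NULL return has already woken the pageout daemon; VM_WAIT merely
 * puts the caller to sleep until free memory is replenished.
 */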

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void
vm_page_activate(m)
	register vm_page_t m;
{
	int s;

	s = splvm();
	if (m->queue == PQ_ACTIVE)
		panic("vm_page_activate: already active");

	if ((m->queue - m->pc) == PQ_CACHE)
		cnt.v_reactivated++;

	vm_page_unqueue(m);

	if (m->wire_count == 0) {
		m->queue = PQ_ACTIVE;
		++(*vm_page_queues[PQ_ACTIVE].lcnt);
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
		cnt.v_active_count++;
	}
	splx(s);
}

/*
 * helper routine for vm_page_free and vm_page_free_zero
 */
static int
vm_page_freechk_and_unqueue(m)
	vm_page_t m;
{
	if (m->busy ||
	    (m->flags & PG_BUSY) ||
	    ((m->queue - m->pc) == PQ_FREE) ||
	    (m->hold_count != 0)) {
		printf("vm_page_free: pindex(%ld), busy(%d), PG_BUSY(%d), hold(%d)\n",
			m->pindex, m->busy,
			(m->flags & PG_BUSY) ? 1 : 0, m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}

	vm_page_remove(m);
	vm_page_unqueue_nowakeup(m);
	if ((m->flags & PG_FICTITIOUS) != 0) {
		return 0;
	}
	if (m->wire_count != 0) {
		if (m->wire_count > 1) {
			panic("vm_page_free: invalid wire count (%d), pindex: 0x%x",
				m->wire_count, m->pindex);
		}
		m->wire_count = 0;
		cnt.v_wire_count--;
	}

	return 1;
}

/*
 * helper routine for vm_page_free and vm_page_free_zero
 */
static __inline void
vm_page_free_wakeup()
{

	/*
	 * if the pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * Wake up processes that are waiting on memory if we hit a high
	 * water mark, and wake up the scheduler process if we have lots
	 * of memory; it will swap in processes.
	 */
	if (vm_pages_needed &&
	    ((cnt.v_free_count + cnt.v_cache_count) >= cnt.v_free_min)) {
		wakeup(&cnt.v_free_count);
		vm_pages_needed = 0;
	}
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free(m)
	register vm_page_t m;
{
	int s;
	struct vpgqueues *pq;

	s = splvm();

	cnt.v_tfree++;

	if (!vm_page_freechk_and_unqueue(m)) {
		splx(s);
		return;
	}

	m->queue = PQ_FREE + m->pc;
	pq = &vm_page_queues[m->queue];
	++(*pq->lcnt);
	++(*pq->cnt);
	/*
	 * If the pageout process is grabbing the page, it is likely
	 * that the page is NOT in the cache.  It is more likely that
	 * the page will be partially in the cache if it is being
	 * explicitly freed.
	 */
	if (curproc == pageproc) {
		TAILQ_INSERT_TAIL(pq->pl, m, pageq);
	} else {
		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	}
	vm_page_free_wakeup();
	splx(s);
}

void
vm_page_free_zero(m)
	register vm_page_t m;
{
	int s;
	struct vpgqueues *pq;

	s = splvm();

	cnt.v_tfree++;

	if (!vm_page_freechk_and_unqueue(m)) {
		splx(s);
		return;
	}

	m->queue = PQ_ZERO + m->pc;
	pq = &vm_page_queues[m->queue];
	++(*pq->lcnt);
	++(*pq->cnt);

	TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	++vm_page_zero_count;
	vm_page_free_wakeup();
	splx(s);
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void
vm_page_wire(m)
	register vm_page_t m;
{
	int s;

	if (m->wire_count == 0) {
		s = splvm();
		vm_page_unqueue(m);
		splx(s);
		cnt.v_wire_count++;
	}
	++(*vm_page_queues[PQ_NONE].lcnt);
	m->wire_count++;
	m->flags |= PG_MAPPED;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void
vm_page_unwire(m)
	register vm_page_t m;
{
	int s;

	s = splvm();

	if (m->wire_count > 0)
		m->wire_count--;

	if (m->wire_count == 0) {
		cnt.v_wire_count--;
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->queue = PQ_ACTIVE;
		++(*vm_page_queues[PQ_ACTIVE].lcnt);
		cnt.v_active_count++;
	}
	splx(s);
}
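
/*
 * Usage sketch (informational only; the caller shown is hypothetical):
 * a page that must stay resident, for instance while it is the target
 * of raw I/O, is wired and later unwired:
 *
 *	vm_page_wire(m);
 *	... the page cannot be paged out while it is wired ...
 *	vm_page_unwire(m);
 *
 * The first wiring removes the page from whatever paging queue it was
 * on; when the last wiring is released, the page is placed back on the
 * active queue.
 */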

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t m;
{
	int s;

	/*
	 * Only move active pages -- ignore locked or already inactive ones.
	 *
	 * XXX: sometimes we get pages which aren't wired down or on any queue -
	 * we need to put them on the inactive queue also, otherwise we lose
	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	s = splvm();
	if (m->wire_count == 0 && m->hold_count == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;
		vm_page_unqueue(m);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->queue = PQ_INACTIVE;
		++(*vm_page_queues[PQ_INACTIVE].lcnt);
		cnt.v_inactive_count++;
	}
	splx(s);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 */
void
vm_page_cache(m)
	register vm_page_t m;
{
	int s;

	if ((m->flags & PG_BUSY) || m->busy || m->wire_count) {
		printf("vm_page_cache: attempting to cache busy page\n");
		return;
	}
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	vm_page_protect(m, VM_PROT_NONE);
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %d", m->pindex);
	}
	s = splvm();
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	++(*vm_page_queues[m->queue].lcnt);
	TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
	cnt.v_cache_count++;
	vm_page_free_wakeup();
	splx(s);
}


/*
 * mapping function for valid bits or for dirty bits in
 * a page
 */
inline int
vm_page_bits(int base, int size)
{
	u_short chunk;

	if ((base == 0) && (size >= PAGE_SIZE))
		return VM_PAGE_BITS_ALL;
	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	base = (base % PAGE_SIZE) / DEV_BSIZE;
	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
	return (chunk << base) & VM_PAGE_BITS_ALL;
}

/*
 * set a page valid and clean
 */
void
vm_page_set_validclean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE)
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}

/*
 * set a page (partially) invalid
 */
void
vm_page_set_invalid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	m->valid &= ~(bits = vm_page_bits(base, size));
	if (m->valid == 0)
		m->dirty &= ~bits;
}

/*
 * is (partial) page valid?
 */
int
vm_page_is_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

void
vm_page_test_dirty(m)
	vm_page_t m;
{
	if ((m->dirty != VM_PAGE_BITS_ALL) &&
	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		m->dirty = VM_PAGE_BITS_ALL;
	}
}
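
/*
 * Worked example for vm_page_bits() above (informational only, assuming
 * DEV_BSIZE == 512 and PAGE_SIZE == 4096): a request for base 0, size
 * 1024 rounds size up to a whole number of chunks, indexes
 * vm_page_dev_bsize_chunks[1024 / 512] == 0x3, and shifts it by 0, so
 * bits 0 and 1 of m->valid or m->dirty describe the first 1024 bytes of
 * the page.  A request for base 1024, size 513 rounds up to two chunks
 * and yields 0x3 << 2 == 0xc.
 */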

/*
 * This interface is for merging with malloc() someday.
 * Even if we never implement compaction so that contiguous allocation
 * works after initialization time, malloc()'s data structures are good
 * for statistics and for allocations of less than a page.
 */
void *
contigmalloc(size, type, flags, low, high, alignment, boundary)
	unsigned long size;	/* should be size_t here and for malloc() */
	int type;
	int flags;
	unsigned long low;
	unsigned long high;
	unsigned long alignment;
	unsigned long boundary;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	int pass;
	vm_page_t pga = vm_page_array;

	size = round_page(size);
	if (size == 0)
		panic("vm_page_alloc_contig: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("vm_page_alloc_contig: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("vm_page_alloc_contig: boundary must be a power of 2");

	start = 0;
	for (pass = 0; pass <= 1; pass++) {
		s = splvm();
again:
		/*
		 * Find first page in array that is free, within range,
		 * aligned, and such that the boundary won't be crossed.
		 */
		for (i = start; i < cnt.v_page_count; i++) {
			int pqtype;
			phys = VM_PAGE_TO_PHYS(&pga[i]);
			pqtype = pga[i].queue - pga[i].pc;
			if (((pqtype == PQ_ZERO) || (pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
			    (phys >= low) && (phys < high) &&
			    ((phys & (alignment - 1)) == 0) &&
			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
				break;
		}

		/*
		 * If the above failed or we will exceed the upper bound, fail.
		 */
		if ((i == cnt.v_page_count) ||
		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
			vm_page_t m, next;

again1:
			for (m = TAILQ_FIRST(&vm_page_queue_inactive);
				m != NULL;
				m = next) {

				if (m->queue != PQ_INACTIVE) {
					break;
				}

				next = TAILQ_NEXT(m, pageq);
				if (m->flags & PG_BUSY) {
					m->flags |= PG_WANTED;
					tsleep(m, PVM, "vpctw0", 0);
					goto again1;
				}
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vm_object_page_clean(m->object, 0, 0, TRUE, TRUE);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
						    m->object->type == OBJT_DEFAULT) {
						vm_page_protect(m, VM_PROT_NONE);
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) &&
				    (m->busy == 0) &&
				    (m->hold_count == 0))
					vm_page_cache(m);
			}

			for (m = TAILQ_FIRST(&vm_page_queue_active);
				m != NULL;
				m = next) {

				if (m->queue != PQ_ACTIVE) {
					break;
				}

				next = TAILQ_NEXT(m, pageq);
				if (m->flags & PG_BUSY) {
					m->flags |= PG_WANTED;
					tsleep(m, PVM, "vpctw1", 0);
					goto again1;
				}
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vm_object_page_clean(m->object, 0, 0, TRUE, TRUE);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
						    m->object->type == OBJT_DEFAULT) {
						vm_page_protect(m, VM_PROT_NONE);
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) &&
				    (m->busy == 0) &&
				    (m->hold_count == 0))
					vm_page_cache(m);
			}

			splx(s);
			continue;
		}
		start = i;

		/*
		 * Check successive pages for contiguous and free.
		 */
		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
			int pqtype;
			pqtype = pga[i].queue - pga[i].pc;
			if ((VM_PAGE_TO_PHYS(&pga[i]) !=
			    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
			    ((pqtype != PQ_ZERO) && (pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
				start++;
				goto again;
			}
		}

		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			int pqtype;
			vm_page_t m = &pga[i];

			pqtype = m->queue - m->pc;
			if (pqtype == PQ_CACHE)
				vm_page_free(m);

			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
			--(*vm_page_queues[m->queue].lcnt);
			cnt.v_free_count--;
			m->valid = VM_PAGE_BITS_ALL;
			m->flags = 0;
			m->dirty = 0;
			m->wire_count = 0;
			m->busy = 0;
			m->queue = PQ_NONE;
			m->object = NULL;
			vm_page_wire(m);
		}

		/*
		 * We've found a contiguous chunk that meets our requirements.
		 * Allocate kernel VM, unfree and assign the physical pages to
		 * it, and return a kernel VM pointer.
		 */
		tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);
		if (addr == 0) {
			/*
			 * XXX We almost never run out of kernel virtual
			 * space, so we don't make the allocated memory
			 * above available.
			 */
			splx(s);
			return (NULL);
		}

		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			vm_page_t m = &pga[i];
			vm_page_insert(m, kernel_object,
				OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
			pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
			tmp_addr += PAGE_SIZE;
		}

		splx(s);
		return ((void *)addr);
	}
	return NULL;
}

vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
	vm_offset_t size;
	vm_offset_t low;
	vm_offset_t high;
	vm_offset_t alignment;
{
	return ((vm_offset_t)contigmalloc(size, M_DEVBUF, M_NOWAIT, low, high,
	    alignment, 0ul));
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;
	db_printf("PQ_FREE:");
	for(i=0;i<PQ_L2_SIZE;i++) {
		db_printf(" %d", *vm_page_queues[PQ_FREE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_CACHE:");
	for(i=0;i<PQ_L2_SIZE;i++) {
		db_printf(" %d", *vm_page_queues[PQ_CACHE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ZERO:");
	for(i=0;i<PQ_L2_SIZE;i++) {
		db_printf(" %d", *vm_page_queues[PQ_ZERO + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
		*vm_page_queues[PQ_ACTIVE].lcnt,
		*vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */
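
/*
 * Note (informational only): with the DDB option configured, the two
 * commands above are reached from the kernel debugger prompt as
 * "show page" and "show pageq".
 */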