/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id$
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Resident memory management module.
 */
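/*
 * Overview (editorial summary of this file): the module maintains the
 * vm_page array describing managed physical pages, an object/offset hash
 * table for fast page lookup, and the paging queues (free, zero, cache,
 * inactive and active) that the page allocator and the pageout daemon
 * work from.
 */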

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

static void vm_page_queue_init __P((void));
static vm_page_t vm_page_select_free __P((vm_object_t object,
			vm_pindex_t pindex, int prefqueue));

/*
 * Associated with each page of user-allocatable memory is a
 * page structure.
 */

static struct pglist *vm_page_buckets;	/* Array of buckets */
static int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */

struct pglist vm_page_queue_free[PQ_L2_SIZE] = {0};
struct pglist vm_page_queue_zero[PQ_L2_SIZE] = {0};
struct pglist vm_page_queue_active = {0};
struct pglist vm_page_queue_inactive = {0};
struct pglist vm_page_queue_cache[PQ_L2_SIZE] = {0};

int no_queue = 0;

struct vpgqueues vm_page_queues[PQ_COUNT] = {0};
int pqcnt[PQ_COUNT] = {0};

static void
vm_page_queue_init(void)
{
	int i;

	vm_page_queues[PQ_NONE].pl = NULL;
	vm_page_queues[PQ_NONE].cnt = &no_queue;
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_FREE+i].pl = &vm_page_queue_free[i];
		vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
	}
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_ZERO+i].pl = &vm_page_queue_zero[i];
		vm_page_queues[PQ_ZERO+i].cnt = &cnt.v_free_count;
	}
	vm_page_queues[PQ_INACTIVE].pl = &vm_page_queue_inactive;
	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;

	vm_page_queues[PQ_ACTIVE].pl = &vm_page_queue_active;
	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_CACHE+i].pl = &vm_page_queue_cache[i];
		vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
	}
	for (i = 0; i < PQ_COUNT; i++) {
		if (vm_page_queues[i].pl) {
			TAILQ_INIT(vm_page_queues[i].pl);
		} else if (i != 0) {
			panic("vm_page_queue_init: queue %d is null", i);
		}
		vm_page_queues[i].lcnt = &pqcnt[i];
	}
}

vm_page_t vm_page_array = 0;
int vm_page_array_size = 0;
long first_page = 0;
static long last_page;
static vm_size_t page_mask;
static int page_shift;
int vm_page_zero_count = 0;

/*
 * map of contiguous valid DEV_BSIZE chunks in a page
 * (this list is valid for page sizes up to 16*DEV_BSIZE)
 */
static u_short vm_page_dev_bsize_chunks[] = {
	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};

static inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex));
static int vm_page_freechk_and_unqueue __P((vm_page_t m));
static void vm_page_free_wakeup __P((void));

/*
 * vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
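/*
 * For example (illustrative arithmetic only): with a 4K page,
 * cnt.v_page_size == 4096, so vm_set_page_size() below leaves
 * page_mask == 0xfff and page_shift == 12.
 */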
void
vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0;; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}

/*
 * vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t starta;
	vm_offset_t enda;
	register vm_offset_t vaddr;
{
	register vm_offset_t mapped;
	register vm_page_t m;
	register struct pglist *bucket;
	vm_size_t npages, page_range;
	register vm_offset_t new_start;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t first_managed_page;

	/* the biggest memory array is the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */

	vm_page_queue_init();

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct pglist *) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
	new_start = round_page(new_start);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = 0; i < vm_page_bucket_count; i++) {
		TAILQ_INIT(bucket);
		bucket++;
	}

	/*
	 * round (or truncate) the addresses to our page size.
	 */

	/*
	 * Pre-allocate maps and map entries that cannot be dynamically
	 * allocated via malloc().  The maps include the kernel_map and
	 * kmem_map which must be initialized before malloc() will work
	 * (obviously).  Also could include pager maps which would be
	 * allocated before kmeminit.
	 *
	 * Allow some kernel map entries... this should be plenty since people
	 * shouldn't be cluttering up the kernel map (they should use their
	 * own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
	    MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 * Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	start = round_page(new_start);

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */

	first_page = phys_avail[0] / PAGE_SIZE;
	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;

	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (start - phys_avail[biggestone])) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */

	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */

	new_start = round_page(start + page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->phys_addr = pa;
			m->flags = 0;
			m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
			m->queue = PQ_FREE + m->pc;
			TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
			++(*vm_page_queues[m->queue].lcnt);
			pa += PAGE_SIZE;
		}
	}

	return (mapped);
}

/*
 * vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This function depends on vm_page_bucket_count being a power of 2.
 */
static inline int
vm_page_hash(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	return ((((unsigned) object) >> 5) + (pindex >> 1)) & vm_page_hash_mask;
}

/*
 * vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset-page
 *	table and object list.
 *
 *	The object and page must be locked, and the caller must be at splhigh.
 */

void
vm_page_insert(m, object, pindex)
	register vm_page_t m;
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register struct pglist *bucket;

	if (m->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */

	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	TAILQ_INSERT_TAIL(bucket, m, hashq);

	/*
	 * Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, m, listq);
	m->flags |= PG_TABLED;
	m->object->page_hint = m;

	/*
	 * And show that the object has one more resident page.
	 */

	object->resident_page_count++;
}

/*
 * vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked, and at splhigh.
 */

void
vm_page_remove(m)
	register vm_page_t m;
{
	register struct pglist *bucket;

	if (!(m->flags & PG_TABLED))
		return;

	if (m->object->page_hint == m)
		m->object->page_hint = NULL;

	/*
	 * Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
	TAILQ_REMOVE(bucket, m, hashq);

	/*
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&m->object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	m->object->resident_page_count--;

	m->flags &= ~PG_TABLED;
}

/*
 * vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t
vm_page_lookup(object, pindex)
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register vm_page_t m;
	register struct pglist *bucket;
	int s;

	/*
	 * Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];

	s = splvm();
	for (m = TAILQ_FIRST(bucket); m != NULL; m = TAILQ_NEXT(m, hashq)) {
		if ((m->object == object) && (m->pindex == pindex)) {
			splx(s);
			m->object->page_hint = m;
			return (m);
		}
	}
	splx(s);
	return (NULL);
}

/*
 * vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(m, new_object, new_pindex)
	register vm_page_t m;
	register vm_object_t new_object;
	vm_pindex_t new_pindex;
{
	int s;

	s = splvm();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	splx(s);
}

/*
 * vm_page_unqueue without any wakeup
 */
void
vm_page_unqueue_nowakeup(m)
	vm_page_t m;
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(pq->pl, m, pageq);
		--(*pq->cnt);
		--(*pq->lcnt);
	}
}

/*
 * vm_page_unqueue must be called at splhigh().
 */
void
vm_page_unqueue(m)
	vm_page_t m;
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(pq->pl, m, pageq);
		--(*pq->cnt);
		--(*pq->lcnt);
		/* test the saved queue; m->queue has just been cleared */
		if ((queue - m->pc) == PQ_CACHE) {
			if ((cnt.v_cache_count + cnt.v_free_count) <
			    (cnt.v_free_reserved + cnt.v_cache_min))
				pagedaemon_wakeup();
		}
	}
}

/*
 * Find a page on the specified queue with color optimization.
 */
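/*
 * Note on page coloring: each physical page gets a color from its physical
 * address at startup (m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK in
 * vm_page_startup()), and every color has its own free, zero and cache
 * queue.  The lookups below derive a preferred color from
 * (pindex + object->pg_color) so that pages backing nearby offsets of an
 * object tend to map to different cache lines; if the preferred queue is
 * empty, the remaining colors are searched as well.
 */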
vm_page_t
vm_page_list_find(basequeue, index)
	int basequeue, index;
{
#if PQ_L2_SIZE > 1

	int i, j;
	vm_page_t m;
	int hindex;

	for (j = 0; j < PQ_L1_SIZE; j++) {
		for (i = (PQ_L2_SIZE / 2) - (PQ_L1_SIZE - 1);
		    i >= 0;
		    i -= PQ_L1_SIZE) {
			hindex = (index + (i + j)) & PQ_L2_MASK;
			m = TAILQ_FIRST(vm_page_queues[basequeue + hindex].pl);
			if (m)
				return m;

			hindex = (index - (i + j)) & PQ_L2_MASK;
			m = TAILQ_FIRST(vm_page_queues[basequeue + hindex].pl);
			if (m)
				return m;
		}
	}
	return NULL;
#else
	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
#endif

}

/*
 * Find a page on the specified queue with color optimization.
 */
vm_page_t
vm_page_select(object, pindex, basequeue)
	vm_object_t object;
	vm_pindex_t pindex;
	int basequeue;
{

#if PQ_L2_SIZE > 1
	int index;

	index = (pindex + object->pg_color) & PQ_L2_MASK;
	return vm_page_list_find(basequeue, index);

#else
	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
#endif

}

/*
 * Find a free or zero page, with specified preference.
 */
static vm_page_t
vm_page_select_free(object, pindex, prefqueue)
	vm_object_t object;
	vm_pindex_t pindex;
	int prefqueue;
{
#if PQ_L2_SIZE > 1
	int i, j;
	int index, hindex;
#endif
	vm_page_t m;
	int oqueuediff;

	if (prefqueue == PQ_ZERO)
		oqueuediff = PQ_FREE - PQ_ZERO;
	else
		oqueuediff = PQ_ZERO - PQ_FREE;

	if (object->page_hint) {
		if (object->page_hint->pindex == (pindex - 1)) {
			vm_offset_t last_phys;

			if ((object->page_hint->flags & PG_FICTITIOUS) == 0) {
				if ((object->page_hint < &vm_page_array[cnt.v_page_count-1]) &&
				    (object->page_hint >= &vm_page_array[0])) {
					int queue;

					last_phys = VM_PAGE_TO_PHYS(object->page_hint);
					m = PHYS_TO_VM_PAGE(last_phys + PAGE_SIZE);
					queue = m->queue - m->pc;
					if (queue == PQ_FREE || queue == PQ_ZERO) {
						return m;
					}
				}
			}
		}
	}


#if PQ_L2_SIZE > 1

	index = pindex + object->pg_color;
	for (j = 0; j < PQ_L1_SIZE; j++) {
		for (i = (PQ_L2_SIZE / 2) - (PQ_L1_SIZE - 1);
		    (i + j) >= 0;
		    i -= PQ_L1_SIZE) {

			hindex = prefqueue + ((index + (i + j)) & PQ_L2_MASK);
			if ((m = TAILQ_FIRST(vm_page_queues[hindex].pl)) != NULL)
				return m;
			if ((m = TAILQ_FIRST(vm_page_queues[hindex + oqueuediff].pl)) != NULL)
				return m;

			hindex = prefqueue + ((index - (i + j)) & PQ_L2_MASK);
			if ((m = TAILQ_FIRST(vm_page_queues[hindex].pl)) != NULL)
				return m;
			if ((m = TAILQ_FIRST(vm_page_queues[hindex + oqueuediff].pl)) != NULL)
				return m;
		}
	}
#else
	if ((m = TAILQ_FIRST(vm_page_queues[prefqueue].pl)) != NULL)
		return m;
	else
		return TAILQ_FIRST(vm_page_queues[prefqueue + oqueuediff].pl);
#endif

	return NULL;
}

/*
 * vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	VM_ALLOC_ZERO		zero page
 *
 *	Object must be locked.
 */
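/*
 * Typical caller pattern (an illustrative sketch only; the actual callers
 * live outside this file): ordinary callers treat a NULL return as "no
 * memory right now", sleep in vm_wait() and retry, e.g.
 *
 *	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL)
 *		vm_wait();
 *
 * Interrupt-time callers (VM_ALLOC_INTERRUPT) cannot sleep and must cope
 * with a NULL return themselves.
 */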
vm_page_t
vm_page_alloc(object, pindex, page_req)
	vm_object_t object;
	vm_pindex_t pindex;
	int page_req;
{
	register vm_page_t m;
	struct vpgqueues *pq;
	int queue;
	int s;

#ifdef DIAGNOSTIC
	m = vm_page_lookup(object, pindex);
	if (m)
		panic("vm_page_alloc: page already allocated");
#endif

	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	s = splvm();

	switch (page_req) {

	case VM_ALLOC_NORMAL:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(NORMAL): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select(object, pindex, PQ_CACHE);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_ZERO:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			m = vm_page_select_free(object, pindex, PQ_ZERO);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(ZERO): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select(object, pindex, PQ_CACHE);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(ZERO): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_SYSTEM:
		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
		    ((cnt.v_cache_count == 0) &&
		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(SYSTEM): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select(object, pindex, PQ_CACHE);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(SYSTEM): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_INTERRUPT:
		if (cnt.v_free_count > 0) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(INTERRUPT): missing page on free queue\n");
#endif
		} else {
			splx(s);
			pagedaemon_wakeup();
			return (NULL);
		}
		break;

	default:
		panic("vm_page_alloc: invalid allocation class");
	}

	queue = m->queue;
	if ((queue - m->pc) == PQ_ZERO)
		--vm_page_zero_count;
	pq = &vm_page_queues[queue];
	TAILQ_REMOVE(pq->pl, m, pageq);
	--(*pq->cnt);
	--(*pq->lcnt);
	if ((m->queue - m->pc) == PQ_ZERO) {
		m->flags = PG_ZERO | PG_BUSY;
	} else if ((m->queue - m->pc) == PQ_CACHE) {
		vm_page_remove(m);
		m->flags = PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}
	m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;
	m->dirty = 0;
	m->queue = PQ_NONE;

	/* XXX before splx until vm_page_insert is safe */
	vm_page_insert(m, object, pindex);

	splx(s);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (((cnt.v_free_count + cnt.v_cache_count) <
	    (cnt.v_free_reserved + cnt.v_cache_min)) ||
	    (cnt.v_free_count < cnt.v_pageout_free_min))
		pagedaemon_wakeup();

	return (m);
}

void
vm_wait()
{
	int s;

	s = splvm();
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		tsleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed++;
			wakeup(&vm_pages_needed);
		}
		tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
	}
	splx(s);
}


/*
 * vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void
vm_page_activate(m)
	register vm_page_t m;
{
	int s;

	s = splvm();
	if (m->queue == PQ_ACTIVE)
		panic("vm_page_activate: already active");

	if ((m->queue - m->pc) == PQ_CACHE)
		cnt.v_reactivated++;

	vm_page_unqueue(m);

	if (m->wire_count == 0) {
		m->queue = PQ_ACTIVE;
		++(*vm_page_queues[PQ_ACTIVE].lcnt);
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
		cnt.v_active_count++;
	}
	splx(s);
}

/*
 * helper routine for vm_page_free and vm_page_free_zero
 */
static int
vm_page_freechk_and_unqueue(m)
	vm_page_t m;
{
	if (m->busy ||
	    (m->flags & PG_BUSY) ||
	    ((m->queue - m->pc) == PQ_FREE) ||
	    (m->hold_count != 0)) {
		printf("vm_page_free: pindex(%ld), busy(%d), PG_BUSY(%d), hold(%d)\n",
		    m->pindex, m->busy,
		    (m->flags & PG_BUSY) ? 1 : 0, m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}

	vm_page_remove(m);
	vm_page_unqueue_nowakeup(m);
	if ((m->flags & PG_FICTITIOUS) != 0) {
		return 0;
	}
	if (m->wire_count != 0) {
		if (m->wire_count > 1) {
			panic("vm_page_free: invalid wire count (%d), pindex: 0x%x",
			    m->wire_count, m->pindex);
		}
		m->wire_count = 0;
		cnt.v_wire_count--;
	}

	return 1;
}

/*
 * helper routine for vm_page_free and vm_page_free_zero
 */
static __inline void
vm_page_free_wakeup()
{

	/*
	 * if pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * wakeup processes that are waiting on memory if we hit a
	 * high water mark.  And wakeup the scheduler process if we have
	 * lots of memory; it will swap in processes.
	 */
	if (vm_pages_needed &&
	    ((cnt.v_free_count + cnt.v_cache_count) >= cnt.v_free_min)) {
		wakeup(&cnt.v_free_count);
		vm_pages_needed = 0;
	}
}

/*
 * vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free(m)
	register vm_page_t m;
{
	int s;
	struct vpgqueues *pq;

	s = splvm();

	cnt.v_tfree++;

	if (!vm_page_freechk_and_unqueue(m)) {
		splx(s);
		return;
	}

	m->queue = PQ_FREE + m->pc;
	pq = &vm_page_queues[m->queue];
	++(*pq->lcnt);
	++(*pq->cnt);
	/*
	 * If the pageout process is grabbing the page, it is likely
	 * that the page is NOT in the cache.
	 * It is more likely that the page will be
	 * partially in the cache if it is being explicitly freed.
	 */
	if (curproc == pageproc) {
		TAILQ_INSERT_TAIL(pq->pl, m, pageq);
	} else {
		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	}
	vm_page_free_wakeup();
	splx(s);
}

void
vm_page_free_zero(m)
	register vm_page_t m;
{
	int s;
	struct vpgqueues *pq;

	s = splvm();

	cnt.v_tfree++;

	if (!vm_page_freechk_and_unqueue(m)) {
		splx(s);
		return;
	}

	m->queue = PQ_ZERO + m->pc;
	pq = &vm_page_queues[m->queue];
	++(*pq->lcnt);
	++(*pq->cnt);

	TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	++vm_page_zero_count;
	vm_page_free_wakeup();
	splx(s);
}

/*
 * vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void
vm_page_wire(m)
	register vm_page_t m;
{
	int s;

	if (m->wire_count == 0) {
		s = splvm();
		vm_page_unqueue(m);
		splx(s);
		cnt.v_wire_count++;
	}
	++(*vm_page_queues[PQ_NONE].lcnt);
	m->wire_count++;
	m->flags |= PG_MAPPED;
}

/*
 * vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void
vm_page_unwire(m)
	register vm_page_t m;
{
	int s;

	s = splvm();

	if (m->wire_count > 0)
		m->wire_count--;

	if (m->wire_count == 0) {
		cnt.v_wire_count--;
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->queue = PQ_ACTIVE;
		++(*vm_page_queues[PQ_ACTIVE].lcnt);
		cnt.v_active_count++;
	}
	splx(s);
}


/*
 * vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t m;
{
	int s;

	/*
	 * Only move active pages -- ignore locked or already inactive ones.
	 *
	 * XXX: sometimes we get pages which aren't wired down or on any queue -
	 * we need to put them on the inactive queue also, otherwise we lose
	 * track of them.  Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	s = splvm();
	if (m->wire_count == 0 && m->hold_count == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;
		vm_page_unqueue(m);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->queue = PQ_INACTIVE;
		++(*vm_page_queues[PQ_INACTIVE].lcnt);
		cnt.v_inactive_count++;
	}
	splx(s);
}

/*
 * vm_page_cache:
 *
 *	Put the specified page onto the page cache queue (if appropriate).
 */
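/*
 * A page on the cache queue is clean and unmapped but keeps its
 * object/offset identity: it can be reclaimed cheaply through
 * vm_page_lookup() and vm_page_activate() if its data is referenced
 * again, or handed out by vm_page_alloc() as if it were free.
 */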
void
vm_page_cache(m)
	register vm_page_t m;
{
	int s;

	if ((m->flags & PG_BUSY) || m->busy || m->wire_count) {
		printf("vm_page_cache: attempting to cache busy page\n");
		return;
	}
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	vm_page_protect(m, VM_PROT_NONE);
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %d", m->pindex);
	}
	s = splvm();
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	++(*vm_page_queues[m->queue].lcnt);
	TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
	cnt.v_cache_count++;
	vm_page_free_wakeup();
	splx(s);
}


/*
 * mapping function for valid bits or for dirty bits in
 * a page
 */
inline int
vm_page_bits(int base, int size)
{
	u_short chunk;

	if ((base == 0) && (size >= PAGE_SIZE))
		return VM_PAGE_BITS_ALL;
	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	base = (base % PAGE_SIZE) / DEV_BSIZE;
	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
	return (chunk << base) & VM_PAGE_BITS_ALL;
}

/*
 * set a page valid and clean
 */
void
vm_page_set_validclean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int pagebits = vm_page_bits(base, size);

	m->valid |= pagebits;
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE)
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}

/*
 * set a page (partially) invalid
 */
void
vm_page_set_invalid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	m->valid &= ~(bits = vm_page_bits(base, size));
	if (m->valid == 0)
		m->dirty &= ~bits;
}

/*
 * is (partial) page valid?
 */
int
vm_page_is_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

void
vm_page_test_dirty(m)
	vm_page_t m;
{
	if ((m->dirty != VM_PAGE_BITS_ALL) &&
	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		m->dirty = VM_PAGE_BITS_ALL;
	}
}

/*
 * This interface is for merging with malloc() someday.
 * Even if we never implement compaction so that contiguous allocation
 * works after initialization time, malloc()'s data structures are good
 * for statistics and for allocations of less than a page.
 */
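/*
 * Illustrative use only (no such caller appears in this file): a driver
 * needing a physically contiguous DMA buffer below 16MB that must not
 * cross a 64KB boundary might call
 *
 *	buf = contigmalloc(8192, M_DEVBUF, M_NOWAIT,
 *	    0ul, 0xfffffful, PAGE_SIZE, 0x10000ul);
 *
 * "low" and "high" bound the acceptable physical addresses, "alignment"
 * constrains the starting address, and "boundary" keeps the allocation
 * from straddling that power-of-two boundary.
 */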
void *
contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
	unsigned long size;	/* should be size_t here and for malloc() */
	int type;
	int flags;
	unsigned long low;
	unsigned long high;
	unsigned long alignment;
	unsigned long boundary;
	vm_map_t map;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	int pass;
	vm_page_t pga = vm_page_array;

	size = round_page(size);
	if (size == 0)
		panic("vm_page_alloc_contig: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("vm_page_alloc_contig: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("vm_page_alloc_contig: boundary must be a power of 2");

	start = 0;
	for (pass = 0; pass <= 1; pass++) {
		s = splvm();
again:
		/*
		 * Find first page in array that is free, within range, aligned, and
		 * such that the boundary won't be crossed.
		 */
		for (i = start; i < cnt.v_page_count; i++) {
			int pqtype;

			phys = VM_PAGE_TO_PHYS(&pga[i]);
			pqtype = pga[i].queue - pga[i].pc;
			if (((pqtype == PQ_ZERO) || (pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
			    (phys >= low) && (phys < high) &&
			    ((phys & (alignment - 1)) == 0) &&
			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
				break;
		}

		/*
		 * If the above failed or we will exceed the upper bound, fail.
		 */
		if ((i == cnt.v_page_count) ||
		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
			vm_page_t m, next;

again1:
			for (m = TAILQ_FIRST(&vm_page_queue_inactive);
			    m != NULL;
			    m = next) {

				if (m->queue != PQ_INACTIVE) {
					break;
				}

				next = TAILQ_NEXT(m, pageq);
				if (m->flags & PG_BUSY) {
					m->flags |= PG_WANTED;
					tsleep(m, PVM, "vpctw0", 0);
					goto again1;
				}
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vm_object_page_clean(m->object, 0, 0, TRUE, TRUE);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
					    m->object->type == OBJT_DEFAULT) {
						vm_page_protect(m, VM_PROT_NONE);
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) &&
				    (m->busy == 0) &&
				    (m->hold_count == 0))
					vm_page_cache(m);
			}

			for (m = TAILQ_FIRST(&vm_page_queue_active);
			    m != NULL;
			    m = next) {

				if (m->queue != PQ_ACTIVE) {
					break;
				}

				next = TAILQ_NEXT(m, pageq);
				if (m->flags & PG_BUSY) {
					m->flags |= PG_WANTED;
					tsleep(m, PVM, "vpctw1", 0);
					goto again1;
				}
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vm_object_page_clean(m->object, 0, 0, TRUE, TRUE);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
					    m->object->type == OBJT_DEFAULT) {
						vm_page_protect(m, VM_PROT_NONE);
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) &&
				    (m->busy == 0) &&
				    (m->hold_count == 0))
					vm_page_cache(m);
			}

			splx(s);
			continue;
		}
		start = i;

		/*
		 * Check successive pages for contiguous and free.
		 */
		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
			int pqtype;

			pqtype = pga[i].queue - pga[i].pc;
			if ((VM_PAGE_TO_PHYS(&pga[i]) !=
			    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
			    ((pqtype != PQ_ZERO) && (pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
				start++;
				goto again;
			}
		}

		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			int pqtype;
			vm_page_t m = &pga[i];

			pqtype = m->queue - m->pc;
			if (pqtype == PQ_CACHE)
				vm_page_free(m);

			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
			--(*vm_page_queues[m->queue].lcnt);
			cnt.v_free_count--;
			m->valid = VM_PAGE_BITS_ALL;
			m->flags = 0;
			m->dirty = 0;
			m->wire_count = 0;
			m->busy = 0;
			m->queue = PQ_NONE;
			m->object = NULL;
			vm_page_wire(m);
		}

		/*
		 * We've found a contiguous chunk that meets our requirements.
		 * Allocate kernel VM, unfree and assign the physical pages to it and
		 * return kernel VM pointer.
		 */
		tmp_addr = addr = kmem_alloc_pageable(map, size);
		if (addr == 0) {
			/*
			 * XXX We almost never run out of kernel virtual
			 * space, so we don't make the allocated memory
			 * above available.
			 */
			splx(s);
			return (NULL);
		}

		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			vm_page_t m = &pga[i];

			vm_page_insert(m, kernel_object,
			    OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
			pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
			tmp_addr += PAGE_SIZE;
		}

		splx(s);
		return ((void *)addr);
	}
	return NULL;
}

void *
contigmalloc(size, type, flags, low, high, alignment, boundary)
	unsigned long size;	/* should be size_t here and for malloc() */
	int type;
	int flags;
	unsigned long low;
	unsigned long high;
	unsigned long alignment;
	unsigned long boundary;
{
	return contigmalloc1(size, type, flags, low, high, alignment, boundary,
	    kernel_map);
}

vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
	vm_offset_t size;
	vm_offset_t low;
	vm_offset_t high;
	vm_offset_t alignment;
{
	return ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
	    alignment, 0ul, kernel_map));
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;

	db_printf("PQ_FREE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_FREE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_CACHE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_CACHE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ZERO:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_ZERO + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
	    *vm_page_queues[PQ_ACTIVE].lcnt,
	    *vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */