/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.65 1996/09/28 03:33:35 dyson Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

static void vm_page_queue_init __P((void));
static vm_page_t vm_page_select_free __P((vm_object_t object,
			vm_pindex_t pindex, int prefqueue));

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

static struct pglist *vm_page_buckets;	/* Array of buckets */
static int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */

struct pglist vm_page_queue_free[PQ_L2_SIZE];
struct pglist vm_page_queue_zero[PQ_L2_SIZE];
struct pglist vm_page_queue_active;
struct pglist vm_page_queue_inactive;
struct pglist vm_page_queue_cache[PQ_L2_SIZE];

int no_queue;

struct vpgqueues vm_page_queues[PQ_COUNT];
int pqcnt[PQ_COUNT];

static void
vm_page_queue_init(void) {
	int i;

	vm_page_queues[PQ_NONE].pl = NULL;
	vm_page_queues[PQ_NONE].cnt = &no_queue;
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_FREE+i].pl = &vm_page_queue_free[i];
		vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
	}
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_ZERO+i].pl = &vm_page_queue_zero[i];
		vm_page_queues[PQ_ZERO+i].cnt = &cnt.v_free_count;
	}
	vm_page_queues[PQ_INACTIVE].pl = &vm_page_queue_inactive;
	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;

	vm_page_queues[PQ_ACTIVE].pl = &vm_page_queue_active;
	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_CACHE+i].pl = &vm_page_queue_cache[i];
		vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
	}
	for (i = 0; i < PQ_COUNT; i++) {
		if (vm_page_queues[i].pl) {
			TAILQ_INIT(vm_page_queues[i].pl);
		} else if (i != 0) {
			panic("vm_page_queue_init: queue %d is null", i);
		}
		vm_page_queues[i].lcnt = &pqcnt[i];
	}
}

vm_page_t vm_page_array;
static int vm_page_array_size;
long first_page;
static long last_page;
static vm_size_t page_mask;
static int page_shift;
int vm_page_zero_count;

/*
 * map of contiguous valid DEV_BSIZE chunks in a page
 * (this list is valid for page sizes up to 16*DEV_BSIZE)
 */
static u_short vm_page_dev_bsize_chunks[] = {
	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};

static inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex))
	__pure2;
static int vm_page_freechk_and_unqueue __P((vm_page_t m));
static void vm_page_free_wakeup __P((void));
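
/*
 * Illustrative note (not from the original source): PQ_FREE, PQ_ZERO and
 * PQ_CACHE each name a group of PQ_L2_SIZE queues, one per page "color".
 * vm_page_startup() derives the color from the physical address:
 *
 *	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
 *	m->queue = PQ_FREE + m->pc;
 *
 * so, for example, a free page whose color works out to 3 sits on
 * vm_page_queue_free[3], reached as vm_page_queues[PQ_FREE + 3].pl.
 * Subtracting m->pc from m->queue recovers the queue group, which is why
 * tests of the form "(m->queue - m->pc) == PQ_CACHE" appear throughout
 * this file.
 */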

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void
vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0;; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t starta;
	vm_offset_t enda;
	register vm_offset_t vaddr;
{
	register vm_offset_t mapped;
	register vm_page_t m;
	register struct pglist *bucket;
	vm_size_t npages, page_range;
	register vm_offset_t new_start;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t first_managed_page;

	/* the biggest memory array is the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */

	vm_page_queue_init();

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the smallest power of 2 greater than or equal to the number of
	 * physical pages in the system.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct pglist *) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
	new_start = round_page(new_start);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = 0; i < vm_page_bucket_count; i++) {
		TAILQ_INIT(bucket);
		bucket++;
	}
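
	/*
	 * Illustrative note (not from the original source): the computation
	 * of vm_page_bucket_count above picks the smallest power of two that
	 * is >= atop(total).  For example, with 24MB of managed memory and
	 * 4K pages, atop(total) is 6144, so vm_page_bucket_count becomes 8192
	 * and vm_page_hash_mask becomes 0x1fff.  The power-of-two size is
	 * what lets vm_page_hash() reduce a hash value with a single mask
	 * instead of a modulo.
	 */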

	/*
	 * round (or truncate) the addresses to our page size.
	 */

	/*
	 * Pre-allocate maps and map entries that cannot be dynamically
	 * allocated via malloc().  The maps include the kernel_map and
	 * kmem_map which must be initialized before malloc() will work
	 * (obviously).  Also could include pager maps which would be
	 * allocated before kmeminit.
	 *
	 * Allow some kernel map entries... this should be plenty since people
	 * shouldn't be cluttering up the kernel map (they should use their
	 * own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
	    MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 * Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	start = round_page(new_start);

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */

	first_page = phys_avail[0] / PAGE_SIZE;
	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;

	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (start - phys_avail[biggestone])) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */

	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */

	new_start = round_page(start + page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->phys_addr = pa;
			m->flags = 0;
			m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
			m->queue = PQ_FREE + m->pc;
			TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
			++(*vm_page_queues[m->queue].lcnt);
			pa += PAGE_SIZE;
		}
	}

	return (mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This function depends on vm_page_bucket_count being a power of 2.
 */
static inline int
vm_page_hash(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	return ((((unsigned) object) >> 5) + (pindex >> 1)) & vm_page_hash_mask;
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object and page must be locked, and must be at splhigh.
 */

void
vm_page_insert(m, object, pindex)
	register vm_page_t m;
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register struct pglist *bucket;

	if (m->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */

	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	TAILQ_INSERT_TAIL(bucket, m, hashq);

	/*
	 * Now link into the object's list of backed pages.
	 */
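
	/*
	 * Note (added for clarity): from here on the page is reachable two
	 * ways: through the global object/offset hash bucket filled in above
	 * (used by vm_page_lookup()) and through the object's own memq list
	 * (used for whole-object traversals).  page_hint, set just below,
	 * caches the most recently inserted page so that
	 * vm_page_select_free() can guess the physically adjacent page for
	 * sequential allocations.
	 */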

	TAILQ_INSERT_TAIL(&object->memq, m, listq);
	m->flags |= PG_TABLED;
	m->object->page_hint = m;

	/*
	 * And show that the object has one more resident page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked, and at splhigh.
 */

void
vm_page_remove(m)
	register vm_page_t m;
{
	register struct pglist *bucket;

	if (!(m->flags & PG_TABLED))
		return;

	if (m->object->page_hint == m)
		m->object->page_hint = NULL;

	/*
	 * Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
	TAILQ_REMOVE(bucket, m, hashq);

	/*
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&m->object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	m->object->resident_page_count--;

	m->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t
vm_page_lookup(object, pindex)
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register vm_page_t m;
	register struct pglist *bucket;
	int s;

	/*
	 * Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];

	s = splvm();
	for (m = TAILQ_FIRST(bucket); m != NULL; m = TAILQ_NEXT(m, hashq)) {
		if ((m->object == object) && (m->pindex == pindex)) {
			splx(s);
			m->object->page_hint = m;
			return (m);
		}
	}
	splx(s);
	return (NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(m, new_object, new_pindex)
	register vm_page_t m;
	register vm_object_t new_object;
	vm_pindex_t new_pindex;
{
	int s;

	s = splvm();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	splx(s);
}

/*
 * vm_page_unqueue without any wakeup
 */
void
vm_page_unqueue_nowakeup(m)
	vm_page_t m;
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(pq->pl, m, pageq);
		--(*pq->cnt);
		--(*pq->lcnt);
	}
}

/*
 * vm_page_unqueue must be called at splhigh().
 */
void
vm_page_unqueue(m)
	vm_page_t m;
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(pq->pl, m, pageq);
		--(*pq->cnt);
		--(*pq->lcnt);
		/*
		 * Use the saved queue index here: m->queue has already been
		 * set to PQ_NONE above, so testing m->queue would never match.
		 */
		if ((queue - m->pc) == PQ_CACHE) {
			if ((cnt.v_cache_count + cnt.v_free_count) <
			    (cnt.v_free_reserved + cnt.v_cache_min))
				pagedaemon_wakeup();
		}
	}
}
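
/*
 * Illustrative usage sketch (not part of the original file): callers are
 * expected to bracket queue manipulation with an spl section, e.g.
 *
 *	s = splvm();
 *	vm_page_unqueue(m);
 *	m->queue = PQ_ACTIVE;
 *	TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
 *	splx(s);
 *
 * which is essentially what vm_page_activate() below does, together with
 * the counter updates that the real routine performs.
 */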

/*
 * Find a page on the specified queue with color optimization.
 */
vm_page_t
vm_page_list_find(basequeue, index)
	int basequeue, index;
{
#if PQ_L2_SIZE > 1

	int i, j;
	vm_page_t m;
	int hindex;

	for (j = 0; j < PQ_L1_SIZE; j++) {
		for (i = (PQ_L2_SIZE / 2) - (PQ_L1_SIZE - 1);
		    i > 0;
		    i -= PQ_L1_SIZE) {
			hindex = (index + (i + j)) & PQ_L2_MASK;
			m = TAILQ_FIRST(vm_page_queues[basequeue + hindex].pl);
			if (m)
				return m;

			hindex = (index - (i + j)) & PQ_L2_MASK;
			m = TAILQ_FIRST(vm_page_queues[basequeue + hindex].pl);
			if (m)
				return m;
		}
	}
	return NULL;
#else
	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
#endif

}

/*
 * Find a page on the specified queue with color optimization.
 */
vm_page_t
vm_page_select(object, pindex, basequeue)
	vm_object_t object;
	vm_pindex_t pindex;
	int basequeue;
{

#if PQ_L2_SIZE > 1
	int index;
	index = (pindex + object->pg_color) & PQ_L2_MASK;
	return vm_page_list_find(basequeue, index);

#else
	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
#endif

}

/*
 * Find a free or zero page, with specified preference.
 */
static vm_page_t
vm_page_select_free(object, pindex, prefqueue)
	vm_object_t object;
	vm_pindex_t pindex;
	int prefqueue;
{
#if PQ_L2_SIZE > 1
	int i, j;
	int index, hindex;
#endif
	vm_page_t m;
	int oqueuediff;

	if (prefqueue == PQ_ZERO)
		oqueuediff = PQ_FREE - PQ_ZERO;
	else
		oqueuediff = PQ_ZERO - PQ_FREE;

	if (object->page_hint) {
		if (object->page_hint->pindex == (pindex - 1)) {
			vm_offset_t last_phys;
			if ((object->page_hint->flags & PG_FICTITIOUS) == 0) {
				if ((object->page_hint < &vm_page_array[cnt.v_page_count-1]) &&
				    (object->page_hint >= &vm_page_array[0])) {
					int queue;
					last_phys = VM_PAGE_TO_PHYS(object->page_hint);
					m = PHYS_TO_VM_PAGE(last_phys + PAGE_SIZE);
					queue = m->queue - m->pc;
					if (queue == PQ_FREE || queue == PQ_ZERO) {
						return m;
					}
				}
			}
		}
	}

#if PQ_L2_SIZE > 1

	index = pindex + object->pg_color;
	/*
	 * These are special cased because of clock arithmetic.
	 */
	for (j = 0; j < PQ_L1_SIZE; j++) {
		for (i = (PQ_L2_SIZE / 2) - (PQ_L1_SIZE - 1);
		    (i + j) > 0;
		    i -= PQ_L1_SIZE) {

			hindex = prefqueue + ((index + (i + j)) & PQ_L2_MASK);
			if ((m = TAILQ_FIRST(vm_page_queues[hindex].pl)) != NULL)
				return m;
			if ((m = TAILQ_FIRST(vm_page_queues[hindex + oqueuediff].pl)) != NULL)
				return m;

			hindex = prefqueue + ((index - (i + j)) & PQ_L2_MASK);
			if ((m = TAILQ_FIRST(vm_page_queues[hindex].pl)) != NULL)
				return m;
			if ((m = TAILQ_FIRST(vm_page_queues[hindex + oqueuediff].pl)) != NULL)
				return m;
		}
	}
#else
	if ((m = TAILQ_FIRST(vm_page_queues[prefqueue].pl)) != NULL)
		return m;
	else
		return TAILQ_FIRST(vm_page_queues[prefqueue + oqueuediff].pl);
#endif

	return NULL;
}
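
/*
 * Illustrative note (not from the original source): vm_page_select_free()
 * first tries the page physically following the object's page_hint, which
 * lets sequential allocations land on physically contiguous pages when
 * possible.  Failing that, it searches outward from the preferred color.
 * With prefqueue == PQ_ZERO, oqueuediff is PQ_FREE - PQ_ZERO, so
 * "hindex + oqueuediff" is simply the same color on the other queue group;
 * each candidate color is therefore checked on the zeroed queues first and
 * the ordinary free queues second (or vice versa for PQ_FREE callers).
 */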

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	VM_ALLOC_ZERO		zero page
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, pindex, page_req)
	vm_object_t object;
	vm_pindex_t pindex;
	int page_req;
{
	register vm_page_t m;
	struct vpgqueues *pq;
	int queue;
	int s;

#ifdef DIAGNOSTIC
	m = vm_page_lookup(object, pindex);
	if (m)
		panic("vm_page_alloc: page already allocated");
#endif

	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	s = splvm();

	switch (page_req) {

	case VM_ALLOC_NORMAL:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(NORMAL): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select(object, pindex, PQ_CACHE);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_ZERO:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			m = vm_page_select_free(object, pindex, PQ_ZERO);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(ZERO): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select(object, pindex, PQ_CACHE);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(ZERO): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_SYSTEM:
		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
		    ((cnt.v_cache_count == 0) &&
		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(SYSTEM): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select(object, pindex, PQ_CACHE);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(SYSTEM): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_INTERRUPT:
		if (cnt.v_free_count > 0) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
		} else {
			splx(s);
			pagedaemon_wakeup();
			return (NULL);
		}
		break;

	default:
		panic("vm_page_alloc: invalid allocation class");
	}

	queue = m->queue;
	if (queue == PQ_ZERO)
		--vm_page_zero_count;
	pq = &vm_page_queues[queue];
	TAILQ_REMOVE(pq->pl, m, pageq);
	--(*pq->cnt);
	--(*pq->lcnt);
	if ((m->queue - m->pc) == PQ_ZERO) {
		m->flags = PG_ZERO|PG_BUSY;
	} else if ((m->queue - m->pc) == PQ_CACHE) {
		vm_page_remove(m);
		m->flags = PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}
	m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;
	m->dirty = 0;
	m->queue = PQ_NONE;

	/* XXX before splx until vm_page_insert is safe */
	vm_page_insert(m, object, pindex);

	splx(s);
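
	/*
	 * Note (added for clarity): the allocation classes above differ only
	 * in how deep they may dig.  VM_ALLOC_NORMAL and VM_ALLOC_ZERO need
	 * cnt.v_free_count >= cnt.v_free_reserved or they fall back to
	 * reclaiming a cached page; VM_ALLOC_SYSTEM may also dip down to
	 * cnt.v_interrupt_free_min when the cache is empty; and
	 * VM_ALLOC_INTERRUPT may take the very last free page.  Callers that
	 * get back NULL typically wait for the pagedaemon and retry.
	 */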

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (((cnt.v_free_count + cnt.v_cache_count) <
	    (cnt.v_free_reserved + cnt.v_cache_min)) ||
	    (cnt.v_free_count < cnt.v_pageout_free_min))
		pagedaemon_wakeup();

	return (m);
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void
vm_page_activate(m)
	register vm_page_t m;
{
	int s;

	s = splvm();
	if (m->queue == PQ_ACTIVE)
		panic("vm_page_activate: already active");

	if ((m->queue - m->pc) == PQ_CACHE)
		cnt.v_reactivated++;

	vm_page_unqueue(m);

	if (m->wire_count == 0) {
		m->queue = PQ_ACTIVE;
		++(*vm_page_queues[PQ_ACTIVE].lcnt);
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
		cnt.v_active_count++;
	}
	splx(s);
}

/*
 * helper routine for vm_page_free and vm_page_free_zero
 */
static int
vm_page_freechk_and_unqueue(m)
	vm_page_t m;
{
	if (m->busy ||
	    (m->flags & PG_BUSY) ||
	    ((m->queue - m->pc) == PQ_FREE) ||
	    (m->hold_count != 0)) {
		printf("vm_page_free: pindex(%ld), busy(%d), PG_BUSY(%d), hold(%d)\n",
		    m->pindex, m->busy,
		    (m->flags & PG_BUSY) ? 1 : 0, m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}

	vm_page_remove(m);
	vm_page_unqueue_nowakeup(m);
	if ((m->flags & PG_FICTITIOUS) != 0) {
		return 0;
	}
	if (m->wire_count != 0) {
		if (m->wire_count > 1) {
			panic("vm_page_free: invalid wire count (%d), pindex: 0x%x",
			    m->wire_count, m->pindex);
		}
		m->wire_count = 0;
		cnt.v_wire_count--;
	}

	return 1;
}

/*
 * helper routine for vm_page_free and vm_page_free_zero
 */
static __inline void
vm_page_free_wakeup()
{

	/*
	 * if pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * Wake up processes that are waiting on memory if we hit a
	 * high water mark, and wake up the scheduler process if we have
	 * lots of memory; it will swap in processes.
	 */
	if (vm_pages_needed &&
	    ((cnt.v_free_count + cnt.v_cache_count) >= cnt.v_free_min)) {
		wakeup(&cnt.v_free_count);
		vm_pages_needed = 0;
	}
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free(m)
	register vm_page_t m;
{
	int s;
	struct vpgqueues *pq;

	s = splvm();

	cnt.v_tfree++;

	if (!vm_page_freechk_and_unqueue(m)) {
		splx(s);
		return;
	}

	m->queue = PQ_FREE + m->pc;
	pq = &vm_page_queues[m->queue];
	++(*pq->lcnt);
	++(*pq->cnt);
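
	/*
	 * Note (added for clarity): the page returns to the free queue of its
	 * own color (PQ_FREE + m->pc), so later allocations made through
	 * vm_page_select_free() keep their cache-coloring preference.
	 */
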
	/*
	 * If the pageout process is grabbing the page, it is likely
	 * that the page is NOT in the cache.  It is more likely that
	 * the page will be partially in the cache if it is being
	 * explicitly freed.
	 */
	if (curproc == pageproc) {
		TAILQ_INSERT_TAIL(pq->pl, m, pageq);
	} else {
		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	}
	vm_page_free_wakeup();
	splx(s);
}

void
vm_page_free_zero(m)
	register vm_page_t m;
{
	int s;
	struct vpgqueues *pq;

	s = splvm();

	cnt.v_tfree++;

	if (!vm_page_freechk_and_unqueue(m)) {
		splx(s);
		return;
	}

	m->queue = PQ_ZERO + m->pc;
	pq = &vm_page_queues[m->queue];
	++(*pq->lcnt);
	++(*pq->cnt);

	TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	++vm_page_zero_count;
	vm_page_free_wakeup();
	splx(s);
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void
vm_page_wire(m)
	register vm_page_t m;
{
	int s;

	if (m->wire_count == 0) {
		s = splvm();
		vm_page_unqueue(m);
		splx(s);
		cnt.v_wire_count++;
	}
	++(*vm_page_queues[PQ_NONE].lcnt);
	m->wire_count++;
	m->flags |= PG_MAPPED;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void
vm_page_unwire(m)
	register vm_page_t m;
{
	int s;

	s = splvm();

	if (m->wire_count > 0)
		m->wire_count--;

	if (m->wire_count == 0) {
		cnt.v_wire_count--;
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->queue = PQ_ACTIVE;
		++(*vm_page_queues[PQ_ACTIVE].lcnt);
		cnt.v_active_count++;
	}
	splx(s);
}


/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t m;
{
	int s;

	/*
	 * Only move active pages -- ignore locked or already inactive ones.
	 *
	 * XXX: sometimes we get pages which aren't wired down or on any queue -
	 * we need to put them on the inactive queue also, otherwise we lose
	 * track of them.  Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	s = splvm();
	if (m->wire_count == 0 && m->hold_count == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;
		vm_page_unqueue(m);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->queue = PQ_INACTIVE;
		++(*vm_page_queues[PQ_INACTIVE].lcnt);
		cnt.v_inactive_count++;
	}
	splx(s);
}
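
/*
 * Illustrative note (not from the original source): taken together, the
 * routines in this file move a page through the usual lifecycle
 *
 *	PQ_ACTIVE -> PQ_INACTIVE -> PQ_CACHE -> PQ_FREE / PQ_ZERO
 *
 * while a wired page sits on no queue at all (PQ_NONE) until its last
 * wiring is released by vm_page_unwire(), which places it back on the
 * active queue.
 */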

/*
 *	vm_page_cache:
 *
 *	Put the specified page onto the page cache queue (if appropriate).
 */
void
vm_page_cache(m)
	register vm_page_t m;
{
	int s;

	if ((m->flags & PG_BUSY) || m->busy || m->wire_count) {
		printf("vm_page_cache: attempting to cache busy page\n");
		return;
	}
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	vm_page_protect(m, VM_PROT_NONE);
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %d", m->pindex);
	}
	s = splvm();
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	++(*vm_page_queues[m->queue].lcnt);
	TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
	cnt.v_cache_count++;
	vm_page_free_wakeup();
	splx(s);
}


/*
 * mapping function for valid bits or for dirty bits in
 * a page
 */
inline int
vm_page_bits(int base, int size)
{
	u_short chunk;

	if ((base == 0) && (size >= PAGE_SIZE))
		return VM_PAGE_BITS_ALL;
	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	base = (base % PAGE_SIZE) / DEV_BSIZE;
	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
	return (chunk << base) & VM_PAGE_BITS_ALL;
}

/*
 * set a page valid and clean
 */
void
vm_page_set_validclean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int pagebits = vm_page_bits(base, size);

	m->valid |= pagebits;
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE)
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}

/*
 * set a page (partially) invalid
 */
void
vm_page_set_invalid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	m->valid &= ~(bits = vm_page_bits(base, size));
	if (m->valid == 0)
		m->dirty &= ~bits;
}

/*
 * is (partial) page valid?
 */
int
vm_page_is_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

void
vm_page_test_dirty(m)
	vm_page_t m;
{
	if ((m->dirty != VM_PAGE_BITS_ALL) &&
	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		m->dirty = VM_PAGE_BITS_ALL;
	}
}

/*
 * This interface is for merging with malloc() someday.
 * Even if we never implement compaction so that contiguous allocation
 * works after initialization time, malloc()'s data structures are good
 * for statistics and for allocations of less than a page.
 */
void *
contigmalloc(size, type, flags, low, high, alignment, boundary)
	unsigned long size;	/* should be size_t here and for malloc() */
	int type;
	int flags;
	unsigned long low;
	unsigned long high;
	unsigned long alignment;
	unsigned long boundary;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	vm_page_t pga = vm_page_array;

	size = round_page(size);
	if (size == 0)
		panic("vm_page_alloc_contig: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("vm_page_alloc_contig: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("vm_page_alloc_contig: boundary must be a power of 2");

	start = 0;
	s = splvm();
again:
	/*
	 * Find first page in array that is free, within range, aligned, and
	 * such that the boundary won't be crossed.
	 */
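	/*
	 * Note (added for clarity): in the scan below,
	 * "(phys & (alignment - 1)) == 0" tests the alignment, and
	 * "((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0" tests that
	 * the first and last byte of the candidate range agree in every bit
	 * above the boundary size, i.e. that the range does not cross a
	 * boundary-aligned region.  A boundary of 0 disables that check.
	 */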
	for (i = start; i < cnt.v_page_count; i++) {
		phys = VM_PAGE_TO_PHYS(&pga[i]);
		if (((pga[i].queue - pga[i].pc) == PQ_FREE) &&
		    (phys >= low) && (phys < high) &&
		    ((phys & (alignment - 1)) == 0) &&
		    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
			break;
	}

	/*
	 * If the above failed or we will exceed the upper bound, fail.
	 */
	if ((i == cnt.v_page_count) ||
	    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
		splx(s);
		return (NULL);
	}
	start = i;

	/*
	 * Check successive pages for contiguous and free.
	 */
	for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
		if ((VM_PAGE_TO_PHYS(&pga[i]) !=
		    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
		    ((pga[i].queue - pga[i].pc) != PQ_FREE)) {
			start++;
			goto again;
		}
	}

	/*
	 * We've found a contiguous chunk that meets our requirements.
	 * Allocate kernel VM, unfree and assign the physical pages to it and
	 * return kernel VM pointer.
	 */
	tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);
	if (addr == 0) {
		splx(s);
		return (NULL);
	}

	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		vm_page_t m = &pga[i];

		TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
		--(*vm_page_queues[m->queue].lcnt);
		cnt.v_free_count--;
		m->valid = VM_PAGE_BITS_ALL;
		m->flags = 0;
		m->dirty = 0;
		m->wire_count = 0;
		m->busy = 0;
		m->queue = PQ_NONE;
		vm_page_insert(m, kernel_object,
		    OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
		vm_page_wire(m);
		pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
		tmp_addr += PAGE_SIZE;
	}

	splx(s);
	return ((void *)addr);
}

vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
	vm_offset_t size;
	vm_offset_t low;
	vm_offset_t high;
	vm_offset_t alignment;
{
	return ((vm_offset_t)contigmalloc(size, M_DEVBUF, M_NOWAIT, low, high,
	    alignment, 0ul));
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;

	db_printf("PQ_FREE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_FREE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_CACHE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_CACHE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ZERO:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_ZERO + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
	    *vm_page_queues[PQ_ACTIVE].lcnt,
	    *vm_page_queues[PQ_INACTIVE].lcnt);
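
	/*
	 * Note (added for clarity): the lcnt values printed here are the
	 * per-queue lengths kept in pqcnt[] (see vm_page_queue_init()) and
	 * maintained by the ++/-- pairs throughout this file; they are
	 * separate from the global cnt.v_*_count statistics shown by
	 * "show page".
	 */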
}
#endif /* DDB */
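
/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * needs a physically contiguous, 64K-aligned buffer below 16MB could call
 *
 *	void *buf = contigmalloc(65536, M_DEVBUF, M_NOWAIT,
 *	    0ul, 0xfffffful, 65536ul, 0ul);
 *
 * and must check for a NULL return; vm_page_alloc_contig() is the older
 * wrapper that returns the same kind of allocation as a vm_offset_t.
 */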