/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - physical page management.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vm.h>
#include <sys/vtrace.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/tnf_probe.h>
#include <sys/condvar_impl.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/strlog.h>
#include <sys/mman.h>
#include <sys/ontrap.h>
#include <sys/lgrp.h>
#include <sys/vfs.h>

#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/pvn.h>
#include <vm/seg_kmem.h>
#include <vm/vm_dep.h>
#include <sys/vm_usage.h>
#include <fs/fs_subr.h>

static int nopageage = 0;

static pgcnt_t max_page_get;	/* max page_get request size in pages */
pgcnt_t total_pages = 0;	/* total number of pages (used by /proc) */

/*
 * freemem_lock protects all freemem variables:
 * availrmem.  This lock also protects the globals which track the
 * availrmem changes for accurate kernel footprint calculation.
 * See below for an explanation of these globals.
 */
kmutex_t freemem_lock;
pgcnt_t availrmem;
pgcnt_t availrmem_initial;

/*
 * These globals track availrmem changes to get a more accurate
 * estimate of the kernel size.  Historically pp_kernel is used for
 * kernel size and is based on availrmem.  But availrmem is adjusted for
 * locked pages in the system, not just for kernel locked pages.
 * These new counters will track the pages locked through segvn and
 * by explicit user locking.
 *
 * segvn_pages_locked : This keeps track, on a global basis, of how many
 * pages are currently locked because of I/O.
 *
 * pages_locked : How many pages are locked because of user specified
 * locking through mlock or plock.
 *
 * pages_useclaim, pages_claimed : These two variables track the
 * claim adjustments because of the protection changes on a segvn segment.
 *
 * All these globals are protected by the same lock which protects availrmem.
 */
pgcnt_t segvn_pages_locked;
pgcnt_t pages_locked;
pgcnt_t pages_useclaim;
pgcnt_t pages_claimed;


/*
 * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
 */
static kmutex_t new_freemem_lock;
static uint_t freemem_wait;	/* someone waiting for freemem */
static kcondvar_t freemem_cv;

/*
 * The logical page free list is maintained as two lists, the 'free'
 * and the 'cache' lists.
 * The free list contains those pages that should be reused first.
 *
 * The implementation of the lists is machine dependent.
 * page_get_freelist(), page_get_cachelist(),
 * page_list_sub(), and page_list_add()
 * form the interface to the machine dependent implementation.
 *
 * Pages with p_free set are on the cache list.
 * Pages with p_free and p_age set are on the free list.
 *
 * A page may be locked while on either list.
 */

/*
 * free list accounting stuff.
 *
 * Spread out the value for the number of pages on the
 * page free and page cache lists.  If there is just one
 * value, then it must be under just one lock.
 * The lock contention and cache traffic are a real bother.
 *
 * When we acquire and then drop a single pcf lock
 * we can start in the middle of the array of pcf structures.
 * If we acquire more than one pcf lock at a time, we need to
 * start at the front to avoid deadlocking.
 *
 * pcf_count holds the number of pages in each pool.
 *
 * pcf_block is set when page_create_get_something() has asked the
 * PSM page freelist and page cachelist routines without specifying
 * a color and nothing came back.  This is used to block anything
 * else from moving pages from one list to the other while the
 * lists are searched again.  If a page is freed while pcf_block is
 * set, then pcf_reserve is incremented.  pcgs_unblock() takes care
 * of clearing pcf_block, doing the wakeups, etc.
 */

#if NCPU <= 4
#define	PAD	2
#define	PCF_FANOUT	4
static	uint_t	pcf_mask = PCF_FANOUT - 1;
#else
#define	PAD	10
#ifdef sun4v
#define	PCF_FANOUT	32
#else
#define	PCF_FANOUT	128
#endif
static	uint_t	pcf_mask = PCF_FANOUT - 1;
#endif

struct pcf {
	kmutex_t	pcf_lock;	/* protects the structure */
	uint_t		pcf_count;	/* page count */
	uint_t		pcf_wait;	/* number of waiters */
	uint_t		pcf_block;	/* pcgs flag to page_free() */
	uint_t		pcf_reserve;	/* pages freed after pcf_block set */
	uint_t		pcf_fill[PAD];	/* to line up on the caches */
};

static struct	pcf	pcf[PCF_FANOUT];
#define	PCF_INDEX()	((CPU->cpu_id) & (pcf_mask))

kmutex_t	pcgs_lock;		/* serializes page_create_get_ */
kmutex_t	pcgs_cagelock;		/* serializes NOSLEEP cage allocs */
kmutex_t	pcgs_wait_lock;		/* used for delay in pcgs */
static kcondvar_t	pcgs_cv;	/* cv for delay in pcgs */
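
/*
 * Illustrative sketch (not part of the interface): a hot-path update
 * of the fanned-out counters picks a bucket with PCF_INDEX() so that
 * concurrent updates on different CPUs rarely contend on the same
 * lock.  The same pattern appears in page_create_va_large() below;
 * `p' here is hypothetical local state:
 *
 *	struct pcf *p = &pcf[PCF_INDEX()];
 *	mutex_enter(&p->pcf_lock);
 *	p->pcf_count++;			/+ one page returned to a pool +/
 *	mutex_exit(&p->pcf_lock);
 */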

#ifdef VM_STATS

/*
 * No locks, but so what, they are only statistics.
 */

static struct page_tcnt {
	int	pc_free_cache;		/* free's into cache list */
	int	pc_free_dontneed;	/* free's with dontneed */
	int	pc_free_pageout;	/* free's from pageout */
	int	pc_free_free;		/* free's into free list */
	int	pc_free_pages;		/* free's into large page free list */
	int	pc_destroy_pages;	/* large page destroy's */
	int	pc_get_cache;		/* get's from cache list */
	int	pc_get_free;		/* get's from free list */
	int	pc_reclaim;		/* reclaim's */
	int	pc_abortfree;		/* abort's of free pages */
	int	pc_find_hit;		/* find's that find page */
	int	pc_find_miss;		/* find's that don't find page */
	int	pc_destroy_free;	/* # of free pages destroyed */
#define	PC_HASH_CNT	(4*PAGE_HASHAVELEN)
	int	pc_find_hashlen[PC_HASH_CNT+1];
	int	pc_addclaim_pages;
	int	pc_subclaim_pages;
	int	pc_free_replacement_page[2];
	int	pc_try_demote_pages[6];
	int	pc_demote_pages[2];
} pagecnt;

uint_t	hashin_count;
uint_t	hashin_not_held;
uint_t	hashin_already;

uint_t	hashout_count;
uint_t	hashout_not_held;

uint_t	page_create_count;
uint_t	page_create_not_enough;
uint_t	page_create_not_enough_again;
uint_t	page_create_zero;
uint_t	page_create_hashout;
uint_t	page_create_page_lock_failed;
uint_t	page_create_trylock_failed;
uint_t	page_create_found_one;
uint_t	page_create_hashin_failed;
uint_t	page_create_dropped_phm;

uint_t	page_create_new;
uint_t	page_create_exists;
uint_t	page_create_putbacks;
uint_t	page_create_overshoot;

uint_t	page_reclaim_zero;
uint_t	page_reclaim_zero_locked;

uint_t	page_rename_exists;
uint_t	page_rename_count;

uint_t	page_lookup_cnt[20];
uint_t	page_lookup_nowait_cnt[10];
uint_t	page_find_cnt;
uint_t	page_exists_cnt;
uint_t	page_exists_forreal_cnt;
uint_t	page_lookup_dev_cnt;
uint_t	get_cachelist_cnt;
uint_t	page_create_cnt[10];
uint_t	alloc_pages[8];
uint_t	page_exphcontg[19];
uint_t	page_create_large_cnt[10];

/*
 * Collects statistics.
 */
#define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
	uint_t	mylen = 0; \
			\
	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash, mylen++) { \
		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
			break; \
	} \
	if ((pp) != NULL) \
		pagecnt.pc_find_hit++; \
	else \
		pagecnt.pc_find_miss++; \
	if (mylen > PC_HASH_CNT) \
		mylen = PC_HASH_CNT; \
	pagecnt.pc_find_hashlen[mylen]++; \
}

#else	/* VM_STATS */

/*
 * Don't collect statistics
 */
#define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
			break; \
	} \
}

#endif	/* VM_STATS */



#ifdef DEBUG
#define	MEMSEG_SEARCH_STATS
#endif

#ifdef MEMSEG_SEARCH_STATS
struct memseg_stats {
	uint_t nsearch;
	uint_t nlastwon;
	uint_t nhashwon;
	uint_t nnotfound;
} memseg_stats;

#define	MEMSEG_STAT_INCR(v) \
	atomic_add_32(&memseg_stats.v, 1)
#else
#define	MEMSEG_STAT_INCR(x)
#endif

struct memseg *memsegs;		/* list of memory segments */


static void page_init_mem_config(void);
static int page_do_hashin(page_t *, vnode_t *, u_offset_t);
static void page_do_hashout(page_t *);
static void page_capture_init();
int page_capture_take_action(page_t *, uint_t, void *);

static void page_demote_vp_pages(page_t *);

/*
 * vm subsystem related initialization
 */
void
vm_init(void)
{
	boolean_t callb_vm_cpr(void *, int);

	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
	page_init_mem_config();
	page_retire_init();
	vm_usage_init();
	page_capture_init();
}

/*
 * This function is called at startup and when memory is added or deleted.
 */
void
init_pages_pp_maximum()
{
	static pgcnt_t	p_min;
	static pgcnt_t	pages_pp_maximum_startup;
	static pgcnt_t	avrmem_delta;
	static int	init_done;
	static int	user_set;	/* true if set in /etc/system */

	if (init_done == 0) {

		/* If the user specified a value, save it */
		if (pages_pp_maximum != 0) {
			user_set = 1;
			pages_pp_maximum_startup = pages_pp_maximum;
		}

		/*
		 * Setting of pages_pp_maximum is based first time
		 * on the value of availrmem just after the start-up
		 * allocations.  To preserve this relationship at run
		 * time, use a delta from availrmem_initial.
		 */
		ASSERT(availrmem_initial >= availrmem);
		avrmem_delta = availrmem_initial - availrmem;

		/* The allowable floor of pages_pp_maximum */
		p_min = tune.t_minarmem + 100;

		/* Make sure we don't come through here again. */
		init_done = 1;
	}
	/*
	 * Determine pages_pp_maximum, the number of currently available
	 * pages (availrmem) that can't be `locked'.  If not set by
	 * the user, we set it to 4% of the currently available memory
	 * plus 4MB.
	 * But we also insist that it be greater than tune.t_minarmem;
	 * otherwise a process could lock down a lot of memory, get swapped
	 * out, and never have enough to get swapped back in.
	 */
	if (user_set)
		pages_pp_maximum = pages_pp_maximum_startup;
	else
		pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25)
		    + btop(4 * 1024 * 1024);

	if (pages_pp_maximum <= p_min) {
		pages_pp_maximum = p_min;
	}
}

void
set_max_page_get(pgcnt_t target_total_pages)
{
	max_page_get = target_total_pages / 2;
}

static pgcnt_t pending_delete;

/*ARGSUSED*/
static void
page_mem_config_post_add(
	void *arg,
	pgcnt_t delta_pages)
{
	set_max_page_get(total_pages - pending_delete);
	init_pages_pp_maximum();
}

/*ARGSUSED*/
static int
page_mem_config_pre_del(
	void *arg,
	pgcnt_t delta_pages)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	return (0);
}

/*ARGSUSED*/
static void
page_mem_config_post_del(
	void *arg,
	pgcnt_t delta_pages,
	int cancelled)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	if (!cancelled)
		init_pages_pp_maximum();
}

static kphysm_setup_vector_t page_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	page_mem_config_post_add,
	page_mem_config_pre_del,
	page_mem_config_post_del,
};

static void
page_init_mem_config(void)
{
	int ret;

	ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL);
	ASSERT(ret == 0);
}

/*
 * Evenly spread out the PCF counters for large free pages
 */
static void
page_free_large_ctr(pgcnt_t npages)
{
	static struct pcf	*p = pcf;
	pgcnt_t			lump;

	freemem += npages;

	lump = roundup(npages, PCF_FANOUT) / PCF_FANOUT;

	while (npages > 0) {

		ASSERT(!p->pcf_block);

		if (lump < npages) {
			p->pcf_count += (uint_t)lump;
			npages -= lump;
		} else {
			p->pcf_count += (uint_t)npages;
			npages = 0;
		}

		ASSERT(!p->pcf_wait);

		if (++p > &pcf[PCF_FANOUT - 1])
			p = pcf;
	}

	ASSERT(npages == 0);
}

/*
 * Add a physical chunk of memory to the system free lists during startup.
 * Platform specific startup() allocates the memory for the page structs.
 *
 * num	- number of page structures
 * base - page number (pfn) to be associated with the first page.
 *
 * Since we are doing this during startup (ie. single threaded), we will
 * use shortcut routines to avoid any locking overhead while putting all
 * these pages on the freelists.
 *
 * NOTE: Any changes performed to page_free(), must also be performed to
 *	 add_physmem() since this is how we initialize all page_t's at
 *	 boot time.
 */
void
add_physmem(
	page_t	*pp,
	pgcnt_t	num,
	pfn_t	pnum)
{
	page_t	*root = NULL;
	uint_t	szc = page_num_pagesizes() - 1;
	pgcnt_t	large = page_get_pagecnt(szc);
	pgcnt_t	cnt = 0;

	TRACE_2(TR_FAC_VM, TR_PAGE_INIT,
	    "add_physmem:pp %p num %lu", pp, num);

	/*
	 * Arbitrarily limit the max page_get request
	 * to 1/2 of the page structs we have.
	 */
	total_pages += num;
	set_max_page_get(total_pages);

	PLCNT_MODIFY_MAX(pnum, (long)num);

	/*
	 * The physical space for the pages array
	 * representing ram pages has already been
	 * allocated.  Here we initialize each lock
	 * in the page structure, and put each on
	 * the free list
	 */
	for (; num; pp++, pnum++, num--) {

		/*
		 * this needs to fill in the page number
		 * and do any other arch specific initialization
		 */
		add_physmem_cb(pp, pnum);

		pp->p_lckcnt = 0;
		pp->p_cowcnt = 0;
		pp->p_slckcnt = 0;

		/*
		 * Initialize the page lock as unlocked, since nobody
		 * can see or access this page yet.
		 */
		pp->p_selock = 0;

		/*
		 * Initialize IO lock
		 */
		page_iolock_init(pp);

		/*
		 * initialize other fields in the page_t
		 */
		PP_SETFREE(pp);
		page_clr_all_props(pp);
		PP_SETAGED(pp);
		pp->p_offset = (u_offset_t)-1;
		pp->p_next = pp;
		pp->p_prev = pp;

		/*
		 * Simple case: System doesn't support large pages.
		 */
		if (szc == 0) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Handle unaligned pages, we collect them up onto
		 * the root page until we have a full large page.
		 */
		if (!IS_P2ALIGNED(pnum, large)) {

			/*
			 * If not in a large page,
			 * just free as small page.
			 */
			if (root == NULL) {
				pp->p_szc = 0;
				page_free_at_startup(pp);
				continue;
			}

			/*
			 * Link a constituent page into the large page.
			 */
			pp->p_szc = szc;
			page_list_concat(&root, &pp);

			/*
			 * When large page is fully formed, free it.
			 */
			if (++cnt == large) {
				page_free_large_ctr(cnt);
				page_list_add_pages(root, PG_LIST_ISINIT);
				root = NULL;
				cnt = 0;
			}
			continue;
		}

		/*
		 * At this point we have a page number which
		 * is aligned.  We assert that we aren't already
		 * in a different large page.
		 */
		ASSERT(IS_P2ALIGNED(pnum, large));
		ASSERT(root == NULL && cnt == 0);

		/*
		 * If insufficient number of pages left to form
		 * a large page, just free the small page.
		 */
		if (num < large) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Otherwise start a new large page.
		 */
		pp->p_szc = szc;
		cnt++;
		root = pp;
	}
	ASSERT(root == NULL && cnt == 0);
}

/*
 * Find a page representing the specified [vp, offset].
 * If we find the page but it is in transit coming in,
 * it will have an "exclusive" lock and we wait for
 * the i/o to complete.  A page found on the free list
 * is always reclaimed and then locked.  On success, the page
 * is locked, its data is valid and it isn't on the free
 * list, while a NULL is returned if the page doesn't exist.
 */
page_t *
page_lookup(vnode_t *vp, u_offset_t off, se_t se)
{
	return (page_lookup_create(vp, off, se, NULL, NULL, 0));
}
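
/*
 * Illustrative use of page_lookup() (a sketch, not taken from a real
 * caller): look a page up shared, consume its contents, and drop the
 * lock.  A NULL return means no page exists for [vp, off].
 *
 *	pp = page_lookup(vp, off, SE_SHARED);
 *	if (pp != NULL) {
 *		... read the page's contents ...
 *		page_unlock(pp);
 *	}
 */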

/*
 * Find a page representing the specified [vp, offset].
 * We either return the one we found or, if passed in,
 * create one with identity of [vp, offset] of the
 * pre-allocated page.  If we find an existing page but it is
 * in transit coming in, it will have an "exclusive" lock
 * and we wait for the i/o to complete.  A page found on
 * the free list is always reclaimed and then locked.
 * On success, the page is locked, its data is valid and
 * it isn't on the free list, while a NULL is returned
 * if the page doesn't exist and newpp is NULL;
 */
page_t *
page_lookup_create(
	vnode_t *vp,
	u_offset_t off,
	se_t se,
	page_t *newpp,
	spgcnt_t *nrelocp,
	int flags)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		hash_locked;
	uint_t		es;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_lookup_cnt[0]);
	ASSERT(newpp ? PAGE_EXCL(newpp) : 1);

	/*
	 * Acquire the appropriate page hash lock since
	 * we have to search the hash list.  Pages that
	 * hash to this list can't change identity while
	 * this lock is held.
	 */
	hash_locked = 0;
	index = PAGE_HASH_FUNC(vp, off);
	phm = NULL;
top:
	PAGE_HASH_SEARCH(index, pp, vp, off);
	if (pp != NULL) {
		VM_STAT_ADD(page_lookup_cnt[1]);
		es = (newpp != NULL) ? 1 : 0;
		es |= flags;
		if (!hash_locked) {
			VM_STAT_ADD(page_lookup_cnt[2]);
			if (!page_try_reclaim_lock(pp, se, es)) {
				/*
				 * On a miss, acquire the phm.  Then
				 * next time, page_lock() will be called,
				 * causing a wait if the page is busy.
				 * just looping with page_trylock() would
				 * get pretty boring.
				 */
				VM_STAT_ADD(page_lookup_cnt[3]);
				phm = PAGE_HASH_MUTEX(index);
				mutex_enter(phm);
				hash_locked = 1;
				goto top;
			}
		} else {
			VM_STAT_ADD(page_lookup_cnt[4]);
			if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) {
				VM_STAT_ADD(page_lookup_cnt[5]);
				goto top;
			}
		}

		/*
		 * Since `pp' is locked it can not change identity now.
		 * Reconfirm we locked the correct page.
		 *
		 * Both the p_vnode and p_offset *must* be cast volatile
		 * to force a reload of their values: The PAGE_HASH_SEARCH
		 * macro will have stuffed p_vnode and p_offset into
		 * registers before calling page_trylock(); another thread,
		 * actually holding the hash lock, could have changed the
		 * page's identity in memory, but our registers would not
		 * be changed, fooling the reconfirmation.  If the hash
		 * lock was held during the search, the casting would
		 * not be needed.
		 */
		VM_STAT_ADD(page_lookup_cnt[6]);
		if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
		    ((volatile u_offset_t)(pp->p_offset) != off)) {
			VM_STAT_ADD(page_lookup_cnt[7]);
			if (hash_locked) {
				panic("page_lookup_create: lost page %p",
				    (void *)pp);
				/*NOTREACHED*/
			}
			page_unlock(pp);
			phm = PAGE_HASH_MUTEX(index);
			mutex_enter(phm);
			hash_locked = 1;
			goto top;
		}

		/*
		 * If page_trylock() was called, then pp may still be on
		 * the cachelist (can't be on the free list, it would not
		 * have been found in the search).  If it is on the
		 * cachelist it must be pulled now.  To pull the page from
		 * the cachelist, it must be exclusively locked.
		 *
		 * The other big difference between page_trylock() and
		 * page_lock(), is that page_lock() will pull the
		 * page from whatever free list (the cache list in this
		 * case) the page is on.  If page_trylock() was used
		 * above, then we have to do the reclaim ourselves.
		 */
		if ((!hash_locked) && (PP_ISFREE(pp))) {
			ASSERT(PP_ISAGED(pp) == 0);
			VM_STAT_ADD(page_lookup_cnt[8]);

			/*
			 * page_reclaim will ensure that we
			 * have this page exclusively
			 */

			if (!page_reclaim(pp, NULL)) {
				/*
				 * Page_reclaim dropped whatever lock
				 * we held.
				 */
				VM_STAT_ADD(page_lookup_cnt[9]);
				phm = PAGE_HASH_MUTEX(index);
				mutex_enter(phm);
				hash_locked = 1;
				goto top;
			} else if (se == SE_SHARED && newpp == NULL) {
				VM_STAT_ADD(page_lookup_cnt[10]);
				page_downgrade(pp);
			}
		}

		if (hash_locked) {
			mutex_exit(phm);
		}

		if (newpp != NULL && pp->p_szc < newpp->p_szc &&
		    PAGE_EXCL(pp) && nrelocp != NULL) {
			ASSERT(nrelocp != NULL);
			(void) page_relocate(&pp, &newpp, 1, 1, nrelocp,
			    NULL);
			if (*nrelocp > 0) {
				VM_STAT_COND_ADD(*nrelocp == 1,
				    page_lookup_cnt[11]);
				VM_STAT_COND_ADD(*nrelocp > 1,
				    page_lookup_cnt[12]);
				pp = newpp;
				se = SE_EXCL;
			} else {
				if (se == SE_SHARED) {
					page_downgrade(pp);
				}
				VM_STAT_ADD(page_lookup_cnt[13]);
			}
		} else if (newpp != NULL && nrelocp != NULL) {
			if (PAGE_EXCL(pp) && se == SE_SHARED) {
				page_downgrade(pp);
			}
			VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc,
			    page_lookup_cnt[14]);
			VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc,
			    page_lookup_cnt[15]);
			VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc,
			    page_lookup_cnt[16]);
		} else if (newpp != NULL && PAGE_EXCL(pp)) {
			se = SE_EXCL;
		}
	} else if (!hash_locked) {
		VM_STAT_ADD(page_lookup_cnt[17]);
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		hash_locked = 1;
		goto top;
	} else if (newpp != NULL) {
		/*
		 * If we have a preallocated page then
		 * insert it now and basically behave like
		 * page_create.
		 */
		VM_STAT_ADD(page_lookup_cnt[18]);
		/*
		 * Since we hold the page hash mutex and
		 * just searched for this page, page_hashin
		 * had better not fail.  If it does, that
		 * means some thread did not follow the
		 * page hash mutex rules.  Panic now and
		 * get it over with.  As usual, go down
		 * holding all the locks.
		 */
		ASSERT(MUTEX_HELD(phm));
		if (!page_hashin(newpp, vp, off, phm)) {
			ASSERT(MUTEX_HELD(phm));
			panic("page_lookup_create: hashin failed %p %p %llx %p",
			    (void *)newpp, (void *)vp, off, (void *)phm);
			/*NOTREACHED*/
		}
		ASSERT(MUTEX_HELD(phm));
		mutex_exit(phm);
		phm = NULL;
		page_set_props(newpp, P_REF);
		page_io_lock(newpp);
		pp = newpp;
		se = SE_EXCL;
	} else {
		VM_STAT_ADD(page_lookup_cnt[19]);
		mutex_exit(phm);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1);

	return (pp);
}

/*
 * Search the hash list for the page representing the
 * specified [vp, offset] and return it locked.  Skip
 * free pages and pages that cannot be locked as requested.
 * Used while attempting to kluster pages.
 */
page_t *
page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		locked;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_lookup_nowait_cnt[0]);

	index = PAGE_HASH_FUNC(vp, off);
	PAGE_HASH_SEARCH(index, pp, vp, off);
	locked = 0;
	if (pp == NULL) {
top:
		VM_STAT_ADD(page_lookup_nowait_cnt[1]);
		locked = 1;
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		PAGE_HASH_SEARCH(index, pp, vp, off);
	}

	if (pp == NULL || PP_ISFREE(pp)) {
		VM_STAT_ADD(page_lookup_nowait_cnt[2]);
		pp = NULL;
	} else {
		if (!page_trylock(pp, se)) {
			VM_STAT_ADD(page_lookup_nowait_cnt[3]);
			pp = NULL;
		} else {
			VM_STAT_ADD(page_lookup_nowait_cnt[4]);
			/*
			 * See the comment in page_lookup()
			 */
			if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
			    ((u_offset_t)(pp->p_offset) != off)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[5]);
				if (locked) {
					panic("page_lookup_nowait %p",
					    (void *)pp);
					/*NOTREACHED*/
				}
				page_unlock(pp);
				goto top;
			}
			if (PP_ISFREE(pp)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[6]);
				page_unlock(pp);
				pp = NULL;
			}
		}
	}
	if (locked) {
		VM_STAT_ADD(page_lookup_nowait_cnt[7]);
		mutex_exit(phm);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	return (pp);
}

/*
 * Search the hash list for a page with the specified [vp, off]
 * that is known to exist and is already locked.  This routine
 * is typically used by segment SOFTUNLOCK routines.
 */
page_t *
page_find(vnode_t *vp, u_offset_t off)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_find_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	PAGE_HASH_SEARCH(index, pp, vp, off);
	mutex_exit(phm);

	ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
	return (pp);
}

/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system.  Obviously this should
 * only be considered as a hint since nothing prevents the
 * page from disappearing or appearing immediately after
 * the return from this routine.  Subsequently, we don't
 * even bother to lock the list.
 */
page_t *
page_exists(vnode_t *vp, u_offset_t off)
{
	page_t	*pp;
	ulong_t	index;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_exists_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	PAGE_HASH_SEARCH(index, pp, vp, off);

	return (pp);
}
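
/*
 * Illustrative caller pattern (a sketch): because page_exists() is
 * only a hint, a caller that actually needs the page must still do a
 * locked lookup and handle the page vanishing in between:
 *
 *	if (page_exists(vp, off) != NULL &&
 *	    (pp = page_lookup(vp, off, SE_SHARED)) != NULL) {
 *		... the page was (still) there and is now locked ...
 *		page_unlock(pp);
 *	}
 */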

/*
 * Determine if physically contiguous pages exist for [vp, off] - [vp, off +
 * page_size(szc)) range.  If they exist and ppa is not NULL fill ppa array
 * with these pages locked SHARED.  If necessary reclaim pages from
 * freelist.  Return 1 if contiguous pages exist and 0 otherwise.
 *
 * If we fail to lock pages still return 1 if pages exist and are contiguous.
 * But in this case the return value is just a hint.  ppa array won't be
 * filled.  Caller should initialize ppa[0] as NULL to distinguish the
 * return value.
 *
 * Returns 0 if pages don't exist or are not physically contiguous.
 *
 * This routine doesn't work for anonymous (swapfs) pages.
 */
int
page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[])
{
	pgcnt_t pages;
	pfn_t pfn;
	page_t *rootpp;
	pgcnt_t i;
	pgcnt_t j;
	u_offset_t save_off = off;
	ulong_t index;
	kmutex_t *phm;
	page_t *pp;
	uint_t pszc;
	int loopcnt = 0;

	ASSERT(szc != 0);
	ASSERT(vp != NULL);
	ASSERT(!IS_SWAPFSVP(vp));
	ASSERT(vp != &kvp);

again:
	if (++loopcnt > 3) {
		VM_STAT_ADD(page_exphcontg[0]);
		return (0);
	}

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	PAGE_HASH_SEARCH(index, pp, vp, off);
	mutex_exit(phm);

	VM_STAT_ADD(page_exphcontg[1]);

	if (pp == NULL) {
		VM_STAT_ADD(page_exphcontg[2]);
		return (0);
	}

	pages = page_get_pagecnt(szc);
	rootpp = pp;
	pfn = rootpp->p_pagenum;

	if ((pszc = pp->p_szc) >= szc && ppa != NULL) {
		VM_STAT_ADD(page_exphcontg[3]);
		if (!page_trylock(pp, SE_SHARED)) {
			VM_STAT_ADD(page_exphcontg[4]);
			return (1);
		}
		if (pp->p_szc != pszc || pp->p_vnode != vp ||
		    pp->p_offset != off) {
			VM_STAT_ADD(page_exphcontg[5]);
			page_unlock(pp);
			off = save_off;
			goto again;
		}
		/*
		 * szc was non zero and the vnode and offset matched after
		 * we locked the page, so it can't become free on us.
		 */
		ASSERT(!PP_ISFREE(pp));
		if (!IS_P2ALIGNED(pfn, pages)) {
			page_unlock(pp);
			return (0);
		}
		ppa[0] = pp;
		pp++;
		off += PAGESIZE;
		pfn++;
		for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
			if (!page_trylock(pp, SE_SHARED)) {
				VM_STAT_ADD(page_exphcontg[6]);
				pp--;
				while (i-- > 0) {
					page_unlock(pp);
					pp--;
				}
				ppa[0] = NULL;
				return (1);
			}
			if (pp->p_szc != pszc) {
				VM_STAT_ADD(page_exphcontg[7]);
				page_unlock(pp);
				pp--;
				while (i-- > 0) {
					page_unlock(pp);
					pp--;
				}
				ppa[0] = NULL;
				off = save_off;
				goto again;
			}
			/*
			 * szc is the same as for the previous already locked
			 * pages with the right identity.  Since this page had
			 * the correct szc after we locked it, it can't get
			 * freed or destroyed and therefore must have the
			 * expected identity.
			 */
			ASSERT(!PP_ISFREE(pp));
			if (pp->p_vnode != vp ||
			    pp->p_offset != off) {
				panic("page_exists_physcontig: "
				    "large page identity doesn't match");
			}
			ppa[i] = pp;
			ASSERT(pp->p_pagenum == pfn);
		}
		VM_STAT_ADD(page_exphcontg[8]);
		ppa[pages] = NULL;
		return (1);
	} else if (pszc >= szc) {
		VM_STAT_ADD(page_exphcontg[9]);
		if (!IS_P2ALIGNED(pfn, pages)) {
			return (0);
		}
		return (1);
	}

	if (!IS_P2ALIGNED(pfn, pages)) {
		VM_STAT_ADD(page_exphcontg[10]);
		return (0);
	}

	if (page_numtomemseg_nolock(pfn) !=
	    page_numtomemseg_nolock(pfn + pages - 1)) {
		VM_STAT_ADD(page_exphcontg[11]);
		return (0);
	}

	/*
	 * We loop up 4 times across pages to promote page size.
	 * We're extra cautious to promote page size atomically with respect
	 * to everybody else.  But we can probably optimize into 1 loop if
	 * this becomes an issue.
	 */

	for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
		ASSERT(pp->p_pagenum == pfn);
		if (!page_trylock(pp, SE_EXCL)) {
			VM_STAT_ADD(page_exphcontg[12]);
			break;
		}
		if (pp->p_vnode != vp ||
		    pp->p_offset != off) {
			VM_STAT_ADD(page_exphcontg[13]);
			page_unlock(pp);
			break;
		}
		if (pp->p_szc >= szc) {
			ASSERT(i == 0);
			page_unlock(pp);
			off = save_off;
			goto again;
		}
	}

	if (i != pages) {
		VM_STAT_ADD(page_exphcontg[14]);
		--pp;
		while (i-- > 0) {
			page_unlock(pp);
			--pp;
		}
		return (0);
	}

	pp = rootpp;
	for (i = 0; i < pages; i++, pp++) {
		if (PP_ISFREE(pp)) {
			VM_STAT_ADD(page_exphcontg[15]);
			ASSERT(!PP_ISAGED(pp));
			ASSERT(pp->p_szc == 0);
			if (!page_reclaim(pp, NULL)) {
				break;
			}
		} else {
			ASSERT(pp->p_szc < szc);
			VM_STAT_ADD(page_exphcontg[16]);
			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
		}
	}
	if (i < pages) {
		VM_STAT_ADD(page_exphcontg[17]);
		/*
		 * page_reclaim failed because we were out of memory.
		 * Drop the rest of the locks and return because this page
		 * must be already reallocated anyway.
		 */
		pp = rootpp;
		for (j = 0; j < pages; j++, pp++) {
			if (j != i) {
				page_unlock(pp);
			}
		}
		return (0);
	}

	off = save_off;
	pp = rootpp;
	for (i = 0; i < pages; i++, pp++, off += PAGESIZE) {
		ASSERT(PAGE_EXCL(pp));
		ASSERT(!PP_ISFREE(pp));
		ASSERT(!hat_page_is_mapped(pp));
		ASSERT(pp->p_vnode == vp);
		ASSERT(pp->p_offset == off);
		pp->p_szc = szc;
	}
	pp = rootpp;
	for (i = 0; i < pages; i++, pp++) {
		if (ppa == NULL) {
			page_unlock(pp);
		} else {
			ppa[i] = pp;
			page_downgrade(ppa[i]);
		}
	}
	if (ppa != NULL) {
		ppa[pages] = NULL;
	}
	VM_STAT_ADD(page_exphcontg[18]);
	ASSERT(vp->v_pages != NULL);
	return (1);
}

/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system and if so return its
 * size code.  Obviously this should only be considered as
 * a hint since nothing prevents the page from disappearing
 * or appearing immediately after the return from this routine.
 */
int
page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	int		rc = 0;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	ASSERT(szc != NULL);
	VM_STAT_ADD(page_exists_forreal_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	PAGE_HASH_SEARCH(index, pp, vp, off);
	if (pp != NULL) {
		*szc = pp->p_szc;
		rc = 1;
	}
	mutex_exit(phm);
	return (rc);
}

/* wakeup threads waiting for pages in page_create_get_something() */
void
wakeup_pcgs(void)
{
	if (!CV_HAS_WAITERS(&pcgs_cv))
		return;
	cv_broadcast(&pcgs_cv);
}

/*
 * 'freemem' is used all over the kernel as an indication of how many
 * pages are free (either on the cache list or on the free page list)
 * in the system.  In very few places is a really accurate 'freemem'
 * needed.  To avoid contention on the lock protecting a single freemem,
 * it was spread out into NCPU buckets.  Set_freemem sets freemem to
 * the total of all NCPU buckets.  It is called from clock() on each
 * TICK.
 */
void
set_freemem()
{
	struct pcf	*p;
	ulong_t		t;
	uint_t		i;

	t = 0;
	p = pcf;
	for (i = 0; i < PCF_FANOUT; i++) {
		t += p->pcf_count;
		p++;
	}
	freemem = t;

	/*
	 * Don't worry about grabbing mutex.  It's not that
	 * critical if we miss a tick or two.  This is
	 * where we wakeup possible delayers in
	 * page_create_get_something().
	 */
	wakeup_pcgs();
}

ulong_t
get_freemem()
{
	struct pcf	*p;
	ulong_t		t;
	uint_t		i;

	t = 0;
	p = pcf;
	for (i = 0; i < PCF_FANOUT; i++) {
		t += p->pcf_count;
		p++;
	}
	/*
	 * We just calculated it, might as well set it.
	 */
	freemem = t;
	return (t);
}
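
/*
 * Illustrative sketch: freemem is an unlocked estimate, so a caller
 * that wants a current value calls get_freemem() and treats the result
 * as advisory only, e.g. a heuristic low-memory check (desfree is the
 * usual paging threshold tunable; any comparison like this is
 * inherently racy and must not be used for hard accounting):
 *
 *	if (get_freemem() < desfree)
 *		... back off, or prod the pageout daemon ...
 */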

/*
 * Acquire all of the page cache & free (pcf) locks.
 */
void
pcf_acquire_all()
{
	struct pcf	*p;
	uint_t		i;

	p = pcf;
	for (i = 0; i < PCF_FANOUT; i++) {
		mutex_enter(&p->pcf_lock);
		p++;
	}
}

/*
 * Release all the pcf_locks.
 */
void
pcf_release_all()
{
	struct pcf	*p;
	uint_t		i;

	p = pcf;
	for (i = 0; i < PCF_FANOUT; i++) {
		mutex_exit(&p->pcf_lock);
		p++;
	}
}
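
/*
 * Illustrative lock-ordering sketch (mirrors page_create_throttle()
 * and page_create_wait() below): when all pcf locks are held and the
 * thread must sleep, new_freemem_lock is taken *before* the pcf locks
 * are dropped, so a concurrent page_free() cannot lose the wakeup:
 *
 *	pcf_acquire_all();
 *	mutex_enter(&new_freemem_lock);
 *	... mark each pcf_wait, drop each pcf_lock ...
 *	cv_wait(&freemem_cv, &new_freemem_lock);
 *	mutex_exit(&new_freemem_lock);
 */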

/*
 * Inform the VM system that we need some pages freed up.
 * Calls must be symmetric, e.g.:
 *
 *	page_needfree(100);
 *	wait a bit;
 *	page_needfree(-100);
 */
void
page_needfree(spgcnt_t npages)
{
	mutex_enter(&new_freemem_lock);
	needfree += npages;
	mutex_exit(&new_freemem_lock);
}

/*
 * Throttle for page_create(): try to prevent freemem from dropping
 * below throttlefree.  We can't provide a 100% guarantee because
 * KM_NOSLEEP allocations, page_reclaim(), and various other things
 * nibble away at the freelist.  However, we can block all PG_WAIT
 * allocations until memory becomes available.  The motivation is
 * that several things can fall apart when there's no free memory:
 *
 * (1) If pageout() needs memory to push a page, the system deadlocks.
 *
 * (2) By (broken) specification, timeout(9F) can neither fail nor
 *     block, so it has no choice but to panic the system if it
 *     cannot allocate a callout structure.
 *
 * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block;
 *     it panics if it cannot allocate a callback structure.
 *
 * (4) Untold numbers of third-party drivers have not yet been hardened
 *     against KM_NOSLEEP and/or allocb() failures; they simply assume
 *     success and panic the system with a data fault on failure.
 *     (The long-term solution to this particular problem is to ship
 *     hostile fault-injecting DEBUG kernels with the DDK.)
 *
 * It is theoretically impossible to guarantee success of non-blocking
 * allocations, but in practice, this throttle is very hard to break.
 */
static int
page_create_throttle(pgcnt_t npages, int flags)
{
	ulong_t	fm;
	uint_t	i;
	pgcnt_t tf;	/* effective value of throttlefree */

	/*
	 * Never deny pages when:
	 * - it's a thread that cannot block [NOMEMWAIT()]
	 * - the allocation cannot block and must not fail
	 * - the allocation cannot block and is pageout dispensated
	 */
	if (NOMEMWAIT() ||
	    ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) ||
	    ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE))
		return (1);

	/*
	 * If the allocation can't block, we look favorably upon it
	 * unless we're below pageout_reserve.  In that case we fail
	 * the allocation because we want to make sure there are a few
	 * pages available for pageout.
	 */
	if ((flags & PG_WAIT) == 0)
		return (freemem >= npages + pageout_reserve);

	/* Calculate the effective throttlefree value */
	tf = throttlefree -
	    ((flags & PG_PUSHPAGE) ? pageout_reserve : 0);

	cv_signal(&proc_pageout->p_cv);

	while (freemem < npages + tf) {
		pcf_acquire_all();
		mutex_enter(&new_freemem_lock);
		fm = 0;
		for (i = 0; i < PCF_FANOUT; i++) {
			fm += pcf[i].pcf_count;
			pcf[i].pcf_wait++;
			mutex_exit(&pcf[i].pcf_lock);
		}
		freemem = fm;
		needfree += npages;
		freemem_wait++;
		cv_wait(&freemem_cv, &new_freemem_lock);
		freemem_wait--;
		needfree -= npages;
		mutex_exit(&new_freemem_lock);
	}
	return (1);
}

/*
 * page_create_wait() is called to either coalesce pages from the
 * different pcf buckets or to wait because there simply are not
 * enough pages to satisfy the caller's request.
 *
 * Sadly, this is called from platform/vm/vm_machdep.c
 */
int
page_create_wait(size_t npages, uint_t flags)
{
	pgcnt_t		total;
	uint_t		i;
	struct pcf	*p;

	/*
	 * Wait until there are enough free pages to satisfy our
	 * entire request.
	 * We set needfree += npages before prodding pageout, to make sure
	 * it does real work when npages > lotsfree > freemem.
	 */
	VM_STAT_ADD(page_create_not_enough);

	ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1);
checkagain:
	if ((flags & PG_NORELOC) &&
	    kcage_freemem < kcage_throttlefree + npages)
		(void) kcage_create_throttle(npages, flags);

	if (freemem < npages + throttlefree)
		if (!page_create_throttle(npages, flags))
			return (0);

	/*
	 * Since page_create_va() looked at every
	 * bucket, assume we are going to have to wait.
	 * Get all of the pcf locks.
	 */
	total = 0;
	p = pcf;
	for (i = 0; i < PCF_FANOUT; i++) {
		mutex_enter(&p->pcf_lock);
		total += p->pcf_count;
		if (total >= npages) {
			/*
			 * Wow!  There are enough pages lying around
			 * to satisfy the request.  Do the accounting,
			 * drop the locks we acquired, and go back.
			 *
			 * freemem is not protected by any lock.  So,
			 * we cannot have any assertion containing
			 * freemem.
			 */
			freemem -= npages;

			while (p >= pcf) {
				if (p->pcf_count <= npages) {
					npages -= p->pcf_count;
					p->pcf_count = 0;
				} else {
					p->pcf_count -= (uint_t)npages;
					npages = 0;
				}
				mutex_exit(&p->pcf_lock);
				p--;
			}
			ASSERT(npages == 0);
			return (1);
		}
		p++;
	}

	/*
	 * All of the pcf locks are held, there are not enough pages
	 * to satisfy the request (total < npages).
	 * Be sure to acquire the new_freemem_lock before dropping
	 * the pcf locks.  This prevents dropping wakeups in page_free().
	 * The order is always pcf_lock then new_freemem_lock.
	 *
	 * Since we hold all the pcf locks, it is a good time to set freemem.
	 *
	 * If the caller does not want to wait, return now.
	 * Else turn the pageout daemon loose to find something
	 * and wait till it does.
	 */
	freemem = total;

	if ((flags & PG_WAIT) == 0) {
		pcf_release_all();

		TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM,
		    "page_create_nomem:npages %ld freemem %ld", npages, freemem);
		return (0);
	}

	ASSERT(proc_pageout != NULL);
	cv_signal(&proc_pageout->p_cv);

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START,
	    "page_create_sleep_start: freemem %ld needfree %ld",
	    freemem, needfree);

	/*
	 * We are going to wait.
	 * We currently hold all of the pcf_locks,
	 * get the new_freemem_lock (it protects freemem_wait),
	 * before dropping the pcf_locks.
	 */
	mutex_enter(&new_freemem_lock);

	p = pcf;
	for (i = 0; i < PCF_FANOUT; i++) {
		p->pcf_wait++;
		mutex_exit(&p->pcf_lock);
		p++;
	}

	needfree += npages;
	freemem_wait++;

	cv_wait(&freemem_cv, &new_freemem_lock);

	freemem_wait--;
	needfree -= npages;

	mutex_exit(&new_freemem_lock);

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END,
	    "page_create_sleep_end: freemem %ld needfree %ld",
	    freemem, needfree);

	VM_STAT_ADD(page_create_not_enough_again);
	goto checkagain;
}
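
/*
 * Illustrative pairing (a sketch based on the callers in this file,
 * e.g. page_alloc_pages() below): a caller reserves pages up front
 * with page_create_wait() and, if it later fails to pull that many
 * pages off the freelists, returns the unused part of the reservation
 * with page_create_putback(); `got' is a hypothetical local count:
 *
 *	if (!page_create_wait(npages, PG_WAIT))
 *		return (NULL);		/+ reservation failed +/
 *	... grab pages; suppose only `got' of them were obtained ...
 *	if (got < npages)
 *		page_create_putback(npages - got);
 */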

/*
 * A routine to do the opposite of page_create_wait().
 */
void
page_create_putback(spgcnt_t npages)
{
	struct pcf	*p;
	pgcnt_t		lump;
	uint_t		*which;

	/*
	 * When a contiguous lump is broken up, we have to
	 * deal with lots of pages (min 64) so let's spread
	 * the wealth around.
	 */
	lump = roundup(npages, PCF_FANOUT) / PCF_FANOUT;
	freemem += npages;

	for (p = pcf; (npages > 0) && (p < &pcf[PCF_FANOUT]); p++) {
		which = &p->pcf_count;

		mutex_enter(&p->pcf_lock);

		if (p->pcf_block) {
			which = &p->pcf_reserve;
		}

		if (lump < npages) {
			*which += (uint_t)lump;
			npages -= lump;
		} else {
			*which += (uint_t)npages;
			npages = 0;
		}

		if (p->pcf_wait) {
			mutex_enter(&new_freemem_lock);
			/*
			 * Check to see if some other thread
			 * is actually waiting.  Another bucket
			 * may have woken it up by now.  If there
			 * are no waiters, then set our pcf_wait
			 * count to zero to avoid coming in here
			 * next time.
			 */
			if (freemem_wait) {
				if (npages > 1) {
					cv_broadcast(&freemem_cv);
				} else {
					cv_signal(&freemem_cv);
				}
				p->pcf_wait--;
			} else {
				p->pcf_wait = 0;
			}
			mutex_exit(&new_freemem_lock);
		}
		mutex_exit(&p->pcf_lock);
	}
	ASSERT(npages == 0);
}

/*
 * A helper routine for page_create_get_something.
 * The indenting got too deep down there.
 * Unblock the pcf counters.  Any pages freed after
 * pcf_block got set are moved to pcf_count and
 * wakeups (cv_broadcast() or cv_signal()) are done as needed.
 */
static void
pcgs_unblock(void)
{
	int		i;
	struct pcf	*p;

	/* Update freemem while we're here. */
	freemem = 0;
	p = pcf;
	for (i = 0; i < PCF_FANOUT; i++) {
		mutex_enter(&p->pcf_lock);
		ASSERT(p->pcf_count == 0);
		p->pcf_count = p->pcf_reserve;
		p->pcf_block = 0;
		freemem += p->pcf_count;
		if (p->pcf_wait) {
			mutex_enter(&new_freemem_lock);
			if (freemem_wait) {
				if (p->pcf_reserve > 1) {
					cv_broadcast(&freemem_cv);
					p->pcf_wait = 0;
				} else {
					cv_signal(&freemem_cv);
					p->pcf_wait--;
				}
			} else {
				p->pcf_wait = 0;
			}
			mutex_exit(&new_freemem_lock);
		}
		p->pcf_reserve = 0;
		mutex_exit(&p->pcf_lock);
		p++;
	}
}

/*
 * Called from page_create_va() when both the cache and free lists
 * have been checked once.
 *
 * Either returns a page or panics since the accounting was done
 * way before we got here.
 *
 * We don't come here often, so leave the accounting on permanently.
 */

#define	MAX_PCGS	100

#ifdef	DEBUG
#define	PCGS_TRIES	100
#else	/* DEBUG */
#define	PCGS_TRIES	10
#endif	/* DEBUG */

#ifdef	VM_STATS
uint_t	pcgs_counts[PCGS_TRIES];
uint_t	pcgs_too_many;
uint_t	pcgs_entered;
uint_t	pcgs_entered_noreloc;
uint_t	pcgs_locked;
uint_t	pcgs_cagelocked;
#endif	/* VM_STATS */

static page_t *
page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg,
    caddr_t vaddr, uint_t flags)
{
	uint_t		count;
	page_t		*pp;
	uint_t		locked, i;
	struct	pcf	*p;
	lgrp_t		*lgrp;
	int		cagelocked = 0;

	VM_STAT_ADD(pcgs_entered);

	/*
	 * Tap any reserve freelists: if we fail now, we'll die
	 * since the page(s) we're looking for have already been
	 * accounted for.
	 */
	flags |= PG_PANIC;

	if ((flags & PG_NORELOC) != 0) {
		VM_STAT_ADD(pcgs_entered_noreloc);
		/*
		 * Requests for free pages from critical threads
		 * such as pageout still won't throttle here, but
		 * we must try again, to give the cageout thread
		 * another chance to catch up.  Since we already
		 * accounted for the pages, we had better get them
		 * this time.
		 *
		 * N.B. All non-critical threads acquire the pcgs_cagelock
		 * to serialize access to the freelists.  This implements a
		 * turnstile-type synchronization to avoid starvation of
		 * critical requests for PG_NORELOC memory by non-critical
		 * threads: all non-critical threads must acquire a 'ticket'
		 * before passing through, which entails making sure
		 * kcage_freemem won't fall below minfree prior to grabbing
		 * pages from the freelists.
		 */
		if (kcage_create_throttle(1, flags) == KCT_NONCRIT) {
			mutex_enter(&pcgs_cagelock);
			cagelocked = 1;
			VM_STAT_ADD(pcgs_cagelocked);
		}
	}

	/*
	 * Time to get serious.
	 * We failed to get a `correctly colored' page from both the
	 * free and cache lists.
	 * We escalate in stage.
	 *
	 * First try both lists without worrying about color.
	 *
	 * Then, grab all page accounting locks (ie. pcf[]) and
	 * steal any pages that they have and set the pcf_block flag to
	 * stop deletions from the lists.  This will help because
	 * a page can get added to the free list while we are looking
	 * at the cache list, then another page could be added to the cache
	 * list allowing the page on the free list to be removed as we
	 * move from looking at the cache list to the free list.  This
	 * could happen over and over.  We would never find the page
	 * we have accounted for.
	 *
	 * Noreloc pages are a subset of the global (relocatable) page pool.
	 * They are not tracked separately in the pcf bins, so it is
	 * impossible to know when doing pcf accounting if the available
	 * page(s) are noreloc pages or not.  When looking for a noreloc page
	 * it is quite easy to end up here even if the global (relocatable)
	 * page pool has plenty of free pages but the noreloc pool is empty.
	 *
	 * When the noreloc pool is empty (or low), additional noreloc pages
	 * are created by converting pages from the global page pool.  This
	 * process will stall during pcf accounting if the pcf bins are
	 * already locked.  Such is the case when a noreloc allocation is
	 * looping here in page_create_get_something waiting for more noreloc
	 * pages to appear.
	 *
	 * Short of adding a new field to the pcf bins to accurately track
	 * the number of free noreloc pages, we instead do not grab the
	 * pcgs_lock, do not set the pcf blocks and do not timeout when
	 * allocating a noreloc page.  This allows noreloc allocations to
	 * loop without blocking global page pool allocations.
	 *
	 * NOTE: the behaviour of page_create_get_something has not changed
	 * for the case of global page pool allocations.
	 */

	flags &= ~PG_MATCH_COLOR;
	locked = 0;
#if defined(__i386) || defined(__amd64)
	/*
	 * page_create_get_something may be called because 4g memory may be
	 * depleted.  Set flags to allow for relocation of base page below
	 * 4g if necessary.
	 */
	if (physmax4g)
		flags |= (PGI_PGCPSZC0 | PGI_PGCPHIPRI);
#endif

	lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);

	for (count = 0; kcage_on || count < MAX_PCGS; count++) {
		pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
		    flags, lgrp);
		if (pp == NULL) {
			pp = page_get_cachelist(vp, off, seg, vaddr,
			    flags, lgrp);
		}
		if (pp == NULL) {
			/*
			 * Serialize.  Don't fight with other pcgs().
			 */
			if (!locked && (!kcage_on || !(flags & PG_NORELOC))) {
				mutex_enter(&pcgs_lock);
				VM_STAT_ADD(pcgs_locked);
				locked = 1;
				p = pcf;
				for (i = 0; i < PCF_FANOUT; i++) {
					mutex_enter(&p->pcf_lock);
					ASSERT(p->pcf_block == 0);
					p->pcf_block = 1;
					p->pcf_reserve = p->pcf_count;
					p->pcf_count = 0;
					mutex_exit(&p->pcf_lock);
					p++;
				}
				freemem = 0;
			}

			if (count) {
				/*
				 * Since page_free() puts pages on
				 * a list then accounts for it, we
				 * just have to wait for page_free()
				 * to unlock any page it was working
				 * with.  The page_lock()-page_reclaim()
				 * path falls in the same boat.
				 *
				 * We don't need to check on the
				 * PG_WAIT flag, we have already
				 * accounted for the page we are
				 * looking for in page_create_va().
				 *
				 * We just wait a moment to let any
				 * locked pages on the lists free up,
				 * then continue around and try again.
				 *
				 * Will be awakened by set_freemem().
				 */
				mutex_enter(&pcgs_wait_lock);
				cv_wait(&pcgs_cv, &pcgs_wait_lock);
				mutex_exit(&pcgs_wait_lock);
			}
		} else {
#ifdef VM_STATS
			if (count >= PCGS_TRIES) {
				VM_STAT_ADD(pcgs_too_many);
			} else {
				VM_STAT_ADD(pcgs_counts[count]);
			}
#endif
			if (locked) {
				pcgs_unblock();
				mutex_exit(&pcgs_lock);
			}
			if (cagelocked)
				mutex_exit(&pcgs_cagelock);
			return (pp);
		}
	}
	/*
	 * we go down holding the pcf locks.
	 */
	panic("no %spage found %d",
	    ((flags & PG_NORELOC) ? "non-reloc " : ""), count);
	/*NOTREACHED*/
}

/*
 * Create enough pages for "bytes" worth of data starting at
 * "off" in "vp".
 *
 *	Where flag must be one of:
 *
 *		PG_EXCL:	Exclusive create (fail if any page already
 *				exists in the page cache) which does not
 *				wait for memory to become available.
 *
 *		PG_WAIT:	Non-exclusive create which can wait for
 *				memory to become available.
 *
 *		PG_PHYSCONTIG:	Allocate physically contiguous pages.
 *				(Not Supported)
 *
 * A doubly linked list of pages is returned to the caller.  Each page
 * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock)
 * lock.
 *
 * Unable to change the parameters to page_create() in a minor release,
 * we renamed page_create() to page_create_va(), changed all known calls
 * from page_create() to page_create_va(), and created this wrapper.
 *
 * Upon a major release, we should break compatibility by deleting this
 * wrapper, and replacing all the strings "page_create_va", with "page_create".
 *
 * NOTE: There is a copy of this interface as page_create_io() in
 *	 i86/vm/vm_machdep.c.  Any bugs fixed here should be applied
 *	 there.
 */
page_t *
page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags)
{
	caddr_t random_vaddr;
	struct seg kseg;

#ifdef DEBUG
	cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p",
	    (void *)caller());
#endif

	random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^
	    (uintptr_t)(off >> PAGESHIFT));
	kseg.s_as = &kas;

	return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr));
}
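
/*
 * Illustrative sketch: new callers should use page_create_va()
 * directly, supplying the segment and virtual address so the platform
 * can pick page colors; the flags are as documented above.  `seg' and
 * `addr' here are hypothetical caller state:
 *
 *	pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_EXCL,
 *	    seg, addr);
 *	(NULL is returned on failure, e.g. when a page already
 *	exists for a PG_EXCL request)
 */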

#ifdef DEBUG
uint32_t pg_alloc_pgs_mtbf = 0;
#endif

/*
 * Used for large page support.  It will attempt to allocate
 * large page(s) off the freelist.
 *
 * Returns nonzero on failure.
 */
int
page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr,
    page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz)
{
	pgcnt_t		npgs, curnpgs, totpgs;
	size_t		pgsz;
	page_t		*pplist = NULL, *pp;
	int		err = 0;
	lgrp_t		*lgrp;

	ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1));

	VM_STAT_ADD(alloc_pages[0]);

#ifdef DEBUG
	if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) {
		return (ENOMEM);
	}
#endif

	pgsz = page_get_pagesize(szc);
	totpgs = curnpgs = npgs = pgsz >> PAGESHIFT;

	ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0);
	/*
	 * Exactly one of basepp and ppa must be non-NULL.
	 */
	ASSERT(basepp != NULL || ppa != NULL);
	ASSERT(basepp == NULL || ppa == NULL);

	(void) page_create_wait(npgs, PG_WAIT);

	while (npgs && szc) {
		lgrp = lgrp_mem_choose(seg, addr, pgsz);
		pp = page_get_freelist(vp, 0, seg, addr, pgsz, 0, lgrp);
		if (pp != NULL) {
			VM_STAT_ADD(alloc_pages[1]);
			page_list_concat(&pplist, &pp);
			ASSERT(npgs >= curnpgs);
			npgs -= curnpgs;
		} else if (anypgsz) {
			VM_STAT_ADD(alloc_pages[2]);
			szc--;
			pgsz = page_get_pagesize(szc);
			curnpgs = pgsz >> PAGESHIFT;
		} else {
			VM_STAT_ADD(alloc_pages[3]);
			ASSERT(npgs == totpgs);
			page_create_putback(npgs);
			return (ENOMEM);
		}
	}
	if (szc == 0) {
		VM_STAT_ADD(alloc_pages[4]);
		ASSERT(npgs != 0);
		page_create_putback(npgs);
		err = ENOMEM;
	} else if (basepp != NULL) {
		ASSERT(npgs == 0);
		ASSERT(ppa == NULL);
		*basepp = pplist;
	}

	npgs = totpgs - npgs;
	pp = pplist;

	/*
	 * Clear the free and age bits.  Also if we were passed in a ppa then
	 * fill it in with all the constituent pages from the large page.  But
	 * if we failed to allocate all the pages just free what we got.
	 */
	while (npgs != 0) {
		ASSERT(PP_ISFREE(pp));
		ASSERT(PP_ISAGED(pp));
		if (ppa != NULL || err != 0) {
			if (err == 0) {
				VM_STAT_ADD(alloc_pages[5]);
				PP_CLRFREE(pp);
				PP_CLRAGED(pp);
				page_sub(&pplist, pp);
				*ppa++ = pp;
				npgs--;
			} else {
				VM_STAT_ADD(alloc_pages[6]);
				ASSERT(pp->p_szc != 0);
				curnpgs = page_get_pagecnt(pp->p_szc);
				page_list_break(&pp, &pplist, curnpgs);
				page_list_add_pages(pp, 0);
				page_create_putback(curnpgs);
				ASSERT(npgs >= curnpgs);
				npgs -= curnpgs;
			}
			pp = pplist;
		} else {
			VM_STAT_ADD(alloc_pages[7]);
			PP_CLRFREE(pp);
			PP_CLRAGED(pp);
			pp = pp->p_next;
			npgs--;
		}
	}
	return (err);
}
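
/*
 * Illustrative call (a sketch): allocate one szc-sized large page and
 * receive its constituent pages in a caller-supplied array; `ppa' must
 * have room for page_get_pagecnt(szc) entries.  Passing anypgsz == 1
 * lets the routine fall back to smaller page sizes rather than failing
 * outright:
 *
 *	err = page_alloc_pages(vp, seg, addr, NULL, ppa, szc, 1);
 *	if (err != 0)
 *		... ENOMEM: nothing was allocated ...
 */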
2084 */ 2085 page_t * 2086 page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2087 struct seg *seg, caddr_t vaddr, void *arg) 2088 { 2089 pgcnt_t npages, pcftotal; 2090 page_t *pp; 2091 page_t *rootpp; 2092 lgrp_t *lgrp; 2093 uint_t enough; 2094 uint_t pcf_index; 2095 uint_t i; 2096 struct pcf *p; 2097 struct pcf *q; 2098 lgrp_id_t *lgrpid = (lgrp_id_t *)arg; 2099 2100 ASSERT(vp != NULL); 2101 2102 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2103 PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0); 2104 /* but no others */ 2105 2106 ASSERT((flags & PG_EXCL) == PG_EXCL); 2107 2108 npages = btop(bytes); 2109 2110 if (!kcage_on || panicstr) { 2111 /* 2112 * Cage is OFF, or we are single threaded in 2113 * panic, so make everything a RELOC request. 2114 */ 2115 flags &= ~PG_NORELOC; 2116 } 2117 2118 /* 2119 * Make sure there's adequate physical memory available. 2120 * Note: PG_WAIT is ignored here. 2121 */ 2122 if (freemem <= throttlefree + npages) { 2123 VM_STAT_ADD(page_create_large_cnt[1]); 2124 return (NULL); 2125 } 2126 2127 /* 2128 * If cage is on, dampen draw from cage when available 2129 * cage space is low. 2130 */ 2131 if ((flags & (PG_NORELOC | PG_WAIT)) == (PG_NORELOC | PG_WAIT) && 2132 kcage_freemem < kcage_throttlefree + npages) { 2133 2134 /* 2135 * The cage is on, the caller wants PG_NORELOC 2136 * pages and available cage memory is very low. 2137 * Call kcage_create_throttle() to attempt to 2138 * control demand on the cage. 2139 */ 2140 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) { 2141 VM_STAT_ADD(page_create_large_cnt[2]); 2142 return (NULL); 2143 } 2144 } 2145 2146 enough = 0; 2147 pcf_index = PCF_INDEX(); 2148 p = &pcf[pcf_index]; 2149 q = &pcf[PCF_FANOUT]; 2150 for (pcftotal = 0, i = 0; i < PCF_FANOUT; i++) { 2151 if (p->pcf_count > npages) { 2152 /* 2153 * a good one to try. 2154 */ 2155 mutex_enter(&p->pcf_lock); 2156 if (p->pcf_count > npages) { 2157 p->pcf_count -= (uint_t)npages; 2158 /* 2159 * freemem is not protected by any lock. 2160 * Thus, we cannot have any assertion 2161 * containing freemem here. 2162 */ 2163 freemem -= npages; 2164 enough = 1; 2165 mutex_exit(&p->pcf_lock); 2166 break; 2167 } 2168 mutex_exit(&p->pcf_lock); 2169 } 2170 pcftotal += p->pcf_count; 2171 p++; 2172 if (p >= q) { 2173 p = pcf; 2174 } 2175 } 2176 2177 if (!enough) { 2178 /* If there isn't enough memory available, give up. */ 2179 if (pcftotal < npages) { 2180 VM_STAT_ADD(page_create_large_cnt[3]); 2181 return (NULL); 2182 } 2183 2184 /* try to collect pages from several pcf bins */ 2185 for (p = pcf, pcftotal = 0, i = 0; i < PCF_FANOUT; i++) { 2186 mutex_enter(&p->pcf_lock); 2187 pcftotal += p->pcf_count; 2188 if (pcftotal >= npages) { 2189 /* 2190 * Wow! There are enough pages laying around 2191 * to satisfy the request. Do the accounting, 2192 * drop the locks we acquired, and go back. 2193 * 2194 * freemem is not protected by any lock. So, 2195 * we cannot have any assertion containing 2196 * freemem. 
2197 */ 2198 pgcnt_t tpages = npages; 2199 freemem -= npages; 2200 while (p >= pcf) { 2201 if (p->pcf_count <= tpages) { 2202 tpages -= p->pcf_count; 2203 p->pcf_count = 0; 2204 } else { 2205 p->pcf_count -= (uint_t)tpages; 2206 tpages = 0; 2207 } 2208 mutex_exit(&p->pcf_lock); 2209 p--; 2210 } 2211 ASSERT(tpages == 0); 2212 break; 2213 } 2214 p++; 2215 } 2216 if (i == PCF_FANOUT) { 2217 /* failed to collect pages - release the locks */ 2218 while (--p >= pcf) { 2219 mutex_exit(&p->pcf_lock); 2220 } 2221 VM_STAT_ADD(page_create_large_cnt[4]); 2222 return (NULL); 2223 } 2224 } 2225 2226 /* 2227 * This is where this function behaves fundamentally differently 2228 * from page_create_va(); since we're intending to map the page 2229 * with a single TTE, we have to get it as a physically contiguous 2230 * hardware pagesize chunk. If we can't, we fail. 2231 */ 2232 if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max && 2233 LGRP_EXISTS(lgrp_table[*lgrpid])) 2234 lgrp = lgrp_table[*lgrpid]; 2235 else 2236 lgrp = lgrp_mem_choose(seg, vaddr, bytes); 2237 2238 if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr, 2239 bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) { 2240 page_create_putback(npages); 2241 VM_STAT_ADD(page_create_large_cnt[5]); 2242 return (NULL); 2243 } 2244 2245 /* 2246 * If we got the page with the wrong mtype, give it back; this is a 2247 * workaround for CR 6249718. Once CR 6249718 is fixed, we will never 2248 * get inside the "if" and the workaround becomes just a nop. 2249 */ 2250 if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) { 2251 page_list_add_pages(rootpp, 0); 2252 page_create_putback(npages); 2253 VM_STAT_ADD(page_create_large_cnt[6]); 2254 return (NULL); 2255 } 2256 2257 /* 2258 * If satisfying this request has left us with too little 2259 * memory, start the wheels turning to get some back. The 2260 * first clause of the test prevents waking up the pageout 2261 * daemon in situations where it would decide that there's 2262 * nothing to do.
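 *
 * (In other words, we signal proc_pageout only when the scanner has
 * not already met its desired scan rate (nscan < desscan) and freemem
 * has fallen below minfree; see the test just below.)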
2263 */ 2264 if (nscan < desscan && freemem < minfree) { 2265 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2266 "pageout_cv_signal:freemem %ld", freemem); 2267 cv_signal(&proc_pageout->p_cv); 2268 } 2269 2270 pp = rootpp; 2271 while (npages--) { 2272 ASSERT(PAGE_EXCL(pp)); 2273 ASSERT(pp->p_vnode == NULL); 2274 ASSERT(!hat_page_is_mapped(pp)); 2275 PP_CLRFREE(pp); 2276 PP_CLRAGED(pp); 2277 if (!page_hashin(pp, vp, off, NULL)) 2278 panic("page_create_large: hashin failed: page %p", 2279 (void *)pp); 2280 page_io_lock(pp); 2281 off += PAGESIZE; 2282 pp = pp->p_next; 2283 } 2284 2285 VM_STAT_ADD(page_create_large_cnt[0]); 2286 return (rootpp); 2287 } 2288 2289 page_t * 2290 page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2291 struct seg *seg, caddr_t vaddr) 2292 { 2293 page_t *plist = NULL; 2294 pgcnt_t npages; 2295 pgcnt_t found_on_free = 0; 2296 pgcnt_t pages_req; 2297 page_t *npp = NULL; 2298 uint_t enough; 2299 uint_t i; 2300 uint_t pcf_index; 2301 struct pcf *p; 2302 struct pcf *q; 2303 lgrp_t *lgrp; 2304 2305 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START, 2306 "page_create_start:vp %p off %llx bytes %lu flags %x", 2307 vp, off, bytes, flags); 2308 2309 ASSERT(bytes != 0 && vp != NULL); 2310 2311 if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) { 2312 panic("page_create: invalid flags"); 2313 /*NOTREACHED*/ 2314 } 2315 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2316 PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0); 2317 /* but no others */ 2318 2319 pages_req = npages = btopr(bytes); 2320 /* 2321 * Try to see whether request is too large to *ever* be 2322 * satisfied, in order to prevent deadlock. We arbitrarily 2323 * decide to limit maximum size requests to max_page_get. 2324 */ 2325 if (npages >= max_page_get) { 2326 if ((flags & PG_WAIT) == 0) { 2327 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG, 2328 "page_create_toobig:vp %p off %llx npages " 2329 "%lu max_page_get %lu", 2330 vp, off, npages, max_page_get); 2331 return (NULL); 2332 } else { 2333 cmn_err(CE_WARN, 2334 "Request for too much kernel memory " 2335 "(%lu bytes), will hang forever", bytes); 2336 for (;;) 2337 delay(1000000000); 2338 } 2339 } 2340 2341 if (!kcage_on || panicstr) { 2342 /* 2343 * Cage is OFF, or we are single threaded in 2344 * panic, so make everything a RELOC request. 2345 */ 2346 flags &= ~PG_NORELOC; 2347 } 2348 2349 if (freemem <= throttlefree + npages) 2350 if (!page_create_throttle(npages, flags)) 2351 return (NULL); 2352 2353 /* 2354 * If cage is on, dampen draw from cage when available 2355 * cage space is low. 2356 */ 2357 if ((flags & PG_NORELOC) && 2358 kcage_freemem < kcage_throttlefree + npages) { 2359 2360 /* 2361 * The cage is on, the caller wants PG_NORELOC 2362 * pages and available cage memory is very low. 2363 * Call kcage_create_throttle() to attempt to 2364 * control demand on the cage. 2365 */ 2366 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) 2367 return (NULL); 2368 } 2369 2370 VM_STAT_ADD(page_create_cnt[0]); 2371 2372 enough = 0; 2373 pcf_index = PCF_INDEX(); 2374 2375 p = &pcf[pcf_index]; 2376 q = &pcf[PCF_FANOUT]; 2377 for (i = 0; i < PCF_FANOUT; i++) { 2378 if (p->pcf_count > npages) { 2379 /* 2380 * a good one to try. 2381 */ 2382 mutex_enter(&p->pcf_lock); 2383 if (p->pcf_count > npages) { 2384 p->pcf_count -= (uint_t)npages; 2385 /* 2386 * freemem is not protected by any lock. 2387 * Thus, we cannot have any assertion 2388 * containing freemem here. 
2389 */ 2390 freemem -= npages; 2391 enough = 1; 2392 mutex_exit(&p->pcf_lock); 2393 break; 2394 } 2395 mutex_exit(&p->pcf_lock); 2396 } 2397 p++; 2398 if (p >= q) { 2399 p = pcf; 2400 } 2401 } 2402 2403 if (!enough) { 2404 /* 2405 * Have to look harder. If npages is greater than 2406 * one, then we might have to coalesce the counters. 2407 * 2408 * Go wait. We come back having accounted 2409 * for the memory. 2410 */ 2411 VM_STAT_ADD(page_create_cnt[1]); 2412 if (!page_create_wait(npages, flags)) { 2413 VM_STAT_ADD(page_create_cnt[2]); 2414 return (NULL); 2415 } 2416 } 2417 2418 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS, 2419 "page_create_success:vp %p off %llx", vp, off); 2420 2421 /* 2422 * If satisfying this request has left us with too little 2423 * memory, start the wheels turning to get some back. The 2424 * first clause of the test prevents waking up the pageout 2425 * daemon in situations where it would decide that there's 2426 * nothing to do. 2427 */ 2428 if (nscan < desscan && freemem < minfree) { 2429 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2430 "pageout_cv_signal:freemem %ld", freemem); 2431 cv_signal(&proc_pageout->p_cv); 2432 } 2433 2434 /* 2435 * Loop around collecting the requested number of pages. 2436 * Most of the time, we have to `create' a new page. With 2437 * this in mind, pull the page off the free list before 2438 * getting the hash lock. This will minimize the hash 2439 * lock hold time, nesting, and the like. If it turns 2440 * out we don't need the page, we put it back at the end. 2441 */ 2442 while (npages--) { 2443 page_t *pp; 2444 kmutex_t *phm = NULL; 2445 ulong_t index; 2446 2447 index = PAGE_HASH_FUNC(vp, off); 2448 top: 2449 ASSERT(phm == NULL); 2450 ASSERT(index == PAGE_HASH_FUNC(vp, off)); 2451 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 2452 2453 if (npp == NULL) { 2454 /* 2455 * Try to get a page from the freelist (i.e., 2456 * a page with no [vp, off] tag). If that 2457 * fails, use the cachelist. 2458 * 2459 * During the first attempt at both the free 2460 * and cache lists we try for the correct color. 2461 */ 2462 /* 2463 * XXXX-how do we deal with virtual indexed 2464 * caches and colors? 2465 */ 2466 VM_STAT_ADD(page_create_cnt[4]); 2467 /* 2468 * Get lgroup to allocate next page of shared memory 2469 * from and use it to specify where to allocate 2470 * the physical memory 2471 */ 2472 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 2473 npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 2474 flags | PG_MATCH_COLOR, lgrp); 2475 if (npp == NULL) { 2476 npp = page_get_cachelist(vp, off, seg, 2477 vaddr, flags | PG_MATCH_COLOR, lgrp); 2478 if (npp == NULL) { 2479 npp = page_create_get_something(vp, 2480 off, seg, vaddr, 2481 flags & ~PG_MATCH_COLOR); 2482 } 2483 2484 if (PP_ISAGED(npp) == 0) { 2485 /* 2486 * Since this page came from the 2487 * cachelist, we must destroy the 2488 * old vnode association. 2489 */ 2490 page_hashout(npp, NULL); 2491 } 2492 } 2493 } 2494 2495 /* 2496 * We own this page! 2497 */ 2498 ASSERT(PAGE_EXCL(npp)); 2499 ASSERT(npp->p_vnode == NULL); 2500 ASSERT(!hat_page_is_mapped(npp)); 2501 PP_CLRFREE(npp); 2502 PP_CLRAGED(npp); 2503 2504 /* 2505 * Here we have a page in our hot little mitts and are 2506 * just waiting to stuff it on the appropriate lists. 2507 * Get the mutex and check to see if it really does 2508 * not exist.
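 *
 * (Pages are only ever inserted into a bucket while its hash mutex is
 * held, so holding PAGE_HASH_MUTEX(index) across both the search and
 * the page_hashin() below is what rules out a duplicate [vp, off]
 * identity appearing in between.)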
2509 */ 2510 phm = PAGE_HASH_MUTEX(index); 2511 mutex_enter(phm); 2512 PAGE_HASH_SEARCH(index, pp, vp, off); 2513 if (pp == NULL) { 2514 VM_STAT_ADD(page_create_new); 2515 pp = npp; 2516 npp = NULL; 2517 if (!page_hashin(pp, vp, off, phm)) { 2518 /* 2519 * Since we hold the page hash mutex and 2520 * just searched for this page, page_hashin 2521 * had better not fail. If it does, that 2522 * means some thread did not follow the 2523 * page hash mutex rules. Panic now and 2524 * get it over with. As usual, go down 2525 * holding all the locks. 2526 */ 2527 ASSERT(MUTEX_HELD(phm)); 2528 panic("page_create: " 2529 "hashin failed %p %p %llx %p", 2530 (void *)pp, (void *)vp, off, (void *)phm); 2531 /*NOTREACHED*/ 2532 } 2533 ASSERT(MUTEX_HELD(phm)); 2534 mutex_exit(phm); 2535 phm = NULL; 2536 2537 /* 2538 * Hat layer locking need not be done to set 2539 * the following bits since the page is not hashed 2540 * and was on the free list (i.e., had no mappings). 2541 * 2542 * Set the reference bit to protect 2543 * against immediate pageout 2544 * 2545 * XXXmh modify freelist code to set reference 2546 * bit so we don't have to do it here. 2547 */ 2548 page_set_props(pp, P_REF); 2549 found_on_free++; 2550 } else { 2551 VM_STAT_ADD(page_create_exists); 2552 if (flags & PG_EXCL) { 2553 /* 2554 * Found an existing page, and the caller 2555 * wanted all new pages. Undo all of the work 2556 * we have done. 2557 */ 2558 mutex_exit(phm); 2559 phm = NULL; 2560 while (plist != NULL) { 2561 pp = plist; 2562 page_sub(&plist, pp); 2563 page_io_unlock(pp); 2564 /* large pages should not end up here */ 2565 ASSERT(pp->p_szc == 0); 2566 /*LINTED: constant in conditional ctx*/ 2567 VN_DISPOSE(pp, B_INVAL, 0, kcred); 2568 } 2569 VM_STAT_ADD(page_create_found_one); 2570 goto fail; 2571 } 2572 ASSERT(flags & PG_WAIT); 2573 if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) { 2574 /* 2575 * Start all over again if we blocked trying 2576 * to lock the page. 2577 */ 2578 mutex_exit(phm); 2579 VM_STAT_ADD(page_create_page_lock_failed); 2580 phm = NULL; 2581 goto top; 2582 } 2583 mutex_exit(phm); 2584 phm = NULL; 2585 2586 if (PP_ISFREE(pp)) { 2587 ASSERT(PP_ISAGED(pp) == 0); 2588 VM_STAT_ADD(pagecnt.pc_get_cache); 2589 page_list_sub(pp, PG_CACHE_LIST); 2590 PP_CLRFREE(pp); 2591 found_on_free++; 2592 } 2593 } 2594 2595 /* 2596 * Got a page! It is locked. Acquire the i/o 2597 * lock since we are going to use the p_next and 2598 * p_prev fields to link the requested pages together. 2599 */ 2600 page_io_lock(pp); 2601 page_add(&plist, pp); 2602 plist = plist->p_next; 2603 off += PAGESIZE; 2604 vaddr += PAGESIZE; 2605 } 2606 2607 ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1); 2608 fail: 2609 if (npp != NULL) { 2610 /* 2611 * Did not need this page after all. 2612 * Put it back on the free list.
2613 */ 2614 VM_STAT_ADD(page_create_putbacks); 2615 PP_SETFREE(npp); 2616 PP_SETAGED(npp); 2617 npp->p_offset = (u_offset_t)-1; 2618 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL); 2619 page_unlock(npp); 2620 2621 } 2622 2623 ASSERT(pages_req >= found_on_free); 2624 2625 { 2626 uint_t overshoot = (uint_t)(pages_req - found_on_free); 2627 2628 if (overshoot) { 2629 VM_STAT_ADD(page_create_overshoot); 2630 p = &pcf[pcf_index]; 2631 mutex_enter(&p->pcf_lock); 2632 if (p->pcf_block) { 2633 p->pcf_reserve += overshoot; 2634 } else { 2635 p->pcf_count += overshoot; 2636 if (p->pcf_wait) { 2637 mutex_enter(&new_freemem_lock); 2638 if (freemem_wait) { 2639 cv_signal(&freemem_cv); 2640 p->pcf_wait--; 2641 } else { 2642 p->pcf_wait = 0; 2643 } 2644 mutex_exit(&new_freemem_lock); 2645 } 2646 } 2647 mutex_exit(&p->pcf_lock); 2648 /* freemem is approximate, so this test OK */ 2649 if (!p->pcf_block) 2650 freemem += overshoot; 2651 } 2652 } 2653 2654 return (plist); 2655 } 2656 2657 /* 2658 * One or more constituent pages of this large page have been marked 2659 * toxic. Simply demote the large page to PAGESIZE pages and let 2660 * page_free() handle it. This routine should only be called by 2661 * large page free routines (page_free_pages() and page_destroy_pages()). 2662 * All pages are locked SE_EXCL and have already been marked free. 2663 */ 2664 static void 2665 page_free_toxic_pages(page_t *rootpp) 2666 { 2667 page_t *tpp; 2668 pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc); 2669 uint_t szc = rootpp->p_szc; 2670 2671 for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) { 2672 ASSERT(tpp->p_szc == szc); 2673 ASSERT((PAGE_EXCL(tpp) && 2674 !page_iolock_assert(tpp)) || panicstr); 2675 tpp->p_szc = 0; 2676 } 2677 2678 while (rootpp != NULL) { 2679 tpp = rootpp; 2680 page_sub(&rootpp, tpp); 2681 ASSERT(PP_ISFREE(tpp)); 2682 PP_CLRFREE(tpp); 2683 page_free(tpp, 1); 2684 } 2685 } 2686 2687 /* 2688 * Put page on the "free" list. 2689 * The free list is really two lists maintained by 2690 * the PSM of whatever machine we happen to be on. 2691 */ 2692 void 2693 page_free(page_t *pp, int dontneed) 2694 { 2695 struct pcf *p; 2696 uint_t pcf_index; 2697 2698 ASSERT((PAGE_EXCL(pp) && 2699 !page_iolock_assert(pp)) || panicstr); 2700 2701 if (PP_ISFREE(pp)) { 2702 panic("page_free: page %p is free", (void *)pp); 2703 } 2704 2705 if (pp->p_szc != 0) { 2706 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 2707 pp->p_vnode == &kvp) { 2708 panic("page_free: anon or kernel " 2709 "or no vnode large page %p", (void *)pp); 2710 } 2711 page_demote_vp_pages(pp); 2712 ASSERT(pp->p_szc == 0); 2713 } 2714 2715 /* 2716 * The page_struct_lock need not be acquired to examine these 2717 * fields since the page has an "exclusive" lock. 2718 */ 2719 if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 2720 pp->p_slckcnt != 0) { 2721 panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d " 2722 "slckcnt = %d", pp, page_pptonum(pp), pp->p_lckcnt, 2723 pp->p_cowcnt, pp->p_slckcnt); 2724 /*NOTREACHED*/ 2725 } 2726 2727 ASSERT(!hat_page_getshare(pp)); 2728 2729 PP_SETFREE(pp); 2730 ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) || 2731 !hat_ismod(pp)); 2732 page_clr_all_props(pp); 2733 ASSERT(!hat_page_getshare(pp)); 2734 2735 /* 2736 * Now we add the page to the head of the free list. 2737 * But if this page is associated with a paged vnode 2738 * then we adjust the head forward so that the page is 2739 * effectively at the end of the list.
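 *
 * (A page with no identity can go straight to the free list, since it
 * can never be looked up by name again; a vnode page keeps its
 * identity on the cache list, where page_reclaim() can still recover
 * it by name.)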
2740 */ 2741 if (pp->p_vnode == NULL) { 2742 /* 2743 * Page has no identity, put it on the free list. 2744 */ 2745 PP_SETAGED(pp); 2746 pp->p_offset = (u_offset_t)-1; 2747 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 2748 VM_STAT_ADD(pagecnt.pc_free_free); 2749 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2750 "page_free_free:pp %p", pp); 2751 } else { 2752 PP_CLRAGED(pp); 2753 2754 if (!dontneed || nopageage) { 2755 /* move it to the tail of the list */ 2756 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL); 2757 2758 VM_STAT_ADD(pagecnt.pc_free_cache); 2759 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL, 2760 "page_free_cache_tail:pp %p", pp); 2761 } else { 2762 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD); 2763 2764 VM_STAT_ADD(pagecnt.pc_free_dontneed); 2765 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD, 2766 "page_free_cache_head:pp %p", pp); 2767 } 2768 } 2769 page_unlock(pp); 2770 2771 /* 2772 * Now do the `freemem' accounting. 2773 */ 2774 pcf_index = PCF_INDEX(); 2775 p = &pcf[pcf_index]; 2776 2777 mutex_enter(&p->pcf_lock); 2778 if (p->pcf_block) { 2779 p->pcf_reserve += 1; 2780 } else { 2781 p->pcf_count += 1; 2782 if (p->pcf_wait) { 2783 mutex_enter(&new_freemem_lock); 2784 /* 2785 * Check to see if some other thread 2786 * is actually waiting. Another bucket 2787 * may have woken it up by now. If there 2788 * are no waiters, then set our pcf_wait 2789 * count to zero to avoid coming in here 2790 * next time. Also, since only one page 2791 * was put on the free list, just wake 2792 * up one waiter. 2793 */ 2794 if (freemem_wait) { 2795 cv_signal(&freemem_cv); 2796 p->pcf_wait--; 2797 } else { 2798 p->pcf_wait = 0; 2799 } 2800 mutex_exit(&new_freemem_lock); 2801 } 2802 } 2803 mutex_exit(&p->pcf_lock); 2804 2805 /* freemem is approximate, so this test OK */ 2806 if (!p->pcf_block) 2807 freemem += 1; 2808 } 2809 2810 /* 2811 * Put page on the "free" list during initial startup. 2812 * This happens during initial single-threaded execution. 2813 */ 2814 void 2815 page_free_at_startup(page_t *pp) 2816 { 2817 struct pcf *p; 2818 uint_t pcf_index; 2819 2820 page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT); 2821 VM_STAT_ADD(pagecnt.pc_free_free); 2822 2823 /* 2824 * Now do the `freemem' accounting.
2825 */ 2826 pcf_index = PCF_INDEX(); 2827 p = &pcf[pcf_index]; 2828 2829 ASSERT(p->pcf_block == 0); 2830 ASSERT(p->pcf_wait == 0); 2831 p->pcf_count += 1; 2832 2833 /* freemem is approximate, so this is OK */ 2834 freemem += 1; 2835 } 2836 2837 void 2838 page_free_pages(page_t *pp) 2839 { 2840 page_t *tpp, *rootpp = NULL; 2841 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 2842 pgcnt_t i; 2843 uint_t szc = pp->p_szc; 2844 2845 VM_STAT_ADD(pagecnt.pc_free_pages); 2846 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2847 "page_free_free:pp %p", pp); 2848 2849 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 2850 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 2851 panic("page_free_pages: not root page %p", (void *)pp); 2852 /*NOTREACHED*/ 2853 } 2854 2855 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 2856 ASSERT((PAGE_EXCL(tpp) && 2857 !page_iolock_assert(tpp)) || panicstr); 2858 if (PP_ISFREE(tpp)) { 2859 panic("page_free_pages: page %p is free", (void *)tpp); 2860 /*NOTREACHED*/ 2861 } 2862 if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 || 2863 tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) { 2864 panic("page_free_pages %p", (void *)tpp); 2865 /*NOTREACHED*/ 2866 } 2867 2868 ASSERT(!hat_page_getshare(tpp)); 2869 ASSERT(tpp->p_vnode == NULL); 2870 ASSERT(tpp->p_szc == szc); 2871 2872 PP_SETFREE(tpp); 2873 page_clr_all_props(tpp); 2874 PP_SETAGED(tpp); 2875 tpp->p_offset = (u_offset_t)-1; 2876 ASSERT(tpp->p_next == tpp); 2877 ASSERT(tpp->p_prev == tpp); 2878 page_list_concat(&rootpp, &tpp); 2879 } 2880 ASSERT(rootpp == pp); 2881 2882 page_list_add_pages(rootpp, 0); 2883 page_create_putback(pgcnt); 2884 } 2885 2886 int free_pages = 1; 2887 2888 /* 2889 * This routine attempts to return pages to the cachelist via page_release(). 2890 * It does not *have* to be successful in all cases, since the pageout scanner 2891 * will catch any pages it misses. It does need to be fast and not introduce 2892 * too much overhead. 2893 * 2894 * If a page isn't found on the unlocked sweep of the page_hash bucket, we 2895 * don't lock and retry. This is ok, since the page scanner will eventually 2896 * find any page we miss in free_vp_pages(). 2897 */ 2898 void 2899 free_vp_pages(vnode_t *vp, u_offset_t off, size_t len) 2900 { 2901 page_t *pp; 2902 u_offset_t eoff; 2903 extern int swap_in_range(vnode_t *, u_offset_t, size_t); 2904 2905 eoff = off + len; 2906 2907 if (free_pages == 0) 2908 return; 2909 if (swap_in_range(vp, off, len)) 2910 return; 2911 2912 for (; off < eoff; off += PAGESIZE) { 2913 2914 /* 2915 * find the page using a fast, but inexact search. It'll be OK 2916 * if a few pages slip through the cracks here. 2917 */ 2918 pp = page_exists(vp, off); 2919 2920 /* 2921 * If we didn't find the page (it may not exist), the page 2922 * is free, looks still in use (shared), or we can't lock it, 2923 * just give up. 2924 */ 2925 if (pp == NULL || 2926 PP_ISFREE(pp) || 2927 page_share_cnt(pp) > 0 || 2928 !page_trylock(pp, SE_EXCL)) 2929 continue; 2930 2931 /* 2932 * Once we have locked pp, verify that it's still the 2933 * correct page and not already free 2934 */ 2935 ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL)); 2936 if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) { 2937 page_unlock(pp); 2938 continue; 2939 } 2940 2941 /* 2942 * try to release the page... 2943 */ 2944 (void) page_release(pp, 1); 2945 } 2946 } 2947 2948 /* 2949 * Reclaim the given page from the free list. 2950 * Returns 1 on success or 0 on failure. 2951 * 2952 * The page is unlocked if it can't be reclaimed (when freemem == 0). 
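 * A typical caller pattern (a hypothetical sketch, not code from this
 * file) is to find and lock the page, then retry from the top of its
 * lookup if the reclaim fails:
 *
 *	top:
 *		... look up pp and lock it SE_EXCL ...
 *		if (PP_ISFREE(pp) && !page_reclaim(pp, lock))
 *			goto top;
 *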
2953 * If `lock' is non-null, it will be dropped and re-acquired if 2954 * the routine must wait while freemem is 0. 2955 * 2956 * As it turns out, boot_getpages() does this. It picks a page, 2957 * based on where OBP mapped in some address, gets its pfn, searches 2958 * the memsegs, locks the page, then pulls it off the free list! 2959 */ 2960 int 2961 page_reclaim(page_t *pp, kmutex_t *lock) 2962 { 2963 struct pcf *p; 2964 uint_t pcf_index; 2965 struct cpu *cpup; 2966 uint_t i; 2967 pgcnt_t npgs, need; 2968 pgcnt_t collected = 0; 2969 2970 ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1); 2971 ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp)); 2972 2973 npgs = page_get_pagecnt(pp->p_szc); 2974 2975 /* 2976 * If `freemem' is 0, we cannot reclaim this page from the 2977 * freelist, so release every lock we might hold: the page, 2978 * and the `lock' before blocking. 2979 * 2980 * The only way `freemem' can become 0 while there are pages 2981 * marked free (have their p->p_free bit set) is when the 2982 * system is low on memory and doing a page_create(). page_create() 2983 * decreases `freemem' by the requested amount up front, in order to 2984 * guarantee that once it starts acquiring pages it will be able to 2985 * get all that it needs. So, we need to release 2986 * this page, and let page_create() have it. 2987 * 2988 * Since `freemem' being zero is not supposed to happen, just 2989 * use the usual hash stuff as a starting point. If that bucket 2990 * is empty, then assume the worst, and start at the beginning 2991 * of the pcf array. If we always start at the beginning 2992 * when acquiring more than one pcf lock, there won't be any 2993 * deadlock problems. 2994 */ 2995 2996 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */ 2997 2998 if (freemem <= throttlefree && !page_create_throttle(npgs, 0)) { 2999 pcf_acquire_all(); 3000 goto page_reclaim_nomem; 3001 } 3002 3003 pcf_index = PCF_INDEX(); 3004 p = &pcf[pcf_index]; 3005 mutex_enter(&p->pcf_lock); 3006 if (p->pcf_count >= npgs) { 3007 collected = npgs; 3008 p->pcf_count -= npgs; 3009 } 3010 mutex_exit(&p->pcf_lock); 3011 need = npgs - collected; 3012 3013 if (need > 0) { 3014 VM_STAT_ADD(page_reclaim_zero); 3015 /* 3016 * Check again. It's possible that some other thread 3017 * could have been right behind us, and added one 3018 * to a list somewhere. Acquire each of the pcf locks 3019 * until we find a page. 3020 */ 3021 p = pcf; 3022 for (i = 0; i < PCF_FANOUT; i++) { 3023 mutex_enter(&p->pcf_lock); 3024 if (p->pcf_count) { 3025 if (p->pcf_count >= need) { 3026 p->pcf_count -= need; 3027 collected += need; 3028 need = 0; 3029 break; 3030 } else if (p->pcf_count) { 3031 collected += p->pcf_count; 3032 need -= p->pcf_count; 3033 p->pcf_count = 0; 3034 } 3035 } 3036 p++; 3037 } 3038 3039 if (need > 0) { 3040 page_reclaim_nomem: 3041 /* 3042 * We really can't have page `pp'. 3043 * Time for the no-memory dance with 3044 * page_free(). This is just like 3045 * page_create_wait(). Plus the added 3046 * attraction of releasing whatever mutex 3047 * we were called with in `lock'. 3048 * Page_unlock() will wake up any thread 3049 * waiting around for this page. 3050 */ 3051 if (lock) { 3052 VM_STAT_ADD(page_reclaim_zero_locked); 3053 mutex_exit(lock); 3054 } 3055 page_unlock(pp); 3056 3057 /* 3058 * get this before we drop all the pcf locks.
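 *
 * (Presumably the point of taking new_freemem_lock while still holding
 * every pcf_lock: page_free() cannot slip a page into a bucket and
 * signal freemem_cv before our pcf_wait counts and freemem_wait are
 * visible, so the wakeup cannot be lost.)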
3059 */ 3060 mutex_enter(&new_freemem_lock); 3061 3062 p = pcf; 3063 p->pcf_count += collected; 3064 for (i = 0; i < PCF_FANOUT; i++) { 3065 p->pcf_wait++; 3066 mutex_exit(&p->pcf_lock); 3067 p++; 3068 } 3069 3070 freemem_wait++; 3071 cv_wait(&freemem_cv, &new_freemem_lock); 3072 freemem_wait--; 3073 3074 mutex_exit(&new_freemem_lock); 3075 3076 if (lock) { 3077 mutex_enter(lock); 3078 } 3079 return (0); 3080 } 3081 3082 /* 3083 * We beat the PCF bins over the head until 3084 * we got the memory that we wanted. 3085 * The pcf accounting has been done, 3086 * though none of the pcf_wait flags have been set, 3087 * drop the locks and continue on. 3088 */ 3089 ASSERT(collected == npgs); 3090 while (p >= pcf) { 3091 mutex_exit(&p->pcf_lock); 3092 p--; 3093 } 3094 } 3095 3096 /* 3097 * freemem is not protected by any lock. Thus, we cannot 3098 * have any assertion containing freemem here. 3099 */ 3100 freemem -= npgs; 3101 3102 VM_STAT_ADD(pagecnt.pc_reclaim); 3103 if (PP_ISAGED(pp)) { 3104 if (npgs > 1) { 3105 page_list_sub_pages(pp, pp->p_szc); 3106 } else { 3107 page_list_sub(pp, PG_FREE_LIST); 3108 } 3109 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE, 3110 "page_reclaim_free:pp %p", pp); 3111 } else { 3112 ASSERT(npgs == 1); 3113 page_list_sub(pp, PG_CACHE_LIST); 3114 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE, 3115 "page_reclaim_cache:pp %p", pp); 3116 } 3117 3118 /* 3119 * clear the p_free & p_age bits since this page is no longer 3120 * on the free list. Notice that there was a brief time where 3121 * a page is marked as free, but is not on the list. 3122 * 3123 * Set the reference bit to protect against immediate pageout. 3124 */ 3125 for (i = 0; i < npgs; i++, pp++) { 3126 PP_CLRFREE(pp); 3127 PP_CLRAGED(pp); 3128 page_set_props(pp, P_REF); 3129 } 3130 3131 CPU_STATS_ENTER_K(); 3132 cpup = CPU; /* get cpup now that CPU cannot change */ 3133 CPU_STATS_ADDQ(cpup, vm, pgrec, 1); 3134 CPU_STATS_ADDQ(cpup, vm, pgfrec, 1); 3135 CPU_STATS_EXIT_K(); 3136 3137 return (1); 3138 } 3139 3140 3141 3142 /* 3143 * Destroy identity of the page and put it back on 3144 * the page free list. Assumes that the caller has 3145 * acquired the "exclusive" lock on the page. 3146 */ 3147 void 3148 page_destroy(page_t *pp, int dontfree) 3149 { 3150 ASSERT((PAGE_EXCL(pp) && 3151 !page_iolock_assert(pp)) || panicstr); 3152 ASSERT(pp->p_slckcnt == 0 || panicstr); 3153 3154 if (pp->p_szc != 0) { 3155 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 3156 pp->p_vnode == &kvp) { 3157 panic("page_destroy: anon or kernel or no vnode " 3158 "large page %p", (void *)pp); 3159 } 3160 page_demote_vp_pages(pp); 3161 ASSERT(pp->p_szc == 0); 3162 } 3163 3164 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp); 3165 3166 /* 3167 * Unload translations, if any, then hash out the 3168 * page to erase its identity. 3169 */ 3170 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3171 page_hashout(pp, NULL); 3172 3173 if (!dontfree) { 3174 /* 3175 * Acquire the "freemem_lock" for availrmem. 3176 * The page_struct_lock need not be acquired for lckcnt 3177 * and cowcnt since the page has an "exclusive" lock. 3178 */ 3179 if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) { 3180 mutex_enter(&freemem_lock); 3181 if (pp->p_lckcnt != 0) { 3182 availrmem++; 3183 pp->p_lckcnt = 0; 3184 } 3185 if (pp->p_cowcnt != 0) { 3186 availrmem += pp->p_cowcnt; 3187 pp->p_cowcnt = 0; 3188 } 3189 mutex_exit(&freemem_lock); 3190 } 3191 /* 3192 * Put the page on the "free" list. 
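 *
 * (This happens only when `dontfree' is clear; with `dontfree' set the
 * caller keeps the now identity-free, exclusively locked page for its
 * own use.)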
3193 */ 3194 page_free(pp, 0); 3195 } 3196 } 3197 3198 void 3199 page_destroy_pages(page_t *pp) 3200 { 3201 3202 page_t *tpp, *rootpp = NULL; 3203 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 3204 pgcnt_t i, pglcks = 0; 3205 uint_t szc = pp->p_szc; 3206 3207 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 3208 3209 VM_STAT_ADD(pagecnt.pc_destroy_pages); 3210 3211 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp); 3212 3213 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 3214 panic("page_destroy_pages: not root page %p", (void *)pp); 3215 /*NOTREACHED*/ 3216 } 3217 3218 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 3219 ASSERT((PAGE_EXCL(tpp) && 3220 !page_iolock_assert(tpp)) || panicstr); 3221 ASSERT(tpp->p_slckcnt == 0 || panicstr); 3222 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 3223 page_hashout(tpp, NULL); 3224 ASSERT(tpp->p_offset == (u_offset_t)-1); 3225 if (tpp->p_lckcnt != 0) { 3226 pglcks++; 3227 tpp->p_lckcnt = 0; 3228 } else if (tpp->p_cowcnt != 0) { 3229 pglcks += tpp->p_cowcnt; 3230 tpp->p_cowcnt = 0; 3231 } 3232 ASSERT(!hat_page_getshare(tpp)); 3233 ASSERT(tpp->p_vnode == NULL); 3234 ASSERT(tpp->p_szc == szc); 3235 3236 PP_SETFREE(tpp); 3237 page_clr_all_props(tpp); 3238 PP_SETAGED(tpp); 3239 ASSERT(tpp->p_next == tpp); 3240 ASSERT(tpp->p_prev == tpp); 3241 page_list_concat(&rootpp, &tpp); 3242 } 3243 3244 ASSERT(rootpp == pp); 3245 if (pglcks != 0) { 3246 mutex_enter(&freemem_lock); 3247 availrmem += pglcks; 3248 mutex_exit(&freemem_lock); 3249 } 3250 3251 page_list_add_pages(rootpp, 0); 3252 page_create_putback(pgcnt); 3253 } 3254 3255 /* 3256 * Similar to page_destroy(), but destroys pages which are 3257 * locked and known to be on the page free list. Since 3258 * the page is known to be free and locked, no one can access 3259 * it. 3260 * 3261 * Also, the number of free pages does not change. 3262 */ 3263 void 3264 page_destroy_free(page_t *pp) 3265 { 3266 ASSERT(PAGE_EXCL(pp)); 3267 ASSERT(PP_ISFREE(pp)); 3268 ASSERT(pp->p_vnode); 3269 ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0); 3270 ASSERT(!hat_page_is_mapped(pp)); 3271 ASSERT(PP_ISAGED(pp) == 0); 3272 ASSERT(pp->p_szc == 0); 3273 3274 VM_STAT_ADD(pagecnt.pc_destroy_free); 3275 page_list_sub(pp, PG_CACHE_LIST); 3276 3277 page_hashout(pp, NULL); 3278 ASSERT(pp->p_vnode == NULL); 3279 ASSERT(pp->p_offset == (u_offset_t)-1); 3280 ASSERT(pp->p_hash == NULL); 3281 3282 PP_SETAGED(pp); 3283 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 3284 page_unlock(pp); 3285 3286 mutex_enter(&new_freemem_lock); 3287 if (freemem_wait) { 3288 cv_signal(&freemem_cv); 3289 } 3290 mutex_exit(&new_freemem_lock); 3291 } 3292 3293 /* 3294 * Rename the page "opp" to have an identity specified 3295 * by [vp, off]. If a page already exists with this name 3296 * it is locked and destroyed. Note that the page's 3297 * translations are not unloaded during the rename. 3298 * 3299 * This routine is used by the anon layer to "steal" the 3300 * original page and is not unlike destroying a page and 3301 * creating a new page using the same page frame. 3302 * 3303 * XXX -- Could deadlock if caller 1 tries to rename A to B while 3304 * caller 2 tries to rename B to A. 
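 *
 * (Each caller would hold its own source page locked SE_EXCL while
 * sleeping in page_lock() on the other's -- a classic A-B / B-A lock
 * ordering problem.)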
3305 */ 3306 void 3307 page_rename(page_t *opp, vnode_t *vp, u_offset_t off) 3308 { 3309 page_t *pp; 3310 int olckcnt = 0; 3311 int ocowcnt = 0; 3312 kmutex_t *phm; 3313 ulong_t index; 3314 3315 ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp)); 3316 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3317 ASSERT(PP_ISFREE(opp) == 0); 3318 3319 VM_STAT_ADD(page_rename_count); 3320 3321 TRACE_3(TR_FAC_VM, TR_PAGE_RENAME, 3322 "page rename:pp %p vp %p off %llx", opp, vp, off); 3323 3324 /* 3325 * CacheFS may call page_rename for a large NFS page 3326 * when both CacheFS and NFS mount points are used 3327 * by applications. Demote this large page before 3328 * renaming it, to ensure that there are no "partial" 3329 * large pages left lying around. 3330 */ 3331 if (opp->p_szc != 0) { 3332 vnode_t *ovp = opp->p_vnode; 3333 ASSERT(ovp != NULL); 3334 ASSERT(!IS_SWAPFSVP(ovp)); 3335 ASSERT(ovp != &kvp); 3336 page_demote_vp_pages(opp); 3337 ASSERT(opp->p_szc == 0); 3338 } 3339 3340 page_hashout(opp, NULL); 3341 PP_CLRAGED(opp); 3342 3343 /* 3344 * Acquire the appropriate page hash lock, since 3345 * we're going to rename the page. 3346 */ 3347 index = PAGE_HASH_FUNC(vp, off); 3348 phm = PAGE_HASH_MUTEX(index); 3349 mutex_enter(phm); 3350 top: 3351 /* 3352 * Look for an existing page with this name and destroy it if found. 3353 * By holding the page hash lock all the way to the page_hashin() 3354 * call, we are assured that no page can be created with this 3355 * identity. In the case when the phm lock is dropped to undo any 3356 * hat layer mappings, the existing page is held with an "exclusive" 3357 * lock, again preventing another page from being created with 3358 * this identity. 3359 */ 3360 PAGE_HASH_SEARCH(index, pp, vp, off); 3361 if (pp != NULL) { 3362 VM_STAT_ADD(page_rename_exists); 3363 3364 /* 3365 * As it turns out, this is one of only two places where 3366 * page_lock() needs to hold the passed-in lock in the 3367 * successful case. In all of the others, the lock could 3368 * be dropped as soon as the attempt is made to lock 3369 * the page. It is tempting to add yet another argument, 3370 * PL_KEEP or PL_DROP, to let page_lock know what to do. 3371 */ 3372 if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) { 3373 /* 3374 * Went to sleep because the page could not 3375 * be locked. We were woken up when the page 3376 * was unlocked, or when the page was destroyed. 3377 * In either case, `phm' was dropped while we 3378 * slept. Hence we should not just roar through 3379 * this loop. 3380 */ 3381 goto top; 3382 } 3383 3384 /* 3385 * If an existing page is a large page, then demote 3386 * it to ensure that no "partial" large pages are 3387 * "created" after page_rename. An existing page 3388 * can be a CacheFS page, and can't belong to swapfs. 3389 */ 3390 if (hat_page_is_mapped(pp)) { 3391 /* 3392 * Unload translations. Since we hold the 3393 * exclusive lock on this page, the page 3394 * can not be changed while we drop phm. 3395 * This is also not a lock protocol violation, 3396 * but rather the proper way to do things.
3397 */ 3398 mutex_exit(phm); 3399 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3400 if (pp->p_szc != 0) { 3401 ASSERT(!IS_SWAPFSVP(vp)); 3402 ASSERT(vp != &kvp); 3403 page_demote_vp_pages(pp); 3404 ASSERT(pp->p_szc == 0); 3405 } 3406 mutex_enter(phm); 3407 } else if (pp->p_szc != 0) { 3408 ASSERT(!IS_SWAPFSVP(vp)); 3409 ASSERT(vp != &kvp); 3410 mutex_exit(phm); 3411 page_demote_vp_pages(pp); 3412 ASSERT(pp->p_szc == 0); 3413 mutex_enter(phm); 3414 } 3415 page_hashout(pp, phm); 3416 } 3417 /* 3418 * Hash in the page with the new identity. 3419 */ 3420 if (!page_hashin(opp, vp, off, phm)) { 3421 /* 3422 * We were holding phm while we searched for [vp, off] 3423 * and only dropped phm if we found and locked a page. 3424 * If we can't create this page now, then something 3425 * is really broken. 3426 */ 3427 panic("page_rename: Can't hash in page: %p", (void *)pp); 3428 /*NOTREACHED*/ 3429 } 3430 3431 ASSERT(MUTEX_HELD(phm)); 3432 mutex_exit(phm); 3433 3434 /* 3435 * Now that we have dropped phm, let's get around to finishing up 3436 * with pp. 3437 */ 3438 if (pp != NULL) { 3439 ASSERT(!hat_page_is_mapped(pp)); 3440 /* for now large pages should not end up here */ 3441 ASSERT(pp->p_szc == 0); 3442 /* 3443 * Save the locks for transfer to the new page and then 3444 * clear them so page_free doesn't think they're important. 3445 * The page_struct_lock need not be acquired for lckcnt and 3446 * cowcnt since the page has an "exclusive" lock. 3447 */ 3448 olckcnt = pp->p_lckcnt; 3449 ocowcnt = pp->p_cowcnt; 3450 pp->p_lckcnt = pp->p_cowcnt = 0; 3451 3452 /* 3453 * Put the page on the "free" list after we drop 3454 * the lock. The less work under the lock the better. 3455 */ 3456 /*LINTED: constant in conditional context*/ 3457 VN_DISPOSE(pp, B_FREE, 0, kcred); 3458 } 3459 3460 /* 3461 * Transfer the lock count from the old page (if any). 3462 * The page_struct_lock need not be acquired for lckcnt and 3463 * cowcnt since the page has an "exclusive" lock. 3464 */ 3465 opp->p_lckcnt += olckcnt; 3466 opp->p_cowcnt += ocowcnt; 3467 } 3468 3469 /* 3470 * Low-level routine to add page `pp' to the hash and vp chains for [vp, offset]. 3471 * 3472 * Pages are normally inserted at the start of a vnode's v_pages list. 3473 * If the vnode is VMODSORT and the page is modified, it goes at the end. 3474 * This can happen when a modified page is relocated for DR. 3475 * 3476 * Returns 1 on success and 0 on failure. 3477 */ 3478 static int 3479 page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset) 3480 { 3481 page_t **listp; 3482 page_t *tp; 3483 ulong_t index; 3484 3485 ASSERT(PAGE_EXCL(pp)); 3486 ASSERT(vp != NULL); 3487 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3488 3489 /* 3490 * Be sure to set these up before the page is inserted on the hash 3491 * list. As soon as the page is placed on the list some other 3492 * thread might get confused and wonder how this page could 3493 * possibly hash to this list. 3494 */ 3495 pp->p_vnode = vp; 3496 pp->p_offset = offset; 3497 3498 /* 3499 * record if this page is on a swap vnode 3500 */ 3501 if ((vp->v_flag & VISSWAP) != 0) 3502 PP_SETSWAP(pp); 3503 3504 index = PAGE_HASH_FUNC(vp, offset); 3505 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index))); 3506 listp = &page_hash[index]; 3507 3508 /* 3509 * If this page is already hashed in, fail this attempt to add it.
3510 */ 3511 for (tp = *listp; tp != NULL; tp = tp->p_hash) { 3512 if (tp->p_vnode == vp && tp->p_offset == offset) { 3513 pp->p_vnode = NULL; 3514 pp->p_offset = (u_offset_t)(-1); 3515 return (0); 3516 } 3517 } 3518 pp->p_hash = *listp; 3519 *listp = pp; 3520 3521 /* 3522 * Add the page to the vnode's list of pages 3523 */ 3524 if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp)) 3525 listp = &vp->v_pages->p_vpprev->p_vpnext; 3526 else 3527 listp = &vp->v_pages; 3528 3529 page_vpadd(listp, pp); 3530 3531 return (1); 3532 } 3533 3534 /* 3535 * Add page `pp' to both the hash and vp chains for [vp, offset]. 3536 * 3537 * Returns 1 on success and 0 on failure. 3538 * If hold is passed in, it is not dropped. 3539 */ 3540 int 3541 page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold) 3542 { 3543 kmutex_t *phm = NULL; 3544 kmutex_t *vphm; 3545 int rc; 3546 3547 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3548 3549 TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN, 3550 "page_hashin:pp %p vp %p offset %llx", 3551 pp, vp, offset); 3552 3553 VM_STAT_ADD(hashin_count); 3554 3555 if (hold != NULL) 3556 phm = hold; 3557 else { 3558 VM_STAT_ADD(hashin_not_held); 3559 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset)); 3560 mutex_enter(phm); 3561 } 3562 3563 vphm = page_vnode_mutex(vp); 3564 mutex_enter(vphm); 3565 rc = page_do_hashin(pp, vp, offset); 3566 mutex_exit(vphm); 3567 if (hold == NULL) 3568 mutex_exit(phm); 3569 if (rc == 0) 3570 VM_STAT_ADD(hashin_already); 3571 return (rc); 3572 } 3573 3574 /* 3575 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3576 * All mutexes must be held 3577 */ 3578 static void 3579 page_do_hashout(page_t *pp) 3580 { 3581 page_t **hpp; 3582 page_t *hp; 3583 vnode_t *vp = pp->p_vnode; 3584 3585 ASSERT(vp != NULL); 3586 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3587 3588 /* 3589 * First, take pp off of its hash chain. 3590 */ 3591 hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)]; 3592 3593 for (;;) { 3594 hp = *hpp; 3595 if (hp == pp) 3596 break; 3597 if (hp == NULL) { 3598 panic("page_do_hashout"); 3599 /*NOTREACHED*/ 3600 } 3601 hpp = &hp->p_hash; 3602 } 3603 *hpp = pp->p_hash; 3604 3605 /* 3606 * Now remove it from its associated vnode. 3607 */ 3608 if (vp->v_pages) 3609 page_vpsub(&vp->v_pages, pp); 3610 3611 pp->p_hash = NULL; 3612 page_clr_all_props(pp); 3613 PP_CLRSWAP(pp); 3614 pp->p_vnode = NULL; 3615 pp->p_offset = (u_offset_t)-1; 3616 } 3617 3618 /* 3619 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3620 * 3621 * When `phm' is non-NULL it contains the address of the mutex protecting the 3622 * hash list pp is on. It is not dropped. 3623 */ 3624 void 3625 page_hashout(page_t *pp, kmutex_t *phm) 3626 { 3627 vnode_t *vp; 3628 ulong_t index; 3629 kmutex_t *nphm; 3630 kmutex_t *vphm; 3631 kmutex_t *sep; 3632 3633 ASSERT(phm != NULL ? 
MUTEX_HELD(phm) : 1); 3634 ASSERT(pp->p_vnode != NULL); 3635 ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr); 3636 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode))); 3637 3638 vp = pp->p_vnode; 3639 3640 TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT, 3641 "page_hashout:pp %p vp %p", pp, vp); 3642 3643 /* Kernel probe */ 3644 TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */, 3645 tnf_opaque, vnode, vp, 3646 tnf_offset, offset, pp->p_offset); 3647 3651 VM_STAT_ADD(hashout_count); 3652 index = PAGE_HASH_FUNC(vp, pp->p_offset); 3653 if (phm == NULL) { 3654 VM_STAT_ADD(hashout_not_held); 3655 nphm = PAGE_HASH_MUTEX(index); 3656 mutex_enter(nphm); 3657 } 3658 ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1); 3659 3660 3661 /* 3662 * grab the page vnode mutex and remove the page... 3663 */ 3664 vphm = page_vnode_mutex(vp); 3665 mutex_enter(vphm); 3666 3667 page_do_hashout(pp); 3668 3669 mutex_exit(vphm); 3670 if (phm == NULL) 3671 mutex_exit(nphm); 3672 3673 /* 3674 * Wake up processes waiting for this page. The page's 3675 * identity has been changed, and is probably not the 3676 * desired page any longer. 3677 */ 3678 sep = page_se_mutex(pp); 3679 mutex_enter(sep); 3680 pp->p_selock &= ~SE_EWANTED; 3681 if (CV_HAS_WAITERS(&pp->p_cv)) 3682 cv_broadcast(&pp->p_cv); 3683 mutex_exit(sep); 3684 } 3685 3686 /* 3687 * Add the page to the front of a linked list of pages 3688 * using the p_next & p_prev pointers for the list. 3689 * The caller is responsible for protecting the list pointers. 3690 */ 3691 void 3692 page_add(page_t **ppp, page_t *pp) 3693 { 3694 ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3695 3696 page_add_common(ppp, pp); 3697 } 3698 3699 3700 3701 /* 3702 * Common code for page_add() and mach_page_add() 3703 */ 3704 void 3705 page_add_common(page_t **ppp, page_t *pp) 3706 { 3707 if (*ppp == NULL) { 3708 pp->p_next = pp->p_prev = pp; 3709 } else { 3710 pp->p_next = *ppp; 3711 pp->p_prev = (*ppp)->p_prev; 3712 (*ppp)->p_prev = pp; 3713 pp->p_prev->p_next = pp; 3714 } 3715 *ppp = pp; 3716 } 3717 3718 3719 /* 3720 * Remove this page from a linked list of pages 3721 * using the p_next & p_prev pointers for the list. 3722 * 3723 * The caller is responsible for protecting the list pointers. 3724 */ 3725 void 3726 page_sub(page_t **ppp, page_t *pp) 3727 { 3728 ASSERT((PP_ISFREE(pp)) ? 1 : 3729 (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3730 3731 if (*ppp == NULL || pp == NULL) { 3732 panic("page_sub: bad arg(s): pp %p, *ppp %p", 3733 (void *)pp, (void *)(*ppp)); 3734 /*NOTREACHED*/ 3735 } 3736 3737 page_sub_common(ppp, pp); 3738 } 3739 3740 3741 /* 3742 * Common code for page_sub() and mach_page_sub() 3743 */ 3744 void 3745 page_sub_common(page_t **ppp, page_t *pp) 3746 { 3747 if (*ppp == pp) 3748 *ppp = pp->p_next; /* go to next page */ 3749 3750 if (*ppp == pp) 3751 *ppp = NULL; /* page list is gone */ 3752 else { 3753 pp->p_prev->p_next = pp->p_next; 3754 pp->p_next->p_prev = pp->p_prev; 3755 } 3756 pp->p_prev = pp->p_next = pp; /* make pp a list of one */ 3757 } 3758 3759 3760 /* 3761 * Break page list oppp into two lists with npages in the first list. 3762 * The tail is returned in nppp.
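 *
 * For example (a hypothetical usage sketch only), to peel the first
 * four pages off a list built elsewhere:
 *
 *	page_t *head;		(assumed: a list of at least 4 pages)
 *	page_t *tail;
 *
 *	page_list_break(&head, &tail, 4);
 *	(head now holds the first 4 pages; tail holds the rest, or NULL)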
3763 */ 3764 void 3765 page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages) 3766 { 3767 page_t *s1pp = *oppp; 3768 page_t *s2pp; 3769 page_t *e1pp, *e2pp; 3770 long n = 0; 3771 3772 if (s1pp == NULL) { 3773 *nppp = NULL; 3774 return; 3775 } 3776 if (npages == 0) { 3777 *nppp = s1pp; 3778 *oppp = NULL; 3779 return; 3780 } 3781 for (n = 0, s2pp = *oppp; n < npages; n++) { 3782 s2pp = s2pp->p_next; 3783 } 3784 /* Fix head and tail of new lists */ 3785 e1pp = s2pp->p_prev; 3786 e2pp = s1pp->p_prev; 3787 s1pp->p_prev = e1pp; 3788 e1pp->p_next = s1pp; 3789 s2pp->p_prev = e2pp; 3790 e2pp->p_next = s2pp; 3791 3792 /* second list empty */ 3793 if (s2pp == s1pp) { 3794 *oppp = s1pp; 3795 *nppp = NULL; 3796 } else { 3797 *oppp = s1pp; 3798 *nppp = s2pp; 3799 } 3800 } 3801 3802 /* 3803 * Concatenate page list nppp onto the end of list ppp. 3804 */ 3805 void 3806 page_list_concat(page_t **ppp, page_t **nppp) 3807 { 3808 page_t *s1pp, *s2pp, *e1pp, *e2pp; 3809 3810 if (*nppp == NULL) { 3811 return; 3812 } 3813 if (*ppp == NULL) { 3814 *ppp = *nppp; 3815 return; 3816 } 3817 s1pp = *ppp; 3818 e1pp = s1pp->p_prev; 3819 s2pp = *nppp; 3820 e2pp = s2pp->p_prev; 3821 s1pp->p_prev = e2pp; 3822 e2pp->p_next = s1pp; 3823 e1pp->p_next = s2pp; 3824 s2pp->p_prev = e1pp; 3825 } 3826 3827 /* 3828 * return the next page in the page list 3829 */ 3830 page_t * 3831 page_list_next(page_t *pp) 3832 { 3833 return (pp->p_next); 3834 } 3835 3836 3837 /* 3838 * Add the page to the front of the linked list of pages 3839 * using p_vpnext/p_vpprev pointers for the list. 3840 * 3841 * The caller is responsible for protecting the lists. 3842 */ 3843 void 3844 page_vpadd(page_t **ppp, page_t *pp) 3845 { 3846 if (*ppp == NULL) { 3847 pp->p_vpnext = pp->p_vpprev = pp; 3848 } else { 3849 pp->p_vpnext = *ppp; 3850 pp->p_vpprev = (*ppp)->p_vpprev; 3851 (*ppp)->p_vpprev = pp; 3852 pp->p_vpprev->p_vpnext = pp; 3853 } 3854 *ppp = pp; 3855 } 3856 3857 /* 3858 * Remove this page from the linked list of pages 3859 * using p_vpnext/p_vpprev pointers for the list. 3860 * 3861 * The caller is responsible for protecting the lists. 3862 */ 3863 void 3864 page_vpsub(page_t **ppp, page_t *pp) 3865 { 3866 if (*ppp == NULL || pp == NULL) { 3867 panic("page_vpsub: bad arg(s): pp %p, *ppp %p", 3868 (void *)pp, (void *)(*ppp)); 3869 /*NOTREACHED*/ 3870 } 3871 3872 if (*ppp == pp) 3873 *ppp = pp->p_vpnext; /* go to next page */ 3874 3875 if (*ppp == pp) 3876 *ppp = NULL; /* page list is gone */ 3877 else { 3878 pp->p_vpprev->p_vpnext = pp->p_vpnext; 3879 pp->p_vpnext->p_vpprev = pp->p_vpprev; 3880 } 3881 pp->p_vpprev = pp->p_vpnext = pp; /* make pp a list of one */ 3882 } 3883 3884 /* 3885 * Lock a physical page into memory "long term". Used to support "lock 3886 * in memory" functions. Accepts the page to be locked, and a cow variable 3887 * to indicate whether the lock will travel to the new page during 3888 * a potential copy-on-write. 3889 */ 3890 int 3891 page_pp_lock( 3892 page_t *pp, /* page to be locked */ 3893 int cow, /* cow lock */ 3894 int kernel) /* must succeed -- ignore checking */ 3895 { 3896 int r = 0; /* result -- assume failure */ 3897 3898 ASSERT(PAGE_LOCKED(pp)); 3899 3900 page_struct_lock(pp); 3901 /* 3902 * Acquire the "freemem_lock" for availrmem.
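 *
 * (Note that availrmem is charged at most once per page here: on the
 * cow path, and on the first p_lckcnt for the non-cow path -- unless
 * `kernel' is set, in which case the caller has already done the
 * availrmem accounting.)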
3903 */ 3904 if (cow) { 3905 mutex_enter(&freemem_lock); 3906 if ((availrmem > pages_pp_maximum) && 3907 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 3908 availrmem--; 3909 pages_locked++; 3910 mutex_exit(&freemem_lock); 3911 r = 1; 3912 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 3913 cmn_err(CE_WARN, 3914 "COW lock limit reached on pfn 0x%lx", 3915 page_pptonum(pp)); 3916 } 3917 } else 3918 mutex_exit(&freemem_lock); 3919 } else { 3920 if (pp->p_lckcnt) { 3921 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 3922 r = 1; 3923 if (++pp->p_lckcnt == 3924 (ushort_t)PAGE_LOCK_MAXIMUM) { 3925 cmn_err(CE_WARN, "Page lock limit " 3926 "reached on pfn 0x%lx", 3927 page_pptonum(pp)); 3928 } 3929 } 3930 } else { 3931 if (kernel) { 3932 /* availrmem accounting done by caller */ 3933 ++pp->p_lckcnt; 3934 r = 1; 3935 } else { 3936 mutex_enter(&freemem_lock); 3937 if (availrmem > pages_pp_maximum) { 3938 availrmem--; 3939 pages_locked++; 3940 ++pp->p_lckcnt; 3941 r = 1; 3942 } 3943 mutex_exit(&freemem_lock); 3944 } 3945 } 3946 } 3947 page_struct_unlock(pp); 3948 return (r); 3949 } 3950 3951 /* 3952 * Decommit a lock on a physical page frame. Account for cow locks if 3953 * appropriate. 3954 */ 3955 void 3956 page_pp_unlock( 3957 page_t *pp, /* page to be unlocked */ 3958 int cow, /* expect cow lock */ 3959 int kernel) /* this was a kernel lock */ 3960 { 3961 ASSERT(PAGE_LOCKED(pp)); 3962 3963 page_struct_lock(pp); 3964 /* 3965 * Acquire the "freemem_lock" for availrmem. 3966 * If cowcnt or lckcnt is already 0 do nothing; i.e., we 3967 * could be called to unlock even if nothing is locked. This could 3968 * happen if locked file pages were truncated (removing the lock) 3969 * and the file was grown again and new pages faulted in; the new 3970 * pages are unlocked but the segment still thinks they're locked. 3971 */ 3972 if (cow) { 3973 if (pp->p_cowcnt) { 3974 mutex_enter(&freemem_lock); 3975 pp->p_cowcnt--; 3976 availrmem++; 3977 pages_locked--; 3978 mutex_exit(&freemem_lock); 3979 } 3980 } else { 3981 if (pp->p_lckcnt && --pp->p_lckcnt == 0) { 3982 if (!kernel) { 3983 mutex_enter(&freemem_lock); 3984 availrmem++; 3985 pages_locked--; 3986 mutex_exit(&freemem_lock); 3987 } 3988 } 3989 } 3990 page_struct_unlock(pp); 3991 } 3992 3993 /* 3994 * This routine reserves availrmem for npages. 3995 * flags: KM_NOSLEEP or KM_SLEEP 3996 * Returns 1 on success or 0 on failure. 3997 */ 3998 int 3999 page_resv(pgcnt_t npages, uint_t flags) 4000 { 4001 mutex_enter(&freemem_lock); 4002 while (availrmem < tune.t_minarmem + npages) { 4003 if (flags & KM_NOSLEEP) { 4004 mutex_exit(&freemem_lock); 4005 return (0); 4006 } 4007 mutex_exit(&freemem_lock); 4008 page_needfree(npages); 4009 kmem_reap(); 4010 delay(hz >> 2); 4011 page_needfree(-(spgcnt_t)npages); 4012 mutex_enter(&freemem_lock); 4013 } 4014 availrmem -= npages; 4015 mutex_exit(&freemem_lock); 4016 return (1); 4017 } 4018 4019 /* 4020 * This routine unreserves availrmem for npages. 4021 */ 4022 void 4023 page_unresv(pgcnt_t npages) 4024 { 4025 mutex_enter(&freemem_lock); 4026 availrmem += npages; 4027 mutex_exit(&freemem_lock); 4028 } 4029 4030 /* 4031 * See Statement at the beginning of segvn_lockop() regarding 4032 * the way we handle cowcnts and lckcnts. 4033 * 4034 * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage 4035 * that breaks COW has PROT_WRITE. 4036 * 4037 * Note that we may also break COW in case we are softlocking 4038 * on read access during physio; 4039 * in this softlock case, the vpage may not have PROT_WRITE.
4040 * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp' 4041 * if the vpage doesn't have PROT_WRITE. 4042 * 4043 * This routine is never called if we are stealing a page 4044 * in anon_private. 4045 * 4046 * The caller subtracted from availrmem for a read-only mapping; 4047 * if lckcnt is 1, increment availrmem. 4048 */ 4049 void 4050 page_pp_useclaim( 4051 page_t *opp, /* original page frame losing lock */ 4052 page_t *npp, /* new page frame gaining lock */ 4053 uint_t write_perm) /* set if vpage has PROT_WRITE */ 4054 { 4055 int payback = 0; 4056 4057 ASSERT(PAGE_LOCKED(opp)); 4058 ASSERT(PAGE_LOCKED(npp)); 4059 4060 page_struct_lock(opp); 4061 4062 ASSERT(npp->p_cowcnt == 0); 4063 ASSERT(npp->p_lckcnt == 0); 4064 4065 /* Don't use claim if nothing is locked (see page_pp_unlock above) */ 4066 if ((write_perm && opp->p_cowcnt != 0) || 4067 (!write_perm && opp->p_lckcnt != 0)) { 4068 4069 if (write_perm) { 4070 npp->p_cowcnt++; 4071 ASSERT(opp->p_cowcnt != 0); 4072 opp->p_cowcnt--; 4073 } else { 4074 4075 ASSERT(opp->p_lckcnt != 0); 4076 4077 /* 4078 * We didn't need availrmem decremented if p_lckcnt on 4079 * original page is 1. Here, we are unlocking 4080 * read-only copy belonging to original page and 4081 * are locking a copy belonging to new page. 4082 */ 4083 if (opp->p_lckcnt == 1) 4084 payback = 1; 4085 4086 npp->p_lckcnt++; 4087 opp->p_lckcnt--; 4088 } 4089 } 4090 if (payback) { 4091 mutex_enter(&freemem_lock); 4092 availrmem++; 4093 pages_useclaim--; 4094 mutex_exit(&freemem_lock); 4095 } 4096 page_struct_unlock(opp); 4097 } 4098 4099 /* 4100 * Simple claim adjust functions -- used to support changes in 4101 * claims due to changes in access permissions. Used by segvn_setprot(). 4102 */ 4103 int 4104 page_addclaim(page_t *pp) 4105 { 4106 int r = 0; /* result */ 4107 4108 ASSERT(PAGE_LOCKED(pp)); 4109 4110 page_struct_lock(pp); 4111 ASSERT(pp->p_lckcnt != 0); 4112 4113 if (pp->p_lckcnt == 1) { 4114 if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4115 --pp->p_lckcnt; 4116 r = 1; 4117 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4118 cmn_err(CE_WARN, 4119 "COW lock limit reached on pfn 0x%lx", 4120 page_pptonum(pp)); 4121 } 4122 } 4123 } else { 4124 mutex_enter(&freemem_lock); 4125 if ((availrmem > pages_pp_maximum) && 4126 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 4127 --availrmem; 4128 ++pages_claimed; 4129 mutex_exit(&freemem_lock); 4130 --pp->p_lckcnt; 4131 r = 1; 4132 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4133 cmn_err(CE_WARN, 4134 "COW lock limit reached on pfn 0x%lx", 4135 page_pptonum(pp)); 4136 } 4137 } else 4138 mutex_exit(&freemem_lock); 4139 } 4140 page_struct_unlock(pp); 4141 return (r); 4142 } 4143 4144 int 4145 page_subclaim(page_t *pp) 4146 { 4147 int r = 0; 4148 4149 ASSERT(PAGE_LOCKED(pp)); 4150 4151 page_struct_lock(pp); 4152 ASSERT(pp->p_cowcnt != 0); 4153 4154 if (pp->p_lckcnt) { 4155 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4156 r = 1; 4157 /* 4158 * for availrmem 4159 */ 4160 mutex_enter(&freemem_lock); 4161 availrmem++; 4162 pages_claimed--; 4163 mutex_exit(&freemem_lock); 4164 4165 pp->p_cowcnt--; 4166 4167 if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4168 cmn_err(CE_WARN, 4169 "Page lock limit reached on pfn 0x%lx", 4170 page_pptonum(pp)); 4171 } 4172 } 4173 } else { 4174 r = 1; 4175 pp->p_cowcnt--; 4176 pp->p_lckcnt++; 4177 } 4178 page_struct_unlock(pp); 4179 return (r); 4180 } 4181 4182 int 4183 page_addclaim_pages(page_t **ppa) 4184 { 4185 4186 pgcnt_t lckpgs = 0, pg_idx; 4187 4188
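	/*
	 * ppa is a NULL-terminated array of locked pages, each of which
	 * must already hold a lock count (the ASSERTs below).  The claim
	 * is applied to all of the pages or, on failure, to none of them.
	 */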
VM_STAT_ADD(pagecnt.pc_addclaim_pages); 4189 4190 mutex_enter(&page_llock); 4191 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4192 4193 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4194 ASSERT(ppa[pg_idx]->p_lckcnt != 0); 4195 if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4196 mutex_exit(&page_llock); 4197 return (0); 4198 } 4199 if (ppa[pg_idx]->p_lckcnt > 1) 4200 lckpgs++; 4201 } 4202 4203 if (lckpgs != 0) { 4204 mutex_enter(&freemem_lock); 4205 if (availrmem >= pages_pp_maximum + lckpgs) { 4206 availrmem -= lckpgs; 4207 pages_claimed += lckpgs; 4208 } else { 4209 mutex_exit(&freemem_lock); 4210 mutex_exit(&page_llock); 4211 return (0); 4212 } 4213 mutex_exit(&freemem_lock); 4214 } 4215 4216 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4217 ppa[pg_idx]->p_lckcnt--; 4218 ppa[pg_idx]->p_cowcnt++; 4219 } 4220 mutex_exit(&page_llock); 4221 return (1); 4222 } 4223 4224 int 4225 page_subclaim_pages(page_t **ppa) 4226 { 4227 pgcnt_t ulckpgs = 0, pg_idx; 4228 4229 VM_STAT_ADD(pagecnt.pc_subclaim_pages); 4230 4231 mutex_enter(&page_llock); 4232 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4233 4234 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4235 ASSERT(ppa[pg_idx]->p_cowcnt != 0); 4236 if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4237 mutex_exit(&page_llock); 4238 return (0); 4239 } 4240 if (ppa[pg_idx]->p_lckcnt != 0) 4241 ulckpgs++; 4242 } 4243 4244 if (ulckpgs != 0) { 4245 mutex_enter(&freemem_lock); 4246 availrmem += ulckpgs; 4247 pages_claimed -= ulckpgs; 4248 mutex_exit(&freemem_lock); 4249 } 4250 4251 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4252 ppa[pg_idx]->p_cowcnt--; 4253 ppa[pg_idx]->p_lckcnt++; 4254 4255 } 4256 mutex_exit(&page_llock); 4257 return (1); 4258 } 4259 4260 page_t * 4261 page_numtopp(pfn_t pfnum, se_t se) 4262 { 4263 page_t *pp; 4264 4265 retry: 4266 pp = page_numtopp_nolock(pfnum); 4267 if (pp == NULL) { 4268 return ((page_t *)NULL); 4269 } 4270 4271 /* 4272 * Acquire the appropriate lock on the page. 4273 */ 4274 while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) { 4275 if (page_pptonum(pp) != pfnum) 4276 goto retry; 4277 continue; 4278 } 4279 4280 if (page_pptonum(pp) != pfnum) { 4281 page_unlock(pp); 4282 goto retry; 4283 } 4284 4285 return (pp); 4286 } 4287 4288 page_t * 4289 page_numtopp_noreclaim(pfn_t pfnum, se_t se) 4290 { 4291 page_t *pp; 4292 4293 retry: 4294 pp = page_numtopp_nolock(pfnum); 4295 if (pp == NULL) { 4296 return ((page_t *)NULL); 4297 } 4298 4299 /* 4300 * Acquire the appropriate lock on the page. 4301 */ 4302 while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) { 4303 if (page_pptonum(pp) != pfnum) 4304 goto retry; 4305 continue; 4306 } 4307 4308 if (page_pptonum(pp) != pfnum) { 4309 page_unlock(pp); 4310 goto retry; 4311 } 4312 4313 return (pp); 4314 } 4315 4316 /* 4317 * This routine is like page_numtopp, but will only return page structs 4318 * for pages which are ok for loading into hardware using the page struct. 4319 */ 4320 page_t * 4321 page_numtopp_nowait(pfn_t pfnum, se_t se) 4322 { 4323 page_t *pp; 4324 4325 retry: 4326 pp = page_numtopp_nolock(pfnum); 4327 if (pp == NULL) { 4328 return ((page_t *)NULL); 4329 } 4330 4331 /* 4332 * Try to acquire the appropriate lock on the page. 
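 * Unlike the page_numtopp() variants above, never sleep here: use
 * page_trylock() and simply return NULL if the page is free or cannot
 * be locked immediately.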
4333 	 */
4334 	if (PP_ISFREE(pp))
4335 		pp = NULL;
4336 	else {
4337 		if (!page_trylock(pp, se))
4338 			pp = NULL;
4339 		else {
4340 			if (page_pptonum(pp) != pfnum) {
4341 				page_unlock(pp);
4342 				goto retry;
4343 			}
4344 			if (PP_ISFREE(pp)) {
4345 				page_unlock(pp);
4346 				pp = NULL;
4347 			}
4348 		}
4349 	}
4350 	return (pp);
4351 }
4352 
4353 /*
4354  * Returns a count of dirty pages that are in the process
4355  * of being written out. If 'cleanit' is set, try to push the page.
4356  */
4357 pgcnt_t
4358 page_busy(int cleanit)
4359 {
4360 	page_t *page0 = page_first();
4361 	page_t *pp = page0;
4362 	pgcnt_t nppbusy = 0;
4363 	u_offset_t off;
4364 
4365 	do {
4366 		vnode_t *vp = pp->p_vnode;
4367 
4368 		/*
4369 		 * A page is a candidate for syncing if it is:
4370 		 *
4371 		 * (a)	On neither the freelist nor the cachelist
4372 		 * (b)	Hashed onto a vnode
4373 		 * (c)	Not a kernel page
4374 		 * (d)	Dirty
4375 		 * (e)	Not part of a swapfile
4376 		 * (f)	A page which belongs to a real vnode, e.g., has a
4377 		 *	non-null v_vfsp pointer
4378 		 * (g)	Backed by a filesystem which doesn't have a
4379 		 *	stubbed-out sync operation
4380 		 */
4381 		if (!PP_ISFREE(pp) && vp != NULL && vp != &kvp &&
4382 		    hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL &&
4383 		    vfs_can_sync(vp->v_vfsp)) {
4384 			nppbusy++;
4385 			vfs_syncprogress();
4386 
4387 			if (!cleanit)
4388 				continue;
4389 			if (!page_trylock(pp, SE_EXCL))
4390 				continue;
4391 
4392 			if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) ||
4393 			    pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
4394 			    !(hat_pagesync(pp,
4395 			    HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) {
4396 				page_unlock(pp);
4397 				continue;
4398 			}
4399 			off = pp->p_offset;
4400 			VN_HOLD(vp);
4401 			page_unlock(pp);
4402 			(void) VOP_PUTPAGE(vp, off, PAGESIZE,
4403 			    B_ASYNC | B_FREE, kcred);
4404 			VN_RELE(vp);
4405 		}
4406 	} while ((pp = page_next(pp)) != page0);
4407 
4408 	return (nppbusy);
4409 }
4410 
4411 void page_invalidate_pages(void);
4412 
4413 /*
4414  * Callback handler for the VM subsystem.
4415  *
4416  * Callers must ensure there are no recursive entries to this function.
4417  */
4418 /*ARGSUSED*/
4419 boolean_t
4420 callb_vm_cpr(void *arg, int code)
4421 {
4422 	if (code == CB_CODE_CPR_CHKPT)
4423 		page_invalidate_pages();
4424 	return (B_TRUE);
4425 }
4426 
4427 /*
4428  * Invalidate all pages of the system.
4429  * It shouldn't be called until all user page activity has stopped.
4430  */
4431 void
4432 page_invalidate_pages()
4433 {
4434 	page_t *pp;
4435 	page_t *page0;
4436 	pgcnt_t nbusypages;
4437 	int retry = 0;
4438 	const int MAXRETRIES = 4;
4439 #if defined(__sparc)
4440 	extern struct vnode prom_ppages;
4441 #endif /* __sparc */
4442 
4443 top:
4444 	/*
4445 	 * Flush dirty pages and destroy the clean ones.
4446 	 */
4447 	nbusypages = 0;
4448 
4449 	pp = page0 = page_first();
4450 	do {
4451 		struct vnode	*vp;
4452 		u_offset_t	offset;
4453 		int		mod;
4454 
4455 		/*
4456 		 * Skip the page if it has no vnode, or is associated with
4457 		 * the kernel vnode or with prom-allocated kernel memory.
4458 		 */
4459 #if defined(__sparc)
4460 		if ((vp = pp->p_vnode) == NULL || vp == &kvp ||
4461 		    vp == &prom_ppages)
4462 #else /* x86 doesn't have prom or prom_ppages */
4463 		if ((vp = pp->p_vnode) == NULL || vp == &kvp)
4464 #endif /* __sparc */
4465 			continue;
4466 
4467 		/*
4468 		 * Skip pages that have already been invalidated and freed.
4469 		 */
4470 		if (PP_ISFREE(pp) && PP_ISAGED(pp))
4471 			continue;
4472 
4473 		/*
4474 		 * Skip pages that are already locked or can't be "exclusively"
4475 		 * locked or are already free. After we lock the page, check
4476 		 * the free and age bits again to be sure it hasn't been
4477 		 * destroyed yet.
4478 		 * To achieve maximum parallelization, we use page_trylock
4479 		 * instead of page_lock so that we don't get blocked on
4480 		 * individual pages while we have thousands of other pages to process.
4481 		 */
4482 		if (!page_trylock(pp, SE_EXCL)) {
4483 			nbusypages++;
4484 			continue;
4485 		} else if (PP_ISFREE(pp)) {
4486 			if (!PP_ISAGED(pp)) {
4487 				page_destroy_free(pp);
4488 			} else {
4489 				page_unlock(pp);
4490 			}
4491 			continue;
4492 		}
4493 		/*
4494 		 * Is this page involved in some I/O? shared?
4495 		 *
4496 		 * The page_struct_lock need not be acquired to
4497 		 * examine these fields since the page has an
4498 		 * "exclusive" lock.
4499 		 */
4500 		if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
4501 			page_unlock(pp);
4502 			continue;
4503 		}
4504 
4505 		if (vp->v_type == VCHR) {
4506 			panic("vp->v_type == VCHR");
4507 			/*NOTREACHED*/
4508 		}
4509 
4510 		if (!page_try_demote_pages(pp)) {
4511 			page_unlock(pp);
4512 			continue;
4513 		}
4514 
4515 		/*
4516 		 * Check the modified bit. Leave the bits alone in hardware
4517 		 * (they will be modified if we do the putpage).
4518 		 */
4519 		mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD)
4520 		    & P_MOD);
4521 		if (mod) {
4522 			offset = pp->p_offset;
4523 			/*
4524 			 * Hold the vnode before releasing the page lock
4525 			 * to prevent it from being freed and re-used by
4526 			 * some other thread.
4527 			 */
4528 			VN_HOLD(vp);
4529 			page_unlock(pp);
4530 			/*
4531 			 * No error return is checked here. Callers such as
4532 			 * cpr deal with the dirty pages at dump time
4533 			 * if this putpage fails.
4534 			 */
4535 			(void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL,
4536 			    kcred);
4537 			VN_RELE(vp);
4538 		} else {
4539 			page_destroy(pp, 0);
4540 		}
4541 	} while ((pp = page_next(pp)) != page0);
4542 	if (nbusypages && retry++ < MAXRETRIES) {
4543 		delay(1);
4544 		goto top;
4545 	}
4546 }
4547 
4548 /*
4549  * Replace the page "old" with the page "new" on the page hash and vnode lists.
4550  *
4551  * The replacement must be done in place, i.e., the equivalent sequence:
4552  *
4553  *	vp = old->p_vnode;
4554  *	off = old->p_offset;
4555  *	page_do_hashout(old)
4556  *	page_do_hashin(new, vp, off)
4557  *
4558  * doesn't work, since
4559  *  1) if old is the only page on the vnode, the v_pages list has a window
4560  *     where it looks empty. This will break file system assumptions.
4561  * and
4562  *  2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list.
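 * Instead, page_do_relocate_hash() below splices "new" directly into the
 * hash-chain slot and v_pages position that "old" occupies, so neither list
 * is ever observed in an intermediate state.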
4563 */ 4564 static void 4565 page_do_relocate_hash(page_t *new, page_t *old) 4566 { 4567 page_t **hash_list; 4568 vnode_t *vp = old->p_vnode; 4569 kmutex_t *sep; 4570 4571 ASSERT(PAGE_EXCL(old)); 4572 ASSERT(PAGE_EXCL(new)); 4573 ASSERT(vp != NULL); 4574 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 4575 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset)))); 4576 4577 /* 4578 * First find old page on the page hash list 4579 */ 4580 hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)]; 4581 4582 for (;;) { 4583 if (*hash_list == old) 4584 break; 4585 if (*hash_list == NULL) { 4586 panic("page_do_hashout"); 4587 /*NOTREACHED*/ 4588 } 4589 hash_list = &(*hash_list)->p_hash; 4590 } 4591 4592 /* 4593 * update new and replace old with new on the page hash list 4594 */ 4595 new->p_vnode = old->p_vnode; 4596 new->p_offset = old->p_offset; 4597 new->p_hash = old->p_hash; 4598 *hash_list = new; 4599 4600 if ((new->p_vnode->v_flag & VISSWAP) != 0) 4601 PP_SETSWAP(new); 4602 4603 /* 4604 * replace old with new on the vnode's page list 4605 */ 4606 if (old->p_vpnext == old) { 4607 new->p_vpnext = new; 4608 new->p_vpprev = new; 4609 } else { 4610 new->p_vpnext = old->p_vpnext; 4611 new->p_vpprev = old->p_vpprev; 4612 new->p_vpnext->p_vpprev = new; 4613 new->p_vpprev->p_vpnext = new; 4614 } 4615 if (vp->v_pages == old) 4616 vp->v_pages = new; 4617 4618 /* 4619 * clear out the old page 4620 */ 4621 old->p_hash = NULL; 4622 old->p_vpnext = NULL; 4623 old->p_vpprev = NULL; 4624 old->p_vnode = NULL; 4625 PP_CLRSWAP(old); 4626 old->p_offset = (u_offset_t)-1; 4627 page_clr_all_props(old); 4628 4629 /* 4630 * Wake up processes waiting for this page. The page's 4631 * identity has been changed, and is probably not the 4632 * desired page any longer. 4633 */ 4634 sep = page_se_mutex(old); 4635 mutex_enter(sep); 4636 old->p_selock &= ~SE_EWANTED; 4637 if (CV_HAS_WAITERS(&old->p_cv)) 4638 cv_broadcast(&old->p_cv); 4639 mutex_exit(sep); 4640 } 4641 4642 /* 4643 * This function moves the identity of page "pp_old" to page "pp_new". 4644 * Both pages must be locked on entry. "pp_new" is free, has no identity, 4645 * and need not be hashed out from anywhere. 4646 */ 4647 void 4648 page_relocate_hash(page_t *pp_new, page_t *pp_old) 4649 { 4650 vnode_t *vp = pp_old->p_vnode; 4651 u_offset_t off = pp_old->p_offset; 4652 kmutex_t *phm, *vphm; 4653 4654 /* 4655 * Rehash two pages 4656 */ 4657 ASSERT(PAGE_EXCL(pp_old)); 4658 ASSERT(PAGE_EXCL(pp_new)); 4659 ASSERT(vp != NULL); 4660 ASSERT(pp_new->p_vnode == NULL); 4661 4662 /* 4663 * hashout then hashin while holding the mutexes 4664 */ 4665 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off)); 4666 mutex_enter(phm); 4667 vphm = page_vnode_mutex(vp); 4668 mutex_enter(vphm); 4669 4670 page_do_relocate_hash(pp_new, pp_old); 4671 4672 mutex_exit(vphm); 4673 mutex_exit(phm); 4674 4675 /* 4676 * The page_struct_lock need not be acquired for lckcnt and 4677 * cowcnt since the page has an "exclusive" lock. 4678 */ 4679 ASSERT(pp_new->p_lckcnt == 0); 4680 ASSERT(pp_new->p_cowcnt == 0); 4681 pp_new->p_lckcnt = pp_old->p_lckcnt; 4682 pp_new->p_cowcnt = pp_old->p_cowcnt; 4683 pp_old->p_lckcnt = pp_old->p_cowcnt = 0; 4684 4685 /* The following comment preserved from page_flip(). */ 4686 /* XXX - Do we need to protect fsdata? */ 4687 pp_new->p_fsdata = pp_old->p_fsdata; 4688 } 4689 4690 /* 4691 * Helper routine used to lock all remaining members of a 4692 * large page. The caller is responsible for passing in a locked 4693 * pp. 
If pp is a large page, then it succeeds in locking all the
4694  * remaining constituent pages or it returns with only the
4695  * original page locked.
4696  *
4697  * Returns 1 on success, 0 on failure.
4698  *
4699  * If success is returned, this routine guarantees that p_szc can't change
4700  * for any constituent page of the large page pp belongs to. To achieve this
4701  * we recheck the szc of pp after locking all constituent pages, and retry
4702  * if it changed (it could only decrease). Since hat_page_demote() needs an
4703  * EXCL lock on one of the constituent pages, it can't be running once all
4704  * the constituent pages are locked. A hat_page_demote() with a lock on a
4705  * constituent page outside of this large page (i.e. pp belonged to a larger
4706  * large page) is already done with all constituent pages of pp, since the
4707  * root's p_szc is changed last. Therefore there is no need to synchronize
4708  * with a hat_page_demote() that locked a constituent page outside of pp's current large page.
4709  */
4710 #ifdef DEBUG
4711 uint32_t gpg_trylock_mtbf = 0;
4712 #endif
4713 
4714 int
4715 group_page_trylock(page_t *pp, se_t se)
4716 {
4717 	page_t *tpp;
4718 	pgcnt_t npgs, i, j;
4719 	uint_t pszc = pp->p_szc;
4720 
4721 #ifdef DEBUG
4722 	if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) {
4723 		return (0);
4724 	}
4725 #endif
4726 
4727 	if (pp != PP_GROUPLEADER(pp, pszc)) {
4728 		return (0);
4729 	}
4730 
4731 retry:
4732 	ASSERT(PAGE_LOCKED_SE(pp, se));
4733 	ASSERT(!PP_ISFREE(pp));
4734 	if (pszc == 0) {
4735 		return (1);
4736 	}
4737 	npgs = page_get_pagecnt(pszc);
4738 	tpp = pp + 1;
4739 	for (i = 1; i < npgs; i++, tpp++) {
4740 		if (!page_trylock(tpp, se)) {
4741 			tpp = pp + 1;
4742 			for (j = 1; j < i; j++, tpp++) {
4743 				page_unlock(tpp);
4744 			}
4745 			return (0);
4746 		}
4747 	}
4748 	if (pp->p_szc != pszc) {
4749 		ASSERT(pp->p_szc < pszc);
4750 		ASSERT(pp->p_vnode != NULL && pp->p_vnode != &kvp &&
4751 		    !IS_SWAPFSVP(pp->p_vnode));
4752 		tpp = pp + 1;
4753 		for (i = 1; i < npgs; i++, tpp++) {
4754 			page_unlock(tpp);
4755 		}
4756 		pszc = pp->p_szc;
4757 		goto retry;
4758 	}
4759 	return (1);
4760 }
4761 
4762 void
4763 group_page_unlock(page_t *pp)
4764 {
4765 	page_t *tpp;
4766 	pgcnt_t npgs, i;
4767 
4768 	ASSERT(PAGE_LOCKED(pp));
4769 	ASSERT(!PP_ISFREE(pp));
4770 	ASSERT(pp == PP_PAGEROOT(pp));
4771 	npgs = page_get_pagecnt(pp->p_szc);
4772 	for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) {
4773 		page_unlock(tpp);
4774 	}
4775 }
4776 
4777 /*
4778  * Returns:
4779  * 0	  : on success, and *nrelocp is the number of relocated PAGESIZE pages
4780  * ERANGE : this is not a base page
4781  * EBUSY  : failure to get locks on the page/pages
4782  * ENOMEM : failure to obtain replacement pages
4783  * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel
4784  * EIO	  : An error occurred while trying to copy the page data
4785  *
4786  * Returns with all constituent members of target and replacement
4787  * SE_EXCL locked. It is the caller's responsibility to drop the
4788  * locks.
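 *
 * A minimal caller sketch (hypothetical, error handling elided):
 *
 *	page_t *repl = NULL;		(or a pre-allocated replacement)
 *	spgcnt_t nreloc;
 *
 *	if (do_page_relocate(&targ, &repl, 1, &nreloc, NULL) == 0) {
 *		targ now heads the list of nreloc relocated (old) pages and
 *		repl the replacement pages; all are SE_EXCL locked, and the
 *		caller must unlock or free them.
 *	}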
4789 */ 4790 int 4791 do_page_relocate( 4792 page_t **target, 4793 page_t **replacement, 4794 int grouplock, 4795 spgcnt_t *nrelocp, 4796 lgrp_t *lgrp) 4797 { 4798 page_t *first_repl; 4799 page_t *repl; 4800 page_t *targ; 4801 page_t *pl = NULL; 4802 uint_t ppattr; 4803 pfn_t pfn, repl_pfn; 4804 uint_t szc; 4805 spgcnt_t npgs, i; 4806 int repl_contig = 0; 4807 uint_t flags = 0; 4808 spgcnt_t dofree = 0; 4809 4810 *nrelocp = 0; 4811 4812 #if defined(__sparc) 4813 /* 4814 * We need to wait till OBP has completed 4815 * its boot-time handoff of its resources to the kernel 4816 * before we allow page relocation 4817 */ 4818 if (page_relocate_ready == 0) { 4819 return (EAGAIN); 4820 } 4821 #endif 4822 4823 /* 4824 * If this is not a base page, 4825 * just return with 0x0 pages relocated. 4826 */ 4827 targ = *target; 4828 ASSERT(PAGE_EXCL(targ)); 4829 ASSERT(!PP_ISFREE(targ)); 4830 szc = targ->p_szc; 4831 ASSERT(szc < mmu_page_sizes); 4832 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4833 pfn = targ->p_pagenum; 4834 if (pfn != PFN_BASE(pfn, szc)) { 4835 VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]); 4836 return (ERANGE); 4837 } 4838 4839 if ((repl = *replacement) != NULL && repl->p_szc >= szc) { 4840 repl_pfn = repl->p_pagenum; 4841 if (repl_pfn != PFN_BASE(repl_pfn, szc)) { 4842 VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]); 4843 return (ERANGE); 4844 } 4845 repl_contig = 1; 4846 } 4847 4848 /* 4849 * We must lock all members of this large page or we cannot 4850 * relocate any part of it. 4851 */ 4852 if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) { 4853 VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]); 4854 return (EBUSY); 4855 } 4856 4857 /* 4858 * reread szc it could have been decreased before 4859 * group_page_trylock() was done. 4860 */ 4861 szc = targ->p_szc; 4862 ASSERT(szc < mmu_page_sizes); 4863 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4864 ASSERT(pfn == PFN_BASE(pfn, szc)); 4865 4866 npgs = page_get_pagecnt(targ->p_szc); 4867 4868 if (repl == NULL) { 4869 dofree = npgs; /* Size of target page in MMU pages */ 4870 if (!page_create_wait(dofree, 0)) { 4871 if (grouplock != 0) { 4872 group_page_unlock(targ); 4873 } 4874 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4875 return (ENOMEM); 4876 } 4877 4878 /* 4879 * seg kmem pages require that the target and replacement 4880 * page be the same pagesize. 4881 */ 4882 flags = (targ->p_vnode == &kvp) ? 
PGR_SAMESZC : 0; 4883 repl = page_get_replacement_page(targ, lgrp, flags); 4884 if (repl == NULL) { 4885 if (grouplock != 0) { 4886 group_page_unlock(targ); 4887 } 4888 page_create_putback(dofree); 4889 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4890 return (ENOMEM); 4891 } 4892 } 4893 #ifdef DEBUG 4894 else { 4895 ASSERT(PAGE_LOCKED(repl)); 4896 } 4897 #endif /* DEBUG */ 4898 4899 #if defined(__sparc) 4900 /* 4901 * Let hat_page_relocate() complete the relocation if it's kernel page 4902 */ 4903 if (targ->p_vnode == &kvp) { 4904 *replacement = repl; 4905 if (hat_page_relocate(target, replacement, nrelocp) != 0) { 4906 if (grouplock != 0) { 4907 group_page_unlock(targ); 4908 } 4909 if (dofree) { 4910 *replacement = NULL; 4911 page_free_replacement_page(repl); 4912 page_create_putback(dofree); 4913 } 4914 VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]); 4915 return (EAGAIN); 4916 } 4917 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 4918 return (0); 4919 } 4920 #else 4921 #if defined(lint) 4922 dofree = dofree; 4923 #endif 4924 #endif 4925 4926 first_repl = repl; 4927 4928 for (i = 0; i < npgs; i++) { 4929 ASSERT(PAGE_EXCL(targ)); 4930 ASSERT(targ->p_slckcnt == 0); 4931 ASSERT(repl->p_slckcnt == 0); 4932 4933 (void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD); 4934 4935 ASSERT(hat_page_getshare(targ) == 0); 4936 ASSERT(!PP_ISFREE(targ)); 4937 ASSERT(targ->p_pagenum == (pfn + i)); 4938 ASSERT(repl_contig == 0 || 4939 repl->p_pagenum == (repl_pfn + i)); 4940 4941 /* 4942 * Copy the page contents and attributes then 4943 * relocate the page in the page hash. 4944 */ 4945 if (ppcopy(targ, repl) == 0) { 4946 targ = *target; 4947 repl = first_repl; 4948 VM_STAT_ADD(vmm_vmstats.ppr_copyfail); 4949 if (grouplock != 0) { 4950 group_page_unlock(targ); 4951 } 4952 if (dofree) { 4953 *replacement = NULL; 4954 page_free_replacement_page(repl); 4955 page_create_putback(dofree); 4956 } 4957 return (EIO); 4958 } 4959 4960 targ++; 4961 if (repl_contig != 0) { 4962 repl++; 4963 } else { 4964 repl = repl->p_next; 4965 } 4966 } 4967 4968 repl = first_repl; 4969 targ = *target; 4970 4971 for (i = 0; i < npgs; i++) { 4972 ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO)); 4973 page_clr_all_props(repl); 4974 page_set_props(repl, ppattr); 4975 page_relocate_hash(repl, targ); 4976 4977 ASSERT(hat_page_getshare(targ) == 0); 4978 ASSERT(hat_page_getshare(repl) == 0); 4979 /* 4980 * Now clear the props on targ, after the 4981 * page_relocate_hash(), they no longer 4982 * have any meaning. 4983 */ 4984 page_clr_all_props(targ); 4985 ASSERT(targ->p_next == targ); 4986 ASSERT(targ->p_prev == targ); 4987 page_list_concat(&pl, &targ); 4988 4989 targ++; 4990 if (repl_contig != 0) { 4991 repl++; 4992 } else { 4993 repl = repl->p_next; 4994 } 4995 } 4996 /* assert that we have come full circle with repl */ 4997 ASSERT(repl_contig == 1 || first_repl == repl); 4998 4999 *target = pl; 5000 if (*replacement == NULL) { 5001 ASSERT(first_repl == repl); 5002 *replacement = repl; 5003 } 5004 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 5005 *nrelocp = npgs; 5006 return (0); 5007 } 5008 /* 5009 * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated. 
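 * (See page_relocate_cage() and page_migrate() below for representative
 * callers.)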
5010  */
5011 int
5012 page_relocate(
5013 	page_t **target,
5014 	page_t **replacement,
5015 	int grouplock,
5016 	int freetarget,
5017 	spgcnt_t *nrelocp,
5018 	lgrp_t *lgrp)
5019 {
5020 	spgcnt_t ret;
5021 
5022 	/* do_page_relocate returns 0 on success or errno value */
5023 	ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp);
5024 
5025 	if (ret != 0 || freetarget == 0) {
5026 		return (ret);
5027 	}
5028 	if (*nrelocp == 1) {
5029 		ASSERT(*target != NULL);
5030 		page_free(*target, 1);
5031 	} else {
5032 		page_t *tpp = *target;
5033 		uint_t szc = tpp->p_szc;
5034 		pgcnt_t npgs = page_get_pagecnt(szc);
5035 		ASSERT(npgs > 1);
5036 		ASSERT(szc != 0);
5037 		do {
5038 			ASSERT(PAGE_EXCL(tpp));
5039 			ASSERT(!hat_page_is_mapped(tpp));
5040 			ASSERT(tpp->p_szc == szc);
5041 			PP_SETFREE(tpp);
5042 			PP_SETAGED(tpp);
5043 			npgs--;
5044 		} while ((tpp = tpp->p_next) != *target);
5045 		ASSERT(npgs == 0);
5046 		page_list_add_pages(*target, 0);
5047 		npgs = page_get_pagecnt(szc);
5048 		page_create_putback(npgs);
5049 	}
5050 	return (ret);
5051 }
5052 
5053 /*
5054  * It is up to the caller to deal with pcf accounting.
5055  */
5056 void
5057 page_free_replacement_page(page_t *pplist)
5058 {
5059 	page_t *pp;
5060 
5061 	while (pplist != NULL) {
5062 		/*
5063 		 * pp_targ is a linked list.
5064 		 */
5065 		pp = pplist;
5066 		if (pp->p_szc == 0) {
5067 			page_sub(&pplist, pp);
5068 			page_clr_all_props(pp);
5069 			PP_SETFREE(pp);
5070 			PP_SETAGED(pp);
5071 			page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
5072 			page_unlock(pp);
5073 			VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]);
5074 		} else {
5075 			spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc);
5076 			page_t *tpp;
5077 			page_list_break(&pp, &pplist, curnpgs);
5078 			tpp = pp;
5079 			do {
5080 				ASSERT(PAGE_EXCL(tpp));
5081 				ASSERT(!hat_page_is_mapped(tpp));
5082 				page_clr_all_props(tpp);
5083 				PP_SETFREE(tpp);
5084 				PP_SETAGED(tpp);
5085 			} while ((tpp = tpp->p_next) != pp);
5086 			page_list_add_pages(pp, 0);
5087 			VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]);
5088 		}
5089 	}
5090 }
5091 
5092 /*
5093  * Relocate target to non-relocatable replacement page.
5094  */
5095 int
5096 page_relocate_cage(page_t **target, page_t **replacement)
5097 {
5098 	page_t *tpp, *rpp;
5099 	spgcnt_t pgcnt, npgs;
5100 	int result;
5101 
5102 	tpp = *target;
5103 
5104 	ASSERT(PAGE_EXCL(tpp));
5105 	ASSERT(tpp->p_szc == 0);
5106 
5107 	pgcnt = btop(page_get_pagesize(tpp->p_szc));
5108 
5109 	do {
5110 		(void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC);
5111 		rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC);
5112 		if (rpp == NULL) {
5113 			page_create_putback(pgcnt);
5114 			kcage_cageout_wakeup();
5115 		}
5116 	} while (rpp == NULL);
5117 
5118 	ASSERT(PP_ISNORELOC(rpp));
5119 
5120 	result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL);
5121 
5122 	if (result == 0) {
5123 		*replacement = rpp;
5124 		if (pgcnt != npgs)
5125 			panic("page_relocate_cage: partial relocation");
5126 	}
5127 
5128 	return (result);
5129 }
5130 
5131 /*
5132  * Release the page lock on a page, place on cachelist
5133  * tail if no longer mapped. Caller can let us know if
5134  * the page is known to be clean.
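 *
 * Illustrative handling of the return value (hypothetical caller):
 *
 *	switch (page_release(pp, 1)) {
 *	case PGREL_CLEAN:	the page was freed onto the cachelist
 *	case PGREL_MOD:		the page was dirty and was only unlocked
 *	case PGREL_NOTREL:	not releasable; the page was only unlocked
 *	}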
5135 */ 5136 int 5137 page_release(page_t *pp, int checkmod) 5138 { 5139 int status; 5140 5141 ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) && 5142 (pp->p_vnode != NULL)); 5143 5144 if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) && 5145 ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) && 5146 pp->p_lckcnt == 0 && pp->p_cowcnt == 0 && 5147 !hat_page_is_mapped(pp)) { 5148 5149 /* 5150 * If page is modified, unlock it 5151 * 5152 * (p_nrm & P_MOD) bit has the latest stuff because: 5153 * (1) We found that this page doesn't have any mappings 5154 * _after_ holding SE_EXCL and 5155 * (2) We didn't drop SE_EXCL lock after the check in (1) 5156 */ 5157 if (checkmod && hat_ismod(pp)) { 5158 page_unlock(pp); 5159 status = PGREL_MOD; 5160 } else { 5161 /*LINTED: constant in conditional context*/ 5162 VN_DISPOSE(pp, B_FREE, 0, kcred); 5163 status = PGREL_CLEAN; 5164 } 5165 } else { 5166 page_unlock(pp); 5167 status = PGREL_NOTREL; 5168 } 5169 return (status); 5170 } 5171 5172 /* 5173 * Given a constituent page, try to demote the large page on the freelist. 5174 * 5175 * Returns nonzero if the page could be demoted successfully. Returns with 5176 * the constituent page still locked. 5177 */ 5178 int 5179 page_try_demote_free_pages(page_t *pp) 5180 { 5181 page_t *rootpp = pp; 5182 pfn_t pfn = page_pptonum(pp); 5183 spgcnt_t npgs; 5184 uint_t szc = pp->p_szc; 5185 5186 ASSERT(PP_ISFREE(pp)); 5187 ASSERT(PAGE_EXCL(pp)); 5188 5189 /* 5190 * Adjust rootpp and lock it, if `pp' is not the base 5191 * constituent page. 5192 */ 5193 npgs = page_get_pagecnt(pp->p_szc); 5194 if (npgs == 1) { 5195 return (0); 5196 } 5197 5198 if (!IS_P2ALIGNED(pfn, npgs)) { 5199 pfn = P2ALIGN(pfn, npgs); 5200 rootpp = page_numtopp_nolock(pfn); 5201 } 5202 5203 if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) { 5204 return (0); 5205 } 5206 5207 if (rootpp->p_szc != szc) { 5208 if (pp != rootpp) 5209 page_unlock(rootpp); 5210 return (0); 5211 } 5212 5213 page_demote_free_pages(rootpp); 5214 5215 if (pp != rootpp) 5216 page_unlock(rootpp); 5217 5218 ASSERT(PP_ISFREE(pp)); 5219 ASSERT(PAGE_EXCL(pp)); 5220 return (1); 5221 } 5222 5223 /* 5224 * Given a constituent page, try to demote the large page. 5225 * 5226 * Returns nonzero if the page could be demoted successfully. Returns with 5227 * the constituent page still locked. 5228 */ 5229 int 5230 page_try_demote_pages(page_t *pp) 5231 { 5232 page_t *tpp, *rootpp = pp; 5233 pfn_t pfn = page_pptonum(pp); 5234 spgcnt_t i, npgs; 5235 uint_t szc = pp->p_szc; 5236 vnode_t *vp = pp->p_vnode; 5237 5238 ASSERT(PAGE_EXCL(pp)); 5239 5240 VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]); 5241 5242 if (pp->p_szc == 0) { 5243 VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]); 5244 return (1); 5245 } 5246 5247 if (vp != NULL && !IS_SWAPFSVP(vp) && vp != &kvp) { 5248 VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]); 5249 page_demote_vp_pages(pp); 5250 ASSERT(pp->p_szc == 0); 5251 return (1); 5252 } 5253 5254 /* 5255 * Adjust rootpp if passed in is not the base 5256 * constituent page. 5257 */ 5258 npgs = page_get_pagecnt(pp->p_szc); 5259 ASSERT(npgs > 1); 5260 if (!IS_P2ALIGNED(pfn, npgs)) { 5261 pfn = P2ALIGN(pfn, npgs); 5262 rootpp = page_numtopp_nolock(pfn); 5263 VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]); 5264 ASSERT(rootpp->p_vnode != NULL); 5265 ASSERT(rootpp->p_szc == szc); 5266 } 5267 5268 /* 5269 * We can't demote kernel pages since we can't hat_unload() 5270 * the mappings. 
5271 	 */
5272 	if (rootpp->p_vnode == &kvp)
5273 		return (0);
5274 
5275 	/*
5276 	 * Attempt to lock all constituent pages except the page passed
5277 	 * in since it's already locked.
5278 	 */
5279 	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
5280 		ASSERT(!PP_ISFREE(tpp));
5281 		ASSERT(tpp->p_vnode != NULL);
5282 
5283 		if (tpp != pp && !page_trylock(tpp, SE_EXCL))
5284 			break;
5285 		ASSERT(tpp->p_szc == rootpp->p_szc);
5286 		ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i);
5287 	}
5288 
5289 	/*
5290 	 * If we failed to lock them all then unlock what we have
5291 	 * locked so far and bail.
5292 	 */
5293 	if (i < npgs) {
5294 		tpp = rootpp;
5295 		while (i-- > 0) {
5296 			if (tpp != pp)
5297 				page_unlock(tpp);
5298 			tpp++;
5299 		}
5300 		VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]);
5301 		return (0);
5302 	}
5303 
5304 	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
5305 		ASSERT(PAGE_EXCL(tpp));
5306 		ASSERT(tpp->p_slckcnt == 0);
5307 		(void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
5308 		tpp->p_szc = 0;
5309 	}
5310 
5311 	/*
5312 	 * Unlock all pages except the page passed in.
5313 	 */
5314 	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
5315 		ASSERT(!hat_page_is_mapped(tpp));
5316 		if (tpp != pp)
5317 			page_unlock(tpp);
5318 	}
5319 
5320 	VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]);
5321 	return (1);
5322 }
5323 
5324 /*
5325  * Called by page_free() and page_destroy() to demote the page size code
5326  * (p_szc) to 0 (since we can't just put a single PAGESIZE page with nonzero
5327  * p_szc on the free list, nor can we just clear p_szc of a single page_t
5328  * within a large page, since that would break other code that relies on
5329  * p_szc being the same for all page_t's of a large page). Anonymous pages
5330  * should never end up here because anon_map_getpages() cannot deal with
5331  * p_szc changes after a single constituent page is locked. While anonymous
5332  * and kernel large pages are demoted or freed an entire large page at a
5333  * time with all constituent pages locked EXCL, for file system pages we
5334  * have to be able to demote a large page (i.e. decrease all constituent
5335  * pages' p_szc) with just an EXCL lock on one of the constituent pages. The
5336  * reason we can easily demote anonymous pages an entire large page at a
5337  * time is that those operations originate at the address space level and
5338  * concern the entire large page region, with actual demotion only done when
5339  * pages are not shared with any other processes (therefore we can always
5340  * get an EXCL lock on all anonymous constituent pages after clearing the
5341  * segment page cache). However, file system pages can be truncated or
5342  * invalidated at a PAGESIZE level from the file system side and end up in
5343  * page_free() or page_destroy() (we also allow only part of the large page
5344  * to be SOFTLOCKed, and therefore pageout should be able to demote a large
5345  * page by EXCL locking any constituent page that is not under SOFTLOCK). In
5346  * those cases we cannot rely on being able to lock EXCL all constituent pages.
5347  *
5348  * To prevent szc changes on file system pages, one has to lock all constituent
5349  * pages at least SHARED (or call page_szc_lock()). The only subsystem that
5350  * doesn't rely on locking all constituent pages (or on page_szc_lock()) to
5351  * prevent szc changes is the hat layer, which uses its own page-level mlist
5352  * locks. The hat assumes that szc doesn't change after the mlist lock for a
5353  * page is taken.
Therefore we need to change szc under hat-level locks if we only
5354  * have an EXCL lock on a single constituent page and the hat still
5355  * references any of the constituent pages. (Note that we can't "ignore" the
5356  * hat layer by simply hat_pageunload()ing all constituent pages without
5357  * having EXCL locks on all of them.) We use the hat_page_demote() call to
5358  * safely demote the szc of all constituent pages under hat locks when we
5359  * only have an EXCL lock on one of the constituent pages.
5360  *
5361  * This routine calls page_szc_lock() before calling hat_page_demote() to
5362  * allow segvn in one special case not to lock all constituent pages SHARED
5363  * before calling hat_memload_array(), which relies on p_szc not changing
5364  * even before the hat-level mlist lock is taken. In that case segvn uses
5365  * page_szc_lock() to prevent hat_page_demote() from changing p_szc values.
5366  *
5367  * Anonymous or kernel page demotion still has to lock all pages exclusively
5368  * and do hat_pageunload() on all constituent pages before demoting the page;
5369  * therefore there's no need for anonymous or kernel page demotion to use
5370  * the hat_page_demote() mechanism.
5371  *
5372  * hat_page_demote() removes all large mappings that map pp and then decreases
5373  * p_szc starting from the last constituent page of the large page. Working
5374  * from the tail of a large page in decreasing pfn order allows one looking
5375  * at the root page to know that hat_page_demote() is done for the root's szc
5376  * area. E.g. if a root page has szc 1, one only has to lock all constituent
5377  * pages within the szc 1 area to prevent szc changes, because a
5378  * hat_page_demote() that started on this page when it had szc > 1 is done
5379  * for this szc 1 area.
5380  *
5381  * We are guaranteed that all constituent pages of pp's large page belong to
5382  * the same vnode, with consecutive offsets increasing in the direction of
5383  * the pfn, i.e. the identity of constituent pages can't change until their
5384  * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove
5385  * large mappings to pp even though we don't lock any constituent page except pp (i.e. we won't unload, e.g., a kernel locked page).
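 *
 * To summarize the szc stability rules described above:
 *	- to keep p_szc of a file system large page from changing, lock all
 *	  constituent pages at least SHARED, or hold the mutex returned by
 *	  page_szc_lock();
 *	- to demote a file system large page while holding only one EXCL
 *	  constituent lock, go through hat_page_demote(), as
 *	  page_demote_vp_pages() below does;
 *	- anonymous and kernel large pages are demoted with all constituent
 *	  pages locked EXCL and hat_pageunload()ed first, so they never need
 *	  hat_page_demote().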
5386  */
5387 static void
5388 page_demote_vp_pages(page_t *pp)
5389 {
5390 	kmutex_t *mtx;
5391 
5392 	ASSERT(PAGE_EXCL(pp));
5393 	ASSERT(!PP_ISFREE(pp));
5394 	ASSERT(pp->p_vnode != NULL);
5395 	ASSERT(!IS_SWAPFSVP(pp->p_vnode));
5396 	ASSERT(pp->p_vnode != &kvp);
5397 
5398 	VM_STAT_ADD(pagecnt.pc_demote_pages[0]);
5399 
5400 	mtx = page_szc_lock(pp);
5401 	if (mtx != NULL) {
5402 		hat_page_demote(pp);
5403 		mutex_exit(mtx);
5404 	}
5405 	ASSERT(pp->p_szc == 0);
5406 }
5407 
5408 /*
5409  * Mark any existing pages for migration in the given range
5410  */
5411 void
5412 page_mark_migrate(struct seg *seg, caddr_t addr, size_t len,
5413     struct anon_map *amp, ulong_t anon_index, vnode_t *vp,
5414     u_offset_t vnoff, int rflag)
5415 {
5416 	struct anon	*ap;
5417 	vnode_t		*curvp;
5418 	lgrp_t		*from;
5419 	pgcnt_t		i;
5420 	pgcnt_t		nlocked;
5421 	u_offset_t	off;
5422 	pfn_t		pfn;
5423 	size_t		pgsz;
5424 	size_t		segpgsz;
5425 	pgcnt_t		pages;
5426 	uint_t		pszc;
5427 	page_t		**ppa;
5428 	pgcnt_t		ppa_nentries;
5429 	page_t		*pp;
5430 	caddr_t		va;
5431 	ulong_t		an_idx;
5432 	anon_sync_obj_t	cookie;
5433 
5434 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5435 
5436 	/*
5437 	 * Don't do anything if we don't need to do lgroup optimizations
5438 	 * on this system.
5439 	 */
5440 	if (!lgrp_optimizations())
5441 		return;
5442 
5443 	/*
5444 	 * Align address and length to (potentially large) page boundary
5445 	 */
5446 	segpgsz = page_get_pagesize(seg->s_szc);
5447 	addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz);
5448 	if (rflag)
5449 		len = P2ROUNDUP(len, segpgsz);
5450 
5451 	/*
5452 	 * Allocate page array to accommodate the largest page size
5453 	 */
5454 	pgsz = page_get_pagesize(page_num_pagesizes() - 1);
5455 	ppa_nentries = btop(pgsz);
5456 	ppa = kmem_zalloc(ppa_nentries * sizeof (page_t *), KM_SLEEP);
5457 
5458 	/*
5459 	 * Do one (large) page at a time
5460 	 */
5461 	va = addr;
5462 	while (va < addr + len) {
5463 		/*
5464 		 * Lookup (root) page for vnode and offset corresponding to
5465 		 * this virtual address.
5466 		 * Try the anonmap first since there may be copy-on-write
5467 		 * pages, but initialize the vnode pointer and offset using
5468 		 * the vnode arguments just in case there isn't an amp.
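		 * (If an anon slot exists, swap_xlate() below overrides
		 * curvp and off with the anon page's backing vnode and
		 * offset.)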
5469 		 */
5470 		curvp = vp;
5471 		off = vnoff + va - seg->s_base;
5472 		if (amp) {
5473 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5474 			an_idx = anon_index + seg_page(seg, va);
5475 			anon_array_enter(amp, an_idx, &cookie);
5476 			ap = anon_get_ptr(amp->ahp, an_idx);
5477 			if (ap)
5478 				swap_xlate(ap, &curvp, &off);
5479 			anon_array_exit(&cookie);
5480 			ANON_LOCK_EXIT(&amp->a_rwlock);
5481 		}
5482 
5483 		pp = NULL;
5484 		if (curvp)
5485 			pp = page_lookup(curvp, off, SE_SHARED);
5486 
5487 		/*
5488 		 * If there isn't a page at this virtual address,
5489 		 * skip to next page
5490 		 */
5491 		if (pp == NULL) {
5492 			va += PAGESIZE;
5493 			continue;
5494 		}
5495 
5496 		/*
5497 		 * Figure out which lgroup this page is in for kstats
5498 		 */
5499 		pfn = page_pptonum(pp);
5500 		from = lgrp_pfn_to_lgrp(pfn);
5501 
5502 		/*
5503 		 * Get page size, and round up and skip to next page boundary
5504 		 * if unaligned address
5505 		 */
5506 		pszc = pp->p_szc;
5507 		pgsz = page_get_pagesize(pszc);
5508 		pages = btop(pgsz);
5509 		if (!IS_P2ALIGNED(va, pgsz) ||
5510 		    !IS_P2ALIGNED(pfn, pages) ||
5511 		    pgsz > segpgsz) {
5512 			pgsz = MIN(pgsz, segpgsz);
5513 			page_unlock(pp);
5514 			i = btop(P2END((uintptr_t)va, pgsz) -
5515 			    (uintptr_t)va);
5516 			va = (caddr_t)P2END((uintptr_t)va, pgsz);
5517 			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, i);
5518 			continue;
5519 		}
5520 
5521 		/*
5522 		 * Upgrade to exclusive lock on page
5523 		 */
5524 		if (!page_tryupgrade(pp)) {
5525 			page_unlock(pp);
5526 			va += pgsz;
5527 			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
5528 			    btop(pgsz));
5529 			continue;
5530 		}
5531 
5532 		/*
5533 		 * Remember pages locked exclusively and how many
5534 		 */
5535 		ppa[0] = pp;
5536 		nlocked = 1;
5537 
5538 		/*
5539 		 * Lock constituent pages if this is a large page
5540 		 */
5541 		if (pages > 1) {
5542 			/*
5543 			 * Lock all constituents except the root page, since
5544 			 * it should be locked already.
5545 			 */
5546 			for (i = 1; i < pages; i++) {
5547 				pp++;
5548 				if (!page_trylock(pp, SE_EXCL)) {
5549 					break;
5550 				}
5551 				if (PP_ISFREE(pp) ||
5552 				    pp->p_szc != pszc) {
5553 					/*
5554 					 * hat_page_demote() raced in with us.
5555 					 */
5556 					ASSERT(!IS_SWAPFSVP(curvp));
5557 					page_unlock(pp);
5558 					break;
5559 				}
5560 				ppa[nlocked] = pp;
5561 				nlocked++;
5562 			}
5563 		}
5564 
5565 		/*
5566 		 * If all constituent pages couldn't be locked,
5567 		 * unlock pages locked so far and skip to next page.
5568 		 */
5569 		if (nlocked != pages) {
5570 			for (i = 0; i < nlocked; i++)
5571 				page_unlock(ppa[i]);
5572 			va += pgsz;
5573 			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
5574 			    btop(pgsz));
5575 			continue;
5576 		}
5577 
5578 		/*
5579 		 * hat_page_demote() can no longer happen
5580 		 * since the last cons page had the right p_szc after
5581 		 * all cons pages were locked. All cons pages
5582 		 * should now have the same p_szc.
5583 */ 5584 5585 /* 5586 * All constituent pages locked successfully, so mark 5587 * large page for migration and unload the mappings of 5588 * constituent pages, so a fault will occur on any part of the 5589 * large page 5590 */ 5591 PP_SETMIGRATE(ppa[0]); 5592 for (i = 0; i < nlocked; i++) { 5593 pp = ppa[i]; 5594 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 5595 ASSERT(hat_page_getshare(pp) == 0); 5596 page_unlock(pp); 5597 } 5598 lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked); 5599 5600 va += pgsz; 5601 } 5602 kmem_free(ppa, ppa_nentries * sizeof (page_t *)); 5603 } 5604 5605 /* 5606 * Migrate any pages that have been marked for migration in the given range 5607 */ 5608 void 5609 page_migrate( 5610 struct seg *seg, 5611 caddr_t addr, 5612 page_t **ppa, 5613 pgcnt_t npages) 5614 { 5615 lgrp_t *from; 5616 lgrp_t *to; 5617 page_t *newpp; 5618 page_t *pp; 5619 pfn_t pfn; 5620 size_t pgsz; 5621 spgcnt_t page_cnt; 5622 spgcnt_t i; 5623 uint_t pszc; 5624 5625 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5626 5627 while (npages > 0) { 5628 pp = *ppa; 5629 pszc = pp->p_szc; 5630 pgsz = page_get_pagesize(pszc); 5631 page_cnt = btop(pgsz); 5632 5633 /* 5634 * Check to see whether this page is marked for migration 5635 * 5636 * Assume that root page of large page is marked for 5637 * migration and none of the other constituent pages 5638 * are marked. This really simplifies clearing the 5639 * migrate bit by not having to clear it from each 5640 * constituent page. 5641 * 5642 * note we don't want to relocate an entire large page if 5643 * someone is only using one subpage. 5644 */ 5645 if (npages < page_cnt) 5646 break; 5647 5648 /* 5649 * Is it marked for migration? 5650 */ 5651 if (!PP_ISMIGRATE(pp)) 5652 goto next; 5653 5654 /* 5655 * Determine lgroups that page is being migrated between 5656 */ 5657 pfn = page_pptonum(pp); 5658 if (!IS_P2ALIGNED(pfn, page_cnt)) { 5659 break; 5660 } 5661 from = lgrp_pfn_to_lgrp(pfn); 5662 to = lgrp_mem_choose(seg, addr, pgsz); 5663 5664 /* 5665 * Check to see whether we are trying to migrate page to lgroup 5666 * where it is allocated already 5667 */ 5668 if (to == from) { 5669 PP_CLRMIGRATE(pp); 5670 goto next; 5671 } 5672 5673 /* 5674 * Need to get exclusive lock's to migrate 5675 */ 5676 for (i = 0; i < page_cnt; i++) { 5677 ASSERT(PAGE_LOCKED(ppa[i])); 5678 if (page_pptonum(ppa[i]) != pfn + i || 5679 ppa[i]->p_szc != pszc) { 5680 break; 5681 } 5682 if (!page_tryupgrade(ppa[i])) { 5683 lgrp_stat_add(from->lgrp_id, 5684 LGRP_PM_FAIL_LOCK_PGS, 5685 page_cnt); 5686 break; 5687 } 5688 } 5689 if (i != page_cnt) { 5690 while (--i != -1) { 5691 page_downgrade(ppa[i]); 5692 } 5693 goto next; 5694 } 5695 5696 (void) page_create_wait(page_cnt, PG_WAIT); 5697 newpp = page_get_replacement_page(pp, to, PGR_SAMESZC); 5698 if (newpp == NULL) { 5699 page_create_putback(page_cnt); 5700 for (i = 0; i < page_cnt; i++) { 5701 page_downgrade(ppa[i]); 5702 } 5703 lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS, 5704 page_cnt); 5705 goto next; 5706 } 5707 ASSERT(newpp->p_szc == pszc); 5708 /* 5709 * Clear migrate bit and relocate page 5710 */ 5711 PP_CLRMIGRATE(pp); 5712 if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) { 5713 panic("page_migrate: page_relocate failed"); 5714 } 5715 ASSERT(page_cnt * PAGESIZE == pgsz); 5716 5717 /* 5718 * Keep stats for number of pages migrated from and to 5719 * each lgroup 5720 */ 5721 lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt); 5722 lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt); 5723 /* 
5724 		 * Update the page_t array we were passed in and
5725 		 * unlink constituent pages of a large page.
5726 		 */
5727 		for (i = 0; i < page_cnt; ++i, ++pp) {
5728 			ASSERT(PAGE_EXCL(newpp));
5729 			ASSERT(newpp->p_szc == pszc);
5730 			ppa[i] = newpp;
5731 			pp = newpp;
5732 			page_sub(&newpp, pp);
5733 			page_downgrade(pp);
5734 		}
5735 		ASSERT(newpp == NULL);
5736 next:
5737 		addr += pgsz;
5738 		ppa += page_cnt;
5739 		npages -= page_cnt;
5740 	}
5741 }
5742 
5743 ulong_t mem_waiters = 0;
5744 ulong_t max_count = 20;
5745 #define	MAX_DELAY	0x1ff
5746 
5747 /*
5748  * Check if enough memory is available to proceed.
5749  * Depending on system configuration and how much memory is
5750  * reserved for swap, we need to check against two variables.
5751  * E.g. on systems with little physical swap, availrmem can be
5752  * a more reliable indicator of how much memory is available.
5753  * On systems with large physical swap, freemem can be the better indicator.
5754  * If freemem drops below a threshold level, don't return an error
5755  * immediately, but wake up pageout to free memory and block.
5756  * This is done a number of times. If pageout is not able to free
5757  * memory within a certain time, return an error.
5758  * The same applies to availrmem, but kmem_reap is used to
5759  * free memory.
5760  */
5761 int
5762 page_mem_avail(pgcnt_t npages)
5763 {
5764 	ulong_t count;
5765 
5766 #if defined(__i386)
5767 	if (freemem > desfree + npages &&
5768 	    availrmem > swapfs_reserve + npages &&
5769 	    btop(vmem_size(heap_arena, VMEM_FREE)) > tune.t_minarmem +
5770 	    npages)
5771 		return (1);
5772 #else
5773 	if (freemem > desfree + npages &&
5774 	    availrmem > swapfs_reserve + npages)
5775 		return (1);
5776 #endif
5777 
5778 	count = max_count;
5779 	atomic_add_long(&mem_waiters, 1);
5780 
5781 	while (freemem < desfree + npages && --count) {
5782 		cv_signal(&proc_pageout->p_cv);
5783 		if (delay_sig(hz + (mem_waiters & MAX_DELAY))) {
5784 			atomic_add_long(&mem_waiters, -1);
5785 			return (0);
5786 		}
5787 	}
5788 	if (count == 0) {
5789 		atomic_add_long(&mem_waiters, -1);
5790 		return (0);
5791 	}
5792 
5793 	count = max_count;
5794 	while (availrmem < swapfs_reserve + npages && --count) {
5795 		kmem_reap();
5796 		if (delay_sig(hz + (mem_waiters & MAX_DELAY))) {
5797 			atomic_add_long(&mem_waiters, -1);
5798 			return (0);
5799 		}
5800 	}
5801 	atomic_add_long(&mem_waiters, -1);
5802 	if (count == 0)
5803 		return (0);
5804 
5805 #if defined(__i386)
5806 	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
5807 	    tune.t_minarmem + npages)
5808 		return (0);
5809 #endif
5810 	return (1);
5811 }
5812 
5813 #define	MAX_CNT	60	/* max num of iterations */
5814 /*
5815  * Reclaim/reserve availrmem for npages.
5816  * If there is not enough memory, start reaping seg and kmem caches.
5817  * Start pageout scanner (via page_needfree()).
5818  * Exit after ~ MAX_CNT s regardless of how much memory has been released.
5819  * Note: There is no guarantee that any availrmem will be freed, as
5820  * this memory typically is locked (kernel heap) or reserved for swap.
5821  * Also, due to memory fragmentation the kmem allocator may not be able
5822  * to free any memory (a single user-allocated buffer can prevent
5823  * freeing a slab or a page).
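 *
 * A sketch of a hypothetical caller reserving availrmem for npages:
 *
 *	if (page_reclaim_mem(npages, 0, 1) == 0)
 *		fail, the reservation could not be made;
 *	on success, availrmem has been reduced by npages.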
5824 */ 5825 int 5826 page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust) 5827 { 5828 int i = 0; 5829 int ret = 0; 5830 pgcnt_t deficit; 5831 pgcnt_t old_availrmem; 5832 5833 mutex_enter(&freemem_lock); 5834 old_availrmem = availrmem - 1; 5835 while ((availrmem < tune.t_minarmem + npages + epages) && 5836 (old_availrmem < availrmem) && (i++ < MAX_CNT)) { 5837 old_availrmem = availrmem; 5838 deficit = tune.t_minarmem + npages + epages - availrmem; 5839 mutex_exit(&freemem_lock); 5840 page_needfree(deficit); 5841 seg_preap(); 5842 kmem_reap(); 5843 delay(hz); 5844 page_needfree(-(spgcnt_t)deficit); 5845 mutex_enter(&freemem_lock); 5846 } 5847 5848 if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) { 5849 availrmem -= npages; 5850 ret = 1; 5851 } 5852 5853 mutex_exit(&freemem_lock); 5854 5855 return (ret); 5856 } 5857 5858 /* 5859 * Search the memory segments to locate the desired page. Within a 5860 * segment, pages increase linearly with one page structure per 5861 * physical page frame (size PAGESIZE). The search begins 5862 * with the segment that was accessed last, to take advantage of locality. 5863 * If the hint misses, we start from the beginning of the sorted memseg list 5864 */ 5865 5866 5867 /* 5868 * Some data structures for pfn to pp lookup. 5869 */ 5870 ulong_t mhash_per_slot; 5871 struct memseg *memseg_hash[N_MEM_SLOTS]; 5872 5873 page_t * 5874 page_numtopp_nolock(pfn_t pfnum) 5875 { 5876 struct memseg *seg; 5877 page_t *pp; 5878 vm_cpu_data_t *vc = CPU->cpu_vm_data; 5879 5880 ASSERT(vc != NULL); 5881 5882 MEMSEG_STAT_INCR(nsearch); 5883 5884 /* Try last winner first */ 5885 if (((seg = vc->vc_pnum_memseg) != NULL) && 5886 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5887 MEMSEG_STAT_INCR(nlastwon); 5888 pp = seg->pages + (pfnum - seg->pages_base); 5889 if (pp->p_pagenum == pfnum) 5890 return ((page_t *)pp); 5891 } 5892 5893 /* Else Try hash */ 5894 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 5895 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5896 MEMSEG_STAT_INCR(nhashwon); 5897 vc->vc_pnum_memseg = seg; 5898 pp = seg->pages + (pfnum - seg->pages_base); 5899 if (pp->p_pagenum == pfnum) 5900 return ((page_t *)pp); 5901 } 5902 5903 /* Else Brute force */ 5904 for (seg = memsegs; seg != NULL; seg = seg->next) { 5905 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5906 vc->vc_pnum_memseg = seg; 5907 pp = seg->pages + (pfnum - seg->pages_base); 5908 return ((page_t *)pp); 5909 } 5910 } 5911 vc->vc_pnum_memseg = NULL; 5912 MEMSEG_STAT_INCR(nnotfound); 5913 return ((page_t *)NULL); 5914 5915 } 5916 5917 struct memseg * 5918 page_numtomemseg_nolock(pfn_t pfnum) 5919 { 5920 struct memseg *seg; 5921 page_t *pp; 5922 5923 /* Try hash */ 5924 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 5925 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5926 pp = seg->pages + (pfnum - seg->pages_base); 5927 if (pp->p_pagenum == pfnum) 5928 return (seg); 5929 } 5930 5931 /* Else Brute force */ 5932 for (seg = memsegs; seg != NULL; seg = seg->next) { 5933 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5934 return (seg); 5935 } 5936 } 5937 return ((struct memseg *)NULL); 5938 } 5939 5940 /* 5941 * Given a page and a count return the page struct that is 5942 * n structs away from the current one in the global page 5943 * list. 5944 * 5945 * This function wraps to the first page upon 5946 * reaching the end of the memseg list. 
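 *
 * The canonical full scan built on these primitives (page_busy() above is
 * a real example):
 *
 *	page_t *page0 = page_first();
 *	page_t *pp = page0;
 *	do {
 *		(examine pp)
 *	} while ((pp = page_next(pp)) != page0);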
5947 */ 5948 page_t * 5949 page_nextn(page_t *pp, ulong_t n) 5950 { 5951 struct memseg *seg; 5952 page_t *ppn; 5953 vm_cpu_data_t *vc = (vm_cpu_data_t *)CPU->cpu_vm_data; 5954 5955 ASSERT(vc != NULL); 5956 5957 if (((seg = vc->vc_pnext_memseg) == NULL) || 5958 (seg->pages_base == seg->pages_end) || 5959 !(pp >= seg->pages && pp < seg->epages)) { 5960 5961 for (seg = memsegs; seg; seg = seg->next) { 5962 if (pp >= seg->pages && pp < seg->epages) 5963 break; 5964 } 5965 5966 if (seg == NULL) { 5967 /* Memory delete got in, return something valid. */ 5968 /* TODO: fix me. */ 5969 seg = memsegs; 5970 pp = seg->pages; 5971 } 5972 } 5973 5974 /* check for wraparound - possible if n is large */ 5975 while ((ppn = (pp + n)) >= seg->epages || ppn < pp) { 5976 n -= seg->epages - pp; 5977 seg = seg->next; 5978 if (seg == NULL) 5979 seg = memsegs; 5980 pp = seg->pages; 5981 } 5982 vc->vc_pnext_memseg = seg; 5983 return (ppn); 5984 } 5985 5986 /* 5987 * Initialize for a loop using page_next_scan_large(). 5988 */ 5989 page_t * 5990 page_next_scan_init(void **cookie) 5991 { 5992 ASSERT(cookie != NULL); 5993 *cookie = (void *)memsegs; 5994 return ((page_t *)memsegs->pages); 5995 } 5996 5997 /* 5998 * Return the next page in a scan of page_t's, assuming we want 5999 * to skip over sub-pages within larger page sizes. 6000 * 6001 * The cookie is used to keep track of the current memseg. 6002 */ 6003 page_t * 6004 page_next_scan_large( 6005 page_t *pp, 6006 ulong_t *n, 6007 void **cookie) 6008 { 6009 struct memseg *seg = (struct memseg *)*cookie; 6010 page_t *new_pp; 6011 ulong_t cnt; 6012 pfn_t pfn; 6013 6014 6015 /* 6016 * get the count of page_t's to skip based on the page size 6017 */ 6018 ASSERT(pp != NULL); 6019 if (pp->p_szc == 0) { 6020 cnt = 1; 6021 } else { 6022 pfn = page_pptonum(pp); 6023 cnt = page_get_pagecnt(pp->p_szc); 6024 cnt -= pfn & (cnt - 1); 6025 } 6026 *n += cnt; 6027 new_pp = pp + cnt; 6028 6029 /* 6030 * Catch if we went past the end of the current memory segment. If so, 6031 * just move to the next segment with pages. 6032 */ 6033 if (new_pp >= seg->epages) { 6034 do { 6035 seg = seg->next; 6036 if (seg == NULL) 6037 seg = memsegs; 6038 } while (seg->pages == seg->epages); 6039 new_pp = seg->pages; 6040 *cookie = (void *)seg; 6041 } 6042 6043 return (new_pp); 6044 } 6045 6046 6047 /* 6048 * Returns next page in list. Note: this function wraps 6049 * to the first page in the list upon reaching the end 6050 * of the list. Callers should be aware of this fact. 6051 */ 6052 6053 /* We should change this be a #define */ 6054 6055 page_t * 6056 page_next(page_t *pp) 6057 { 6058 return (page_nextn(pp, 1)); 6059 } 6060 6061 page_t * 6062 page_first() 6063 { 6064 return ((page_t *)memsegs->pages); 6065 } 6066 6067 6068 /* 6069 * This routine is called at boot with the initial memory configuration 6070 * and when memory is added or removed. 6071 */ 6072 void 6073 build_pfn_hash() 6074 { 6075 pfn_t cur; 6076 pgcnt_t index; 6077 struct memseg *pseg; 6078 int i; 6079 6080 /* 6081 * Clear memseg_hash array. 6082 * Since memory add/delete is designed to operate concurrently 6083 * with normal operation, the hash rebuild must be able to run 6084 * concurrently with page_numtopp_nolock(). To support this 6085 * functionality, assignments to memseg_hash array members must 6086 * be done atomically. 6087 * 6088 * NOTE: bzero() does not currently guarantee this for kernel 6089 * threads, and cannot be used here. 
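	 * (Hence the explicit store loop below: one aligned, pointer-sized
	 * store per slot.)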
6090 	 */
6091 	for (i = 0; i < N_MEM_SLOTS; i++)
6092 		memseg_hash[i] = NULL;
6093 
6094 	hat_kpm_mseghash_clear(N_MEM_SLOTS);
6095 
6096 	/*
6097 	 * Physmax is the last valid pfn.
6098 	 */
6099 	mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT;
6100 	for (pseg = memsegs; pseg != NULL; pseg = pseg->next) {
6101 		index = MEMSEG_PFN_HASH(pseg->pages_base);
6102 		cur = pseg->pages_base;
6103 		do {
6104 			if (index >= N_MEM_SLOTS)
6105 				index = MEMSEG_PFN_HASH(cur);
6106 
6107 			if (memseg_hash[index] == NULL ||
6108 			    memseg_hash[index]->pages_base > pseg->pages_base) {
6109 				memseg_hash[index] = pseg;
6110 				hat_kpm_mseghash_update(index, pseg);
6111 			}
6112 			cur += mhash_per_slot;
6113 			index++;
6114 		} while (cur < pseg->pages_end);
6115 	}
6116 }
6117 
6118 /*
6119  * Return the pagenum for the pp
6120  */
6121 pfn_t
6122 page_pptonum(page_t *pp)
6123 {
6124 	return (pp->p_pagenum);
6125 }
6126 
6127 /*
6128  * Interface to the referenced, modified, etc. bits
6129  * in the PSM part of the page struct
6130  * when no locking is desired.
6131  */
6132 void
6133 page_set_props(page_t *pp, uint_t flags)
6134 {
6135 	ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0);
6136 	pp->p_nrm |= (uchar_t)flags;
6137 }
6138 
6139 void
6140 page_clr_all_props(page_t *pp)
6141 {
6142 	pp->p_nrm = 0;
6143 }
6144 
6145 /*
6146  * Clear p_lckcnt and p_cowcnt, adjusting availrmem if required.
6147  */
6148 int
6149 page_clear_lck_cow(page_t *pp, int adjust)
6150 {
6151 	int	f_amount;
6152 
6153 	ASSERT(PAGE_EXCL(pp));
6154 
6155 	/*
6156 	 * The page_struct_lock need not be acquired here since
6157 	 * we require the caller hold the page exclusively locked.
6158 	 */
6159 	f_amount = 0;
6160 	if (pp->p_lckcnt) {
6161 		f_amount = 1;
6162 		pp->p_lckcnt = 0;
6163 	}
6164 	if (pp->p_cowcnt) {
6165 		f_amount += pp->p_cowcnt;
6166 		pp->p_cowcnt = 0;
6167 	}
6168 
6169 	if (adjust && f_amount) {
6170 		mutex_enter(&freemem_lock);
6171 		availrmem += f_amount;
6172 		mutex_exit(&freemem_lock);
6173 	}
6174 
6175 	return (f_amount);
6176 }
6177 
6178 /*
6179  * The following functions are called from free_vp_pages()
6180  * for an inexact estimate of a newly free'd page...
6181  */
6182 ulong_t
6183 page_share_cnt(page_t *pp)
6184 {
6185 	return (hat_page_getshare(pp));
6186 }
6187 
6188 int
6189 page_isshared(page_t *pp)
6190 {
6191 	return (hat_page_getshare(pp) > 1);
6192 }
6193 
6194 int
6195 page_isfree(page_t *pp)
6196 {
6197 	return (PP_ISFREE(pp));
6198 }
6199 
6200 int
6201 page_isref(page_t *pp)
6202 {
6203 	return (hat_page_getattr(pp, P_REF));
6204 }
6205 
6206 int
6207 page_ismod(page_t *pp)
6208 {
6209 	return (hat_page_getattr(pp, P_MOD));
6210 }
6211 
6212 /*
6213  * Reclaim the given constituent page from the freelist, regardless of its
6214  * size. The page will be demoted as required.
6215  * Returns 1 on success or 0 on failure.
6216  *
6217  * The page is unlocked if it can't be reclaimed (when freemem == 0).
6218  * If `lock' is non-null, it will be dropped and re-acquired if
6219  * the routine must wait while freemem is 0.
6220  */
6221 int
6222 page_reclaim_page(page_t *pp, kmutex_t *lock)
6223 {
6224 	struct pcf	*p;
6225 	uint_t		pcf_index;
6226 	struct cpu	*cpup;
6227 	uint_t		i;
6228 	pgcnt_t		collected = 0;
6229 
6230 	ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1);
6231 	ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp));
6232 
6233 	/*
6234 	 * If `freemem' is 0, we cannot reclaim this page from the
6235 	 * freelist, so release every lock we might hold: the page,
6236 	 * and the `lock' before blocking.
6237 * 6238 * The only way `freemem' can become 0 while there are pages 6239 * marked free (have their p->p_free bit set) is when the 6240 * system is low on memory and doing a page_create(). In 6241 * order to guarantee that once page_create() starts acquiring 6242 * pages it will be able to get all that it needs since `freemem' 6243 * was decreased by the requested amount. So, we need to release 6244 * this page, and let page_create() have it. 6245 * 6246 * Since `freemem' being zero is not supposed to happen, just 6247 * use the usual hash stuff as a starting point. If that bucket 6248 * is empty, then assume the worst, and start at the beginning 6249 * of the pcf array. If we always start at the beginning 6250 * when acquiring more than one pcf lock, there won't be any 6251 * deadlock problems. 6252 */ 6253 6254 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */ 6255 6256 if (freemem <= throttlefree && !page_create_throttle(1, 0)) { 6257 pcf_acquire_all(); 6258 goto page_reclaim_nomem; 6259 } 6260 6261 pcf_index = PCF_INDEX(); 6262 p = &pcf[pcf_index]; 6263 mutex_enter(&p->pcf_lock); 6264 if (p->pcf_count > 0) { 6265 collected = 1; 6266 p->pcf_count -= 1; 6267 } 6268 mutex_exit(&p->pcf_lock); 6269 6270 if (!collected) { 6271 VM_STAT_ADD(page_reclaim_zero); 6272 /* 6273 * Check again. Its possible that some other thread 6274 * could have been right behind us, and added one 6275 * to a list somewhere. Acquire each of the pcf locks 6276 * until we find a page. 6277 */ 6278 p = pcf; 6279 for (i = 0; i < PCF_FANOUT; i++) { 6280 mutex_enter(&p->pcf_lock); 6281 if (p->pcf_count) { 6282 if (p->pcf_count > 0) { 6283 p->pcf_count -= 1; 6284 collected = 1; 6285 break; 6286 } 6287 } 6288 p++; 6289 } 6290 6291 if (!collected) { 6292 page_reclaim_nomem: 6293 /* 6294 * We really can't have page `pp'. 6295 * Time for the no-memory dance with 6296 * page_free(). This is just like 6297 * page_create_wait(). Plus the added 6298 * attraction of releasing whatever mutex 6299 * we held when we were called with in `lock'. 6300 * Page_unlock() will wakeup any thread 6301 * waiting around for this page. 6302 */ 6303 if (lock) { 6304 VM_STAT_ADD(page_reclaim_zero_locked); 6305 mutex_exit(lock); 6306 } 6307 page_unlock(pp); 6308 6309 /* 6310 * get this before we drop all the pcf locks. 6311 */ 6312 mutex_enter(&new_freemem_lock); 6313 6314 p = pcf; 6315 for (i = 0; i < PCF_FANOUT; i++) { 6316 p->pcf_wait++; 6317 mutex_exit(&p->pcf_lock); 6318 p++; 6319 } 6320 6321 freemem_wait++; 6322 cv_wait(&freemem_cv, &new_freemem_lock); 6323 freemem_wait--; 6324 6325 mutex_exit(&new_freemem_lock); 6326 6327 if (lock) { 6328 mutex_enter(lock); 6329 } 6330 return (0); 6331 } 6332 6333 /* 6334 * We beat the PCF bins over the head until 6335 * we got the memory that we wanted. 6336 * The pcf accounting has been done, 6337 * though none of the pcf_wait flags have been set, 6338 * drop the locks and continue on. 6339 */ 6340 ASSERT(collected == 1); 6341 while (p >= pcf) { 6342 mutex_exit(&p->pcf_lock); 6343 p--; 6344 } 6345 } 6346 6347 /* 6348 * freemem is not protected by any lock. Thus, we cannot 6349 * have any assertion containing freemem here. 
6350 	 */
6351 	freemem -= 1;
6352 
6353 	VM_STAT_ADD(pagecnt.pc_reclaim);
6354 	if (PP_ISAGED(pp)) {
6355 		page_list_sub(pp, PG_FREE_LIST);
6356 		TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE,
6357 		    "page_reclaim_page_free:pp %p", pp);
6358 	} else {
6359 		page_list_sub(pp, PG_CACHE_LIST);
6360 		TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE,
6361 		    "page_reclaim_page_cache:pp %p", pp);
6362 	}
6363 
6364 	/*
6365 	 * The page we took off the freelist must be szc 0 as
6366 	 * we used page_list_sub, which will demote the page if needed.
6367 	 */
6368 	ASSERT(pp->p_szc == 0);
6369 
6370 	/*
6371 	 * Clear the p_free & p_age bits since this page is no longer
6372 	 * on the free list. Notice that there is a brief time when
6373 	 * the page is marked as free but is not on the list.
6374 	 *
6375 	 * Set the reference bit to protect against immediate pageout.
6376 	 */
6377 	PP_CLRFREE(pp);
6378 	PP_CLRAGED(pp);
6379 	page_set_props(pp, P_REF);
6380 
6381 	CPU_STATS_ENTER_K();
6382 	cpup = CPU;	/* get cpup now that CPU cannot change */
6383 	CPU_STATS_ADDQ(cpup, vm, pgrec, 1);
6384 	CPU_STATS_ADDQ(cpup, vm, pgfrec, 1);
6385 	CPU_STATS_EXIT_K();
6386 
6387 	return (1);
6388 }
6389 
6390 /*
6391  * The following code all currently relates to the page capture logic:
6392  *
6393  * This logic is used for cases where there is a desire to claim a certain
6394  * physical page in the system for the caller. As it may not be possible
6395  * to capture the page immediately, the p_toxic bits are used in the page
6396  * structure to indicate that someone wants to capture this page. When the
6397  * page gets unlocked, the toxic flag will be noted and an attempt to capture
6398  * the page will be made. If it is successful, the original caller's
6399  * callback will be called with the page to do with it what they please.
6400  *
6401  * There is also an async thread which occasionally wakes up to attempt to
6402  * capture pages which have the capture bit set. All of the pages which
6403  * need to be captured asynchronously have been inserted into the
6404  * page_capture_hash and thus this thread walks that hash list. Items in the
6405  * hash have an expiration time so this thread handles that as well by removing
6406  * the item from the hash if it has expired.
6407  *
6408  * Some important things to note are:
6409  *  - if the PR_CAPTURE bit is set on a page, then the page is in the
6410  *    page_capture_hash. The page_capture_hash_head.pchh_mutex is needed
6411  *    to set and clear this bit, and while the lock is held is the only time
6412  *    you can add or remove an entry from the hash.
6413  *  - the PR_CAPTURE bit can only be set and cleared while holding the
6414  *    page_capture_hash_head.pchh_mutex
6415  *  - the t_flag field of the thread struct is used with the T_CAPTURING
6416  *    flag to prevent recursion while dealing with large pages.
6417  *  - pages which need to be retired never expire on the page_capture_hash.
6418  */
6419 
6420 static void page_capture_thread(void);
6421 static kthread_t *pc_thread_id;
6422 kcondvar_t pc_cv;
6423 static kmutex_t pc_thread_mutex;
6424 static clock_t pc_thread_shortwait;
6425 static clock_t pc_thread_longwait;
6426 
6427 struct page_capture_callback pc_cb[PC_NUM_CALLBACKS];
6428 
6429 /* Note that this is a circular linked list */
6430 typedef struct page_capture_hash_bucket {
6431 	page_t *pp;
6432 	uint_t szc;
6433 	uint_t flags;
6434 	clock_t expires;	/* lbolt at which this request expires.
				 */
	void *datap;		/* Cached data passed in for callback */
	struct page_capture_hash_bucket *next;
	struct page_capture_hash_bucket *prev;
} page_capture_hash_bucket_t;

/*
 * Each hash bucket will have its own mutex and two lists which are:
 * active (0):	represents requests which have not been processed by
 *		the page_capture async thread yet.
 * walked (1):	represents requests which have been processed by the
 *		page_capture async thread within its given walk of this bucket.
 *
 * These are all needed so that we can synchronize all async page_capture
 * events.  When the async thread moves to a new bucket, it will append the
 * walked list to the active list and walk each item one at a time, moving it
 * from the active list to the walked list.  Thus if there is an async request
 * outstanding for a given page, it will always be in one of the two lists.
 * New requests will always be added to the active list.
 * If we are not able to capture a page before the request expires, we free
 * up the request structure, which indicates to page_capture that there is
 * no longer a need for the given page, and we clear the PR_CAPTURE flag if
 * possible.
 */
typedef struct page_capture_hash_head {
	kmutex_t pchh_mutex;
	uint_t num_pages;
	page_capture_hash_bucket_t lists[2];	/* sentinel nodes */
} page_capture_hash_head_t;

#ifdef DEBUG
#define	NUM_PAGE_CAPTURE_BUCKETS	4
#else
#define	NUM_PAGE_CAPTURE_BUCKETS	64
#endif

page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS];

/* for now use a very simple hash based upon the size of a page struct */
#define	PAGE_CAPTURE_HASH(pp)	\
	((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1)))

extern pgcnt_t swapfs_minfree;

int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap);

/*
 * A callback function is required for page capture requests.
 */
void
page_capture_register_callback(uint_t index, clock_t duration,
    int (*cb_func)(page_t *, void *, uint_t))
{
	ASSERT(pc_cb[index].cb_active == 0);
	ASSERT(cb_func != NULL);
	rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER);
	pc_cb[index].duration = duration;
	pc_cb[index].cb_func = cb_func;
	pc_cb[index].cb_active = 1;
	rw_exit(&pc_cb[index].cb_rwlock);
}

void
page_capture_unregister_callback(uint_t index)
{
	int i, j;
	struct page_capture_hash_bucket *bp1;
	struct page_capture_hash_bucket *bp2;
	struct page_capture_hash_bucket *head = NULL;
	uint_t flags = (1 << index);

	rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER);
	ASSERT(pc_cb[index].cb_active == 1);
	pc_cb[index].duration = 0;	/* Paranoia */
	pc_cb[index].cb_func = NULL;	/* Paranoia */
	pc_cb[index].cb_active = 0;
	rw_exit(&pc_cb[index].cb_rwlock);

	/*
	 * Just move all the entries to a private list which we can walk
	 * through without the need to hold any locks.
	 * No more requests can get added to the hash lists for this consumer
	 * as the cb_active field for the callback has been cleared.
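	 * Unlinked entries are pushed onto `head' through their next
	 * pointers, so the private list is singly linked and can be
	 * freed below without holding any hash mutex.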
6517 */ 6518 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 6519 mutex_enter(&page_capture_hash[i].pchh_mutex); 6520 for (j = 0; j < 2; j++) { 6521 bp1 = page_capture_hash[i].lists[j].next; 6522 /* walk through all but first (sentinel) element */ 6523 while (bp1 != &page_capture_hash[i].lists[j]) { 6524 bp2 = bp1; 6525 if (bp2->flags & flags) { 6526 bp1 = bp2->next; 6527 bp1->prev = bp2->prev; 6528 bp2->prev->next = bp1; 6529 bp2->next = head; 6530 head = bp2; 6531 /* 6532 * Clear the PR_CAPTURE bit as we 6533 * hold appropriate locks here. 6534 */ 6535 page_clrtoxic(head->pp, PR_CAPTURE); 6536 page_capture_hash[i].num_pages--; 6537 continue; 6538 } 6539 bp1 = bp1->next; 6540 } 6541 } 6542 mutex_exit(&page_capture_hash[i].pchh_mutex); 6543 } 6544 6545 while (head != NULL) { 6546 bp1 = head; 6547 head = head->next; 6548 kmem_free(bp1, sizeof (*bp1)); 6549 } 6550 } 6551 6552 6553 /* 6554 * Find pp in the active list and move it to the walked list if it 6555 * exists. 6556 * Note that most often pp should be at the front of the active list 6557 * as it is currently used and thus there is no other sort of optimization 6558 * being done here as this is a linked list data structure. 6559 * Returns 1 on successful move or 0 if page could not be found. 6560 */ 6561 static int 6562 page_capture_move_to_walked(page_t *pp) 6563 { 6564 page_capture_hash_bucket_t *bp; 6565 int index; 6566 6567 index = PAGE_CAPTURE_HASH(pp); 6568 6569 mutex_enter(&page_capture_hash[index].pchh_mutex); 6570 bp = page_capture_hash[index].lists[0].next; 6571 while (bp != &page_capture_hash[index].lists[0]) { 6572 if (bp->pp == pp) { 6573 /* Remove from old list */ 6574 bp->next->prev = bp->prev; 6575 bp->prev->next = bp->next; 6576 6577 /* Add to new list */ 6578 bp->next = page_capture_hash[index].lists[1].next; 6579 bp->prev = &page_capture_hash[index].lists[1]; 6580 page_capture_hash[index].lists[1].next = bp; 6581 bp->next->prev = bp; 6582 mutex_exit(&page_capture_hash[index].pchh_mutex); 6583 6584 return (1); 6585 } 6586 bp = bp->next; 6587 } 6588 mutex_exit(&page_capture_hash[index].pchh_mutex); 6589 return (0); 6590 } 6591 6592 /* 6593 * Add a new entry to the page capture hash. The only case where a new 6594 * entry is not added is when the page capture consumer is no longer registered. 6595 * In this case, we'll silently not add the page to the hash. We know that 6596 * page retire will always be registered for the case where we are currently 6597 * unretiring a page and thus there are no conflicts. 
6598 */ 6599 static void 6600 page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap) 6601 { 6602 page_capture_hash_bucket_t *bp1; 6603 page_capture_hash_bucket_t *bp2; 6604 int index; 6605 int cb_index; 6606 int i; 6607 #ifdef DEBUG 6608 page_capture_hash_bucket_t *tp1; 6609 int l; 6610 #endif 6611 6612 ASSERT(!(flags & CAPTURE_ASYNC)); 6613 6614 bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP); 6615 6616 bp1->pp = pp; 6617 bp1->szc = szc; 6618 bp1->flags = flags; 6619 bp1->datap = datap; 6620 6621 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6622 if ((flags >> cb_index) & 1) { 6623 break; 6624 } 6625 } 6626 6627 ASSERT(cb_index != PC_NUM_CALLBACKS); 6628 6629 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6630 if (pc_cb[cb_index].cb_active) { 6631 if (pc_cb[cb_index].duration == -1) { 6632 bp1->expires = (clock_t)-1; 6633 } else { 6634 bp1->expires = lbolt + pc_cb[cb_index].duration; 6635 } 6636 } else { 6637 /* There's no callback registered so don't add to the hash */ 6638 rw_exit(&pc_cb[cb_index].cb_rwlock); 6639 kmem_free(bp1, sizeof (*bp1)); 6640 return; 6641 } 6642 6643 index = PAGE_CAPTURE_HASH(pp); 6644 6645 /* 6646 * Only allow capture flag to be modified under this mutex. 6647 * Prevents multiple entries for same page getting added. 6648 */ 6649 mutex_enter(&page_capture_hash[index].pchh_mutex); 6650 6651 /* 6652 * if not already on the hash, set capture bit and add to the hash 6653 */ 6654 if (!(pp->p_toxic & PR_CAPTURE)) { 6655 #ifdef DEBUG 6656 /* Check for duplicate entries */ 6657 for (l = 0; l < 2; l++) { 6658 tp1 = page_capture_hash[index].lists[l].next; 6659 while (tp1 != &page_capture_hash[index].lists[l]) { 6660 if (tp1->pp == pp) { 6661 panic("page pp 0x%p already on hash " 6662 "at 0x%p\n", pp, tp1); 6663 } 6664 tp1 = tp1->next; 6665 } 6666 } 6667 6668 #endif 6669 page_settoxic(pp, PR_CAPTURE); 6670 bp1->next = page_capture_hash[index].lists[0].next; 6671 bp1->prev = &page_capture_hash[index].lists[0]; 6672 bp1->next->prev = bp1; 6673 page_capture_hash[index].lists[0].next = bp1; 6674 page_capture_hash[index].num_pages++; 6675 mutex_exit(&page_capture_hash[index].pchh_mutex); 6676 rw_exit(&pc_cb[cb_index].cb_rwlock); 6677 cv_signal(&pc_cv); 6678 return; 6679 } 6680 6681 /* 6682 * A page retire request will replace any other request. 6683 * A second physmem request which is for a different process than 6684 * the currently registered one will be dropped as there is 6685 * no way to hold the private data for both calls. 6686 * In the future, once there are more callers, this will have to 6687 * be worked out better as there needs to be private storage for 6688 * at least each type of caller (maybe have datap be an array of 6689 * *void's so that we can index based upon callers index). 6690 */ 6691 6692 /* walk hash list to update expire time */ 6693 for (i = 0; i < 2; i++) { 6694 bp2 = page_capture_hash[index].lists[i].next; 6695 while (bp2 != &page_capture_hash[index].lists[i]) { 6696 if (bp2->pp == pp) { 6697 if (flags & CAPTURE_RETIRE) { 6698 if (!(bp2->flags & CAPTURE_RETIRE)) { 6699 bp2->flags = flags; 6700 bp2->expires = bp1->expires; 6701 bp2->datap = datap; 6702 } 6703 } else { 6704 ASSERT(flags & CAPTURE_PHYSMEM); 6705 if (!(bp2->flags & CAPTURE_RETIRE) && 6706 (datap == bp2->datap)) { 6707 bp2->expires = bp1->expires; 6708 } 6709 } 6710 mutex_exit(&page_capture_hash[index]. 
				    pchh_mutex);
				rw_exit(&pc_cb[cb_index].cb_rwlock);
				kmem_free(bp1, sizeof (*bp1));
				return;
			}
			bp2 = bp2->next;
		}
	}

	/*
	 * The PR_CAPTURE flag is protected by the page_capture_hash mutexes,
	 * and thus it either has to be set or not set and can't change
	 * while we are holding the mutex above.
	 */
	panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n", pp);
}

/*
 * We have a page in our hands, so let's try and make it ours by turning
 * it into a clean page, like it had just come off the freelists.
 *
 * Returns 0 on success, with the page still EXCL locked.
 * On failure, the page will be unlocked, and returns EAGAIN.
 */
static int
page_capture_clean_page(page_t *pp)
{
	page_t *newpp;
	int skip_unlock = 0;
	spgcnt_t count;
	page_t *tpp;
	int ret = 0;
	int extra;

	ASSERT(PAGE_EXCL(pp));
	ASSERT(!PP_RETIRED(pp));
	ASSERT(curthread->t_flag & T_CAPTURING);

	if (PP_ISFREE(pp)) {
		if (!page_reclaim(pp, NULL)) {
			skip_unlock = 1;
			ret = EAGAIN;
			goto cleanup;
		}
		if (pp->p_vnode != NULL) {
			/*
			 * Since this page came from the
			 * cachelist, we must destroy the
			 * old vnode association.
			 */
			page_hashout(pp, NULL);
		}
		goto cleanup;
	}

	/*
	 * If we know page_relocate will fail, skip it.
	 * It could still fail due to a UE on another page, but we
	 * can't do anything about that.
	 */
	if (pp->p_toxic & PR_UE) {
		goto skip_relocate;
	}

	/*
	 * It's possible for a page to have no vnode, as fsflush comes
	 * through and cleans up these pages.  It's ugly, but that's how
	 * it is.
	 */
	if (pp->p_vnode == NULL) {
		goto skip_relocate;
	}

	/*
	 * The page was not free, so let's try to relocate it.
	 * page_relocate only works with root pages, so if this is not a root
	 * page, we need to demote it to try and relocate it.
	 * Unfortunately this is the best we can do right now.
	 */
	newpp = NULL;
	if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) {
		if (page_try_demote_pages(pp) == 0) {
			ret = EAGAIN;
			goto cleanup;
		}
	}
	ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL);
	if (ret == 0) {
		page_t *npp;
		/* unlock the new page(s) */
		while (count-- > 0) {
			ASSERT(newpp != NULL);
			npp = newpp;
			page_sub(&newpp, npp);
			page_unlock(npp);
		}
		ASSERT(newpp == NULL);
		/*
		 * Check to see if the page we have is too large.
		 * If so, demote it, freeing up the extra pages.
		 */
		if (pp->p_szc > 0) {
			/* For now demote extra pages to szc == 0 */
			extra = page_get_pagecnt(pp->p_szc) - 1;
			while (extra > 0) {
				tpp = pp->p_next;
				page_sub(&pp, tpp);
				tpp->p_szc = 0;
				page_free(tpp, 1);
				extra--;
			}
			/* Make sure to set our page to szc 0 as well */
			ASSERT(pp->p_next == pp && pp->p_prev == pp);
			pp->p_szc = 0;
		}
		goto cleanup;
	} else if (ret == EIO) {
		ret = EAGAIN;
		goto cleanup;
	} else {
		/*
		 * Reset the return value: we failed to relocate the page,
		 * but that does not mean that the remaining steps will
		 * not work.
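		 * (A failed relocation is not fatal here; we fall through
		 * and still try demotion, hat_pageunload() and
		 * page_hashout() below.)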
6834 */ 6835 ret = 0; 6836 } 6837 6838 skip_relocate: 6839 6840 if (pp->p_szc > 0) { 6841 if (page_try_demote_pages(pp) == 0) { 6842 ret = EAGAIN; 6843 goto cleanup; 6844 } 6845 } 6846 6847 ASSERT(pp->p_szc == 0); 6848 6849 if (hat_ismod(pp)) { 6850 ret = EAGAIN; 6851 goto cleanup; 6852 } 6853 if (PP_ISKVP(pp)) { 6854 ret = EAGAIN; 6855 goto cleanup; 6856 } 6857 if (pp->p_lckcnt || pp->p_cowcnt) { 6858 ret = EAGAIN; 6859 goto cleanup; 6860 } 6861 6862 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 6863 ASSERT(!hat_page_is_mapped(pp)); 6864 6865 if (hat_ismod(pp)) { 6866 /* 6867 * This is a semi-odd case as the page is now modified but not 6868 * mapped as we just unloaded the mappings above. 6869 */ 6870 ret = EAGAIN; 6871 goto cleanup; 6872 } 6873 if (pp->p_vnode != NULL) { 6874 page_hashout(pp, NULL); 6875 } 6876 6877 /* 6878 * At this point, the page should be in a clean state and 6879 * we can do whatever we want with it. 6880 */ 6881 6882 cleanup: 6883 if (ret != 0) { 6884 if (!skip_unlock) { 6885 page_unlock(pp); 6886 } 6887 } else { 6888 ASSERT(pp->p_szc == 0); 6889 ASSERT(PAGE_EXCL(pp)); 6890 6891 pp->p_next = pp; 6892 pp->p_prev = pp; 6893 } 6894 return (ret); 6895 } 6896 6897 /* 6898 * Various callers of page_trycapture() can have different restrictions upon 6899 * what memory they have access to. 6900 * Returns 0 on success, with the following error codes on failure: 6901 * EPERM - The requested page is long term locked, and thus repeated 6902 * requests to capture this page will likely fail. 6903 * ENOMEM - There was not enough free memory in the system to safely 6904 * map the requested page. 6905 * ENOENT - The requested page was inside the kernel cage, and the 6906 * PHYSMEM_CAGE flag was not set. 6907 */ 6908 int 6909 page_capture_pre_checks(page_t *pp, uint_t flags) 6910 { 6911 #if defined(__sparc) 6912 extern struct vnode prom_ppages; 6913 #endif /* __sparc */ 6914 6915 ASSERT(pp != NULL); 6916 6917 /* only physmem currently has restrictions */ 6918 if (!(flags & CAPTURE_PHYSMEM)) { 6919 return (0); 6920 } 6921 6922 #if defined(__sparc) 6923 if (pp->p_vnode == &prom_ppages) { 6924 return (EPERM); 6925 } 6926 6927 if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE)) { 6928 return (ENOENT); 6929 } 6930 6931 if (PP_ISNORELOCKERNEL(pp)) { 6932 return (EPERM); 6933 } 6934 #else 6935 if (PP_ISKVP(pp)) { 6936 return (EPERM); 6937 } 6938 #endif /* __sparc */ 6939 6940 if (availrmem < swapfs_minfree) { 6941 /* 6942 * We won't try to capture this page as we are 6943 * running low on memory. 6944 */ 6945 return (ENOMEM); 6946 } 6947 return (0); 6948 } 6949 6950 /* 6951 * Once we have a page in our mits, go ahead and complete the capture 6952 * operation. 6953 * Returns 1 on failure where page is no longer needed 6954 * Returns 0 on success 6955 * Returns -1 if there was a transient failure. 6956 * Failure cases must release the SE_EXCL lock on pp (usually via page_free). 
6957 */ 6958 int 6959 page_capture_take_action(page_t *pp, uint_t flags, void *datap) 6960 { 6961 int cb_index; 6962 int ret = 0; 6963 page_capture_hash_bucket_t *bp1; 6964 page_capture_hash_bucket_t *bp2; 6965 int index; 6966 int found = 0; 6967 int i; 6968 6969 ASSERT(PAGE_EXCL(pp)); 6970 ASSERT(curthread->t_flag & T_CAPTURING); 6971 6972 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6973 if ((flags >> cb_index) & 1) { 6974 break; 6975 } 6976 } 6977 ASSERT(cb_index < PC_NUM_CALLBACKS); 6978 6979 /* 6980 * Remove the entry from the page_capture hash, but don't free it yet 6981 * as we may need to put it back. 6982 * Since we own the page at this point in time, we should find it 6983 * in the hash if this is an ASYNC call. If we don't it's likely 6984 * that the page_capture_async() thread decided that this request 6985 * had expired, in which case we just continue on. 6986 */ 6987 if (flags & CAPTURE_ASYNC) { 6988 6989 index = PAGE_CAPTURE_HASH(pp); 6990 6991 mutex_enter(&page_capture_hash[index].pchh_mutex); 6992 for (i = 0; i < 2 && !found; i++) { 6993 bp1 = page_capture_hash[index].lists[i].next; 6994 while (bp1 != &page_capture_hash[index].lists[i]) { 6995 if (bp1->pp == pp) { 6996 bp1->next->prev = bp1->prev; 6997 bp1->prev->next = bp1->next; 6998 page_capture_hash[index].num_pages--; 6999 page_clrtoxic(pp, PR_CAPTURE); 7000 found = 1; 7001 break; 7002 } 7003 bp1 = bp1->next; 7004 } 7005 } 7006 mutex_exit(&page_capture_hash[index].pchh_mutex); 7007 } 7008 7009 /* Synchronize with the unregister func. */ 7010 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 7011 if (!pc_cb[cb_index].cb_active) { 7012 page_free(pp, 1); 7013 rw_exit(&pc_cb[cb_index].cb_rwlock); 7014 if (found) { 7015 kmem_free(bp1, sizeof (*bp1)); 7016 } 7017 return (1); 7018 } 7019 7020 /* 7021 * We need to remove the entry from the page capture hash and turn off 7022 * the PR_CAPTURE bit before calling the callback. We'll need to cache 7023 * the entry here, and then based upon the return value, cleanup 7024 * appropriately or re-add it to the hash, making sure that someone else 7025 * hasn't already done so. 7026 * It should be rare for the callback to fail and thus it's ok for 7027 * the failure path to be a bit complicated as the success path is 7028 * cleaner and the locking rules are easier to follow. 7029 */ 7030 7031 ret = pc_cb[cb_index].cb_func(pp, datap, flags); 7032 7033 rw_exit(&pc_cb[cb_index].cb_rwlock); 7034 7035 /* 7036 * If this was an ASYNC request, we need to cleanup the hash if the 7037 * callback was successful or if the request was no longer valid. 7038 * For non-ASYNC requests, we return failure to map and the caller 7039 * will take care of adding the request to the hash. 7040 * Note also that the callback itself is responsible for the page 7041 * at this point in time in terms of locking ... The most common 7042 * case for the failure path should just be a page_free. 7043 */ 7044 if (ret >= 0) { 7045 if (found) { 7046 kmem_free(bp1, sizeof (*bp1)); 7047 } 7048 return (ret); 7049 } 7050 if (!found) { 7051 return (ret); 7052 } 7053 7054 ASSERT(flags & CAPTURE_ASYNC); 7055 7056 /* 7057 * Check for expiration time first as we can just free it up if it's 7058 * expired. 7059 */ 7060 if (lbolt > bp1->expires && bp1->expires != -1) { 7061 kmem_free(bp1, sizeof (*bp1)); 7062 return (ret); 7063 } 7064 7065 /* 7066 * The callback failed and there used to be an entry in the hash for 7067 * this page, so we need to add it back to the hash. 
7068 */ 7069 mutex_enter(&page_capture_hash[index].pchh_mutex); 7070 if (!(pp->p_toxic & PR_CAPTURE)) { 7071 /* just add bp1 back to head of walked list */ 7072 page_settoxic(pp, PR_CAPTURE); 7073 bp1->next = page_capture_hash[index].lists[1].next; 7074 bp1->prev = &page_capture_hash[index].lists[1]; 7075 bp1->next->prev = bp1; 7076 page_capture_hash[index].lists[1].next = bp1; 7077 page_capture_hash[index].num_pages++; 7078 mutex_exit(&page_capture_hash[index].pchh_mutex); 7079 return (ret); 7080 } 7081 7082 /* 7083 * Otherwise there was a new capture request added to list 7084 * Need to make sure that our original data is represented if 7085 * appropriate. 7086 */ 7087 for (i = 0; i < 2; i++) { 7088 bp2 = page_capture_hash[index].lists[i].next; 7089 while (bp2 != &page_capture_hash[index].lists[i]) { 7090 if (bp2->pp == pp) { 7091 if (bp1->flags & CAPTURE_RETIRE) { 7092 if (!(bp2->flags & CAPTURE_RETIRE)) { 7093 bp2->szc = bp1->szc; 7094 bp2->flags = bp1->flags; 7095 bp2->expires = bp1->expires; 7096 bp2->datap = bp1->datap; 7097 } 7098 } else { 7099 ASSERT(bp1->flags & CAPTURE_PHYSMEM); 7100 if (!(bp2->flags & CAPTURE_RETIRE)) { 7101 bp2->szc = bp1->szc; 7102 bp2->flags = bp1->flags; 7103 bp2->expires = bp1->expires; 7104 bp2->datap = bp1->datap; 7105 } 7106 } 7107 mutex_exit(&page_capture_hash[index]. 7108 pchh_mutex); 7109 kmem_free(bp1, sizeof (*bp1)); 7110 return (ret); 7111 } 7112 bp2 = bp2->next; 7113 } 7114 } 7115 panic("PR_CAPTURE set but not on hash for pp 0x%p\n", pp); 7116 /*NOTREACHED*/ 7117 } 7118 7119 /* 7120 * Try to capture the given page for the caller specified in the flags 7121 * parameter. The page will either be captured and handed over to the 7122 * appropriate callback, or will be queued up in the page capture hash 7123 * to be captured asynchronously. 7124 * If the current request is due to an async capture, the page must be 7125 * exclusively locked before calling this function. 7126 * Currently szc must be 0 but in the future this should be expandable to 7127 * other page sizes. 7128 * Returns 0 on success, with the following error codes on failure: 7129 * EPERM - The requested page is long term locked, and thus repeated 7130 * requests to capture this page will likely fail. 7131 * ENOMEM - There was not enough free memory in the system to safely 7132 * map the requested page. 7133 * ENOENT - The requested page was inside the kernel cage, and the 7134 * CAPTURE_GET_CAGE flag was not set. 7135 * EAGAIN - The requested page could not be capturead at this point in 7136 * time but future requests will likely work. 7137 * EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED flag 7138 * was not set. 7139 */ 7140 int 7141 page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 7142 { 7143 int ret; 7144 int cb_index; 7145 7146 if (flags & CAPTURE_ASYNC) { 7147 ASSERT(PAGE_EXCL(pp)); 7148 goto async; 7149 } 7150 7151 /* Make sure there's enough availrmem ... 
	 */
	ret = page_capture_pre_checks(pp, flags);
	if (ret != 0) {
		return (ret);
	}

	if (!page_trylock(pp, SE_EXCL)) {
		for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
			if ((flags >> cb_index) & 1) {
				break;
			}
		}
		ASSERT(cb_index < PC_NUM_CALLBACKS);
		ret = EAGAIN;
		/* Special case for retired pages */
		if (PP_RETIRED(pp)) {
			if (flags & CAPTURE_GET_RETIRED) {
				if (!page_unretire_pp(pp, PR_UNR_TEMP)) {
					/*
					 * Need to set capture bit and add to
					 * hash so that the page will be
					 * retired when freed.
					 */
					page_capture_add_hash(pp, szc,
					    CAPTURE_RETIRE, NULL);
					ret = 0;
					goto own_page;
				}
			} else {
				return (EBUSY);
			}
		}
		page_capture_add_hash(pp, szc, flags, datap);
		return (ret);
	}

async:
	ASSERT(PAGE_EXCL(pp));

	/* For physmem async requests, check that availrmem is sane */
	if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) ==
	    (CAPTURE_ASYNC | CAPTURE_PHYSMEM) &&
	    (availrmem < swapfs_minfree)) {
		page_unlock(pp);
		return (ENOMEM);
	}

	ret = page_capture_clean_page(pp);

	if (ret != 0) {
		/* We failed to get the page, so let's add it to the hash */
		if (!(flags & CAPTURE_ASYNC)) {
			page_capture_add_hash(pp, szc, flags, datap);
		}
		return (ret);
	}

own_page:
	ASSERT(PAGE_EXCL(pp));
	ASSERT(pp->p_szc == 0);

	/* Call the callback */
	ret = page_capture_take_action(pp, flags, datap);

	if (ret == 0) {
		return (0);
	}

	/*
	 * Note that in the failure cases from page_capture_take_action, the
	 * EXCL lock will have already been dropped.
	 */
	if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) {
		page_capture_add_hash(pp, szc, flags, datap);
	}
	return (EAGAIN);
}

int
page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
{
	int ret;

	curthread->t_flag |= T_CAPTURING;
	ret = page_itrycapture(pp, szc, flags, datap);
	curthread->t_flag &= ~T_CAPTURING;	/* known to be set; clear it */
	return (ret);
}

/*
 * When unlocking a page which has the PR_CAPTURE bit set, this routine
 * gets called to try and capture the page.
 */
void
page_unlock_capture(page_t *pp)
{
	page_capture_hash_bucket_t *bp;
	int index;
	int i;
	uint_t szc;
	uint_t flags = 0;
	void *datap;
	kmutex_t *mp;
	extern vnode_t retired_pages;

	/*
	 * We need to protect against a possible deadlock here where we own
	 * the vnode page hash mutex and want to acquire it again, as there
	 * are locations in the code where we unlock a page while holding
	 * that mutex, which can lead to the page being captured and
	 * eventually ending up here.  As we may be hashing out the old page
	 * and hashing into the retire vnode, we need to make sure we don't
	 * own either of them.
	 * Other callbacks which do hash operations also need to make sure
	 * that before they hash in to a vnode they do not currently own the
	 * vphm mutex; otherwise there will be a panic.
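	 * (In the bail-out cases below the hash entry and the PR_CAPTURE
	 * bit are left alone, so the capture will simply be retried on a
	 * later unlock or by the async thread.)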
7266 */ 7267 if (mutex_owned(page_vnode_mutex(&retired_pages))) { 7268 page_unlock(pp); 7269 return; 7270 } 7271 if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) { 7272 page_unlock(pp); 7273 return; 7274 } 7275 7276 index = PAGE_CAPTURE_HASH(pp); 7277 7278 mp = &page_capture_hash[index].pchh_mutex; 7279 mutex_enter(mp); 7280 for (i = 0; i < 2; i++) { 7281 bp = page_capture_hash[index].lists[i].next; 7282 while (bp != &page_capture_hash[index].lists[i]) { 7283 if (bp->pp == pp) { 7284 szc = bp->szc; 7285 flags = bp->flags | CAPTURE_ASYNC; 7286 datap = bp->datap; 7287 mutex_exit(mp); 7288 (void) page_trycapture(pp, szc, flags, datap); 7289 return; 7290 } 7291 bp = bp->next; 7292 } 7293 } 7294 7295 /* Failed to find page in hash so clear flags and unlock it. */ 7296 page_clrtoxic(pp, PR_CAPTURE); 7297 page_unlock(pp); 7298 7299 mutex_exit(mp); 7300 } 7301 7302 void 7303 page_capture_init() 7304 { 7305 int i; 7306 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7307 page_capture_hash[i].lists[0].next = 7308 &page_capture_hash[i].lists[0]; 7309 page_capture_hash[i].lists[0].prev = 7310 &page_capture_hash[i].lists[0]; 7311 page_capture_hash[i].lists[1].next = 7312 &page_capture_hash[i].lists[1]; 7313 page_capture_hash[i].lists[1].prev = 7314 &page_capture_hash[i].lists[1]; 7315 } 7316 7317 pc_thread_shortwait = 23 * hz; 7318 pc_thread_longwait = 1201 * hz; 7319 mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL); 7320 cv_init(&pc_cv, NULL, CV_DEFAULT, NULL); 7321 pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0, 7322 TS_RUN, minclsyspri); 7323 } 7324 7325 /* 7326 * It is necessary to scrub any failing pages prior to reboot in order to 7327 * prevent a latent error trap from occurring on the next boot. 7328 */ 7329 void 7330 page_retire_mdboot() 7331 { 7332 page_t *pp; 7333 int i, j; 7334 page_capture_hash_bucket_t *bp; 7335 7336 /* walk lists looking for pages to scrub */ 7337 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7338 if (page_capture_hash[i].num_pages == 0) 7339 continue; 7340 7341 mutex_enter(&page_capture_hash[i].pchh_mutex); 7342 7343 for (j = 0; j < 2; j++) { 7344 bp = page_capture_hash[i].lists[j].next; 7345 while (bp != &page_capture_hash[i].lists[j]) { 7346 pp = bp->pp; 7347 if (!PP_ISKVP(pp) && PP_TOXIC(pp)) { 7348 pp->p_selock = -1; /* pacify ASSERTs */ 7349 PP_CLRFREE(pp); 7350 pagescrub(pp, 0, PAGESIZE); 7351 pp->p_selock = 0; 7352 } 7353 bp = bp->next; 7354 } 7355 } 7356 mutex_exit(&page_capture_hash[i].pchh_mutex); 7357 } 7358 } 7359 7360 /* 7361 * Walk the page_capture_hash trying to capture pages and also cleanup old 7362 * entries which have expired. 
7363 */ 7364 void 7365 page_capture_async() 7366 { 7367 page_t *pp; 7368 int i; 7369 int ret; 7370 page_capture_hash_bucket_t *bp1, *bp2; 7371 uint_t szc; 7372 uint_t flags; 7373 void *datap; 7374 7375 /* If there are outstanding pages to be captured, get to work */ 7376 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7377 if (page_capture_hash[i].num_pages == 0) 7378 continue; 7379 /* Append list 1 to list 0 and then walk through list 0 */ 7380 mutex_enter(&page_capture_hash[i].pchh_mutex); 7381 bp1 = &page_capture_hash[i].lists[1]; 7382 bp2 = bp1->next; 7383 if (bp1 != bp2) { 7384 bp1->prev->next = page_capture_hash[i].lists[0].next; 7385 bp2->prev = &page_capture_hash[i].lists[0]; 7386 page_capture_hash[i].lists[0].next->prev = bp1->prev; 7387 page_capture_hash[i].lists[0].next = bp2; 7388 bp1->next = bp1; 7389 bp1->prev = bp1; 7390 } 7391 7392 /* list[1] will be empty now */ 7393 7394 bp1 = page_capture_hash[i].lists[0].next; 7395 while (bp1 != &page_capture_hash[i].lists[0]) { 7396 /* Check expiration time */ 7397 if ((lbolt > bp1->expires && bp1->expires != -1) || 7398 page_deleted(bp1->pp)) { 7399 page_capture_hash[i].lists[0].next = bp1->next; 7400 bp1->next->prev = 7401 &page_capture_hash[i].lists[0]; 7402 page_capture_hash[i].num_pages--; 7403 7404 /* 7405 * We can safely remove the PR_CAPTURE bit 7406 * without holding the EXCL lock on the page 7407 * as the PR_CAPTURE bit requres that the 7408 * page_capture_hash[].pchh_mutex be held 7409 * to modify it. 7410 */ 7411 page_clrtoxic(bp1->pp, PR_CAPTURE); 7412 mutex_exit(&page_capture_hash[i].pchh_mutex); 7413 kmem_free(bp1, sizeof (*bp1)); 7414 mutex_enter(&page_capture_hash[i].pchh_mutex); 7415 bp1 = page_capture_hash[i].lists[0].next; 7416 continue; 7417 } 7418 pp = bp1->pp; 7419 szc = bp1->szc; 7420 flags = bp1->flags; 7421 datap = bp1->datap; 7422 mutex_exit(&page_capture_hash[i].pchh_mutex); 7423 if (page_trylock(pp, SE_EXCL)) { 7424 ret = page_trycapture(pp, szc, 7425 flags | CAPTURE_ASYNC, datap); 7426 } else { 7427 ret = 1; /* move to walked hash */ 7428 } 7429 7430 if (ret != 0) { 7431 /* Move to walked hash */ 7432 (void) page_capture_move_to_walked(pp); 7433 } 7434 mutex_enter(&page_capture_hash[i].pchh_mutex); 7435 bp1 = page_capture_hash[i].lists[0].next; 7436 } 7437 7438 mutex_exit(&page_capture_hash[i].pchh_mutex); 7439 } 7440 } 7441 7442 /* 7443 * The page_capture_thread loops forever, looking to see if there are 7444 * pages still waiting to be captured. 7445 */ 7446 static void 7447 page_capture_thread(void) 7448 { 7449 callb_cpr_t c; 7450 int outstanding; 7451 int i; 7452 7453 CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr, "page_capture"); 7454 7455 mutex_enter(&pc_thread_mutex); 7456 for (;;) { 7457 outstanding = 0; 7458 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) 7459 outstanding += page_capture_hash[i].num_pages; 7460 if (outstanding) { 7461 /* 7462 * Do we really want to be this aggressive for things 7463 * other than page_retire? 7464 * Maybe have a counter for each callback type to 7465 * guide how aggressive we should be here. 7466 * Thus if there's at least one page for page_retire 7467 * we go ahead and reap like this. 
7468 */ 7469 kmem_reap(); 7470 seg_preap(); 7471 page_capture_async(); 7472 CALLB_CPR_SAFE_BEGIN(&c); 7473 (void) cv_timedwait(&pc_cv, &pc_thread_mutex, 7474 lbolt + pc_thread_shortwait); 7475 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex); 7476 } else { 7477 CALLB_CPR_SAFE_BEGIN(&c); 7478 (void) cv_timedwait(&pc_cv, &pc_thread_mutex, 7479 lbolt + pc_thread_longwait); 7480 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex); 7481 } 7482 } 7483 /*NOTREACHED*/ 7484 } 7485