/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - physical page management.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vm.h>
#include <sys/vtrace.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/tnf_probe.h>
#include <sys/condvar_impl.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/strlog.h>
#include <sys/mman.h>
#include <sys/ontrap.h>
#include <sys/lgrp.h>
#include <sys/vfs.h>

#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/pvn.h>
#include <vm/seg_kmem.h>
#include <vm/vm_dep.h>
#include <sys/vm_usage.h>
#include <fs/fs_subr.h>
#include <sys/ddi.h>
#include <sys/modctl.h>

static int nopageage = 0;

static pgcnt_t max_page_get;	/* max page_get request size in pages */
pgcnt_t total_pages = 0;	/* total number of pages (used by /proc) */

/*
 * freemem_lock protects all freemem variables:
 * availrmem.  This lock also protects the globals which track the
 * availrmem changes for accurate kernel footprint calculation.
 * See below for an explanation of these
 * globals.
 */
kmutex_t freemem_lock;
pgcnt_t availrmem;
pgcnt_t availrmem_initial;

/*
 * These globals track availrmem changes to get a more accurate
 * estimate of the kernel size.  Historically pp_kernel is used for
 * kernel size and is based on availrmem.  But availrmem is adjusted for
 * locked pages in the system not just for kernel locked pages.
 * These new counters will track the pages locked through segvn and
 * by explicit user locking.
 *
 * segvn_pages_locked : This tracks, on a global basis, how many pages
 * are currently locked because of I/O.
 *
 * pages_locked : How many pages are locked because of user specified
 * locking through mlock or plock.
 *
 * pages_useclaim,pages_claimed : These two variables track the
 * claim adjustments because of the protection changes on a segvn segment.
 *
 * All these globals are protected by the same lock which protects availrmem.
 */
pgcnt_t segvn_pages_locked;
pgcnt_t pages_locked;
pgcnt_t pages_useclaim;
pgcnt_t pages_claimed;


/*
 * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
 */
static kmutex_t	new_freemem_lock;
static uint_t	freemem_wait;	/* someone waiting for freemem */
static kcondvar_t freemem_cv;

/*
 * The logical page free list is maintained as two lists, the 'free'
 * and the 'cache' lists.
 * The free list contains those pages that should be reused first.
 *
 * The implementation of the lists is machine dependent.
 * page_get_freelist(), page_get_cachelist(),
 * page_list_sub(), and page_list_add()
 * form the interface to the machine dependent implementation.
 *
 * Pages with p_free set are on the cache list.
 * Pages with p_free and p_age set are on the free list.
 *
 * A page may be locked while on either list.
 */

/*
 * free list accounting stuff.
 *
 *
 * Spread out the value for the number of pages on the
 * page free and page cache lists.  If there is just one
 * value, then it must be under just one lock.
 * The lock contention and cache traffic are a real bother.
 *
 * When we acquire and then drop a single pcf lock
 * we can start in the middle of the array of pcf structures.
 * If we acquire more than one pcf lock at a time, we need to
 * start at the front to avoid deadlocking.
 *
 * pcf_count holds the number of pages in each pool.
 *
 * pcf_block is set when page_create_get_something() has asked the
 * PSM page freelist and page cachelist routines without specifying
 * a color and nothing came back.  This is used to block anything
 * else from moving pages from one list to the other while the
 * lists are searched again.  If a page is freed while pcf_block is
 * set, then pcf_reserve is incremented.  pcgs_unblock() takes care
 * of clearing pcf_block, doing the wakeups, etc.
 */

#if NCPU <= 4
#define	PAD	2
#define	PCF_FANOUT	4
static	uint_t	pcf_mask = PCF_FANOUT - 1;
#else
#define	PAD	10
#ifdef sun4v
#define	PCF_FANOUT	32
#else
#define	PCF_FANOUT	128
#endif
static	uint_t	pcf_mask = PCF_FANOUT - 1;
#endif

struct pcf {
	kmutex_t	pcf_lock;	/* protects the structure */
	uint_t		pcf_count;	/* page count */
	uint_t		pcf_wait;	/* number of waiters */
	uint_t		pcf_block;	/* pcgs flag to page_free() */
	uint_t		pcf_reserve;	/* pages freed after pcf_block set */
	uint_t		pcf_fill[PAD];	/* to line up on the caches */
};

static struct	pcf	pcf[PCF_FANOUT];
#define	PCF_INDEX()	((CPU->cpu_id) & (pcf_mask))

kmutex_t	pcgs_lock;		/* serializes page_create_get_ */
kmutex_t	pcgs_cagelock;		/* serializes NOSLEEP cage allocs */
kmutex_t	pcgs_wait_lock;		/* used for delay in pcgs */
static kcondvar_t	pcgs_cv;	/* cv for delay in pcgs */
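/*
 * Editor's illustrative sketch, not part of the original source: a
 * minimal model of the single-lock discipline described above, debiting
 * one page from the striped pcf counters starting at this CPU's home
 * bucket and wrapping around.  Real consumers (page_create_va() and
 * friends, later in this file) also maintain freemem and fall back to
 * page_create_wait(); this sketch omits that.  The VM_PAGE_EXAMPLES
 * guard is hypothetical and exists only to keep the example out of any
 * real build.
 */
#ifdef VM_PAGE_EXAMPLES
static int
pcf_debit_one_page_example(void)
{
	struct pcf	*p;
	uint_t		start = PCF_INDEX();	/* this CPU's home bucket */
	uint_t		i;

	for (i = 0; i < PCF_FANOUT; i++) {
		/* pcf_mask works because PCF_FANOUT is a power of two */
		p = &pcf[(start + i) & pcf_mask];
		mutex_enter(&p->pcf_lock);
		if (!p->pcf_block && p->pcf_count != 0) {
			p->pcf_count--;		/* one page accounted for */
			mutex_exit(&p->pcf_lock);
			return (1);
		}
		/* drop before moving on: only one pcf lock held at a time */
		mutex_exit(&p->pcf_lock);
	}
	return (0);		/* caller must fall back and/or wait */
}
#endif	/* VM_PAGE_EXAMPLES */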
#ifdef VM_STATS

/*
 * No locks, but so what, they are only statistics.
 */

static struct page_tcnt {
	int	pc_free_cache;		/* free's into cache list */
	int	pc_free_dontneed;	/* free's with dontneed */
	int	pc_free_pageout;	/* free's from pageout */
	int	pc_free_free;		/* free's into free list */
	int	pc_free_pages;		/* free's into large page free list */
	int	pc_destroy_pages;	/* large page destroy's */
	int	pc_get_cache;		/* get's from cache list */
	int	pc_get_free;		/* get's from free list */
	int	pc_reclaim;		/* reclaim's */
	int	pc_abortfree;		/* abort's of free pages */
	int	pc_find_hit;		/* find's that find page */
	int	pc_find_miss;		/* find's that don't find page */
	int	pc_destroy_free;	/* # of free pages destroyed */
#define	PC_HASH_CNT	(4*PAGE_HASHAVELEN)
	int	pc_find_hashlen[PC_HASH_CNT+1];
	int	pc_addclaim_pages;
	int	pc_subclaim_pages;
	int	pc_free_replacement_page[2];
	int	pc_try_demote_pages[6];
	int	pc_demote_pages[2];
} pagecnt;

uint_t	hashin_count;
uint_t	hashin_not_held;
uint_t	hashin_already;

uint_t	hashout_count;
uint_t	hashout_not_held;

uint_t	page_create_count;
uint_t	page_create_not_enough;
uint_t	page_create_not_enough_again;
uint_t	page_create_zero;
uint_t	page_create_hashout;
uint_t	page_create_page_lock_failed;
uint_t	page_create_trylock_failed;
uint_t	page_create_found_one;
uint_t	page_create_hashin_failed;
uint_t	page_create_dropped_phm;

uint_t	page_create_new;
uint_t	page_create_exists;
uint_t	page_create_putbacks;
uint_t	page_create_overshoot;

uint_t	page_reclaim_zero;
uint_t	page_reclaim_zero_locked;

uint_t	page_rename_exists;
uint_t	page_rename_count;

uint_t	page_lookup_cnt[20];
uint_t	page_lookup_nowait_cnt[10];
uint_t	page_find_cnt;
uint_t	page_exists_cnt;
uint_t	page_exists_forreal_cnt;
uint_t	page_lookup_dev_cnt;
uint_t	get_cachelist_cnt;
uint_t	page_create_cnt[10];
uint_t	alloc_pages[8];
uint_t	page_exphcontg[19];
uint_t	page_create_large_cnt[10];

/*
 * Collects statistics.
 */
#define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
	uint_t	mylen = 0; \
			\
	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash, mylen++) { \
		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
			break; \
	} \
	if ((pp) != NULL) \
		pagecnt.pc_find_hit++; \
	else \
		pagecnt.pc_find_miss++; \
	if (mylen > PC_HASH_CNT) \
		mylen = PC_HASH_CNT; \
	pagecnt.pc_find_hashlen[mylen]++; \
}

#else	/* VM_STATS */

/*
 * Don't collect statistics
 */
#define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
			break; \
	} \
}

#endif	/* VM_STATS */
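/*
 * Editor's illustrative sketch, not part of the original source: the
 * canonical locked-lookup pattern built on PAGE_HASH_SEARCH.  page_find()
 * later in this file is the real thing; this stripped-down copy only
 * shows the ordering that keeps page identities stable: compute the hash
 * index, take that bucket's mutex, search, drop the mutex.  The
 * VM_PAGE_EXAMPLES guard is hypothetical.
 */
#ifdef VM_PAGE_EXAMPLES
static page_t *
page_hash_lookup_example(vnode_t *vp, u_offset_t off)
{
	page_t		*pp;
	ulong_t		index = PAGE_HASH_FUNC(vp, off);
	kmutex_t	*phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);	/* freeze identities in this hash bucket */
	PAGE_HASH_SEARCH(index, pp, vp, off);
	mutex_exit(phm);
	return (pp);		/* NULL if no page has identity [vp, off] */
}
#endif	/* VM_PAGE_EXAMPLES */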


#ifdef DEBUG
#define	MEMSEG_SEARCH_STATS
#endif

#ifdef MEMSEG_SEARCH_STATS
struct memseg_stats {
	uint_t nsearch;
	uint_t nlastwon;
	uint_t nhashwon;
	uint_t nnotfound;
} memseg_stats;

#define	MEMSEG_STAT_INCR(v) \
	atomic_add_32(&memseg_stats.v, 1)
#else
#define	MEMSEG_STAT_INCR(x)
#endif

struct memseg *memsegs;		/* list of memory segments */


static void page_init_mem_config(void);
static int page_do_hashin(page_t *, vnode_t *, u_offset_t);
static void page_do_hashout(page_t *);
static void page_capture_init();
int page_capture_take_action(page_t *, uint_t, void *);

static void page_demote_vp_pages(page_t *);

/*
 * vm subsystem related initialization
 */
void
vm_init(void)
{
	boolean_t callb_vm_cpr(void *, int);

	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
	page_init_mem_config();
	page_retire_init();
	vm_usage_init();
	page_capture_init();
}

/*
 * This function is called at startup and when memory is added or deleted.
 */
void
init_pages_pp_maximum()
{
	static pgcnt_t p_min;
	static pgcnt_t pages_pp_maximum_startup;
	static pgcnt_t avrmem_delta;
	static int init_done;
	static int user_set;	/* true if set in /etc/system */

	if (init_done == 0) {

		/* If the user specified a value, save it */
		if (pages_pp_maximum != 0) {
			user_set = 1;
			pages_pp_maximum_startup = pages_pp_maximum;
		}

		/*
		 * The first time through, pages_pp_maximum is based
		 * on the value of availrmem just after the start-up
		 * allocations.  To preserve this relationship at run
		 * time, use a delta from availrmem_initial.
		 */
		ASSERT(availrmem_initial >= availrmem);
		avrmem_delta = availrmem_initial - availrmem;

		/* The allowable floor of pages_pp_maximum */
		p_min = tune.t_minarmem + 100;

		/* Make sure we don't come through here again. */
		init_done = 1;
	}
	/*
	 * Determine pages_pp_maximum, the number of currently available
	 * pages (availrmem) that can't be `locked'.  If not set by
	 * the user, we set it to 4% of the currently available memory
	 * plus 4MB.
	 * But we also insist that it be greater than tune.t_minarmem;
	 * otherwise a process could lock down a lot of memory, get swapped
	 * out, and never have enough to get swapped back in.
	 */
	if (user_set)
		pages_pp_maximum = pages_pp_maximum_startup;
	else
		pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25)
		    + btop(4 * 1024 * 1024);

	if (pages_pp_maximum <= p_min) {
		pages_pp_maximum = p_min;
	}
}
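/*
 * Editor's note, a worked example of the sizing rule above (numbers are
 * hypothetical): assuming 8K pages and (availrmem_initial - avrmem_delta)
 * of 125,000 pages (roughly 1GB), the default works out to
 *
 *	pages_pp_maximum = 125000 / 25 + btop(4 * 1024 * 1024)
 *			 = 5000 + 512 = 5512 pages
 *
 * i.e. 4% of the available memory plus 4MB, subject to the p_min floor.
 */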
void
set_max_page_get(pgcnt_t target_total_pages)
{
	max_page_get = target_total_pages / 2;
}

static pgcnt_t pending_delete;

/*ARGSUSED*/
static void
page_mem_config_post_add(
	void *arg,
	pgcnt_t delta_pages)
{
	set_max_page_get(total_pages - pending_delete);
	init_pages_pp_maximum();
}

/*ARGSUSED*/
static int
page_mem_config_pre_del(
	void *arg,
	pgcnt_t delta_pages)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	return (0);
}

/*ARGSUSED*/
static void
page_mem_config_post_del(
	void *arg,
	pgcnt_t delta_pages,
	int cancelled)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	if (!cancelled)
		init_pages_pp_maximum();
}

static kphysm_setup_vector_t page_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	page_mem_config_post_add,
	page_mem_config_pre_del,
	page_mem_config_post_del,
};

static void
page_init_mem_config(void)
{
	int ret;

	ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL);
	ASSERT(ret == 0);
}

/*
 * Evenly spread out the PCF counters for large free pages
 */
static void
page_free_large_ctr(pgcnt_t npages)
{
	static struct pcf	*p = pcf;
	pgcnt_t			lump;

	freemem += npages;

	lump = roundup(npages, PCF_FANOUT) / PCF_FANOUT;

	while (npages > 0) {

		ASSERT(!p->pcf_block);

		if (lump < npages) {
			p->pcf_count += (uint_t)lump;
			npages -= lump;
		} else {
			p->pcf_count += (uint_t)npages;
			npages = 0;
		}

		ASSERT(!p->pcf_wait);

		if (++p > &pcf[PCF_FANOUT - 1])
			p = pcf;
	}

	ASSERT(npages == 0);
}

/*
 * Add a physical chunk of memory to the system free lists during startup.
 * Platform specific startup() allocates the memory for the page structs.
 *
 * num	- number of page structures
 * base - page number (pfn) to be associated with the first page.
 *
 * Since we are doing this during startup (ie. single threaded), we will
 * use shortcut routines to avoid any locking overhead while putting all
 * these pages on the freelists.
 *
 * NOTE: Any changes performed to page_free(), must also be performed to
 *	 add_physmem() since this is how we initialize all page_t's at
 *	 boot time.
 */
void
add_physmem(
	page_t	*pp,
	pgcnt_t	num,
	pfn_t	pnum)
{
	page_t	*root = NULL;
	uint_t	szc = page_num_pagesizes() - 1;
	pgcnt_t	large = page_get_pagecnt(szc);
	pgcnt_t	cnt = 0;

	TRACE_2(TR_FAC_VM, TR_PAGE_INIT,
	    "add_physmem:pp %p num %lu", pp, num);

	/*
	 * Arbitrarily limit the max page_get request
	 * to 1/2 of the page structs we have.
	 */
	total_pages += num;
	set_max_page_get(total_pages);

	PLCNT_MODIFY_MAX(pnum, (long)num);

	/*
	 * The physical space for the pages array
	 * representing ram pages has already been
	 * allocated.  Here we initialize each lock
	 * in the page structure, and put each on
	 * the free list
	 */
	for (; num; pp++, pnum++, num--) {

		/*
		 * this needs to fill in the page number
		 * and do any other arch specific initialization
		 */
		add_physmem_cb(pp, pnum);

		pp->p_lckcnt = 0;
		pp->p_cowcnt = 0;
		pp->p_slckcnt = 0;

		/*
		 * Initialize the page lock as unlocked, since nobody
		 * can see or access this page yet.
		 */
		pp->p_selock = 0;

		/*
		 * Initialize IO lock
		 */
		page_iolock_init(pp);

		/*
		 * initialize other fields in the page_t
		 */
		PP_SETFREE(pp);
		page_clr_all_props(pp);
		PP_SETAGED(pp);
		pp->p_offset = (u_offset_t)-1;
		pp->p_next = pp;
		pp->p_prev = pp;

		/*
		 * Simple case: System doesn't support large pages.
		 */
		if (szc == 0) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Handle unaligned pages, we collect them up onto
		 * the root page until we have a full large page.
		 */
		if (!IS_P2ALIGNED(pnum, large)) {

			/*
			 * If not in a large page,
			 * just free as small page.
			 */
			if (root == NULL) {
				pp->p_szc = 0;
				page_free_at_startup(pp);
				continue;
			}

			/*
			 * Link a constituent page into the large page.
			 */
			pp->p_szc = szc;
			page_list_concat(&root, &pp);

			/*
			 * When large page is fully formed, free it.
			 */
			if (++cnt == large) {
				page_free_large_ctr(cnt);
				page_list_add_pages(root, PG_LIST_ISINIT);
				root = NULL;
				cnt = 0;
			}
			continue;
		}

		/*
		 * At this point we have a page number which
		 * is aligned.  We assert that we aren't already
		 * in a different large page.
		 */
		ASSERT(IS_P2ALIGNED(pnum, large));
		ASSERT(root == NULL && cnt == 0);

		/*
		 * If insufficient number of pages left to form
		 * a large page, just free the small page.
		 */
		if (num < large) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Otherwise start a new large page.
		 */
		pp->p_szc = szc;
		cnt++;
		root = pp;
	}
	ASSERT(root == NULL && cnt == 0);
}

/*
 * Find a page representing the specified [vp, offset].
 * If we find the page but it is in transit coming in,
 * it will have an "exclusive" lock and we wait for
 * the i/o to complete.  A page found on the free list
 * is always reclaimed and then locked.  On success, the page
 * is locked, its data is valid and it isn't on the free
 * list, while a NULL is returned if the page doesn't exist.
 */
page_t *
page_lookup(vnode_t *vp, u_offset_t off, se_t se)
{
	return (page_lookup_create(vp, off, se, NULL, NULL, 0));
}

/*
 * Find a page representing the specified [vp, offset].
 * We either return the one we found or, if passed in,
 * create one with identity of [vp, offset] of the
 * pre-allocated page.  If we find an existing page but it is
 * in transit coming in, it will have an "exclusive" lock
 * and we wait for the i/o to complete.  A page found on
 * the free list is always reclaimed and then locked.
 * On success, the page is locked, its data is valid and
 * it isn't on the free list, while a NULL is returned
 * if the page doesn't exist and newpp is NULL;
 */
page_t *
page_lookup_create(
	vnode_t *vp,
	u_offset_t off,
	se_t se,
	page_t *newpp,
	spgcnt_t *nrelocp,
	int flags)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		hash_locked;
	uint_t		es;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_lookup_cnt[0]);
	ASSERT(newpp ? PAGE_EXCL(newpp) : 1);

	/*
	 * Acquire the appropriate page hash lock since
	 * we have to search the hash list.  Pages that
	 * hash to this list can't change identity while
	 * this lock is held.
	 */
	hash_locked = 0;
	index = PAGE_HASH_FUNC(vp, off);
	phm = NULL;
top:
	PAGE_HASH_SEARCH(index, pp, vp, off);
	if (pp != NULL) {
		VM_STAT_ADD(page_lookup_cnt[1]);
		es = (newpp != NULL) ? 1 : 0;
		es |= flags;
		if (!hash_locked) {
			VM_STAT_ADD(page_lookup_cnt[2]);
			if (!page_try_reclaim_lock(pp, se, es)) {
				/*
				 * On a miss, acquire the phm.  Then
				 * next time, page_lock() will be called,
				 * causing a wait if the page is busy.
				 * Just looping with page_trylock() would
				 * get pretty boring.
				 */
				VM_STAT_ADD(page_lookup_cnt[3]);
				phm = PAGE_HASH_MUTEX(index);
				mutex_enter(phm);
				hash_locked = 1;
				goto top;
			}
		} else {
			VM_STAT_ADD(page_lookup_cnt[4]);
			if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) {
				VM_STAT_ADD(page_lookup_cnt[5]);
				goto top;
			}
		}

		/*
		 * Since `pp' is locked it can not change identity now.
		 * Reconfirm we locked the correct page.
		 *
		 * Both the p_vnode and p_offset *must* be cast volatile
		 * to force a reload of their values: The PAGE_HASH_SEARCH
		 * macro will have stuffed p_vnode and p_offset into
		 * registers before calling page_trylock(); another thread,
		 * actually holding the hash lock, could have changed the
		 * page's identity in memory, but our registers would not
		 * be changed, fooling the reconfirmation.  If the hash
		 * lock was held during the search, the casting would
		 * not be needed.
		 */
		VM_STAT_ADD(page_lookup_cnt[6]);
		if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
		    ((volatile u_offset_t)(pp->p_offset) != off)) {
			VM_STAT_ADD(page_lookup_cnt[7]);
			if (hash_locked) {
				panic("page_lookup_create: lost page %p",
				    (void *)pp);
				/*NOTREACHED*/
			}
			page_unlock(pp);
			phm = PAGE_HASH_MUTEX(index);
			mutex_enter(phm);
			hash_locked = 1;
			goto top;
		}

		/*
		 * If page_trylock() was called, then pp may still be on
		 * the cachelist (can't be on the free list, it would not
		 * have been found in the search).  If it is on the
		 * cachelist it must be pulled now.  To pull the page from
		 * the cachelist, it must be exclusively locked.
		 *
		 * The other big difference between page_trylock() and
		 * page_lock(), is that page_lock() will pull the
		 * page from whatever free list (the cache list in this
		 * case) the page is on.  If page_trylock() was used
		 * above, then we have to do the reclaim ourselves.
		 */
		if ((!hash_locked) && (PP_ISFREE(pp))) {
			ASSERT(PP_ISAGED(pp) == 0);
			VM_STAT_ADD(page_lookup_cnt[8]);

			/*
			 * page_reclaim will ensure that we
			 * have this page exclusively
			 */

			if (!page_reclaim(pp, NULL)) {
				/*
				 * Page_reclaim dropped whatever lock
				 * we held.
				 */
				VM_STAT_ADD(page_lookup_cnt[9]);
				phm = PAGE_HASH_MUTEX(index);
				mutex_enter(phm);
				hash_locked = 1;
				goto top;
			} else if (se == SE_SHARED && newpp == NULL) {
				VM_STAT_ADD(page_lookup_cnt[10]);
				page_downgrade(pp);
			}
		}

		if (hash_locked) {
			mutex_exit(phm);
		}

		if (newpp != NULL && pp->p_szc < newpp->p_szc &&
		    PAGE_EXCL(pp) && nrelocp != NULL) {
			ASSERT(nrelocp != NULL);
			(void) page_relocate(&pp, &newpp, 1, 1, nrelocp,
			    NULL);
			if (*nrelocp > 0) {
				VM_STAT_COND_ADD(*nrelocp == 1,
				    page_lookup_cnt[11]);
				VM_STAT_COND_ADD(*nrelocp > 1,
				    page_lookup_cnt[12]);
				pp = newpp;
				se = SE_EXCL;
			} else {
				if (se == SE_SHARED) {
					page_downgrade(pp);
				}
				VM_STAT_ADD(page_lookup_cnt[13]);
			}
		} else if (newpp != NULL && nrelocp != NULL) {
			if (PAGE_EXCL(pp) && se == SE_SHARED) {
				page_downgrade(pp);
			}
			VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc,
			    page_lookup_cnt[14]);
			VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc,
			    page_lookup_cnt[15]);
			VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc,
			    page_lookup_cnt[16]);
		} else if (newpp != NULL && PAGE_EXCL(pp)) {
			se = SE_EXCL;
		}
	} else if (!hash_locked) {
		VM_STAT_ADD(page_lookup_cnt[17]);
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		hash_locked = 1;
		goto top;
	} else if (newpp != NULL) {
		/*
		 * If we have a preallocated page then
		 * insert it now and basically behave like
		 * page_create.
		 */
		VM_STAT_ADD(page_lookup_cnt[18]);
		/*
		 * Since we hold the page hash mutex and
		 * just searched for this page, page_hashin
		 * had better not fail.  If it does, that
		 * means some thread did not follow the
		 * page hash mutex rules.  Panic now and
		 * get it over with.  As usual, go down
		 * holding all the locks.
		 */
		ASSERT(MUTEX_HELD(phm));
		if (!page_hashin(newpp, vp, off, phm)) {
			ASSERT(MUTEX_HELD(phm));
			panic("page_lookup_create: hashin failed %p %p %llx %p",
			    (void *)newpp, (void *)vp, off, (void *)phm);
			/*NOTREACHED*/
		}
		ASSERT(MUTEX_HELD(phm));
		mutex_exit(phm);
		phm = NULL;
		page_set_props(newpp, P_REF);
		page_io_lock(newpp);
		pp = newpp;
		se = SE_EXCL;
	} else {
		VM_STAT_ADD(page_lookup_cnt[19]);
		mutex_exit(phm);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1);

	return (pp);
}
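/*
 * Editor's illustrative sketch, not part of the original source: a
 * typical consumer of page_lookup().  On success the page is locked
 * SE_SHARED, off the free list, and its [vp, off] identity is stable
 * until page_unlock().  The vnode and offset are assumed to come from
 * the caller's context; the VM_PAGE_EXAMPLES guard is hypothetical.
 */
#ifdef VM_PAGE_EXAMPLES
static int
page_lookup_usage_example(vnode_t *vp, u_offset_t off)
{
	page_t *pp;

	pp = page_lookup(vp, off, SE_SHARED);
	if (pp == NULL)
		return (0);	/* no such page; caller must create one */

	/* ... the page's identity and data are stable here ... */

	page_unlock(pp);	/* release the shared lock */
	return (1);
}
#endif	/* VM_PAGE_EXAMPLES */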
/*
 * Search the hash list for the page representing the
 * specified [vp, offset] and return it locked.  Skip
 * free pages and pages that cannot be locked as requested.
 * Used while attempting to kluster pages.
 */
page_t *
page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		locked;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_lookup_nowait_cnt[0]);

	index = PAGE_HASH_FUNC(vp, off);
	PAGE_HASH_SEARCH(index, pp, vp, off);
	locked = 0;
	if (pp == NULL) {
top:
		VM_STAT_ADD(page_lookup_nowait_cnt[1]);
		locked = 1;
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		PAGE_HASH_SEARCH(index, pp, vp, off);
	}

	if (pp == NULL || PP_ISFREE(pp)) {
		VM_STAT_ADD(page_lookup_nowait_cnt[2]);
		pp = NULL;
	} else {
		if (!page_trylock(pp, se)) {
			VM_STAT_ADD(page_lookup_nowait_cnt[3]);
			pp = NULL;
		} else {
			VM_STAT_ADD(page_lookup_nowait_cnt[4]);
			/*
			 * See the comment in page_lookup()
			 */
			if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
			    ((u_offset_t)(pp->p_offset) != off)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[5]);
				if (locked) {
					panic("page_lookup_nowait %p",
					    (void *)pp);
					/*NOTREACHED*/
				}
				page_unlock(pp);
				goto top;
			}
			if (PP_ISFREE(pp)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[6]);
				page_unlock(pp);
				pp = NULL;
			}
		}
	}
	if (locked) {
		VM_STAT_ADD(page_lookup_nowait_cnt[7]);
		mutex_exit(phm);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	return (pp);
}

/*
 * Search the hash list for a page with the specified [vp, off]
 * that is known to exist and is already locked.  This routine
 * is typically used by segment SOFTUNLOCK routines.
 */
page_t *
page_find(vnode_t *vp, u_offset_t off)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_find_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	PAGE_HASH_SEARCH(index, pp, vp, off);
	mutex_exit(phm);

	ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
	return (pp);
}

/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system.  Obviously this should
 * only be considered as a hint since nothing prevents the
 * page from disappearing or appearing immediately after
 * the return from this routine.  Consequently, we don't
 * even bother to lock the list.
 */
page_t *
page_exists(vnode_t *vp, u_offset_t off)
{
	page_t	*pp;
	ulong_t	index;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_exists_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	PAGE_HASH_SEARCH(index, pp, vp, off);

	return (pp);
}
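/*
 * Editor's illustrative sketch, not part of the original source: because
 * page_exists() is only a hint, a caller that needs a stable answer must
 * re-check under a real lock, e.g. by following up with page_lookup().
 * The cheap unlocked probe followed by a locked confirmation is the
 * intended pattern.  The VM_PAGE_EXAMPLES guard is hypothetical.
 */
#ifdef VM_PAGE_EXAMPLES
static page_t *
page_probe_then_lock_example(vnode_t *vp, u_offset_t off)
{
	if (page_exists(vp, off) == NULL)
		return (NULL);	/* probably absent; racy, but cheap */

	/* authoritative: returns the page locked, or NULL if it raced away */
	return (page_lookup(vp, off, SE_SHARED));
}
#endif	/* VM_PAGE_EXAMPLES */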
/*
 * Determine if physically contiguous pages exist for the [vp, off] -
 * [vp, off + page_size(szc)) range.  If they exist and ppa is not NULL,
 * fill the ppa array with these pages locked SHARED.  If necessary
 * reclaim pages from the freelist.  Return 1 if contiguous pages exist
 * and 0 otherwise.
 *
 * If we fail to lock the pages, still return 1 if the pages exist and
 * are contiguous.  But in this case the return value is just a hint and
 * the ppa array won't be filled.  The caller should initialize ppa[0] as
 * NULL to distinguish the two cases.
 *
 * Returns 0 if the pages don't exist or are not physically contiguous.
 *
 * This routine doesn't work for anonymous (swapfs) pages.
 */
int
page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[])
{
	pgcnt_t pages;
	pfn_t pfn;
	page_t *rootpp;
	pgcnt_t i;
	pgcnt_t j;
	u_offset_t save_off = off;
	ulong_t index;
	kmutex_t *phm;
	page_t *pp;
	uint_t pszc;
	int loopcnt = 0;

	ASSERT(szc != 0);
	ASSERT(vp != NULL);
	ASSERT(!IS_SWAPFSVP(vp));
	ASSERT(!VN_ISKAS(vp));

again:
	if (++loopcnt > 3) {
		VM_STAT_ADD(page_exphcontg[0]);
		return (0);
	}

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	PAGE_HASH_SEARCH(index, pp, vp, off);
	mutex_exit(phm);

	VM_STAT_ADD(page_exphcontg[1]);

	if (pp == NULL) {
		VM_STAT_ADD(page_exphcontg[2]);
		return (0);
	}

	pages = page_get_pagecnt(szc);
	rootpp = pp;
	pfn = rootpp->p_pagenum;

	if ((pszc = pp->p_szc) >= szc && ppa != NULL) {
		VM_STAT_ADD(page_exphcontg[3]);
		if (!page_trylock(pp, SE_SHARED)) {
			VM_STAT_ADD(page_exphcontg[4]);
			return (1);
		}
		if (pp->p_szc != pszc || pp->p_vnode != vp ||
		    pp->p_offset != off) {
			VM_STAT_ADD(page_exphcontg[5]);
			page_unlock(pp);
			off = save_off;
			goto again;
		}
		/*
		 * Since szc was non-zero and the vnode and offset matched
		 * after we locked the page, it can't become free on us.
		 */
		ASSERT(!PP_ISFREE(pp));
		if (!IS_P2ALIGNED(pfn, pages)) {
			page_unlock(pp);
			return (0);
		}
		ppa[0] = pp;
		pp++;
		off += PAGESIZE;
		pfn++;
		for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
			if (!page_trylock(pp, SE_SHARED)) {
				VM_STAT_ADD(page_exphcontg[6]);
				pp--;
				while (i-- > 0) {
					page_unlock(pp);
					pp--;
				}
				ppa[0] = NULL;
				return (1);
			}
			if (pp->p_szc != pszc) {
				VM_STAT_ADD(page_exphcontg[7]);
				page_unlock(pp);
				pp--;
				while (i-- > 0) {
					page_unlock(pp);
					pp--;
				}
				ppa[0] = NULL;
				off = save_off;
				goto again;
			}
			/*
			 * szc is the same as for the previous, already
			 * locked pages with the right identity.  Since this
			 * page had the correct szc after we locked it, it
			 * can't get freed or destroyed and therefore must
			 * have the expected identity.
			 */
			ASSERT(!PP_ISFREE(pp));
			if (pp->p_vnode != vp ||
			    pp->p_offset != off) {
				panic("page_exists_physcontig: "
				    "large page identity doesn't match");
			}
			ppa[i] = pp;
			ASSERT(pp->p_pagenum == pfn);
		}
		VM_STAT_ADD(page_exphcontg[8]);
		ppa[pages] = NULL;
		return (1);
	} else if (pszc >= szc) {
		VM_STAT_ADD(page_exphcontg[9]);
		if (!IS_P2ALIGNED(pfn, pages)) {
			return (0);
		}
		return (1);
	}

	if (!IS_P2ALIGNED(pfn, pages)) {
		VM_STAT_ADD(page_exphcontg[10]);
		return (0);
	}

	if (page_numtomemseg_nolock(pfn) !=
	    page_numtomemseg_nolock(pfn + pages - 1)) {
		VM_STAT_ADD(page_exphcontg[11]);
		return (0);
	}

	/*
	 * We loop up 4 times across pages to promote page size.
	 * We're extra cautious to promote page size atomically with respect
	 * to everybody else.  But we can probably optimize into 1 loop if
	 * this becomes an issue.
	 */

	for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
		ASSERT(pp->p_pagenum == pfn);
		if (!page_trylock(pp, SE_EXCL)) {
			VM_STAT_ADD(page_exphcontg[12]);
			break;
		}
		if (pp->p_vnode != vp ||
		    pp->p_offset != off) {
			VM_STAT_ADD(page_exphcontg[13]);
			page_unlock(pp);
			break;
		}
		if (pp->p_szc >= szc) {
			ASSERT(i == 0);
			page_unlock(pp);
			off = save_off;
			goto again;
		}
	}

	if (i != pages) {
		VM_STAT_ADD(page_exphcontg[14]);
		--pp;
		while (i-- > 0) {
			page_unlock(pp);
			--pp;
		}
		return (0);
	}

	pp = rootpp;
	for (i = 0; i < pages; i++, pp++) {
		if (PP_ISFREE(pp)) {
			VM_STAT_ADD(page_exphcontg[15]);
			ASSERT(!PP_ISAGED(pp));
			ASSERT(pp->p_szc == 0);
			if (!page_reclaim(pp, NULL)) {
				break;
			}
		} else {
			ASSERT(pp->p_szc < szc);
			VM_STAT_ADD(page_exphcontg[16]);
			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
		}
	}
	if (i < pages) {
		VM_STAT_ADD(page_exphcontg[17]);
		/*
		 * page_reclaim failed because we were out of memory.
		 * Drop the rest of the locks and return because this page
		 * must be already reallocated anyway.
		 */
		pp = rootpp;
		for (j = 0; j < pages; j++, pp++) {
			if (j != i) {
				page_unlock(pp);
			}
		}
		return (0);
	}

	off = save_off;
	pp = rootpp;
	for (i = 0; i < pages; i++, pp++, off += PAGESIZE) {
		ASSERT(PAGE_EXCL(pp));
		ASSERT(!PP_ISFREE(pp));
		ASSERT(!hat_page_is_mapped(pp));
		ASSERT(pp->p_vnode == vp);
		ASSERT(pp->p_offset == off);
		pp->p_szc = szc;
	}
	pp = rootpp;
	for (i = 0; i < pages; i++, pp++) {
		if (ppa == NULL) {
			page_unlock(pp);
		} else {
			ppa[i] = pp;
			page_downgrade(ppa[i]);
		}
	}
	if (ppa != NULL) {
		ppa[pages] = NULL;
	}
	VM_STAT_ADD(page_exphcontg[18]);
	ASSERT(vp->v_pages != NULL);
	return (1);
}
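/*
 * Editor's illustrative sketch, not part of the original source: calling
 * page_exists_physcontig() per the contract above.  The ppa array must
 * hold page_get_pagecnt(szc) + 1 entries, and ppa[0] must be primed to
 * NULL so that a "pages exist but could not be locked" return of 1 can
 * be told apart from a fully locked return of 1.  MAX_PPA_EXAMPLE is a
 * hypothetical caller-chosen bound; the VM_PAGE_EXAMPLES guard likewise.
 */
#ifdef VM_PAGE_EXAMPLES
#define	MAX_PPA_EXAMPLE	(512 + 1)	/* assumes pages + 1 fits */
static void
physcontig_usage_example(vnode_t *vp, u_offset_t off, uint_t szc)
{
	page_t	*ppa[MAX_PPA_EXAMPLE];
	pgcnt_t	pages = page_get_pagecnt(szc);
	pgcnt_t	i;

	ASSERT(pages + 1 <= MAX_PPA_EXAMPLE);
	ppa[0] = NULL;			/* distinguish the hint case */
	if (page_exists_physcontig(vp, off, szc, ppa) && ppa[0] != NULL) {
		/* all 'pages' pages are locked SE_SHARED; use, then drop */
		for (i = 0; i < pages; i++)
			page_unlock(ppa[i]);
	}
}
#endif	/* VM_PAGE_EXAMPLES */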
/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system and if so return its
 * size code.  Obviously this should only be considered as
 * a hint since nothing prevents the page from disappearing
 * or appearing immediately after the return from this routine.
 */
int
page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	int		rc = 0;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	ASSERT(szc != NULL);
	VM_STAT_ADD(page_exists_forreal_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	PAGE_HASH_SEARCH(index, pp, vp, off);
	if (pp != NULL) {
		*szc = pp->p_szc;
		rc = 1;
	}
	mutex_exit(phm);
	return (rc);
}

/* wakeup threads waiting for pages in page_create_get_something() */
void
wakeup_pcgs(void)
{
	if (!CV_HAS_WAITERS(&pcgs_cv))
		return;
	cv_broadcast(&pcgs_cv);
}

/*
 * 'freemem' is used all over the kernel as an indication of how many
 * pages are free (either on the cache list or on the free page list)
 * in the system.  In very few places is a really accurate 'freemem'
 * needed.  To avoid contention on the lock protecting a single freemem
 * value, it was spread out into NCPU buckets.  Set_freemem sets freemem
 * to the total of all NCPU buckets.  It is called from clock() on each
 * TICK.
 */
void
set_freemem()
{
	struct pcf	*p;
	ulong_t		t;
	uint_t		i;

	t = 0;
	p = pcf;
	for (i = 0; i < PCF_FANOUT; i++) {
		t += p->pcf_count;
		p++;
	}
	freemem = t;

	/*
	 * Don't worry about grabbing mutex.  It's not that
	 * critical if we miss a tick or two.  This is
	 * where we wakeup possible delayers in
	 * page_create_get_something().
	 */
	wakeup_pcgs();
}

ulong_t
get_freemem()
{
	struct pcf	*p;
	ulong_t		t;
	uint_t		i;

	t = 0;
	p = pcf;
	for (i = 0; i < PCF_FANOUT; i++) {
		t += p->pcf_count;
		p++;
	}
	/*
	 * We just calculated it, might as well set it.
	 */
	freemem = t;
	return (t);
}

/*
 * Acquire all of the page cache & free (pcf) locks.
 */
void
pcf_acquire_all()
{
	struct pcf	*p;
	uint_t		i;

	p = pcf;
	for (i = 0; i < PCF_FANOUT; i++) {
		mutex_enter(&p->pcf_lock);
		p++;
	}
}

/*
 * Release all the pcf_locks.
 */
void
pcf_release_all()
{
	struct pcf	*p;
	uint_t		i;

	p = pcf;
	for (i = 0; i < PCF_FANOUT; i++) {
		mutex_exit(&p->pcf_lock);
		p++;
	}
}

/*
 * Inform the VM system that we need some pages freed up.
 * Calls must be symmetric, e.g.:
 *
 *	page_needfree(100);
 *	wait a bit;
 *	page_needfree(-100);
 */
void
page_needfree(spgcnt_t npages)
{
	mutex_enter(&new_freemem_lock);
	needfree += npages;
	mutex_exit(&new_freemem_lock);
}

/*
 * Throttle for page_create(): try to prevent freemem from dropping
 * below throttlefree.  We can't provide a 100% guarantee because
 * KM_NOSLEEP allocations, page_reclaim(), and various other things
 * nibble away at the freelist.  However, we can block all PG_WAIT
 * allocations until memory becomes available.  The motivation is
 * that several things can fall apart when there's no free memory:
 *
 * (1) If pageout() needs memory to push a page, the system deadlocks.
 *
 * (2) By (broken) specification, timeout(9F) can neither fail nor
 *     block, so it has no choice but to panic the system if it
 *     cannot allocate a callout structure.
 *
 * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block;
 *     it panics if it cannot allocate a callback structure.
 *
 * (4) Untold numbers of third-party drivers have not yet been hardened
 *     against KM_NOSLEEP and/or allocb() failures; they simply assume
 *     success and panic the system with a data fault on failure.
 *     (The long-term solution to this particular problem is to ship
 *     hostile fault-injecting DEBUG kernels with the DDK.)
 *
 * It is theoretically impossible to guarantee success of non-blocking
 * allocations, but in practice, this throttle is very hard to break.
 */
static int
page_create_throttle(pgcnt_t npages, int flags)
{
	ulong_t	fm;
	uint_t	i;
	pgcnt_t tf;	/* effective value of throttlefree */

	/*
	 * Never deny pages when:
	 * - it's a thread that cannot block [NOMEMWAIT()]
	 * - the allocation cannot block and must not fail
	 * - the allocation cannot block and is pageout dispensated
	 */
	if (NOMEMWAIT() ||
	    ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) ||
	    ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE))
		return (1);

	/*
	 * If the allocation can't block, we look favorably upon it
	 * unless we're below pageout_reserve.  In that case we fail
	 * the allocation because we want to make sure there are a few
	 * pages available for pageout.
	 */
	if ((flags & PG_WAIT) == 0)
		return (freemem >= npages + pageout_reserve);

	/* Calculate the effective throttlefree value */
	tf = throttlefree -
	    ((flags & PG_PUSHPAGE) ? pageout_reserve : 0);

	cv_signal(&proc_pageout->p_cv);

	while (freemem < npages + tf) {
		pcf_acquire_all();
		mutex_enter(&new_freemem_lock);
		fm = 0;
		for (i = 0; i < PCF_FANOUT; i++) {
			fm += pcf[i].pcf_count;
			pcf[i].pcf_wait++;
			mutex_exit(&pcf[i].pcf_lock);
		}
		freemem = fm;
		needfree += npages;
		freemem_wait++;
		cv_wait(&freemem_cv, &new_freemem_lock);
		freemem_wait--;
		needfree -= npages;
		mutex_exit(&new_freemem_lock);
	}
	return (1);
}

/*
 * page_create_wait() is called to either coalesce pages from the
 * different pcf buckets or to wait because there simply are not
 * enough pages to satisfy the caller's request.
 *
 * Sadly, this is called from platform/vm/vm_machdep.c
 */
int
page_create_wait(size_t npages, uint_t flags)
{
	pgcnt_t		total;
	uint_t		i;
	struct pcf	*p;

	/*
	 * Wait until there are enough free pages to satisfy our
	 * entire request.
	 * We set needfree += npages before prodding pageout, to make sure
	 * it does real work when npages > lotsfree > freemem.
	 */
	VM_STAT_ADD(page_create_not_enough);

	ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1);
checkagain:
	if ((flags & PG_NORELOC) &&
	    kcage_freemem < kcage_throttlefree + npages)
		(void) kcage_create_throttle(npages, flags);

	if (freemem < npages + throttlefree)
		if (!page_create_throttle(npages, flags))
			return (0);

	/*
	 * Since page_create_va() looked at every
	 * bucket, assume we are going to have to wait.
	 * Get all of the pcf locks.
	 */
	total = 0;
	p = pcf;
	for (i = 0; i < PCF_FANOUT; i++) {
		mutex_enter(&p->pcf_lock);
		total += p->pcf_count;
		if (total >= npages) {
			/*
			 * Wow!  There are enough pages laying around
			 * to satisfy the request.  Do the accounting,
			 * drop the locks we acquired, and go back.
			 *
			 * freemem is not protected by any lock.  So,
			 * we cannot have any assertion containing
			 * freemem.
			 */
			freemem -= npages;

			while (p >= pcf) {
				if (p->pcf_count <= npages) {
					npages -= p->pcf_count;
					p->pcf_count = 0;
				} else {
					p->pcf_count -= (uint_t)npages;
					npages = 0;
				}
				mutex_exit(&p->pcf_lock);
				p--;
			}
			ASSERT(npages == 0);
			return (1);
		}
		p++;
	}

	/*
	 * All of the pcf locks are held, there are not enough pages
	 * to satisfy the request (total < npages).
	 * Be sure to acquire the new_freemem_lock before dropping
	 * the pcf locks.  This prevents dropping wakeups in page_free().
	 * The order is always pcf_lock then new_freemem_lock.
	 *
	 * Since we hold all the pcf locks, it is a good time to set freemem.
	 *
	 * If the caller does not want to wait, return now.
	 * Else turn the pageout daemon loose to find something
	 * and wait till it does.
	 */
	freemem = total;

	if ((flags & PG_WAIT) == 0) {
		pcf_release_all();

		TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM,
		    "page_create_nomem:npages %ld freemem %ld",
		    npages, freemem);
		return (0);
	}

	ASSERT(proc_pageout != NULL);
	cv_signal(&proc_pageout->p_cv);

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START,
	    "page_create_sleep_start: freemem %ld needfree %ld",
	    freemem, needfree);

	/*
	 * We are going to wait.
	 * We currently hold all of the pcf_locks,
	 * get the new_freemem_lock (it protects freemem_wait),
	 * before dropping the pcf_locks.
	 */
	mutex_enter(&new_freemem_lock);

	p = pcf;
	for (i = 0; i < PCF_FANOUT; i++) {
		p->pcf_wait++;
		mutex_exit(&p->pcf_lock);
		p++;
	}

	needfree += npages;
	freemem_wait++;

	cv_wait(&freemem_cv, &new_freemem_lock);

	freemem_wait--;
	needfree -= npages;

	mutex_exit(&new_freemem_lock);

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END,
	    "page_create_sleep_end: freemem %ld needfree %ld",
	    freemem, needfree);

	VM_STAT_ADD(page_create_not_enough_again);
	goto checkagain;
}

/*
 * A routine to do the opposite of page_create_wait().
 */
void
page_create_putback(spgcnt_t npages)
{
	struct pcf	*p;
	pgcnt_t		lump;
	uint_t		*which;

	/*
	 * When a contiguous lump is broken up, we have to
	 * deal with lots of pages (min 64) so lets spread
	 * the wealth around.
	 */
	lump = roundup(npages, PCF_FANOUT) / PCF_FANOUT;
	freemem += npages;

	for (p = pcf; (npages > 0) && (p < &pcf[PCF_FANOUT]); p++) {
		which = &p->pcf_count;

		mutex_enter(&p->pcf_lock);

		if (p->pcf_block) {
			which = &p->pcf_reserve;
		}

		if (lump < npages) {
			*which += (uint_t)lump;
			npages -= lump;
		} else {
			*which += (uint_t)npages;
			npages = 0;
		}

		if (p->pcf_wait) {
			mutex_enter(&new_freemem_lock);
			/*
			 * Check to see if some other thread
			 * is actually waiting.  Another bucket
			 * may have woken it up by now.  If there
			 * are no waiters, then set our pcf_wait
			 * count to zero to avoid coming in here
			 * next time.
			 */
			if (freemem_wait) {
				if (npages > 1) {
					cv_broadcast(&freemem_cv);
				} else {
					cv_signal(&freemem_cv);
				}
				p->pcf_wait--;
			} else {
				p->pcf_wait = 0;
			}
			mutex_exit(&new_freemem_lock);
		}
		mutex_exit(&p->pcf_lock);
	}
	ASSERT(npages == 0);
}

/*
 * A helper routine for page_create_get_something.
 * The indenting got too deep down there.
 * Unblock the pcf counters.  Any pages freed after
 * pcf_block got set are moved to pcf_count and
 * wakeups (cv_broadcast() or cv_signal()) are done as needed.
 */
static void
pcgs_unblock(void)
{
	int		i;
	struct pcf	*p;

	/* Update freemem while we're here. */
	freemem = 0;
	p = pcf;
	for (i = 0; i < PCF_FANOUT; i++) {
		mutex_enter(&p->pcf_lock);
		ASSERT(p->pcf_count == 0);
		p->pcf_count = p->pcf_reserve;
		p->pcf_block = 0;
		freemem += p->pcf_count;
		if (p->pcf_wait) {
			mutex_enter(&new_freemem_lock);
			if (freemem_wait) {
				if (p->pcf_reserve > 1) {
					cv_broadcast(&freemem_cv);
					p->pcf_wait = 0;
				} else {
					cv_signal(&freemem_cv);
					p->pcf_wait--;
				}
			} else {
				p->pcf_wait = 0;
			}
			mutex_exit(&new_freemem_lock);
		}
		p->pcf_reserve = 0;
		mutex_exit(&p->pcf_lock);
		p++;
	}
}
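/*
 * Editor's illustrative sketch, not part of the original source: the
 * accounting contract between page_create_wait() and
 * page_create_putback().  A caller that reserves npages but then obtains
 * fewer actual pages from the freelists must return the difference, or
 * the pcf counts leak.  This mirrors what page_alloc_pages() below does
 * on partial failure.  The VM_PAGE_EXAMPLES guard is hypothetical.
 */
#ifdef VM_PAGE_EXAMPLES
static int
reserve_then_putback_example(pgcnt_t want, pgcnt_t got)
{
	if (!page_create_wait(want, PG_WAIT))
		return (0);	/* throttled; nothing was reserved */

	/* ... attempt to pull 'want' pages, actually obtaining 'got' ... */

	if (got < want)
		page_create_putback((spgcnt_t)(want - got));
	return (1);
}
#endif	/* VM_PAGE_EXAMPLES */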
/*
 * Called from page_create_va() when both the cache and free lists
 * have been checked once.
 *
 * Either returns a page or panics since the accounting was done
 * way before we got here.
 *
 * We don't come here often, so leave the accounting on permanently.
 */

#define	MAX_PCGS	100

#ifdef	DEBUG
#define	PCGS_TRIES	100
#else	/* DEBUG */
#define	PCGS_TRIES	10
#endif	/* DEBUG */

#ifdef	VM_STATS
uint_t	pcgs_counts[PCGS_TRIES];
uint_t	pcgs_too_many;
uint_t	pcgs_entered;
uint_t	pcgs_entered_noreloc;
uint_t	pcgs_locked;
uint_t	pcgs_cagelocked;
#endif	/* VM_STATS */

static page_t *
page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg,
    caddr_t vaddr, uint_t flags)
{
	uint_t		count;
	page_t		*pp;
	uint_t		locked, i;
	struct pcf	*p;
	lgrp_t		*lgrp;
	int		cagelocked = 0;

	VM_STAT_ADD(pcgs_entered);

	/*
	 * Tap any reserve freelists: if we fail now, we'll die
	 * since the page(s) we're looking for have already been
	 * accounted for.
	 */
	flags |= PG_PANIC;

	if ((flags & PG_NORELOC) != 0) {
		VM_STAT_ADD(pcgs_entered_noreloc);
		/*
		 * Requests for free pages from critical threads
		 * such as pageout still won't throttle here, but
		 * we must try again, to give the cageout thread
		 * another chance to catch up.  Since we already
		 * accounted for the pages, we had better get them
		 * this time.
		 *
		 * N.B. All non-critical threads acquire the pcgs_cagelock
		 * to serialize access to the freelists.  This implements a
		 * turnstile-type synchronization to avoid starvation of
		 * critical requests for PG_NORELOC memory by non-critical
		 * threads: all non-critical threads must acquire a 'ticket'
		 * before passing through, which entails making sure
		 * kcage_freemem won't fall below minfree prior to grabbing
		 * pages from the freelists.
		 */
		if (kcage_create_throttle(1, flags) == KCT_NONCRIT) {
			mutex_enter(&pcgs_cagelock);
			cagelocked = 1;
			VM_STAT_ADD(pcgs_cagelocked);
		}
	}

	/*
	 * Time to get serious.
	 * We failed to get a `correctly colored' page from both the
	 * free and cache lists.
	 * We escalate in stage.
	 *
	 * First try both lists without worrying about color.
	 *
	 * Then, grab all page accounting locks (ie. pcf[]) and
	 * steal any pages that they have and set the pcf_block flag to
	 * stop deletions from the lists.  This will help because
	 * a page can get added to the free list while we are looking
	 * at the cache list, then another page could be added to the cache
	 * list allowing the page on the free list to be removed as we
	 * move from looking at the cache list to the free list.  This
	 * could happen over and over.  We would never find the page
	 * we have accounted for.
	 *
	 * Noreloc pages are a subset of the global (relocatable) page pool.
	 * They are not tracked separately in the pcf bins, so it is
	 * impossible to know when doing pcf accounting if the available
	 * page(s) are noreloc pages or not.  When looking for a noreloc page
	 * it is quite easy to end up here even if the global (relocatable)
	 * page pool has plenty of free pages but the noreloc pool is empty.
	 *
	 * When the noreloc pool is empty (or low), additional noreloc pages
	 * are created by converting pages from the global page pool.  This
	 * process will stall during pcf accounting if the pcf bins are
	 * already locked.  Such is the case when a noreloc allocation is
	 * looping here in page_create_get_something waiting for more noreloc
	 * pages to appear.
	 *
	 * Short of adding a new field to the pcf bins to accurately track
	 * the number of free noreloc pages, we instead do not grab the
	 * pcgs_lock, do not set the pcf blocks and do not timeout when
	 * allocating a noreloc page.  This allows noreloc allocations to
	 * loop without blocking global page pool allocations.
	 *
	 * NOTE: the behaviour of page_create_get_something has not changed
	 * for the case of global page pool allocations.
	 */

	flags &= ~PG_MATCH_COLOR;
	locked = 0;
#if defined(__i386) || defined(__amd64)
	/*
	 * page_create_get_something may be called because 4g memory may be
	 * depleted.  Set flags to allow for relocation of base page below
	 * 4g if necessary.
	 */
	if (physmax4g)
		flags |= (PGI_PGCPSZC0 | PGI_PGCPHIPRI);
#endif

	lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);

	for (count = 0; kcage_on || count < MAX_PCGS; count++) {
		pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
		    flags, lgrp);
		if (pp == NULL) {
			pp = page_get_cachelist(vp, off, seg, vaddr,
			    flags, lgrp);
		}
		if (pp == NULL) {
			/*
			 * Serialize.  Don't fight with other pcgs().
			 */
			if (!locked && (!kcage_on || !(flags & PG_NORELOC))) {
				mutex_enter(&pcgs_lock);
				VM_STAT_ADD(pcgs_locked);
				locked = 1;
				p = pcf;
				for (i = 0; i < PCF_FANOUT; i++) {
					mutex_enter(&p->pcf_lock);
					ASSERT(p->pcf_block == 0);
					p->pcf_block = 1;
					p->pcf_reserve = p->pcf_count;
					p->pcf_count = 0;
					mutex_exit(&p->pcf_lock);
					p++;
				}
				freemem = 0;
			}

			if (count) {
				/*
				 * Since page_free() puts pages on
				 * a list then accounts for it, we
				 * just have to wait for page_free()
				 * to unlock any page it was working
				 * with.  The page_lock()-page_reclaim()
				 * path falls in the same boat.
				 *
				 * We don't need to check on the
				 * PG_WAIT flag, we have already
				 * accounted for the page we are
				 * looking for in page_create_va().
				 *
				 * We just wait a moment to let any
				 * locked pages on the lists free up,
				 * then continue around and try again.
				 *
				 * Will be awakened by set_freemem().
				 */
				mutex_enter(&pcgs_wait_lock);
				cv_wait(&pcgs_cv, &pcgs_wait_lock);
				mutex_exit(&pcgs_wait_lock);
			}
		} else {
#ifdef VM_STATS
			if (count >= PCGS_TRIES) {
				VM_STAT_ADD(pcgs_too_many);
			} else {
				VM_STAT_ADD(pcgs_counts[count]);
			}
#endif
			if (locked) {
				pcgs_unblock();
				mutex_exit(&pcgs_lock);
			}
			if (cagelocked)
				mutex_exit(&pcgs_cagelock);
			return (pp);
		}
	}
	/*
	 * we go down holding the pcf locks.
	 */
	panic("no %spage found %d",
	    ((flags & PG_NORELOC) ? "non-reloc " : ""), count);
	/*NOTREACHED*/
}

/*
 * Create enough pages for "bytes" worth of data starting at
 * "off" in "vp".
 *
 *	Where flag must be one of:
 *
 *		PG_EXCL:	Exclusive create (fail if any page already
 *				exists in the page cache) which does not
 *				wait for memory to become available.
 *
 *		PG_WAIT:	Non-exclusive create which can wait for
 *				memory to become available.
 *
 *		PG_PHYSCONTIG:	Allocate physically contiguous pages.
 *				(Not Supported)
 *
 * A doubly linked list of pages is returned to the caller.  Each page
 * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock)
 * lock.
 *
 * Unable to change the parameters to page_create() in a minor release,
 * we renamed page_create() to page_create_va(), changed all known calls
 * from page_create() to page_create_va(), and created this wrapper.
 *
 * Upon a major release, we should break compatibility by deleting this
 * wrapper, and replacing all the strings "page_create_va", with "page_create".
 *
 * NOTE: There is a copy of this interface as page_create_io() in
 *	 i86/vm/vm_machdep.c.  Any bugs fixed here should be applied
 *	 there.
 */
page_t *
page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags)
{
	caddr_t random_vaddr;
	struct seg kseg;

#ifdef DEBUG
	cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p",
	    (void *)caller());
#endif

	random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^
	    (uintptr_t)(off >> PAGESHIFT));
	kseg.s_as = &kas;

	return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr));
}

#ifdef DEBUG
uint32_t pg_alloc_pgs_mtbf = 0;
#endif
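/*
 * Editor's illustrative sketch, not part of the original source: the
 * rename described above, showing a legacy page_create() call next to
 * its page_create_va() equivalent.  The wrapper merely fabricates a
 * kernel segment and a hashed virtual address, so callers that know a
 * real [seg, vaddr] should pass it directly.  The VM_PAGE_EXAMPLES
 * guard is hypothetical.
 */
#ifdef VM_PAGE_EXAMPLES
static page_t *
create_pages_example(vnode_t *vp, u_offset_t off, size_t bytes,
    struct seg *seg, caddr_t vaddr)
{
	/* Deprecated form (warns in DEBUG kernels): */
	/* return (page_create(vp, off, bytes, PG_WAIT)); */

	/* Preferred form, with real placement hints: */
	return (page_create_va(vp, off, bytes, PG_WAIT, seg, vaddr));
}
#endif	/* VM_PAGE_EXAMPLES */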
/*
 * Used for large page support.  It will attempt to allocate
 * a large page(s) off the freelist.
 *
 * Returns non-zero on failure.
 */
int
page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr,
    page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz)
{
	pgcnt_t		npgs, curnpgs, totpgs;
	size_t		pgsz;
	page_t		*pplist = NULL, *pp;
	int		err = 0;
	lgrp_t		*lgrp;

	ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1));

	VM_STAT_ADD(alloc_pages[0]);

#ifdef DEBUG
	if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) {
		return (ENOMEM);
	}
#endif

	pgsz = page_get_pagesize(szc);
	totpgs = curnpgs = npgs = pgsz >> PAGESHIFT;

	ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0);
	/*
	 * Exactly one of basepp and ppa must be non-NULL:
	 * one must be NULL and the other must not be.
	 */
	ASSERT(basepp != NULL || ppa != NULL);
	ASSERT(basepp == NULL || ppa == NULL);

	(void) page_create_wait(npgs, PG_WAIT);

	while (npgs && szc) {
		lgrp = lgrp_mem_choose(seg, addr, pgsz);
		pp = page_get_freelist(vp, 0, seg, addr, pgsz, 0, lgrp);
		if (pp != NULL) {
			VM_STAT_ADD(alloc_pages[1]);
			page_list_concat(&pplist, &pp);
			ASSERT(npgs >= curnpgs);
			npgs -= curnpgs;
		} else if (anypgsz) {
			VM_STAT_ADD(alloc_pages[2]);
			szc--;
			pgsz = page_get_pagesize(szc);
			curnpgs = pgsz >> PAGESHIFT;
		} else {
			VM_STAT_ADD(alloc_pages[3]);
			ASSERT(npgs == totpgs);
			page_create_putback(npgs);
			return (ENOMEM);
		}
	}
	if (szc == 0) {
		VM_STAT_ADD(alloc_pages[4]);
		ASSERT(npgs != 0);
		page_create_putback(npgs);
		err = ENOMEM;
	} else if (basepp != NULL) {
		ASSERT(npgs == 0);
		ASSERT(ppa == NULL);
		*basepp = pplist;
	}

	npgs = totpgs - npgs;
	pp = pplist;

	/*
	 * Clear the free and age bits.  Also if we were passed in a ppa then
	 * fill it in with all the constituent pages from the large page.  But
	 * if we failed to allocate all the pages just free what we got.
	 */
	while (npgs != 0) {
		ASSERT(PP_ISFREE(pp));
		ASSERT(PP_ISAGED(pp));
		if (ppa != NULL || err != 0) {
			if (err == 0) {
				VM_STAT_ADD(alloc_pages[5]);
				PP_CLRFREE(pp);
				PP_CLRAGED(pp);
				page_sub(&pplist, pp);
				*ppa++ = pp;
				npgs--;
			} else {
				VM_STAT_ADD(alloc_pages[6]);
				ASSERT(pp->p_szc != 0);
				curnpgs = page_get_pagecnt(pp->p_szc);
				page_list_break(&pp, &pplist, curnpgs);
				page_list_add_pages(pp, 0);
				page_create_putback(curnpgs);
				ASSERT(npgs >= curnpgs);
				npgs -= curnpgs;
			}
			pp = pplist;
		} else {
			VM_STAT_ADD(alloc_pages[7]);
			PP_CLRFREE(pp);
			PP_CLRAGED(pp);
			pp = pp->p_next;
			npgs--;
		}
	}
	return (err);
}
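/*
 * Editor's illustrative sketch, not part of the original source: the two
 * mutually exclusive output modes of page_alloc_pages().  Passing basepp
 * yields the large page as a linked list; passing ppa yields the
 * constituent page_t pointers.  The szc and anypgsz values are caller
 * policy; the VM_PAGE_EXAMPLES guard is hypothetical.
 */
#ifdef VM_PAGE_EXAMPLES
static int
alloc_large_example(struct vnode *vp, struct seg *seg, caddr_t addr,
    page_t *ppa[], uint_t szc)
{
	/*
	 * ppa form: exactly one of basepp/ppa is non-NULL, and
	 * anypgsz == 0 forbids falling back to a smaller page size.
	 */
	return (page_alloc_pages(vp, seg, addr, NULL, ppa, szc, 0));
}
#endif	/* VM_PAGE_EXAMPLES */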
2086 */ 2087 page_t * 2088 page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2089 struct seg *seg, caddr_t vaddr, void *arg) 2090 { 2091 pgcnt_t npages, pcftotal; 2092 page_t *pp; 2093 page_t *rootpp; 2094 lgrp_t *lgrp; 2095 uint_t enough; 2096 uint_t pcf_index; 2097 uint_t i; 2098 struct pcf *p; 2099 struct pcf *q; 2100 lgrp_id_t *lgrpid = (lgrp_id_t *)arg; 2101 2102 ASSERT(vp != NULL); 2103 2104 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2105 PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0); 2106 /* but no others */ 2107 2108 ASSERT((flags & PG_EXCL) == PG_EXCL); 2109 2110 npages = btop(bytes); 2111 2112 if (!kcage_on || panicstr) { 2113 /* 2114 * Cage is OFF, or we are single threaded in 2115 * panic, so make everything a RELOC request. 2116 */ 2117 flags &= ~PG_NORELOC; 2118 } 2119 2120 /* 2121 * Make sure there's adequate physical memory available. 2122 * Note: PG_WAIT is ignored here. 2123 */ 2124 if (freemem <= throttlefree + npages) { 2125 VM_STAT_ADD(page_create_large_cnt[1]); 2126 return (NULL); 2127 } 2128 2129 /* 2130 * If cage is on, dampen draw from cage when available 2131 * cage space is low. 2132 */ 2133 if ((flags & (PG_NORELOC | PG_WAIT)) == (PG_NORELOC | PG_WAIT) && 2134 kcage_freemem < kcage_throttlefree + npages) { 2135 2136 /* 2137 * The cage is on, the caller wants PG_NORELOC 2138 * pages and available cage memory is very low. 2139 * Call kcage_create_throttle() to attempt to 2140 * control demand on the cage. 2141 */ 2142 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) { 2143 VM_STAT_ADD(page_create_large_cnt[2]); 2144 return (NULL); 2145 } 2146 } 2147 2148 enough = 0; 2149 pcf_index = PCF_INDEX(); 2150 p = &pcf[pcf_index]; 2151 q = &pcf[PCF_FANOUT]; 2152 for (pcftotal = 0, i = 0; i < PCF_FANOUT; i++) { 2153 if (p->pcf_count > npages) { 2154 /* 2155 * a good one to try. 2156 */ 2157 mutex_enter(&p->pcf_lock); 2158 if (p->pcf_count > npages) { 2159 p->pcf_count -= (uint_t)npages; 2160 /* 2161 * freemem is not protected by any lock. 2162 * Thus, we cannot have any assertion 2163 * containing freemem here. 2164 */ 2165 freemem -= npages; 2166 enough = 1; 2167 mutex_exit(&p->pcf_lock); 2168 break; 2169 } 2170 mutex_exit(&p->pcf_lock); 2171 } 2172 pcftotal += p->pcf_count; 2173 p++; 2174 if (p >= q) { 2175 p = pcf; 2176 } 2177 } 2178 2179 if (!enough) { 2180 /* If there isn't enough memory available, give up. */ 2181 if (pcftotal < npages) { 2182 VM_STAT_ADD(page_create_large_cnt[3]); 2183 return (NULL); 2184 } 2185 2186 /* try to collect pages from several pcf bins */ 2187 for (p = pcf, pcftotal = 0, i = 0; i < PCF_FANOUT; i++) { 2188 mutex_enter(&p->pcf_lock); 2189 pcftotal += p->pcf_count; 2190 if (pcftotal >= npages) { 2191 /* 2192 * Wow! There are enough pages laying around 2193 * to satisfy the request. Do the accounting, 2194 * drop the locks we acquired, and go back. 2195 * 2196 * freemem is not protected by any lock. So, 2197 * we cannot have any assertion containing 2198 * freemem. 
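 * * As a rough sketch of the accounting (an approximation, not an invariant the code asserts), the pcf buckets partition the free page count: * *	freemem ~= pcf[0].pcf_count + ... + pcf[PCF_FANOUT - 1].pcf_count * * so draining several buckets here just claims scattered pieces of that sum for this one request.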
2199 */ 2200 pgcnt_t tpages = npages; 2201 freemem -= npages; 2202 while (p >= pcf) { 2203 if (p->pcf_count <= tpages) { 2204 tpages -= p->pcf_count; 2205 p->pcf_count = 0; 2206 } else { 2207 p->pcf_count -= (uint_t)tpages; 2208 tpages = 0; 2209 } 2210 mutex_exit(&p->pcf_lock); 2211 p--; 2212 } 2213 ASSERT(tpages == 0); 2214 break; 2215 } 2216 p++; 2217 } 2218 if (i == PCF_FANOUT) { 2219 /* failed to collect pages - release the locks */ 2220 while (--p >= pcf) { 2221 mutex_exit(&p->pcf_lock); 2222 } 2223 VM_STAT_ADD(page_create_large_cnt[4]); 2224 return (NULL); 2225 } 2226 } 2227 2228 /* 2229 * This is where this function behaves fundamentally differently 2230 * than page_create_va(); since we're intending to map the page 2231 * with a single TTE, we have to get it as a physically contiguous 2232 * hardware pagesize chunk. If we can't, we fail. 2233 */ 2234 if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max && 2235 LGRP_EXISTS(lgrp_table[*lgrpid])) 2236 lgrp = lgrp_table[*lgrpid]; 2237 else 2238 lgrp = lgrp_mem_choose(seg, vaddr, bytes); 2239 2240 if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr, 2241 bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) { 2242 page_create_putback(npages); 2243 VM_STAT_ADD(page_create_large_cnt[5]); 2244 return (NULL); 2245 } 2246 2247 /* 2248 * If we got the page with the wrong mtype, give it back; this is a 2249 * workaround for CR 6249718. When CR 6249718 is fixed, we never get 2250 * inside the "if" and the workaround becomes just a nop. 2251 */ 2252 if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) { 2253 page_list_add_pages(rootpp, 0); 2254 page_create_putback(npages); 2255 VM_STAT_ADD(page_create_large_cnt[6]); 2256 return (NULL); 2257 } 2258 2259 /* 2260 * If satisfying this request has left us with too little 2261 * memory, start the wheels turning to get some back. The 2262 * first clause of the test prevents waking up the pageout 2263 * daemon in situations where it would decide that there's 2264 * nothing to do.
2265 */ 2266 if (nscan < desscan && freemem < minfree) { 2267 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2268 "pageout_cv_signal:freemem %ld", freemem); 2269 cv_signal(&proc_pageout->p_cv); 2270 } 2271 2272 pp = rootpp; 2273 while (npages--) { 2274 ASSERT(PAGE_EXCL(pp)); 2275 ASSERT(pp->p_vnode == NULL); 2276 ASSERT(!hat_page_is_mapped(pp)); 2277 PP_CLRFREE(pp); 2278 PP_CLRAGED(pp); 2279 if (!page_hashin(pp, vp, off, NULL)) 2280 panic("page_create_large: hashin failed: page %p", 2281 (void *)pp); 2282 page_io_lock(pp); 2283 off += PAGESIZE; 2284 pp = pp->p_next; 2285 } 2286 2287 VM_STAT_ADD(page_create_large_cnt[0]); 2288 return (rootpp); 2289 } 2290 2291 page_t * 2292 page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2293 struct seg *seg, caddr_t vaddr) 2294 { 2295 page_t *plist = NULL; 2296 pgcnt_t npages; 2297 pgcnt_t found_on_free = 0; 2298 pgcnt_t pages_req; 2299 page_t *npp = NULL; 2300 uint_t enough; 2301 uint_t i; 2302 uint_t pcf_index; 2303 struct pcf *p; 2304 struct pcf *q; 2305 lgrp_t *lgrp; 2306 2307 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START, 2308 "page_create_start:vp %p off %llx bytes %lu flags %x", 2309 vp, off, bytes, flags); 2310 2311 ASSERT(bytes != 0 && vp != NULL); 2312 2313 if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) { 2314 panic("page_create: invalid flags"); 2315 /*NOTREACHED*/ 2316 } 2317 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2318 PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0); 2319 /* but no others */ 2320 2321 pages_req = npages = btopr(bytes); 2322 /* 2323 * Try to see whether request is too large to *ever* be 2324 * satisfied, in order to prevent deadlock. We arbitrarily 2325 * decide to limit maximum size requests to max_page_get. 2326 */ 2327 if (npages >= max_page_get) { 2328 if ((flags & PG_WAIT) == 0) { 2329 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG, 2330 "page_create_toobig:vp %p off %llx npages " 2331 "%lu max_page_get %lu", 2332 vp, off, npages, max_page_get); 2333 return (NULL); 2334 } else { 2335 cmn_err(CE_WARN, 2336 "Request for too much kernel memory " 2337 "(%lu bytes), will hang forever", bytes); 2338 for (;;) 2339 delay(1000000000); 2340 } 2341 } 2342 2343 if (!kcage_on || panicstr) { 2344 /* 2345 * Cage is OFF, or we are single threaded in 2346 * panic, so make everything a RELOC request. 2347 */ 2348 flags &= ~PG_NORELOC; 2349 } 2350 2351 if (freemem <= throttlefree + npages) 2352 if (!page_create_throttle(npages, flags)) 2353 return (NULL); 2354 2355 /* 2356 * If cage is on, dampen draw from cage when available 2357 * cage space is low. 2358 */ 2359 if ((flags & PG_NORELOC) && 2360 kcage_freemem < kcage_throttlefree + npages) { 2361 2362 /* 2363 * The cage is on, the caller wants PG_NORELOC 2364 * pages and available cage memory is very low. 2365 * Call kcage_create_throttle() to attempt to 2366 * control demand on the cage. 2367 */ 2368 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) 2369 return (NULL); 2370 } 2371 2372 VM_STAT_ADD(page_create_cnt[0]); 2373 2374 enough = 0; 2375 pcf_index = PCF_INDEX(); 2376 2377 p = &pcf[pcf_index]; 2378 q = &pcf[PCF_FANOUT]; 2379 for (i = 0; i < PCF_FANOUT; i++) { 2380 if (p->pcf_count > npages) { 2381 /* 2382 * a good one to try. 2383 */ 2384 mutex_enter(&p->pcf_lock); 2385 if (p->pcf_count > npages) { 2386 p->pcf_count -= (uint_t)npages; 2387 /* 2388 * freemem is not protected by any lock. 2389 * Thus, we cannot have any assertion 2390 * containing freemem here. 
2391 */ 2392 freemem -= npages; 2393 enough = 1; 2394 mutex_exit(&p->pcf_lock); 2395 break; 2396 } 2397 mutex_exit(&p->pcf_lock); 2398 } 2399 p++; 2400 if (p >= q) { 2401 p = pcf; 2402 } 2403 } 2404 2405 if (!enough) { 2406 /* 2407 * Have to look harder. If npages is greater than 2408 * one, then we might have to coalesce the counters. 2409 * 2410 * Go wait. We come back having accounted 2411 * for the memory. 2412 */ 2413 VM_STAT_ADD(page_create_cnt[1]); 2414 if (!page_create_wait(npages, flags)) { 2415 VM_STAT_ADD(page_create_cnt[2]); 2416 return (NULL); 2417 } 2418 } 2419 2420 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS, 2421 "page_create_success:vp %p off %llx", vp, off); 2422 2423 /* 2424 * If satisfying this request has left us with too little 2425 * memory, start the wheels turning to get some back. The 2426 * first clause of the test prevents waking up the pageout 2427 * daemon in situations where it would decide that there's 2428 * nothing to do. 2429 */ 2430 if (nscan < desscan && freemem < minfree) { 2431 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2432 "pageout_cv_signal:freemem %ld", freemem); 2433 cv_signal(&proc_pageout->p_cv); 2434 } 2435 2436 /* 2437 * Loop around collecting the requested number of pages. 2438 * Most of the time, we have to `create' a new page. With 2439 * this in mind, pull the page off the free list before 2440 * getting the hash lock. This will minimize the hash 2441 * lock hold time, nesting, and the like. If it turns 2442 * out we don't need the page, we put it back at the end. 2443 */ 2444 while (npages--) { 2445 page_t *pp; 2446 kmutex_t *phm = NULL; 2447 ulong_t index; 2448 2449 index = PAGE_HASH_FUNC(vp, off); 2450 top: 2451 ASSERT(phm == NULL); 2452 ASSERT(index == PAGE_HASH_FUNC(vp, off)); 2453 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 2454 2455 if (npp == NULL) { 2456 /* 2457 * Try to get a page from the freelist (ie, 2458 * a page with no [vp, off] tag). If that 2459 * fails, use the cachelist. 2460 * 2461 * During the first attempt at both the free 2462 * and cache lists we try for the correct color. 2463 */ 2464 /* 2465 * XXXX-how do we deal with virtually indexed 2466 * caches and colors? 2467 */ 2468 VM_STAT_ADD(page_create_cnt[4]); 2469 /* 2470 * Get lgroup to allocate next page of shared memory 2471 * from and use it to specify where to allocate 2472 * the physical memory 2473 */ 2474 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 2475 npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 2476 flags | PG_MATCH_COLOR, lgrp); 2477 if (npp == NULL) { 2478 npp = page_get_cachelist(vp, off, seg, 2479 vaddr, flags | PG_MATCH_COLOR, lgrp); 2480 if (npp == NULL) { 2481 npp = page_create_get_something(vp, 2482 off, seg, vaddr, 2483 flags & ~PG_MATCH_COLOR); 2484 } 2485 2486 if (PP_ISAGED(npp) == 0) { 2487 /* 2488 * Since this page came from the 2489 * cachelist, we must destroy the 2490 * old vnode association. 2491 */ 2492 page_hashout(npp, NULL); 2493 } 2494 } 2495 } 2496 2497 /* 2498 * We own this page! 2499 */ 2500 ASSERT(PAGE_EXCL(npp)); 2501 ASSERT(npp->p_vnode == NULL); 2502 ASSERT(!hat_page_is_mapped(npp)); 2503 PP_CLRFREE(npp); 2504 PP_CLRAGED(npp); 2505 2506 /* 2507 * Here we have a page in our hot little mitts and are 2508 * just waiting to stuff it on the appropriate lists. 2509 * Get the mutex and check to see if it really does 2510 * not exist.
2511 */ 2512 phm = PAGE_HASH_MUTEX(index); 2513 mutex_enter(phm); 2514 PAGE_HASH_SEARCH(index, pp, vp, off); 2515 if (pp == NULL) { 2516 VM_STAT_ADD(page_create_new); 2517 pp = npp; 2518 npp = NULL; 2519 if (!page_hashin(pp, vp, off, phm)) { 2520 /* 2521 * Since we hold the page hash mutex and 2522 * just searched for this page, page_hashin 2523 * had better not fail. If it does, that 2524 * means some thread did not follow the 2525 * page hash mutex rules. Panic now and 2526 * get it over with. As usual, go down 2527 * holding all the locks. 2528 */ 2529 ASSERT(MUTEX_HELD(phm)); 2530 panic("page_create: " 2531 "hashin failed %p %p %llx %p", 2532 (void *)pp, (void *)vp, off, (void *)phm); 2533 /*NOTREACHED*/ 2534 } 2535 ASSERT(MUTEX_HELD(phm)); 2536 mutex_exit(phm); 2537 phm = NULL; 2538 2539 /* 2540 * Hat layer locking need not be done to set 2541 * the following bits since the page is not hashed 2542 * and was on the free list (i.e., had no mappings). 2543 * 2544 * Set the reference bit to protect 2545 * against immediate pageout 2546 * 2547 * XXXmh modify freelist code to set reference 2548 * bit so we don't have to do it here. 2549 */ 2550 page_set_props(pp, P_REF); 2551 found_on_free++; 2552 } else { 2553 VM_STAT_ADD(page_create_exists); 2554 if (flags & PG_EXCL) { 2555 /* 2556 * Found an existing page, and the caller 2557 * wanted all new pages. Undo all of the work 2558 * we have done. 2559 */ 2560 mutex_exit(phm); 2561 phm = NULL; 2562 while (plist != NULL) { 2563 pp = plist; 2564 page_sub(&plist, pp); 2565 page_io_unlock(pp); 2566 /* large pages should not end up here */ 2567 ASSERT(pp->p_szc == 0); 2568 /*LINTED: constant in conditional ctx*/ 2569 VN_DISPOSE(pp, B_INVAL, 0, kcred); 2570 } 2571 VM_STAT_ADD(page_create_found_one); 2572 goto fail; 2573 } 2574 ASSERT(flags & PG_WAIT); 2575 if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) { 2576 /* 2577 * Start all over again if we blocked trying 2578 * to lock the page. 2579 */ 2580 mutex_exit(phm); 2581 VM_STAT_ADD(page_create_page_lock_failed); 2582 phm = NULL; 2583 goto top; 2584 } 2585 mutex_exit(phm); 2586 phm = NULL; 2587 2588 if (PP_ISFREE(pp)) { 2589 ASSERT(PP_ISAGED(pp) == 0); 2590 VM_STAT_ADD(pagecnt.pc_get_cache); 2591 page_list_sub(pp, PG_CACHE_LIST); 2592 PP_CLRFREE(pp); 2593 found_on_free++; 2594 } 2595 } 2596 2597 /* 2598 * Got a page! It is locked. Acquire the i/o 2599 * lock since we are going to use the p_next and 2600 * p_prev fields to link the requested pages together. 2601 */ 2602 page_io_lock(pp); 2603 page_add(&plist, pp); 2604 plist = plist->p_next; 2605 off += PAGESIZE; 2606 vaddr += PAGESIZE; 2607 } 2608 2609 ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1); 2610 fail: 2611 if (npp != NULL) { 2612 /* 2613 * Did not need this page after all. 2614 * Put it back on the free list.
2615 */ 2616 VM_STAT_ADD(page_create_putbacks); 2617 PP_SETFREE(npp); 2618 PP_SETAGED(npp); 2619 npp->p_offset = (u_offset_t)-1; 2620 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL); 2621 page_unlock(npp); 2622 2623 } 2624 2625 ASSERT(pages_req >= found_on_free); 2626 2627 { 2628 uint_t overshoot = (uint_t)(pages_req - found_on_free); 2629 2630 if (overshoot) { 2631 VM_STAT_ADD(page_create_overshoot); 2632 p = &pcf[pcf_index]; 2633 mutex_enter(&p->pcf_lock); 2634 if (p->pcf_block) { 2635 p->pcf_reserve += overshoot; 2636 } else { 2637 p->pcf_count += overshoot; 2638 if (p->pcf_wait) { 2639 mutex_enter(&new_freemem_lock); 2640 if (freemem_wait) { 2641 cv_signal(&freemem_cv); 2642 p->pcf_wait--; 2643 } else { 2644 p->pcf_wait = 0; 2645 } 2646 mutex_exit(&new_freemem_lock); 2647 } 2648 } 2649 mutex_exit(&p->pcf_lock); 2650 /* freemem is approximate, so this test OK */ 2651 if (!p->pcf_block) 2652 freemem += overshoot; 2653 } 2654 } 2655 2656 return (plist); 2657 } 2658 2659 /* 2660 * One or more constituent pages of this large page have been marked 2661 * toxic. Simply demote the large page to PAGESIZE pages and let 2662 * page_free() handle it. This routine should only be called by 2663 * large page free routines (page_free_pages() and page_destroy_pages()). 2664 * All pages are locked SE_EXCL and have already been marked free. 2665 */ 2666 static void 2667 page_free_toxic_pages(page_t *rootpp) 2668 { 2669 page_t *tpp; 2670 pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc); 2671 uint_t szc = rootpp->p_szc; 2672 2673 for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) { 2674 ASSERT(tpp->p_szc == szc); 2675 ASSERT((PAGE_EXCL(tpp) && 2676 !page_iolock_assert(tpp)) || panicstr); 2677 tpp->p_szc = 0; 2678 } 2679 2680 while (rootpp != NULL) { 2681 tpp = rootpp; 2682 page_sub(&rootpp, tpp); 2683 ASSERT(PP_ISFREE(tpp)); 2684 PP_CLRFREE(tpp); 2685 page_free(tpp, 1); 2686 } 2687 } 2688 2689 /* 2690 * Put page on the "free" list. 2691 * The free list is really two lists maintained by 2692 * the PSM of whatever machine we happen to be on. 2693 */ 2694 void 2695 page_free(page_t *pp, int dontneed) 2696 { 2697 struct pcf *p; 2698 uint_t pcf_index; 2699 2700 ASSERT((PAGE_EXCL(pp) && 2701 !page_iolock_assert(pp)) || panicstr); 2702 2703 if (PP_ISFREE(pp)) { 2704 panic("page_free: page %p is free", (void *)pp); 2705 } 2706 2707 if (pp->p_szc != 0) { 2708 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 2709 PP_ISKAS(pp)) { 2710 panic("page_free: anon or kernel " 2711 "or no vnode large page %p", (void *)pp); 2712 } 2713 page_demote_vp_pages(pp); 2714 ASSERT(pp->p_szc == 0); 2715 } 2716 2717 /* 2718 * The page_struct_lock need not be acquired to examine these 2719 * fields since the page has an "exclusive" lock. 2720 */ 2721 if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 2722 pp->p_slckcnt != 0) { 2723 panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d " 2724 "slckcnt = %d", pp, page_pptonum(pp), pp->p_lckcnt, 2725 pp->p_cowcnt, pp->p_slckcnt); 2726 /*NOTREACHED*/ 2727 } 2728 2729 ASSERT(!hat_page_getshare(pp)); 2730 2731 PP_SETFREE(pp); 2732 ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) || 2733 !hat_ismod(pp)); 2734 page_clr_all_props(pp); 2735 ASSERT(!hat_page_getshare(pp)); 2736 2737 /* 2738 * Now we add the page to the head of the free list. 2739 * But if this page is associated with a paged vnode 2740 * then we adjust the head forward so that the page is 2741 * effectively at the end of the list.
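 * * In outline, the cases handled below (assuming nopageage == 0): * *	p_vnode == NULL			- free list, tail *	p_vnode != NULL, dontneed == 0	- cache list, tail *	p_vnode != NULL, dontneed != 0	- cache list, head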
2742 */ 2743 if (pp->p_vnode == NULL) { 2744 /* 2745 * Page has no identity, put it on the free list. 2746 */ 2747 PP_SETAGED(pp); 2748 pp->p_offset = (u_offset_t)-1; 2749 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 2750 VM_STAT_ADD(pagecnt.pc_free_free); 2751 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2752 "page_free_free:pp %p", pp); 2753 } else { 2754 PP_CLRAGED(pp); 2755 2756 if (!dontneed || nopageage) { 2757 /* move it to the tail of the list */ 2758 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL); 2759 2760 VM_STAT_ADD(pagecnt.pc_free_cache); 2761 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL, 2762 "page_free_cache_tail:pp %p", pp); 2763 } else { 2764 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD); 2765 2766 VM_STAT_ADD(pagecnt.pc_free_dontneed); 2767 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD, 2768 "page_free_cache_head:pp %p", pp); 2769 } 2770 } 2771 page_unlock(pp); 2772 2773 /* 2774 * Now do the `freemem' accounting. 2775 */ 2776 pcf_index = PCF_INDEX(); 2777 p = &pcf[pcf_index]; 2778 2779 mutex_enter(&p->pcf_lock); 2780 if (p->pcf_block) { 2781 p->pcf_reserve += 1; 2782 } else { 2783 p->pcf_count += 1; 2784 if (p->pcf_wait) { 2785 mutex_enter(&new_freemem_lock); 2786 /* 2787 * Check to see if some other thread 2788 * is actually waiting. Another bucket 2789 * may have woken it up by now. If there 2790 * are no waiters, then set our pcf_wait 2791 * count to zero to avoid coming in here 2792 * next time. Also, since only one page 2793 * was put on the free list, just wake 2794 * up one waiter. 2795 */ 2796 if (freemem_wait) { 2797 cv_signal(&freemem_cv); 2798 p->pcf_wait--; 2799 } else { 2800 p->pcf_wait = 0; 2801 } 2802 mutex_exit(&new_freemem_lock); 2803 } 2804 } 2805 mutex_exit(&p->pcf_lock); 2806 2807 /* freemem is approximate, so this test OK */ 2808 if (!p->pcf_block) 2809 freemem += 1; 2810 } 2811 2812 /* 2813 * Put page on the "free" list during initial startup. 2814 * This happens during initial single-threaded execution. 2815 */ 2816 void 2817 page_free_at_startup(page_t *pp) 2818 { 2819 struct pcf *p; 2820 uint_t pcf_index; 2821 2822 page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT); 2823 VM_STAT_ADD(pagecnt.pc_free_free); 2824 2825 /* 2826 * Now do the `freemem' accounting.
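 * (No pcf_lock is taken and freemem is updated directly: startup is still single-threaded at this point, which is also why the bare ASSERTs below are safe.)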
2827 */ 2828 pcf_index = PCF_INDEX(); 2829 p = &pcf[pcf_index]; 2830 2831 ASSERT(p->pcf_block == 0); 2832 ASSERT(p->pcf_wait == 0); 2833 p->pcf_count += 1; 2834 2835 /* freemem is approximate, so this is OK */ 2836 freemem += 1; 2837 } 2838 2839 void 2840 page_free_pages(page_t *pp) 2841 { 2842 page_t *tpp, *rootpp = NULL; 2843 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 2844 pgcnt_t i; 2845 uint_t szc = pp->p_szc; 2846 2847 VM_STAT_ADD(pagecnt.pc_free_pages); 2848 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2849 "page_free_free:pp %p", pp); 2850 2851 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 2852 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 2853 panic("page_free_pages: not root page %p", (void *)pp); 2854 /*NOTREACHED*/ 2855 } 2856 2857 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 2858 ASSERT((PAGE_EXCL(tpp) && 2859 !page_iolock_assert(tpp)) || panicstr); 2860 if (PP_ISFREE(tpp)) { 2861 panic("page_free_pages: page %p is free", (void *)tpp); 2862 /*NOTREACHED*/ 2863 } 2864 if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 || 2865 tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) { 2866 panic("page_free_pages %p", (void *)tpp); 2867 /*NOTREACHED*/ 2868 } 2869 2870 ASSERT(!hat_page_getshare(tpp)); 2871 ASSERT(tpp->p_vnode == NULL); 2872 ASSERT(tpp->p_szc == szc); 2873 2874 PP_SETFREE(tpp); 2875 page_clr_all_props(tpp); 2876 PP_SETAGED(tpp); 2877 tpp->p_offset = (u_offset_t)-1; 2878 ASSERT(tpp->p_next == tpp); 2879 ASSERT(tpp->p_prev == tpp); 2880 page_list_concat(&rootpp, &tpp); 2881 } 2882 ASSERT(rootpp == pp); 2883 2884 page_list_add_pages(rootpp, 0); 2885 page_create_putback(pgcnt); 2886 } 2887 2888 int free_pages = 1; 2889 2890 /* 2891 * This routine attempts to return pages to the cachelist via page_release(). 2892 * It does not *have* to be successful in all cases, since the pageout scanner 2893 * will catch any pages it misses. It does need to be fast and not introduce 2894 * too much overhead. 2895 * 2896 * If a page isn't found on the unlocked sweep of the page_hash bucket, we 2897 * don't lock and retry. This is ok, since the page scanner will eventually 2898 * find any page we miss in free_vp_pages(). 2899 */ 2900 void 2901 free_vp_pages(vnode_t *vp, u_offset_t off, size_t len) 2902 { 2903 page_t *pp; 2904 u_offset_t eoff; 2905 extern int swap_in_range(vnode_t *, u_offset_t, size_t); 2906 2907 eoff = off + len; 2908 2909 if (free_pages == 0) 2910 return; 2911 if (swap_in_range(vp, off, len)) 2912 return; 2913 2914 for (; off < eoff; off += PAGESIZE) { 2915 2916 /* 2917 * find the page using a fast, but inexact search. It'll be OK 2918 * if a few pages slip through the cracks here. 2919 */ 2920 pp = page_exists(vp, off); 2921 2922 /* 2923 * If we didn't find the page (it may not exist), the page 2924 * is free, looks still in use (shared), or we can't lock it, 2925 * just give up. 2926 */ 2927 if (pp == NULL || 2928 PP_ISFREE(pp) || 2929 page_share_cnt(pp) > 0 || 2930 !page_trylock(pp, SE_EXCL)) 2931 continue; 2932 2933 /* 2934 * Once we have locked pp, verify that it's still the 2935 * correct page and not already free 2936 */ 2937 ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL)); 2938 if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) { 2939 page_unlock(pp); 2940 continue; 2941 } 2942 2943 /* 2944 * try to release the page... 2945 */ 2946 (void) page_release(pp, 1); 2947 } 2948 } 2949 2950 /* 2951 * Reclaim the given page from the free list. 
2952 * If pp is part of a large page, only the given constituent page is reclaimed 2953 * and the large page it belonged to will be demoted. This can only happen 2954 * if the page is not on the cachelist. 2955 * 2956 * Returns 1 on success or 0 on failure. 2957 * 2958 * The page is unlocked if it can't be reclaimed (when freemem == 0). 2959 * If `lock' is non-null, it will be dropped and re-acquired if 2960 * the routine must wait while freemem is 0. 2961 * 2962 * As it turns out, boot_getpages() does this. It picks a page, 2963 * based on where OBP mapped in some address, gets its pfn, searches 2964 * the memsegs, locks the page, then pulls it off the free list! 2965 */ 2966 int 2967 page_reclaim(page_t *pp, kmutex_t *lock) 2968 { 2969 struct pcf *p; 2970 uint_t pcf_index; 2971 struct cpu *cpup; 2972 int enough; 2973 uint_t i; 2974 2975 ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1); 2976 ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp)); 2977 2978 /* 2979 * If `freemem' is 0, we cannot reclaim this page from the 2980 * freelist, so release every lock we might hold: the page, 2981 * and the `lock' before blocking. 2982 * 2983 * The only way `freemem' can become 0 while there are pages 2984 * marked free (have their p->p_free bit set) is when the 2985 * system is low on memory and doing a page_create(). This 2986 * guarantees that once page_create() starts acquiring 2987 * pages it will be able to get all that it needs, since `freemem' 2988 * was decreased by the requested amount. So, we need to release 2989 * this page, and let page_create() have it. 2990 * 2991 * Since `freemem' being zero is not supposed to happen, just 2992 * use the usual hash stuff as a starting point. If that bucket 2993 * is empty, then assume the worst, and start at the beginning 2994 * of the pcf array. If we always start at the beginning 2995 * when acquiring more than one pcf lock, there won't be any 2996 * deadlock problems. 2997 */ 2998 2999 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */ 3000 3001 if (freemem <= throttlefree && !page_create_throttle(1l, 0)) { 3002 pcf_acquire_all(); 3003 goto page_reclaim_nomem; 3004 } 3005 3006 enough = 0; 3007 pcf_index = PCF_INDEX(); 3008 p = &pcf[pcf_index]; 3009 mutex_enter(&p->pcf_lock); 3010 if (p->pcf_count >= 1) { 3011 enough = 1; 3012 p->pcf_count--; 3013 } 3014 mutex_exit(&p->pcf_lock); 3015 3016 if (!enough) { 3017 VM_STAT_ADD(page_reclaim_zero); 3018 /* 3019 * Check again. It's possible that some other thread 3020 * could have been right behind us, and added one 3021 * to a list somewhere. Acquire each of the pcf locks 3022 * until we find a page. 3023 */ 3024 p = pcf; 3025 for (i = 0; i < PCF_FANOUT; i++) { 3026 mutex_enter(&p->pcf_lock); 3027 if (p->pcf_count >= 1) { 3028 p->pcf_count -= 1; 3029 enough = 1; 3030 break; 3031 } 3032 p++; 3033 } 3034 3035 if (!enough) { 3036 page_reclaim_nomem: 3037 /* 3038 * We really can't have page `pp'. 3039 * Time for the no-memory dance with 3040 * page_free(). This is just like 3041 * page_create_wait(). Plus the added 3042 * attraction of releasing whatever mutex 3043 * we were called with in `lock'. 3044 * Page_unlock() will wake up any thread 3045 * waiting around for this page. 3046 */ 3047 if (lock) { 3048 VM_STAT_ADD(page_reclaim_zero_locked); 3049 mutex_exit(lock); 3050 } 3051 page_unlock(pp); 3052 3053 /* 3054 * get this before we drop all the pcf locks.
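 * * In outline (a sketch of the code below): bump pcf_wait on each bucket while its lock is still held, drop the bucket locks, then sleep on freemem_cv under new_freemem_lock until page_free() puts a page back and signals the cv.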
3055 */ 3056 mutex_enter(&new_freemem_lock); 3057 3058 p = pcf; 3059 for (i = 0; i < PCF_FANOUT; i++) { 3060 p->pcf_wait++; 3061 mutex_exit(&p->pcf_lock); 3062 p++; 3063 } 3064 3065 freemem_wait++; 3066 cv_wait(&freemem_cv, &new_freemem_lock); 3067 freemem_wait--; 3068 3069 mutex_exit(&new_freemem_lock); 3070 3071 if (lock) { 3072 mutex_enter(lock); 3073 } 3074 return (0); 3075 } 3076 3077 /* 3078 * The pcf accounting has been done; 3079 * though none of the pcf_wait flags were set, 3080 * drop the locks and continue on. 3081 */ 3082 while (p >= pcf) { 3083 mutex_exit(&p->pcf_lock); 3084 p--; 3085 } 3086 } 3087 3088 /* 3089 * freemem is not protected by any lock. Thus, we cannot 3090 * have any assertion containing freemem here. 3091 */ 3092 freemem -= 1; 3093 3094 VM_STAT_ADD(pagecnt.pc_reclaim); 3095 3096 /* 3097 * page_list_sub will handle the case where pp is a large page. 3098 * It's possible that the page was promoted while on the freelist. 3099 */ 3100 if (PP_ISAGED(pp)) { 3101 page_list_sub(pp, PG_FREE_LIST); 3102 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE, 3103 "page_reclaim_free:pp %p", pp); 3104 } else { 3105 page_list_sub(pp, PG_CACHE_LIST); 3106 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE, 3107 "page_reclaim_cache:pp %p", pp); 3108 } 3109 3110 /* 3111 * Clear the p_free & p_age bits since this page is no longer 3112 * on the free list. Notice that there was a brief time where 3113 * a page is marked as free, but is not on the list. 3114 * 3115 * Set the reference bit to protect against immediate pageout. 3116 */ 3117 PP_CLRFREE(pp); 3118 PP_CLRAGED(pp); 3119 page_set_props(pp, P_REF); 3120 3121 CPU_STATS_ENTER_K(); 3122 cpup = CPU; /* get cpup now that CPU cannot change */ 3123 CPU_STATS_ADDQ(cpup, vm, pgrec, 1); 3124 CPU_STATS_ADDQ(cpup, vm, pgfrec, 1); 3125 CPU_STATS_EXIT_K(); 3126 ASSERT(pp->p_szc == 0); 3127 3128 return (1); 3129 } 3130 3131 /* 3132 * Destroy identity of the page and put it back on 3133 * the page free list. Assumes that the caller has 3134 * acquired the "exclusive" lock on the page. 3135 */ 3136 void 3137 page_destroy(page_t *pp, int dontfree) 3138 { 3139 ASSERT((PAGE_EXCL(pp) && 3140 !page_iolock_assert(pp)) || panicstr); 3141 ASSERT(pp->p_slckcnt == 0 || panicstr); 3142 3143 if (pp->p_szc != 0) { 3144 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 3145 PP_ISKAS(pp)) { 3146 panic("page_destroy: anon or kernel or no vnode " 3147 "large page %p", (void *)pp); 3148 } 3149 page_demote_vp_pages(pp); 3150 ASSERT(pp->p_szc == 0); 3151 } 3152 3153 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp); 3154 3155 /* 3156 * Unload translations, if any, then hash out the 3157 * page to erase its identity. 3158 */ 3159 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3160 page_hashout(pp, NULL); 3161 3162 if (!dontfree) { 3163 /* 3164 * Acquire the "freemem_lock" for availrmem. 3165 * The page_struct_lock need not be acquired for lckcnt 3166 * and cowcnt since the page has an "exclusive" lock. 3167 */ 3168 if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) { 3169 mutex_enter(&freemem_lock); 3170 if (pp->p_lckcnt != 0) { 3171 availrmem++; 3172 pp->p_lckcnt = 0; 3173 } 3174 if (pp->p_cowcnt != 0) { 3175 availrmem += pp->p_cowcnt; 3176 pp->p_cowcnt = 0; 3177 } 3178 mutex_exit(&freemem_lock); 3179 } 3180 /* 3181 * Put the page on the "free" list.
3182 */ 3183 page_free(pp, 0); 3184 } 3185 } 3186 3187 void 3188 page_destroy_pages(page_t *pp) 3189 { 3190 3191 page_t *tpp, *rootpp = NULL; 3192 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 3193 pgcnt_t i, pglcks = 0; 3194 uint_t szc = pp->p_szc; 3195 3196 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 3197 3198 VM_STAT_ADD(pagecnt.pc_destroy_pages); 3199 3200 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp); 3201 3202 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 3203 panic("page_destroy_pages: not root page %p", (void *)pp); 3204 /*NOTREACHED*/ 3205 } 3206 3207 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 3208 ASSERT((PAGE_EXCL(tpp) && 3209 !page_iolock_assert(tpp)) || panicstr); 3210 ASSERT(tpp->p_slckcnt == 0 || panicstr); 3211 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 3212 page_hashout(tpp, NULL); 3213 ASSERT(tpp->p_offset == (u_offset_t)-1); 3214 if (tpp->p_lckcnt != 0) { 3215 pglcks++; 3216 tpp->p_lckcnt = 0; 3217 } else if (tpp->p_cowcnt != 0) { 3218 pglcks += tpp->p_cowcnt; 3219 tpp->p_cowcnt = 0; 3220 } 3221 ASSERT(!hat_page_getshare(tpp)); 3222 ASSERT(tpp->p_vnode == NULL); 3223 ASSERT(tpp->p_szc == szc); 3224 3225 PP_SETFREE(tpp); 3226 page_clr_all_props(tpp); 3227 PP_SETAGED(tpp); 3228 ASSERT(tpp->p_next == tpp); 3229 ASSERT(tpp->p_prev == tpp); 3230 page_list_concat(&rootpp, &tpp); 3231 } 3232 3233 ASSERT(rootpp == pp); 3234 if (pglcks != 0) { 3235 mutex_enter(&freemem_lock); 3236 availrmem += pglcks; 3237 mutex_exit(&freemem_lock); 3238 } 3239 3240 page_list_add_pages(rootpp, 0); 3241 page_create_putback(pgcnt); 3242 } 3243 3244 /* 3245 * Similar to page_destroy(), but destroys pages which are 3246 * locked and known to be on the page free list. Since 3247 * the page is known to be free and locked, no one can access 3248 * it. 3249 * 3250 * Also, the number of free pages does not change. 3251 */ 3252 void 3253 page_destroy_free(page_t *pp) 3254 { 3255 ASSERT(PAGE_EXCL(pp)); 3256 ASSERT(PP_ISFREE(pp)); 3257 ASSERT(pp->p_vnode); 3258 ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0); 3259 ASSERT(!hat_page_is_mapped(pp)); 3260 ASSERT(PP_ISAGED(pp) == 0); 3261 ASSERT(pp->p_szc == 0); 3262 3263 VM_STAT_ADD(pagecnt.pc_destroy_free); 3264 page_list_sub(pp, PG_CACHE_LIST); 3265 3266 page_hashout(pp, NULL); 3267 ASSERT(pp->p_vnode == NULL); 3268 ASSERT(pp->p_offset == (u_offset_t)-1); 3269 ASSERT(pp->p_hash == NULL); 3270 3271 PP_SETAGED(pp); 3272 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 3273 page_unlock(pp); 3274 3275 mutex_enter(&new_freemem_lock); 3276 if (freemem_wait) { 3277 cv_signal(&freemem_cv); 3278 } 3279 mutex_exit(&new_freemem_lock); 3280 } 3281 3282 /* 3283 * Rename the page "opp" to have an identity specified 3284 * by [vp, off]. If a page already exists with this name 3285 * it is locked and destroyed. Note that the page's 3286 * translations are not unloaded during the rename. 3287 * 3288 * This routine is used by the anon layer to "steal" the 3289 * original page and is not unlike destroying a page and 3290 * creating a new page using the same page frame. 3291 * 3292 * XXX -- Could deadlock if caller 1 tries to rename A to B while 3293 * caller 2 tries to rename B to A. 
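 * (A hypothetical interleaving: thread 1 holds A's "exclusive" lock and blocks in page_lock() waiting for B, while thread 2 holds B's lock and blocks waiting for A.)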
3294 */ 3295 void 3296 page_rename(page_t *opp, vnode_t *vp, u_offset_t off) 3297 { 3298 page_t *pp; 3299 int olckcnt = 0; 3300 int ocowcnt = 0; 3301 kmutex_t *phm; 3302 ulong_t index; 3303 3304 ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp)); 3305 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3306 ASSERT(PP_ISFREE(opp) == 0); 3307 3308 VM_STAT_ADD(page_rename_count); 3309 3310 TRACE_3(TR_FAC_VM, TR_PAGE_RENAME, 3311 "page rename:pp %p vp %p off %llx", opp, vp, off); 3312 3313 /* 3314 * CacheFS may call page_rename for a large NFS page 3315 * when both CacheFS and NFS mount points are used 3316 * by applications. Demote this large page before 3317 * renaming it, to ensure that there are no "partial" 3318 * large pages left lying around. 3319 */ 3320 if (opp->p_szc != 0) { 3321 vnode_t *ovp = opp->p_vnode; 3322 ASSERT(ovp != NULL); 3323 ASSERT(!IS_SWAPFSVP(ovp)); 3324 ASSERT(!VN_ISKAS(ovp)); 3325 page_demote_vp_pages(opp); 3326 ASSERT(opp->p_szc == 0); 3327 } 3328 3329 page_hashout(opp, NULL); 3330 PP_CLRAGED(opp); 3331 3332 /* 3333 * Acquire the appropriate page hash lock, since 3334 * we're going to rename the page. 3335 */ 3336 index = PAGE_HASH_FUNC(vp, off); 3337 phm = PAGE_HASH_MUTEX(index); 3338 mutex_enter(phm); 3339 top: 3340 /* 3341 * Look for an existing page with this name and destroy it if found. 3342 * By holding the page hash lock all the way to the page_hashin() 3343 * call, we are assured that no page can be created with this 3344 * identity. In the case when the phm lock is dropped to undo any 3345 * hat layer mappings, the existing page is held with an "exclusive" 3346 * lock, again preventing another page from being created with 3347 * this identity. 3348 */ 3349 PAGE_HASH_SEARCH(index, pp, vp, off); 3350 if (pp != NULL) { 3351 VM_STAT_ADD(page_rename_exists); 3352 3353 /* 3354 * As it turns out, this is one of only two places where 3355 * page_lock() needs to hold the passed in lock in the 3356 * successful case. In all of the others, the lock could 3357 * be dropped as soon as the attempt is made to lock 3358 * the page. It is tempting to add yet another argument, 3359 * PL_KEEP or PL_DROP, to let page_lock know what to do. 3360 */ 3361 if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) { 3362 /* 3363 * Went to sleep because the page could not 3364 * be locked. We were woken up when the page 3365 * was unlocked, or when the page was destroyed. 3366 * In either case, `phm' was dropped while we 3367 * slept. Hence we should not just roar through 3368 * this loop. 3369 */ 3370 goto top; 3371 } 3372 3373 /* 3374 * If an existing page is a large page, then demote 3375 * it to ensure that no "partial" large pages are 3376 * "created" after page_rename. An existing page 3377 * can be a CacheFS page, and can't belong to swapfs. 3378 */ 3379 if (hat_page_is_mapped(pp)) { 3380 /* 3381 * Unload translations. Since we hold the 3382 * exclusive lock on this page, the page 3383 * can not be changed while we drop phm. 3384 * This is also not a lock protocol violation, 3385 * but rather the proper way to do things.
3386 */ 3387 mutex_exit(phm); 3388 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3389 if (pp->p_szc != 0) { 3390 ASSERT(!IS_SWAPFSVP(vp)); 3391 ASSERT(!VN_ISKAS(vp)); 3392 page_demote_vp_pages(pp); 3393 ASSERT(pp->p_szc == 0); 3394 } 3395 mutex_enter(phm); 3396 } else if (pp->p_szc != 0) { 3397 ASSERT(!IS_SWAPFSVP(vp)); 3398 ASSERT(!VN_ISKAS(vp)); 3399 mutex_exit(phm); 3400 page_demote_vp_pages(pp); 3401 ASSERT(pp->p_szc == 0); 3402 mutex_enter(phm); 3403 } 3404 page_hashout(pp, phm); 3405 } 3406 /* 3407 * Hash in the page with the new identity. 3408 */ 3409 if (!page_hashin(opp, vp, off, phm)) { 3410 /* 3411 * We were holding phm while we searched for [vp, off] 3412 * and only dropped phm if we found and locked a page. 3413 * If we can't create this page now, then something 3414 * is really broken. 3415 */ 3416 panic("page_rename: Can't hash in page: %p", (void *)pp); 3417 /*NOTREACHED*/ 3418 } 3419 3420 ASSERT(MUTEX_HELD(phm)); 3421 mutex_exit(phm); 3422 3423 /* 3424 * Now that we have dropped phm, let's get around to finishing up 3425 * with pp. 3426 */ 3427 if (pp != NULL) { 3428 ASSERT(!hat_page_is_mapped(pp)); 3429 /* for now large pages should not end up here */ 3430 ASSERT(pp->p_szc == 0); 3431 /* 3432 * Save the locks for transfer to the new page and then 3433 * clear them so page_free doesn't think they're important. 3434 * The page_struct_lock need not be acquired for lckcnt and 3435 * cowcnt since the page has an "exclusive" lock. 3436 */ 3437 olckcnt = pp->p_lckcnt; 3438 ocowcnt = pp->p_cowcnt; 3439 pp->p_lckcnt = pp->p_cowcnt = 0; 3440 3441 /* 3442 * Put the page on the "free" list after we drop 3443 * the lock. The less work under the lock the better. 3444 */ 3445 /*LINTED: constant in conditional context*/ 3446 VN_DISPOSE(pp, B_FREE, 0, kcred); 3447 } 3448 3449 /* 3450 * Transfer the lock count from the old page (if any). 3451 * The page_struct_lock need not be acquired for lckcnt and 3452 * cowcnt since the page has an "exclusive" lock. 3453 */ 3454 opp->p_lckcnt += olckcnt; 3455 opp->p_cowcnt += ocowcnt; 3456 } 3457 3458 /* 3459 * Low-level routine to add page `pp' to the hash and vp chains for [vp, offset]. 3460 * 3461 * Pages are normally inserted at the start of a vnode's v_pages list. 3462 * If the vnode is VMODSORT and the page is modified, it goes at the end. 3463 * This can happen when a modified page is relocated for DR. 3464 * 3465 * Returns 1 on success and 0 on failure. 3466 */ 3467 static int 3468 page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset) 3469 { 3470 page_t **listp; 3471 page_t *tp; 3472 ulong_t index; 3473 3474 ASSERT(PAGE_EXCL(pp)); 3475 ASSERT(vp != NULL); 3476 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3477 3478 /* 3479 * Be sure to set these up before the page is inserted on the hash 3480 * list. As soon as the page is placed on the list some other 3481 * thread might get confused and wonder how this page could 3482 * possibly hash to this list. 3483 */ 3484 pp->p_vnode = vp; 3485 pp->p_offset = offset; 3486 3487 /* 3488 * Record if this page is on a swap vnode. 3489 */ 3490 if ((vp->v_flag & VISSWAP) != 0) 3491 PP_SETSWAP(pp); 3492 3493 index = PAGE_HASH_FUNC(vp, offset); 3494 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index))); 3495 listp = &page_hash[index]; 3496 3497 /* 3498 * If this page is already hashed in, fail this attempt to add it.
3499 */ 3500 for (tp = *listp; tp != NULL; tp = tp->p_hash) { 3501 if (tp->p_vnode == vp && tp->p_offset == offset) { 3502 pp->p_vnode = NULL; 3503 pp->p_offset = (u_offset_t)(-1); 3504 return (0); 3505 } 3506 } 3507 pp->p_hash = *listp; 3508 *listp = pp; 3509 3510 /* 3511 * Add the page to the vnode's list of pages 3512 */ 3513 if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp)) 3514 listp = &vp->v_pages->p_vpprev->p_vpnext; 3515 else 3516 listp = &vp->v_pages; 3517 3518 page_vpadd(listp, pp); 3519 3520 return (1); 3521 } 3522 3523 /* 3524 * Add page `pp' to both the hash and vp chains for [vp, offset]. 3525 * 3526 * Returns 1 on success and 0 on failure. 3527 * If hold is passed in, it is not dropped. 3528 */ 3529 int 3530 page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold) 3531 { 3532 kmutex_t *phm = NULL; 3533 kmutex_t *vphm; 3534 int rc; 3535 3536 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3537 3538 TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN, 3539 "page_hashin:pp %p vp %p offset %llx", 3540 pp, vp, offset); 3541 3542 VM_STAT_ADD(hashin_count); 3543 3544 if (hold != NULL) 3545 phm = hold; 3546 else { 3547 VM_STAT_ADD(hashin_not_held); 3548 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset)); 3549 mutex_enter(phm); 3550 } 3551 3552 vphm = page_vnode_mutex(vp); 3553 mutex_enter(vphm); 3554 rc = page_do_hashin(pp, vp, offset); 3555 mutex_exit(vphm); 3556 if (hold == NULL) 3557 mutex_exit(phm); 3558 if (rc == 0) 3559 VM_STAT_ADD(hashin_already); 3560 return (rc); 3561 } 3562 3563 /* 3564 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3565 * All mutexes must be held 3566 */ 3567 static void 3568 page_do_hashout(page_t *pp) 3569 { 3570 page_t **hpp; 3571 page_t *hp; 3572 vnode_t *vp = pp->p_vnode; 3573 3574 ASSERT(vp != NULL); 3575 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3576 3577 /* 3578 * First, take pp off of its hash chain. 3579 */ 3580 hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)]; 3581 3582 for (;;) { 3583 hp = *hpp; 3584 if (hp == pp) 3585 break; 3586 if (hp == NULL) { 3587 panic("page_do_hashout"); 3588 /*NOTREACHED*/ 3589 } 3590 hpp = &hp->p_hash; 3591 } 3592 *hpp = pp->p_hash; 3593 3594 /* 3595 * Now remove it from its associated vnode. 3596 */ 3597 if (vp->v_pages) 3598 page_vpsub(&vp->v_pages, pp); 3599 3600 pp->p_hash = NULL; 3601 page_clr_all_props(pp); 3602 PP_CLRSWAP(pp); 3603 pp->p_vnode = NULL; 3604 pp->p_offset = (u_offset_t)-1; 3605 } 3606 3607 /* 3608 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3609 * 3610 * When `phm' is non-NULL it contains the address of the mutex protecting the 3611 * hash list pp is on. It is not dropped. 3612 */ 3613 void 3614 page_hashout(page_t *pp, kmutex_t *phm) 3615 { 3616 vnode_t *vp; 3617 ulong_t index; 3618 kmutex_t *nphm; 3619 kmutex_t *vphm; 3620 kmutex_t *sep; 3621 3622 ASSERT(phm != NULL ? 
MUTEX_HELD(phm) : 1); 3623 ASSERT(pp->p_vnode != NULL); 3624 ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr); 3625 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode))); 3626 3627 vp = pp->p_vnode; 3628 3629 TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT, 3630 "page_hashout:pp %p vp %p", pp, vp); 3631 3632 /* Kernel probe */ 3633 TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */, 3634 tnf_opaque, vnode, vp, 3635 tnf_offset, offset, pp->p_offset); 3636 3640 VM_STAT_ADD(hashout_count); 3641 index = PAGE_HASH_FUNC(vp, pp->p_offset); 3642 if (phm == NULL) { 3643 VM_STAT_ADD(hashout_not_held); 3644 nphm = PAGE_HASH_MUTEX(index); 3645 mutex_enter(nphm); 3646 } 3647 ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1); 3648 3649 3650 /* 3651 * grab page vnode mutex and remove it... 3652 */ 3653 vphm = page_vnode_mutex(vp); 3654 mutex_enter(vphm); 3655 3656 page_do_hashout(pp); 3657 3658 mutex_exit(vphm); 3659 if (phm == NULL) 3660 mutex_exit(nphm); 3661 3662 /* 3663 * Wake up processes waiting for this page. The page's 3664 * identity has been changed, and is probably not the 3665 * desired page any longer. 3666 */ 3667 sep = page_se_mutex(pp); 3668 mutex_enter(sep); 3669 pp->p_selock &= ~SE_EWANTED; 3670 if (CV_HAS_WAITERS(&pp->p_cv)) 3671 cv_broadcast(&pp->p_cv); 3672 mutex_exit(sep); 3673 } 3674 3675 /* 3676 * Add the page to the front of a linked list of pages 3677 * using the p_next & p_prev pointers for the list. 3678 * The caller is responsible for protecting the list pointers. 3679 */ 3680 void 3681 page_add(page_t **ppp, page_t *pp) 3682 { 3683 ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3684 3685 page_add_common(ppp, pp); 3686 } 3687 3688 3689 3690 /* 3691 * Common code for page_add() and mach_page_add() 3692 */ 3693 void 3694 page_add_common(page_t **ppp, page_t *pp) 3695 { 3696 if (*ppp == NULL) { 3697 pp->p_next = pp->p_prev = pp; 3698 } else { 3699 pp->p_next = *ppp; 3700 pp->p_prev = (*ppp)->p_prev; 3701 (*ppp)->p_prev = pp; 3702 pp->p_prev->p_next = pp; 3703 } 3704 *ppp = pp; 3705 } 3706 3707 3708 /* 3709 * Remove this page from a linked list of pages 3710 * using the p_next & p_prev pointers for the list. 3711 * 3712 * The caller is responsible for protecting the list pointers. 3713 */ 3714 void 3715 page_sub(page_t **ppp, page_t *pp) 3716 { 3717 ASSERT((PP_ISFREE(pp)) ? 1 : 3718 (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3719 3720 if (*ppp == NULL || pp == NULL) { 3721 panic("page_sub: bad arg(s): pp %p, *ppp %p", 3722 (void *)pp, (void *)(*ppp)); 3723 /*NOTREACHED*/ 3724 } 3725 3726 page_sub_common(ppp, pp); 3727 } 3728 3729 3730 /* 3731 * Common code for page_sub() and mach_page_sub() 3732 */ 3733 void 3734 page_sub_common(page_t **ppp, page_t *pp) 3735 { 3736 if (*ppp == pp) 3737 *ppp = pp->p_next; /* go to next page */ 3738 3739 if (*ppp == pp) 3740 *ppp = NULL; /* page list is gone */ 3741 else { 3742 pp->p_prev->p_next = pp->p_next; 3743 pp->p_next->p_prev = pp->p_prev; 3744 } 3745 pp->p_prev = pp->p_next = pp; /* make pp a list of one */ 3746 } 3747 3748 3749 /* 3750 * Break page list oppp into two lists with npages in the first list. 3751 * The tail is returned in nppp.
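 * * For example (an illustrative sketch; letters stand for pages on the circular, doubly linked list): * *	list = A-B-C-D-E *	page_list_break(&list, &tail, 2); *	now list = A-B and tail = C-D-E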
3752 */ 3753 void 3754 page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages) 3755 { 3756 page_t *s1pp = *oppp; 3757 page_t *s2pp; 3758 page_t *e1pp, *e2pp; 3759 long n = 0; 3760 3761 if (s1pp == NULL) { 3762 *nppp = NULL; 3763 return; 3764 } 3765 if (npages == 0) { 3766 *nppp = s1pp; 3767 *oppp = NULL; 3768 return; 3769 } 3770 for (n = 0, s2pp = *oppp; n < npages; n++) { 3771 s2pp = s2pp->p_next; 3772 } 3773 /* Fix head and tail of new lists */ 3774 e1pp = s2pp->p_prev; 3775 e2pp = s1pp->p_prev; 3776 s1pp->p_prev = e1pp; 3777 e1pp->p_next = s1pp; 3778 s2pp->p_prev = e2pp; 3779 e2pp->p_next = s2pp; 3780 3781 /* second list empty */ 3782 if (s2pp == s1pp) { 3783 *oppp = s1pp; 3784 *nppp = NULL; 3785 } else { 3786 *oppp = s1pp; 3787 *nppp = s2pp; 3788 } 3789 } 3790 3791 /* 3792 * Concatenate page list nppp onto the end of list ppp. 3793 */ 3794 void 3795 page_list_concat(page_t **ppp, page_t **nppp) 3796 { 3797 page_t *s1pp, *s2pp, *e1pp, *e2pp; 3798 3799 if (*nppp == NULL) { 3800 return; 3801 } 3802 if (*ppp == NULL) { 3803 *ppp = *nppp; 3804 return; 3805 } 3806 s1pp = *ppp; 3807 e1pp = s1pp->p_prev; 3808 s2pp = *nppp; 3809 e2pp = s2pp->p_prev; 3810 s1pp->p_prev = e2pp; 3811 e2pp->p_next = s1pp; 3812 e1pp->p_next = s2pp; 3813 s2pp->p_prev = e1pp; 3814 } 3815 3816 /* 3817 * Return the next page in the page list. 3818 */ 3819 page_t * 3820 page_list_next(page_t *pp) 3821 { 3822 return (pp->p_next); 3823 } 3824 3825 3826 /* 3827 * Add the page to the front of the linked list of pages 3828 * using p_vpnext/p_vpprev pointers for the list. 3829 * 3830 * The caller is responsible for protecting the lists. 3831 */ 3832 void 3833 page_vpadd(page_t **ppp, page_t *pp) 3834 { 3835 if (*ppp == NULL) { 3836 pp->p_vpnext = pp->p_vpprev = pp; 3837 } else { 3838 pp->p_vpnext = *ppp; 3839 pp->p_vpprev = (*ppp)->p_vpprev; 3840 (*ppp)->p_vpprev = pp; 3841 pp->p_vpprev->p_vpnext = pp; 3842 } 3843 *ppp = pp; 3844 } 3845 3846 /* 3847 * Remove this page from the linked list of pages 3848 * using p_vpnext/p_vpprev pointers for the list. 3849 * 3850 * The caller is responsible for protecting the lists. 3851 */ 3852 void 3853 page_vpsub(page_t **ppp, page_t *pp) 3854 { 3855 if (*ppp == NULL || pp == NULL) { 3856 panic("page_vpsub: bad arg(s): pp %p, *ppp %p", 3857 (void *)pp, (void *)(*ppp)); 3858 /*NOTREACHED*/ 3859 } 3860 3861 if (*ppp == pp) 3862 *ppp = pp->p_vpnext; /* go to next page */ 3863 3864 if (*ppp == pp) 3865 *ppp = NULL; /* page list is gone */ 3866 else { 3867 pp->p_vpprev->p_vpnext = pp->p_vpnext; 3868 pp->p_vpnext->p_vpprev = pp->p_vpprev; 3869 } 3870 pp->p_vpprev = pp->p_vpnext = pp; /* make pp a list of one */ 3871 } 3872 3873 /* 3874 * Lock a physical page into memory "long term". Used to support "lock 3875 * in memory" functions. Accepts the page to be locked, and a cow variable 3876 * to indicate whether the lock will travel to the new page during 3877 * a potential copy-on-write. 3878 */ 3879 int 3880 page_pp_lock( 3881 page_t *pp, /* page to be locked */ 3882 int cow, /* cow lock */ 3883 int kernel) /* must succeed -- ignore checking */ 3884 { 3885 int r = 0; /* result -- assume failure */ 3886 3887 ASSERT(PAGE_LOCKED(pp)); 3888 3889 page_struct_lock(pp); 3890 /* 3891 * Acquire the "freemem_lock" for availrmem.
3892 */ 3893 if (cow) { 3894 mutex_enter(&freemem_lock); 3895 if ((availrmem > pages_pp_maximum) && 3896 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 3897 availrmem--; 3898 pages_locked++; 3899 mutex_exit(&freemem_lock); 3900 r = 1; 3901 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 3902 cmn_err(CE_WARN, 3903 "COW lock limit reached on pfn 0x%lx", 3904 page_pptonum(pp)); 3905 } 3906 } else 3907 mutex_exit(&freemem_lock); 3908 } else { 3909 if (pp->p_lckcnt) { 3910 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 3911 r = 1; 3912 if (++pp->p_lckcnt == 3913 (ushort_t)PAGE_LOCK_MAXIMUM) { 3914 cmn_err(CE_WARN, "Page lock limit " 3915 "reached on pfn 0x%lx", 3916 page_pptonum(pp)); 3917 } 3918 } 3919 } else { 3920 if (kernel) { 3921 /* availrmem accounting done by caller */ 3922 ++pp->p_lckcnt; 3923 r = 1; 3924 } else { 3925 mutex_enter(&freemem_lock); 3926 if (availrmem > pages_pp_maximum) { 3927 availrmem--; 3928 pages_locked++; 3929 ++pp->p_lckcnt; 3930 r = 1; 3931 } 3932 mutex_exit(&freemem_lock); 3933 } 3934 } 3935 } 3936 page_struct_unlock(pp); 3937 return (r); 3938 } 3939 3940 /* 3941 * Decommit a lock on a physical page frame. Account for cow locks if 3942 * appropriate. 3943 */ 3944 void 3945 page_pp_unlock( 3946 page_t *pp, /* page to be unlocked */ 3947 int cow, /* expect cow lock */ 3948 int kernel) /* this was a kernel lock */ 3949 { 3950 ASSERT(PAGE_LOCKED(pp)); 3951 3952 page_struct_lock(pp); 3953 /* 3954 * Acquire the "freemem_lock" for availrmem. 3955 * If cowcnt or lckcnt is already 0 do nothing; i.e., we 3956 * could be called to unlock even if nothing is locked. This could 3957 * happen if locked file pages were truncated (removing the lock) 3958 * and the file was grown again and new pages faulted in; the new 3959 * pages are unlocked but the segment still thinks they're locked. 3960 */ 3961 if (cow) { 3962 if (pp->p_cowcnt) { 3963 mutex_enter(&freemem_lock); 3964 pp->p_cowcnt--; 3965 availrmem++; 3966 pages_locked--; 3967 mutex_exit(&freemem_lock); 3968 } 3969 } else { 3970 if (pp->p_lckcnt && --pp->p_lckcnt == 0) { 3971 if (!kernel) { 3972 mutex_enter(&freemem_lock); 3973 availrmem++; 3974 pages_locked--; 3975 mutex_exit(&freemem_lock); 3976 } 3977 } 3978 } 3979 page_struct_unlock(pp); 3980 } 3981 3982 /* 3983 * This routine reserves availrmem for npages; 3984 * flags: KM_NOSLEEP or KM_SLEEP 3985 * returns 1 on success or 0 on failure 3986 */ 3987 int 3988 page_resv(pgcnt_t npages, uint_t flags) 3989 { 3990 mutex_enter(&freemem_lock); 3991 while (availrmem < tune.t_minarmem + npages) { 3992 if (flags & KM_NOSLEEP) { 3993 mutex_exit(&freemem_lock); 3994 return (0); 3995 } 3996 mutex_exit(&freemem_lock); 3997 page_needfree(npages); 3998 kmem_reap(); 3999 delay(hz >> 2); 4000 page_needfree(-(spgcnt_t)npages); 4001 mutex_enter(&freemem_lock); 4002 } 4003 availrmem -= npages; 4004 mutex_exit(&freemem_lock); 4005 return (1); 4006 } 4007 4008 /* 4009 * This routine unreserves availrmem for npages; 4010 */ 4011 void 4012 page_unresv(pgcnt_t npages) 4013 { 4014 mutex_enter(&freemem_lock); 4015 availrmem += npages; 4016 mutex_exit(&freemem_lock); 4017 } 4018 4019 /* 4020 * See Statement at the beginning of segvn_lockop() regarding 4021 * the way we handle cowcnts and lckcnts. 4022 * 4023 * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage 4024 * that breaks COW has PROT_WRITE. 4025 * 4026 * Note that we may also break COW in case we are softlocking 4027 * on read access during physio; 4028 * in this softlock case, the vpage may not have PROT_WRITE.
4029 * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp' 4030 * if the vpage doesn't have PROT_WRITE. 4031 * 4032 * This routine is never called if we are stealing a page 4033 * in anon_private. 4034 * 4035 * The caller subtracted from availrmem for a read-only mapping; 4036 * if lckcnt is 1, increment availrmem. 4037 */ 4038 void 4039 page_pp_useclaim( 4040 page_t *opp, /* original page frame losing lock */ 4041 page_t *npp, /* new page frame gaining lock */ 4042 uint_t write_perm) /* set if vpage has PROT_WRITE */ 4043 { 4044 int payback = 0; 4045 4046 ASSERT(PAGE_LOCKED(opp)); 4047 ASSERT(PAGE_LOCKED(npp)); 4048 4049 page_struct_lock(opp); 4050 4051 ASSERT(npp->p_cowcnt == 0); 4052 ASSERT(npp->p_lckcnt == 0); 4053 4054 /* Don't use claim if nothing is locked (see page_pp_unlock above) */ 4055 if ((write_perm && opp->p_cowcnt != 0) || 4056 (!write_perm && opp->p_lckcnt != 0)) { 4057 4058 if (write_perm) { 4059 npp->p_cowcnt++; 4060 ASSERT(opp->p_cowcnt != 0); 4061 opp->p_cowcnt--; 4062 } else { 4063 4064 ASSERT(opp->p_lckcnt != 0); 4065 4066 /* 4067 * We didn't need availrmem decremented if p_lckcnt on 4068 * original page is 1. Here, we are unlocking 4069 * read-only copy belonging to original page and 4070 * are locking a copy belonging to new page. 4071 */ 4072 if (opp->p_lckcnt == 1) 4073 payback = 1; 4074 4075 npp->p_lckcnt++; 4076 opp->p_lckcnt--; 4077 } 4078 } 4079 if (payback) { 4080 mutex_enter(&freemem_lock); 4081 availrmem++; 4082 pages_useclaim--; 4083 mutex_exit(&freemem_lock); 4084 } 4085 page_struct_unlock(opp); 4086 } 4087 4088 /* 4089 * Simple claim adjust functions -- used to support changes in 4090 * claims due to changes in access permissions. Used by segvn_setprot(). 4091 */ 4092 int 4093 page_addclaim(page_t *pp) 4094 { 4095 int r = 0; /* result */ 4096 4097 ASSERT(PAGE_LOCKED(pp)); 4098 4099 page_struct_lock(pp); 4100 ASSERT(pp->p_lckcnt != 0); 4101 4102 if (pp->p_lckcnt == 1) { 4103 if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4104 --pp->p_lckcnt; 4105 r = 1; 4106 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4107 cmn_err(CE_WARN, 4108 "COW lock limit reached on pfn 0x%lx", 4109 page_pptonum(pp)); 4110 } 4111 } 4112 } else { 4113 mutex_enter(&freemem_lock); 4114 if ((availrmem > pages_pp_maximum) && 4115 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 4116 --availrmem; 4117 ++pages_claimed; 4118 mutex_exit(&freemem_lock); 4119 --pp->p_lckcnt; 4120 r = 1; 4121 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4122 cmn_err(CE_WARN, 4123 "COW lock limit reached on pfn 0x%lx", 4124 page_pptonum(pp)); 4125 } 4126 } else 4127 mutex_exit(&freemem_lock); 4128 } 4129 page_struct_unlock(pp); 4130 return (r); 4131 } 4132 4133 int 4134 page_subclaim(page_t *pp) 4135 { 4136 int r = 0; 4137 4138 ASSERT(PAGE_LOCKED(pp)); 4139 4140 page_struct_lock(pp); 4141 ASSERT(pp->p_cowcnt != 0); 4142 4143 if (pp->p_lckcnt) { 4144 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4145 r = 1; 4146 /* 4147 * for availrmem 4148 */ 4149 mutex_enter(&freemem_lock); 4150 availrmem++; 4151 pages_claimed--; 4152 mutex_exit(&freemem_lock); 4153 4154 pp->p_cowcnt--; 4155 4156 if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4157 cmn_err(CE_WARN, 4158 "Page lock limit reached on pfn 0x%lx", 4159 page_pptonum(pp)); 4160 } 4161 } 4162 } else { 4163 r = 1; 4164 pp->p_cowcnt--; 4165 pp->p_lckcnt++; 4166 } 4167 page_struct_unlock(pp); 4168 return (r); 4169 } 4170 4171 int 4172 page_addclaim_pages(page_t **ppa) 4173 { 4174 4175 pgcnt_t lckpgs = 0, pg_idx; 4176 4177
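/* * The first loop below only validates the pages and counts how many lckcnts will convert to cowcnts; availrmem is adjusted and the per-page counts updated only after every page has passed, so a failure part way through leaves nothing half-converted. */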
VM_STAT_ADD(pagecnt.pc_addclaim_pages); 4178 4179 mutex_enter(&page_llock); 4180 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4181 4182 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4183 ASSERT(ppa[pg_idx]->p_lckcnt != 0); 4184 if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4185 mutex_exit(&page_llock); 4186 return (0); 4187 } 4188 if (ppa[pg_idx]->p_lckcnt > 1) 4189 lckpgs++; 4190 } 4191 4192 if (lckpgs != 0) { 4193 mutex_enter(&freemem_lock); 4194 if (availrmem >= pages_pp_maximum + lckpgs) { 4195 availrmem -= lckpgs; 4196 pages_claimed += lckpgs; 4197 } else { 4198 mutex_exit(&freemem_lock); 4199 mutex_exit(&page_llock); 4200 return (0); 4201 } 4202 mutex_exit(&freemem_lock); 4203 } 4204 4205 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4206 ppa[pg_idx]->p_lckcnt--; 4207 ppa[pg_idx]->p_cowcnt++; 4208 } 4209 mutex_exit(&page_llock); 4210 return (1); 4211 } 4212 4213 int 4214 page_subclaim_pages(page_t **ppa) 4215 { 4216 pgcnt_t ulckpgs = 0, pg_idx; 4217 4218 VM_STAT_ADD(pagecnt.pc_subclaim_pages); 4219 4220 mutex_enter(&page_llock); 4221 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4222 4223 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4224 ASSERT(ppa[pg_idx]->p_cowcnt != 0); 4225 if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4226 mutex_exit(&page_llock); 4227 return (0); 4228 } 4229 if (ppa[pg_idx]->p_lckcnt != 0) 4230 ulckpgs++; 4231 } 4232 4233 if (ulckpgs != 0) { 4234 mutex_enter(&freemem_lock); 4235 availrmem += ulckpgs; 4236 pages_claimed -= ulckpgs; 4237 mutex_exit(&freemem_lock); 4238 } 4239 4240 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4241 ppa[pg_idx]->p_cowcnt--; 4242 ppa[pg_idx]->p_lckcnt++; 4243 4244 } 4245 mutex_exit(&page_llock); 4246 return (1); 4247 } 4248 4249 page_t * 4250 page_numtopp(pfn_t pfnum, se_t se) 4251 { 4252 page_t *pp; 4253 4254 retry: 4255 pp = page_numtopp_nolock(pfnum); 4256 if (pp == NULL) { 4257 return ((page_t *)NULL); 4258 } 4259 4260 /* 4261 * Acquire the appropriate lock on the page. 4262 */ 4263 while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) { 4264 if (page_pptonum(pp) != pfnum) 4265 goto retry; 4266 continue; 4267 } 4268 4269 if (page_pptonum(pp) != pfnum) { 4270 page_unlock(pp); 4271 goto retry; 4272 } 4273 4274 return (pp); 4275 } 4276 4277 page_t * 4278 page_numtopp_noreclaim(pfn_t pfnum, se_t se) 4279 { 4280 page_t *pp; 4281 4282 retry: 4283 pp = page_numtopp_nolock(pfnum); 4284 if (pp == NULL) { 4285 return ((page_t *)NULL); 4286 } 4287 4288 /* 4289 * Acquire the appropriate lock on the page. 4290 */ 4291 while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) { 4292 if (page_pptonum(pp) != pfnum) 4293 goto retry; 4294 continue; 4295 } 4296 4297 if (page_pptonum(pp) != pfnum) { 4298 page_unlock(pp); 4299 goto retry; 4300 } 4301 4302 return (pp); 4303 } 4304 4305 /* 4306 * This routine is like page_numtopp, but will only return page structs 4307 * for pages which are ok for loading into hardware using the page struct. 4308 */ 4309 page_t * 4310 page_numtopp_nowait(pfn_t pfnum, se_t se) 4311 { 4312 page_t *pp; 4313 4314 retry: 4315 pp = page_numtopp_nolock(pfnum); 4316 if (pp == NULL) { 4317 return ((page_t *)NULL); 4318 } 4319 4320 /* 4321 * Try to acquire the appropriate lock on the page. 
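 * (The pfn is re-checked after a successful trylock, as in page_numtopp() above, because a page_t can change identity, e.g. during page relocation, between the unlocked lookup and the lock.)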
	 */
	if (PP_ISFREE(pp))
		pp = NULL;
	else {
		if (!page_trylock(pp, se))
			pp = NULL;
		else {
			if (page_pptonum(pp) != pfnum) {
				page_unlock(pp);
				goto retry;
			}
			if (PP_ISFREE(pp)) {
				page_unlock(pp);
				pp = NULL;
			}
		}
	}
	return (pp);
}

/*
 * Returns a count of dirty pages that are in the process
 * of being written out.  If 'cleanit' is set, try to push the page.
 */
pgcnt_t
page_busy(int cleanit)
{
	page_t *page0 = page_first();
	page_t *pp = page0;
	pgcnt_t nppbusy = 0;
	u_offset_t off;

	do {
		vnode_t *vp = pp->p_vnode;

		/*
		 * A page is a candidate for syncing if it is:
		 *
		 * (a)	On neither the freelist nor the cachelist
		 * (b)	Hashed onto a vnode
		 * (c)	Not a kernel page
		 * (d)	Dirty
		 * (e)	Not part of a swapfile
		 * (f)	A page which belongs to a real vnode, e.g. has a
		 *	non-null v_vfsp pointer
		 * (g)	Backed by a filesystem which doesn't have a
		 *	stubbed-out sync operation
		 */
		if (!PP_ISFREE(pp) && vp != NULL && !VN_ISKAS(vp) &&
		    hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL &&
		    vfs_can_sync(vp->v_vfsp)) {
			nppbusy++;
			vfs_syncprogress();

			if (!cleanit)
				continue;
			if (!page_trylock(pp, SE_EXCL))
				continue;

			if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) ||
			    pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
			    !(hat_pagesync(pp,
			    HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) {
				page_unlock(pp);
				continue;
			}
			off = pp->p_offset;
			VN_HOLD(vp);
			page_unlock(pp);
			(void) VOP_PUTPAGE(vp, off, PAGESIZE,
			    B_ASYNC | B_FREE, kcred);
			VN_RELE(vp);
		}
	} while ((pp = page_next(pp)) != page0);

	return (nppbusy);
}

void page_invalidate_pages(void);

/*
 * Callback handler for the VM subsystem.
 *
 * Callers must ensure there are no recursive entries to this function.
 */
/*ARGSUSED*/
boolean_t
callb_vm_cpr(void *arg, int code)
{
	if (code == CB_CODE_CPR_CHKPT)
		page_invalidate_pages();
	return (B_TRUE);
}

/*
 * Invalidate all pages of the system.
 * It should not be called until all user page activity has stopped.
 */
void
page_invalidate_pages()
{
	page_t *pp;
	page_t *page0;
	pgcnt_t nbusypages;
	int retry = 0;
	const int MAXRETRIES = 4;
#if defined(__sparc)
	extern struct vnode prom_ppages;
#endif /* __sparc */

top:
	/*
	 * Flush dirty pages and destroy the clean ones.
	 */
	nbusypages = 0;

	pp = page0 = page_first();
	do {
		struct vnode	*vp;
		u_offset_t	offset;
		int		mod;

		/*
		 * Skip the page if it has no vnode, or is associated with
		 * the kernel vnode or with prom-allocated kernel memory.
		 */
#if defined(__sparc)
		if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp) ||
		    vp == &prom_ppages)
#else /* x86 doesn't have prom or prom_ppage */
		if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp))
#endif /* __sparc */
			continue;

		/*
		 * Skip the page if it has already been invalidated and freed.
		 */
		if (PP_ISFREE(pp) && PP_ISAGED(pp))
			continue;

		/*
		 * Skip pages that are already locked or can't be "exclusively"
		 * locked or are already free.
		 * After we lock the page, check
		 * the free and age bits again to be sure it's not destroyed
		 * yet.
		 * To achieve max. parallelization, we use page_trylock instead
		 * of page_lock so that we don't get blocked on individual
		 * pages while we have thousands of other pages to process.
		 */
		if (!page_trylock(pp, SE_EXCL)) {
			nbusypages++;
			continue;
		} else if (PP_ISFREE(pp)) {
			if (!PP_ISAGED(pp)) {
				page_destroy_free(pp);
			} else {
				page_unlock(pp);
			}
			continue;
		}
		/*
		 * Is this page involved in some I/O? shared?
		 *
		 * The page_struct_lock need not be acquired to
		 * examine these fields since the page has an
		 * "exclusive" lock.
		 */
		if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
			page_unlock(pp);
			continue;
		}

		if (vp->v_type == VCHR) {
			panic("vp->v_type == VCHR");
			/*NOTREACHED*/
		}

		if (!page_try_demote_pages(pp)) {
			page_unlock(pp);
			continue;
		}

		/*
		 * Check the modified bit. Leave the bits alone in hardware
		 * (they will be modified if we do the putpage).
		 */
		mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD)
		    & P_MOD);
		if (mod) {
			offset = pp->p_offset;
			/*
			 * Hold the vnode before releasing the page lock
			 * to prevent it from being freed and re-used by
			 * some other thread.
			 */
			VN_HOLD(vp);
			page_unlock(pp);
			/*
			 * No error return is checked here. Callers such as
			 * cpr deal with the dirty pages at the dump time
			 * if this putpage fails.
			 */
			(void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL,
			    kcred);
			VN_RELE(vp);
		} else {
			page_destroy(pp, 0);
		}
	} while ((pp = page_next(pp)) != page0);
	if (nbusypages && retry++ < MAXRETRIES) {
		delay(1);
		goto top;
	}
}

/*
 * Replace the page "old" with the page "new" on the page hash and vnode lists.
 *
 * The replacement must be done in place, i.e. the equivalent sequence:
 *
 *	vp = old->p_vnode;
 *	off = old->p_offset;
 *	page_do_hashout(old)
 *	page_do_hashin(new, vp, off)
 *
 * doesn't work, since
 *  1) if old is the only page on the vnode, the v_pages list has a window
 *     where it looks empty.  This will break file system assumptions.
 * and
 *  2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list.
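 *
 * Hence the splice below is done "in place": 'new' is linked into the
 * hash chain and the vnode's v_pages list in exactly the slots 'old'
 * occupied, while both the page hash mutex and the vnode page mutex
 * are held, so neither list is ever observed in an intermediate state.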
 */
static void
page_do_relocate_hash(page_t *new, page_t *old)
{
	page_t	**hash_list;
	vnode_t	*vp = old->p_vnode;
	kmutex_t *sep;

	ASSERT(PAGE_EXCL(old));
	ASSERT(PAGE_EXCL(new));
	ASSERT(vp != NULL);
	ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
	ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset))));

	/*
	 * First find old page on the page hash list
	 */
	hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)];

	for (;;) {
		if (*hash_list == old)
			break;
		if (*hash_list == NULL) {
			panic("page_do_hashout");
			/*NOTREACHED*/
		}
		hash_list = &(*hash_list)->p_hash;
	}

	/*
	 * update new and replace old with new on the page hash list
	 */
	new->p_vnode = old->p_vnode;
	new->p_offset = old->p_offset;
	new->p_hash = old->p_hash;
	*hash_list = new;

	if ((new->p_vnode->v_flag & VISSWAP) != 0)
		PP_SETSWAP(new);

	/*
	 * replace old with new on the vnode's page list
	 */
	if (old->p_vpnext == old) {
		new->p_vpnext = new;
		new->p_vpprev = new;
	} else {
		new->p_vpnext = old->p_vpnext;
		new->p_vpprev = old->p_vpprev;
		new->p_vpnext->p_vpprev = new;
		new->p_vpprev->p_vpnext = new;
	}
	if (vp->v_pages == old)
		vp->v_pages = new;

	/*
	 * clear out the old page
	 */
	old->p_hash = NULL;
	old->p_vpnext = NULL;
	old->p_vpprev = NULL;
	old->p_vnode = NULL;
	PP_CLRSWAP(old);
	old->p_offset = (u_offset_t)-1;
	page_clr_all_props(old);

	/*
	 * Wake up processes waiting for this page.  The page's
	 * identity has been changed, and is probably not the
	 * desired page any longer.
	 */
	sep = page_se_mutex(old);
	mutex_enter(sep);
	old->p_selock &= ~SE_EWANTED;
	if (CV_HAS_WAITERS(&old->p_cv))
		cv_broadcast(&old->p_cv);
	mutex_exit(sep);
}

/*
 * This function moves the identity of page "pp_old" to page "pp_new".
 * Both pages must be locked on entry.  "pp_new" is free, has no identity,
 * and need not be hashed out from anywhere.
 */
void
page_relocate_hash(page_t *pp_new, page_t *pp_old)
{
	vnode_t *vp = pp_old->p_vnode;
	u_offset_t off = pp_old->p_offset;
	kmutex_t *phm, *vphm;

	/*
	 * Rehash two pages
	 */
	ASSERT(PAGE_EXCL(pp_old));
	ASSERT(PAGE_EXCL(pp_new));
	ASSERT(vp != NULL);
	ASSERT(pp_new->p_vnode == NULL);

	/*
	 * hashout then hashin while holding the mutexes
	 */
	phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off));
	mutex_enter(phm);
	vphm = page_vnode_mutex(vp);
	mutex_enter(vphm);

	page_do_relocate_hash(pp_new, pp_old);

	mutex_exit(vphm);
	mutex_exit(phm);

	/*
	 * The page_struct_lock need not be acquired for lckcnt and
	 * cowcnt since the page has an "exclusive" lock.
	 */
	ASSERT(pp_new->p_lckcnt == 0);
	ASSERT(pp_new->p_cowcnt == 0);
	pp_new->p_lckcnt = pp_old->p_lckcnt;
	pp_new->p_cowcnt = pp_old->p_cowcnt;
	pp_old->p_lckcnt = pp_old->p_cowcnt = 0;

	/* The following comment preserved from page_flip(). */
	/* XXX - Do we need to protect fsdata? */
	pp_new->p_fsdata = pp_old->p_fsdata;
}

/*
 * Helper routine used to lock all remaining members of a
 * large page. The caller is responsible for passing in a locked
 * pp.
 * If pp is a large page, then it succeeds in locking all the
 * remaining constituent pages or it returns with only the
 * original page locked.
 *
 * Returns 1 on success, 0 on failure.
 *
 * If success is returned this routine guarantees p_szc for all constituent
 * pages of the large page pp belongs to can't change. To achieve this we
 * recheck szc of pp after locking all constituent pages and retry if szc
 * changed (it could only decrease). Since hat_page_demote() needs an EXCL
 * lock on one of constituent pages it can't be running after all constituent
 * pages are locked.  hat_page_demote() with a lock on a constituent page
 * outside of this large page (i.e. pp belonged to a larger large page) is
 * already done with all constituent pages of pp since the root's p_szc is
 * changed last. Therefore there is no need to synchronize with a
 * hat_page_demote() that locked a constituent page outside of pp's current
 * large page.
 */
#ifdef DEBUG
uint32_t gpg_trylock_mtbf = 0;
#endif

int
group_page_trylock(page_t *pp, se_t se)
{
	page_t	*tpp;
	pgcnt_t	npgs, i, j;
	uint_t	pszc = pp->p_szc;

#ifdef DEBUG
	if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) {
		return (0);
	}
#endif

	if (pp != PP_GROUPLEADER(pp, pszc)) {
		return (0);
	}

retry:
	ASSERT(PAGE_LOCKED_SE(pp, se));
	ASSERT(!PP_ISFREE(pp));
	if (pszc == 0) {
		return (1);
	}
	npgs = page_get_pagecnt(pszc);
	tpp = pp + 1;
	for (i = 1; i < npgs; i++, tpp++) {
		if (!page_trylock(tpp, se)) {
			tpp = pp + 1;
			for (j = 1; j < i; j++, tpp++) {
				page_unlock(tpp);
			}
			return (0);
		}
	}
	if (pp->p_szc != pszc) {
		ASSERT(pp->p_szc < pszc);
		ASSERT(pp->p_vnode != NULL && !PP_ISKAS(pp) &&
		    !IS_SWAPFSVP(pp->p_vnode));
		tpp = pp + 1;
		for (i = 1; i < npgs; i++, tpp++) {
			page_unlock(tpp);
		}
		pszc = pp->p_szc;
		goto retry;
	}
	return (1);
}

void
group_page_unlock(page_t *pp)
{
	page_t *tpp;
	pgcnt_t	npgs, i;

	ASSERT(PAGE_LOCKED(pp));
	ASSERT(!PP_ISFREE(pp));
	ASSERT(pp == PP_PAGEROOT(pp));
	npgs = page_get_pagecnt(pp->p_szc);
	for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) {
		page_unlock(tpp);
	}
}

/*
 * returns
 * 0	  : on success and *nrelocp is number of relocated PAGESIZE pages
 * ERANGE : this is not a base page
 * EBUSY  : failure to get locks on the page/pages
 * ENOMEM : failure to obtain replacement pages
 * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel
 * EIO	  : An error occurred while trying to copy the page data
 *
 * Return with all constituent members of target and replacement
 * SE_EXCL locked. It is the caller's responsibility to drop the
 * locks.
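 *
 * A minimal sketch of a caller (illustrative only, not taken verbatim
 * from any consumer of this interface):
 *
 *	page_t *repl = NULL;	(let do_page_relocate allocate them)
 *	spgcnt_t nreloc;
 *
 *	if (do_page_relocate(&targ, &repl, 1, &nreloc, NULL) == 0) {
 *		... nreloc PAGESIZE pages were relocated; drop the
 *		... SE_EXCL locks on target and replacement when done.
 *	}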
 */
int
do_page_relocate(
	page_t **target,
	page_t **replacement,
	int grouplock,
	spgcnt_t *nrelocp,
	lgrp_t *lgrp)
{
	page_t *first_repl;
	page_t *repl;
	page_t *targ;
	page_t *pl = NULL;
	uint_t ppattr;
	pfn_t pfn, repl_pfn;
	uint_t	szc;
	spgcnt_t npgs, i;
	int repl_contig = 0;
	uint_t flags = 0;
	spgcnt_t dofree = 0;

	*nrelocp = 0;

#if defined(__sparc)
	/*
	 * We need to wait till OBP has completed
	 * its boot-time handoff of its resources to the kernel
	 * before we allow page relocation
	 */
	if (page_relocate_ready == 0) {
		return (EAGAIN);
	}
#endif

	/*
	 * If this is not a base page,
	 * just return with 0x0 pages relocated.
	 */
	targ = *target;
	ASSERT(PAGE_EXCL(targ));
	ASSERT(!PP_ISFREE(targ));
	szc = targ->p_szc;
	ASSERT(szc < mmu_page_sizes);
	VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]);
	pfn = targ->p_pagenum;
	if (pfn != PFN_BASE(pfn, szc)) {
		VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]);
		return (ERANGE);
	}

	if ((repl = *replacement) != NULL && repl->p_szc >= szc) {
		repl_pfn = repl->p_pagenum;
		if (repl_pfn != PFN_BASE(repl_pfn, szc)) {
			VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]);
			return (ERANGE);
		}
		repl_contig = 1;
	}

	/*
	 * We must lock all members of this large page or we cannot
	 * relocate any part of it.
	 */
	if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) {
		VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]);
		return (EBUSY);
	}

	/*
	 * Reread szc; it could have been decreased before
	 * group_page_trylock() was done.
	 */
	szc = targ->p_szc;
	ASSERT(szc < mmu_page_sizes);
	VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]);
	ASSERT(pfn == PFN_BASE(pfn, szc));

	npgs = page_get_pagecnt(targ->p_szc);

	if (repl == NULL) {
		dofree = npgs;		/* Size of target page in MMU pages */
		if (!page_create_wait(dofree, 0)) {
			if (grouplock != 0) {
				group_page_unlock(targ);
			}
			VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]);
			return (ENOMEM);
		}

		/*
		 * seg kmem pages require that the target and replacement
		 * page be the same pagesize.
		 */
		flags = (VN_ISKAS(targ->p_vnode)) ?
		    PGR_SAMESZC : 0;
		repl = page_get_replacement_page(targ, lgrp, flags);
		if (repl == NULL) {
			if (grouplock != 0) {
				group_page_unlock(targ);
			}
			page_create_putback(dofree);
			VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]);
			return (ENOMEM);
		}
	}
#ifdef DEBUG
	else {
		ASSERT(PAGE_LOCKED(repl));
	}
#endif /* DEBUG */

#if defined(__sparc)
	/*
	 * Let hat_page_relocate() complete the relocation if it's a kernel
	 * page
	 */
	if (VN_ISKAS(targ->p_vnode)) {
		*replacement = repl;
		if (hat_page_relocate(target, replacement, nrelocp) != 0) {
			if (grouplock != 0) {
				group_page_unlock(targ);
			}
			if (dofree) {
				*replacement = NULL;
				page_free_replacement_page(repl);
				page_create_putback(dofree);
			}
			VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]);
			return (EAGAIN);
		}
		VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]);
		return (0);
	}
#else
#if defined(lint)
	dofree = dofree;
#endif
#endif

	first_repl = repl;

	for (i = 0; i < npgs; i++) {
		ASSERT(PAGE_EXCL(targ));
		ASSERT(targ->p_slckcnt == 0);
		ASSERT(repl->p_slckcnt == 0);

		(void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD);

		ASSERT(hat_page_getshare(targ) == 0);
		ASSERT(!PP_ISFREE(targ));
		ASSERT(targ->p_pagenum == (pfn + i));
		ASSERT(repl_contig == 0 ||
		    repl->p_pagenum == (repl_pfn + i));

		/*
		 * Copy the page contents and attributes then
		 * relocate the page in the page hash.
		 */
		if (ppcopy(targ, repl) == 0) {
			targ = *target;
			repl = first_repl;
			VM_STAT_ADD(vmm_vmstats.ppr_copyfail);
			if (grouplock != 0) {
				group_page_unlock(targ);
			}
			if (dofree) {
				*replacement = NULL;
				page_free_replacement_page(repl);
				page_create_putback(dofree);
			}
			return (EIO);
		}

		targ++;
		if (repl_contig != 0) {
			repl++;
		} else {
			repl = repl->p_next;
		}
	}

	repl = first_repl;
	targ = *target;

	for (i = 0; i < npgs; i++) {
		ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO));
		page_clr_all_props(repl);
		page_set_props(repl, ppattr);
		page_relocate_hash(repl, targ);

		ASSERT(hat_page_getshare(targ) == 0);
		ASSERT(hat_page_getshare(repl) == 0);
		/*
		 * Now clear the props on targ, after the
		 * page_relocate_hash(), they no longer
		 * have any meaning.
		 */
		page_clr_all_props(targ);
		ASSERT(targ->p_next == targ);
		ASSERT(targ->p_prev == targ);
		page_list_concat(&pl, &targ);

		targ++;
		if (repl_contig != 0) {
			repl++;
		} else {
			repl = repl->p_next;
		}
	}
	/* assert that we have come full circle with repl */
	ASSERT(repl_contig == 1 || first_repl == repl);

	*target = pl;
	if (*replacement == NULL) {
		ASSERT(first_repl == repl);
		*replacement = repl;
	}
	VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]);
	*nrelocp = npgs;
	return (0);
}
/*
 * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated.
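 * If 'freetarget' is set, the target page(s) are also freed on
 * success: a single page via page_free(), a large page by marking
 * every constituent PP_SETFREE/PP_SETAGED and putting the whole group
 * back on the free list with page_list_add_pages().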
 */
int
page_relocate(
	page_t **target,
	page_t **replacement,
	int grouplock,
	int freetarget,
	spgcnt_t *nrelocp,
	lgrp_t *lgrp)
{
	spgcnt_t ret;

	/* do_page_relocate returns 0 on success or errno value */
	ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp);

	if (ret != 0 || freetarget == 0) {
		return (ret);
	}
	if (*nrelocp == 1) {
		ASSERT(*target != NULL);
		page_free(*target, 1);
	} else {
		page_t *tpp = *target;
		uint_t szc = tpp->p_szc;
		pgcnt_t npgs = page_get_pagecnt(szc);
		ASSERT(npgs > 1);
		ASSERT(szc != 0);
		do {
			ASSERT(PAGE_EXCL(tpp));
			ASSERT(!hat_page_is_mapped(tpp));
			ASSERT(tpp->p_szc == szc);
			PP_SETFREE(tpp);
			PP_SETAGED(tpp);
			npgs--;
		} while ((tpp = tpp->p_next) != *target);
		ASSERT(npgs == 0);
		page_list_add_pages(*target, 0);
		npgs = page_get_pagecnt(szc);
		page_create_putback(npgs);
	}
	return (ret);
}

/*
 * It is up to the caller to deal with pcf accounting.
 */
void
page_free_replacement_page(page_t *pplist)
{
	page_t *pp;

	while (pplist != NULL) {
		/*
		 * pp_targ is a linked list.
		 */
		pp = pplist;
		if (pp->p_szc == 0) {
			page_sub(&pplist, pp);
			page_clr_all_props(pp);
			PP_SETFREE(pp);
			PP_SETAGED(pp);
			page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
			page_unlock(pp);
			VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]);
		} else {
			spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc);
			page_t *tpp;
			page_list_break(&pp, &pplist, curnpgs);
			tpp = pp;
			do {
				ASSERT(PAGE_EXCL(tpp));
				ASSERT(!hat_page_is_mapped(tpp));
				page_clr_all_props(tpp);
				PP_SETFREE(tpp);
				PP_SETAGED(tpp);
			} while ((tpp = tpp->p_next) != pp);
			page_list_add_pages(pp, 0);
			VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]);
		}
	}
}

/*
 * Relocate target to non-relocatable replacement page.
 */
int
page_relocate_cage(page_t **target, page_t **replacement)
{
	page_t *tpp, *rpp;
	spgcnt_t pgcnt, npgs;
	int result;

	tpp = *target;

	ASSERT(PAGE_EXCL(tpp));
	ASSERT(tpp->p_szc == 0);

	pgcnt = btop(page_get_pagesize(tpp->p_szc));

	do {
		(void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC);
		rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC);
		if (rpp == NULL) {
			page_create_putback(pgcnt);
			kcage_cageout_wakeup();
		}
	} while (rpp == NULL);

	ASSERT(PP_ISNORELOC(rpp));

	result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL);

	if (result == 0) {
		*replacement = rpp;
		if (pgcnt != npgs)
			panic("page_relocate_cage: partial relocation");
	}

	return (result);
}

/*
 * Release the page lock on a page, place on cachelist
 * tail if no longer mapped. Caller can let us know if
 * the page is known to be clean.
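 *
 * A hypothetical caller would typically dispatch on the return value,
 * e.g.:
 *
 *	switch (page_release(pp, 1)) {
 *	case PGREL_CLEAN:	(clean page was freed for reuse)
 *	case PGREL_MOD:		(page is dirty and still needs a push)
 *	case PGREL_NOTREL:	(page was only unlocked, not released)
 *	}
 *
 * In all cases the page lock has been dropped by the time we return.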
 */
int
page_release(page_t *pp, int checkmod)
{
	int status;

	ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) &&
	    (pp->p_vnode != NULL));

	if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) &&
	    ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) &&
	    pp->p_lckcnt == 0 && pp->p_cowcnt == 0 &&
	    !hat_page_is_mapped(pp)) {

		/*
		 * If page is modified, unlock it
		 *
		 * (p_nrm & P_MOD) bit has the latest stuff because:
		 * (1) We found that this page doesn't have any mappings
		 *	_after_ holding SE_EXCL and
		 * (2) We didn't drop SE_EXCL lock after the check in (1)
		 */
		if (checkmod && hat_ismod(pp)) {
			page_unlock(pp);
			status = PGREL_MOD;
		} else {
			/*LINTED: constant in conditional context*/
			VN_DISPOSE(pp, B_FREE, 0, kcred);
			status = PGREL_CLEAN;
		}
	} else {
		page_unlock(pp);
		status = PGREL_NOTREL;
	}
	return (status);
}

/*
 * Given a constituent page, try to demote the large page on the freelist.
 *
 * Returns nonzero if the page could be demoted successfully. Returns with
 * the constituent page still locked.
 */
int
page_try_demote_free_pages(page_t *pp)
{
	page_t *rootpp = pp;
	pfn_t	pfn = page_pptonum(pp);
	spgcnt_t npgs;
	uint_t	szc = pp->p_szc;

	ASSERT(PP_ISFREE(pp));
	ASSERT(PAGE_EXCL(pp));

	/*
	 * Adjust rootpp and lock it, if `pp' is not the base
	 * constituent page.
	 */
	npgs = page_get_pagecnt(pp->p_szc);
	if (npgs == 1) {
		return (0);
	}

	if (!IS_P2ALIGNED(pfn, npgs)) {
		pfn = P2ALIGN(pfn, npgs);
		rootpp = page_numtopp_nolock(pfn);
	}

	if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) {
		return (0);
	}

	if (rootpp->p_szc != szc) {
		if (pp != rootpp)
			page_unlock(rootpp);
		return (0);
	}

	page_demote_free_pages(rootpp);

	if (pp != rootpp)
		page_unlock(rootpp);

	ASSERT(PP_ISFREE(pp));
	ASSERT(PAGE_EXCL(pp));
	return (1);
}

/*
 * Given a constituent page, try to demote the large page.
 *
 * Returns nonzero if the page could be demoted successfully. Returns with
 * the constituent page still locked.
 */
int
page_try_demote_pages(page_t *pp)
{
	page_t *tpp, *rootpp = pp;
	pfn_t	pfn = page_pptonum(pp);
	spgcnt_t i, npgs;
	uint_t	szc = pp->p_szc;
	vnode_t *vp = pp->p_vnode;

	ASSERT(PAGE_EXCL(pp));

	VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]);

	if (pp->p_szc == 0) {
		VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]);
		return (1);
	}

	if (vp != NULL && !IS_SWAPFSVP(vp) && !VN_ISKAS(vp)) {
		VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]);
		page_demote_vp_pages(pp);
		ASSERT(pp->p_szc == 0);
		return (1);
	}

	/*
	 * Adjust rootpp if the passed-in page is not the base
	 * constituent page.
	 */
	npgs = page_get_pagecnt(pp->p_szc);
	ASSERT(npgs > 1);
	if (!IS_P2ALIGNED(pfn, npgs)) {
		pfn = P2ALIGN(pfn, npgs);
		rootpp = page_numtopp_nolock(pfn);
		VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]);
		ASSERT(rootpp->p_vnode != NULL);
		ASSERT(rootpp->p_szc == szc);
	}

	/*
	 * We can't demote kernel pages since we can't hat_unload()
	 * the mappings.
	 */
	if (VN_ISKAS(rootpp->p_vnode))
		return (0);

	/*
	 * Attempt to lock all constituent pages except the page passed
	 * in since it's already locked.
	 */
	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
		ASSERT(!PP_ISFREE(tpp));
		ASSERT(tpp->p_vnode != NULL);

		if (tpp != pp && !page_trylock(tpp, SE_EXCL))
			break;
		ASSERT(tpp->p_szc == rootpp->p_szc);
		ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i);
	}

	/*
	 * If we failed to lock them all then unlock what we have
	 * locked so far and bail.
	 */
	if (i < npgs) {
		tpp = rootpp;
		while (i-- > 0) {
			if (tpp != pp)
				page_unlock(tpp);
			tpp++;
		}
		VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]);
		return (0);
	}

	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
		ASSERT(PAGE_EXCL(tpp));
		ASSERT(tpp->p_slckcnt == 0);
		(void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
		tpp->p_szc = 0;
	}

	/*
	 * Unlock all pages except the page passed in.
	 */
	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
		ASSERT(!hat_page_is_mapped(tpp));
		if (tpp != pp)
			page_unlock(tpp);
	}

	VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]);
	return (1);
}

/*
 * Called by page_free() and page_destroy() to demote the page size code
 * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non-zero
 * p_szc on the free list, and neither can we just clear p_szc of a single
 * page_t within a large page, since that would break other code that relies
 * on p_szc being the same for all page_t's of a large page). Anonymous pages
 * should never end up here because anon_map_getpages() cannot deal with p_szc
 * changes after a single constituent page is locked. While anonymous and
 * kernel large pages are demoted or freed an entire large page at a time
 * with all constituent pages locked EXCL, for file system pages we have to
 * be able to demote a large page (i.e. decrease the p_szc of all constituent
 * pages) with just an EXCL lock on one of the constituent pages. The reason
 * we can easily deal with anonymous page demotion an entire large page at a
 * time is that those operations originate at the address space level and
 * concern the entire large page region, with the actual demotion only done
 * when pages are not shared with any other processes (therefore we can
 * always get an EXCL lock on all anonymous constituent pages after clearing
 * the segment page cache). However, file system pages can be truncated or
 * invalidated at a PAGESIZE level from the file system side and end up in
 * page_free() or page_destroy() (we also allow only part of the large page
 * to be SOFTLOCKed, and therefore pageout should be able to demote a large
 * page by EXCL locking any constituent page that is not under SOFTLOCK). In
 * those cases we cannot rely on being able to lock EXCL all constituent
 * pages.
 *
 * To prevent szc changes on file system pages one has to lock all constituent
 * pages at least SHARED (or call page_szc_lock()). The only subsystem that
 * doesn't rely on locking all constituent pages (or on page_szc_lock()) to
 * prevent szc changes is the hat layer, which uses its own page-level mlist
 * locks. The hat assumes that szc doesn't change after the mlist lock for a
 * page is taken.
 * Therefore we need to change szc under hat-level locks if we only
 * have an EXCL lock on a single constituent page and the hat still references
 * any of the constituent pages.  (Note we can't "ignore" the hat layer by
 * simply doing hat_pageunload() on all constituent pages without having EXCL
 * locks on all of them.)  We use the hat_page_demote() call to safely demote
 * the szc of all constituent pages under hat locks when we only have an EXCL
 * lock on one of the constituent pages.
 *
 * This routine calls page_szc_lock() before calling hat_page_demote() to
 * allow segvn, in one special case, not to lock all constituent pages SHARED
 * before calling hat_memload_array(), which relies on p_szc not changing even
 * before the hat-level mlist lock is taken. In that case segvn uses
 * page_szc_lock() to prevent hat_page_demote() from changing p_szc values.
 *
 * Anonymous and kernel page demotion still has to lock all pages exclusively
 * and do hat_pageunload() on all constituent pages before demoting the page;
 * therefore there's no need for anonymous or kernel page demotion to use the
 * hat_page_demote() mechanism.
 *
 * hat_page_demote() removes all large mappings that map pp and then decreases
 * p_szc starting from the last constituent page of the large page. Working
 * from the tail of a large page in decreasing pfn order allows one looking at
 * the root page to know that hat_page_demote() is done for the root's szc
 * area; e.g. if a root page has szc 1, one knows it only has to lock all
 * constituent pages within the szc 1 area to prevent szc changes, because a
 * hat_page_demote() that started on this page when it had szc > 1 is done for
 * this szc 1 area.
 *
 * We are guaranteed that all constituent pages of pp's large page belong to
 * the same vnode with consecutive offsets increasing in the direction of
 * the pfn, i.e. the identity of constituent pages can't change until their
 * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove
 * large mappings to pp even though we don't lock any constituent page except
 * pp (i.e. we won't unload e.g. a kernel locked page).
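 *
 * As an illustrative example (the sizes are hypothetical): with 8K base
 * pages, a szc 1 (64K) region has 8 constituents.  Because
 * hat_page_demote() lowers p_szc from the last constituent toward the
 * root, a thread that finds the root's p_szc equal to 1 knows that any
 * demotion which began while the page was part of a still larger page
 * has already completed for this 64K area.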
 */
static void
page_demote_vp_pages(page_t *pp)
{
	kmutex_t *mtx;

	ASSERT(PAGE_EXCL(pp));
	ASSERT(!PP_ISFREE(pp));
	ASSERT(pp->p_vnode != NULL);
	ASSERT(!IS_SWAPFSVP(pp->p_vnode));
	ASSERT(!PP_ISKAS(pp));

	VM_STAT_ADD(pagecnt.pc_demote_pages[0]);

	mtx = page_szc_lock(pp);
	if (mtx != NULL) {
		hat_page_demote(pp);
		mutex_exit(mtx);
	}
	ASSERT(pp->p_szc == 0);
}

/*
 * Mark any existing pages for migration in the given range
 */
void
page_mark_migrate(struct seg *seg, caddr_t addr, size_t len,
    struct anon_map *amp, ulong_t anon_index, vnode_t *vp,
    u_offset_t vnoff, int rflag)
{
	struct anon	*ap;
	vnode_t		*curvp;
	lgrp_t		*from;
	pgcnt_t		i;
	pgcnt_t		nlocked;
	u_offset_t	off;
	pfn_t		pfn;
	size_t		pgsz;
	size_t		segpgsz;
	pgcnt_t		pages;
	uint_t		pszc;
	page_t		**ppa;
	pgcnt_t		ppa_nentries;
	page_t		*pp;
	caddr_t		va;
	ulong_t		an_idx;
	anon_sync_obj_t	cookie;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * Don't do anything if we don't need to do lgroup optimizations
	 * on this system
	 */
	if (!lgrp_optimizations())
		return;

	/*
	 * Align address and length to (potentially large) page boundary
	 */
	segpgsz = page_get_pagesize(seg->s_szc);
	addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz);
	if (rflag)
		len = P2ROUNDUP(len, segpgsz);

	/*
	 * Allocate page array to accommodate largest page size
	 */
	pgsz = page_get_pagesize(page_num_pagesizes() - 1);
	ppa_nentries = btop(pgsz);
	ppa = kmem_zalloc(ppa_nentries * sizeof (page_t *), KM_SLEEP);

	/*
	 * Do one (large) page at a time
	 */
	va = addr;
	while (va < addr + len) {
		/*
		 * Lookup (root) page for vnode and offset corresponding to
		 * this virtual address
		 * Try anonmap first since there may be copy-on-write
		 * pages, but initialize vnode pointer and offset using
		 * vnode arguments just in case there isn't an amp.
		 */
		curvp = vp;
		off = vnoff + va - seg->s_base;
		if (amp) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			an_idx = anon_index + seg_page(seg, va);
			anon_array_enter(amp, an_idx, &cookie);
			ap = anon_get_ptr(amp->ahp, an_idx);
			if (ap)
				swap_xlate(ap, &curvp, &off);
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}

		pp = NULL;
		if (curvp)
			pp = page_lookup(curvp, off, SE_SHARED);

		/*
		 * If there isn't a page at this virtual address,
		 * skip to next page
		 */
		if (pp == NULL) {
			va += PAGESIZE;
			continue;
		}

		/*
		 * Figure out which lgroup this page is in for kstats
		 */
		pfn = page_pptonum(pp);
		from = lgrp_pfn_to_lgrp(pfn);

		/*
		 * Get page size, and round up and skip to next page boundary
		 * if unaligned address
		 */
		pszc = pp->p_szc;
		pgsz = page_get_pagesize(pszc);
		pages = btop(pgsz);
		if (!IS_P2ALIGNED(va, pgsz) ||
		    !IS_P2ALIGNED(pfn, pages) ||
		    pgsz > segpgsz) {
			pgsz = MIN(pgsz, segpgsz);
			page_unlock(pp);
			i = btop(P2END((uintptr_t)va, pgsz) -
			    (uintptr_t)va);
			va = (caddr_t)P2END((uintptr_t)va, pgsz);
			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, i);
			continue;
		}

		/*
		 * Upgrade to exclusive lock on page
		 */
		if (!page_tryupgrade(pp)) {
			page_unlock(pp);
			va += pgsz;
			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
			    btop(pgsz));
			continue;
		}

		/*
		 * Remember pages locked exclusively and how many
		 */
		ppa[0] = pp;
		nlocked = 1;

		/*
		 * Lock constituent pages if this is large page
		 */
		if (pages > 1) {
			/*
			 * Lock all constituents except root page, since it
			 * should be locked already.
			 */
			for (i = 1; i < pages; i++) {
				pp++;
				if (!page_trylock(pp, SE_EXCL)) {
					break;
				}
				if (PP_ISFREE(pp) ||
				    pp->p_szc != pszc) {
					/*
					 * hat_page_demote() raced in with us.
					 */
					ASSERT(!IS_SWAPFSVP(curvp));
					page_unlock(pp);
					break;
				}
				ppa[nlocked] = pp;
				nlocked++;
			}
		}

		/*
		 * If all constituent pages couldn't be locked,
		 * unlock pages locked so far and skip to next page.
		 */
		if (nlocked != pages) {
			for (i = 0; i < nlocked; i++)
				page_unlock(ppa[i]);
			va += pgsz;
			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
			    btop(pgsz));
			continue;
		}

		/*
		 * hat_page_demote() can no longer happen
		 * since the last cons page had the right p_szc after
		 * all cons pages were locked. All cons pages
		 * should now have the same p_szc.
		 */

		/*
		 * All constituent pages locked successfully, so mark
		 * large page for migration and unload the mappings of
		 * constituent pages, so a fault will occur on any part of the
		 * large page
		 */
		PP_SETMIGRATE(ppa[0]);
		for (i = 0; i < nlocked; i++) {
			pp = ppa[i];
			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
			ASSERT(hat_page_getshare(pp) == 0);
			page_unlock(pp);
		}
		lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked);

		va += pgsz;
	}
	kmem_free(ppa, ppa_nentries * sizeof (page_t *));
}

/*
 * Migrate any pages that have been marked for migration in the given range
 */
void
page_migrate(
	struct seg	*seg,
	caddr_t		addr,
	page_t		**ppa,
	pgcnt_t		npages)
{
	lgrp_t		*from;
	lgrp_t		*to;
	page_t		*newpp;
	page_t		*pp;
	pfn_t		pfn;
	size_t		pgsz;
	spgcnt_t	page_cnt;
	spgcnt_t	i;
	uint_t		pszc;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	while (npages > 0) {
		pp = *ppa;
		pszc = pp->p_szc;
		pgsz = page_get_pagesize(pszc);
		page_cnt = btop(pgsz);

		/*
		 * Check to see whether this page is marked for migration
		 *
		 * Assume that root page of large page is marked for
		 * migration and none of the other constituent pages
		 * are marked.  This really simplifies clearing the
		 * migrate bit by not having to clear it from each
		 * constituent page.
		 *
		 * Note we don't want to relocate an entire large page if
		 * someone is only using one subpage.
		 */
		if (npages < page_cnt)
			break;

		/*
		 * Is it marked for migration?
		 */
		if (!PP_ISMIGRATE(pp))
			goto next;

		/*
		 * Determine lgroups that page is being migrated between
		 */
		pfn = page_pptonum(pp);
		if (!IS_P2ALIGNED(pfn, page_cnt)) {
			break;
		}
		from = lgrp_pfn_to_lgrp(pfn);
		to = lgrp_mem_choose(seg, addr, pgsz);

		/*
		 * Check to see whether we are trying to migrate page to lgroup
		 * where it is allocated already
		 */
		if (to == from) {
			PP_CLRMIGRATE(pp);
			goto next;
		}

		/*
		 * Need to get exclusive locks to migrate
		 */
		for (i = 0; i < page_cnt; i++) {
			ASSERT(PAGE_LOCKED(ppa[i]));
			if (page_pptonum(ppa[i]) != pfn + i ||
			    ppa[i]->p_szc != pszc) {
				break;
			}
			if (!page_tryupgrade(ppa[i])) {
				lgrp_stat_add(from->lgrp_id,
				    LGRP_PM_FAIL_LOCK_PGS,
				    page_cnt);
				break;
			}
		}
		if (i != page_cnt) {
			while (--i != -1) {
				page_downgrade(ppa[i]);
			}
			goto next;
		}

		(void) page_create_wait(page_cnt, PG_WAIT);
		newpp = page_get_replacement_page(pp, to, PGR_SAMESZC);
		if (newpp == NULL) {
			page_create_putback(page_cnt);
			for (i = 0; i < page_cnt; i++) {
				page_downgrade(ppa[i]);
			}
			lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS,
			    page_cnt);
			goto next;
		}
		ASSERT(newpp->p_szc == pszc);
		/*
		 * Clear migrate bit and relocate page
		 */
		PP_CLRMIGRATE(pp);
		if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) {
			panic("page_migrate: page_relocate failed");
		}
		ASSERT(page_cnt * PAGESIZE == pgsz);

		/*
		 * Keep stats for number of pages migrated from and to
		 * each lgroup
		 */
		lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt);
		lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt);
		/*
		 * Update the page_t array we were passed in and
		 * unlink constituent pages of a large page.
		 */
		for (i = 0; i < page_cnt; ++i, ++pp) {
			ASSERT(PAGE_EXCL(newpp));
			ASSERT(newpp->p_szc == pszc);
			ppa[i] = newpp;
			pp = newpp;
			page_sub(&newpp, pp);
			page_downgrade(pp);
		}
		ASSERT(newpp == NULL);
next:
		addr += pgsz;
		ppa += page_cnt;
		npages -= page_cnt;
	}
}

ulong_t mem_waiters	= 0;
ulong_t	max_count	= 20;
#define	MAX_DELAY	0x1ff

/*
 * Check if enough memory is available to proceed.
 * Depending on system configuration and how much memory is
 * reserved for swap we need to check against two variables.
 * e.g. on systems with little physical swap availrmem can be
 * a more reliable indicator of how much memory is available.
 * On systems with large physical swap freemem can be a better indicator.
 * If freemem drops below the threshold level don't return an error
 * immediately but wake up pageout to free memory and block.
 * This is done a number of times. If pageout is not able to free
 * memory within a certain time return an error.
 * The same applies for availrmem but kmem_reap is used to
 * free memory.
 */
int
page_mem_avail(pgcnt_t npages)
{
	ulong_t count;

#if defined(__i386)
	if (freemem > desfree + npages &&
	    availrmem > swapfs_reserve + npages &&
	    btop(vmem_size(heap_arena, VMEM_FREE)) > tune.t_minarmem +
	    npages)
		return (1);
#else
	if (freemem > desfree + npages &&
	    availrmem > swapfs_reserve + npages)
		return (1);
#endif

	count = max_count;
	atomic_add_long(&mem_waiters, 1);

	while (freemem < desfree + npages && --count) {
		cv_signal(&proc_pageout->p_cv);
		if (delay_sig(hz + (mem_waiters & MAX_DELAY))) {
			atomic_add_long(&mem_waiters, -1);
			return (0);
		}
	}
	if (count == 0) {
		atomic_add_long(&mem_waiters, -1);
		return (0);
	}

	count = max_count;
	while (availrmem < swapfs_reserve + npages && --count) {
		kmem_reap();
		if (delay_sig(hz + (mem_waiters & MAX_DELAY))) {
			atomic_add_long(&mem_waiters, -1);
			return (0);
		}
	}
	atomic_add_long(&mem_waiters, -1);
	if (count == 0)
		return (0);

#if defined(__i386)
	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
	    tune.t_minarmem + npages)
		return (0);
#endif
	return (1);
}

#define	MAX_CNT	60	/* max num of iterations */
/*
 * Reclaim/reserve availrmem for npages.
 * If there is not enough memory start reaping seg, kmem caches.
 * Start pageout scanner (via page_needfree()).
 * Exit after ~ MAX_CNT seconds regardless of how much memory has been
 * released.
 * Note: There is no guarantee that any availrmem will be freed as
 * this memory typically is locked (kernel heap) or reserved for swap.
 * Also due to memory fragmentation the kmem allocator may not be able
 * to free any memory (a single user-allocated buffer will prevent
 * freeing a slab or a page).
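 *
 * An illustrative call (the counts are hypothetical) reserving room
 * for 16 pages plus 4 extra, with availrmem adjusted on success:
 *
 *	if (page_reclaim_mem(16, 4, 1) == 0)
 *		(failed; availrmem was not reserved)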
 */
int
page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust)
{
	int	i = 0;
	int	ret = 0;
	pgcnt_t	deficit;
	pgcnt_t old_availrmem;

	mutex_enter(&freemem_lock);
	old_availrmem = availrmem - 1;
	while ((availrmem < tune.t_minarmem + npages + epages) &&
	    (old_availrmem < availrmem) && (i++ < MAX_CNT)) {
		old_availrmem = availrmem;
		deficit = tune.t_minarmem + npages + epages - availrmem;
		mutex_exit(&freemem_lock);
		page_needfree(deficit);
		seg_preap();
		kmem_reap();
		delay(hz);
		page_needfree(-(spgcnt_t)deficit);
		mutex_enter(&freemem_lock);
	}

	if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) {
		availrmem -= npages;
		ret = 1;
	}

	mutex_exit(&freemem_lock);

	return (ret);
}

/*
 * Search the memory segments to locate the desired page. Within a
 * segment, pages increase linearly with one page structure per
 * physical page frame (size PAGESIZE). The search begins
 * with the segment that was accessed last, to take advantage of locality.
 * If the hint misses, we start from the beginning of the sorted memseg list.
 */


/*
 * Some data structures for pfn to pp lookup.
 */
ulong_t mhash_per_slot;
struct memseg *memseg_hash[N_MEM_SLOTS];

page_t *
page_numtopp_nolock(pfn_t pfnum)
{
	struct memseg *seg;
	page_t *pp;
	vm_cpu_data_t *vc = CPU->cpu_vm_data;

	ASSERT(vc != NULL);

	MEMSEG_STAT_INCR(nsearch);

	/* Try last winner first */
	if (((seg = vc->vc_pnum_memseg) != NULL) &&
	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
		MEMSEG_STAT_INCR(nlastwon);
		pp = seg->pages + (pfnum - seg->pages_base);
		if (pp->p_pagenum == pfnum)
			return ((page_t *)pp);
	}

	/* Else Try hash */
	if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
		MEMSEG_STAT_INCR(nhashwon);
		vc->vc_pnum_memseg = seg;
		pp = seg->pages + (pfnum - seg->pages_base);
		if (pp->p_pagenum == pfnum)
			return ((page_t *)pp);
	}

	/* Else Brute force */
	for (seg = memsegs; seg != NULL; seg = seg->next) {
		if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
			vc->vc_pnum_memseg = seg;
			pp = seg->pages + (pfnum - seg->pages_base);
			return ((page_t *)pp);
		}
	}
	vc->vc_pnum_memseg = NULL;
	MEMSEG_STAT_INCR(nnotfound);
	return ((page_t *)NULL);
}

struct memseg *
page_numtomemseg_nolock(pfn_t pfnum)
{
	struct memseg *seg;
	page_t *pp;

	/* Try hash */
	if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
		pp = seg->pages + (pfnum - seg->pages_base);
		if (pp->p_pagenum == pfnum)
			return (seg);
	}

	/* Else Brute force */
	for (seg = memsegs; seg != NULL; seg = seg->next) {
		if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
			return (seg);
		}
	}
	return ((struct memseg *)NULL);
}

/*
 * Given a page and a count return the page struct that is
 * n structs away from the current one in the global page
 * list.
 *
 * This function wraps to the first page upon
 * reaching the end of the memseg list.
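 *
 * For example, page_nextn(pp, 1) is exactly page_next(pp); starting
 * from page_first() and repeatedly taking page_next() visits every
 * page_t in the system and eventually returns to the starting page,
 * which is how loops such as the one in page_busy() terminate.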
 */
page_t *
page_nextn(page_t *pp, ulong_t n)
{
	struct memseg *seg;
	page_t *ppn;
	vm_cpu_data_t *vc = (vm_cpu_data_t *)CPU->cpu_vm_data;

	ASSERT(vc != NULL);

	if (((seg = vc->vc_pnext_memseg) == NULL) ||
	    (seg->pages_base == seg->pages_end) ||
	    !(pp >= seg->pages && pp < seg->epages)) {

		for (seg = memsegs; seg; seg = seg->next) {
			if (pp >= seg->pages && pp < seg->epages)
				break;
		}

		if (seg == NULL) {
			/* Memory delete got in, return something valid. */
			/* TODO: fix me. */
			seg = memsegs;
			pp = seg->pages;
		}
	}

	/* check for wraparound - possible if n is large */
	while ((ppn = (pp + n)) >= seg->epages || ppn < pp) {
		n -= seg->epages - pp;
		seg = seg->next;
		if (seg == NULL)
			seg = memsegs;
		pp = seg->pages;
	}
	vc->vc_pnext_memseg = seg;
	return (ppn);
}

/*
 * Initialize for a loop using page_next_scan_large().
 */
page_t *
page_next_scan_init(void **cookie)
{
	ASSERT(cookie != NULL);
	*cookie = (void *)memsegs;
	return ((page_t *)memsegs->pages);
}

/*
 * Return the next page in a scan of page_t's, assuming we want
 * to skip over sub-pages within larger page sizes.
 *
 * The cookie is used to keep track of the current memseg.
 */
page_t *
page_next_scan_large(
	page_t		*pp,
	ulong_t		*n,
	void		**cookie)
{
	struct memseg	*seg = (struct memseg *)*cookie;
	page_t		*new_pp;
	ulong_t		cnt;
	pfn_t		pfn;

	/*
	 * get the count of page_t's to skip based on the page size
	 */
	ASSERT(pp != NULL);
	if (pp->p_szc == 0) {
		cnt = 1;
	} else {
		pfn = page_pptonum(pp);
		cnt = page_get_pagecnt(pp->p_szc);
		cnt -= pfn & (cnt - 1);
	}
	*n += cnt;
	new_pp = pp + cnt;

	/*
	 * Catch if we went past the end of the current memory segment. If so,
	 * just move to the next segment with pages.
	 */
	if (new_pp >= seg->epages) {
		do {
			seg = seg->next;
			if (seg == NULL)
				seg = memsegs;
		} while (seg->pages == seg->epages);
		new_pp = seg->pages;
		*cookie = (void *)seg;
	}

	return (new_pp);
}

/*
 * Returns next page in list. Note: this function wraps
 * to the first page in the list upon reaching the end
 * of the list. Callers should be aware of this fact.
 */

/* We should change this to be a #define */

page_t *
page_next(page_t *pp)
{
	return (page_nextn(pp, 1));
}

page_t *
page_first()
{
	return ((page_t *)memsegs->pages);
}

/*
 * This routine is called at boot with the initial memory configuration
 * and when memory is added or removed.
 */
void
build_pfn_hash()
{
	pfn_t cur;
	pgcnt_t index;
	struct memseg *pseg;
	int	i;

	/*
	 * Clear memseg_hash array.
	 * Since memory add/delete is designed to operate concurrently
	 * with normal operation, the hash rebuild must be able to run
	 * concurrently with page_numtopp_nolock(). To support this
	 * functionality, assignments to memseg_hash array members must
	 * be done atomically.
	 *
	 * NOTE: bzero() does not currently guarantee this for kernel
	 * threads, and cannot be used here.
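	 *
	 * (Each "memseg_hash[i] = NULL" store in the loop below is a
	 * single aligned pointer-sized write, which is the atomicity
	 * property the concurrent readers in page_numtopp_nolock()
	 * depend on and which bzero() does not promise.)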
	 */
	for (i = 0; i < N_MEM_SLOTS; i++)
		memseg_hash[i] = NULL;

	hat_kpm_mseghash_clear(N_MEM_SLOTS);

	/*
	 * Physmax is the last valid pfn.
	 */
	mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT;
	for (pseg = memsegs; pseg != NULL; pseg = pseg->next) {
		index = MEMSEG_PFN_HASH(pseg->pages_base);
		cur = pseg->pages_base;
		do {
			if (index >= N_MEM_SLOTS)
				index = MEMSEG_PFN_HASH(cur);

			if (memseg_hash[index] == NULL ||
			    memseg_hash[index]->pages_base > pseg->pages_base) {
				memseg_hash[index] = pseg;
				hat_kpm_mseghash_update(index, pseg);
			}
			cur += mhash_per_slot;
			index++;
		} while (cur < pseg->pages_end);
	}
}

/*
 * Return the pagenum for the pp
 */
pfn_t
page_pptonum(page_t *pp)
{
	return (pp->p_pagenum);
}

/*
 * Interface to the referenced, modified, and other bits
 * in the PSM part of the page struct
 * when no locking is desired.
 */
void
page_set_props(page_t *pp, uint_t flags)
{
	ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0);
	pp->p_nrm |= (uchar_t)flags;
}

void
page_clr_all_props(page_t *pp)
{
	pp->p_nrm = 0;
}

/*
 * Clear p_lckcnt and p_cowcnt, adjusting freemem if required.
 */
int
page_clear_lck_cow(page_t *pp, int adjust)
{
	int	f_amount;

	ASSERT(PAGE_EXCL(pp));

	/*
	 * The page_struct_lock need not be acquired here since
	 * we require the caller hold the page exclusively locked.
	 */
	f_amount = 0;
	if (pp->p_lckcnt) {
		f_amount = 1;
		pp->p_lckcnt = 0;
	}
	if (pp->p_cowcnt) {
		f_amount += pp->p_cowcnt;
		pp->p_cowcnt = 0;
	}

	if (adjust && f_amount) {
		mutex_enter(&freemem_lock);
		availrmem += f_amount;
		mutex_exit(&freemem_lock);
	}

	return (f_amount);
}

/*
 * The following function is called from free_vp_pages()
 * for an inexact estimate of a newly free'd page...
 */
ulong_t
page_share_cnt(page_t *pp)
{
	return (hat_page_getshare(pp));
}

int
page_isshared(page_t *pp)
{
	return (hat_page_getshare(pp) > 1);
}

int
page_isfree(page_t *pp)
{
	return (PP_ISFREE(pp));
}

int
page_isref(page_t *pp)
{
	return (hat_page_getattr(pp, P_REF));
}

int
page_ismod(page_t *pp)
{
	return (hat_page_getattr(pp, P_MOD));
}

/*
 * The following code all currently relates to the page capture logic:
 *
 * This logic is used for cases where there is a desire to claim a certain
 * physical page in the system for the caller.  As it may not be possible
 * to capture the page immediately, the p_toxic bits are used in the page
 * structure to indicate that someone wants to capture this page.  When the
 * page gets unlocked, the toxic flag will be noted and an attempt to capture
 * the page will be made.  If it is successful, the original caller's callback
 * will be called with the page to do with it what they please.
 *
 * There is also an async thread which wakes up to attempt to capture
 * pages occasionally which have the capture bit set.  All of the pages which
 * need to be captured asynchronously have been inserted into the
 * page_capture_hash and thus this thread walks that hash list.
 * Items in the
 * hash have an expiration time so this thread handles that as well by removing
 * the item from the hash if it has expired.
 *
 * Some important things to note are:
 * - if the PR_CAPTURE bit is set on a page, then the page is in the
 *   page_capture_hash.  The page_capture_hash_head.pchh_mutex is needed
 *   to set and clear this bit, and while the lock is held is the only time
 *   you can add or remove an entry from the hash.
 * - the PR_CAPTURE bit can only be set and cleared while holding the
 *   page_capture_hash_head.pchh_mutex
 * - the t_flag field of the thread struct is used with the T_CAPTURING
 *   flag to prevent recursion while dealing with large pages.
 * - pages which need to be retired never expire on the page_capture_hash.
 */

static void page_capture_thread(void);
static kthread_t *pc_thread_id;
kcondvar_t pc_cv;
static kmutex_t pc_thread_mutex;
static clock_t pc_thread_shortwait;
static clock_t pc_thread_longwait;
static int pc_thread_ism_retry;

struct page_capture_callback pc_cb[PC_NUM_CALLBACKS];

/* Note that this is a circular linked list */
typedef struct page_capture_hash_bucket {
	page_t *pp;
	uint_t szc;
	uint_t flags;
	clock_t expires;	/* lbolt at which this request expires. */
	void *datap;		/* Cached data passed in for callback */
	struct page_capture_hash_bucket *next;
	struct page_capture_hash_bucket *prev;
} page_capture_hash_bucket_t;

/*
 * Each hash bucket will have its own mutex and two lists which are:
 * active (0):	represents requests which have not been processed by
 *		the page_capture async thread yet.
 * walked (1):	represents requests which have been processed by the
 *		page_capture async thread within its given walk of this bucket.
 *
 * These are all needed so that we can synchronize all async page_capture
 * events.  When the async thread moves to a new bucket, it will append the
 * walked list to the active list and walk each item one at a time, moving it
 * from the active list to the walked list.  Thus if there is an async request
 * outstanding for a given page, it will always be in one of the two lists.
 * New requests will always be added to the active list.
 * If we are not able to capture a page before the request expires, we free
 * up the request structure, which indicates to page_capture that there is
 * no longer a need for the given page, and we clear the PR_CAPTURE flag if
 * possible.
 */
typedef struct page_capture_hash_head {
	kmutex_t pchh_mutex;
	uint_t num_pages;
	page_capture_hash_bucket_t lists[2]; /* sentinel nodes */
} page_capture_hash_head_t;

#ifdef DEBUG
#define	NUM_PAGE_CAPTURE_BUCKETS 4
#else
#define	NUM_PAGE_CAPTURE_BUCKETS 64
#endif

page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS];

/* for now use a very simple hash based upon the size of a page struct */
#define	PAGE_CAPTURE_HASH(pp)	\
	((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1)))

extern pgcnt_t swapfs_minfree;

int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap);

/*
 * A callback function is required for page capture requests.
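 * For illustration, a consumer such as page retire might register
 * along these lines (the index symbol shown is assumed, not defined
 * here):
 *
 *	page_capture_register_callback(PC_RETIRE, -1, retire_cb);
 *
 * where a duration of -1 makes queued capture requests never expire,
 * matching the note above that pages which need to be retired never
 * expire on the page_capture_hash.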
 */
void
page_capture_register_callback(uint_t index, clock_t duration,
    int (*cb_func)(page_t *, void *, uint_t))
{
	ASSERT(pc_cb[index].cb_active == 0);
	ASSERT(cb_func != NULL);
	rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER);
	pc_cb[index].duration = duration;
	pc_cb[index].cb_func = cb_func;
	pc_cb[index].cb_active = 1;
	rw_exit(&pc_cb[index].cb_rwlock);
}

void
page_capture_unregister_callback(uint_t index)
{
	int i, j;
	struct page_capture_hash_bucket *bp1;
	struct page_capture_hash_bucket *bp2;
	struct page_capture_hash_bucket *head = NULL;
	uint_t flags = (1 << index);

	rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER);
	ASSERT(pc_cb[index].cb_active == 1);
	pc_cb[index].duration = 0;	/* Paranoia */
	pc_cb[index].cb_func = NULL;	/* Paranoia */
	pc_cb[index].cb_active = 0;
	rw_exit(&pc_cb[index].cb_rwlock);

	/*
	 * Just move all the entries to a private list which we can walk
	 * through without the need to hold any locks.
	 * No more requests can get added to the hash lists for this consumer
	 * as the cb_active field for the callback has been cleared.
	 */
	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		mutex_enter(&page_capture_hash[i].pchh_mutex);
		for (j = 0; j < 2; j++) {
			bp1 = page_capture_hash[i].lists[j].next;
			/* walk through all but first (sentinel) element */
			while (bp1 != &page_capture_hash[i].lists[j]) {
				bp2 = bp1;
				if (bp2->flags & flags) {
					bp1 = bp2->next;
					bp1->prev = bp2->prev;
					bp2->prev->next = bp1;
					bp2->next = head;
					head = bp2;
					/*
					 * Clear the PR_CAPTURE bit as we
					 * hold appropriate locks here.
					 */
					page_clrtoxic(head->pp, PR_CAPTURE);
					page_capture_hash[i].num_pages--;
					continue;
				}
				bp1 = bp1->next;
			}
		}
		mutex_exit(&page_capture_hash[i].pchh_mutex);
	}

	while (head != NULL) {
		bp1 = head;
		head = head->next;
		kmem_free(bp1, sizeof (*bp1));
	}
}

/*
 * Find pp in the active list and move it to the walked list if it
 * exists.
 * Note that most often pp should be at the front of the active list
 * as it is currently used and thus there is no other sort of optimization
 * being done here as this is a linked list data structure.
 * Returns 1 on successful move or 0 if page could not be found.
 */
static int
page_capture_move_to_walked(page_t *pp)
{
	page_capture_hash_bucket_t *bp;
	int index;

	index = PAGE_CAPTURE_HASH(pp);

	mutex_enter(&page_capture_hash[index].pchh_mutex);
	bp = page_capture_hash[index].lists[0].next;
	while (bp != &page_capture_hash[index].lists[0]) {
		if (bp->pp == pp) {
			/* Remove from old list */
			bp->next->prev = bp->prev;
			bp->prev->next = bp->next;

			/* Add to new list */
			bp->next = page_capture_hash[index].lists[1].next;
			bp->prev = &page_capture_hash[index].lists[1];
			page_capture_hash[index].lists[1].next = bp;
			bp->next->prev = bp;
			mutex_exit(&page_capture_hash[index].pchh_mutex);

			return (1);
		}
		bp = bp->next;
	}
	mutex_exit(&page_capture_hash[index].pchh_mutex);
	return (0);
}

/*
 * Add a new entry to the page capture hash.  The only case where a new
 * entry is not added is when the page capture consumer is no longer registered.
 * In this case, we'll silently not add the page to the hash.  We know that
 * page retire will always be registered for the case where we are currently
 * unretiring a page and thus there are no conflicts.
 */
static void
page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap)
{
	page_capture_hash_bucket_t *bp1;
	page_capture_hash_bucket_t *bp2;
	int index;
	int cb_index;
	int i;
#ifdef DEBUG
	page_capture_hash_bucket_t *tp1;
	int l;
#endif

	ASSERT(!(flags & CAPTURE_ASYNC));

	bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP);

	bp1->pp = pp;
	bp1->szc = szc;
	bp1->flags = flags;
	bp1->datap = datap;

	for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
		if ((flags >> cb_index) & 1) {
			break;
		}
	}

	ASSERT(cb_index != PC_NUM_CALLBACKS);

	rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER);
	if (pc_cb[cb_index].cb_active) {
		if (pc_cb[cb_index].duration == -1) {
			bp1->expires = (clock_t)-1;
		} else {
			bp1->expires = lbolt + pc_cb[cb_index].duration;
		}
	} else {
		/* There's no callback registered so don't add to the hash */
		rw_exit(&pc_cb[cb_index].cb_rwlock);
		kmem_free(bp1, sizeof (*bp1));
		return;
	}

	index = PAGE_CAPTURE_HASH(pp);

	/*
	 * Only allow the capture flag to be modified under this mutex.
	 * This prevents multiple entries for the same page from being added.
	 */
	mutex_enter(&page_capture_hash[index].pchh_mutex);

	/*
	 * if not already on the hash, set capture bit and add to the hash
	 */
	if (!(pp->p_toxic & PR_CAPTURE)) {
#ifdef DEBUG
		/* Check for duplicate entries */
		for (l = 0; l < 2; l++) {
			tp1 = page_capture_hash[index].lists[l].next;
			while (tp1 != &page_capture_hash[index].lists[l]) {
				if (tp1->pp == pp) {
					panic("page pp 0x%p already on hash "
					    "at 0x%p\n", pp, tp1);
				}
				tp1 = tp1->next;
			}
		}

#endif
		page_settoxic(pp, PR_CAPTURE);
		bp1->next = page_capture_hash[index].lists[0].next;
		bp1->prev = &page_capture_hash[index].lists[0];
		bp1->next->prev = bp1;
		page_capture_hash[index].lists[0].next = bp1;
		page_capture_hash[index].num_pages++;
		if (flags & CAPTURE_RETIRE) {
			page_retire_incr_pend_count();
		}
		mutex_exit(&page_capture_hash[index].pchh_mutex);
		rw_exit(&pc_cb[cb_index].cb_rwlock);
		cv_signal(&pc_cv);
		return;
	}

	/*
	 * A page retire request will replace any other request.
	 * A second physmem request which is for a different process than
	 * the currently registered one will be dropped as there is
	 * no way to hold the private data for both calls.
	 * In the future, once there are more callers, this will have to
	 * be worked out better as there needs to be private storage for
	 * at least each type of caller (maybe have datap be an array of
	 * void *'s so that we can index based upon the caller's index).
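	 *
	 * To summarize the replacement policy implemented by the walk
	 * below for a page that is already hashed:
	 *
	 *	existing RETIRE,  new RETIRE	-> entry left unchanged
	 *	existing other,   new RETIRE	-> entry upgraded to RETIRE
	 *	existing RETIRE,  new PHYSMEM	-> new request dropped
	 *	existing PHYSMEM, new PHYSMEM	-> expire time refreshed only
	 *					   if datap matches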
6505 */ 6506 6507 /* walk hash list to update expire time */ 6508 for (i = 0; i < 2; i++) { 6509 bp2 = page_capture_hash[index].lists[i].next; 6510 while (bp2 != &page_capture_hash[index].lists[i]) { 6511 if (bp2->pp == pp) { 6512 if (flags & CAPTURE_RETIRE) { 6513 if (!(bp2->flags & CAPTURE_RETIRE)) { 6514 page_retire_incr_pend_count(); 6515 bp2->flags = flags; 6516 bp2->expires = bp1->expires; 6517 bp2->datap = datap; 6518 } 6519 } else { 6520 ASSERT(flags & CAPTURE_PHYSMEM); 6521 if (!(bp2->flags & CAPTURE_RETIRE) && 6522 (datap == bp2->datap)) { 6523 bp2->expires = bp1->expires; 6524 } 6525 } 6526 mutex_exit(&page_capture_hash[index]. 6527 pchh_mutex); 6528 rw_exit(&pc_cb[cb_index].cb_rwlock); 6529 kmem_free(bp1, sizeof (*bp1)); 6530 return; 6531 } 6532 bp2 = bp2->next; 6533 } 6534 } 6535 6536 /* 6537 * the PR_CAPTURE flag is protected by the page_capture_hash mutexes 6538 * and thus it either has to be set or not set and can't change 6539 * while holding the mutex above. 6540 */ 6541 panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n", pp); 6542 } 6543 6544 /* 6545 * We have a page in our hands, lets try and make it ours by turning 6546 * it into a clean page like it had just come off the freelists. 6547 * 6548 * Returns 0 on success, with the page still EXCL locked. 6549 * On failure, the page will be unlocked, and returns EAGAIN 6550 */ 6551 static int 6552 page_capture_clean_page(page_t *pp) 6553 { 6554 page_t *newpp; 6555 int skip_unlock = 0; 6556 spgcnt_t count; 6557 page_t *tpp; 6558 int ret = 0; 6559 int extra; 6560 6561 ASSERT(PAGE_EXCL(pp)); 6562 ASSERT(!PP_RETIRED(pp)); 6563 ASSERT(curthread->t_flag & T_CAPTURING); 6564 6565 if (PP_ISFREE(pp)) { 6566 if (!page_reclaim(pp, NULL)) { 6567 skip_unlock = 1; 6568 ret = EAGAIN; 6569 goto cleanup; 6570 } 6571 ASSERT(pp->p_szc == 0); 6572 if (pp->p_vnode != NULL) { 6573 /* 6574 * Since this page came from the 6575 * cachelist, we must destroy the 6576 * old vnode association. 6577 */ 6578 page_hashout(pp, NULL); 6579 } 6580 goto cleanup; 6581 } 6582 6583 /* 6584 * If we know page_relocate will fail, skip it 6585 * It could still fail due to a UE on another page but we 6586 * can't do anything about that. 6587 */ 6588 if (pp->p_toxic & PR_UE) { 6589 goto skip_relocate; 6590 } 6591 6592 /* 6593 * It's possible that pages can not have a vnode as fsflush comes 6594 * through and cleans up these pages. It's ugly but that's how it is. 6595 */ 6596 if (pp->p_vnode == NULL) { 6597 goto skip_relocate; 6598 } 6599 6600 /* 6601 * Page was not free, so lets try to relocate it. 6602 * page_relocate only works with root pages, so if this is not a root 6603 * page, we need to demote it to try and relocate it. 6604 * Unfortunately this is the best we can do right now. 6605 */ 6606 newpp = NULL; 6607 if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) { 6608 if (page_try_demote_pages(pp) == 0) { 6609 ret = EAGAIN; 6610 goto cleanup; 6611 } 6612 } 6613 ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL); 6614 if (ret == 0) { 6615 page_t *npp; 6616 /* unlock the new page(s) */ 6617 while (count-- > 0) { 6618 ASSERT(newpp != NULL); 6619 npp = newpp; 6620 page_sub(&newpp, npp); 6621 page_unlock(npp); 6622 } 6623 ASSERT(newpp == NULL); 6624 /* 6625 * Check to see if the page we have is too large. 6626 * If so, demote it freeing up the extra pages. 
6627 */ 6628 if (pp->p_szc > 0) { 6629 /* For now demote extra pages to szc == 0 */ 6630 extra = page_get_pagecnt(pp->p_szc) - 1; 6631 while (extra > 0) { 6632 tpp = pp->p_next; 6633 page_sub(&pp, tpp); 6634 tpp->p_szc = 0; 6635 page_free(tpp, 1); 6636 extra--; 6637 } 6638 /* Make sure to set our page to szc 0 as well */ 6639 ASSERT(pp->p_next == pp && pp->p_prev == pp); 6640 pp->p_szc = 0; 6641 } 6642 goto cleanup; 6643 } else if (ret == EIO) { 6644 ret = EAGAIN; 6645 goto cleanup; 6646 } else { 6647 /* 6648 * Need to reset return type as we failed to relocate the page 6649 * but that does not mean that some of the next steps will not 6650 * work. 6651 */ 6652 ret = 0; 6653 } 6654 6655 skip_relocate: 6656 6657 if (pp->p_szc > 0) { 6658 if (page_try_demote_pages(pp) == 0) { 6659 ret = EAGAIN; 6660 goto cleanup; 6661 } 6662 } 6663 6664 ASSERT(pp->p_szc == 0); 6665 6666 if (hat_ismod(pp)) { 6667 ret = EAGAIN; 6668 goto cleanup; 6669 } 6670 if (PP_ISKAS(pp)) { 6671 ret = EAGAIN; 6672 goto cleanup; 6673 } 6674 if (pp->p_lckcnt || pp->p_cowcnt) { 6675 ret = EAGAIN; 6676 goto cleanup; 6677 } 6678 6679 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 6680 ASSERT(!hat_page_is_mapped(pp)); 6681 6682 if (hat_ismod(pp)) { 6683 /* 6684 * This is a semi-odd case as the page is now modified but not 6685 * mapped as we just unloaded the mappings above. 6686 */ 6687 ret = EAGAIN; 6688 goto cleanup; 6689 } 6690 if (pp->p_vnode != NULL) { 6691 page_hashout(pp, NULL); 6692 } 6693 6694 /* 6695 * At this point, the page should be in a clean state and 6696 * we can do whatever we want with it. 6697 */ 6698 6699 cleanup: 6700 if (ret != 0) { 6701 if (!skip_unlock) { 6702 page_unlock(pp); 6703 } 6704 } else { 6705 ASSERT(pp->p_szc == 0); 6706 ASSERT(PAGE_EXCL(pp)); 6707 6708 pp->p_next = pp; 6709 pp->p_prev = pp; 6710 } 6711 return (ret); 6712 } 6713 6714 /* 6715 * Various callers of page_trycapture() can have different restrictions upon 6716 * what memory they have access to. 6717 * Returns 0 on success, with the following error codes on failure: 6718 * EPERM - The requested page is long term locked, and thus repeated 6719 * requests to capture this page will likely fail. 6720 * ENOMEM - There was not enough free memory in the system to safely 6721 * map the requested page. 6722 * ENOENT - The requested page was inside the kernel cage, and the 6723 * PHYSMEM_CAGE flag was not set. 6724 */ 6725 int 6726 page_capture_pre_checks(page_t *pp, uint_t flags) 6727 { 6728 #if defined(__sparc) 6729 extern struct vnode prom_ppages; 6730 #endif /* __sparc */ 6731 6732 ASSERT(pp != NULL); 6733 6734 /* only physmem currently has restrictions */ 6735 if (!(flags & CAPTURE_PHYSMEM)) { 6736 return (0); 6737 } 6738 6739 #if defined(__sparc) 6740 if (pp->p_vnode == &prom_ppages) { 6741 return (EPERM); 6742 } 6743 6744 if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE)) { 6745 return (ENOENT); 6746 } 6747 6748 if (PP_ISNORELOCKERNEL(pp)) { 6749 return (EPERM); 6750 } 6751 #else 6752 if (PP_ISKAS(pp)) { 6753 return (EPERM); 6754 } 6755 #endif /* __sparc */ 6756 6757 if (availrmem < swapfs_minfree) { 6758 /* 6759 * We won't try to capture this page as we are 6760 * running low on memory. 6761 */ 6762 return (ENOMEM); 6763 } 6764 return (0); 6765 } 6766 6767 /* 6768 * Once we have a page in our mits, go ahead and complete the capture 6769 * operation. 6770 * Returns 1 on failure where page is no longer needed 6771 * Returns 0 on success 6772 * Returns -1 if there was a transient failure. 
 * Failure cases must release the SE_EXCL lock on pp (usually via page_free).
 */
int
page_capture_take_action(page_t *pp, uint_t flags, void *datap)
{
	int cb_index;
	int ret = 0;
	page_capture_hash_bucket_t *bp1;
	page_capture_hash_bucket_t *bp2;
	int index;
	int found = 0;
	int i;

	ASSERT(PAGE_EXCL(pp));
	ASSERT(curthread->t_flag & T_CAPTURING);

	for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
		if ((flags >> cb_index) & 1) {
			break;
		}
	}
	ASSERT(cb_index < PC_NUM_CALLBACKS);

	/*
	 * Remove the entry from the page_capture hash, but don't free it yet
	 * as we may need to put it back.
	 * Since we own the page at this point in time, we should find it
	 * in the hash if this is an ASYNC call.  If we don't, it's likely
	 * that the page_capture_async() thread decided that this request
	 * had expired, in which case we just continue on.
	 */
	if (flags & CAPTURE_ASYNC) {

		index = PAGE_CAPTURE_HASH(pp);

		mutex_enter(&page_capture_hash[index].pchh_mutex);
		for (i = 0; i < 2 && !found; i++) {
			bp1 = page_capture_hash[index].lists[i].next;
			while (bp1 != &page_capture_hash[index].lists[i]) {
				if (bp1->pp == pp) {
					bp1->next->prev = bp1->prev;
					bp1->prev->next = bp1->next;
					page_capture_hash[index].num_pages--;
					page_clrtoxic(pp, PR_CAPTURE);
					found = 1;
					break;
				}
				bp1 = bp1->next;
			}
		}
		mutex_exit(&page_capture_hash[index].pchh_mutex);
	}

	/* Synchronize with the unregister func. */
	rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER);
	if (!pc_cb[cb_index].cb_active) {
		page_free(pp, 1);
		rw_exit(&pc_cb[cb_index].cb_rwlock);
		if (found) {
			kmem_free(bp1, sizeof (*bp1));
		}
		return (1);
	}

	/*
	 * We need to remove the entry from the page capture hash and turn off
	 * the PR_CAPTURE bit before calling the callback.  We'll need to cache
	 * the entry here, and then based upon the return value, clean up
	 * appropriately or re-add it to the hash, making sure that someone else
	 * hasn't already done so.
	 * It should be rare for the callback to fail and thus it's ok for
	 * the failure path to be a bit complicated as the success path is
	 * cleaner and the locking rules are easier to follow.
	 */

	ret = pc_cb[cb_index].cb_func(pp, datap, flags);

	rw_exit(&pc_cb[cb_index].cb_rwlock);

	/*
	 * If this was an ASYNC request, we need to clean up the hash if the
	 * callback was successful or if the request was no longer valid.
	 * For non-ASYNC requests, we return the failure to the caller, who
	 * will take care of adding the request to the hash.
	 * Note also that the callback itself is responsible for the page
	 * at this point in time in terms of locking ...  The most common
	 * case for the failure path should just be a page_free.
	 */
	if (ret >= 0) {
		if (found) {
			if (bp1->flags & CAPTURE_RETIRE) {
				page_retire_decr_pend_count();
			}
			kmem_free(bp1, sizeof (*bp1));
		}
		return (ret);
	}
	if (!found) {
		return (ret);
	}

	ASSERT(flags & CAPTURE_ASYNC);

	/*
	 * Check for expiration time first as we can just free it up if it's
	 * expired.
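	 * (For reference: expires was set in page_capture_add_hash() to
	 * lbolt plus the callback's duration, so, for example, an entry
	 * created with a duration of 10 * hz at lbolt == 1000 expires once
	 * lbolt passes 1000 + 10 * hz.  An expires value of -1 means the
	 * entry never expires; retire requests rely on this.)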
6879 */ 6880 if (lbolt > bp1->expires && bp1->expires != -1) { 6881 kmem_free(bp1, sizeof (*bp1)); 6882 return (ret); 6883 } 6884 6885 /* 6886 * The callback failed and there used to be an entry in the hash for 6887 * this page, so we need to add it back to the hash. 6888 */ 6889 mutex_enter(&page_capture_hash[index].pchh_mutex); 6890 if (!(pp->p_toxic & PR_CAPTURE)) { 6891 /* just add bp1 back to head of walked list */ 6892 page_settoxic(pp, PR_CAPTURE); 6893 bp1->next = page_capture_hash[index].lists[1].next; 6894 bp1->prev = &page_capture_hash[index].lists[1]; 6895 bp1->next->prev = bp1; 6896 page_capture_hash[index].lists[1].next = bp1; 6897 page_capture_hash[index].num_pages++; 6898 mutex_exit(&page_capture_hash[index].pchh_mutex); 6899 return (ret); 6900 } 6901 6902 /* 6903 * Otherwise there was a new capture request added to list 6904 * Need to make sure that our original data is represented if 6905 * appropriate. 6906 */ 6907 for (i = 0; i < 2; i++) { 6908 bp2 = page_capture_hash[index].lists[i].next; 6909 while (bp2 != &page_capture_hash[index].lists[i]) { 6910 if (bp2->pp == pp) { 6911 if (bp1->flags & CAPTURE_RETIRE) { 6912 if (!(bp2->flags & CAPTURE_RETIRE)) { 6913 bp2->szc = bp1->szc; 6914 bp2->flags = bp1->flags; 6915 bp2->expires = bp1->expires; 6916 bp2->datap = bp1->datap; 6917 } 6918 } else { 6919 ASSERT(bp1->flags & CAPTURE_PHYSMEM); 6920 if (!(bp2->flags & CAPTURE_RETIRE)) { 6921 bp2->szc = bp1->szc; 6922 bp2->flags = bp1->flags; 6923 bp2->expires = bp1->expires; 6924 bp2->datap = bp1->datap; 6925 } 6926 } 6927 mutex_exit(&page_capture_hash[index]. 6928 pchh_mutex); 6929 kmem_free(bp1, sizeof (*bp1)); 6930 return (ret); 6931 } 6932 bp2 = bp2->next; 6933 } 6934 } 6935 panic("PR_CAPTURE set but not on hash for pp 0x%p\n", pp); 6936 /*NOTREACHED*/ 6937 } 6938 6939 /* 6940 * Try to capture the given page for the caller specified in the flags 6941 * parameter. The page will either be captured and handed over to the 6942 * appropriate callback, or will be queued up in the page capture hash 6943 * to be captured asynchronously. 6944 * If the current request is due to an async capture, the page must be 6945 * exclusively locked before calling this function. 6946 * Currently szc must be 0 but in the future this should be expandable to 6947 * other page sizes. 6948 * Returns 0 on success, with the following error codes on failure: 6949 * EPERM - The requested page is long term locked, and thus repeated 6950 * requests to capture this page will likely fail. 6951 * ENOMEM - There was not enough free memory in the system to safely 6952 * map the requested page. 6953 * ENOENT - The requested page was inside the kernel cage, and the 6954 * CAPTURE_GET_CAGE flag was not set. 6955 * EAGAIN - The requested page could not be capturead at this point in 6956 * time but future requests will likely work. 6957 * EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED flag 6958 * was not set. 6959 */ 6960 int 6961 page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 6962 { 6963 int ret; 6964 int cb_index; 6965 6966 if (flags & CAPTURE_ASYNC) { 6967 ASSERT(PAGE_EXCL(pp)); 6968 goto async; 6969 } 6970 6971 /* Make sure there's enough availrmem ... 
	ret = page_capture_pre_checks(pp, flags);
	if (ret != 0) {
		return (ret);
	}

	if (!page_trylock(pp, SE_EXCL)) {
		for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
			if ((flags >> cb_index) & 1) {
				break;
			}
		}
		ASSERT(cb_index < PC_NUM_CALLBACKS);
		ret = EAGAIN;
		/* Special case for retired pages */
		if (PP_RETIRED(pp)) {
			if (flags & CAPTURE_GET_RETIRED) {
				if (!page_unretire_pp(pp, PR_UNR_TEMP)) {
					/*
					 * Need to set capture bit and add to
					 * hash so that the page will be
					 * retired when freed.
					 */
					page_capture_add_hash(pp, szc,
					    CAPTURE_RETIRE, NULL);
					ret = 0;
					goto own_page;
				}
			} else {
				return (EBUSY);
			}
		}
		page_capture_add_hash(pp, szc, flags, datap);
		return (ret);
	}

async:
	ASSERT(PAGE_EXCL(pp));

	/* For physmem async requests, make sure availrmem is sane */
	if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) ==
	    (CAPTURE_ASYNC | CAPTURE_PHYSMEM) &&
	    (availrmem < swapfs_minfree)) {
		page_unlock(pp);
		return (ENOMEM);
	}

	ret = page_capture_clean_page(pp);

	if (ret != 0) {
		/* We failed to get the page, so let's add it to the hash */
		if (!(flags & CAPTURE_ASYNC)) {
			page_capture_add_hash(pp, szc, flags, datap);
		}
		return (ret);
	}

own_page:
	ASSERT(PAGE_EXCL(pp));
	ASSERT(pp->p_szc == 0);

	/* Call the callback */
	ret = page_capture_take_action(pp, flags, datap);

	if (ret == 0) {
		return (0);
	}

	/*
	 * Note that in the failure cases from page_capture_take_action, the
	 * EXCL lock will have already been dropped.
	 */
	if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) {
		page_capture_add_hash(pp, szc, flags, datap);
	}
	return (EAGAIN);
}

int
page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
{
	int ret;

	curthread->t_flag |= T_CAPTURING;
	ret = page_itrycapture(pp, szc, flags, datap);
	curthread->t_flag &= ~T_CAPTURING; /* clear the flag; we know it was set */
	return (ret);
}

/*
 * When unlocking a page which has the PR_CAPTURE bit set, this routine
 * gets called to try and capture the page.
 */
void
page_unlock_capture(page_t *pp)
{
	page_capture_hash_bucket_t *bp;
	int index;
	int i;
	uint_t szc;
	uint_t flags = 0;
	void *datap;
	kmutex_t *mp;
	extern vnode_t retired_pages;

	/*
	 * We need to protect against a possible deadlock here where we own
	 * the vnode page hash mutex and want to acquire it again, as there
	 * are locations in the code where we unlock a page while holding
	 * the mutex, which can lead to the page being captured and eventually
	 * ending up here.  As we may be hashing out the old page and hashing
	 * into the retire vnode, we need to make sure we don't own them.
	 * Other callbacks that do hash operations also need to make sure that
	 * before they hash a page into a vnode they do not currently own the
	 * vphm mutex, otherwise there will be a panic.
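	 *
	 * A sketch of the deadlock this avoids:
	 *	1. a thread takes a vnode's vphm mutex
	 *	2. it calls page_unlock() on a page with PR_CAPTURE set,
	 *	   which lands here and calls page_trycapture()
	 *	3. page_capture_clean_page() then calls page_hashout() (and a
	 *	   retire callback may hash into the retired_pages vnode)
	 *	4. taking the same vphm mutex again would panic, hence the
	 *	   bailouts below.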
7086 */ 7087 if (mutex_owned(page_vnode_mutex(&retired_pages))) { 7088 page_unlock_nocapture(pp); 7089 return; 7090 } 7091 if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) { 7092 page_unlock_nocapture(pp); 7093 return; 7094 } 7095 7096 index = PAGE_CAPTURE_HASH(pp); 7097 7098 mp = &page_capture_hash[index].pchh_mutex; 7099 mutex_enter(mp); 7100 for (i = 0; i < 2; i++) { 7101 bp = page_capture_hash[index].lists[i].next; 7102 while (bp != &page_capture_hash[index].lists[i]) { 7103 if (bp->pp == pp) { 7104 szc = bp->szc; 7105 flags = bp->flags | CAPTURE_ASYNC; 7106 datap = bp->datap; 7107 mutex_exit(mp); 7108 (void) page_trycapture(pp, szc, flags, datap); 7109 return; 7110 } 7111 bp = bp->next; 7112 } 7113 } 7114 7115 /* Failed to find page in hash so clear flags and unlock it. */ 7116 page_clrtoxic(pp, PR_CAPTURE); 7117 page_unlock(pp); 7118 7119 mutex_exit(mp); 7120 } 7121 7122 void 7123 page_capture_init() 7124 { 7125 int i; 7126 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7127 page_capture_hash[i].lists[0].next = 7128 &page_capture_hash[i].lists[0]; 7129 page_capture_hash[i].lists[0].prev = 7130 &page_capture_hash[i].lists[0]; 7131 page_capture_hash[i].lists[1].next = 7132 &page_capture_hash[i].lists[1]; 7133 page_capture_hash[i].lists[1].prev = 7134 &page_capture_hash[i].lists[1]; 7135 } 7136 7137 pc_thread_shortwait = 23 * hz; 7138 pc_thread_longwait = 1201 * hz; 7139 pc_thread_ism_retry = 3; 7140 mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL); 7141 cv_init(&pc_cv, NULL, CV_DEFAULT, NULL); 7142 pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0, 7143 TS_RUN, minclsyspri); 7144 } 7145 7146 /* 7147 * It is necessary to scrub any failing pages prior to reboot in order to 7148 * prevent a latent error trap from occurring on the next boot. 7149 */ 7150 void 7151 page_retire_mdboot() 7152 { 7153 page_t *pp; 7154 int i, j; 7155 page_capture_hash_bucket_t *bp; 7156 7157 /* walk lists looking for pages to scrub */ 7158 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7159 if (page_capture_hash[i].num_pages == 0) 7160 continue; 7161 7162 mutex_enter(&page_capture_hash[i].pchh_mutex); 7163 7164 for (j = 0; j < 2; j++) { 7165 bp = page_capture_hash[i].lists[j].next; 7166 while (bp != &page_capture_hash[i].lists[j]) { 7167 pp = bp->pp; 7168 if (!PP_ISKAS(pp) && PP_TOXIC(pp)) { 7169 pp->p_selock = -1; /* pacify ASSERTs */ 7170 PP_CLRFREE(pp); 7171 pagescrub(pp, 0, PAGESIZE); 7172 pp->p_selock = 0; 7173 } 7174 bp = bp->next; 7175 } 7176 } 7177 mutex_exit(&page_capture_hash[i].pchh_mutex); 7178 } 7179 } 7180 7181 /* 7182 * Walk the page_capture_hash trying to capture pages and also cleanup old 7183 * entries which have expired. 
7184 */ 7185 void 7186 page_capture_async() 7187 { 7188 page_t *pp; 7189 int i; 7190 int ret; 7191 page_capture_hash_bucket_t *bp1, *bp2; 7192 uint_t szc; 7193 uint_t flags; 7194 void *datap; 7195 7196 /* If there are outstanding pages to be captured, get to work */ 7197 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7198 if (page_capture_hash[i].num_pages == 0) 7199 continue; 7200 /* Append list 1 to list 0 and then walk through list 0 */ 7201 mutex_enter(&page_capture_hash[i].pchh_mutex); 7202 bp1 = &page_capture_hash[i].lists[1]; 7203 bp2 = bp1->next; 7204 if (bp1 != bp2) { 7205 bp1->prev->next = page_capture_hash[i].lists[0].next; 7206 bp2->prev = &page_capture_hash[i].lists[0]; 7207 page_capture_hash[i].lists[0].next->prev = bp1->prev; 7208 page_capture_hash[i].lists[0].next = bp2; 7209 bp1->next = bp1; 7210 bp1->prev = bp1; 7211 } 7212 7213 /* list[1] will be empty now */ 7214 7215 bp1 = page_capture_hash[i].lists[0].next; 7216 while (bp1 != &page_capture_hash[i].lists[0]) { 7217 /* Check expiration time */ 7218 if ((lbolt > bp1->expires && bp1->expires != -1) || 7219 page_deleted(bp1->pp)) { 7220 page_capture_hash[i].lists[0].next = bp1->next; 7221 bp1->next->prev = 7222 &page_capture_hash[i].lists[0]; 7223 page_capture_hash[i].num_pages--; 7224 7225 /* 7226 * We can safely remove the PR_CAPTURE bit 7227 * without holding the EXCL lock on the page 7228 * as the PR_CAPTURE bit requres that the 7229 * page_capture_hash[].pchh_mutex be held 7230 * to modify it. 7231 */ 7232 page_clrtoxic(bp1->pp, PR_CAPTURE); 7233 mutex_exit(&page_capture_hash[i].pchh_mutex); 7234 kmem_free(bp1, sizeof (*bp1)); 7235 mutex_enter(&page_capture_hash[i].pchh_mutex); 7236 bp1 = page_capture_hash[i].lists[0].next; 7237 continue; 7238 } 7239 pp = bp1->pp; 7240 szc = bp1->szc; 7241 flags = bp1->flags; 7242 datap = bp1->datap; 7243 mutex_exit(&page_capture_hash[i].pchh_mutex); 7244 if (page_trylock(pp, SE_EXCL)) { 7245 ret = page_trycapture(pp, szc, 7246 flags | CAPTURE_ASYNC, datap); 7247 } else { 7248 ret = 1; /* move to walked hash */ 7249 } 7250 7251 if (ret != 0) { 7252 /* Move to walked hash */ 7253 (void) page_capture_move_to_walked(pp); 7254 } 7255 mutex_enter(&page_capture_hash[i].pchh_mutex); 7256 bp1 = page_capture_hash[i].lists[0].next; 7257 } 7258 7259 mutex_exit(&page_capture_hash[i].pchh_mutex); 7260 } 7261 } 7262 7263 /* 7264 * This function is called by the page_capture_thread, and is needed in 7265 * in order to initiate aio cleanup, so that pages used in aio 7266 * will be unlocked and subsequently retired by page_capture_thread. 7267 */ 7268 static int 7269 do_aio_cleanup(void) 7270 { 7271 proc_t *procp; 7272 int (*aio_cleanup_dr_delete_memory)(proc_t *); 7273 int cleaned = 0; 7274 7275 if (modload("sys", "kaio") == -1) { 7276 cmn_err(CE_WARN, "do_aio_cleanup: cannot load kaio"); 7277 return (0); 7278 } 7279 /* 7280 * We use the aio_cleanup_dr_delete_memory function to 7281 * initiate the actual clean up; this function will wake 7282 * up the per-process aio_cleanup_thread. 
7283 */ 7284 aio_cleanup_dr_delete_memory = (int (*)(proc_t *)) 7285 modgetsymvalue("aio_cleanup_dr_delete_memory", 0); 7286 if (aio_cleanup_dr_delete_memory == NULL) { 7287 cmn_err(CE_WARN, 7288 "aio_cleanup_dr_delete_memory not found in kaio"); 7289 return (0); 7290 } 7291 mutex_enter(&pidlock); 7292 for (procp = practive; (procp != NULL); procp = procp->p_next) { 7293 mutex_enter(&procp->p_lock); 7294 if (procp->p_aio != NULL) { 7295 /* cleanup proc's outstanding kaio */ 7296 cleaned += (*aio_cleanup_dr_delete_memory)(procp); 7297 } 7298 mutex_exit(&procp->p_lock); 7299 } 7300 mutex_exit(&pidlock); 7301 return (cleaned); 7302 } 7303 7304 /* 7305 * helper function for page_capture_thread 7306 */ 7307 static void 7308 page_capture_handle_outstanding(void) 7309 { 7310 extern size_t spt_used; 7311 int ntry; 7312 7313 if (!page_retire_pend_count()) { 7314 /* 7315 * Do we really want to be this aggressive 7316 * for things other than page_retire? 7317 * Maybe have a counter for each callback 7318 * type to guide how aggressive we should 7319 * be here. Thus if there's at least one 7320 * page for page_retire we go ahead and reap 7321 * like this. 7322 */ 7323 kmem_reap(); 7324 seg_preap(); 7325 page_capture_async(); 7326 } else { 7327 /* 7328 * There are pages pending retirement, so 7329 * we reap prior to attempting to capture. 7330 */ 7331 kmem_reap(); 7332 /* 7333 * When ISM is in use, we need to disable and 7334 * purge the seg_pcache, and initiate aio 7335 * cleanup in order to release page locks and 7336 * subsquently retire pages in need of 7337 * retirement. 7338 */ 7339 if (spt_used) { 7340 /* disable and purge seg_pcache */ 7341 (void) seg_p_disable(); 7342 for (ntry = 0; ntry < pc_thread_ism_retry; ntry++) { 7343 if (!page_retire_pend_count()) 7344 break; 7345 if (do_aio_cleanup()) { 7346 /* 7347 * allow the apps cleanup threads 7348 * to run 7349 */ 7350 delay(pc_thread_shortwait); 7351 } 7352 page_capture_async(); 7353 } 7354 /* reenable seg_pcache */ 7355 seg_p_enable(); 7356 } else { 7357 seg_preap(); 7358 page_capture_async(); 7359 } 7360 } 7361 } 7362 7363 /* 7364 * The page_capture_thread loops forever, looking to see if there are 7365 * pages still waiting to be captured. 7366 */ 7367 static void 7368 page_capture_thread(void) 7369 { 7370 callb_cpr_t c; 7371 int outstanding; 7372 int i; 7373 7374 CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr, "page_capture"); 7375 7376 mutex_enter(&pc_thread_mutex); 7377 for (;;) { 7378 outstanding = 0; 7379 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) 7380 outstanding += page_capture_hash[i].num_pages; 7381 if (outstanding) { 7382 page_capture_handle_outstanding(); 7383 CALLB_CPR_SAFE_BEGIN(&c); 7384 (void) cv_timedwait(&pc_cv, &pc_thread_mutex, 7385 lbolt + pc_thread_shortwait); 7386 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex); 7387 } else { 7388 CALLB_CPR_SAFE_BEGIN(&c); 7389 (void) cv_timedwait(&pc_cv, &pc_thread_mutex, 7390 lbolt + pc_thread_longwait); 7391 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex); 7392 } 7393 } 7394 /*NOTREACHED*/ 7395 } 7396