1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */ 27 /* All Rights Reserved */ 28 29 /* 30 * University Copyright- Copyright (c) 1982, 1986, 1988 31 * The Regents of the University of California 32 * All Rights Reserved 33 * 34 * University Acknowledgment- Portions of this document are derived from 35 * software developed by the University of California, Berkeley, and its 36 * contributors. 37 */ 38 39 #pragma ident "%Z%%M% %I% %E% SMI" 40 41 /* 42 * VM - physical page management. 43 */ 44 45 #include <sys/types.h> 46 #include <sys/t_lock.h> 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/errno.h> 50 #include <sys/time.h> 51 #include <sys/vnode.h> 52 #include <sys/vm.h> 53 #include <sys/vtrace.h> 54 #include <sys/swap.h> 55 #include <sys/cmn_err.h> 56 #include <sys/tuneable.h> 57 #include <sys/sysmacros.h> 58 #include <sys/cpuvar.h> 59 #include <sys/callb.h> 60 #include <sys/debug.h> 61 #include <sys/tnf_probe.h> 62 #include <sys/condvar_impl.h> 63 #include <sys/mem_config.h> 64 #include <sys/mem_cage.h> 65 #include <sys/kmem.h> 66 #include <sys/atomic.h> 67 #include <sys/strlog.h> 68 #include <sys/mman.h> 69 #include <sys/ontrap.h> 70 #include <sys/lgrp.h> 71 #include <sys/vfs.h> 72 73 #include <vm/hat.h> 74 #include <vm/anon.h> 75 #include <vm/page.h> 76 #include <vm/seg.h> 77 #include <vm/pvn.h> 78 #include <vm/seg_kmem.h> 79 #include <vm/vm_dep.h> 80 #include <sys/vm_usage.h> 81 #include <fs/fs_subr.h> 82 #include <sys/ddi.h> 83 #include <sys/modctl.h> 84 85 static int nopageage = 0; 86 87 static pgcnt_t max_page_get; /* max page_get request size in pages */ 88 pgcnt_t total_pages = 0; /* total number of pages (used by /proc) */ 89 90 /* 91 * freemem_lock protects all freemem variables: 92 * availrmem. Also this lock protects the globals which track the 93 * availrmem changes for accurate kernel footprint calculation. 94 * See below for an explanation of these 95 * globals. 96 */ 97 kmutex_t freemem_lock; 98 pgcnt_t availrmem; 99 pgcnt_t availrmem_initial; 100 101 /* 102 * These globals track availrmem changes to get a more accurate 103 * estimate of tke kernel size. Historically pp_kernel is used for 104 * kernel size and is based on availrmem. But availrmem is adjusted for 105 * locked pages in the system not just for kernel locked pages. 106 * These new counters will track the pages locked through segvn and 107 * by explicit user locking. 108 * 109 * pages_locked : How many pages are locked because of user specified 110 * locking through mlock or plock. 
 *
 * pages_useclaim, pages_claimed : These two variables track the
 * claim adjustments because of the protection changes on a segvn segment.
 *
 * All these globals are protected by the same lock which protects availrmem.
 */
pgcnt_t pages_locked = 0;
pgcnt_t pages_useclaim = 0;
pgcnt_t pages_claimed = 0;


/*
 * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
 */
static kmutex_t	new_freemem_lock;
static uint_t	freemem_wait;	/* someone waiting for freemem */
static kcondvar_t freemem_cv;

/*
 * The logical page free list is maintained as two lists, the 'free'
 * and the 'cache' lists.
 * The free list contains those pages that should be reused first.
 *
 * The implementation of the lists is machine dependent.
 * page_get_freelist(), page_get_cachelist(),
 * page_list_sub(), and page_list_add()
 * form the interface to the machine dependent implementation.
 *
 * Pages with p_free set are on the cache list.
 * Pages with p_free and p_age set are on the free list.
 *
 * A page may be locked while on either list.
 */

/*
 * free list accounting stuff.
 *
 *
 * Spread out the value for the number of pages on the
 * page free and page cache lists.  If there is just one
 * value, then it must be under just one lock.
 * The lock contention and cache traffic are a real bother.
 *
 * When we acquire and then drop a single pcf lock
 * we can start in the middle of the array of pcf structures.
 * If we acquire more than one pcf lock at a time, we need to
 * start at the front to avoid deadlocking.
 *
 * pcf_count holds the number of pages in each pool.
 *
 * pcf_block is set when page_create_get_something() has asked the
 * PSM page freelist and page cachelist routines without specifying
 * a color and nothing came back.  This is used to block anything
 * else from moving pages from one list to the other while the
 * lists are searched again.  If a page is freed while pcf_block is
 * set, then pcf_reserve is incremented.  pcgs_unblock() takes care
 * of clearing pcf_block, doing the wakeups, etc.
 */

#define	MAX_PCF_FANOUT	NCPU
static uint_t	pcf_fanout = 1;	/* Will get changed at boot time */
static uint_t	pcf_fanout_mask = 0;

struct pcf {
	kmutex_t	pcf_lock;	/* protects the structure */
	uint_t		pcf_count;	/* page count */
	uint_t		pcf_wait;	/* number of waiters */
	uint_t		pcf_block;	/* pcgs flag to page_free() */
	uint_t		pcf_reserve;	/* pages freed after pcf_block set */
	uint_t		pcf_fill[10];	/* to line up on the caches */
};

/*
 * PCF_INDEX hash needs to be dynamic (every so often the hash changes where
 * it will hash the cpu to).  This is done to prevent a drain condition
 * from happening.  This drain condition will occur when pcf_count decrement
 * occurs on cpu A and the increment of pcf_count always occurs on cpu B.  An
 * example of this shows up with device interrupts.  The dma buffer is allocated
 * by the cpu requesting the IO thus the pcf_count is decremented based on that.
 * When the memory is returned by the interrupt thread, the pcf_count will be
 * incremented based on the cpu servicing the interrupt.
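 *
 * A minimal, illustrative sketch (not the actual accounting path, which
 * lives in pcf_decrement_bucket() and friends) of charging one page
 * against a single pcf bucket chosen by PCF_INDEX():
 *
 *	struct pcf *p = &pcf[PCF_INDEX()];
 *
 *	mutex_enter(&p->pcf_lock);
 *	if (p->pcf_count != 0)
 *		p->pcf_count--;		(one page accounted for)
 *	mutex_exit(&p->pcf_lock);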
192 */ 193 static struct pcf pcf[MAX_PCF_FANOUT]; 194 #define PCF_INDEX() ((int)(((long)CPU->cpu_seqid) + \ 195 (randtick() >> 24)) & (pcf_fanout_mask)) 196 197 static int pcf_decrement_bucket(pgcnt_t); 198 static int pcf_decrement_multiple(pgcnt_t *, pgcnt_t, int); 199 200 kmutex_t pcgs_lock; /* serializes page_create_get_ */ 201 kmutex_t pcgs_cagelock; /* serializes NOSLEEP cage allocs */ 202 kmutex_t pcgs_wait_lock; /* used for delay in pcgs */ 203 static kcondvar_t pcgs_cv; /* cv for delay in pcgs */ 204 205 #ifdef VM_STATS 206 207 /* 208 * No locks, but so what, they are only statistics. 209 */ 210 211 static struct page_tcnt { 212 int pc_free_cache; /* free's into cache list */ 213 int pc_free_dontneed; /* free's with dontneed */ 214 int pc_free_pageout; /* free's from pageout */ 215 int pc_free_free; /* free's into free list */ 216 int pc_free_pages; /* free's into large page free list */ 217 int pc_destroy_pages; /* large page destroy's */ 218 int pc_get_cache; /* get's from cache list */ 219 int pc_get_free; /* get's from free list */ 220 int pc_reclaim; /* reclaim's */ 221 int pc_abortfree; /* abort's of free pages */ 222 int pc_find_hit; /* find's that find page */ 223 int pc_find_miss; /* find's that don't find page */ 224 int pc_destroy_free; /* # of free pages destroyed */ 225 #define PC_HASH_CNT (4*PAGE_HASHAVELEN) 226 int pc_find_hashlen[PC_HASH_CNT+1]; 227 int pc_addclaim_pages; 228 int pc_subclaim_pages; 229 int pc_free_replacement_page[2]; 230 int pc_try_demote_pages[6]; 231 int pc_demote_pages[2]; 232 } pagecnt; 233 234 uint_t hashin_count; 235 uint_t hashin_not_held; 236 uint_t hashin_already; 237 238 uint_t hashout_count; 239 uint_t hashout_not_held; 240 241 uint_t page_create_count; 242 uint_t page_create_not_enough; 243 uint_t page_create_not_enough_again; 244 uint_t page_create_zero; 245 uint_t page_create_hashout; 246 uint_t page_create_page_lock_failed; 247 uint_t page_create_trylock_failed; 248 uint_t page_create_found_one; 249 uint_t page_create_hashin_failed; 250 uint_t page_create_dropped_phm; 251 252 uint_t page_create_new; 253 uint_t page_create_exists; 254 uint_t page_create_putbacks; 255 uint_t page_create_overshoot; 256 257 uint_t page_reclaim_zero; 258 uint_t page_reclaim_zero_locked; 259 260 uint_t page_rename_exists; 261 uint_t page_rename_count; 262 263 uint_t page_lookup_cnt[20]; 264 uint_t page_lookup_nowait_cnt[10]; 265 uint_t page_find_cnt; 266 uint_t page_exists_cnt; 267 uint_t page_exists_forreal_cnt; 268 uint_t page_lookup_dev_cnt; 269 uint_t get_cachelist_cnt; 270 uint_t page_create_cnt[10]; 271 uint_t alloc_pages[9]; 272 uint_t page_exphcontg[19]; 273 uint_t page_create_large_cnt[10]; 274 275 /* 276 * Collects statistics. 
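 *
 * The search macro below is used with a precomputed hash index and leaves
 * `pp' pointing at the matching page, or NULL if there is none.  The
 * typical calling pattern, as used by page_lookup_create() and page_find(),
 * is:
 *
 *	index = PAGE_HASH_FUNC(vp, off);
 *	phm = PAGE_HASH_MUTEX(index);
 *	mutex_enter(phm);
 *	PAGE_HASH_SEARCH(index, pp, vp, off);
 *	mutex_exit(phm);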
 */
#define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
	uint_t	mylen = 0; \
			\
	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash, mylen++) { \
		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
			break; \
	} \
	if ((pp) != NULL) \
		pagecnt.pc_find_hit++; \
	else \
		pagecnt.pc_find_miss++; \
	if (mylen > PC_HASH_CNT) \
		mylen = PC_HASH_CNT; \
	pagecnt.pc_find_hashlen[mylen]++; \
}

#else	/* VM_STATS */

/*
 * Don't collect statistics
 */
#define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
			break; \
	} \
}

#endif	/* VM_STATS */


#ifdef DEBUG
#define	MEMSEG_SEARCH_STATS
#endif

#ifdef MEMSEG_SEARCH_STATS
struct memseg_stats {
	uint_t nsearch;
	uint_t nlastwon;
	uint_t nhashwon;
	uint_t nnotfound;
} memseg_stats;

#define	MEMSEG_STAT_INCR(v) \
	atomic_add_32(&memseg_stats.v, 1)
#else
#define	MEMSEG_STAT_INCR(x)
#endif

struct memseg *memsegs;		/* list of memory segments */

/*
 * /etc/system tunable to control large page allocation heuristic.
 *
 * Setting to LPAP_LOCAL will heavily prefer the local lgroup over remote lgroup
 * for large page allocation requests.  If a large page is not readily
 * available on the local freelists we will go through additional effort
 * to create a large page, potentially moving smaller pages around to coalesce
 * larger pages in the local lgroup.
 * Default value of LPAP_DEFAULT will go to remote freelists if large pages
 * are not readily available in the local lgroup.
 */
enum lpap {
	LPAP_DEFAULT,	/* default large page allocation policy */
	LPAP_LOCAL	/* local large page allocation policy */
};

enum lpap lpg_alloc_prefer = LPAP_DEFAULT;

static void page_init_mem_config(void);
static int page_do_hashin(page_t *, vnode_t *, u_offset_t);
static void page_do_hashout(page_t *);
static void page_capture_init();
int page_capture_take_action(page_t *, uint_t, void *);

static void page_demote_vp_pages(page_t *);


void
pcf_init(void)
{
	if (boot_ncpus != -1) {
		pcf_fanout = boot_ncpus;
	} else {
		pcf_fanout = max_ncpus;
	}
#ifdef	sun4v
	/*
	 * Force at least 4 buckets if possible for sun4v.
	 */
	pcf_fanout = MAX(pcf_fanout, 4);
#endif /* sun4v */

	/*
	 * Round up to the nearest power of 2.
	 */
	pcf_fanout = MIN(pcf_fanout, MAX_PCF_FANOUT);
	if (!ISP2(pcf_fanout)) {
		pcf_fanout = 1 << highbit(pcf_fanout);

		if (pcf_fanout > MAX_PCF_FANOUT) {
			pcf_fanout = 1 << (highbit(MAX_PCF_FANOUT) - 1);
		}
	}
	pcf_fanout_mask = pcf_fanout - 1;
}

/*
 * vm subsystem related initialization
 */
void
vm_init(void)
{
	boolean_t callb_vm_cpr(void *, int);

	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
	page_init_mem_config();
	page_retire_init();
	vm_usage_init();
	page_capture_init();
}

/*
 * This function is called at startup and when memory is added or deleted.
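 *
 * When the administrator has not set pages_pp_maximum in /etc/system, the
 * value computed below works out to roughly (illustrative arithmetic):
 *
 *	pages_pp_maximum = (availrmem just after startup) / 25 + btop(4MB)
 *
 * i.e. 4% of the memory available after startup allocations plus 4MB worth
 * of pages, but never less than tune.t_minarmem + 100 pages.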
404 */ 405 void 406 init_pages_pp_maximum() 407 { 408 static pgcnt_t p_min; 409 static pgcnt_t pages_pp_maximum_startup; 410 static pgcnt_t avrmem_delta; 411 static int init_done; 412 static int user_set; /* true if set in /etc/system */ 413 414 if (init_done == 0) { 415 416 /* If the user specified a value, save it */ 417 if (pages_pp_maximum != 0) { 418 user_set = 1; 419 pages_pp_maximum_startup = pages_pp_maximum; 420 } 421 422 /* 423 * Setting of pages_pp_maximum is based first time 424 * on the value of availrmem just after the start-up 425 * allocations. To preserve this relationship at run 426 * time, use a delta from availrmem_initial. 427 */ 428 ASSERT(availrmem_initial >= availrmem); 429 avrmem_delta = availrmem_initial - availrmem; 430 431 /* The allowable floor of pages_pp_maximum */ 432 p_min = tune.t_minarmem + 100; 433 434 /* Make sure we don't come through here again. */ 435 init_done = 1; 436 } 437 /* 438 * Determine pages_pp_maximum, the number of currently available 439 * pages (availrmem) that can't be `locked'. If not set by 440 * the user, we set it to 4% of the currently available memory 441 * plus 4MB. 442 * But we also insist that it be greater than tune.t_minarmem; 443 * otherwise a process could lock down a lot of memory, get swapped 444 * out, and never have enough to get swapped back in. 445 */ 446 if (user_set) 447 pages_pp_maximum = pages_pp_maximum_startup; 448 else 449 pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25) 450 + btop(4 * 1024 * 1024); 451 452 if (pages_pp_maximum <= p_min) { 453 pages_pp_maximum = p_min; 454 } 455 } 456 457 void 458 set_max_page_get(pgcnt_t target_total_pages) 459 { 460 max_page_get = target_total_pages / 2; 461 } 462 463 static pgcnt_t pending_delete; 464 465 /*ARGSUSED*/ 466 static void 467 page_mem_config_post_add( 468 void *arg, 469 pgcnt_t delta_pages) 470 { 471 set_max_page_get(total_pages - pending_delete); 472 init_pages_pp_maximum(); 473 } 474 475 /*ARGSUSED*/ 476 static int 477 page_mem_config_pre_del( 478 void *arg, 479 pgcnt_t delta_pages) 480 { 481 pgcnt_t nv; 482 483 nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages); 484 set_max_page_get(total_pages - nv); 485 return (0); 486 } 487 488 /*ARGSUSED*/ 489 static void 490 page_mem_config_post_del( 491 void *arg, 492 pgcnt_t delta_pages, 493 int cancelled) 494 { 495 pgcnt_t nv; 496 497 nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages); 498 set_max_page_get(total_pages - nv); 499 if (!cancelled) 500 init_pages_pp_maximum(); 501 } 502 503 static kphysm_setup_vector_t page_mem_config_vec = { 504 KPHYSM_SETUP_VECTOR_VERSION, 505 page_mem_config_post_add, 506 page_mem_config_pre_del, 507 page_mem_config_post_del, 508 }; 509 510 static void 511 page_init_mem_config(void) 512 { 513 int ret; 514 515 ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL); 516 ASSERT(ret == 0); 517 } 518 519 /* 520 * Evenly spread out the PCF counters for large free pages 521 */ 522 static void 523 page_free_large_ctr(pgcnt_t npages) 524 { 525 static struct pcf *p = pcf; 526 pgcnt_t lump; 527 528 freemem += npages; 529 530 lump = roundup(npages, pcf_fanout) / pcf_fanout; 531 532 while (npages > 0) { 533 534 ASSERT(!p->pcf_block); 535 536 if (lump < npages) { 537 p->pcf_count += (uint_t)lump; 538 npages -= lump; 539 } else { 540 p->pcf_count += (uint_t)npages; 541 npages = 0; 542 } 543 544 ASSERT(!p->pcf_wait); 545 546 if (++p > &pcf[pcf_fanout - 1]) 547 p = pcf; 548 } 549 550 ASSERT(npages == 0); 551 } 552 553 /* 554 * Add a physical chunk of 
memory to the system free lists during startup. 555 * Platform specific startup() allocates the memory for the page structs. 556 * 557 * num - number of page structures 558 * base - page number (pfn) to be associated with the first page. 559 * 560 * Since we are doing this during startup (ie. single threaded), we will 561 * use shortcut routines to avoid any locking overhead while putting all 562 * these pages on the freelists. 563 * 564 * NOTE: Any changes performed to page_free(), must also be performed to 565 * add_physmem() since this is how we initialize all page_t's at 566 * boot time. 567 */ 568 void 569 add_physmem( 570 page_t *pp, 571 pgcnt_t num, 572 pfn_t pnum) 573 { 574 page_t *root = NULL; 575 uint_t szc = page_num_pagesizes() - 1; 576 pgcnt_t large = page_get_pagecnt(szc); 577 pgcnt_t cnt = 0; 578 579 TRACE_2(TR_FAC_VM, TR_PAGE_INIT, 580 "add_physmem:pp %p num %lu", pp, num); 581 582 /* 583 * Arbitrarily limit the max page_get request 584 * to 1/2 of the page structs we have. 585 */ 586 total_pages += num; 587 set_max_page_get(total_pages); 588 589 PLCNT_MODIFY_MAX(pnum, (long)num); 590 591 /* 592 * The physical space for the pages array 593 * representing ram pages has already been 594 * allocated. Here we initialize each lock 595 * in the page structure, and put each on 596 * the free list 597 */ 598 for (; num; pp++, pnum++, num--) { 599 600 /* 601 * this needs to fill in the page number 602 * and do any other arch specific initialization 603 */ 604 add_physmem_cb(pp, pnum); 605 606 pp->p_lckcnt = 0; 607 pp->p_cowcnt = 0; 608 pp->p_slckcnt = 0; 609 610 /* 611 * Initialize the page lock as unlocked, since nobody 612 * can see or access this page yet. 613 */ 614 pp->p_selock = 0; 615 616 /* 617 * Initialize IO lock 618 */ 619 page_iolock_init(pp); 620 621 /* 622 * initialize other fields in the page_t 623 */ 624 PP_SETFREE(pp); 625 page_clr_all_props(pp); 626 PP_SETAGED(pp); 627 pp->p_offset = (u_offset_t)-1; 628 pp->p_next = pp; 629 pp->p_prev = pp; 630 631 /* 632 * Simple case: System doesn't support large pages. 633 */ 634 if (szc == 0) { 635 pp->p_szc = 0; 636 page_free_at_startup(pp); 637 continue; 638 } 639 640 /* 641 * Handle unaligned pages, we collect them up onto 642 * the root page until we have a full large page. 643 */ 644 if (!IS_P2ALIGNED(pnum, large)) { 645 646 /* 647 * If not in a large page, 648 * just free as small page. 649 */ 650 if (root == NULL) { 651 pp->p_szc = 0; 652 page_free_at_startup(pp); 653 continue; 654 } 655 656 /* 657 * Link a constituent page into the large page. 658 */ 659 pp->p_szc = szc; 660 page_list_concat(&root, &pp); 661 662 /* 663 * When large page is fully formed, free it. 664 */ 665 if (++cnt == large) { 666 page_free_large_ctr(cnt); 667 page_list_add_pages(root, PG_LIST_ISINIT); 668 root = NULL; 669 cnt = 0; 670 } 671 continue; 672 } 673 674 /* 675 * At this point we have a page number which 676 * is aligned. We assert that we aren't already 677 * in a different large page. 678 */ 679 ASSERT(IS_P2ALIGNED(pnum, large)); 680 ASSERT(root == NULL && cnt == 0); 681 682 /* 683 * If insufficient number of pages left to form 684 * a large page, just free the small page. 685 */ 686 if (num < large) { 687 pp->p_szc = 0; 688 page_free_at_startup(pp); 689 continue; 690 } 691 692 /* 693 * Otherwise start a new large page. 694 */ 695 pp->p_szc = szc; 696 cnt++; 697 root = pp; 698 } 699 ASSERT(root == NULL && cnt == 0); 700 } 701 702 /* 703 * Find a page representing the specified [vp, offset]. 
704 * If we find the page but it is intransit coming in, 705 * it will have an "exclusive" lock and we wait for 706 * the i/o to complete. A page found on the free list 707 * is always reclaimed and then locked. On success, the page 708 * is locked, its data is valid and it isn't on the free 709 * list, while a NULL is returned if the page doesn't exist. 710 */ 711 page_t * 712 page_lookup(vnode_t *vp, u_offset_t off, se_t se) 713 { 714 return (page_lookup_create(vp, off, se, NULL, NULL, 0)); 715 } 716 717 /* 718 * Find a page representing the specified [vp, offset]. 719 * We either return the one we found or, if passed in, 720 * create one with identity of [vp, offset] of the 721 * pre-allocated page. If we find existing page but it is 722 * intransit coming in, it will have an "exclusive" lock 723 * and we wait for the i/o to complete. A page found on 724 * the free list is always reclaimed and then locked. 725 * On success, the page is locked, its data is valid and 726 * it isn't on the free list, while a NULL is returned 727 * if the page doesn't exist and newpp is NULL; 728 */ 729 page_t * 730 page_lookup_create( 731 vnode_t *vp, 732 u_offset_t off, 733 se_t se, 734 page_t *newpp, 735 spgcnt_t *nrelocp, 736 int flags) 737 { 738 page_t *pp; 739 kmutex_t *phm; 740 ulong_t index; 741 uint_t hash_locked; 742 uint_t es; 743 744 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 745 VM_STAT_ADD(page_lookup_cnt[0]); 746 ASSERT(newpp ? PAGE_EXCL(newpp) : 1); 747 748 /* 749 * Acquire the appropriate page hash lock since 750 * we have to search the hash list. Pages that 751 * hash to this list can't change identity while 752 * this lock is held. 753 */ 754 hash_locked = 0; 755 index = PAGE_HASH_FUNC(vp, off); 756 phm = NULL; 757 top: 758 PAGE_HASH_SEARCH(index, pp, vp, off); 759 if (pp != NULL) { 760 VM_STAT_ADD(page_lookup_cnt[1]); 761 es = (newpp != NULL) ? 1 : 0; 762 es |= flags; 763 if (!hash_locked) { 764 VM_STAT_ADD(page_lookup_cnt[2]); 765 if (!page_try_reclaim_lock(pp, se, es)) { 766 /* 767 * On a miss, acquire the phm. Then 768 * next time, page_lock() will be called, 769 * causing a wait if the page is busy. 770 * just looping with page_trylock() would 771 * get pretty boring. 772 */ 773 VM_STAT_ADD(page_lookup_cnt[3]); 774 phm = PAGE_HASH_MUTEX(index); 775 mutex_enter(phm); 776 hash_locked = 1; 777 goto top; 778 } 779 } else { 780 VM_STAT_ADD(page_lookup_cnt[4]); 781 if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) { 782 VM_STAT_ADD(page_lookup_cnt[5]); 783 goto top; 784 } 785 } 786 787 /* 788 * Since `pp' is locked it can not change identity now. 789 * Reconfirm we locked the correct page. 790 * 791 * Both the p_vnode and p_offset *must* be cast volatile 792 * to force a reload of their values: The PAGE_HASH_SEARCH 793 * macro will have stuffed p_vnode and p_offset into 794 * registers before calling page_trylock(); another thread, 795 * actually holding the hash lock, could have changed the 796 * page's identity in memory, but our registers would not 797 * be changed, fooling the reconfirmation. If the hash 798 * lock was held during the search, the casting would 799 * not be needed. 
800 */ 801 VM_STAT_ADD(page_lookup_cnt[6]); 802 if (((volatile struct vnode *)(pp->p_vnode) != vp) || 803 ((volatile u_offset_t)(pp->p_offset) != off)) { 804 VM_STAT_ADD(page_lookup_cnt[7]); 805 if (hash_locked) { 806 panic("page_lookup_create: lost page %p", 807 (void *)pp); 808 /*NOTREACHED*/ 809 } 810 page_unlock(pp); 811 phm = PAGE_HASH_MUTEX(index); 812 mutex_enter(phm); 813 hash_locked = 1; 814 goto top; 815 } 816 817 /* 818 * If page_trylock() was called, then pp may still be on 819 * the cachelist (can't be on the free list, it would not 820 * have been found in the search). If it is on the 821 * cachelist it must be pulled now. To pull the page from 822 * the cachelist, it must be exclusively locked. 823 * 824 * The other big difference between page_trylock() and 825 * page_lock(), is that page_lock() will pull the 826 * page from whatever free list (the cache list in this 827 * case) the page is on. If page_trylock() was used 828 * above, then we have to do the reclaim ourselves. 829 */ 830 if ((!hash_locked) && (PP_ISFREE(pp))) { 831 ASSERT(PP_ISAGED(pp) == 0); 832 VM_STAT_ADD(page_lookup_cnt[8]); 833 834 /* 835 * page_relcaim will insure that we 836 * have this page exclusively 837 */ 838 839 if (!page_reclaim(pp, NULL)) { 840 /* 841 * Page_reclaim dropped whatever lock 842 * we held. 843 */ 844 VM_STAT_ADD(page_lookup_cnt[9]); 845 phm = PAGE_HASH_MUTEX(index); 846 mutex_enter(phm); 847 hash_locked = 1; 848 goto top; 849 } else if (se == SE_SHARED && newpp == NULL) { 850 VM_STAT_ADD(page_lookup_cnt[10]); 851 page_downgrade(pp); 852 } 853 } 854 855 if (hash_locked) { 856 mutex_exit(phm); 857 } 858 859 if (newpp != NULL && pp->p_szc < newpp->p_szc && 860 PAGE_EXCL(pp) && nrelocp != NULL) { 861 ASSERT(nrelocp != NULL); 862 (void) page_relocate(&pp, &newpp, 1, 1, nrelocp, 863 NULL); 864 if (*nrelocp > 0) { 865 VM_STAT_COND_ADD(*nrelocp == 1, 866 page_lookup_cnt[11]); 867 VM_STAT_COND_ADD(*nrelocp > 1, 868 page_lookup_cnt[12]); 869 pp = newpp; 870 se = SE_EXCL; 871 } else { 872 if (se == SE_SHARED) { 873 page_downgrade(pp); 874 } 875 VM_STAT_ADD(page_lookup_cnt[13]); 876 } 877 } else if (newpp != NULL && nrelocp != NULL) { 878 if (PAGE_EXCL(pp) && se == SE_SHARED) { 879 page_downgrade(pp); 880 } 881 VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc, 882 page_lookup_cnt[14]); 883 VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc, 884 page_lookup_cnt[15]); 885 VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc, 886 page_lookup_cnt[16]); 887 } else if (newpp != NULL && PAGE_EXCL(pp)) { 888 se = SE_EXCL; 889 } 890 } else if (!hash_locked) { 891 VM_STAT_ADD(page_lookup_cnt[17]); 892 phm = PAGE_HASH_MUTEX(index); 893 mutex_enter(phm); 894 hash_locked = 1; 895 goto top; 896 } else if (newpp != NULL) { 897 /* 898 * If we have a preallocated page then 899 * insert it now and basically behave like 900 * page_create. 901 */ 902 VM_STAT_ADD(page_lookup_cnt[18]); 903 /* 904 * Since we hold the page hash mutex and 905 * just searched for this page, page_hashin 906 * had better not fail. If it does, that 907 * means some thread did not follow the 908 * page hash mutex rules. Panic now and 909 * get it over with. As usual, go down 910 * holding all the locks. 
911 */ 912 ASSERT(MUTEX_HELD(phm)); 913 if (!page_hashin(newpp, vp, off, phm)) { 914 ASSERT(MUTEX_HELD(phm)); 915 panic("page_lookup_create: hashin failed %p %p %llx %p", 916 (void *)newpp, (void *)vp, off, (void *)phm); 917 /*NOTREACHED*/ 918 } 919 ASSERT(MUTEX_HELD(phm)); 920 mutex_exit(phm); 921 phm = NULL; 922 page_set_props(newpp, P_REF); 923 page_io_lock(newpp); 924 pp = newpp; 925 se = SE_EXCL; 926 } else { 927 VM_STAT_ADD(page_lookup_cnt[19]); 928 mutex_exit(phm); 929 } 930 931 ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1); 932 933 ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1); 934 935 return (pp); 936 } 937 938 /* 939 * Search the hash list for the page representing the 940 * specified [vp, offset] and return it locked. Skip 941 * free pages and pages that cannot be locked as requested. 942 * Used while attempting to kluster pages. 943 */ 944 page_t * 945 page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se) 946 { 947 page_t *pp; 948 kmutex_t *phm; 949 ulong_t index; 950 uint_t locked; 951 952 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 953 VM_STAT_ADD(page_lookup_nowait_cnt[0]); 954 955 index = PAGE_HASH_FUNC(vp, off); 956 PAGE_HASH_SEARCH(index, pp, vp, off); 957 locked = 0; 958 if (pp == NULL) { 959 top: 960 VM_STAT_ADD(page_lookup_nowait_cnt[1]); 961 locked = 1; 962 phm = PAGE_HASH_MUTEX(index); 963 mutex_enter(phm); 964 PAGE_HASH_SEARCH(index, pp, vp, off); 965 } 966 967 if (pp == NULL || PP_ISFREE(pp)) { 968 VM_STAT_ADD(page_lookup_nowait_cnt[2]); 969 pp = NULL; 970 } else { 971 if (!page_trylock(pp, se)) { 972 VM_STAT_ADD(page_lookup_nowait_cnt[3]); 973 pp = NULL; 974 } else { 975 VM_STAT_ADD(page_lookup_nowait_cnt[4]); 976 /* 977 * See the comment in page_lookup() 978 */ 979 if (((volatile struct vnode *)(pp->p_vnode) != vp) || 980 ((u_offset_t)(pp->p_offset) != off)) { 981 VM_STAT_ADD(page_lookup_nowait_cnt[5]); 982 if (locked) { 983 panic("page_lookup_nowait %p", 984 (void *)pp); 985 /*NOTREACHED*/ 986 } 987 page_unlock(pp); 988 goto top; 989 } 990 if (PP_ISFREE(pp)) { 991 VM_STAT_ADD(page_lookup_nowait_cnt[6]); 992 page_unlock(pp); 993 pp = NULL; 994 } 995 } 996 } 997 if (locked) { 998 VM_STAT_ADD(page_lookup_nowait_cnt[7]); 999 mutex_exit(phm); 1000 } 1001 1002 ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1); 1003 1004 return (pp); 1005 } 1006 1007 /* 1008 * Search the hash list for a page with the specified [vp, off] 1009 * that is known to exist and is already locked. This routine 1010 * is typically used by segment SOFTUNLOCK routines. 1011 */ 1012 page_t * 1013 page_find(vnode_t *vp, u_offset_t off) 1014 { 1015 page_t *pp; 1016 kmutex_t *phm; 1017 ulong_t index; 1018 1019 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 1020 VM_STAT_ADD(page_find_cnt); 1021 1022 index = PAGE_HASH_FUNC(vp, off); 1023 phm = PAGE_HASH_MUTEX(index); 1024 1025 mutex_enter(phm); 1026 PAGE_HASH_SEARCH(index, pp, vp, off); 1027 mutex_exit(phm); 1028 1029 ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr); 1030 return (pp); 1031 } 1032 1033 /* 1034 * Determine whether a page with the specified [vp, off] 1035 * currently exists in the system. Obviously this should 1036 * only be considered as a hint since nothing prevents the 1037 * page from disappearing or appearing immediately after 1038 * the return from this routine. Subsequently, we don't 1039 * even bother to lock the list. 
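 *
 * A hypothetical caller would use the answer only to bias a decision
 * (e.g. readahead) and must still be prepared for a later page_lookup()
 * of the same [vp, off] to miss:
 *
 *	if (page_exists(vp, off) != NULL)
 *		likely_resident = B_TRUE;	(hypothetical flag, hint only)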
1040 */ 1041 page_t * 1042 page_exists(vnode_t *vp, u_offset_t off) 1043 { 1044 page_t *pp; 1045 ulong_t index; 1046 1047 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 1048 VM_STAT_ADD(page_exists_cnt); 1049 1050 index = PAGE_HASH_FUNC(vp, off); 1051 PAGE_HASH_SEARCH(index, pp, vp, off); 1052 1053 return (pp); 1054 } 1055 1056 /* 1057 * Determine if physically contiguous pages exist for [vp, off] - [vp, off + 1058 * page_size(szc)) range. if they exist and ppa is not NULL fill ppa array 1059 * with these pages locked SHARED. If necessary reclaim pages from 1060 * freelist. Return 1 if contiguous pages exist and 0 otherwise. 1061 * 1062 * If we fail to lock pages still return 1 if pages exist and contiguous. 1063 * But in this case return value is just a hint. ppa array won't be filled. 1064 * Caller should initialize ppa[0] as NULL to distinguish return value. 1065 * 1066 * Returns 0 if pages don't exist or not physically contiguous. 1067 * 1068 * This routine doesn't work for anonymous(swapfs) pages. 1069 */ 1070 int 1071 page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[]) 1072 { 1073 pgcnt_t pages; 1074 pfn_t pfn; 1075 page_t *rootpp; 1076 pgcnt_t i; 1077 pgcnt_t j; 1078 u_offset_t save_off = off; 1079 ulong_t index; 1080 kmutex_t *phm; 1081 page_t *pp; 1082 uint_t pszc; 1083 int loopcnt = 0; 1084 1085 ASSERT(szc != 0); 1086 ASSERT(vp != NULL); 1087 ASSERT(!IS_SWAPFSVP(vp)); 1088 ASSERT(!VN_ISKAS(vp)); 1089 1090 again: 1091 if (++loopcnt > 3) { 1092 VM_STAT_ADD(page_exphcontg[0]); 1093 return (0); 1094 } 1095 1096 index = PAGE_HASH_FUNC(vp, off); 1097 phm = PAGE_HASH_MUTEX(index); 1098 1099 mutex_enter(phm); 1100 PAGE_HASH_SEARCH(index, pp, vp, off); 1101 mutex_exit(phm); 1102 1103 VM_STAT_ADD(page_exphcontg[1]); 1104 1105 if (pp == NULL) { 1106 VM_STAT_ADD(page_exphcontg[2]); 1107 return (0); 1108 } 1109 1110 pages = page_get_pagecnt(szc); 1111 rootpp = pp; 1112 pfn = rootpp->p_pagenum; 1113 1114 if ((pszc = pp->p_szc) >= szc && ppa != NULL) { 1115 VM_STAT_ADD(page_exphcontg[3]); 1116 if (!page_trylock(pp, SE_SHARED)) { 1117 VM_STAT_ADD(page_exphcontg[4]); 1118 return (1); 1119 } 1120 if (pp->p_szc != pszc || pp->p_vnode != vp || 1121 pp->p_offset != off) { 1122 VM_STAT_ADD(page_exphcontg[5]); 1123 page_unlock(pp); 1124 off = save_off; 1125 goto again; 1126 } 1127 /* 1128 * szc was non zero and vnode and offset matched after we 1129 * locked the page it means it can't become free on us. 1130 */ 1131 ASSERT(!PP_ISFREE(pp)); 1132 if (!IS_P2ALIGNED(pfn, pages)) { 1133 page_unlock(pp); 1134 return (0); 1135 } 1136 ppa[0] = pp; 1137 pp++; 1138 off += PAGESIZE; 1139 pfn++; 1140 for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) { 1141 if (!page_trylock(pp, SE_SHARED)) { 1142 VM_STAT_ADD(page_exphcontg[6]); 1143 pp--; 1144 while (i-- > 0) { 1145 page_unlock(pp); 1146 pp--; 1147 } 1148 ppa[0] = NULL; 1149 return (1); 1150 } 1151 if (pp->p_szc != pszc) { 1152 VM_STAT_ADD(page_exphcontg[7]); 1153 page_unlock(pp); 1154 pp--; 1155 while (i-- > 0) { 1156 page_unlock(pp); 1157 pp--; 1158 } 1159 ppa[0] = NULL; 1160 off = save_off; 1161 goto again; 1162 } 1163 /* 1164 * szc the same as for previous already locked pages 1165 * with right identity. Since this page had correct 1166 * szc after we locked it can't get freed or destroyed 1167 * and therefore must have the expected identity. 
1168 */ 1169 ASSERT(!PP_ISFREE(pp)); 1170 if (pp->p_vnode != vp || 1171 pp->p_offset != off) { 1172 panic("page_exists_physcontig: " 1173 "large page identity doesn't match"); 1174 } 1175 ppa[i] = pp; 1176 ASSERT(pp->p_pagenum == pfn); 1177 } 1178 VM_STAT_ADD(page_exphcontg[8]); 1179 ppa[pages] = NULL; 1180 return (1); 1181 } else if (pszc >= szc) { 1182 VM_STAT_ADD(page_exphcontg[9]); 1183 if (!IS_P2ALIGNED(pfn, pages)) { 1184 return (0); 1185 } 1186 return (1); 1187 } 1188 1189 if (!IS_P2ALIGNED(pfn, pages)) { 1190 VM_STAT_ADD(page_exphcontg[10]); 1191 return (0); 1192 } 1193 1194 if (page_numtomemseg_nolock(pfn) != 1195 page_numtomemseg_nolock(pfn + pages - 1)) { 1196 VM_STAT_ADD(page_exphcontg[11]); 1197 return (0); 1198 } 1199 1200 /* 1201 * We loop up 4 times across pages to promote page size. 1202 * We're extra cautious to promote page size atomically with respect 1203 * to everybody else. But we can probably optimize into 1 loop if 1204 * this becomes an issue. 1205 */ 1206 1207 for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) { 1208 ASSERT(pp->p_pagenum == pfn); 1209 if (!page_trylock(pp, SE_EXCL)) { 1210 VM_STAT_ADD(page_exphcontg[12]); 1211 break; 1212 } 1213 if (pp->p_vnode != vp || 1214 pp->p_offset != off) { 1215 VM_STAT_ADD(page_exphcontg[13]); 1216 page_unlock(pp); 1217 break; 1218 } 1219 if (pp->p_szc >= szc) { 1220 ASSERT(i == 0); 1221 page_unlock(pp); 1222 off = save_off; 1223 goto again; 1224 } 1225 } 1226 1227 if (i != pages) { 1228 VM_STAT_ADD(page_exphcontg[14]); 1229 --pp; 1230 while (i-- > 0) { 1231 page_unlock(pp); 1232 --pp; 1233 } 1234 return (0); 1235 } 1236 1237 pp = rootpp; 1238 for (i = 0; i < pages; i++, pp++) { 1239 if (PP_ISFREE(pp)) { 1240 VM_STAT_ADD(page_exphcontg[15]); 1241 ASSERT(!PP_ISAGED(pp)); 1242 ASSERT(pp->p_szc == 0); 1243 if (!page_reclaim(pp, NULL)) { 1244 break; 1245 } 1246 } else { 1247 ASSERT(pp->p_szc < szc); 1248 VM_STAT_ADD(page_exphcontg[16]); 1249 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 1250 } 1251 } 1252 if (i < pages) { 1253 VM_STAT_ADD(page_exphcontg[17]); 1254 /* 1255 * page_reclaim failed because we were out of memory. 1256 * drop the rest of the locks and return because this page 1257 * must be already reallocated anyway. 1258 */ 1259 pp = rootpp; 1260 for (j = 0; j < pages; j++, pp++) { 1261 if (j != i) { 1262 page_unlock(pp); 1263 } 1264 } 1265 return (0); 1266 } 1267 1268 off = save_off; 1269 pp = rootpp; 1270 for (i = 0; i < pages; i++, pp++, off += PAGESIZE) { 1271 ASSERT(PAGE_EXCL(pp)); 1272 ASSERT(!PP_ISFREE(pp)); 1273 ASSERT(!hat_page_is_mapped(pp)); 1274 ASSERT(pp->p_vnode == vp); 1275 ASSERT(pp->p_offset == off); 1276 pp->p_szc = szc; 1277 } 1278 pp = rootpp; 1279 for (i = 0; i < pages; i++, pp++) { 1280 if (ppa == NULL) { 1281 page_unlock(pp); 1282 } else { 1283 ppa[i] = pp; 1284 page_downgrade(ppa[i]); 1285 } 1286 } 1287 if (ppa != NULL) { 1288 ppa[pages] = NULL; 1289 } 1290 VM_STAT_ADD(page_exphcontg[18]); 1291 ASSERT(vp->v_pages != NULL); 1292 return (1); 1293 } 1294 1295 /* 1296 * Determine whether a page with the specified [vp, off] 1297 * currently exists in the system and if so return its 1298 * size code. Obviously this should only be considered as 1299 * a hint since nothing prevents the page from disappearing 1300 * or appearing immediately after the return from this routine. 
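 *
 * Hypothetical use, again treating the result purely as a hint:
 *
 *	uint_t szc;
 *
 *	if (page_exists_forreal(vp, off, &szc) && szc > 0)
 *		... a large page appeared to back [vp, off] at the time of
 *		... the call; re-verify under proper locking before use.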
1301 */ 1302 int 1303 page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc) 1304 { 1305 page_t *pp; 1306 kmutex_t *phm; 1307 ulong_t index; 1308 int rc = 0; 1309 1310 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 1311 ASSERT(szc != NULL); 1312 VM_STAT_ADD(page_exists_forreal_cnt); 1313 1314 index = PAGE_HASH_FUNC(vp, off); 1315 phm = PAGE_HASH_MUTEX(index); 1316 1317 mutex_enter(phm); 1318 PAGE_HASH_SEARCH(index, pp, vp, off); 1319 if (pp != NULL) { 1320 *szc = pp->p_szc; 1321 rc = 1; 1322 } 1323 mutex_exit(phm); 1324 return (rc); 1325 } 1326 1327 /* wakeup threads waiting for pages in page_create_get_something() */ 1328 void 1329 wakeup_pcgs(void) 1330 { 1331 if (!CV_HAS_WAITERS(&pcgs_cv)) 1332 return; 1333 cv_broadcast(&pcgs_cv); 1334 } 1335 1336 /* 1337 * 'freemem' is used all over the kernel as an indication of how many 1338 * pages are free (either on the cache list or on the free page list) 1339 * in the system. In very few places is a really accurate 'freemem' 1340 * needed. To avoid contention of the lock protecting a the 1341 * single freemem, it was spread out into NCPU buckets. Set_freemem 1342 * sets freemem to the total of all NCPU buckets. It is called from 1343 * clock() on each TICK. 1344 */ 1345 void 1346 set_freemem() 1347 { 1348 struct pcf *p; 1349 ulong_t t; 1350 uint_t i; 1351 1352 t = 0; 1353 p = pcf; 1354 for (i = 0; i < pcf_fanout; i++) { 1355 t += p->pcf_count; 1356 p++; 1357 } 1358 freemem = t; 1359 1360 /* 1361 * Don't worry about grabbing mutex. It's not that 1362 * critical if we miss a tick or two. This is 1363 * where we wakeup possible delayers in 1364 * page_create_get_something(). 1365 */ 1366 wakeup_pcgs(); 1367 } 1368 1369 ulong_t 1370 get_freemem() 1371 { 1372 struct pcf *p; 1373 ulong_t t; 1374 uint_t i; 1375 1376 t = 0; 1377 p = pcf; 1378 for (i = 0; i < pcf_fanout; i++) { 1379 t += p->pcf_count; 1380 p++; 1381 } 1382 /* 1383 * We just calculated it, might as well set it. 1384 */ 1385 freemem = t; 1386 return (t); 1387 } 1388 1389 /* 1390 * Acquire all of the page cache & free (pcf) locks. 1391 */ 1392 void 1393 pcf_acquire_all() 1394 { 1395 struct pcf *p; 1396 uint_t i; 1397 1398 p = pcf; 1399 for (i = 0; i < pcf_fanout; i++) { 1400 mutex_enter(&p->pcf_lock); 1401 p++; 1402 } 1403 } 1404 1405 /* 1406 * Release all the pcf_locks. 1407 */ 1408 void 1409 pcf_release_all() 1410 { 1411 struct pcf *p; 1412 uint_t i; 1413 1414 p = pcf; 1415 for (i = 0; i < pcf_fanout; i++) { 1416 mutex_exit(&p->pcf_lock); 1417 p++; 1418 } 1419 } 1420 1421 /* 1422 * Inform the VM system that we need some pages freed up. 1423 * Calls must be symmetric, e.g.: 1424 * 1425 * page_needfree(100); 1426 * wait a bit; 1427 * page_needfree(-100); 1428 */ 1429 void 1430 page_needfree(spgcnt_t npages) 1431 { 1432 mutex_enter(&new_freemem_lock); 1433 needfree += npages; 1434 mutex_exit(&new_freemem_lock); 1435 } 1436 1437 /* 1438 * Throttle for page_create(): try to prevent freemem from dropping 1439 * below throttlefree. We can't provide a 100% guarantee because 1440 * KM_NOSLEEP allocations, page_reclaim(), and various other things 1441 * nibble away at the freelist. However, we can block all PG_WAIT 1442 * allocations until memory becomes available. The motivation is 1443 * that several things can fall apart when there's no free memory: 1444 * 1445 * (1) If pageout() needs memory to push a page, the system deadlocks. 
1446 * 1447 * (2) By (broken) specification, timeout(9F) can neither fail nor 1448 * block, so it has no choice but to panic the system if it 1449 * cannot allocate a callout structure. 1450 * 1451 * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block; 1452 * it panics if it cannot allocate a callback structure. 1453 * 1454 * (4) Untold numbers of third-party drivers have not yet been hardened 1455 * against KM_NOSLEEP and/or allocb() failures; they simply assume 1456 * success and panic the system with a data fault on failure. 1457 * (The long-term solution to this particular problem is to ship 1458 * hostile fault-injecting DEBUG kernels with the DDK.) 1459 * 1460 * It is theoretically impossible to guarantee success of non-blocking 1461 * allocations, but in practice, this throttle is very hard to break. 1462 */ 1463 static int 1464 page_create_throttle(pgcnt_t npages, int flags) 1465 { 1466 ulong_t fm; 1467 uint_t i; 1468 pgcnt_t tf; /* effective value of throttlefree */ 1469 1470 /* 1471 * Never deny pages when: 1472 * - it's a thread that cannot block [NOMEMWAIT()] 1473 * - the allocation cannot block and must not fail 1474 * - the allocation cannot block and is pageout dispensated 1475 */ 1476 if (NOMEMWAIT() || 1477 ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) || 1478 ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE)) 1479 return (1); 1480 1481 /* 1482 * If the allocation can't block, we look favorably upon it 1483 * unless we're below pageout_reserve. In that case we fail 1484 * the allocation because we want to make sure there are a few 1485 * pages available for pageout. 1486 */ 1487 if ((flags & PG_WAIT) == 0) 1488 return (freemem >= npages + pageout_reserve); 1489 1490 /* Calculate the effective throttlefree value */ 1491 tf = throttlefree - 1492 ((flags & PG_PUSHPAGE) ? pageout_reserve : 0); 1493 1494 cv_signal(&proc_pageout->p_cv); 1495 1496 for (;;) { 1497 fm = 0; 1498 pcf_acquire_all(); 1499 mutex_enter(&new_freemem_lock); 1500 for (i = 0; i < pcf_fanout; i++) { 1501 fm += pcf[i].pcf_count; 1502 pcf[i].pcf_wait++; 1503 mutex_exit(&pcf[i].pcf_lock); 1504 } 1505 freemem = fm; 1506 if (freemem >= npages + tf) { 1507 mutex_exit(&new_freemem_lock); 1508 break; 1509 } 1510 needfree += npages; 1511 freemem_wait++; 1512 cv_wait(&freemem_cv, &new_freemem_lock); 1513 freemem_wait--; 1514 needfree -= npages; 1515 mutex_exit(&new_freemem_lock); 1516 } 1517 return (1); 1518 } 1519 1520 /* 1521 * page_create_wait() is called to either coalesce pages from the 1522 * different pcf buckets or to wait because there simply are not 1523 * enough pages to satisfy the caller's request. 1524 * 1525 * Sadly, this is called from platform/vm/vm_machdep.c 1526 */ 1527 int 1528 page_create_wait(pgcnt_t npages, uint_t flags) 1529 { 1530 pgcnt_t total; 1531 uint_t i; 1532 struct pcf *p; 1533 1534 /* 1535 * Wait until there are enough free pages to satisfy our 1536 * entire request. 1537 * We set needfree += npages before prodding pageout, to make sure 1538 * it does real work when npages > lotsfree > freemem. 1539 */ 1540 VM_STAT_ADD(page_create_not_enough); 1541 1542 ASSERT(!kcage_on ? 
!(flags & PG_NORELOC) : 1); 1543 checkagain: 1544 if ((flags & PG_NORELOC) && 1545 kcage_freemem < kcage_throttlefree + npages) 1546 (void) kcage_create_throttle(npages, flags); 1547 1548 if (freemem < npages + throttlefree) 1549 if (!page_create_throttle(npages, flags)) 1550 return (0); 1551 1552 if (pcf_decrement_bucket(npages) || 1553 pcf_decrement_multiple(&total, npages, 0)) 1554 return (1); 1555 1556 /* 1557 * All of the pcf locks are held, there are not enough pages 1558 * to satisfy the request (npages < total). 1559 * Be sure to acquire the new_freemem_lock before dropping 1560 * the pcf locks. This prevents dropping wakeups in page_free(). 1561 * The order is always pcf_lock then new_freemem_lock. 1562 * 1563 * Since we hold all the pcf locks, it is a good time to set freemem. 1564 * 1565 * If the caller does not want to wait, return now. 1566 * Else turn the pageout daemon loose to find something 1567 * and wait till it does. 1568 * 1569 */ 1570 freemem = total; 1571 1572 if ((flags & PG_WAIT) == 0) { 1573 pcf_release_all(); 1574 1575 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM, 1576 "page_create_nomem:npages %ld freemem %ld", npages, freemem); 1577 return (0); 1578 } 1579 1580 ASSERT(proc_pageout != NULL); 1581 cv_signal(&proc_pageout->p_cv); 1582 1583 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START, 1584 "page_create_sleep_start: freemem %ld needfree %ld", 1585 freemem, needfree); 1586 1587 /* 1588 * We are going to wait. 1589 * We currently hold all of the pcf_locks, 1590 * get the new_freemem_lock (it protects freemem_wait), 1591 * before dropping the pcf_locks. 1592 */ 1593 mutex_enter(&new_freemem_lock); 1594 1595 p = pcf; 1596 for (i = 0; i < pcf_fanout; i++) { 1597 p->pcf_wait++; 1598 mutex_exit(&p->pcf_lock); 1599 p++; 1600 } 1601 1602 needfree += npages; 1603 freemem_wait++; 1604 1605 cv_wait(&freemem_cv, &new_freemem_lock); 1606 1607 freemem_wait--; 1608 needfree -= npages; 1609 1610 mutex_exit(&new_freemem_lock); 1611 1612 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END, 1613 "page_create_sleep_end: freemem %ld needfree %ld", 1614 freemem, needfree); 1615 1616 VM_STAT_ADD(page_create_not_enough_again); 1617 goto checkagain; 1618 } 1619 /* 1620 * A routine to do the opposite of page_create_wait(). 1621 */ 1622 void 1623 page_create_putback(spgcnt_t npages) 1624 { 1625 struct pcf *p; 1626 pgcnt_t lump; 1627 uint_t *which; 1628 1629 /* 1630 * When a contiguous lump is broken up, we have to 1631 * deal with lots of pages (min 64) so lets spread 1632 * the wealth around. 1633 */ 1634 lump = roundup(npages, pcf_fanout) / pcf_fanout; 1635 freemem += npages; 1636 1637 for (p = pcf; (npages > 0) && (p < &pcf[pcf_fanout]); p++) { 1638 which = &p->pcf_count; 1639 1640 mutex_enter(&p->pcf_lock); 1641 1642 if (p->pcf_block) { 1643 which = &p->pcf_reserve; 1644 } 1645 1646 if (lump < npages) { 1647 *which += (uint_t)lump; 1648 npages -= lump; 1649 } else { 1650 *which += (uint_t)npages; 1651 npages = 0; 1652 } 1653 1654 if (p->pcf_wait) { 1655 mutex_enter(&new_freemem_lock); 1656 /* 1657 * Check to see if some other thread 1658 * is actually waiting. Another bucket 1659 * may have woken it up by now. If there 1660 * are no waiters, then set our pcf_wait 1661 * count to zero to avoid coming in here 1662 * next time. 
1663 */ 1664 if (freemem_wait) { 1665 if (npages > 1) { 1666 cv_broadcast(&freemem_cv); 1667 } else { 1668 cv_signal(&freemem_cv); 1669 } 1670 p->pcf_wait--; 1671 } else { 1672 p->pcf_wait = 0; 1673 } 1674 mutex_exit(&new_freemem_lock); 1675 } 1676 mutex_exit(&p->pcf_lock); 1677 } 1678 ASSERT(npages == 0); 1679 } 1680 1681 /* 1682 * A helper routine for page_create_get_something. 1683 * The indenting got to deep down there. 1684 * Unblock the pcf counters. Any pages freed after 1685 * pcf_block got set are moved to pcf_count and 1686 * wakeups (cv_broadcast() or cv_signal()) are done as needed. 1687 */ 1688 static void 1689 pcgs_unblock(void) 1690 { 1691 int i; 1692 struct pcf *p; 1693 1694 /* Update freemem while we're here. */ 1695 freemem = 0; 1696 p = pcf; 1697 for (i = 0; i < pcf_fanout; i++) { 1698 mutex_enter(&p->pcf_lock); 1699 ASSERT(p->pcf_count == 0); 1700 p->pcf_count = p->pcf_reserve; 1701 p->pcf_block = 0; 1702 freemem += p->pcf_count; 1703 if (p->pcf_wait) { 1704 mutex_enter(&new_freemem_lock); 1705 if (freemem_wait) { 1706 if (p->pcf_reserve > 1) { 1707 cv_broadcast(&freemem_cv); 1708 p->pcf_wait = 0; 1709 } else { 1710 cv_signal(&freemem_cv); 1711 p->pcf_wait--; 1712 } 1713 } else { 1714 p->pcf_wait = 0; 1715 } 1716 mutex_exit(&new_freemem_lock); 1717 } 1718 p->pcf_reserve = 0; 1719 mutex_exit(&p->pcf_lock); 1720 p++; 1721 } 1722 } 1723 1724 /* 1725 * Called from page_create_va() when both the cache and free lists 1726 * have been checked once. 1727 * 1728 * Either returns a page or panics since the accounting was done 1729 * way before we got here. 1730 * 1731 * We don't come here often, so leave the accounting on permanently. 1732 */ 1733 1734 #define MAX_PCGS 100 1735 1736 #ifdef DEBUG 1737 #define PCGS_TRIES 100 1738 #else /* DEBUG */ 1739 #define PCGS_TRIES 10 1740 #endif /* DEBUG */ 1741 1742 #ifdef VM_STATS 1743 uint_t pcgs_counts[PCGS_TRIES]; 1744 uint_t pcgs_too_many; 1745 uint_t pcgs_entered; 1746 uint_t pcgs_entered_noreloc; 1747 uint_t pcgs_locked; 1748 uint_t pcgs_cagelocked; 1749 #endif /* VM_STATS */ 1750 1751 static page_t * 1752 page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg, 1753 caddr_t vaddr, uint_t flags) 1754 { 1755 uint_t count; 1756 page_t *pp; 1757 uint_t locked, i; 1758 struct pcf *p; 1759 lgrp_t *lgrp; 1760 int cagelocked = 0; 1761 1762 VM_STAT_ADD(pcgs_entered); 1763 1764 /* 1765 * Tap any reserve freelists: if we fail now, we'll die 1766 * since the page(s) we're looking for have already been 1767 * accounted for. 1768 */ 1769 flags |= PG_PANIC; 1770 1771 if ((flags & PG_NORELOC) != 0) { 1772 VM_STAT_ADD(pcgs_entered_noreloc); 1773 /* 1774 * Requests for free pages from critical threads 1775 * such as pageout still won't throttle here, but 1776 * we must try again, to give the cageout thread 1777 * another chance to catch up. Since we already 1778 * accounted for the pages, we had better get them 1779 * this time. 1780 * 1781 * N.B. All non-critical threads acquire the pcgs_cagelock 1782 * to serialize access to the freelists. This implements a 1783 * turnstile-type synchornization to avoid starvation of 1784 * critical requests for PG_NORELOC memory by non-critical 1785 * threads: all non-critical threads must acquire a 'ticket' 1786 * before passing through, which entails making sure 1787 * kcage_freemem won't fall below minfree prior to grabbing 1788 * pages from the freelists. 
1789 */ 1790 if (kcage_create_throttle(1, flags) == KCT_NONCRIT) { 1791 mutex_enter(&pcgs_cagelock); 1792 cagelocked = 1; 1793 VM_STAT_ADD(pcgs_cagelocked); 1794 } 1795 } 1796 1797 /* 1798 * Time to get serious. 1799 * We failed to get a `correctly colored' page from both the 1800 * free and cache lists. 1801 * We escalate in stage. 1802 * 1803 * First try both lists without worring about color. 1804 * 1805 * Then, grab all page accounting locks (ie. pcf[]) and 1806 * steal any pages that they have and set the pcf_block flag to 1807 * stop deletions from the lists. This will help because 1808 * a page can get added to the free list while we are looking 1809 * at the cache list, then another page could be added to the cache 1810 * list allowing the page on the free list to be removed as we 1811 * move from looking at the cache list to the free list. This 1812 * could happen over and over. We would never find the page 1813 * we have accounted for. 1814 * 1815 * Noreloc pages are a subset of the global (relocatable) page pool. 1816 * They are not tracked separately in the pcf bins, so it is 1817 * impossible to know when doing pcf accounting if the available 1818 * page(s) are noreloc pages or not. When looking for a noreloc page 1819 * it is quite easy to end up here even if the global (relocatable) 1820 * page pool has plenty of free pages but the noreloc pool is empty. 1821 * 1822 * When the noreloc pool is empty (or low), additional noreloc pages 1823 * are created by converting pages from the global page pool. This 1824 * process will stall during pcf accounting if the pcf bins are 1825 * already locked. Such is the case when a noreloc allocation is 1826 * looping here in page_create_get_something waiting for more noreloc 1827 * pages to appear. 1828 * 1829 * Short of adding a new field to the pcf bins to accurately track 1830 * the number of free noreloc pages, we instead do not grab the 1831 * pcgs_lock, do not set the pcf blocks and do not timeout when 1832 * allocating a noreloc page. This allows noreloc allocations to 1833 * loop without blocking global page pool allocations. 1834 * 1835 * NOTE: the behaviour of page_create_get_something has not changed 1836 * for the case of global page pool allocations. 1837 */ 1838 1839 flags &= ~PG_MATCH_COLOR; 1840 locked = 0; 1841 #if defined(__i386) || defined(__amd64) 1842 flags = page_create_update_flags_x86(flags); 1843 #endif 1844 1845 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 1846 1847 for (count = 0; kcage_on || count < MAX_PCGS; count++) { 1848 pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 1849 flags, lgrp); 1850 if (pp == NULL) { 1851 pp = page_get_cachelist(vp, off, seg, vaddr, 1852 flags, lgrp); 1853 } 1854 if (pp == NULL) { 1855 /* 1856 * Serialize. Don't fight with other pcgs(). 1857 */ 1858 if (!locked && (!kcage_on || !(flags & PG_NORELOC))) { 1859 mutex_enter(&pcgs_lock); 1860 VM_STAT_ADD(pcgs_locked); 1861 locked = 1; 1862 p = pcf; 1863 for (i = 0; i < pcf_fanout; i++) { 1864 mutex_enter(&p->pcf_lock); 1865 ASSERT(p->pcf_block == 0); 1866 p->pcf_block = 1; 1867 p->pcf_reserve = p->pcf_count; 1868 p->pcf_count = 0; 1869 mutex_exit(&p->pcf_lock); 1870 p++; 1871 } 1872 freemem = 0; 1873 } 1874 1875 if (count) { 1876 /* 1877 * Since page_free() puts pages on 1878 * a list then accounts for it, we 1879 * just have to wait for page_free() 1880 * to unlock any page it was working 1881 * with. The page_lock()-page_reclaim() 1882 * path falls in the same boat. 
1883 * 1884 * We don't need to check on the 1885 * PG_WAIT flag, we have already 1886 * accounted for the page we are 1887 * looking for in page_create_va(). 1888 * 1889 * We just wait a moment to let any 1890 * locked pages on the lists free up, 1891 * then continue around and try again. 1892 * 1893 * Will be awakened by set_freemem(). 1894 */ 1895 mutex_enter(&pcgs_wait_lock); 1896 cv_wait(&pcgs_cv, &pcgs_wait_lock); 1897 mutex_exit(&pcgs_wait_lock); 1898 } 1899 } else { 1900 #ifdef VM_STATS 1901 if (count >= PCGS_TRIES) { 1902 VM_STAT_ADD(pcgs_too_many); 1903 } else { 1904 VM_STAT_ADD(pcgs_counts[count]); 1905 } 1906 #endif 1907 if (locked) { 1908 pcgs_unblock(); 1909 mutex_exit(&pcgs_lock); 1910 } 1911 if (cagelocked) 1912 mutex_exit(&pcgs_cagelock); 1913 return (pp); 1914 } 1915 } 1916 /* 1917 * we go down holding the pcf locks. 1918 */ 1919 panic("no %spage found %d", 1920 ((flags & PG_NORELOC) ? "non-reloc " : ""), count); 1921 /*NOTREACHED*/ 1922 } 1923 1924 /* 1925 * Create enough pages for "bytes" worth of data starting at 1926 * "off" in "vp". 1927 * 1928 * Where flag must be one of: 1929 * 1930 * PG_EXCL: Exclusive create (fail if any page already 1931 * exists in the page cache) which does not 1932 * wait for memory to become available. 1933 * 1934 * PG_WAIT: Non-exclusive create which can wait for 1935 * memory to become available. 1936 * 1937 * PG_PHYSCONTIG: Allocate physically contiguous pages. 1938 * (Not Supported) 1939 * 1940 * A doubly linked list of pages is returned to the caller. Each page 1941 * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock) 1942 * lock. 1943 * 1944 * Unable to change the parameters to page_create() in a minor release, 1945 * we renamed page_create() to page_create_va(), changed all known calls 1946 * from page_create() to page_create_va(), and created this wrapper. 1947 * 1948 * Upon a major release, we should break compatibility by deleting this 1949 * wrapper, and replacing all the strings "page_create_va", with "page_create". 1950 * 1951 * NOTE: There is a copy of this interface as page_create_io() in 1952 * i86/vm/vm_machdep.c. Any bugs fixed here should be applied 1953 * there. 1954 */ 1955 page_t * 1956 page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags) 1957 { 1958 caddr_t random_vaddr; 1959 struct seg kseg; 1960 1961 #ifdef DEBUG 1962 cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p", 1963 (void *)caller()); 1964 #endif 1965 1966 random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^ 1967 (uintptr_t)(off >> PAGESHIFT)); 1968 kseg.s_as = &kas; 1969 1970 return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr)); 1971 } 1972 1973 #ifdef DEBUG 1974 uint32_t pg_alloc_pgs_mtbf = 0; 1975 #endif 1976 1977 /* 1978 * Used for large page support. It will attempt to allocate 1979 * a large page(s) off the freelist. 1980 * 1981 * Returns non zero on failure. 1982 */ 1983 int 1984 page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr, 1985 page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz, int pgflags) 1986 { 1987 pgcnt_t npgs, curnpgs, totpgs; 1988 size_t pgsz; 1989 page_t *pplist = NULL, *pp; 1990 int err = 0; 1991 lgrp_t *lgrp; 1992 1993 ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1)); 1994 ASSERT(pgflags == 0 || pgflags == PG_LOCAL); 1995 1996 /* 1997 * Check if system heavily prefers local large pages over remote 1998 * on systems with multiple lgroups. 
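 *
 * The preference normally comes from the lpg_alloc_prefer /etc/system
 * tunable declared near the top of this file; an illustrative entry
 * (assuming LPAP_LOCAL's enum value of 1) would be:
 *
 *	set lpg_alloc_prefer = 1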
1999 */ 2000 if (lpg_alloc_prefer == LPAP_LOCAL && nlgrps > 1) { 2001 pgflags = PG_LOCAL; 2002 } 2003 2004 VM_STAT_ADD(alloc_pages[0]); 2005 2006 #ifdef DEBUG 2007 if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) { 2008 return (ENOMEM); 2009 } 2010 #endif 2011 2012 /* 2013 * One must be NULL but not both. 2014 * And one must be non NULL but not both. 2015 */ 2016 ASSERT(basepp != NULL || ppa != NULL); 2017 ASSERT(basepp == NULL || ppa == NULL); 2018 2019 #if defined(__i386) || defined(__amd64) 2020 while (page_chk_freelist(szc) == 0) { 2021 VM_STAT_ADD(alloc_pages[8]); 2022 if (anypgsz == 0 || --szc == 0) 2023 return (ENOMEM); 2024 } 2025 #endif 2026 2027 pgsz = page_get_pagesize(szc); 2028 totpgs = curnpgs = npgs = pgsz >> PAGESHIFT; 2029 2030 ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0); 2031 2032 (void) page_create_wait(npgs, PG_WAIT); 2033 2034 while (npgs && szc) { 2035 lgrp = lgrp_mem_choose(seg, addr, pgsz); 2036 if (pgflags == PG_LOCAL) { 2037 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2038 pgflags, lgrp); 2039 if (pp == NULL) { 2040 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2041 0, lgrp); 2042 } 2043 } else { 2044 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2045 0, lgrp); 2046 } 2047 if (pp != NULL) { 2048 VM_STAT_ADD(alloc_pages[1]); 2049 page_list_concat(&pplist, &pp); 2050 ASSERT(npgs >= curnpgs); 2051 npgs -= curnpgs; 2052 } else if (anypgsz) { 2053 VM_STAT_ADD(alloc_pages[2]); 2054 szc--; 2055 pgsz = page_get_pagesize(szc); 2056 curnpgs = pgsz >> PAGESHIFT; 2057 } else { 2058 VM_STAT_ADD(alloc_pages[3]); 2059 ASSERT(npgs == totpgs); 2060 page_create_putback(npgs); 2061 return (ENOMEM); 2062 } 2063 } 2064 if (szc == 0) { 2065 VM_STAT_ADD(alloc_pages[4]); 2066 ASSERT(npgs != 0); 2067 page_create_putback(npgs); 2068 err = ENOMEM; 2069 } else if (basepp != NULL) { 2070 ASSERT(npgs == 0); 2071 ASSERT(ppa == NULL); 2072 *basepp = pplist; 2073 } 2074 2075 npgs = totpgs - npgs; 2076 pp = pplist; 2077 2078 /* 2079 * Clear the free and age bits. Also if we were passed in a ppa then 2080 * fill it in with all the constituent pages from the large page. But 2081 * if we failed to allocate all the pages just free what we got. 2082 */ 2083 while (npgs != 0) { 2084 ASSERT(PP_ISFREE(pp)); 2085 ASSERT(PP_ISAGED(pp)); 2086 if (ppa != NULL || err != 0) { 2087 if (err == 0) { 2088 VM_STAT_ADD(alloc_pages[5]); 2089 PP_CLRFREE(pp); 2090 PP_CLRAGED(pp); 2091 page_sub(&pplist, pp); 2092 *ppa++ = pp; 2093 npgs--; 2094 } else { 2095 VM_STAT_ADD(alloc_pages[6]); 2096 ASSERT(pp->p_szc != 0); 2097 curnpgs = page_get_pagecnt(pp->p_szc); 2098 page_list_break(&pp, &pplist, curnpgs); 2099 page_list_add_pages(pp, 0); 2100 page_create_putback(curnpgs); 2101 ASSERT(npgs >= curnpgs); 2102 npgs -= curnpgs; 2103 } 2104 pp = pplist; 2105 } else { 2106 VM_STAT_ADD(alloc_pages[7]); 2107 PP_CLRFREE(pp); 2108 PP_CLRAGED(pp); 2109 pp = pp->p_next; 2110 npgs--; 2111 } 2112 } 2113 return (err); 2114 } 2115 2116 /* 2117 * Get a single large page off of the freelists, and set it up for use. 2118 * Number of bytes requested must be a supported page size. 2119 * 2120 * Note that this call may fail even if there is sufficient 2121 * memory available or PG_WAIT is set, so the caller must 2122 * be willing to fallback on page_create_va(), block and retry, 2123 * or fail the requester. 
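 *
 * An illustrative, hypothetical fallback pattern (the flags shown are only
 * an example; PG_EXCL is mandatory for this routine):
 *
 *	pp = page_create_va_large(vp, off, bytes, PG_EXCL | PG_WAIT,
 *	    seg, vaddr, NULL);
 *	if (pp == NULL)
 *		pp = page_create_va(vp, off, bytes, PG_EXCL | PG_WAIT,
 *		    seg, vaddr);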
2124 */ 2125 page_t * 2126 page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2127 struct seg *seg, caddr_t vaddr, void *arg) 2128 { 2129 pgcnt_t npages; 2130 page_t *pp; 2131 page_t *rootpp; 2132 lgrp_t *lgrp; 2133 lgrp_id_t *lgrpid = (lgrp_id_t *)arg; 2134 2135 ASSERT(vp != NULL); 2136 2137 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2138 PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0); 2139 /* but no others */ 2140 2141 ASSERT((flags & PG_EXCL) == PG_EXCL); 2142 2143 npages = btop(bytes); 2144 2145 if (!kcage_on || panicstr) { 2146 /* 2147 * Cage is OFF, or we are single threaded in 2148 * panic, so make everything a RELOC request. 2149 */ 2150 flags &= ~PG_NORELOC; 2151 } 2152 2153 /* 2154 * Make sure there's adequate physical memory available. 2155 * Note: PG_WAIT is ignored here. 2156 */ 2157 if (freemem <= throttlefree + npages) { 2158 VM_STAT_ADD(page_create_large_cnt[1]); 2159 return (NULL); 2160 } 2161 2162 /* 2163 * If cage is on, dampen draw from cage when available 2164 * cage space is low. 2165 */ 2166 if ((flags & (PG_NORELOC | PG_WAIT)) == (PG_NORELOC | PG_WAIT) && 2167 kcage_freemem < kcage_throttlefree + npages) { 2168 2169 /* 2170 * The cage is on, the caller wants PG_NORELOC 2171 * pages and available cage memory is very low. 2172 * Call kcage_create_throttle() to attempt to 2173 * control demand on the cage. 2174 */ 2175 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) { 2176 VM_STAT_ADD(page_create_large_cnt[2]); 2177 return (NULL); 2178 } 2179 } 2180 2181 if (!pcf_decrement_bucket(npages) && 2182 !pcf_decrement_multiple(NULL, npages, 1)) { 2183 VM_STAT_ADD(page_create_large_cnt[4]); 2184 return (NULL); 2185 } 2186 2187 /* 2188 * This is where this function behaves fundamentally differently 2189 * than page_create_va(); since we're intending to map the page 2190 * with a single TTE, we have to get it as a physically contiguous 2191 * hardware pagesize chunk. If we can't, we fail. 2192 */ 2193 if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max && 2194 LGRP_EXISTS(lgrp_table[*lgrpid])) 2195 lgrp = lgrp_table[*lgrpid]; 2196 else 2197 lgrp = lgrp_mem_choose(seg, vaddr, bytes); 2198 2199 if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr, 2200 bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) { 2201 page_create_putback(npages); 2202 VM_STAT_ADD(page_create_large_cnt[5]); 2203 return (NULL); 2204 } 2205 2206 /* 2207 * If we got the page with the wrong mtype, give it back; this is a 2208 * workaround for CR 6249718. When CR 6249718 is fixed we will never 2209 * get inside this "if" and the workaround becomes just a nop. 2210 */ 2211 if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) { 2212 page_list_add_pages(rootpp, 0); 2213 page_create_putback(npages); 2214 VM_STAT_ADD(page_create_large_cnt[6]); 2215 return (NULL); 2216 } 2217 2218 /* 2219 * If satisfying this request has left us with too little 2220 * memory, start the wheels turning to get some back. The 2221 * first clause of the test prevents waking up the pageout 2222 * daemon in situations where it would decide that there's 2223 * nothing to do.
2224 */ 2225 if (nscan < desscan && freemem < minfree) { 2226 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2227 "pageout_cv_signal:freemem %ld", freemem); 2228 cv_signal(&proc_pageout->p_cv); 2229 } 2230 2231 pp = rootpp; 2232 while (npages--) { 2233 ASSERT(PAGE_EXCL(pp)); 2234 ASSERT(pp->p_vnode == NULL); 2235 ASSERT(!hat_page_is_mapped(pp)); 2236 PP_CLRFREE(pp); 2237 PP_CLRAGED(pp); 2238 if (!page_hashin(pp, vp, off, NULL)) 2239 panic("page_create_large: hashin failed: page %p", 2240 (void *)pp); 2241 page_io_lock(pp); 2242 off += PAGESIZE; 2243 pp = pp->p_next; 2244 } 2245 2246 VM_STAT_ADD(page_create_large_cnt[0]); 2247 return (rootpp); 2248 } 2249 2250 page_t * 2251 page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2252 struct seg *seg, caddr_t vaddr) 2253 { 2254 page_t *plist = NULL; 2255 pgcnt_t npages; 2256 pgcnt_t found_on_free = 0; 2257 pgcnt_t pages_req; 2258 page_t *npp = NULL; 2259 struct pcf *p; 2260 lgrp_t *lgrp; 2261 2262 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START, 2263 "page_create_start:vp %p off %llx bytes %lu flags %x", 2264 vp, off, bytes, flags); 2265 2266 ASSERT(bytes != 0 && vp != NULL); 2267 2268 if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) { 2269 panic("page_create: invalid flags"); 2270 /*NOTREACHED*/ 2271 } 2272 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2273 PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0); 2274 /* but no others */ 2275 2276 pages_req = npages = btopr(bytes); 2277 /* 2278 * Try to see whether request is too large to *ever* be 2279 * satisfied, in order to prevent deadlock. We arbitrarily 2280 * decide to limit maximum size requests to max_page_get. 2281 */ 2282 if (npages >= max_page_get) { 2283 if ((flags & PG_WAIT) == 0) { 2284 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG, 2285 "page_create_toobig:vp %p off %llx npages " 2286 "%lu max_page_get %lu", 2287 vp, off, npages, max_page_get); 2288 return (NULL); 2289 } else { 2290 cmn_err(CE_WARN, 2291 "Request for too much kernel memory " 2292 "(%lu bytes), will hang forever", bytes); 2293 for (;;) 2294 delay(1000000000); 2295 } 2296 } 2297 2298 if (!kcage_on || panicstr) { 2299 /* 2300 * Cage is OFF, or we are single threaded in 2301 * panic, so make everything a RELOC request. 2302 */ 2303 flags &= ~PG_NORELOC; 2304 } 2305 2306 if (freemem <= throttlefree + npages) 2307 if (!page_create_throttle(npages, flags)) 2308 return (NULL); 2309 2310 /* 2311 * If cage is on, dampen draw from cage when available 2312 * cage space is low. 2313 */ 2314 if ((flags & PG_NORELOC) && 2315 kcage_freemem < kcage_throttlefree + npages) { 2316 2317 /* 2318 * The cage is on, the caller wants PG_NORELOC 2319 * pages and available cage memory is very low. 2320 * Call kcage_create_throttle() to attempt to 2321 * control demand on the cage. 2322 */ 2323 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) 2324 return (NULL); 2325 } 2326 2327 VM_STAT_ADD(page_create_cnt[0]); 2328 2329 if (!pcf_decrement_bucket(npages)) { 2330 /* 2331 * Have to look harder. If npages is greater than 2332 * one, then we might have to coalesce the counters. 2333 * 2334 * Go wait. We come back having accounted 2335 * for the memory. 2336 */ 2337 VM_STAT_ADD(page_create_cnt[1]); 2338 if (!page_create_wait(npages, flags)) { 2339 VM_STAT_ADD(page_create_cnt[2]); 2340 return (NULL); 2341 } 2342 } 2343 2344 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS, 2345 "page_create_success:vp %p off %llx", vp, off); 2346 2347 /* 2348 * If satisfying this request has left us with too little 2349 * memory, start the wheels turning to get some back. 
The 2350 * first clause of the test prevents waking up the pageout 2351 * daemon in situations where it would decide that there's 2352 * nothing to do. 2353 */ 2354 if (nscan < desscan && freemem < minfree) { 2355 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2356 "pageout_cv_signal:freemem %ld", freemem); 2357 cv_signal(&proc_pageout->p_cv); 2358 } 2359 2360 /* 2361 * Loop around collecting the requested number of pages. 2362 * Most of the time, we have to `create' a new page. With 2363 * this in mind, pull the page off the free list before 2364 * getting the hash lock. This will minimize the hash 2365 * lock hold time, nesting, and the like. If it turns 2366 * out we don't need the page, we put it back at the end. 2367 */ 2368 while (npages--) { 2369 page_t *pp; 2370 kmutex_t *phm = NULL; 2371 ulong_t index; 2372 2373 index = PAGE_HASH_FUNC(vp, off); 2374 top: 2375 ASSERT(phm == NULL); 2376 ASSERT(index == PAGE_HASH_FUNC(vp, off)); 2377 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 2378 2379 if (npp == NULL) { 2380 /* 2381 * Try to get a page from the freelist (i.e., 2382 * a page with no [vp, off] tag). If that 2383 * fails, use the cachelist. 2384 * 2385 * During the first attempt at both the free 2386 * and cache lists we try for the correct color. 2387 */ 2388 /* 2389 * XXXX-how do we deal with virtual indexed 2390 * caches and colors? 2391 */ 2392 VM_STAT_ADD(page_create_cnt[4]); 2393 /* 2394 * Get lgroup to allocate next page of shared memory 2395 * from and use it to specify where to allocate 2396 * the physical memory 2397 */ 2398 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 2399 npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 2400 flags | PG_MATCH_COLOR, lgrp); 2401 if (npp == NULL) { 2402 npp = page_get_cachelist(vp, off, seg, 2403 vaddr, flags | PG_MATCH_COLOR, lgrp); 2404 if (npp == NULL) { 2405 npp = page_create_get_something(vp, 2406 off, seg, vaddr, 2407 flags & ~PG_MATCH_COLOR); 2408 } 2409 2410 if (PP_ISAGED(npp) == 0) { 2411 /* 2412 * Since this page came from the 2413 * cachelist, we must destroy the 2414 * old vnode association. 2415 */ 2416 page_hashout(npp, NULL); 2417 } 2418 } 2419 } 2420 2421 /* 2422 * We own this page! 2423 */ 2424 ASSERT(PAGE_EXCL(npp)); 2425 ASSERT(npp->p_vnode == NULL); 2426 ASSERT(!hat_page_is_mapped(npp)); 2427 PP_CLRFREE(npp); 2428 PP_CLRAGED(npp); 2429 2430 /* 2431 * Here we have a page in our hot little mitts and are 2432 * just waiting to stuff it on the appropriate lists. 2433 * Get the mutex and check to see if it really does 2434 * not exist. 2435 */ 2436 phm = PAGE_HASH_MUTEX(index); 2437 mutex_enter(phm); 2438 PAGE_HASH_SEARCH(index, pp, vp, off); 2439 if (pp == NULL) { 2440 VM_STAT_ADD(page_create_new); 2441 pp = npp; 2442 npp = NULL; 2443 if (!page_hashin(pp, vp, off, phm)) { 2444 /* 2445 * Since we hold the page hash mutex and 2446 * just searched for this page, page_hashin 2447 * had better not fail. If it does, that 2448 * means some thread did not follow the 2449 * page hash mutex rules. Panic now and 2450 * get it over with. As usual, go down 2451 * holding all the locks. 2452 */ 2453 ASSERT(MUTEX_HELD(phm)); 2454 panic("page_create: " 2455 "hashin failed %p %p %llx %p", 2456 (void *)pp, (void *)vp, off, (void *)phm); 2457 /*NOTREACHED*/ 2458 } 2459 ASSERT(MUTEX_HELD(phm)); 2460 mutex_exit(phm); 2461 phm = NULL; 2462 2463 /* 2464 * Hat layer locking need not be done to set 2465 * the following bits since the page is not hashed 2466 * and was on the free list (i.e., had no mappings).
2467 * 2468 * Set the reference bit to protect 2469 * against immediate pageout 2470 * 2471 * XXXmh modify freelist code to set reference 2472 * bit so we don't have to do it here. 2473 */ 2474 page_set_props(pp, P_REF); 2475 found_on_free++; 2476 } else { 2477 VM_STAT_ADD(page_create_exists); 2478 if (flags & PG_EXCL) { 2479 /* 2480 * Found an existing page, and the caller 2481 * wanted all new pages. Undo all of the work 2482 * we have done. 2483 */ 2484 mutex_exit(phm); 2485 phm = NULL; 2486 while (plist != NULL) { 2487 pp = plist; 2488 page_sub(&plist, pp); 2489 page_io_unlock(pp); 2490 /* large pages should not end up here */ 2491 ASSERT(pp->p_szc == 0); 2492 /*LINTED: constant in conditional ctx*/ 2493 VN_DISPOSE(pp, B_INVAL, 0, kcred); 2494 } 2495 VM_STAT_ADD(page_create_found_one); 2496 goto fail; 2497 } 2498 ASSERT(flags & PG_WAIT); 2499 if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) { 2500 /* 2501 * Start all over again if we blocked trying 2502 * to lock the page. 2503 */ 2504 mutex_exit(phm); 2505 VM_STAT_ADD(page_create_page_lock_failed); 2506 phm = NULL; 2507 goto top; 2508 } 2509 mutex_exit(phm); 2510 phm = NULL; 2511 2512 if (PP_ISFREE(pp)) { 2513 ASSERT(PP_ISAGED(pp) == 0); 2514 VM_STAT_ADD(pagecnt.pc_get_cache); 2515 page_list_sub(pp, PG_CACHE_LIST); 2516 PP_CLRFREE(pp); 2517 found_on_free++; 2518 } 2519 } 2520 2521 /* 2522 * Got a page! It is locked. Acquire the i/o 2523 * lock since we are going to use the p_next and 2524 * p_prev fields to link the requested pages together. 2525 */ 2526 page_io_lock(pp); 2527 page_add(&plist, pp); 2528 plist = plist->p_next; 2529 off += PAGESIZE; 2530 vaddr += PAGESIZE; 2531 } 2532 2533 ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1); 2534 fail: 2535 if (npp != NULL) { 2536 /* 2537 * Did not need this page after all. 2538 * Put it back on the free list. 2539 */ 2540 VM_STAT_ADD(page_create_putbacks); 2541 PP_SETFREE(npp); 2542 PP_SETAGED(npp); 2543 npp->p_offset = (u_offset_t)-1; 2544 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL); 2545 page_unlock(npp); 2546 2547 } 2548 2549 ASSERT(pages_req >= found_on_free); 2550 2551 { 2552 uint_t overshoot = (uint_t)(pages_req - found_on_free); 2553 2554 if (overshoot) { 2555 VM_STAT_ADD(page_create_overshoot); 2556 p = &pcf[PCF_INDEX()]; 2557 mutex_enter(&p->pcf_lock); 2558 if (p->pcf_block) { 2559 p->pcf_reserve += overshoot; 2560 } else { 2561 p->pcf_count += overshoot; 2562 if (p->pcf_wait) { 2563 mutex_enter(&new_freemem_lock); 2564 if (freemem_wait) { 2565 cv_signal(&freemem_cv); 2566 p->pcf_wait--; 2567 } else { 2568 p->pcf_wait = 0; 2569 } 2570 mutex_exit(&new_freemem_lock); 2571 } 2572 } 2573 mutex_exit(&p->pcf_lock); 2574 /* freemem is approximate, so this test OK */ 2575 if (!p->pcf_block) 2576 freemem += overshoot; 2577 } 2578 } 2579 2580 return (plist); 2581 } 2582 2583 /* 2584 * One or more constituent pages of this large page has been marked 2585 * toxic. Simply demote the large page to PAGESIZE pages and let 2586 * page_free() handle it. This routine should only be called by 2587 * large page free routines (page_free_pages() and page_destroy_pages(). 2588 * All pages are locked SE_EXCL and have already been marked free. 
2589 */ 2590 static void 2591 page_free_toxic_pages(page_t *rootpp) 2592 { 2593 page_t *tpp; 2594 pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc); 2595 uint_t szc = rootpp->p_szc; 2596 2597 for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) { 2598 ASSERT(tpp->p_szc == szc); 2599 ASSERT((PAGE_EXCL(tpp) && 2600 !page_iolock_assert(tpp)) || panicstr); 2601 tpp->p_szc = 0; 2602 } 2603 2604 while (rootpp != NULL) { 2605 tpp = rootpp; 2606 page_sub(&rootpp, tpp); 2607 ASSERT(PP_ISFREE(tpp)); 2608 PP_CLRFREE(tpp); 2609 page_free(tpp, 1); 2610 } 2611 } 2612 2613 /* 2614 * Put page on the "free" list. 2615 * The free list is really two lists maintained by 2616 * the PSM of whatever machine we happen to be on. 2617 */ 2618 void 2619 page_free(page_t *pp, int dontneed) 2620 { 2621 struct pcf *p; 2622 uint_t pcf_index; 2623 2624 ASSERT((PAGE_EXCL(pp) && 2625 !page_iolock_assert(pp)) || panicstr); 2626 2627 if (PP_ISFREE(pp)) { 2628 panic("page_free: page %p is free", (void *)pp); 2629 } 2630 2631 if (pp->p_szc != 0) { 2632 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 2633 PP_ISKAS(pp)) { 2634 panic("page_free: anon or kernel " 2635 "or no vnode large page %p", (void *)pp); 2636 } 2637 page_demote_vp_pages(pp); 2638 ASSERT(pp->p_szc == 0); 2639 } 2640 2641 /* 2642 * The page_struct_lock need not be acquired to examine these 2643 * fields since the page has an "exclusive" lock. 2644 */ 2645 if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 2646 pp->p_slckcnt != 0) { 2647 panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d " 2648 "slckcnt = %d", pp, page_pptonum(pp), pp->p_lckcnt, 2649 pp->p_cowcnt, pp->p_slckcnt); 2650 /*NOTREACHED*/ 2651 } 2652 2653 ASSERT(!hat_page_getshare(pp)); 2654 2655 PP_SETFREE(pp); 2656 ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) || 2657 !hat_ismod(pp)); 2658 page_clr_all_props(pp); 2659 ASSERT(!hat_page_getshare(pp)); 2660 2661 /* 2662 * Now we add the page to the head of the free list. 2663 * But if this page is associated with a paged vnode 2664 * then we adjust the head forward so that the page is 2665 * effectively at the end of the list. 2666 */ 2667 if (pp->p_vnode == NULL) { 2668 /* 2669 * Page has no identity, put it on the free list. 2670 */ 2671 PP_SETAGED(pp); 2672 pp->p_offset = (u_offset_t)-1; 2673 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 2674 VM_STAT_ADD(pagecnt.pc_free_free); 2675 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2676 "page_free_free:pp %p", pp); 2677 } else { 2678 PP_CLRAGED(pp); 2679 2680 if (!dontneed || nopageage) { 2681 /* move it to the tail of the list */ 2682 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL); 2683 2684 VM_STAT_ADD(pagecnt.pc_free_cache); 2685 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL, 2686 "page_free_cache_tail:pp %p", pp); 2687 } else { 2688 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD); 2689 2690 VM_STAT_ADD(pagecnt.pc_free_dontneed); 2691 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD, 2692 "page_free_cache_head:pp %p", pp); 2693 } 2694 } 2695 page_unlock(pp); 2696 2697 /* 2698 * Now do the `freemem' accounting. 2699 */ 2700 pcf_index = PCF_INDEX(); 2701 p = &pcf[pcf_index]; 2702 2703 mutex_enter(&p->pcf_lock); 2704 if (p->pcf_block) { 2705 p->pcf_reserve += 1; 2706 } else { 2707 p->pcf_count += 1; 2708 if (p->pcf_wait) { 2709 mutex_enter(&new_freemem_lock); 2710 /* 2711 * Check to see if some other thread 2712 * is actually waiting. Another bucket 2713 * may have woken it up by now. 
If there 2714 * are no waiters, then set our pcf_wait 2715 * count to zero to avoid coming in here 2716 * next time. Also, since only one page 2717 * was put on the free list, just wake 2718 * up one waiter. 2719 */ 2720 if (freemem_wait) { 2721 cv_signal(&freemem_cv); 2722 p->pcf_wait--; 2723 } else { 2724 p->pcf_wait = 0; 2725 } 2726 mutex_exit(&new_freemem_lock); 2727 } 2728 } 2729 mutex_exit(&p->pcf_lock); 2730 2731 /* freemem is approximate, so this test OK */ 2732 if (!p->pcf_block) 2733 freemem += 1; 2734 } 2735 2736 /* 2737 * Put page on the "free" list during intial startup. 2738 * This happens during initial single threaded execution. 2739 */ 2740 void 2741 page_free_at_startup(page_t *pp) 2742 { 2743 struct pcf *p; 2744 uint_t pcf_index; 2745 2746 page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT); 2747 VM_STAT_ADD(pagecnt.pc_free_free); 2748 2749 /* 2750 * Now do the `freemem' accounting. 2751 */ 2752 pcf_index = PCF_INDEX(); 2753 p = &pcf[pcf_index]; 2754 2755 ASSERT(p->pcf_block == 0); 2756 ASSERT(p->pcf_wait == 0); 2757 p->pcf_count += 1; 2758 2759 /* freemem is approximate, so this is OK */ 2760 freemem += 1; 2761 } 2762 2763 void 2764 page_free_pages(page_t *pp) 2765 { 2766 page_t *tpp, *rootpp = NULL; 2767 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 2768 pgcnt_t i; 2769 uint_t szc = pp->p_szc; 2770 2771 VM_STAT_ADD(pagecnt.pc_free_pages); 2772 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2773 "page_free_free:pp %p", pp); 2774 2775 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 2776 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 2777 panic("page_free_pages: not root page %p", (void *)pp); 2778 /*NOTREACHED*/ 2779 } 2780 2781 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 2782 ASSERT((PAGE_EXCL(tpp) && 2783 !page_iolock_assert(tpp)) || panicstr); 2784 if (PP_ISFREE(tpp)) { 2785 panic("page_free_pages: page %p is free", (void *)tpp); 2786 /*NOTREACHED*/ 2787 } 2788 if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 || 2789 tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) { 2790 panic("page_free_pages %p", (void *)tpp); 2791 /*NOTREACHED*/ 2792 } 2793 2794 ASSERT(!hat_page_getshare(tpp)); 2795 ASSERT(tpp->p_vnode == NULL); 2796 ASSERT(tpp->p_szc == szc); 2797 2798 PP_SETFREE(tpp); 2799 page_clr_all_props(tpp); 2800 PP_SETAGED(tpp); 2801 tpp->p_offset = (u_offset_t)-1; 2802 ASSERT(tpp->p_next == tpp); 2803 ASSERT(tpp->p_prev == tpp); 2804 page_list_concat(&rootpp, &tpp); 2805 } 2806 ASSERT(rootpp == pp); 2807 2808 page_list_add_pages(rootpp, 0); 2809 page_create_putback(pgcnt); 2810 } 2811 2812 int free_pages = 1; 2813 2814 /* 2815 * This routine attempts to return pages to the cachelist via page_release(). 2816 * It does not *have* to be successful in all cases, since the pageout scanner 2817 * will catch any pages it misses. It does need to be fast and not introduce 2818 * too much overhead. 2819 * 2820 * If a page isn't found on the unlocked sweep of the page_hash bucket, we 2821 * don't lock and retry. This is ok, since the page scanner will eventually 2822 * find any page we miss in free_vp_pages(). 2823 */ 2824 void 2825 free_vp_pages(vnode_t *vp, u_offset_t off, size_t len) 2826 { 2827 page_t *pp; 2828 u_offset_t eoff; 2829 extern int swap_in_range(vnode_t *, u_offset_t, size_t); 2830 2831 eoff = off + len; 2832 2833 if (free_pages == 0) 2834 return; 2835 if (swap_in_range(vp, off, len)) 2836 return; 2837 2838 for (; off < eoff; off += PAGESIZE) { 2839 2840 /* 2841 * find the page using a fast, but inexact search. 
It'll be OK 2842 * if a few pages slip through the cracks here. 2843 */ 2844 pp = page_exists(vp, off); 2845 2846 /* 2847 * If we didn't find the page (it may not exist), the page 2848 * is free, looks still in use (shared), or we can't lock it, 2849 * just give up. 2850 */ 2851 if (pp == NULL || 2852 PP_ISFREE(pp) || 2853 page_share_cnt(pp) > 0 || 2854 !page_trylock(pp, SE_EXCL)) 2855 continue; 2856 2857 /* 2858 * Once we have locked pp, verify that it's still the 2859 * correct page and not already free 2860 */ 2861 ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL)); 2862 if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) { 2863 page_unlock(pp); 2864 continue; 2865 } 2866 2867 /* 2868 * try to release the page... 2869 */ 2870 (void) page_release(pp, 1); 2871 } 2872 } 2873 2874 /* 2875 * Reclaim the given page from the free list. 2876 * If pp is part of a large pages, only the given constituent page is reclaimed 2877 * and the large page it belonged to will be demoted. This can only happen 2878 * if the page is not on the cachelist. 2879 * 2880 * Returns 1 on success or 0 on failure. 2881 * 2882 * The page is unlocked if it can't be reclaimed (when freemem == 0). 2883 * If `lock' is non-null, it will be dropped and re-acquired if 2884 * the routine must wait while freemem is 0. 2885 * 2886 * As it turns out, boot_getpages() does this. It picks a page, 2887 * based on where OBP mapped in some address, gets its pfn, searches 2888 * the memsegs, locks the page, then pulls it off the free list! 2889 */ 2890 int 2891 page_reclaim(page_t *pp, kmutex_t *lock) 2892 { 2893 struct pcf *p; 2894 struct cpu *cpup; 2895 int enough; 2896 uint_t i; 2897 2898 ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1); 2899 ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp)); 2900 2901 /* 2902 * If `freemem' is 0, we cannot reclaim this page from the 2903 * freelist, so release every lock we might hold: the page, 2904 * and the `lock' before blocking. 2905 * 2906 * The only way `freemem' can become 0 while there are pages 2907 * marked free (have their p->p_free bit set) is when the 2908 * system is low on memory and doing a page_create(). In 2909 * order to guarantee that once page_create() starts acquiring 2910 * pages it will be able to get all that it needs since `freemem' 2911 * was decreased by the requested amount. So, we need to release 2912 * this page, and let page_create() have it. 2913 * 2914 * Since `freemem' being zero is not supposed to happen, just 2915 * use the usual hash stuff as a starting point. If that bucket 2916 * is empty, then assume the worst, and start at the beginning 2917 * of the pcf array. If we always start at the beginning 2918 * when acquiring more than one pcf lock, there won't be any 2919 * deadlock problems. 2920 */ 2921 2922 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */ 2923 2924 if (freemem <= throttlefree && !page_create_throttle(1l, 0)) { 2925 pcf_acquire_all(); 2926 goto page_reclaim_nomem; 2927 } 2928 2929 enough = pcf_decrement_bucket(1); 2930 2931 if (!enough) { 2932 VM_STAT_ADD(page_reclaim_zero); 2933 /* 2934 * Check again. Its possible that some other thread 2935 * could have been right behind us, and added one 2936 * to a list somewhere. Acquire each of the pcf locks 2937 * until we find a page. 
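 *
 * From a caller's point of view the contract described in the block
 * comment above this routine reduces to the following fragment
 * (illustrative only; pp is held SE_EXCL and is still marked free,
 * and retry_lookup is a hypothetical label at which the caller looks
 * the page up and locks it again, since a failing page_reclaim()
 * drops the page lock):
 *
 *	if (page_reclaim(pp, NULL) == 0)
 *		goto retry_lookup;
 *	ASSERT(!PP_ISFREE(pp));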
2938 */ 2939 p = pcf; 2940 for (i = 0; i < pcf_fanout; i++) { 2941 mutex_enter(&p->pcf_lock); 2942 if (p->pcf_count >= 1) { 2943 p->pcf_count -= 1; 2944 enough = 1; 2945 break; 2946 } 2947 p++; 2948 } 2949 2950 if (!enough) { 2951 page_reclaim_nomem: 2952 /* 2953 * We really can't have page `pp'. 2954 * Time for the no-memory dance with 2955 * page_free(). This is just like 2956 * page_create_wait(). Plus the added 2957 * attraction of releasing whatever mutex 2958 * we held when we were called with in `lock'. 2959 * Page_unlock() will wakeup any thread 2960 * waiting around for this page. 2961 */ 2962 if (lock) { 2963 VM_STAT_ADD(page_reclaim_zero_locked); 2964 mutex_exit(lock); 2965 } 2966 page_unlock(pp); 2967 2968 /* 2969 * get this before we drop all the pcf locks. 2970 */ 2971 mutex_enter(&new_freemem_lock); 2972 2973 p = pcf; 2974 for (i = 0; i < pcf_fanout; i++) { 2975 p->pcf_wait++; 2976 mutex_exit(&p->pcf_lock); 2977 p++; 2978 } 2979 2980 freemem_wait++; 2981 cv_wait(&freemem_cv, &new_freemem_lock); 2982 freemem_wait--; 2983 2984 mutex_exit(&new_freemem_lock); 2985 2986 if (lock) { 2987 mutex_enter(lock); 2988 } 2989 return (0); 2990 } 2991 2992 /* 2993 * The pcf accounting has been done, 2994 * though none of the pcf_wait flags have been set, 2995 * drop the locks and continue on. 2996 */ 2997 while (p >= pcf) { 2998 mutex_exit(&p->pcf_lock); 2999 p--; 3000 } 3001 } 3002 3003 /* 3004 * freemem is not protected by any lock. Thus, we cannot 3005 * have any assertion containing freemem here. 3006 */ 3007 freemem -= 1; 3008 3009 VM_STAT_ADD(pagecnt.pc_reclaim); 3010 3011 /* 3012 * page_list_sub will handle the case where pp is a large page. 3013 * It's possible that the page was promoted while on the freelist 3014 */ 3015 if (PP_ISAGED(pp)) { 3016 page_list_sub(pp, PG_FREE_LIST); 3017 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE, 3018 "page_reclaim_free:pp %p", pp); 3019 } else { 3020 page_list_sub(pp, PG_CACHE_LIST); 3021 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE, 3022 "page_reclaim_cache:pp %p", pp); 3023 } 3024 3025 /* 3026 * clear the p_free & p_age bits since this page is no longer 3027 * on the free list. Notice that there was a brief time where 3028 * a page is marked as free, but is not on the list. 3029 * 3030 * Set the reference bit to protect against immediate pageout. 3031 */ 3032 PP_CLRFREE(pp); 3033 PP_CLRAGED(pp); 3034 page_set_props(pp, P_REF); 3035 3036 CPU_STATS_ENTER_K(); 3037 cpup = CPU; /* get cpup now that CPU cannot change */ 3038 CPU_STATS_ADDQ(cpup, vm, pgrec, 1); 3039 CPU_STATS_ADDQ(cpup, vm, pgfrec, 1); 3040 CPU_STATS_EXIT_K(); 3041 ASSERT(pp->p_szc == 0); 3042 3043 return (1); 3044 } 3045 3046 /* 3047 * Destroy identity of the page and put it back on 3048 * the page free list. Assumes that the caller has 3049 * acquired the "exclusive" lock on the page. 3050 */ 3051 void 3052 page_destroy(page_t *pp, int dontfree) 3053 { 3054 ASSERT((PAGE_EXCL(pp) && 3055 !page_iolock_assert(pp)) || panicstr); 3056 ASSERT(pp->p_slckcnt == 0 || panicstr); 3057 3058 if (pp->p_szc != 0) { 3059 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 3060 PP_ISKAS(pp)) { 3061 panic("page_destroy: anon or kernel or no vnode " 3062 "large page %p", (void *)pp); 3063 } 3064 page_demote_vp_pages(pp); 3065 ASSERT(pp->p_szc == 0); 3066 } 3067 3068 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp); 3069 3070 /* 3071 * Unload translations, if any, then hash out the 3072 * page to erase its identity. 
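 *
 * For context, a typical caller reaches this point with something
 * like the hypothetical fragment below; page_lookup() hands back the
 * page with the requested lock held:
 *
 *	if ((pp = page_lookup(vp, off, SE_EXCL)) != NULL)
 *		page_destroy(pp, 0);
 *
 * after which the [vp, off] identity is gone and the frame ends up
 * back on the freelist via page_free().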
3073 */ 3074 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3075 page_hashout(pp, NULL); 3076 3077 if (!dontfree) { 3078 /* 3079 * Acquire the "freemem_lock" for availrmem. 3080 * The page_struct_lock need not be acquired for lckcnt 3081 * and cowcnt since the page has an "exclusive" lock. 3082 */ 3083 if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) { 3084 mutex_enter(&freemem_lock); 3085 if (pp->p_lckcnt != 0) { 3086 availrmem++; 3087 pp->p_lckcnt = 0; 3088 } 3089 if (pp->p_cowcnt != 0) { 3090 availrmem += pp->p_cowcnt; 3091 pp->p_cowcnt = 0; 3092 } 3093 mutex_exit(&freemem_lock); 3094 } 3095 /* 3096 * Put the page on the "free" list. 3097 */ 3098 page_free(pp, 0); 3099 } 3100 } 3101 3102 void 3103 page_destroy_pages(page_t *pp) 3104 { 3105 3106 page_t *tpp, *rootpp = NULL; 3107 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 3108 pgcnt_t i, pglcks = 0; 3109 uint_t szc = pp->p_szc; 3110 3111 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 3112 3113 VM_STAT_ADD(pagecnt.pc_destroy_pages); 3114 3115 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp); 3116 3117 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 3118 panic("page_destroy_pages: not root page %p", (void *)pp); 3119 /*NOTREACHED*/ 3120 } 3121 3122 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 3123 ASSERT((PAGE_EXCL(tpp) && 3124 !page_iolock_assert(tpp)) || panicstr); 3125 ASSERT(tpp->p_slckcnt == 0 || panicstr); 3126 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 3127 page_hashout(tpp, NULL); 3128 ASSERT(tpp->p_offset == (u_offset_t)-1); 3129 if (tpp->p_lckcnt != 0) { 3130 pglcks++; 3131 tpp->p_lckcnt = 0; 3132 } else if (tpp->p_cowcnt != 0) { 3133 pglcks += tpp->p_cowcnt; 3134 tpp->p_cowcnt = 0; 3135 } 3136 ASSERT(!hat_page_getshare(tpp)); 3137 ASSERT(tpp->p_vnode == NULL); 3138 ASSERT(tpp->p_szc == szc); 3139 3140 PP_SETFREE(tpp); 3141 page_clr_all_props(tpp); 3142 PP_SETAGED(tpp); 3143 ASSERT(tpp->p_next == tpp); 3144 ASSERT(tpp->p_prev == tpp); 3145 page_list_concat(&rootpp, &tpp); 3146 } 3147 3148 ASSERT(rootpp == pp); 3149 if (pglcks != 0) { 3150 mutex_enter(&freemem_lock); 3151 availrmem += pglcks; 3152 mutex_exit(&freemem_lock); 3153 } 3154 3155 page_list_add_pages(rootpp, 0); 3156 page_create_putback(pgcnt); 3157 } 3158 3159 /* 3160 * Similar to page_destroy(), but destroys pages which are 3161 * locked and known to be on the page free list. Since 3162 * the page is known to be free and locked, no one can access 3163 * it. 3164 * 3165 * Also, the number of free pages does not change. 3166 */ 3167 void 3168 page_destroy_free(page_t *pp) 3169 { 3170 ASSERT(PAGE_EXCL(pp)); 3171 ASSERT(PP_ISFREE(pp)); 3172 ASSERT(pp->p_vnode); 3173 ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0); 3174 ASSERT(!hat_page_is_mapped(pp)); 3175 ASSERT(PP_ISAGED(pp) == 0); 3176 ASSERT(pp->p_szc == 0); 3177 3178 VM_STAT_ADD(pagecnt.pc_destroy_free); 3179 page_list_sub(pp, PG_CACHE_LIST); 3180 3181 page_hashout(pp, NULL); 3182 ASSERT(pp->p_vnode == NULL); 3183 ASSERT(pp->p_offset == (u_offset_t)-1); 3184 ASSERT(pp->p_hash == NULL); 3185 3186 PP_SETAGED(pp); 3187 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 3188 page_unlock(pp); 3189 3190 mutex_enter(&new_freemem_lock); 3191 if (freemem_wait) { 3192 cv_signal(&freemem_cv); 3193 } 3194 mutex_exit(&new_freemem_lock); 3195 } 3196 3197 /* 3198 * Rename the page "opp" to have an identity specified 3199 * by [vp, off]. If a page already exists with this name 3200 * it is locked and destroyed. Note that the page's 3201 * translations are not unloaded during the rename. 
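 *
 * A minimal sketch of the calling pattern (illustrative only; opp is
 * held SE_EXCL and is not free):
 *
 *	ASSERT(PAGE_EXCL(opp) && !PP_ISFREE(opp));
 *	page_rename(opp, vp, off);
 *
 * On return opp carries the [vp, off] identity; any page that already
 * had that identity has been disposed of and its p_lckcnt/p_cowcnt
 * transferred to opp.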
3202 * 3203 * This routine is used by the anon layer to "steal" the 3204 * original page and is not unlike destroying a page and 3205 * creating a new page using the same page frame. 3206 * 3207 * XXX -- Could deadlock if caller 1 tries to rename A to B while 3208 * caller 2 tries to rename B to A. 3209 */ 3210 void 3211 page_rename(page_t *opp, vnode_t *vp, u_offset_t off) 3212 { 3213 page_t *pp; 3214 int olckcnt = 0; 3215 int ocowcnt = 0; 3216 kmutex_t *phm; 3217 ulong_t index; 3218 3219 ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp)); 3220 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3221 ASSERT(PP_ISFREE(opp) == 0); 3222 3223 VM_STAT_ADD(page_rename_count); 3224 3225 TRACE_3(TR_FAC_VM, TR_PAGE_RENAME, 3226 "page rename:pp %p vp %p off %llx", opp, vp, off); 3227 3228 /* 3229 * CacheFS may call page_rename for a large NFS page 3230 * when both CacheFS and NFS mount points are used 3231 * by applications. Demote this large page before 3232 * renaming it, to ensure that there are no "partial" 3233 * large pages left lying around. 3234 */ 3235 if (opp->p_szc != 0) { 3236 vnode_t *ovp = opp->p_vnode; 3237 ASSERT(ovp != NULL); 3238 ASSERT(!IS_SWAPFSVP(ovp)); 3239 ASSERT(!VN_ISKAS(ovp)); 3240 page_demote_vp_pages(opp); 3241 ASSERT(opp->p_szc == 0); 3242 } 3243 3244 page_hashout(opp, NULL); 3245 PP_CLRAGED(opp); 3246 3247 /* 3248 * Acquire the appropriate page hash lock, since 3249 * we're going to rename the page. 3250 */ 3251 index = PAGE_HASH_FUNC(vp, off); 3252 phm = PAGE_HASH_MUTEX(index); 3253 mutex_enter(phm); 3254 top: 3255 /* 3256 * Look for an existing page with this name and destroy it if found. 3257 * By holding the page hash lock all the way to the page_hashin() 3258 * call, we are assured that no page can be created with this 3259 * identity. In the case when the phm lock is dropped to undo any 3260 * hat layer mappings, the existing page is held with an "exclusive" 3261 * lock, again preventing another page from being created with 3262 * this identity. 3263 */ 3264 PAGE_HASH_SEARCH(index, pp, vp, off); 3265 if (pp != NULL) { 3266 VM_STAT_ADD(page_rename_exists); 3267 3268 /* 3269 * As it turns out, this is one of only two places where 3270 * page_lock() needs to hold the passed in lock in the 3271 * successful case. In all of the others, the lock could 3272 * be dropped as soon as the attempt is made to lock 3273 * the page. It is tempting to add yet another arguement, 3274 * PL_KEEP or PL_DROP, to let page_lock know what to do. 3275 */ 3276 if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) { 3277 /* 3278 * Went to sleep because the page could not 3279 * be locked. We were woken up when the page 3280 * was unlocked, or when the page was destroyed. 3281 * In either case, `phm' was dropped while we 3282 * slept. Hence we should not just roar through 3283 * this loop. 3284 */ 3285 goto top; 3286 } 3287 3288 /* 3289 * If an existing page is a large page, then demote 3290 * it to ensure that no "partial" large pages are 3291 * "created" after page_rename. An existing page 3292 * can be a CacheFS page, and can't belong to swapfs. 3293 */ 3294 if (hat_page_is_mapped(pp)) { 3295 /* 3296 * Unload translations. Since we hold the 3297 * exclusive lock on this page, the page 3298 * can not be changed while we drop phm. 3299 * This is also not a lock protocol violation, 3300 * but rather the proper way to do things. 
3301 */ 3302 mutex_exit(phm); 3303 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3304 if (pp->p_szc != 0) { 3305 ASSERT(!IS_SWAPFSVP(vp)); 3306 ASSERT(!VN_ISKAS(vp)); 3307 page_demote_vp_pages(pp); 3308 ASSERT(pp->p_szc == 0); 3309 } 3310 mutex_enter(phm); 3311 } else if (pp->p_szc != 0) { 3312 ASSERT(!IS_SWAPFSVP(vp)); 3313 ASSERT(!VN_ISKAS(vp)); 3314 mutex_exit(phm); 3315 page_demote_vp_pages(pp); 3316 ASSERT(pp->p_szc == 0); 3317 mutex_enter(phm); 3318 } 3319 page_hashout(pp, phm); 3320 } 3321 /* 3322 * Hash in the page with the new identity. 3323 */ 3324 if (!page_hashin(opp, vp, off, phm)) { 3325 /* 3326 * We were holding phm while we searched for [vp, off] 3327 * and only dropped phm if we found and locked a page. 3328 * If we can't create this page now, then some thing 3329 * is really broken. 3330 */ 3331 panic("page_rename: Can't hash in page: %p", (void *)pp); 3332 /*NOTREACHED*/ 3333 } 3334 3335 ASSERT(MUTEX_HELD(phm)); 3336 mutex_exit(phm); 3337 3338 /* 3339 * Now that we have dropped phm, lets get around to finishing up 3340 * with pp. 3341 */ 3342 if (pp != NULL) { 3343 ASSERT(!hat_page_is_mapped(pp)); 3344 /* for now large pages should not end up here */ 3345 ASSERT(pp->p_szc == 0); 3346 /* 3347 * Save the locks for transfer to the new page and then 3348 * clear them so page_free doesn't think they're important. 3349 * The page_struct_lock need not be acquired for lckcnt and 3350 * cowcnt since the page has an "exclusive" lock. 3351 */ 3352 olckcnt = pp->p_lckcnt; 3353 ocowcnt = pp->p_cowcnt; 3354 pp->p_lckcnt = pp->p_cowcnt = 0; 3355 3356 /* 3357 * Put the page on the "free" list after we drop 3358 * the lock. The less work under the lock the better. 3359 */ 3360 /*LINTED: constant in conditional context*/ 3361 VN_DISPOSE(pp, B_FREE, 0, kcred); 3362 } 3363 3364 /* 3365 * Transfer the lock count from the old page (if any). 3366 * The page_struct_lock need not be acquired for lckcnt and 3367 * cowcnt since the page has an "exclusive" lock. 3368 */ 3369 opp->p_lckcnt += olckcnt; 3370 opp->p_cowcnt += ocowcnt; 3371 } 3372 3373 /* 3374 * low level routine to add page `pp' to the hash and vp chains for [vp, offset] 3375 * 3376 * Pages are normally inserted at the start of a vnode's v_pages list. 3377 * If the vnode is VMODSORT and the page is modified, it goes at the end. 3378 * This can happen when a modified page is relocated for DR. 3379 * 3380 * Returns 1 on success and 0 on failure. 3381 */ 3382 static int 3383 page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset) 3384 { 3385 page_t **listp; 3386 page_t *tp; 3387 ulong_t index; 3388 3389 ASSERT(PAGE_EXCL(pp)); 3390 ASSERT(vp != NULL); 3391 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3392 3393 /* 3394 * Be sure to set these up before the page is inserted on the hash 3395 * list. As soon as the page is placed on the list some other 3396 * thread might get confused and wonder how this page could 3397 * possibly hash to this list. 3398 */ 3399 pp->p_vnode = vp; 3400 pp->p_offset = offset; 3401 3402 /* 3403 * record if this page is on a swap vnode 3404 */ 3405 if ((vp->v_flag & VISSWAP) != 0) 3406 PP_SETSWAP(pp); 3407 3408 index = PAGE_HASH_FUNC(vp, offset); 3409 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index))); 3410 listp = &page_hash[index]; 3411 3412 /* 3413 * If this page is already hashed in, fail this attempt to add it. 
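 *
 * Callers normally go through page_hashin(); a sketch of a caller
 * that manages the hash mutex itself, mirroring page_create_va()
 * above (pp is held SE_EXCL, names are illustrative):
 *
 *	index = PAGE_HASH_FUNC(vp, off);
 *	phm = PAGE_HASH_MUTEX(index);
 *	mutex_enter(phm);
 *	if (!page_hashin(pp, vp, off, phm))
 *		panic("unexpected duplicate identity");
 *	mutex_exit(phm);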
3414 */ 3415 for (tp = *listp; tp != NULL; tp = tp->p_hash) { 3416 if (tp->p_vnode == vp && tp->p_offset == offset) { 3417 pp->p_vnode = NULL; 3418 pp->p_offset = (u_offset_t)(-1); 3419 return (0); 3420 } 3421 } 3422 pp->p_hash = *listp; 3423 *listp = pp; 3424 3425 /* 3426 * Add the page to the vnode's list of pages 3427 */ 3428 if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp)) 3429 listp = &vp->v_pages->p_vpprev->p_vpnext; 3430 else 3431 listp = &vp->v_pages; 3432 3433 page_vpadd(listp, pp); 3434 3435 return (1); 3436 } 3437 3438 /* 3439 * Add page `pp' to both the hash and vp chains for [vp, offset]. 3440 * 3441 * Returns 1 on success and 0 on failure. 3442 * If hold is passed in, it is not dropped. 3443 */ 3444 int 3445 page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold) 3446 { 3447 kmutex_t *phm = NULL; 3448 kmutex_t *vphm; 3449 int rc; 3450 3451 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3452 3453 TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN, 3454 "page_hashin:pp %p vp %p offset %llx", 3455 pp, vp, offset); 3456 3457 VM_STAT_ADD(hashin_count); 3458 3459 if (hold != NULL) 3460 phm = hold; 3461 else { 3462 VM_STAT_ADD(hashin_not_held); 3463 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset)); 3464 mutex_enter(phm); 3465 } 3466 3467 vphm = page_vnode_mutex(vp); 3468 mutex_enter(vphm); 3469 rc = page_do_hashin(pp, vp, offset); 3470 mutex_exit(vphm); 3471 if (hold == NULL) 3472 mutex_exit(phm); 3473 if (rc == 0) 3474 VM_STAT_ADD(hashin_already); 3475 return (rc); 3476 } 3477 3478 /* 3479 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3480 * All mutexes must be held 3481 */ 3482 static void 3483 page_do_hashout(page_t *pp) 3484 { 3485 page_t **hpp; 3486 page_t *hp; 3487 vnode_t *vp = pp->p_vnode; 3488 3489 ASSERT(vp != NULL); 3490 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3491 3492 /* 3493 * First, take pp off of its hash chain. 3494 */ 3495 hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)]; 3496 3497 for (;;) { 3498 hp = *hpp; 3499 if (hp == pp) 3500 break; 3501 if (hp == NULL) { 3502 panic("page_do_hashout"); 3503 /*NOTREACHED*/ 3504 } 3505 hpp = &hp->p_hash; 3506 } 3507 *hpp = pp->p_hash; 3508 3509 /* 3510 * Now remove it from its associated vnode. 3511 */ 3512 if (vp->v_pages) 3513 page_vpsub(&vp->v_pages, pp); 3514 3515 pp->p_hash = NULL; 3516 page_clr_all_props(pp); 3517 PP_CLRSWAP(pp); 3518 pp->p_vnode = NULL; 3519 pp->p_offset = (u_offset_t)-1; 3520 } 3521 3522 /* 3523 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3524 * 3525 * When `phm' is non-NULL it contains the address of the mutex protecting the 3526 * hash list pp is on. It is not dropped. 3527 */ 3528 void 3529 page_hashout(page_t *pp, kmutex_t *phm) 3530 { 3531 vnode_t *vp; 3532 ulong_t index; 3533 kmutex_t *nphm; 3534 kmutex_t *vphm; 3535 kmutex_t *sep; 3536 3537 ASSERT(phm != NULL ? 
MUTEX_HELD(phm) : 1); 3538 ASSERT(pp->p_vnode != NULL); 3539 ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr); 3540 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode))); 3541 3542 vp = pp->p_vnode; 3543 3544 TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT, 3545 "page_hashout:pp %p vp %p", pp, vp); 3546 3547 /* Kernel probe */ 3548 TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */, 3549 tnf_opaque, vnode, vp, 3550 tnf_offset, offset, pp->p_offset); 3551 3552 /* 3553 * 3554 */ 3555 VM_STAT_ADD(hashout_count); 3556 index = PAGE_HASH_FUNC(vp, pp->p_offset); 3557 if (phm == NULL) { 3558 VM_STAT_ADD(hashout_not_held); 3559 nphm = PAGE_HASH_MUTEX(index); 3560 mutex_enter(nphm); 3561 } 3562 ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1); 3563 3564 3565 /* 3566 * grab page vnode mutex and remove it... 3567 */ 3568 vphm = page_vnode_mutex(vp); 3569 mutex_enter(vphm); 3570 3571 page_do_hashout(pp); 3572 3573 mutex_exit(vphm); 3574 if (phm == NULL) 3575 mutex_exit(nphm); 3576 3577 /* 3578 * Wake up processes waiting for this page. The page's 3579 * identity has been changed, and is probably not the 3580 * desired page any longer. 3581 */ 3582 sep = page_se_mutex(pp); 3583 mutex_enter(sep); 3584 pp->p_selock &= ~SE_EWANTED; 3585 if (CV_HAS_WAITERS(&pp->p_cv)) 3586 cv_broadcast(&pp->p_cv); 3587 mutex_exit(sep); 3588 } 3589 3590 /* 3591 * Add the page to the front of a linked list of pages 3592 * using the p_next & p_prev pointers for the list. 3593 * The caller is responsible for protecting the list pointers. 3594 */ 3595 void 3596 page_add(page_t **ppp, page_t *pp) 3597 { 3598 ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3599 3600 page_add_common(ppp, pp); 3601 } 3602 3603 3604 3605 /* 3606 * Common code for page_add() and mach_page_add() 3607 */ 3608 void 3609 page_add_common(page_t **ppp, page_t *pp) 3610 { 3611 if (*ppp == NULL) { 3612 pp->p_next = pp->p_prev = pp; 3613 } else { 3614 pp->p_next = *ppp; 3615 pp->p_prev = (*ppp)->p_prev; 3616 (*ppp)->p_prev = pp; 3617 pp->p_prev->p_next = pp; 3618 } 3619 *ppp = pp; 3620 } 3621 3622 3623 /* 3624 * Remove this page from a linked list of pages 3625 * using the p_next & p_prev pointers for the list. 3626 * 3627 * The caller is responsible for protecting the list pointers. 3628 */ 3629 void 3630 page_sub(page_t **ppp, page_t *pp) 3631 { 3632 ASSERT((PP_ISFREE(pp)) ? 1 : 3633 (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3634 3635 if (*ppp == NULL || pp == NULL) { 3636 panic("page_sub: bad arg(s): pp %p, *ppp %p", 3637 (void *)pp, (void *)(*ppp)); 3638 /*NOTREACHED*/ 3639 } 3640 3641 page_sub_common(ppp, pp); 3642 } 3643 3644 3645 /* 3646 * Common code for page_sub() and mach_page_sub() 3647 */ 3648 void 3649 page_sub_common(page_t **ppp, page_t *pp) 3650 { 3651 if (*ppp == pp) 3652 *ppp = pp->p_next; /* go to next page */ 3653 3654 if (*ppp == pp) 3655 *ppp = NULL; /* page list is gone */ 3656 else { 3657 pp->p_prev->p_next = pp->p_next; 3658 pp->p_next->p_prev = pp->p_prev; 3659 } 3660 pp->p_prev = pp->p_next = pp; /* make pp a list of one */ 3661 } 3662 3663 3664 /* 3665 * Break page list cppp into two lists with npages in the first list. 3666 * The tail is returned in nppp. 
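 *
 * A small illustrative fragment (plist and npgs are hypothetical)
 * that splits the first npgs pages off a list and later rejoins them:
 *
 *	page_t *tail = NULL;
 *
 *	page_list_break(&plist, &tail, npgs);
 *	page_list_concat(&plist, &tail);
 *
 * After the break, plist holds the first npgs pages and tail holds
 * whatever remained; the concat re-appends tail to the end of plist.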
3667 */ 3668 void 3669 page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages) 3670 { 3671 page_t *s1pp = *oppp; 3672 page_t *s2pp; 3673 page_t *e1pp, *e2pp; 3674 long n = 0; 3675 3676 if (s1pp == NULL) { 3677 *nppp = NULL; 3678 return; 3679 } 3680 if (npages == 0) { 3681 *nppp = s1pp; 3682 *oppp = NULL; 3683 return; 3684 } 3685 for (n = 0, s2pp = *oppp; n < npages; n++) { 3686 s2pp = s2pp->p_next; 3687 } 3688 /* Fix head and tail of new lists */ 3689 e1pp = s2pp->p_prev; 3690 e2pp = s1pp->p_prev; 3691 s1pp->p_prev = e1pp; 3692 e1pp->p_next = s1pp; 3693 s2pp->p_prev = e2pp; 3694 e2pp->p_next = s2pp; 3695 3696 /* second list empty */ 3697 if (s2pp == s1pp) { 3698 *oppp = s1pp; 3699 *nppp = NULL; 3700 } else { 3701 *oppp = s1pp; 3702 *nppp = s2pp; 3703 } 3704 } 3705 3706 /* 3707 * Concatenate page list nppp onto the end of list ppp. 3708 */ 3709 void 3710 page_list_concat(page_t **ppp, page_t **nppp) 3711 { 3712 page_t *s1pp, *s2pp, *e1pp, *e2pp; 3713 3714 if (*nppp == NULL) { 3715 return; 3716 } 3717 if (*ppp == NULL) { 3718 *ppp = *nppp; 3719 return; 3720 } 3721 s1pp = *ppp; 3722 e1pp = s1pp->p_prev; 3723 s2pp = *nppp; 3724 e2pp = s2pp->p_prev; 3725 s1pp->p_prev = e2pp; 3726 e2pp->p_next = s1pp; 3727 e1pp->p_next = s2pp; 3728 s2pp->p_prev = e1pp; 3729 } 3730 3731 /* 3732 * return the next page in the page list 3733 */ 3734 page_t * 3735 page_list_next(page_t *pp) 3736 { 3737 return (pp->p_next); 3738 } 3739 3740 3741 /* 3742 * Add the page to the front of the linked list of pages 3743 * using p_vpnext/p_vpprev pointers for the list. 3744 * 3745 * The caller is responsible for protecting the lists. 3746 */ 3747 void 3748 page_vpadd(page_t **ppp, page_t *pp) 3749 { 3750 if (*ppp == NULL) { 3751 pp->p_vpnext = pp->p_vpprev = pp; 3752 } else { 3753 pp->p_vpnext = *ppp; 3754 pp->p_vpprev = (*ppp)->p_vpprev; 3755 (*ppp)->p_vpprev = pp; 3756 pp->p_vpprev->p_vpnext = pp; 3757 } 3758 *ppp = pp; 3759 } 3760 3761 /* 3762 * Remove this page from the linked list of pages 3763 * using p_vpnext/p_vpprev pointers for the list. 3764 * 3765 * The caller is responsible for protecting the lists. 3766 */ 3767 void 3768 page_vpsub(page_t **ppp, page_t *pp) 3769 { 3770 if (*ppp == NULL || pp == NULL) { 3771 panic("page_vpsub: bad arg(s): pp %p, *ppp %p", 3772 (void *)pp, (void *)(*ppp)); 3773 /*NOTREACHED*/ 3774 } 3775 3776 if (*ppp == pp) 3777 *ppp = pp->p_vpnext; /* go to next page */ 3778 3779 if (*ppp == pp) 3780 *ppp = NULL; /* page list is gone */ 3781 else { 3782 pp->p_vpprev->p_vpnext = pp->p_vpnext; 3783 pp->p_vpnext->p_vpprev = pp->p_vpprev; 3784 } 3785 pp->p_vpprev = pp->p_vpnext = pp; /* make pp a list of one */ 3786 } 3787 3788 /* 3789 * Lock a physical page into memory "long term". Used to support "lock 3790 * in memory" functions. Accepts the page to be locked, and a cow variable 3791 * to indicate whether a the lock will travel to the new page during 3792 * a potential copy-on-write. 3793 */ 3794 int 3795 page_pp_lock( 3796 page_t *pp, /* page to be locked */ 3797 int cow, /* cow lock */ 3798 int kernel) /* must succeed -- ignore checking */ 3799 { 3800 int r = 0; /* result -- assume failure */ 3801 3802 ASSERT(PAGE_LOCKED(pp)); 3803 3804 page_struct_lock(pp); 3805 /* 3806 * Acquire the "freemem_lock" for availrmem. 
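 *
 * Because a new long term lock is charged against availrmem, the call
 * can fail and callers must check the return value. A hedged sketch
 * of an mlock-style caller (hypothetical; pp is already page_lock()ed
 * and do_long_term_io() stands in for whatever the caller does while
 * the page is pinned):
 *
 *	if (page_pp_lock(pp, 0, 0) == 0)
 *		return (EAGAIN);
 *	do_long_term_io(pp);
 *	page_pp_unlock(pp, 0, 0);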
3807 */ 3808 if (cow) { 3809 mutex_enter(&freemem_lock); 3810 if ((availrmem > pages_pp_maximum) && 3811 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 3812 availrmem--; 3813 pages_locked++; 3814 mutex_exit(&freemem_lock); 3815 r = 1; 3816 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 3817 cmn_err(CE_WARN, 3818 "COW lock limit reached on pfn 0x%lx", 3819 page_pptonum(pp)); 3820 } 3821 } else 3822 mutex_exit(&freemem_lock); 3823 } else { 3824 if (pp->p_lckcnt) { 3825 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 3826 r = 1; 3827 if (++pp->p_lckcnt == 3828 (ushort_t)PAGE_LOCK_MAXIMUM) { 3829 cmn_err(CE_WARN, "Page lock limit " 3830 "reached on pfn 0x%lx", 3831 page_pptonum(pp)); 3832 } 3833 } 3834 } else { 3835 if (kernel) { 3836 /* availrmem accounting done by caller */ 3837 ++pp->p_lckcnt; 3838 r = 1; 3839 } else { 3840 mutex_enter(&freemem_lock); 3841 if (availrmem > pages_pp_maximum) { 3842 availrmem--; 3843 pages_locked++; 3844 ++pp->p_lckcnt; 3845 r = 1; 3846 } 3847 mutex_exit(&freemem_lock); 3848 } 3849 } 3850 } 3851 page_struct_unlock(pp); 3852 return (r); 3853 } 3854 3855 /* 3856 * Decommit a lock on a physical page frame. Account for cow locks if 3857 * appropriate. 3858 */ 3859 void 3860 page_pp_unlock( 3861 page_t *pp, /* page to be unlocked */ 3862 int cow, /* expect cow lock */ 3863 int kernel) /* this was a kernel lock */ 3864 { 3865 ASSERT(PAGE_LOCKED(pp)); 3866 3867 page_struct_lock(pp); 3868 /* 3869 * Acquire the "freemem_lock" for availrmem. 3870 * If cowcnt or lcknt is already 0 do nothing; i.e., we 3871 * could be called to unlock even if nothing is locked. This could 3872 * happen if locked file pages were truncated (removing the lock) 3873 * and the file was grown again and new pages faulted in; the new 3874 * pages are unlocked but the segment still thinks they're locked. 3875 */ 3876 if (cow) { 3877 if (pp->p_cowcnt) { 3878 mutex_enter(&freemem_lock); 3879 pp->p_cowcnt--; 3880 availrmem++; 3881 pages_locked--; 3882 mutex_exit(&freemem_lock); 3883 } 3884 } else { 3885 if (pp->p_lckcnt && --pp->p_lckcnt == 0) { 3886 if (!kernel) { 3887 mutex_enter(&freemem_lock); 3888 availrmem++; 3889 pages_locked--; 3890 mutex_exit(&freemem_lock); 3891 } 3892 } 3893 } 3894 page_struct_unlock(pp); 3895 } 3896 3897 /* 3898 * This routine reserves availrmem for npages; 3899 * flags: KM_NOSLEEP or KM_SLEEP 3900 * returns 1 on success or 0 on failure 3901 */ 3902 int 3903 page_resv(pgcnt_t npages, uint_t flags) 3904 { 3905 mutex_enter(&freemem_lock); 3906 while (availrmem < tune.t_minarmem + npages) { 3907 if (flags & KM_NOSLEEP) { 3908 mutex_exit(&freemem_lock); 3909 return (0); 3910 } 3911 mutex_exit(&freemem_lock); 3912 page_needfree(npages); 3913 kmem_reap(); 3914 delay(hz >> 2); 3915 page_needfree(-(spgcnt_t)npages); 3916 mutex_enter(&freemem_lock); 3917 } 3918 availrmem -= npages; 3919 mutex_exit(&freemem_lock); 3920 return (1); 3921 } 3922 3923 /* 3924 * This routine unreserves availrmem for npages; 3925 */ 3926 void 3927 page_unresv(pgcnt_t npages) 3928 { 3929 mutex_enter(&freemem_lock); 3930 availrmem += npages; 3931 mutex_exit(&freemem_lock); 3932 } 3933 3934 /* 3935 * See Statement at the beginning of segvn_lockop() regarding 3936 * the way we handle cowcnts and lckcnts. 3937 * 3938 * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage 3939 * that breaks COW has PROT_WRITE. 3940 * 3941 * Note that, we may also break COW in case we are softlocking 3942 * on read access during physio; 3943 * in this softlock case, the vpage may not have PROT_WRITE. 
3944 * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp' 3945 * if the vpage doesn't have PROT_WRITE. 3946 * 3947 * This routine is never called if we are stealing a page 3948 * in anon_private. 3949 * 3950 * The caller subtracted from availrmem for read only mapping. 3951 * if lckcnt is 1 increment availrmem. 3952 */ 3953 void 3954 page_pp_useclaim( 3955 page_t *opp, /* original page frame losing lock */ 3956 page_t *npp, /* new page frame gaining lock */ 3957 uint_t write_perm) /* set if vpage has PROT_WRITE */ 3958 { 3959 int payback = 0; 3960 3961 ASSERT(PAGE_LOCKED(opp)); 3962 ASSERT(PAGE_LOCKED(npp)); 3963 3964 page_struct_lock(opp); 3965 3966 ASSERT(npp->p_cowcnt == 0); 3967 ASSERT(npp->p_lckcnt == 0); 3968 3969 /* Don't use claim if nothing is locked (see page_pp_unlock above) */ 3970 if ((write_perm && opp->p_cowcnt != 0) || 3971 (!write_perm && opp->p_lckcnt != 0)) { 3972 3973 if (write_perm) { 3974 npp->p_cowcnt++; 3975 ASSERT(opp->p_cowcnt != 0); 3976 opp->p_cowcnt--; 3977 } else { 3978 3979 ASSERT(opp->p_lckcnt != 0); 3980 3981 /* 3982 * We didn't need availrmem decremented if p_lckcnt on 3983 * original page is 1. Here, we are unlocking 3984 * read-only copy belonging to original page and 3985 * are locking a copy belonging to new page. 3986 */ 3987 if (opp->p_lckcnt == 1) 3988 payback = 1; 3989 3990 npp->p_lckcnt++; 3991 opp->p_lckcnt--; 3992 } 3993 } 3994 if (payback) { 3995 mutex_enter(&freemem_lock); 3996 availrmem++; 3997 pages_useclaim--; 3998 mutex_exit(&freemem_lock); 3999 } 4000 page_struct_unlock(opp); 4001 } 4002 4003 /* 4004 * Simple claim adjust functions -- used to support changes in 4005 * claims due to changes in access permissions. Used by segvn_setprot(). 4006 */ 4007 int 4008 page_addclaim(page_t *pp) 4009 { 4010 int r = 0; /* result */ 4011 4012 ASSERT(PAGE_LOCKED(pp)); 4013 4014 page_struct_lock(pp); 4015 ASSERT(pp->p_lckcnt != 0); 4016 4017 if (pp->p_lckcnt == 1) { 4018 if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4019 --pp->p_lckcnt; 4020 r = 1; 4021 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4022 cmn_err(CE_WARN, 4023 "COW lock limit reached on pfn 0x%lx", 4024 page_pptonum(pp)); 4025 } 4026 } 4027 } else { 4028 mutex_enter(&freemem_lock); 4029 if ((availrmem > pages_pp_maximum) && 4030 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 4031 --availrmem; 4032 ++pages_claimed; 4033 mutex_exit(&freemem_lock); 4034 --pp->p_lckcnt; 4035 r = 1; 4036 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4037 cmn_err(CE_WARN, 4038 "COW lock limit reached on pfn 0x%lx", 4039 page_pptonum(pp)); 4040 } 4041 } else 4042 mutex_exit(&freemem_lock); 4043 } 4044 page_struct_unlock(pp); 4045 return (r); 4046 } 4047 4048 int 4049 page_subclaim(page_t *pp) 4050 { 4051 int r = 0; 4052 4053 ASSERT(PAGE_LOCKED(pp)); 4054 4055 page_struct_lock(pp); 4056 ASSERT(pp->p_cowcnt != 0); 4057 4058 if (pp->p_lckcnt) { 4059 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4060 r = 1; 4061 /* 4062 * for availrmem 4063 */ 4064 mutex_enter(&freemem_lock); 4065 availrmem++; 4066 pages_claimed--; 4067 mutex_exit(&freemem_lock); 4068 4069 pp->p_cowcnt--; 4070 4071 if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4072 cmn_err(CE_WARN, 4073 "Page lock limit reached on pfn 0x%lx", 4074 page_pptonum(pp)); 4075 } 4076 } 4077 } else { 4078 r = 1; 4079 pp->p_cowcnt--; 4080 pp->p_lckcnt++; 4081 } 4082 page_struct_unlock(pp); 4083 return (r); 4084 } 4085 4086 int 4087 page_addclaim_pages(page_t **ppa) 4088 { 4089 4090 pgcnt_t lckpgs = 0, pg_idx; 4091 4092 
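	/*
	 * Two passes over the NULL-terminated ppa[] array: first verify
	 * that every page can take another cow claim and count how many
	 * of them (those holding more than one lock) need an availrmem
	 * reservation, then reserve availrmem in one shot, and finally
	 * convert one lckcnt into a cowcnt on each page.  Holding
	 * page_llock across both passes makes the whole group succeed
	 * or fail atomically.
	 */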
VM_STAT_ADD(pagecnt.pc_addclaim_pages); 4093 4094 mutex_enter(&page_llock); 4095 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4096 4097 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4098 ASSERT(ppa[pg_idx]->p_lckcnt != 0); 4099 if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4100 mutex_exit(&page_llock); 4101 return (0); 4102 } 4103 if (ppa[pg_idx]->p_lckcnt > 1) 4104 lckpgs++; 4105 } 4106 4107 if (lckpgs != 0) { 4108 mutex_enter(&freemem_lock); 4109 if (availrmem >= pages_pp_maximum + lckpgs) { 4110 availrmem -= lckpgs; 4111 pages_claimed += lckpgs; 4112 } else { 4113 mutex_exit(&freemem_lock); 4114 mutex_exit(&page_llock); 4115 return (0); 4116 } 4117 mutex_exit(&freemem_lock); 4118 } 4119 4120 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4121 ppa[pg_idx]->p_lckcnt--; 4122 ppa[pg_idx]->p_cowcnt++; 4123 } 4124 mutex_exit(&page_llock); 4125 return (1); 4126 } 4127 4128 int 4129 page_subclaim_pages(page_t **ppa) 4130 { 4131 pgcnt_t ulckpgs = 0, pg_idx; 4132 4133 VM_STAT_ADD(pagecnt.pc_subclaim_pages); 4134 4135 mutex_enter(&page_llock); 4136 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4137 4138 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4139 ASSERT(ppa[pg_idx]->p_cowcnt != 0); 4140 if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4141 mutex_exit(&page_llock); 4142 return (0); 4143 } 4144 if (ppa[pg_idx]->p_lckcnt != 0) 4145 ulckpgs++; 4146 } 4147 4148 if (ulckpgs != 0) { 4149 mutex_enter(&freemem_lock); 4150 availrmem += ulckpgs; 4151 pages_claimed -= ulckpgs; 4152 mutex_exit(&freemem_lock); 4153 } 4154 4155 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4156 ppa[pg_idx]->p_cowcnt--; 4157 ppa[pg_idx]->p_lckcnt++; 4158 4159 } 4160 mutex_exit(&page_llock); 4161 return (1); 4162 } 4163 4164 page_t * 4165 page_numtopp(pfn_t pfnum, se_t se) 4166 { 4167 page_t *pp; 4168 4169 retry: 4170 pp = page_numtopp_nolock(pfnum); 4171 if (pp == NULL) { 4172 return ((page_t *)NULL); 4173 } 4174 4175 /* 4176 * Acquire the appropriate lock on the page. 4177 */ 4178 while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) { 4179 if (page_pptonum(pp) != pfnum) 4180 goto retry; 4181 continue; 4182 } 4183 4184 if (page_pptonum(pp) != pfnum) { 4185 page_unlock(pp); 4186 goto retry; 4187 } 4188 4189 return (pp); 4190 } 4191 4192 page_t * 4193 page_numtopp_noreclaim(pfn_t pfnum, se_t se) 4194 { 4195 page_t *pp; 4196 4197 retry: 4198 pp = page_numtopp_nolock(pfnum); 4199 if (pp == NULL) { 4200 return ((page_t *)NULL); 4201 } 4202 4203 /* 4204 * Acquire the appropriate lock on the page. 4205 */ 4206 while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) { 4207 if (page_pptonum(pp) != pfnum) 4208 goto retry; 4209 continue; 4210 } 4211 4212 if (page_pptonum(pp) != pfnum) { 4213 page_unlock(pp); 4214 goto retry; 4215 } 4216 4217 return (pp); 4218 } 4219 4220 /* 4221 * This routine is like page_numtopp, but will only return page structs 4222 * for pages which are ok for loading into hardware using the page struct. 4223 */ 4224 page_t * 4225 page_numtopp_nowait(pfn_t pfnum, se_t se) 4226 { 4227 page_t *pp; 4228 4229 retry: 4230 pp = page_numtopp_nolock(pfnum); 4231 if (pp == NULL) { 4232 return ((page_t *)NULL); 4233 } 4234 4235 /* 4236 * Try to acquire the appropriate lock on the page. 
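 *
 * Because only page_trylock() is used, this variant never blocks; a
 * hypothetical caller that just wants to peek at an allocated page
 * behind a pfn might do:
 *
 *	if ((pp = page_numtopp_nowait(pfn, SE_SHARED)) == NULL)
 *		return (EBUSY);
 *	examine_page(pp);
 *	page_unlock(pp);
 *
 * where examine_page() stands in for whatever inspection is wanted
 * while the page cannot change identity or be freed.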
4237 */ 4238 if (PP_ISFREE(pp)) 4239 pp = NULL; 4240 else { 4241 if (!page_trylock(pp, se)) 4242 pp = NULL; 4243 else { 4244 if (page_pptonum(pp) != pfnum) { 4245 page_unlock(pp); 4246 goto retry; 4247 } 4248 if (PP_ISFREE(pp)) { 4249 page_unlock(pp); 4250 pp = NULL; 4251 } 4252 } 4253 } 4254 return (pp); 4255 } 4256 4257 /* 4258 * Returns a count of dirty pages that are in the process 4259 * of being written out. If 'cleanit' is set, try to push the page. 4260 */ 4261 pgcnt_t 4262 page_busy(int cleanit) 4263 { 4264 page_t *page0 = page_first(); 4265 page_t *pp = page0; 4266 pgcnt_t nppbusy = 0; 4267 u_offset_t off; 4268 4269 do { 4270 vnode_t *vp = pp->p_vnode; 4271 4272 /* 4273 * A page is a candidate for syncing if it is: 4274 * 4275 * (a) On neither the freelist nor the cachelist 4276 * (b) Hashed onto a vnode 4277 * (c) Not a kernel page 4278 * (d) Dirty 4279 * (e) Not part of a swapfile 4280 * (f) a page which belongs to a real vnode; eg has a non-null 4281 * v_vfsp pointer. 4282 * (g) Backed by a filesystem which doesn't have a 4283 * stubbed-out sync operation 4284 */ 4285 if (!PP_ISFREE(pp) && vp != NULL && !VN_ISKAS(vp) && 4286 hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL && 4287 vfs_can_sync(vp->v_vfsp)) { 4288 nppbusy++; 4289 vfs_syncprogress(); 4290 4291 if (!cleanit) 4292 continue; 4293 if (!page_trylock(pp, SE_EXCL)) 4294 continue; 4295 4296 if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) || 4297 pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 4298 !(hat_pagesync(pp, 4299 HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) { 4300 page_unlock(pp); 4301 continue; 4302 } 4303 off = pp->p_offset; 4304 VN_HOLD(vp); 4305 page_unlock(pp); 4306 (void) VOP_PUTPAGE(vp, off, PAGESIZE, 4307 B_ASYNC | B_FREE, kcred, NULL); 4308 VN_RELE(vp); 4309 } 4310 } while ((pp = page_next(pp)) != page0); 4311 4312 return (nppbusy); 4313 } 4314 4315 void page_invalidate_pages(void); 4316 4317 /* 4318 * callback handler to vm sub-system 4319 * 4320 * callers make sure no recursive entries to this func. 4321 */ 4322 /*ARGSUSED*/ 4323 boolean_t 4324 callb_vm_cpr(void *arg, int code) 4325 { 4326 if (code == CB_CODE_CPR_CHKPT) 4327 page_invalidate_pages(); 4328 return (B_TRUE); 4329 } 4330 4331 /* 4332 * Invalidate all pages of the system. 4333 * It shouldn't be called until all user page activities are all stopped. 4334 */ 4335 void 4336 page_invalidate_pages() 4337 { 4338 page_t *pp; 4339 page_t *page0; 4340 pgcnt_t nbusypages; 4341 int retry = 0; 4342 const int MAXRETRIES = 4; 4343 #if defined(__sparc) 4344 extern struct vnode prom_ppages; 4345 #endif /* __sparc */ 4346 4347 top: 4348 /* 4349 * Flush dirty pages and destroy the clean ones. 4350 */ 4351 nbusypages = 0; 4352 4353 pp = page0 = page_first(); 4354 do { 4355 struct vnode *vp; 4356 u_offset_t offset; 4357 int mod; 4358 4359 /* 4360 * skip the page if it has no vnode or the page associated 4361 * with the kernel vnode or prom allocated kernel mem. 4362 */ 4363 #if defined(__sparc) 4364 if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp) || 4365 vp == &prom_ppages) 4366 #else /* x86 doesn't have prom or prom_ppage */ 4367 if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp)) 4368 #endif /* __sparc */ 4369 continue; 4370 4371 /* 4372 * skip the page which is already free invalidated. 4373 */ 4374 if (PP_ISFREE(pp) && PP_ISAGED(pp)) 4375 continue; 4376 4377 /* 4378 * skip pages that are already locked or can't be "exclusively" 4379 * locked or are already free. 
After we lock the page, check 4380 * the free and age bits again to be sure it's not destroied 4381 * yet. 4382 * To achieve max. parallelization, we use page_trylock instead 4383 * of page_lock so that we don't get block on individual pages 4384 * while we have thousands of other pages to process. 4385 */ 4386 if (!page_trylock(pp, SE_EXCL)) { 4387 nbusypages++; 4388 continue; 4389 } else if (PP_ISFREE(pp)) { 4390 if (!PP_ISAGED(pp)) { 4391 page_destroy_free(pp); 4392 } else { 4393 page_unlock(pp); 4394 } 4395 continue; 4396 } 4397 /* 4398 * Is this page involved in some I/O? shared? 4399 * 4400 * The page_struct_lock need not be acquired to 4401 * examine these fields since the page has an 4402 * "exclusive" lock. 4403 */ 4404 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 4405 page_unlock(pp); 4406 continue; 4407 } 4408 4409 if (vp->v_type == VCHR) { 4410 panic("vp->v_type == VCHR"); 4411 /*NOTREACHED*/ 4412 } 4413 4414 if (!page_try_demote_pages(pp)) { 4415 page_unlock(pp); 4416 continue; 4417 } 4418 4419 /* 4420 * Check the modified bit. Leave the bits alone in hardware 4421 * (they will be modified if we do the putpage). 4422 */ 4423 mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) 4424 & P_MOD); 4425 if (mod) { 4426 offset = pp->p_offset; 4427 /* 4428 * Hold the vnode before releasing the page lock 4429 * to prevent it from being freed and re-used by 4430 * some other thread. 4431 */ 4432 VN_HOLD(vp); 4433 page_unlock(pp); 4434 /* 4435 * No error return is checked here. Callers such as 4436 * cpr deals with the dirty pages at the dump time 4437 * if this putpage fails. 4438 */ 4439 (void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL, 4440 kcred, NULL); 4441 VN_RELE(vp); 4442 } else { 4443 page_destroy(pp, 0); 4444 } 4445 } while ((pp = page_next(pp)) != page0); 4446 if (nbusypages && retry++ < MAXRETRIES) { 4447 delay(1); 4448 goto top; 4449 } 4450 } 4451 4452 /* 4453 * Replace the page "old" with the page "new" on the page hash and vnode lists 4454 * 4455 * the replacement must be done in place, ie the equivalent sequence: 4456 * 4457 * vp = old->p_vnode; 4458 * off = old->p_offset; 4459 * page_do_hashout(old) 4460 * page_do_hashin(new, vp, off) 4461 * 4462 * doesn't work, since 4463 * 1) if old is the only page on the vnode, the v_pages list has a window 4464 * where it looks empty. This will break file system assumptions. 4465 * and 4466 * 2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list. 
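 *
 * page_do_relocate_hash() below therefore splices "new" directly into the
 * page hash chain and the vnode's v_pages list in place of "old", while
 * both the page hash mutex and the vnode page mutex are held.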
4467 */ 4468 static void 4469 page_do_relocate_hash(page_t *new, page_t *old) 4470 { 4471 page_t **hash_list; 4472 vnode_t *vp = old->p_vnode; 4473 kmutex_t *sep; 4474 4475 ASSERT(PAGE_EXCL(old)); 4476 ASSERT(PAGE_EXCL(new)); 4477 ASSERT(vp != NULL); 4478 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 4479 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset)))); 4480 4481 /* 4482 * First find old page on the page hash list 4483 */ 4484 hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)]; 4485 4486 for (;;) { 4487 if (*hash_list == old) 4488 break; 4489 if (*hash_list == NULL) { 4490 panic("page_do_hashout"); 4491 /*NOTREACHED*/ 4492 } 4493 hash_list = &(*hash_list)->p_hash; 4494 } 4495 4496 /* 4497 * update new and replace old with new on the page hash list 4498 */ 4499 new->p_vnode = old->p_vnode; 4500 new->p_offset = old->p_offset; 4501 new->p_hash = old->p_hash; 4502 *hash_list = new; 4503 4504 if ((new->p_vnode->v_flag & VISSWAP) != 0) 4505 PP_SETSWAP(new); 4506 4507 /* 4508 * replace old with new on the vnode's page list 4509 */ 4510 if (old->p_vpnext == old) { 4511 new->p_vpnext = new; 4512 new->p_vpprev = new; 4513 } else { 4514 new->p_vpnext = old->p_vpnext; 4515 new->p_vpprev = old->p_vpprev; 4516 new->p_vpnext->p_vpprev = new; 4517 new->p_vpprev->p_vpnext = new; 4518 } 4519 if (vp->v_pages == old) 4520 vp->v_pages = new; 4521 4522 /* 4523 * clear out the old page 4524 */ 4525 old->p_hash = NULL; 4526 old->p_vpnext = NULL; 4527 old->p_vpprev = NULL; 4528 old->p_vnode = NULL; 4529 PP_CLRSWAP(old); 4530 old->p_offset = (u_offset_t)-1; 4531 page_clr_all_props(old); 4532 4533 /* 4534 * Wake up processes waiting for this page. The page's 4535 * identity has been changed, and is probably not the 4536 * desired page any longer. 4537 */ 4538 sep = page_se_mutex(old); 4539 mutex_enter(sep); 4540 old->p_selock &= ~SE_EWANTED; 4541 if (CV_HAS_WAITERS(&old->p_cv)) 4542 cv_broadcast(&old->p_cv); 4543 mutex_exit(sep); 4544 } 4545 4546 /* 4547 * This function moves the identity of page "pp_old" to page "pp_new". 4548 * Both pages must be locked on entry. "pp_new" is free, has no identity, 4549 * and need not be hashed out from anywhere. 4550 */ 4551 void 4552 page_relocate_hash(page_t *pp_new, page_t *pp_old) 4553 { 4554 vnode_t *vp = pp_old->p_vnode; 4555 u_offset_t off = pp_old->p_offset; 4556 kmutex_t *phm, *vphm; 4557 4558 /* 4559 * Rehash two pages 4560 */ 4561 ASSERT(PAGE_EXCL(pp_old)); 4562 ASSERT(PAGE_EXCL(pp_new)); 4563 ASSERT(vp != NULL); 4564 ASSERT(pp_new->p_vnode == NULL); 4565 4566 /* 4567 * hashout then hashin while holding the mutexes 4568 */ 4569 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off)); 4570 mutex_enter(phm); 4571 vphm = page_vnode_mutex(vp); 4572 mutex_enter(vphm); 4573 4574 page_do_relocate_hash(pp_new, pp_old); 4575 4576 mutex_exit(vphm); 4577 mutex_exit(phm); 4578 4579 /* 4580 * The page_struct_lock need not be acquired for lckcnt and 4581 * cowcnt since the page has an "exclusive" lock. 4582 */ 4583 ASSERT(pp_new->p_lckcnt == 0); 4584 ASSERT(pp_new->p_cowcnt == 0); 4585 pp_new->p_lckcnt = pp_old->p_lckcnt; 4586 pp_new->p_cowcnt = pp_old->p_cowcnt; 4587 pp_old->p_lckcnt = pp_old->p_cowcnt = 0; 4588 4589 /* The following comment preserved from page_flip(). */ 4590 /* XXX - Do we need to protect fsdata? */ 4591 pp_new->p_fsdata = pp_old->p_fsdata; 4592 } 4593 4594 /* 4595 * Helper routine used to lock all remaining members of a 4596 * large page. The caller is responsible for passing in a locked 4597 * pp. 
If pp is a large page, then it succeeds in locking all the 4598 * remaining constituent pages or it returns with only the 4599 * original page locked. 4600 * 4601 * Returns 1 on success, 0 on failure. 4602 * 4603 * If success is returned this routine guarantees p_szc for all constituent 4604 * pages of a large page pp belongs to can't change. To achieve this we 4605 * recheck szc of pp after locking all constituent pages and retry if szc 4606 * changed (it could only decrease). Since hat_page_demote() needs an EXCL 4607 * lock on one of constituent pages it can't be running after all constituent 4608 * pages are locked. hat_page_demote() with a lock on a constituent page 4609 * outside of this large page (i.e. pp belonged to a larger large page) is 4610 * already done with all constituent pages of pp since the root's p_szc is 4611 * changed last. Therefore no need to synchronize with hat_page_demote() that 4612 * locked a constituent page outside of pp's current large page. 4613 */ 4614 #ifdef DEBUG 4615 uint32_t gpg_trylock_mtbf = 0; 4616 #endif 4617 4618 int 4619 group_page_trylock(page_t *pp, se_t se) 4620 { 4621 page_t *tpp; 4622 pgcnt_t npgs, i, j; 4623 uint_t pszc = pp->p_szc; 4624 4625 #ifdef DEBUG 4626 if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) { 4627 return (0); 4628 } 4629 #endif 4630 4631 if (pp != PP_GROUPLEADER(pp, pszc)) { 4632 return (0); 4633 } 4634 4635 retry: 4636 ASSERT(PAGE_LOCKED_SE(pp, se)); 4637 ASSERT(!PP_ISFREE(pp)); 4638 if (pszc == 0) { 4639 return (1); 4640 } 4641 npgs = page_get_pagecnt(pszc); 4642 tpp = pp + 1; 4643 for (i = 1; i < npgs; i++, tpp++) { 4644 if (!page_trylock(tpp, se)) { 4645 tpp = pp + 1; 4646 for (j = 1; j < i; j++, tpp++) { 4647 page_unlock(tpp); 4648 } 4649 return (0); 4650 } 4651 } 4652 if (pp->p_szc != pszc) { 4653 ASSERT(pp->p_szc < pszc); 4654 ASSERT(pp->p_vnode != NULL && !PP_ISKAS(pp) && 4655 !IS_SWAPFSVP(pp->p_vnode)); 4656 tpp = pp + 1; 4657 for (i = 1; i < npgs; i++, tpp++) { 4658 page_unlock(tpp); 4659 } 4660 pszc = pp->p_szc; 4661 goto retry; 4662 } 4663 return (1); 4664 } 4665 4666 void 4667 group_page_unlock(page_t *pp) 4668 { 4669 page_t *tpp; 4670 pgcnt_t npgs, i; 4671 4672 ASSERT(PAGE_LOCKED(pp)); 4673 ASSERT(!PP_ISFREE(pp)); 4674 ASSERT(pp == PP_PAGEROOT(pp)); 4675 npgs = page_get_pagecnt(pp->p_szc); 4676 for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) { 4677 page_unlock(tpp); 4678 } 4679 } 4680 4681 /* 4682 * returns 4683 * 0 : on success and *nrelocp is number of relocated PAGESIZE pages 4684 * ERANGE : this is not a base page 4685 * EBUSY : failure to get locks on the page/pages 4686 * ENOMEM : failure to obtain replacement pages 4687 * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel 4688 * EIO : An error occurred while trying to copy the page data 4689 * 4690 * Return with all constituent members of target and replacement 4691 * SE_EXCL locked. It is the callers responsibility to drop the 4692 * locks. 
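 *
 * A minimal illustrative call sequence (hypothetical caller, error
 * handling elided; not taken from any particular consumer):
 *
 *	page_t		*targ = ...;	(base constituent page, SE_EXCL)
 *	page_t		*repl = NULL;	(let do_page_relocate() allocate)
 *	spgcnt_t	nreloc;
 *
 *	if (do_page_relocate(&targ, &repl, 1, &nreloc, NULL) == 0) {
 *		targ now heads the list of old constituent pages and repl
 *		the replacement(s); all remain SE_EXCL locked and must be
 *		freed or unlocked by the caller.
 *	}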
4693 */ 4694 int 4695 do_page_relocate( 4696 page_t **target, 4697 page_t **replacement, 4698 int grouplock, 4699 spgcnt_t *nrelocp, 4700 lgrp_t *lgrp) 4701 { 4702 page_t *first_repl; 4703 page_t *repl; 4704 page_t *targ; 4705 page_t *pl = NULL; 4706 uint_t ppattr; 4707 pfn_t pfn, repl_pfn; 4708 uint_t szc; 4709 spgcnt_t npgs, i; 4710 int repl_contig = 0; 4711 uint_t flags = 0; 4712 spgcnt_t dofree = 0; 4713 4714 *nrelocp = 0; 4715 4716 #if defined(__sparc) 4717 /* 4718 * We need to wait till OBP has completed 4719 * its boot-time handoff of its resources to the kernel 4720 * before we allow page relocation 4721 */ 4722 if (page_relocate_ready == 0) { 4723 return (EAGAIN); 4724 } 4725 #endif 4726 4727 /* 4728 * If this is not a base page, 4729 * just return with 0x0 pages relocated. 4730 */ 4731 targ = *target; 4732 ASSERT(PAGE_EXCL(targ)); 4733 ASSERT(!PP_ISFREE(targ)); 4734 szc = targ->p_szc; 4735 ASSERT(szc < mmu_page_sizes); 4736 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4737 pfn = targ->p_pagenum; 4738 if (pfn != PFN_BASE(pfn, szc)) { 4739 VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]); 4740 return (ERANGE); 4741 } 4742 4743 if ((repl = *replacement) != NULL && repl->p_szc >= szc) { 4744 repl_pfn = repl->p_pagenum; 4745 if (repl_pfn != PFN_BASE(repl_pfn, szc)) { 4746 VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]); 4747 return (ERANGE); 4748 } 4749 repl_contig = 1; 4750 } 4751 4752 /* 4753 * We must lock all members of this large page or we cannot 4754 * relocate any part of it. 4755 */ 4756 if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) { 4757 VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]); 4758 return (EBUSY); 4759 } 4760 4761 /* 4762 * reread szc it could have been decreased before 4763 * group_page_trylock() was done. 4764 */ 4765 szc = targ->p_szc; 4766 ASSERT(szc < mmu_page_sizes); 4767 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4768 ASSERT(pfn == PFN_BASE(pfn, szc)); 4769 4770 npgs = page_get_pagecnt(targ->p_szc); 4771 4772 if (repl == NULL) { 4773 dofree = npgs; /* Size of target page in MMU pages */ 4774 if (!page_create_wait(dofree, 0)) { 4775 if (grouplock != 0) { 4776 group_page_unlock(targ); 4777 } 4778 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4779 return (ENOMEM); 4780 } 4781 4782 /* 4783 * seg kmem pages require that the target and replacement 4784 * page be the same pagesize. 4785 */ 4786 flags = (VN_ISKAS(targ->p_vnode)) ? 
PGR_SAMESZC : 0; 4787 repl = page_get_replacement_page(targ, lgrp, flags); 4788 if (repl == NULL) { 4789 if (grouplock != 0) { 4790 group_page_unlock(targ); 4791 } 4792 page_create_putback(dofree); 4793 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4794 return (ENOMEM); 4795 } 4796 } 4797 #ifdef DEBUG 4798 else { 4799 ASSERT(PAGE_LOCKED(repl)); 4800 } 4801 #endif /* DEBUG */ 4802 4803 #if defined(__sparc) 4804 /* 4805 * Let hat_page_relocate() complete the relocation if it's kernel page 4806 */ 4807 if (VN_ISKAS(targ->p_vnode)) { 4808 *replacement = repl; 4809 if (hat_page_relocate(target, replacement, nrelocp) != 0) { 4810 if (grouplock != 0) { 4811 group_page_unlock(targ); 4812 } 4813 if (dofree) { 4814 *replacement = NULL; 4815 page_free_replacement_page(repl); 4816 page_create_putback(dofree); 4817 } 4818 VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]); 4819 return (EAGAIN); 4820 } 4821 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 4822 return (0); 4823 } 4824 #else 4825 #if defined(lint) 4826 dofree = dofree; 4827 #endif 4828 #endif 4829 4830 first_repl = repl; 4831 4832 for (i = 0; i < npgs; i++) { 4833 ASSERT(PAGE_EXCL(targ)); 4834 ASSERT(targ->p_slckcnt == 0); 4835 ASSERT(repl->p_slckcnt == 0); 4836 4837 (void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD); 4838 4839 ASSERT(hat_page_getshare(targ) == 0); 4840 ASSERT(!PP_ISFREE(targ)); 4841 ASSERT(targ->p_pagenum == (pfn + i)); 4842 ASSERT(repl_contig == 0 || 4843 repl->p_pagenum == (repl_pfn + i)); 4844 4845 /* 4846 * Copy the page contents and attributes then 4847 * relocate the page in the page hash. 4848 */ 4849 if (ppcopy(targ, repl) == 0) { 4850 targ = *target; 4851 repl = first_repl; 4852 VM_STAT_ADD(vmm_vmstats.ppr_copyfail); 4853 if (grouplock != 0) { 4854 group_page_unlock(targ); 4855 } 4856 if (dofree) { 4857 *replacement = NULL; 4858 page_free_replacement_page(repl); 4859 page_create_putback(dofree); 4860 } 4861 return (EIO); 4862 } 4863 4864 targ++; 4865 if (repl_contig != 0) { 4866 repl++; 4867 } else { 4868 repl = repl->p_next; 4869 } 4870 } 4871 4872 repl = first_repl; 4873 targ = *target; 4874 4875 for (i = 0; i < npgs; i++) { 4876 ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO)); 4877 page_clr_all_props(repl); 4878 page_set_props(repl, ppattr); 4879 page_relocate_hash(repl, targ); 4880 4881 ASSERT(hat_page_getshare(targ) == 0); 4882 ASSERT(hat_page_getshare(repl) == 0); 4883 /* 4884 * Now clear the props on targ, after the 4885 * page_relocate_hash(), they no longer 4886 * have any meaning. 4887 */ 4888 page_clr_all_props(targ); 4889 ASSERT(targ->p_next == targ); 4890 ASSERT(targ->p_prev == targ); 4891 page_list_concat(&pl, &targ); 4892 4893 targ++; 4894 if (repl_contig != 0) { 4895 repl++; 4896 } else { 4897 repl = repl->p_next; 4898 } 4899 } 4900 /* assert that we have come full circle with repl */ 4901 ASSERT(repl_contig == 1 || first_repl == repl); 4902 4903 *target = pl; 4904 if (*replacement == NULL) { 4905 ASSERT(first_repl == repl); 4906 *replacement = repl; 4907 } 4908 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 4909 *nrelocp = npgs; 4910 return (0); 4911 } 4912 /* 4913 * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated. 
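 * A nonzero return is the errno value propagated from do_page_relocate()
 * above. When "freetarget" is set and the relocation succeeds, the old
 * (target) pages are also freed here on behalf of the caller.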
4914 */ 4915 int 4916 page_relocate( 4917 page_t **target, 4918 page_t **replacement, 4919 int grouplock, 4920 int freetarget, 4921 spgcnt_t *nrelocp, 4922 lgrp_t *lgrp) 4923 { 4924 spgcnt_t ret; 4925 4926 /* do_page_relocate returns 0 on success or errno value */ 4927 ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp); 4928 4929 if (ret != 0 || freetarget == 0) { 4930 return (ret); 4931 } 4932 if (*nrelocp == 1) { 4933 ASSERT(*target != NULL); 4934 page_free(*target, 1); 4935 } else { 4936 page_t *tpp = *target; 4937 uint_t szc = tpp->p_szc; 4938 pgcnt_t npgs = page_get_pagecnt(szc); 4939 ASSERT(npgs > 1); 4940 ASSERT(szc != 0); 4941 do { 4942 ASSERT(PAGE_EXCL(tpp)); 4943 ASSERT(!hat_page_is_mapped(tpp)); 4944 ASSERT(tpp->p_szc == szc); 4945 PP_SETFREE(tpp); 4946 PP_SETAGED(tpp); 4947 npgs--; 4948 } while ((tpp = tpp->p_next) != *target); 4949 ASSERT(npgs == 0); 4950 page_list_add_pages(*target, 0); 4951 npgs = page_get_pagecnt(szc); 4952 page_create_putback(npgs); 4953 } 4954 return (ret); 4955 } 4956 4957 /* 4958 * it is up to the caller to deal with pcf accounting. 4959 */ 4960 void 4961 page_free_replacement_page(page_t *pplist) 4962 { 4963 page_t *pp; 4964 4965 while (pplist != NULL) { 4966 /* 4967 * pp_targ is a linked list. 4968 */ 4969 pp = pplist; 4970 if (pp->p_szc == 0) { 4971 page_sub(&pplist, pp); 4972 page_clr_all_props(pp); 4973 PP_SETFREE(pp); 4974 PP_SETAGED(pp); 4975 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 4976 page_unlock(pp); 4977 VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]); 4978 } else { 4979 spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc); 4980 page_t *tpp; 4981 page_list_break(&pp, &pplist, curnpgs); 4982 tpp = pp; 4983 do { 4984 ASSERT(PAGE_EXCL(tpp)); 4985 ASSERT(!hat_page_is_mapped(tpp)); 4986 page_clr_all_props(pp); 4987 PP_SETFREE(tpp); 4988 PP_SETAGED(tpp); 4989 } while ((tpp = tpp->p_next) != pp); 4990 page_list_add_pages(pp, 0); 4991 VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]); 4992 } 4993 } 4994 } 4995 4996 /* 4997 * Relocate target to non-relocatable replacement page. 4998 */ 4999 int 5000 page_relocate_cage(page_t **target, page_t **replacement) 5001 { 5002 page_t *tpp, *rpp; 5003 spgcnt_t pgcnt, npgs; 5004 int result; 5005 5006 tpp = *target; 5007 5008 ASSERT(PAGE_EXCL(tpp)); 5009 ASSERT(tpp->p_szc == 0); 5010 5011 pgcnt = btop(page_get_pagesize(tpp->p_szc)); 5012 5013 do { 5014 (void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC); 5015 rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC); 5016 if (rpp == NULL) { 5017 page_create_putback(pgcnt); 5018 kcage_cageout_wakeup(); 5019 } 5020 } while (rpp == NULL); 5021 5022 ASSERT(PP_ISNORELOC(rpp)); 5023 5024 result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL); 5025 5026 if (result == 0) { 5027 *replacement = rpp; 5028 if (pgcnt != npgs) 5029 panic("page_relocate_cage: partial relocation"); 5030 } 5031 5032 return (result); 5033 } 5034 5035 /* 5036 * Release the page lock on a page, place on cachelist 5037 * tail if no longer mapped. Caller can let us know if 5038 * the page is known to be clean. 
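 *
 * Returns one of:
 *	PGREL_CLEAN	page was disposed of to the cachelist
 *	PGREL_MOD	page is modified and "checkmod" was set; the page
 *			is only unlocked
 *	PGREL_NOTREL	page is mapped, locked or otherwise not releasable;
 *			the page is only unlocked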
5039 */ 5040 int 5041 page_release(page_t *pp, int checkmod) 5042 { 5043 int status; 5044 5045 ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) && 5046 (pp->p_vnode != NULL)); 5047 5048 if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) && 5049 ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) && 5050 pp->p_lckcnt == 0 && pp->p_cowcnt == 0 && 5051 !hat_page_is_mapped(pp)) { 5052 5053 /* 5054 * If page is modified, unlock it 5055 * 5056 * (p_nrm & P_MOD) bit has the latest stuff because: 5057 * (1) We found that this page doesn't have any mappings 5058 * _after_ holding SE_EXCL and 5059 * (2) We didn't drop SE_EXCL lock after the check in (1) 5060 */ 5061 if (checkmod && hat_ismod(pp)) { 5062 page_unlock(pp); 5063 status = PGREL_MOD; 5064 } else { 5065 /*LINTED: constant in conditional context*/ 5066 VN_DISPOSE(pp, B_FREE, 0, kcred); 5067 status = PGREL_CLEAN; 5068 } 5069 } else { 5070 page_unlock(pp); 5071 status = PGREL_NOTREL; 5072 } 5073 return (status); 5074 } 5075 5076 /* 5077 * Given a constituent page, try to demote the large page on the freelist. 5078 * 5079 * Returns nonzero if the page could be demoted successfully. Returns with 5080 * the constituent page still locked. 5081 */ 5082 int 5083 page_try_demote_free_pages(page_t *pp) 5084 { 5085 page_t *rootpp = pp; 5086 pfn_t pfn = page_pptonum(pp); 5087 spgcnt_t npgs; 5088 uint_t szc = pp->p_szc; 5089 5090 ASSERT(PP_ISFREE(pp)); 5091 ASSERT(PAGE_EXCL(pp)); 5092 5093 /* 5094 * Adjust rootpp and lock it, if `pp' is not the base 5095 * constituent page. 5096 */ 5097 npgs = page_get_pagecnt(pp->p_szc); 5098 if (npgs == 1) { 5099 return (0); 5100 } 5101 5102 if (!IS_P2ALIGNED(pfn, npgs)) { 5103 pfn = P2ALIGN(pfn, npgs); 5104 rootpp = page_numtopp_nolock(pfn); 5105 } 5106 5107 if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) { 5108 return (0); 5109 } 5110 5111 if (rootpp->p_szc != szc) { 5112 if (pp != rootpp) 5113 page_unlock(rootpp); 5114 return (0); 5115 } 5116 5117 page_demote_free_pages(rootpp); 5118 5119 if (pp != rootpp) 5120 page_unlock(rootpp); 5121 5122 ASSERT(PP_ISFREE(pp)); 5123 ASSERT(PAGE_EXCL(pp)); 5124 return (1); 5125 } 5126 5127 /* 5128 * Given a constituent page, try to demote the large page. 5129 * 5130 * Returns nonzero if the page could be demoted successfully. Returns with 5131 * the constituent page still locked. 5132 */ 5133 int 5134 page_try_demote_pages(page_t *pp) 5135 { 5136 page_t *tpp, *rootpp = pp; 5137 pfn_t pfn = page_pptonum(pp); 5138 spgcnt_t i, npgs; 5139 uint_t szc = pp->p_szc; 5140 vnode_t *vp = pp->p_vnode; 5141 5142 ASSERT(PAGE_EXCL(pp)); 5143 5144 VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]); 5145 5146 if (pp->p_szc == 0) { 5147 VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]); 5148 return (1); 5149 } 5150 5151 if (vp != NULL && !IS_SWAPFSVP(vp) && !VN_ISKAS(vp)) { 5152 VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]); 5153 page_demote_vp_pages(pp); 5154 ASSERT(pp->p_szc == 0); 5155 return (1); 5156 } 5157 5158 /* 5159 * Adjust rootpp if passed in is not the base 5160 * constituent page. 5161 */ 5162 npgs = page_get_pagecnt(pp->p_szc); 5163 ASSERT(npgs > 1); 5164 if (!IS_P2ALIGNED(pfn, npgs)) { 5165 pfn = P2ALIGN(pfn, npgs); 5166 rootpp = page_numtopp_nolock(pfn); 5167 VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]); 5168 ASSERT(rootpp->p_vnode != NULL); 5169 ASSERT(rootpp->p_szc == szc); 5170 } 5171 5172 /* 5173 * We can't demote kernel pages since we can't hat_unload() 5174 * the mappings. 
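 * (File system pages were already handled above by page_demote_vp_pages(),
 * so at this point the large page is either a swapfs (anonymous) page or
 * a kernel page; the latter is rejected just below.)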
5175 */ 5176 if (VN_ISKAS(rootpp->p_vnode)) 5177 return (0); 5178 5179 /* 5180 * Attempt to lock all constituent pages except the page passed 5181 * in since it's already locked. 5182 */ 5183 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5184 ASSERT(!PP_ISFREE(tpp)); 5185 ASSERT(tpp->p_vnode != NULL); 5186 5187 if (tpp != pp && !page_trylock(tpp, SE_EXCL)) 5188 break; 5189 ASSERT(tpp->p_szc == rootpp->p_szc); 5190 ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i); 5191 } 5192 5193 /* 5194 * If we failed to lock them all then unlock what we have 5195 * locked so far and bail. 5196 */ 5197 if (i < npgs) { 5198 tpp = rootpp; 5199 while (i-- > 0) { 5200 if (tpp != pp) 5201 page_unlock(tpp); 5202 tpp++; 5203 } 5204 VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]); 5205 return (0); 5206 } 5207 5208 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5209 ASSERT(PAGE_EXCL(tpp)); 5210 ASSERT(tpp->p_slckcnt == 0); 5211 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 5212 tpp->p_szc = 0; 5213 } 5214 5215 /* 5216 * Unlock all pages except the page passed in. 5217 */ 5218 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5219 ASSERT(!hat_page_is_mapped(tpp)); 5220 if (tpp != pp) 5221 page_unlock(tpp); 5222 } 5223 5224 VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]); 5225 return (1); 5226 } 5227 5228 /* 5229 * Called by page_free() and page_destroy() to demote the page size code 5230 * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non zero 5231 * p_szc on free list, neither can we just clear p_szc of a single page_t 5232 * within a large page since it will break other code that relies on p_szc 5233 * being the same for all page_t's of a large page). Anonymous pages should 5234 * never end up here because anon_map_getpages() cannot deal with p_szc 5235 * changes after a single constituent page is locked. While anonymous or 5236 * kernel large pages are demoted or freed the entire large page at a time 5237 * with all constituent pages locked EXCL for the file system pages we 5238 * have to be able to demote a large page (i.e. decrease all constituent pages 5239 * p_szc) with only just an EXCL lock on one of constituent pages. The reason 5240 * we can easily deal with anonymous page demotion the entire large page at a 5241 * time is that those operation originate at address space level and concern 5242 * the entire large page region with actual demotion only done when pages are 5243 * not shared with any other processes (therefore we can always get EXCL lock 5244 * on all anonymous constituent pages after clearing segment page 5245 * cache). However file system pages can be truncated or invalidated at a 5246 * PAGESIZE level from the file system side and end up in page_free() or 5247 * page_destroy() (we also allow only part of the large page to be SOFTLOCKed 5248 * and therefore pageout should be able to demote a large page by EXCL locking 5249 * any constituent page that is not under SOFTLOCK). In those cases we cannot 5250 * rely on being able to lock EXCL all constituent pages. 5251 * 5252 * To prevent szc changes on file system pages one has to lock all constituent 5253 * pages at least SHARED (or call page_szc_lock()). The only subsystem that 5254 * doesn't rely on locking all constituent pages (or using page_szc_lock()) to 5255 * prevent szc changes is hat layer that uses its own page level mlist 5256 * locks. hat assumes that szc doesn't change after mlist lock for a page is 5257 * taken. 
Therefore we need to change szc under hat level locks if we only 5258 * have an EXCL lock on a single constituent page and hat still references any 5259 * of constituent pages. (Note we can't "ignore" hat layer by simply 5260 * hat_pageunload() all constituent pages without having EXCL locks on all of 5261 * constituent pages). We use hat_page_demote() call to safely demote szc of 5262 * all constituent pages under hat locks when we only have an EXCL lock on one 5263 * of constituent pages. 5264 * 5265 * This routine calls page_szc_lock() before calling hat_page_demote() to 5266 * allow segvn in one special case not to lock all constituent pages SHARED 5267 * before calling hat_memload_array() that relies on p_szc not changing even 5268 * before hat level mlist lock is taken. In that case segvn uses 5269 * page_szc_lock() to prevent hat_page_demote() changing p_szc values. 5270 * 5271 * Anonymous or kernel page demotion still has to lock all pages exclusively 5272 * and do hat_pageunload() on all constituent pages before demoting the page 5273 * therefore there's no need for anonymous or kernel page demotion to use 5274 * hat_page_demote() mechanism. 5275 * 5276 * hat_page_demote() removes all large mappings that map pp and then decreases 5277 * p_szc starting from the last constituent page of the large page. By working 5278 * from the tail of a large page in pfn decreasing order allows one looking at 5279 * the root page to know that hat_page_demote() is done for root's szc area. 5280 * e.g. if a root page has szc 1 one knows it only has to lock all constituent 5281 * pages within szc 1 area to prevent szc changes because hat_page_demote() 5282 * that started on this page when it had szc > 1 is done for this szc 1 area. 5283 * 5284 * We are guaranteed that all constituent pages of pp's large page belong to 5285 * the same vnode with the consecutive offsets increasing in the direction of 5286 * the pfn i.e. the identity of constituent pages can't change until their 5287 * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove 5288 * large mappings to pp even though we don't lock any constituent page except 5289 * pp (i.e. we won't unload e.g. kernel locked page). 
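 *
 * The resulting sequence in page_demote_vp_pages() below is simply: take
 * page_szc_lock(), let hat_page_demote() remove the large mappings and
 * walk p_szc down to 0 for every constituent page, then drop the szc lock.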
5290 */ 5291 static void 5292 page_demote_vp_pages(page_t *pp) 5293 { 5294 kmutex_t *mtx; 5295 5296 ASSERT(PAGE_EXCL(pp)); 5297 ASSERT(!PP_ISFREE(pp)); 5298 ASSERT(pp->p_vnode != NULL); 5299 ASSERT(!IS_SWAPFSVP(pp->p_vnode)); 5300 ASSERT(!PP_ISKAS(pp)); 5301 5302 VM_STAT_ADD(pagecnt.pc_demote_pages[0]); 5303 5304 mtx = page_szc_lock(pp); 5305 if (mtx != NULL) { 5306 hat_page_demote(pp); 5307 mutex_exit(mtx); 5308 } 5309 ASSERT(pp->p_szc == 0); 5310 } 5311 5312 /* 5313 * Mark any existing pages for migration in the given range 5314 */ 5315 void 5316 page_mark_migrate(struct seg *seg, caddr_t addr, size_t len, 5317 struct anon_map *amp, ulong_t anon_index, vnode_t *vp, 5318 u_offset_t vnoff, int rflag) 5319 { 5320 struct anon *ap; 5321 vnode_t *curvp; 5322 lgrp_t *from; 5323 pgcnt_t i; 5324 pgcnt_t nlocked; 5325 u_offset_t off; 5326 pfn_t pfn; 5327 size_t pgsz; 5328 size_t segpgsz; 5329 pgcnt_t pages; 5330 uint_t pszc; 5331 page_t **ppa; 5332 pgcnt_t ppa_nentries; 5333 page_t *pp; 5334 caddr_t va; 5335 ulong_t an_idx; 5336 anon_sync_obj_t cookie; 5337 5338 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5339 5340 /* 5341 * Don't do anything if don't need to do lgroup optimizations 5342 * on this system 5343 */ 5344 if (!lgrp_optimizations()) 5345 return; 5346 5347 /* 5348 * Align address and length to (potentially large) page boundary 5349 */ 5350 segpgsz = page_get_pagesize(seg->s_szc); 5351 addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz); 5352 if (rflag) 5353 len = P2ROUNDUP(len, segpgsz); 5354 5355 /* 5356 * Allocate page array to accommodate largest page size 5357 */ 5358 pgsz = page_get_pagesize(page_num_pagesizes() - 1); 5359 ppa_nentries = btop(pgsz); 5360 ppa = kmem_zalloc(ppa_nentries * sizeof (page_t *), KM_SLEEP); 5361 5362 /* 5363 * Do one (large) page at a time 5364 */ 5365 va = addr; 5366 while (va < addr + len) { 5367 /* 5368 * Lookup (root) page for vnode and offset corresponding to 5369 * this virtual address 5370 * Try anonmap first since there may be copy-on-write 5371 * pages, but initialize vnode pointer and offset using 5372 * vnode arguments just in case there isn't an amp. 
5373 */ 5374 curvp = vp; 5375 off = vnoff + va - seg->s_base; 5376 if (amp) { 5377 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5378 an_idx = anon_index + seg_page(seg, va); 5379 anon_array_enter(amp, an_idx, &cookie); 5380 ap = anon_get_ptr(amp->ahp, an_idx); 5381 if (ap) 5382 swap_xlate(ap, &curvp, &off); 5383 anon_array_exit(&cookie); 5384 ANON_LOCK_EXIT(&->a_rwlock); 5385 } 5386 5387 pp = NULL; 5388 if (curvp) 5389 pp = page_lookup(curvp, off, SE_SHARED); 5390 5391 /* 5392 * If there isn't a page at this virtual address, 5393 * skip to next page 5394 */ 5395 if (pp == NULL) { 5396 va += PAGESIZE; 5397 continue; 5398 } 5399 5400 /* 5401 * Figure out which lgroup this page is in for kstats 5402 */ 5403 pfn = page_pptonum(pp); 5404 from = lgrp_pfn_to_lgrp(pfn); 5405 5406 /* 5407 * Get page size, and round up and skip to next page boundary 5408 * if unaligned address 5409 */ 5410 pszc = pp->p_szc; 5411 pgsz = page_get_pagesize(pszc); 5412 pages = btop(pgsz); 5413 if (!IS_P2ALIGNED(va, pgsz) || 5414 !IS_P2ALIGNED(pfn, pages) || 5415 pgsz > segpgsz) { 5416 pgsz = MIN(pgsz, segpgsz); 5417 page_unlock(pp); 5418 i = btop(P2END((uintptr_t)va, pgsz) - 5419 (uintptr_t)va); 5420 va = (caddr_t)P2END((uintptr_t)va, pgsz); 5421 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, i); 5422 continue; 5423 } 5424 5425 /* 5426 * Upgrade to exclusive lock on page 5427 */ 5428 if (!page_tryupgrade(pp)) { 5429 page_unlock(pp); 5430 va += pgsz; 5431 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 5432 btop(pgsz)); 5433 continue; 5434 } 5435 5436 /* 5437 * Remember pages locked exclusively and how many 5438 */ 5439 ppa[0] = pp; 5440 nlocked = 1; 5441 5442 /* 5443 * Lock constituent pages if this is large page 5444 */ 5445 if (pages > 1) { 5446 /* 5447 * Lock all constituents except root page, since it 5448 * should be locked already. 5449 */ 5450 for (i = 1; i < pages; i++) { 5451 pp++; 5452 if (!page_trylock(pp, SE_EXCL)) { 5453 break; 5454 } 5455 if (PP_ISFREE(pp) || 5456 pp->p_szc != pszc) { 5457 /* 5458 * hat_page_demote() raced in with us. 5459 */ 5460 ASSERT(!IS_SWAPFSVP(curvp)); 5461 page_unlock(pp); 5462 break; 5463 } 5464 ppa[nlocked] = pp; 5465 nlocked++; 5466 } 5467 } 5468 5469 /* 5470 * If all constituent pages couldn't be locked, 5471 * unlock pages locked so far and skip to next page. 5472 */ 5473 if (nlocked != pages) { 5474 for (i = 0; i < nlocked; i++) 5475 page_unlock(ppa[i]); 5476 va += pgsz; 5477 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 5478 btop(pgsz)); 5479 continue; 5480 } 5481 5482 /* 5483 * hat_page_demote() can no longer happen 5484 * since last cons page had the right p_szc after 5485 * all cons pages were locked. all cons pages 5486 * should now have the same p_szc. 
5487 */ 5488 5489 /* 5490 * All constituent pages locked successfully, so mark 5491 * large page for migration and unload the mappings of 5492 * constituent pages, so a fault will occur on any part of the 5493 * large page 5494 */ 5495 PP_SETMIGRATE(ppa[0]); 5496 for (i = 0; i < nlocked; i++) { 5497 pp = ppa[i]; 5498 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 5499 ASSERT(hat_page_getshare(pp) == 0); 5500 page_unlock(pp); 5501 } 5502 lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked); 5503 5504 va += pgsz; 5505 } 5506 kmem_free(ppa, ppa_nentries * sizeof (page_t *)); 5507 } 5508 5509 /* 5510 * Migrate any pages that have been marked for migration in the given range 5511 */ 5512 void 5513 page_migrate( 5514 struct seg *seg, 5515 caddr_t addr, 5516 page_t **ppa, 5517 pgcnt_t npages) 5518 { 5519 lgrp_t *from; 5520 lgrp_t *to; 5521 page_t *newpp; 5522 page_t *pp; 5523 pfn_t pfn; 5524 size_t pgsz; 5525 spgcnt_t page_cnt; 5526 spgcnt_t i; 5527 uint_t pszc; 5528 5529 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5530 5531 while (npages > 0) { 5532 pp = *ppa; 5533 pszc = pp->p_szc; 5534 pgsz = page_get_pagesize(pszc); 5535 page_cnt = btop(pgsz); 5536 5537 /* 5538 * Check to see whether this page is marked for migration 5539 * 5540 * Assume that root page of large page is marked for 5541 * migration and none of the other constituent pages 5542 * are marked. This really simplifies clearing the 5543 * migrate bit by not having to clear it from each 5544 * constituent page. 5545 * 5546 * note we don't want to relocate an entire large page if 5547 * someone is only using one subpage. 5548 */ 5549 if (npages < page_cnt) 5550 break; 5551 5552 /* 5553 * Is it marked for migration? 5554 */ 5555 if (!PP_ISMIGRATE(pp)) 5556 goto next; 5557 5558 /* 5559 * Determine lgroups that page is being migrated between 5560 */ 5561 pfn = page_pptonum(pp); 5562 if (!IS_P2ALIGNED(pfn, page_cnt)) { 5563 break; 5564 } 5565 from = lgrp_pfn_to_lgrp(pfn); 5566 to = lgrp_mem_choose(seg, addr, pgsz); 5567 5568 /* 5569 * Need to get exclusive lock's to migrate 5570 */ 5571 for (i = 0; i < page_cnt; i++) { 5572 ASSERT(PAGE_LOCKED(ppa[i])); 5573 if (page_pptonum(ppa[i]) != pfn + i || 5574 ppa[i]->p_szc != pszc) { 5575 break; 5576 } 5577 if (!page_tryupgrade(ppa[i])) { 5578 lgrp_stat_add(from->lgrp_id, 5579 LGRP_PM_FAIL_LOCK_PGS, 5580 page_cnt); 5581 break; 5582 } 5583 5584 /* 5585 * Check to see whether we are trying to migrate 5586 * page to lgroup where it is allocated already. 5587 * If so, clear the migrate bit and skip to next 5588 * page. 5589 */ 5590 if (i == 0 && to == from) { 5591 PP_CLRMIGRATE(ppa[0]); 5592 page_downgrade(ppa[0]); 5593 goto next; 5594 } 5595 } 5596 5597 /* 5598 * If all constituent pages couldn't be locked, 5599 * unlock pages locked so far and skip to next page. 
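 * Only a downgrade is needed: the pages arrived SHARED locked from the
 * caller and were merely upgraded above, so the EXCL locks are given
 * back rather than dropped.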
5600 */ 5601 if (i != page_cnt) { 5602 while (--i != -1) { 5603 page_downgrade(ppa[i]); 5604 } 5605 goto next; 5606 } 5607 5608 (void) page_create_wait(page_cnt, PG_WAIT); 5609 newpp = page_get_replacement_page(pp, to, PGR_SAMESZC); 5610 if (newpp == NULL) { 5611 page_create_putback(page_cnt); 5612 for (i = 0; i < page_cnt; i++) { 5613 page_downgrade(ppa[i]); 5614 } 5615 lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS, 5616 page_cnt); 5617 goto next; 5618 } 5619 ASSERT(newpp->p_szc == pszc); 5620 /* 5621 * Clear migrate bit and relocate page 5622 */ 5623 PP_CLRMIGRATE(pp); 5624 if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) { 5625 panic("page_migrate: page_relocate failed"); 5626 } 5627 ASSERT(page_cnt * PAGESIZE == pgsz); 5628 5629 /* 5630 * Keep stats for number of pages migrated from and to 5631 * each lgroup 5632 */ 5633 lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt); 5634 lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt); 5635 /* 5636 * update the page_t array we were passed in and 5637 * unlink constituent pages of a large page. 5638 */ 5639 for (i = 0; i < page_cnt; ++i, ++pp) { 5640 ASSERT(PAGE_EXCL(newpp)); 5641 ASSERT(newpp->p_szc == pszc); 5642 ppa[i] = newpp; 5643 pp = newpp; 5644 page_sub(&newpp, pp); 5645 page_downgrade(pp); 5646 } 5647 ASSERT(newpp == NULL); 5648 next: 5649 addr += pgsz; 5650 ppa += page_cnt; 5651 npages -= page_cnt; 5652 } 5653 } 5654 5655 ulong_t mem_waiters = 0; 5656 ulong_t max_count = 20; 5657 #define MAX_DELAY 0x1ff 5658 5659 /* 5660 * Check if enough memory is available to proceed. 5661 * Depending on system configuration and how much memory is 5662 * reserved for swap we need to check against two variables. 5663 * e.g. on systems with little physical swap availrmem can be 5664 * more reliable indicator of how much memory is available. 5665 * On systems with large phys swap freemem can be better indicator. 5666 * If freemem drops below threshold level don't return an error 5667 * immediately but wake up pageout to free memory and block. 5668 * This is done number of times. If pageout is not able to free 5669 * memory within certain time return an error. 5670 * The same applies for availrmem but kmem_reap is used to 5671 * free memory. 
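 *
 * Ignoring the additional heap_arena check made on 32-bit x86, the fast
 * path is roughly:
 *
 *	freemem   > desfree        + npages	&&
 *	availrmem > swapfs_reserve + npages
 *
 * and only when one of these fails do we wake pageout (or kmem_reap())
 * and wait.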
5672 */ 5673 int 5674 page_mem_avail(pgcnt_t npages) 5675 { 5676 ulong_t count; 5677 5678 #if defined(__i386) 5679 if (freemem > desfree + npages && 5680 availrmem > swapfs_reserve + npages && 5681 btop(vmem_size(heap_arena, VMEM_FREE)) > tune.t_minarmem + 5682 npages) 5683 return (1); 5684 #else 5685 if (freemem > desfree + npages && 5686 availrmem > swapfs_reserve + npages) 5687 return (1); 5688 #endif 5689 5690 count = max_count; 5691 atomic_add_long(&mem_waiters, 1); 5692 5693 while (freemem < desfree + npages && --count) { 5694 cv_signal(&proc_pageout->p_cv); 5695 if (delay_sig(hz + (mem_waiters & MAX_DELAY))) { 5696 atomic_add_long(&mem_waiters, -1); 5697 return (0); 5698 } 5699 } 5700 if (count == 0) { 5701 atomic_add_long(&mem_waiters, -1); 5702 return (0); 5703 } 5704 5705 count = max_count; 5706 while (availrmem < swapfs_reserve + npages && --count) { 5707 kmem_reap(); 5708 if (delay_sig(hz + (mem_waiters & MAX_DELAY))) { 5709 atomic_add_long(&mem_waiters, -1); 5710 return (0); 5711 } 5712 } 5713 atomic_add_long(&mem_waiters, -1); 5714 if (count == 0) 5715 return (0); 5716 5717 #if defined(__i386) 5718 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 5719 tune.t_minarmem + npages) 5720 return (0); 5721 #endif 5722 return (1); 5723 } 5724 5725 #define MAX_CNT 60 /* max num of iterations */ 5726 /* 5727 * Reclaim/reserve availrmem for npages. 5728 * If there is not enough memory start reaping seg, kmem caches. 5729 * Start pageout scanner (via page_needfree()). 5730 * Exit after ~ MAX_CNT s regardless of how much memory has been released. 5731 * Note: There is no guarantee that any availrmem will be freed as 5732 * this memory typically is locked (kernel heap) or reserved for swap. 5733 * Also due to memory fragmentation kmem allocator may not be able 5734 * to free any memory (single user allocated buffer will prevent 5735 * freeing slab or a page). 5736 */ 5737 int 5738 page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust) 5739 { 5740 int i = 0; 5741 int ret = 0; 5742 pgcnt_t deficit; 5743 pgcnt_t old_availrmem; 5744 5745 mutex_enter(&freemem_lock); 5746 old_availrmem = availrmem - 1; 5747 while ((availrmem < tune.t_minarmem + npages + epages) && 5748 (old_availrmem < availrmem) && (i++ < MAX_CNT)) { 5749 old_availrmem = availrmem; 5750 deficit = tune.t_minarmem + npages + epages - availrmem; 5751 mutex_exit(&freemem_lock); 5752 page_needfree(deficit); 5753 kmem_reap(); 5754 delay(hz); 5755 page_needfree(-(spgcnt_t)deficit); 5756 mutex_enter(&freemem_lock); 5757 } 5758 5759 if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) { 5760 availrmem -= npages; 5761 ret = 1; 5762 } 5763 5764 mutex_exit(&freemem_lock); 5765 5766 return (ret); 5767 } 5768 5769 /* 5770 * Search the memory segments to locate the desired page. Within a 5771 * segment, pages increase linearly with one page structure per 5772 * physical page frame (size PAGESIZE). The search begins 5773 * with the segment that was accessed last, to take advantage of locality. 5774 * If the hint misses, we start from the beginning of the sorted memseg list 5775 */ 5776 5777 5778 /* 5779 * Some data structures for pfn to pp lookup. 
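 *
 * memseg_hash[] maps a pfn (hashed with MEMSEG_PFN_HASH()) to the memseg
 * expected to contain it; mhash_per_slot is the number of pfns covered by
 * each hash slot. Both are (re)built by build_pfn_hash() below.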
5780 */ 5781 ulong_t mhash_per_slot; 5782 struct memseg *memseg_hash[N_MEM_SLOTS]; 5783 5784 page_t * 5785 page_numtopp_nolock(pfn_t pfnum) 5786 { 5787 struct memseg *seg; 5788 page_t *pp; 5789 vm_cpu_data_t *vc = CPU->cpu_vm_data; 5790 5791 ASSERT(vc != NULL); 5792 5793 MEMSEG_STAT_INCR(nsearch); 5794 5795 /* Try last winner first */ 5796 if (((seg = vc->vc_pnum_memseg) != NULL) && 5797 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5798 MEMSEG_STAT_INCR(nlastwon); 5799 pp = seg->pages + (pfnum - seg->pages_base); 5800 if (pp->p_pagenum == pfnum) 5801 return ((page_t *)pp); 5802 } 5803 5804 /* Else Try hash */ 5805 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 5806 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5807 MEMSEG_STAT_INCR(nhashwon); 5808 vc->vc_pnum_memseg = seg; 5809 pp = seg->pages + (pfnum - seg->pages_base); 5810 if (pp->p_pagenum == pfnum) 5811 return ((page_t *)pp); 5812 } 5813 5814 /* Else Brute force */ 5815 for (seg = memsegs; seg != NULL; seg = seg->next) { 5816 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5817 vc->vc_pnum_memseg = seg; 5818 pp = seg->pages + (pfnum - seg->pages_base); 5819 return ((page_t *)pp); 5820 } 5821 } 5822 vc->vc_pnum_memseg = NULL; 5823 MEMSEG_STAT_INCR(nnotfound); 5824 return ((page_t *)NULL); 5825 5826 } 5827 5828 struct memseg * 5829 page_numtomemseg_nolock(pfn_t pfnum) 5830 { 5831 struct memseg *seg; 5832 page_t *pp; 5833 5834 /* Try hash */ 5835 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 5836 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5837 pp = seg->pages + (pfnum - seg->pages_base); 5838 if (pp->p_pagenum == pfnum) 5839 return (seg); 5840 } 5841 5842 /* Else Brute force */ 5843 for (seg = memsegs; seg != NULL; seg = seg->next) { 5844 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5845 return (seg); 5846 } 5847 } 5848 return ((struct memseg *)NULL); 5849 } 5850 5851 /* 5852 * Given a page and a count return the page struct that is 5853 * n structs away from the current one in the global page 5854 * list. 5855 * 5856 * This function wraps to the first page upon 5857 * reaching the end of the memseg list. 5858 */ 5859 page_t * 5860 page_nextn(page_t *pp, ulong_t n) 5861 { 5862 struct memseg *seg; 5863 page_t *ppn; 5864 vm_cpu_data_t *vc = (vm_cpu_data_t *)CPU->cpu_vm_data; 5865 5866 ASSERT(vc != NULL); 5867 5868 if (((seg = vc->vc_pnext_memseg) == NULL) || 5869 (seg->pages_base == seg->pages_end) || 5870 !(pp >= seg->pages && pp < seg->epages)) { 5871 5872 for (seg = memsegs; seg; seg = seg->next) { 5873 if (pp >= seg->pages && pp < seg->epages) 5874 break; 5875 } 5876 5877 if (seg == NULL) { 5878 /* Memory delete got in, return something valid. */ 5879 /* TODO: fix me. */ 5880 seg = memsegs; 5881 pp = seg->pages; 5882 } 5883 } 5884 5885 /* check for wraparound - possible if n is large */ 5886 while ((ppn = (pp + n)) >= seg->epages || ppn < pp) { 5887 n -= seg->epages - pp; 5888 seg = seg->next; 5889 if (seg == NULL) 5890 seg = memsegs; 5891 pp = seg->pages; 5892 } 5893 vc->vc_pnext_memseg = seg; 5894 return (ppn); 5895 } 5896 5897 /* 5898 * Initialize for a loop using page_next_scan_large(). 5899 */ 5900 page_t * 5901 page_next_scan_init(void **cookie) 5902 { 5903 ASSERT(cookie != NULL); 5904 *cookie = (void *)memsegs; 5905 return ((page_t *)memsegs->pages); 5906 } 5907 5908 /* 5909 * Return the next page in a scan of page_t's, assuming we want 5910 * to skip over sub-pages within larger page sizes. 
5911 * 5912 * The cookie is used to keep track of the current memseg. 5913 */ 5914 page_t * 5915 page_next_scan_large( 5916 page_t *pp, 5917 ulong_t *n, 5918 void **cookie) 5919 { 5920 struct memseg *seg = (struct memseg *)*cookie; 5921 page_t *new_pp; 5922 ulong_t cnt; 5923 pfn_t pfn; 5924 5925 5926 /* 5927 * get the count of page_t's to skip based on the page size 5928 */ 5929 ASSERT(pp != NULL); 5930 if (pp->p_szc == 0) { 5931 cnt = 1; 5932 } else { 5933 pfn = page_pptonum(pp); 5934 cnt = page_get_pagecnt(pp->p_szc); 5935 cnt -= pfn & (cnt - 1); 5936 } 5937 *n += cnt; 5938 new_pp = pp + cnt; 5939 5940 /* 5941 * Catch if we went past the end of the current memory segment. If so, 5942 * just move to the next segment with pages. 5943 */ 5944 if (new_pp >= seg->epages) { 5945 do { 5946 seg = seg->next; 5947 if (seg == NULL) 5948 seg = memsegs; 5949 } while (seg->pages == seg->epages); 5950 new_pp = seg->pages; 5951 *cookie = (void *)seg; 5952 } 5953 5954 return (new_pp); 5955 } 5956 5957 5958 /* 5959 * Returns next page in list. Note: this function wraps 5960 * to the first page in the list upon reaching the end 5961 * of the list. Callers should be aware of this fact. 5962 */ 5963 5964 /* We should change this be a #define */ 5965 5966 page_t * 5967 page_next(page_t *pp) 5968 { 5969 return (page_nextn(pp, 1)); 5970 } 5971 5972 page_t * 5973 page_first() 5974 { 5975 return ((page_t *)memsegs->pages); 5976 } 5977 5978 5979 /* 5980 * This routine is called at boot with the initial memory configuration 5981 * and when memory is added or removed. 5982 */ 5983 void 5984 build_pfn_hash() 5985 { 5986 pfn_t cur; 5987 pgcnt_t index; 5988 struct memseg *pseg; 5989 int i; 5990 5991 /* 5992 * Clear memseg_hash array. 5993 * Since memory add/delete is designed to operate concurrently 5994 * with normal operation, the hash rebuild must be able to run 5995 * concurrently with page_numtopp_nolock(). To support this 5996 * functionality, assignments to memseg_hash array members must 5997 * be done atomically. 5998 * 5999 * NOTE: bzero() does not currently guarantee this for kernel 6000 * threads, and cannot be used here. 6001 */ 6002 for (i = 0; i < N_MEM_SLOTS; i++) 6003 memseg_hash[i] = NULL; 6004 6005 hat_kpm_mseghash_clear(N_MEM_SLOTS); 6006 6007 /* 6008 * Physmax is the last valid pfn. 6009 */ 6010 mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT; 6011 for (pseg = memsegs; pseg != NULL; pseg = pseg->next) { 6012 index = MEMSEG_PFN_HASH(pseg->pages_base); 6013 cur = pseg->pages_base; 6014 do { 6015 if (index >= N_MEM_SLOTS) 6016 index = MEMSEG_PFN_HASH(cur); 6017 6018 if (memseg_hash[index] == NULL || 6019 memseg_hash[index]->pages_base > pseg->pages_base) { 6020 memseg_hash[index] = pseg; 6021 hat_kpm_mseghash_update(index, pseg); 6022 } 6023 cur += mhash_per_slot; 6024 index++; 6025 } while (cur < pseg->pages_end); 6026 } 6027 } 6028 6029 /* 6030 * Return the pagenum for the pp 6031 */ 6032 pfn_t 6033 page_pptonum(page_t *pp) 6034 { 6035 return (pp->p_pagenum); 6036 } 6037 6038 /* 6039 * interface to the referenced and modified etc bits 6040 * in the PSM part of the page struct 6041 * when no locking is desired. 6042 */ 6043 void 6044 page_set_props(page_t *pp, uint_t flags) 6045 { 6046 ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0); 6047 pp->p_nrm |= (uchar_t)flags; 6048 } 6049 6050 void 6051 page_clr_all_props(page_t *pp) 6052 { 6053 pp->p_nrm = 0; 6054 } 6055 6056 /* 6057 * Clear p_lckcnt and p_cowcnt, adjusting freemem if required. 
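 * Returns the number of pages credited back (at most one for p_lckcnt
 * plus the full p_cowcnt); availrmem itself is only adjusted when
 * "adjust" is set.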
6058 */ 6059 int 6060 page_clear_lck_cow(page_t *pp, int adjust) 6061 { 6062 int f_amount; 6063 6064 ASSERT(PAGE_EXCL(pp)); 6065 6066 /* 6067 * The page_struct_lock need not be acquired here since 6068 * we require the caller hold the page exclusively locked. 6069 */ 6070 f_amount = 0; 6071 if (pp->p_lckcnt) { 6072 f_amount = 1; 6073 pp->p_lckcnt = 0; 6074 } 6075 if (pp->p_cowcnt) { 6076 f_amount += pp->p_cowcnt; 6077 pp->p_cowcnt = 0; 6078 } 6079 6080 if (adjust && f_amount) { 6081 mutex_enter(&freemem_lock); 6082 availrmem += f_amount; 6083 mutex_exit(&freemem_lock); 6084 } 6085 6086 return (f_amount); 6087 } 6088 6089 /* 6090 * The following functions is called from free_vp_pages() 6091 * for an inexact estimate of a newly free'd page... 6092 */ 6093 ulong_t 6094 page_share_cnt(page_t *pp) 6095 { 6096 return (hat_page_getshare(pp)); 6097 } 6098 6099 int 6100 page_isshared(page_t *pp) 6101 { 6102 return (hat_page_checkshare(pp, 1)); 6103 } 6104 6105 int 6106 page_isfree(page_t *pp) 6107 { 6108 return (PP_ISFREE(pp)); 6109 } 6110 6111 int 6112 page_isref(page_t *pp) 6113 { 6114 return (hat_page_getattr(pp, P_REF)); 6115 } 6116 6117 int 6118 page_ismod(page_t *pp) 6119 { 6120 return (hat_page_getattr(pp, P_MOD)); 6121 } 6122 6123 /* 6124 * The following code all currently relates to the page capture logic: 6125 * 6126 * This logic is used for cases where there is a desire to claim a certain 6127 * physical page in the system for the caller. As it may not be possible 6128 * to capture the page immediately, the p_toxic bits are used in the page 6129 * structure to indicate that someone wants to capture this page. When the 6130 * page gets unlocked, the toxic flag will be noted and an attempt to capture 6131 * the page will be made. If it is successful, the original callers callback 6132 * will be called with the page to do with it what they please. 6133 * 6134 * There is also an async thread which wakes up to attempt to capture 6135 * pages occasionally which have the capture bit set. All of the pages which 6136 * need to be captured asynchronously have been inserted into the 6137 * page_capture_hash and thus this thread walks that hash list. Items in the 6138 * hash have an expiration time so this thread handles that as well by removing 6139 * the item from the hash if it has expired. 6140 * 6141 * Some important things to note are: 6142 * - if the PR_CAPTURE bit is set on a page, then the page is in the 6143 * page_capture_hash. The page_capture_hash_head.pchh_mutex is needed 6144 * to set and clear this bit, and while the lock is held is the only time 6145 * you can add or remove an entry from the hash. 6146 * - the PR_CAPTURE bit can only be set and cleared while holding the 6147 * page_capture_hash_head.pchh_mutex 6148 * - the t_flag field of the thread struct is used with the T_CAPTURING 6149 * flag to prevent recursion while dealing with large pages. 6150 * - pages which need to be retired never expire on the page_capture_hash. 6151 */ 6152 6153 static void page_capture_thread(void); 6154 static kthread_t *pc_thread_id; 6155 kcondvar_t pc_cv; 6156 static kmutex_t pc_thread_mutex; 6157 static clock_t pc_thread_shortwait; 6158 static clock_t pc_thread_longwait; 6159 static int pc_thread_retry; 6160 6161 struct page_capture_callback pc_cb[PC_NUM_CALLBACKS]; 6162 6163 /* Note that this is a circular linked list */ 6164 typedef struct page_capture_hash_bucket { 6165 page_t *pp; 6166 uint_t szc; 6167 uint_t flags; 6168 clock_t expires; /* lbolt at which this request expires. 
*/ 6169 void *datap; /* Cached data passed in for callback */ 6170 struct page_capture_hash_bucket *next; 6171 struct page_capture_hash_bucket *prev; 6172 } page_capture_hash_bucket_t; 6173 6174 /* 6175 * Each hash bucket will have it's own mutex and two lists which are: 6176 * active (0): represents requests which have not been processed by 6177 * the page_capture async thread yet. 6178 * walked (1): represents requests which have been processed by the 6179 * page_capture async thread within it's given walk of this bucket. 6180 * 6181 * These are all needed so that we can synchronize all async page_capture 6182 * events. When the async thread moves to a new bucket, it will append the 6183 * walked list to the active list and walk each item one at a time, moving it 6184 * from the active list to the walked list. Thus if there is an async request 6185 * outstanding for a given page, it will always be in one of the two lists. 6186 * New requests will always be added to the active list. 6187 * If we were not able to capture a page before the request expired, we'd free 6188 * up the request structure which would indicate to page_capture that there is 6189 * no longer a need for the given page, and clear the PR_CAPTURE flag if 6190 * possible. 6191 */ 6192 typedef struct page_capture_hash_head { 6193 kmutex_t pchh_mutex; 6194 uint_t num_pages; 6195 page_capture_hash_bucket_t lists[2]; /* sentinel nodes */ 6196 } page_capture_hash_head_t; 6197 6198 #ifdef DEBUG 6199 #define NUM_PAGE_CAPTURE_BUCKETS 4 6200 #else 6201 #define NUM_PAGE_CAPTURE_BUCKETS 64 6202 #endif 6203 6204 page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS]; 6205 6206 /* for now use a very simple hash based upon the size of a page struct */ 6207 #define PAGE_CAPTURE_HASH(pp) \ 6208 ((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1))) 6209 6210 extern pgcnt_t swapfs_minfree; 6211 6212 int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap); 6213 6214 /* 6215 * a callback function is required for page capture requests. 6216 */ 6217 void 6218 page_capture_register_callback(uint_t index, clock_t duration, 6219 int (*cb_func)(page_t *, void *, uint_t)) 6220 { 6221 ASSERT(pc_cb[index].cb_active == 0); 6222 ASSERT(cb_func != NULL); 6223 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 6224 pc_cb[index].duration = duration; 6225 pc_cb[index].cb_func = cb_func; 6226 pc_cb[index].cb_active = 1; 6227 rw_exit(&pc_cb[index].cb_rwlock); 6228 } 6229 6230 void 6231 page_capture_unregister_callback(uint_t index) 6232 { 6233 int i, j; 6234 struct page_capture_hash_bucket *bp1; 6235 struct page_capture_hash_bucket *bp2; 6236 struct page_capture_hash_bucket *head = NULL; 6237 uint_t flags = (1 << index); 6238 6239 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 6240 ASSERT(pc_cb[index].cb_active == 1); 6241 pc_cb[index].duration = 0; /* Paranoia */ 6242 pc_cb[index].cb_func = NULL; /* Paranoia */ 6243 pc_cb[index].cb_active = 0; 6244 rw_exit(&pc_cb[index].cb_rwlock); 6245 6246 /* 6247 * Just move all the entries to a private list which we can walk 6248 * through without the need to hold any locks. 6249 * No more requests can get added to the hash lists for this consumer 6250 * as the cb_active field for the callback has been cleared. 
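 * While each bucket mutex is held we also clear PR_CAPTURE on the affected
 * pages and decrement the bucket's num_pages count; the private list is
 * then freed outside the locks.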
6251 */ 6252 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 6253 mutex_enter(&page_capture_hash[i].pchh_mutex); 6254 for (j = 0; j < 2; j++) { 6255 bp1 = page_capture_hash[i].lists[j].next; 6256 /* walk through all but first (sentinel) element */ 6257 while (bp1 != &page_capture_hash[i].lists[j]) { 6258 bp2 = bp1; 6259 if (bp2->flags & flags) { 6260 bp1 = bp2->next; 6261 bp1->prev = bp2->prev; 6262 bp2->prev->next = bp1; 6263 bp2->next = head; 6264 head = bp2; 6265 /* 6266 * Clear the PR_CAPTURE bit as we 6267 * hold appropriate locks here. 6268 */ 6269 page_clrtoxic(head->pp, PR_CAPTURE); 6270 page_capture_hash[i].num_pages--; 6271 continue; 6272 } 6273 bp1 = bp1->next; 6274 } 6275 } 6276 mutex_exit(&page_capture_hash[i].pchh_mutex); 6277 } 6278 6279 while (head != NULL) { 6280 bp1 = head; 6281 head = head->next; 6282 kmem_free(bp1, sizeof (*bp1)); 6283 } 6284 } 6285 6286 6287 /* 6288 * Find pp in the active list and move it to the walked list if it 6289 * exists. 6290 * Note that most often pp should be at the front of the active list 6291 * as it is currently used and thus there is no other sort of optimization 6292 * being done here as this is a linked list data structure. 6293 * Returns 1 on successful move or 0 if page could not be found. 6294 */ 6295 static int 6296 page_capture_move_to_walked(page_t *pp) 6297 { 6298 page_capture_hash_bucket_t *bp; 6299 int index; 6300 6301 index = PAGE_CAPTURE_HASH(pp); 6302 6303 mutex_enter(&page_capture_hash[index].pchh_mutex); 6304 bp = page_capture_hash[index].lists[0].next; 6305 while (bp != &page_capture_hash[index].lists[0]) { 6306 if (bp->pp == pp) { 6307 /* Remove from old list */ 6308 bp->next->prev = bp->prev; 6309 bp->prev->next = bp->next; 6310 6311 /* Add to new list */ 6312 bp->next = page_capture_hash[index].lists[1].next; 6313 bp->prev = &page_capture_hash[index].lists[1]; 6314 page_capture_hash[index].lists[1].next = bp; 6315 bp->next->prev = bp; 6316 mutex_exit(&page_capture_hash[index].pchh_mutex); 6317 6318 return (1); 6319 } 6320 bp = bp->next; 6321 } 6322 mutex_exit(&page_capture_hash[index].pchh_mutex); 6323 return (0); 6324 } 6325 6326 /* 6327 * Add a new entry to the page capture hash. The only case where a new 6328 * entry is not added is when the page capture consumer is no longer registered. 6329 * In this case, we'll silently not add the page to the hash. We know that 6330 * page retire will always be registered for the case where we are currently 6331 * unretiring a page and thus there are no conflicts. 
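 * Note that the flags word identifies the requesting consumer: each callback
 * registered in pc_cb[] owns one low-order bit, and the first bit found set
 * in flags selects the pc_cb[] entry whose duration determines when this
 * request will expire.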
6332 */ 6333 static void 6334 page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap) 6335 { 6336 page_capture_hash_bucket_t *bp1; 6337 page_capture_hash_bucket_t *bp2; 6338 int index; 6339 int cb_index; 6340 int i; 6341 #ifdef DEBUG 6342 page_capture_hash_bucket_t *tp1; 6343 int l; 6344 #endif 6345 6346 ASSERT(!(flags & CAPTURE_ASYNC)); 6347 6348 bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP); 6349 6350 bp1->pp = pp; 6351 bp1->szc = szc; 6352 bp1->flags = flags; 6353 bp1->datap = datap; 6354 6355 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6356 if ((flags >> cb_index) & 1) { 6357 break; 6358 } 6359 } 6360 6361 ASSERT(cb_index != PC_NUM_CALLBACKS); 6362 6363 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6364 if (pc_cb[cb_index].cb_active) { 6365 if (pc_cb[cb_index].duration == -1) { 6366 bp1->expires = (clock_t)-1; 6367 } else { 6368 bp1->expires = lbolt + pc_cb[cb_index].duration; 6369 } 6370 } else { 6371 /* There's no callback registered so don't add to the hash */ 6372 rw_exit(&pc_cb[cb_index].cb_rwlock); 6373 kmem_free(bp1, sizeof (*bp1)); 6374 return; 6375 } 6376 6377 index = PAGE_CAPTURE_HASH(pp); 6378 6379 /* 6380 * Only allow capture flag to be modified under this mutex. 6381 * Prevents multiple entries for same page getting added. 6382 */ 6383 mutex_enter(&page_capture_hash[index].pchh_mutex); 6384 6385 /* 6386 * if not already on the hash, set capture bit and add to the hash 6387 */ 6388 if (!(pp->p_toxic & PR_CAPTURE)) { 6389 #ifdef DEBUG 6390 /* Check for duplicate entries */ 6391 for (l = 0; l < 2; l++) { 6392 tp1 = page_capture_hash[index].lists[l].next; 6393 while (tp1 != &page_capture_hash[index].lists[l]) { 6394 if (tp1->pp == pp) { 6395 panic("page pp 0x%p already on hash " 6396 "at 0x%p\n", pp, tp1); 6397 } 6398 tp1 = tp1->next; 6399 } 6400 } 6401 6402 #endif 6403 page_settoxic(pp, PR_CAPTURE); 6404 bp1->next = page_capture_hash[index].lists[0].next; 6405 bp1->prev = &page_capture_hash[index].lists[0]; 6406 bp1->next->prev = bp1; 6407 page_capture_hash[index].lists[0].next = bp1; 6408 page_capture_hash[index].num_pages++; 6409 if (flags & CAPTURE_RETIRE) { 6410 page_retire_incr_pend_count(); 6411 } 6412 mutex_exit(&page_capture_hash[index].pchh_mutex); 6413 rw_exit(&pc_cb[cb_index].cb_rwlock); 6414 cv_signal(&pc_cv); 6415 return; 6416 } 6417 6418 /* 6419 * A page retire request will replace any other request. 6420 * A second physmem request which is for a different process than 6421 * the currently registered one will be dropped as there is 6422 * no way to hold the private data for both calls. 6423 * In the future, once there are more callers, this will have to 6424 * be worked out better as there needs to be private storage for 6425 * at least each type of caller (maybe have datap be an array of 6426 * *void's so that we can index based upon callers index). 6427 */ 6428 6429 /* walk hash list to update expire time */ 6430 for (i = 0; i < 2; i++) { 6431 bp2 = page_capture_hash[index].lists[i].next; 6432 while (bp2 != &page_capture_hash[index].lists[i]) { 6433 if (bp2->pp == pp) { 6434 if (flags & CAPTURE_RETIRE) { 6435 if (!(bp2->flags & CAPTURE_RETIRE)) { 6436 page_retire_incr_pend_count(); 6437 bp2->flags = flags; 6438 bp2->expires = bp1->expires; 6439 bp2->datap = datap; 6440 } 6441 } else { 6442 ASSERT(flags & CAPTURE_PHYSMEM); 6443 if (!(bp2->flags & CAPTURE_RETIRE) && 6444 (datap == bp2->datap)) { 6445 bp2->expires = bp1->expires; 6446 } 6447 } 6448 mutex_exit(&page_capture_hash[index]. 
6449 pchh_mutex); 6450 rw_exit(&pc_cb[cb_index].cb_rwlock); 6451 kmem_free(bp1, sizeof (*bp1)); 6452 return; 6453 } 6454 bp2 = bp2->next; 6455 } 6456 } 6457 6458 /* 6459 * the PR_CAPTURE flag is protected by the page_capture_hash mutexes 6460 * and thus it either has to be set or not set and can't change 6461 * while holding the mutex above. 6462 */ 6463 panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n", pp); 6464 } 6465 6466 /* 6467 * We have a page in our hands, lets try and make it ours by turning 6468 * it into a clean page like it had just come off the freelists. 6469 * 6470 * Returns 0 on success, with the page still EXCL locked. 6471 * On failure, the page will be unlocked, and returns EAGAIN 6472 */ 6473 static int 6474 page_capture_clean_page(page_t *pp) 6475 { 6476 page_t *newpp; 6477 int skip_unlock = 0; 6478 spgcnt_t count; 6479 page_t *tpp; 6480 int ret = 0; 6481 int extra; 6482 6483 ASSERT(PAGE_EXCL(pp)); 6484 ASSERT(!PP_RETIRED(pp)); 6485 ASSERT(curthread->t_flag & T_CAPTURING); 6486 6487 if (PP_ISFREE(pp)) { 6488 if (!page_reclaim(pp, NULL)) { 6489 skip_unlock = 1; 6490 ret = EAGAIN; 6491 goto cleanup; 6492 } 6493 ASSERT(pp->p_szc == 0); 6494 if (pp->p_vnode != NULL) { 6495 /* 6496 * Since this page came from the 6497 * cachelist, we must destroy the 6498 * old vnode association. 6499 */ 6500 page_hashout(pp, NULL); 6501 } 6502 goto cleanup; 6503 } 6504 6505 /* 6506 * If we know page_relocate will fail, skip it 6507 * It could still fail due to a UE on another page but we 6508 * can't do anything about that. 6509 */ 6510 if (pp->p_toxic & PR_UE) { 6511 goto skip_relocate; 6512 } 6513 6514 /* 6515 * It's possible that pages can not have a vnode as fsflush comes 6516 * through and cleans up these pages. It's ugly but that's how it is. 6517 */ 6518 if (pp->p_vnode == NULL) { 6519 goto skip_relocate; 6520 } 6521 6522 /* 6523 * Page was not free, so lets try to relocate it. 6524 * page_relocate only works with root pages, so if this is not a root 6525 * page, we need to demote it to try and relocate it. 6526 * Unfortunately this is the best we can do right now. 6527 */ 6528 newpp = NULL; 6529 if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) { 6530 if (page_try_demote_pages(pp) == 0) { 6531 ret = EAGAIN; 6532 goto cleanup; 6533 } 6534 } 6535 ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL); 6536 if (ret == 0) { 6537 page_t *npp; 6538 /* unlock the new page(s) */ 6539 while (count-- > 0) { 6540 ASSERT(newpp != NULL); 6541 npp = newpp; 6542 page_sub(&newpp, npp); 6543 page_unlock(npp); 6544 } 6545 ASSERT(newpp == NULL); 6546 /* 6547 * Check to see if the page we have is too large. 6548 * If so, demote it freeing up the extra pages. 6549 */ 6550 if (pp->p_szc > 0) { 6551 /* For now demote extra pages to szc == 0 */ 6552 extra = page_get_pagecnt(pp->p_szc) - 1; 6553 while (extra > 0) { 6554 tpp = pp->p_next; 6555 page_sub(&pp, tpp); 6556 tpp->p_szc = 0; 6557 page_free(tpp, 1); 6558 extra--; 6559 } 6560 /* Make sure to set our page to szc 0 as well */ 6561 ASSERT(pp->p_next == pp && pp->p_prev == pp); 6562 pp->p_szc = 0; 6563 } 6564 goto cleanup; 6565 } else if (ret == EIO) { 6566 ret = EAGAIN; 6567 goto cleanup; 6568 } else { 6569 /* 6570 * Need to reset return type as we failed to relocate the page 6571 * but that does not mean that some of the next steps will not 6572 * work. 
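		 * Execution then continues at skip_relocate below, where the
		 * page is demoted to szc 0 if necessary and rejected (EAGAIN)
		 * if it is modified, a kernel page, or has lock/cow counts,
		 * before its mappings are unloaded.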
6573 */ 6574 ret = 0; 6575 } 6576 6577 skip_relocate: 6578 6579 if (pp->p_szc > 0) { 6580 if (page_try_demote_pages(pp) == 0) { 6581 ret = EAGAIN; 6582 goto cleanup; 6583 } 6584 } 6585 6586 ASSERT(pp->p_szc == 0); 6587 6588 if (hat_ismod(pp)) { 6589 ret = EAGAIN; 6590 goto cleanup; 6591 } 6592 if (PP_ISKAS(pp)) { 6593 ret = EAGAIN; 6594 goto cleanup; 6595 } 6596 if (pp->p_lckcnt || pp->p_cowcnt) { 6597 ret = EAGAIN; 6598 goto cleanup; 6599 } 6600 6601 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 6602 ASSERT(!hat_page_is_mapped(pp)); 6603 6604 if (hat_ismod(pp)) { 6605 /* 6606 * This is a semi-odd case as the page is now modified but not 6607 * mapped as we just unloaded the mappings above. 6608 */ 6609 ret = EAGAIN; 6610 goto cleanup; 6611 } 6612 if (pp->p_vnode != NULL) { 6613 page_hashout(pp, NULL); 6614 } 6615 6616 /* 6617 * At this point, the page should be in a clean state and 6618 * we can do whatever we want with it. 6619 */ 6620 6621 cleanup: 6622 if (ret != 0) { 6623 if (!skip_unlock) { 6624 page_unlock(pp); 6625 } 6626 } else { 6627 ASSERT(pp->p_szc == 0); 6628 ASSERT(PAGE_EXCL(pp)); 6629 6630 pp->p_next = pp; 6631 pp->p_prev = pp; 6632 } 6633 return (ret); 6634 } 6635 6636 /* 6637 * Various callers of page_trycapture() can have different restrictions upon 6638 * what memory they have access to. 6639 * Returns 0 on success, with the following error codes on failure: 6640 * EPERM - The requested page is long term locked, and thus repeated 6641 * requests to capture this page will likely fail. 6642 * ENOMEM - There was not enough free memory in the system to safely 6643 * map the requested page. 6644 * ENOENT - The requested page was inside the kernel cage, and the 6645 * PHYSMEM_CAGE flag was not set. 6646 */ 6647 int 6648 page_capture_pre_checks(page_t *pp, uint_t flags) 6649 { 6650 #if defined(__sparc) 6651 extern struct vnode prom_ppages; 6652 #endif /* __sparc */ 6653 6654 ASSERT(pp != NULL); 6655 6656 #if defined(__sparc) 6657 if (pp->p_vnode == &prom_ppages) { 6658 return (EPERM); 6659 } 6660 6661 if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE) && 6662 (flags & CAPTURE_PHYSMEM)) { 6663 return (ENOENT); 6664 } 6665 6666 if (PP_ISNORELOCKERNEL(pp)) { 6667 return (EPERM); 6668 } 6669 #else 6670 if (PP_ISKAS(pp)) { 6671 return (EPERM); 6672 } 6673 #endif /* __sparc */ 6674 6675 /* only physmem currently has the restrictions checked below */ 6676 if (!(flags & CAPTURE_PHYSMEM)) { 6677 return (0); 6678 } 6679 6680 if (availrmem < swapfs_minfree) { 6681 /* 6682 * We won't try to capture this page as we are 6683 * running low on memory. 6684 */ 6685 return (ENOMEM); 6686 } 6687 return (0); 6688 } 6689 6690 /* 6691 * Once we have a page in our mits, go ahead and complete the capture 6692 * operation. 6693 * Returns 1 on failure where page is no longer needed 6694 * Returns 0 on success 6695 * Returns -1 if there was a transient failure. 6696 * Failure cases must release the SE_EXCL lock on pp (usually via page_free). 
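 * The callback's return value is passed straight through to our caller when
 * it is >= 0.  A negative (transient failure) return from an ASYNC request
 * causes the cached hash entry to be put back on the walked list, unless the
 * request has expired or a newer request for the page has taken its place.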
6697 */ 6698 int 6699 page_capture_take_action(page_t *pp, uint_t flags, void *datap) 6700 { 6701 int cb_index; 6702 int ret = 0; 6703 page_capture_hash_bucket_t *bp1; 6704 page_capture_hash_bucket_t *bp2; 6705 int index; 6706 int found = 0; 6707 int i; 6708 6709 ASSERT(PAGE_EXCL(pp)); 6710 ASSERT(curthread->t_flag & T_CAPTURING); 6711 6712 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6713 if ((flags >> cb_index) & 1) { 6714 break; 6715 } 6716 } 6717 ASSERT(cb_index < PC_NUM_CALLBACKS); 6718 6719 /* 6720 * Remove the entry from the page_capture hash, but don't free it yet 6721 * as we may need to put it back. 6722 * Since we own the page at this point in time, we should find it 6723 * in the hash if this is an ASYNC call. If we don't it's likely 6724 * that the page_capture_async() thread decided that this request 6725 * had expired, in which case we just continue on. 6726 */ 6727 if (flags & CAPTURE_ASYNC) { 6728 6729 index = PAGE_CAPTURE_HASH(pp); 6730 6731 mutex_enter(&page_capture_hash[index].pchh_mutex); 6732 for (i = 0; i < 2 && !found; i++) { 6733 bp1 = page_capture_hash[index].lists[i].next; 6734 while (bp1 != &page_capture_hash[index].lists[i]) { 6735 if (bp1->pp == pp) { 6736 bp1->next->prev = bp1->prev; 6737 bp1->prev->next = bp1->next; 6738 page_capture_hash[index].num_pages--; 6739 page_clrtoxic(pp, PR_CAPTURE); 6740 found = 1; 6741 break; 6742 } 6743 bp1 = bp1->next; 6744 } 6745 } 6746 mutex_exit(&page_capture_hash[index].pchh_mutex); 6747 } 6748 6749 /* Synchronize with the unregister func. */ 6750 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6751 if (!pc_cb[cb_index].cb_active) { 6752 page_free(pp, 1); 6753 rw_exit(&pc_cb[cb_index].cb_rwlock); 6754 if (found) { 6755 kmem_free(bp1, sizeof (*bp1)); 6756 } 6757 return (1); 6758 } 6759 6760 /* 6761 * We need to remove the entry from the page capture hash and turn off 6762 * the PR_CAPTURE bit before calling the callback. We'll need to cache 6763 * the entry here, and then based upon the return value, cleanup 6764 * appropriately or re-add it to the hash, making sure that someone else 6765 * hasn't already done so. 6766 * It should be rare for the callback to fail and thus it's ok for 6767 * the failure path to be a bit complicated as the success path is 6768 * cleaner and the locking rules are easier to follow. 6769 */ 6770 6771 ret = pc_cb[cb_index].cb_func(pp, datap, flags); 6772 6773 rw_exit(&pc_cb[cb_index].cb_rwlock); 6774 6775 /* 6776 * If this was an ASYNC request, we need to cleanup the hash if the 6777 * callback was successful or if the request was no longer valid. 6778 * For non-ASYNC requests, we return failure to map and the caller 6779 * will take care of adding the request to the hash. 6780 * Note also that the callback itself is responsible for the page 6781 * at this point in time in terms of locking ... The most common 6782 * case for the failure path should just be a page_free. 6783 */ 6784 if (ret >= 0) { 6785 if (found) { 6786 if (bp1->flags & CAPTURE_RETIRE) { 6787 page_retire_decr_pend_count(); 6788 } 6789 kmem_free(bp1, sizeof (*bp1)); 6790 } 6791 return (ret); 6792 } 6793 if (!found) { 6794 return (ret); 6795 } 6796 6797 ASSERT(flags & CAPTURE_ASYNC); 6798 6799 /* 6800 * Check for expiration time first as we can just free it up if it's 6801 * expired. 
6802 */ 6803 if (lbolt > bp1->expires && bp1->expires != -1) { 6804 kmem_free(bp1, sizeof (*bp1)); 6805 return (ret); 6806 } 6807 6808 /* 6809 * The callback failed and there used to be an entry in the hash for 6810 * this page, so we need to add it back to the hash. 6811 */ 6812 mutex_enter(&page_capture_hash[index].pchh_mutex); 6813 if (!(pp->p_toxic & PR_CAPTURE)) { 6814 /* just add bp1 back to head of walked list */ 6815 page_settoxic(pp, PR_CAPTURE); 6816 bp1->next = page_capture_hash[index].lists[1].next; 6817 bp1->prev = &page_capture_hash[index].lists[1]; 6818 bp1->next->prev = bp1; 6819 page_capture_hash[index].lists[1].next = bp1; 6820 page_capture_hash[index].num_pages++; 6821 mutex_exit(&page_capture_hash[index].pchh_mutex); 6822 return (ret); 6823 } 6824 6825 /* 6826 * Otherwise there was a new capture request added to list 6827 * Need to make sure that our original data is represented if 6828 * appropriate. 6829 */ 6830 for (i = 0; i < 2; i++) { 6831 bp2 = page_capture_hash[index].lists[i].next; 6832 while (bp2 != &page_capture_hash[index].lists[i]) { 6833 if (bp2->pp == pp) { 6834 if (bp1->flags & CAPTURE_RETIRE) { 6835 if (!(bp2->flags & CAPTURE_RETIRE)) { 6836 bp2->szc = bp1->szc; 6837 bp2->flags = bp1->flags; 6838 bp2->expires = bp1->expires; 6839 bp2->datap = bp1->datap; 6840 } 6841 } else { 6842 ASSERT(bp1->flags & CAPTURE_PHYSMEM); 6843 if (!(bp2->flags & CAPTURE_RETIRE)) { 6844 bp2->szc = bp1->szc; 6845 bp2->flags = bp1->flags; 6846 bp2->expires = bp1->expires; 6847 bp2->datap = bp1->datap; 6848 } 6849 } 6850 mutex_exit(&page_capture_hash[index]. 6851 pchh_mutex); 6852 kmem_free(bp1, sizeof (*bp1)); 6853 return (ret); 6854 } 6855 bp2 = bp2->next; 6856 } 6857 } 6858 panic("PR_CAPTURE set but not on hash for pp 0x%p\n", pp); 6859 /*NOTREACHED*/ 6860 } 6861 6862 /* 6863 * Try to capture the given page for the caller specified in the flags 6864 * parameter. The page will either be captured and handed over to the 6865 * appropriate callback, or will be queued up in the page capture hash 6866 * to be captured asynchronously. 6867 * If the current request is due to an async capture, the page must be 6868 * exclusively locked before calling this function. 6869 * Currently szc must be 0 but in the future this should be expandable to 6870 * other page sizes. 6871 * Returns 0 on success, with the following error codes on failure: 6872 * EPERM - The requested page is long term locked, and thus repeated 6873 * requests to capture this page will likely fail. 6874 * ENOMEM - There was not enough free memory in the system to safely 6875 * map the requested page. 6876 * ENOENT - The requested page was inside the kernel cage, and the 6877 * CAPTURE_GET_CAGE flag was not set. 6878 * EAGAIN - The requested page could not be capturead at this point in 6879 * time but future requests will likely work. 6880 * EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED flag 6881 * was not set. 6882 */ 6883 int 6884 page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 6885 { 6886 int ret; 6887 int cb_index; 6888 6889 if (flags & CAPTURE_ASYNC) { 6890 ASSERT(PAGE_EXCL(pp)); 6891 goto async; 6892 } 6893 6894 /* Make sure there's enough availrmem ... 
*/ 6895 ret = page_capture_pre_checks(pp, flags); 6896 if (ret != 0) { 6897 return (ret); 6898 } 6899 6900 if (!page_trylock(pp, SE_EXCL)) { 6901 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6902 if ((flags >> cb_index) & 1) { 6903 break; 6904 } 6905 } 6906 ASSERT(cb_index < PC_NUM_CALLBACKS); 6907 ret = EAGAIN; 6908 /* Special case for retired pages */ 6909 if (PP_RETIRED(pp)) { 6910 if (flags & CAPTURE_GET_RETIRED) { 6911 if (!page_unretire_pp(pp, PR_UNR_TEMP)) { 6912 /* 6913 * Need to set capture bit and add to 6914 * hash so that the page will be 6915 * retired when freed. 6916 */ 6917 page_capture_add_hash(pp, szc, 6918 CAPTURE_RETIRE, NULL); 6919 ret = 0; 6920 goto own_page; 6921 } 6922 } else { 6923 return (EBUSY); 6924 } 6925 } 6926 page_capture_add_hash(pp, szc, flags, datap); 6927 return (ret); 6928 } 6929 6930 async: 6931 ASSERT(PAGE_EXCL(pp)); 6932 6933 /* Need to check for physmem async requests that availrmem is sane */ 6934 if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) == 6935 (CAPTURE_ASYNC | CAPTURE_PHYSMEM) && 6936 (availrmem < swapfs_minfree)) { 6937 page_unlock(pp); 6938 return (ENOMEM); 6939 } 6940 6941 ret = page_capture_clean_page(pp); 6942 6943 if (ret != 0) { 6944 /* We failed to get the page, so lets add it to the hash */ 6945 if (!(flags & CAPTURE_ASYNC)) { 6946 page_capture_add_hash(pp, szc, flags, datap); 6947 } 6948 return (ret); 6949 } 6950 6951 own_page: 6952 ASSERT(PAGE_EXCL(pp)); 6953 ASSERT(pp->p_szc == 0); 6954 6955 /* Call the callback */ 6956 ret = page_capture_take_action(pp, flags, datap); 6957 6958 if (ret == 0) { 6959 return (0); 6960 } 6961 6962 /* 6963 * Note that in the failure cases from page_capture_take_action, the 6964 * EXCL lock will have already been dropped. 6965 */ 6966 if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) { 6967 page_capture_add_hash(pp, szc, flags, datap); 6968 } 6969 return (EAGAIN); 6970 } 6971 6972 int 6973 page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 6974 { 6975 int ret; 6976 6977 curthread->t_flag |= T_CAPTURING; 6978 ret = page_itrycapture(pp, szc, flags, datap); 6979 curthread->t_flag &= ~T_CAPTURING; /* xor works as we know its set */ 6980 return (ret); 6981 } 6982 6983 /* 6984 * When unlocking a page which has the PR_CAPTURE bit set, this routine 6985 * gets called to try and capture the page. 6986 */ 6987 void 6988 page_unlock_capture(page_t *pp) 6989 { 6990 page_capture_hash_bucket_t *bp; 6991 int index; 6992 int i; 6993 uint_t szc; 6994 uint_t flags = 0; 6995 void *datap; 6996 kmutex_t *mp; 6997 extern vnode_t retired_pages; 6998 6999 /* 7000 * We need to protect against a possible deadlock here where we own 7001 * the vnode page hash mutex and want to acquire it again as there 7002 * are locations in the code, where we unlock a page while holding 7003 * the mutex which can lead to the page being captured and eventually 7004 * end up here. As we may be hashing out the old page and hashing into 7005 * the retire vnode, we need to make sure we don't own them. 7006 * Other callbacks who do hash operations also need to make sure that 7007 * before they hashin to a vnode that they do not currently own the 7008 * vphm mutex otherwise there will be a panic. 
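 * In those cases we simply drop the lock with page_unlock_nocapture() rather
 * than attempting the capture here; any outstanding request stays on the
 * page_capture_hash, so the async thread can retry the capture later.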
7009 */ 7010 if (mutex_owned(page_vnode_mutex(&retired_pages))) { 7011 page_unlock_nocapture(pp); 7012 return; 7013 } 7014 if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) { 7015 page_unlock_nocapture(pp); 7016 return; 7017 } 7018 7019 index = PAGE_CAPTURE_HASH(pp); 7020 7021 mp = &page_capture_hash[index].pchh_mutex; 7022 mutex_enter(mp); 7023 for (i = 0; i < 2; i++) { 7024 bp = page_capture_hash[index].lists[i].next; 7025 while (bp != &page_capture_hash[index].lists[i]) { 7026 if (bp->pp == pp) { 7027 szc = bp->szc; 7028 flags = bp->flags | CAPTURE_ASYNC; 7029 datap = bp->datap; 7030 mutex_exit(mp); 7031 (void) page_trycapture(pp, szc, flags, datap); 7032 return; 7033 } 7034 bp = bp->next; 7035 } 7036 } 7037 7038 /* Failed to find page in hash so clear flags and unlock it. */ 7039 page_clrtoxic(pp, PR_CAPTURE); 7040 page_unlock(pp); 7041 7042 mutex_exit(mp); 7043 } 7044 7045 void 7046 page_capture_init() 7047 { 7048 int i; 7049 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7050 page_capture_hash[i].lists[0].next = 7051 &page_capture_hash[i].lists[0]; 7052 page_capture_hash[i].lists[0].prev = 7053 &page_capture_hash[i].lists[0]; 7054 page_capture_hash[i].lists[1].next = 7055 &page_capture_hash[i].lists[1]; 7056 page_capture_hash[i].lists[1].prev = 7057 &page_capture_hash[i].lists[1]; 7058 } 7059 7060 pc_thread_shortwait = 23 * hz; 7061 pc_thread_longwait = 1201 * hz; 7062 pc_thread_retry = 3; 7063 mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL); 7064 cv_init(&pc_cv, NULL, CV_DEFAULT, NULL); 7065 pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0, 7066 TS_RUN, minclsyspri); 7067 } 7068 7069 /* 7070 * It is necessary to scrub any failing pages prior to reboot in order to 7071 * prevent a latent error trap from occurring on the next boot. 7072 */ 7073 void 7074 page_retire_mdboot() 7075 { 7076 page_t *pp; 7077 int i, j; 7078 page_capture_hash_bucket_t *bp; 7079 7080 /* walk lists looking for pages to scrub */ 7081 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7082 if (page_capture_hash[i].num_pages == 0) 7083 continue; 7084 7085 mutex_enter(&page_capture_hash[i].pchh_mutex); 7086 7087 for (j = 0; j < 2; j++) { 7088 bp = page_capture_hash[i].lists[j].next; 7089 while (bp != &page_capture_hash[i].lists[j]) { 7090 pp = bp->pp; 7091 if (!PP_ISKAS(pp) && PP_TOXIC(pp)) { 7092 pp->p_selock = -1; /* pacify ASSERTs */ 7093 PP_CLRFREE(pp); 7094 pagescrub(pp, 0, PAGESIZE); 7095 pp->p_selock = 0; 7096 } 7097 bp = bp->next; 7098 } 7099 } 7100 mutex_exit(&page_capture_hash[i].pchh_mutex); 7101 } 7102 } 7103 7104 /* 7105 * Walk the page_capture_hash trying to capture pages and also cleanup old 7106 * entries which have expired. 
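 * For each bucket this splices the walked list onto the front of the active
 * list and then walks the active list: entries which have expired (or whose
 * page has been deleted) are freed and their PR_CAPTURE bit cleared, while
 * the remaining entries are tried with page_trycapture() and moved to the
 * walked list when the attempt does not succeed.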
7107 */ 7108 void 7109 page_capture_async() 7110 { 7111 page_t *pp; 7112 int i; 7113 int ret; 7114 page_capture_hash_bucket_t *bp1, *bp2; 7115 uint_t szc; 7116 uint_t flags; 7117 void *datap; 7118 7119 /* If there are outstanding pages to be captured, get to work */ 7120 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7121 if (page_capture_hash[i].num_pages == 0) 7122 continue; 7123 /* Append list 1 to list 0 and then walk through list 0 */ 7124 mutex_enter(&page_capture_hash[i].pchh_mutex); 7125 bp1 = &page_capture_hash[i].lists[1]; 7126 bp2 = bp1->next; 7127 if (bp1 != bp2) { 7128 bp1->prev->next = page_capture_hash[i].lists[0].next; 7129 bp2->prev = &page_capture_hash[i].lists[0]; 7130 page_capture_hash[i].lists[0].next->prev = bp1->prev; 7131 page_capture_hash[i].lists[0].next = bp2; 7132 bp1->next = bp1; 7133 bp1->prev = bp1; 7134 } 7135 7136 /* list[1] will be empty now */ 7137 7138 bp1 = page_capture_hash[i].lists[0].next; 7139 while (bp1 != &page_capture_hash[i].lists[0]) { 7140 /* Check expiration time */ 7141 if ((lbolt > bp1->expires && bp1->expires != -1) || 7142 page_deleted(bp1->pp)) { 7143 page_capture_hash[i].lists[0].next = bp1->next; 7144 bp1->next->prev = 7145 &page_capture_hash[i].lists[0]; 7146 page_capture_hash[i].num_pages--; 7147 7148 /* 7149 * We can safely remove the PR_CAPTURE bit 7150 * without holding the EXCL lock on the page 7151 * as the PR_CAPTURE bit requres that the 7152 * page_capture_hash[].pchh_mutex be held 7153 * to modify it. 7154 */ 7155 page_clrtoxic(bp1->pp, PR_CAPTURE); 7156 mutex_exit(&page_capture_hash[i].pchh_mutex); 7157 kmem_free(bp1, sizeof (*bp1)); 7158 mutex_enter(&page_capture_hash[i].pchh_mutex); 7159 bp1 = page_capture_hash[i].lists[0].next; 7160 continue; 7161 } 7162 pp = bp1->pp; 7163 szc = bp1->szc; 7164 flags = bp1->flags; 7165 datap = bp1->datap; 7166 mutex_exit(&page_capture_hash[i].pchh_mutex); 7167 if (page_trylock(pp, SE_EXCL)) { 7168 ret = page_trycapture(pp, szc, 7169 flags | CAPTURE_ASYNC, datap); 7170 } else { 7171 ret = 1; /* move to walked hash */ 7172 } 7173 7174 if (ret != 0) { 7175 /* Move to walked hash */ 7176 (void) page_capture_move_to_walked(pp); 7177 } 7178 mutex_enter(&page_capture_hash[i].pchh_mutex); 7179 bp1 = page_capture_hash[i].lists[0].next; 7180 } 7181 7182 mutex_exit(&page_capture_hash[i].pchh_mutex); 7183 } 7184 } 7185 7186 /* 7187 * This function is called by the page_capture_thread, and is needed in 7188 * in order to initiate aio cleanup, so that pages used in aio 7189 * will be unlocked and subsequently retired by page_capture_thread. 7190 */ 7191 static int 7192 do_aio_cleanup(void) 7193 { 7194 proc_t *procp; 7195 int (*aio_cleanup_dr_delete_memory)(proc_t *); 7196 int cleaned = 0; 7197 7198 if (modload("sys", "kaio") == -1) { 7199 cmn_err(CE_WARN, "do_aio_cleanup: cannot load kaio"); 7200 return (0); 7201 } 7202 /* 7203 * We use the aio_cleanup_dr_delete_memory function to 7204 * initiate the actual clean up; this function will wake 7205 * up the per-process aio_cleanup_thread. 
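	 * The symbol is resolved at runtime with modgetsymvalue() because
	 * kaio is a loadable module; the modload() call above ensures the
	 * module is present before we attempt the lookup.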
7206 */ 7207 aio_cleanup_dr_delete_memory = (int (*)(proc_t *)) 7208 modgetsymvalue("aio_cleanup_dr_delete_memory", 0); 7209 if (aio_cleanup_dr_delete_memory == NULL) { 7210 cmn_err(CE_WARN, 7211 "aio_cleanup_dr_delete_memory not found in kaio"); 7212 return (0); 7213 } 7214 mutex_enter(&pidlock); 7215 for (procp = practive; (procp != NULL); procp = procp->p_next) { 7216 mutex_enter(&procp->p_lock); 7217 if (procp->p_aio != NULL) { 7218 /* cleanup proc's outstanding kaio */ 7219 cleaned += (*aio_cleanup_dr_delete_memory)(procp); 7220 } 7221 mutex_exit(&procp->p_lock); 7222 } 7223 mutex_exit(&pidlock); 7224 return (cleaned); 7225 } 7226 7227 /* 7228 * helper function for page_capture_thread 7229 */ 7230 static void 7231 page_capture_handle_outstanding(void) 7232 { 7233 int ntry; 7234 7235 if (!page_retire_pend_count()) { 7236 /* 7237 * Do we really want to be this aggressive 7238 * for things other than page_retire? 7239 * Maybe have a counter for each callback 7240 * type to guide how aggressive we should 7241 * be here. Thus if there's at least one 7242 * page for page_retire we go ahead and reap 7243 * like this. 7244 */ 7245 kmem_reap(); 7246 seg_preap(); 7247 page_capture_async(); 7248 } else { 7249 /* 7250 * There are pages pending retirement, so 7251 * we reap prior to attempting to capture. 7252 */ 7253 kmem_reap(); 7254 7255 /* disable and purge seg_pcache */ 7256 (void) seg_p_disable(); 7257 for (ntry = 0; ntry < pc_thread_retry; ntry++) { 7258 if (!page_retire_pend_count()) 7259 break; 7260 if (do_aio_cleanup()) { 7261 /* 7262 * allow the apps cleanup threads 7263 * to run 7264 */ 7265 delay(pc_thread_shortwait); 7266 } 7267 page_capture_async(); 7268 } 7269 /* reenable seg_pcache */ 7270 seg_p_enable(); 7271 } 7272 } 7273 7274 /* 7275 * The page_capture_thread loops forever, looking to see if there are 7276 * pages still waiting to be captured. 7277 */ 7278 static void 7279 page_capture_thread(void) 7280 { 7281 callb_cpr_t c; 7282 int outstanding; 7283 int i; 7284 7285 CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr, "page_capture"); 7286 7287 mutex_enter(&pc_thread_mutex); 7288 for (;;) { 7289 outstanding = 0; 7290 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) 7291 outstanding += page_capture_hash[i].num_pages; 7292 if (outstanding) { 7293 page_capture_handle_outstanding(); 7294 CALLB_CPR_SAFE_BEGIN(&c); 7295 (void) cv_timedwait(&pc_cv, &pc_thread_mutex, 7296 lbolt + pc_thread_shortwait); 7297 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex); 7298 } else { 7299 CALLB_CPR_SAFE_BEGIN(&c); 7300 (void) cv_timedwait(&pc_cv, &pc_thread_mutex, 7301 lbolt + pc_thread_longwait); 7302 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex); 7303 } 7304 } 7305 /*NOTREACHED*/ 7306 } 7307 /* 7308 * Attempt to locate a bucket that has enough pages to satisfy the request. 7309 * The initial check is done without the lock to avoid unneeded contention. 7310 * The function returns 1 if enough pages were found, else 0 if it could not 7311 * find enough pages in a bucket. 7312 */ 7313 static int 7314 pcf_decrement_bucket(pgcnt_t npages) 7315 { 7316 struct pcf *p; 7317 struct pcf *q; 7318 int i; 7319 7320 p = &pcf[PCF_INDEX()]; 7321 q = &pcf[pcf_fanout]; 7322 for (i = 0; i < pcf_fanout; i++) { 7323 if (p->pcf_count > npages) { 7324 /* 7325 * a good one to try. 7326 */ 7327 mutex_enter(&p->pcf_lock); 7328 if (p->pcf_count > npages) { 7329 p->pcf_count -= (uint_t)npages; 7330 /* 7331 * freemem is not protected by any lock. 7332 * Thus, we cannot have any assertion 7333 * containing freemem here. 
7334 */ 7335 freemem -= npages; 7336 mutex_exit(&p->pcf_lock); 7337 return (1); 7338 } 7339 mutex_exit(&p->pcf_lock); 7340 } 7341 p++; 7342 if (p >= q) { 7343 p = pcf; 7344 } 7345 } 7346 return (0); 7347 } 7348 7349 /* 7350 * Arguments: 7351 * pcftotal_ret: If the value is not NULL and we have walked all the 7352 * buckets but did not find enough pages then it will 7353 * be set to the total number of pages in all the pcf 7354 * buckets. 7355 * npages: Is the number of pages we have been requested to 7356 * find. 7357 * unlock: If set to 0 we will leave the buckets locked if the 7358 * requested number of pages are not found. 7359 * 7360 * Go and try to satisfy the page request from any number of buckets. 7361 * This can be a very expensive operation as we have to lock the buckets 7362 * we are checking (and keep them locked), starting at bucket 0. 7363 * 7364 * The function returns 1 if enough pages were found, else 0 if it could not 7365 * find enough pages in the buckets. 7366 * 7367 */ 7368 static int 7369 pcf_decrement_multiple(pgcnt_t *pcftotal_ret, pgcnt_t npages, int unlock) 7370 { 7371 struct pcf *p; 7372 pgcnt_t pcftotal; 7373 int i; 7374 7375 p = pcf; 7376 /* try to collect pages from several pcf bins */ 7377 for (pcftotal = 0, i = 0; i < pcf_fanout; i++) { 7378 mutex_enter(&p->pcf_lock); 7379 pcftotal += p->pcf_count; 7380 if (pcftotal >= npages) { 7381 /* 7382 * Wow! There are enough pages laying around 7383 * to satisfy the request. Do the accounting, 7384 * drop the locks we acquired, and go back. 7385 * 7386 * freemem is not protected by any lock. So, 7387 * we cannot have any assertion containing 7388 * freemem. 7389 */ 7390 freemem -= npages; 7391 while (p >= pcf) { 7392 if (p->pcf_count <= npages) { 7393 npages -= p->pcf_count; 7394 p->pcf_count = 0; 7395 } else { 7396 p->pcf_count -= (uint_t)npages; 7397 npages = 0; 7398 } 7399 mutex_exit(&p->pcf_lock); 7400 p--; 7401 } 7402 ASSERT(npages == 0); 7403 return (1); 7404 } 7405 p++; 7406 } 7407 if (unlock) { 7408 /* failed to collect pages - release the locks */ 7409 while (--p >= pcf) { 7410 mutex_exit(&p->pcf_lock); 7411 } 7412 } 7413 if (pcftotal_ret != NULL) 7414 *pcftotal_ret = pcftotal; 7415 return (0); 7416 } 7417
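/*
 * An illustrative sketch (not from the original source) of how a consumer
 * might tie the capture interfaces above together.  MY_CB_INDEX, the
 * one-minute duration and my_capture_cb are hypothetical placeholders; the
 * callback receives the page SE_EXCL locked and clean, and must either
 * consume it or free it (returning non-zero) on failure.  szc must currently
 * be 0, and the flags bit must match the callback's pc_cb[] slot:
 *
 *	static int
 *	my_capture_cb(page_t *pp, void *datap, uint_t flags)
 *	{
 *		...	(take ownership of pp, or page_free(pp, 1) and fail)
 *		return (0);
 *	}
 *
 *	page_capture_register_callback(MY_CB_INDEX, 60 * hz, my_capture_cb);
 *	ret = page_trycapture(pp, 0, (1 << MY_CB_INDEX), NULL);
 *	...
 *	page_capture_unregister_callback(MY_CB_INDEX);
 */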