1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 25 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */ 26 /* All Rights Reserved */ 27 28 /* 29 * University Copyright- Copyright (c) 1982, 1986, 1988 30 * The Regents of the University of California 31 * All Rights Reserved 32 * 33 * University Acknowledgment- Portions of this document are derived from 34 * software developed by the University of California, Berkeley, and its 35 * contributors. 36 */ 37 38 /* 39 * VM - physical page management. 40 */ 41 42 #include <sys/types.h> 43 #include <sys/t_lock.h> 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/errno.h> 47 #include <sys/time.h> 48 #include <sys/vnode.h> 49 #include <sys/vm.h> 50 #include <sys/vtrace.h> 51 #include <sys/swap.h> 52 #include <sys/cmn_err.h> 53 #include <sys/tuneable.h> 54 #include <sys/sysmacros.h> 55 #include <sys/cpuvar.h> 56 #include <sys/callb.h> 57 #include <sys/debug.h> 58 #include <sys/tnf_probe.h> 59 #include <sys/condvar_impl.h> 60 #include <sys/mem_config.h> 61 #include <sys/mem_cage.h> 62 #include <sys/kmem.h> 63 #include <sys/atomic.h> 64 #include <sys/strlog.h> 65 #include <sys/mman.h> 66 #include <sys/ontrap.h> 67 #include <sys/lgrp.h> 68 #include <sys/vfs.h> 69 70 #include <vm/hat.h> 71 #include <vm/anon.h> 72 #include <vm/page.h> 73 #include <vm/seg.h> 74 #include <vm/pvn.h> 75 #include <vm/seg_kmem.h> 76 #include <vm/vm_dep.h> 77 #include <sys/vm_usage.h> 78 #include <fs/fs_subr.h> 79 #include <sys/ddi.h> 80 #include <sys/modctl.h> 81 82 static int nopageage = 0; 83 84 static pgcnt_t max_page_get; /* max page_get request size in pages */ 85 pgcnt_t total_pages = 0; /* total number of pages (used by /proc) */ 86 87 /* 88 * freemem_lock protects all freemem variables: 89 * availrmem. Also this lock protects the globals which track the 90 * availrmem changes for accurate kernel footprint calculation. 91 * See below for an explanation of these 92 * globals. 93 */ 94 kmutex_t freemem_lock; 95 pgcnt_t availrmem; 96 pgcnt_t availrmem_initial; 97 98 /* 99 * These globals track availrmem changes to get a more accurate 100 * estimate of tke kernel size. Historically pp_kernel is used for 101 * kernel size and is based on availrmem. But availrmem is adjusted for 102 * locked pages in the system not just for kernel locked pages. 103 * These new counters will track the pages locked through segvn and 104 * by explicit user locking. 105 * 106 * pages_locked : How many pages are locked because of user specified 107 * locking through mlock or plock. 108 * 109 * pages_useclaim,pages_claimed : These two variables track the 110 * claim adjustments because of the protection changes on a segvn segment. 111 * 112 * The pages_locked is protected by pages_locked_lock lock. 113 * All other globals are protected by the same lock which protects availrmem. 114 */ 115 pgcnt_t pages_locked = 0; 116 pgcnt_t pages_useclaim = 0; 117 pgcnt_t pages_claimed = 0; 118 kmutex_t pages_locked_lock; 119 120 /* 121 * new_freemem_lock protects freemem, freemem_wait & freemem_cv. 122 */ 123 static kmutex_t new_freemem_lock; 124 static uint_t freemem_wait; /* someone waiting for freemem */ 125 static kcondvar_t freemem_cv; 126 127 /* 128 * The logical page free list is maintained as two lists, the 'free' 129 * and the 'cache' lists. 130 * The free list contains those pages that should be reused first. 131 * 132 * The implementation of the lists is machine dependent. 133 * page_get_freelist(), page_get_cachelist(), 134 * page_list_sub(), and page_list_add() 135 * form the interface to the machine dependent implementation. 136 * 137 * Pages with p_free set are on the cache list. 138 * Pages with p_free and p_age set are on the free list, 139 * 140 * A page may be locked while on either list. 141 */ 142 143 /* 144 * free list accounting stuff. 145 * 146 * 147 * Spread out the value for the number of pages on the 148 * page free and page cache lists. If there is just one 149 * value, then it must be under just one lock. 150 * The lock contention and cache traffic are a real bother. 151 * 152 * When we acquire and then drop a single pcf lock 153 * we can start in the middle of the array of pcf structures. 154 * If we acquire more than one pcf lock at a time, we need to 155 * start at the front to avoid deadlocking. 156 * 157 * pcf_count holds the number of pages in each pool. 158 * 159 * pcf_block is set when page_create_get_something() has asked the 160 * PSM page freelist and page cachelist routines without specifying 161 * a color and nothing came back. This is used to block anything 162 * else from moving pages from one list to the other while the 163 * lists are searched again. If a page is freeed while pcf_block is 164 * set, then pcf_reserve is incremented. pcgs_unblock() takes care 165 * of clearning pcf_block, doing the wakeups, etc. 166 */ 167 168 #define MAX_PCF_FANOUT NCPU 169 static uint_t pcf_fanout = 1; /* Will get changed at boot time */ 170 static uint_t pcf_fanout_mask = 0; 171 172 struct pcf { 173 kmutex_t pcf_lock; /* protects the structure */ 174 uint_t pcf_count; /* page count */ 175 uint_t pcf_wait; /* number of waiters */ 176 uint_t pcf_block; /* pcgs flag to page_free() */ 177 uint_t pcf_reserve; /* pages freed after pcf_block set */ 178 uint_t pcf_fill[10]; /* to line up on the caches */ 179 }; 180 181 /* 182 * PCF_INDEX hash needs to be dynamic (every so often the hash changes where 183 * it will hash the cpu to). This is done to prevent a drain condition 184 * from happening. This drain condition will occur when pcf_count decrement 185 * occurs on cpu A and the increment of pcf_count always occurs on cpu B. An 186 * example of this shows up with device interrupts. The dma buffer is allocated 187 * by the cpu requesting the IO thus the pcf_count is decremented based on that. 188 * When the memory is returned by the interrupt thread, the pcf_count will be 189 * incremented based on the cpu servicing the interrupt. 190 */ 191 static struct pcf pcf[MAX_PCF_FANOUT]; 192 #define PCF_INDEX() ((int)(((long)CPU->cpu_seqid) + \ 193 (randtick() >> 24)) & (pcf_fanout_mask)) 194 195 static int pcf_decrement_bucket(pgcnt_t); 196 static int pcf_decrement_multiple(pgcnt_t *, pgcnt_t, int); 197 198 kmutex_t pcgs_lock; /* serializes page_create_get_ */ 199 kmutex_t pcgs_cagelock; /* serializes NOSLEEP cage allocs */ 200 kmutex_t pcgs_wait_lock; /* used for delay in pcgs */ 201 static kcondvar_t pcgs_cv; /* cv for delay in pcgs */ 202 203 #ifdef VM_STATS 204 205 /* 206 * No locks, but so what, they are only statistics. 207 */ 208 209 static struct page_tcnt { 210 int pc_free_cache; /* free's into cache list */ 211 int pc_free_dontneed; /* free's with dontneed */ 212 int pc_free_pageout; /* free's from pageout */ 213 int pc_free_free; /* free's into free list */ 214 int pc_free_pages; /* free's into large page free list */ 215 int pc_destroy_pages; /* large page destroy's */ 216 int pc_get_cache; /* get's from cache list */ 217 int pc_get_free; /* get's from free list */ 218 int pc_reclaim; /* reclaim's */ 219 int pc_abortfree; /* abort's of free pages */ 220 int pc_find_hit; /* find's that find page */ 221 int pc_find_miss; /* find's that don't find page */ 222 int pc_destroy_free; /* # of free pages destroyed */ 223 #define PC_HASH_CNT (4*PAGE_HASHAVELEN) 224 int pc_find_hashlen[PC_HASH_CNT+1]; 225 int pc_addclaim_pages; 226 int pc_subclaim_pages; 227 int pc_free_replacement_page[2]; 228 int pc_try_demote_pages[6]; 229 int pc_demote_pages[2]; 230 } pagecnt; 231 232 uint_t hashin_count; 233 uint_t hashin_not_held; 234 uint_t hashin_already; 235 236 uint_t hashout_count; 237 uint_t hashout_not_held; 238 239 uint_t page_create_count; 240 uint_t page_create_not_enough; 241 uint_t page_create_not_enough_again; 242 uint_t page_create_zero; 243 uint_t page_create_hashout; 244 uint_t page_create_page_lock_failed; 245 uint_t page_create_trylock_failed; 246 uint_t page_create_found_one; 247 uint_t page_create_hashin_failed; 248 uint_t page_create_dropped_phm; 249 250 uint_t page_create_new; 251 uint_t page_create_exists; 252 uint_t page_create_putbacks; 253 uint_t page_create_overshoot; 254 255 uint_t page_reclaim_zero; 256 uint_t page_reclaim_zero_locked; 257 258 uint_t page_rename_exists; 259 uint_t page_rename_count; 260 261 uint_t page_lookup_cnt[20]; 262 uint_t page_lookup_nowait_cnt[10]; 263 uint_t page_find_cnt; 264 uint_t page_exists_cnt; 265 uint_t page_exists_forreal_cnt; 266 uint_t page_lookup_dev_cnt; 267 uint_t get_cachelist_cnt; 268 uint_t page_create_cnt[10]; 269 uint_t alloc_pages[9]; 270 uint_t page_exphcontg[19]; 271 uint_t page_create_large_cnt[10]; 272 273 /* 274 * Collects statistics. 275 */ 276 #define PAGE_HASH_SEARCH(index, pp, vp, off) { \ 277 uint_t mylen = 0; \ 278 \ 279 for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash, mylen++) { \ 280 if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \ 281 break; \ 282 } \ 283 if ((pp) != NULL) \ 284 pagecnt.pc_find_hit++; \ 285 else \ 286 pagecnt.pc_find_miss++; \ 287 if (mylen > PC_HASH_CNT) \ 288 mylen = PC_HASH_CNT; \ 289 pagecnt.pc_find_hashlen[mylen]++; \ 290 } 291 292 #else /* VM_STATS */ 293 294 /* 295 * Don't collect statistics 296 */ 297 #define PAGE_HASH_SEARCH(index, pp, vp, off) { \ 298 for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \ 299 if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \ 300 break; \ 301 } \ 302 } 303 304 #endif /* VM_STATS */ 305 306 307 308 #ifdef DEBUG 309 #define MEMSEG_SEARCH_STATS 310 #endif 311 312 #ifdef MEMSEG_SEARCH_STATS 313 struct memseg_stats { 314 uint_t nsearch; 315 uint_t nlastwon; 316 uint_t nhashwon; 317 uint_t nnotfound; 318 } memseg_stats; 319 320 #define MEMSEG_STAT_INCR(v) \ 321 atomic_add_32(&memseg_stats.v, 1) 322 #else 323 #define MEMSEG_STAT_INCR(x) 324 #endif 325 326 struct memseg *memsegs; /* list of memory segments */ 327 328 /* 329 * /etc/system tunable to control large page allocation hueristic. 330 * 331 * Setting to LPAP_LOCAL will heavily prefer the local lgroup over remote lgroup 332 * for large page allocation requests. If a large page is not readily 333 * avaliable on the local freelists we will go through additional effort 334 * to create a large page, potentially moving smaller pages around to coalesce 335 * larger pages in the local lgroup. 336 * Default value of LPAP_DEFAULT will go to remote freelists if large pages 337 * are not readily available in the local lgroup. 338 */ 339 enum lpap { 340 LPAP_DEFAULT, /* default large page allocation policy */ 341 LPAP_LOCAL /* local large page allocation policy */ 342 }; 343 344 enum lpap lpg_alloc_prefer = LPAP_DEFAULT; 345 346 static void page_init_mem_config(void); 347 static int page_do_hashin(page_t *, vnode_t *, u_offset_t); 348 static void page_do_hashout(page_t *); 349 static void page_capture_init(); 350 int page_capture_take_action(page_t *, uint_t, void *); 351 352 static void page_demote_vp_pages(page_t *); 353 354 355 void 356 pcf_init(void) 357 358 { 359 if (boot_ncpus != -1) { 360 pcf_fanout = boot_ncpus; 361 } else { 362 pcf_fanout = max_ncpus; 363 } 364 #ifdef sun4v 365 /* 366 * Force at least 4 buckets if possible for sun4v. 367 */ 368 pcf_fanout = MAX(pcf_fanout, 4); 369 #endif /* sun4v */ 370 371 /* 372 * Round up to the nearest power of 2. 373 */ 374 pcf_fanout = MIN(pcf_fanout, MAX_PCF_FANOUT); 375 if (!ISP2(pcf_fanout)) { 376 pcf_fanout = 1 << highbit(pcf_fanout); 377 378 if (pcf_fanout > MAX_PCF_FANOUT) { 379 pcf_fanout = 1 << (highbit(MAX_PCF_FANOUT) - 1); 380 } 381 } 382 pcf_fanout_mask = pcf_fanout - 1; 383 } 384 385 /* 386 * vm subsystem related initialization 387 */ 388 void 389 vm_init(void) 390 { 391 boolean_t callb_vm_cpr(void *, int); 392 393 (void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm"); 394 page_init_mem_config(); 395 page_retire_init(); 396 vm_usage_init(); 397 page_capture_init(); 398 } 399 400 /* 401 * This function is called at startup and when memory is added or deleted. 402 */ 403 void 404 init_pages_pp_maximum() 405 { 406 static pgcnt_t p_min; 407 static pgcnt_t pages_pp_maximum_startup; 408 static pgcnt_t avrmem_delta; 409 static int init_done; 410 static int user_set; /* true if set in /etc/system */ 411 412 if (init_done == 0) { 413 414 /* If the user specified a value, save it */ 415 if (pages_pp_maximum != 0) { 416 user_set = 1; 417 pages_pp_maximum_startup = pages_pp_maximum; 418 } 419 420 /* 421 * Setting of pages_pp_maximum is based first time 422 * on the value of availrmem just after the start-up 423 * allocations. To preserve this relationship at run 424 * time, use a delta from availrmem_initial. 425 */ 426 ASSERT(availrmem_initial >= availrmem); 427 avrmem_delta = availrmem_initial - availrmem; 428 429 /* The allowable floor of pages_pp_maximum */ 430 p_min = tune.t_minarmem + 100; 431 432 /* Make sure we don't come through here again. */ 433 init_done = 1; 434 } 435 /* 436 * Determine pages_pp_maximum, the number of currently available 437 * pages (availrmem) that can't be `locked'. If not set by 438 * the user, we set it to 4% of the currently available memory 439 * plus 4MB. 440 * But we also insist that it be greater than tune.t_minarmem; 441 * otherwise a process could lock down a lot of memory, get swapped 442 * out, and never have enough to get swapped back in. 443 */ 444 if (user_set) 445 pages_pp_maximum = pages_pp_maximum_startup; 446 else 447 pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25) 448 + btop(4 * 1024 * 1024); 449 450 if (pages_pp_maximum <= p_min) { 451 pages_pp_maximum = p_min; 452 } 453 } 454 455 void 456 set_max_page_get(pgcnt_t target_total_pages) 457 { 458 max_page_get = target_total_pages / 2; 459 } 460 461 static pgcnt_t pending_delete; 462 463 /*ARGSUSED*/ 464 static void 465 page_mem_config_post_add( 466 void *arg, 467 pgcnt_t delta_pages) 468 { 469 set_max_page_get(total_pages - pending_delete); 470 init_pages_pp_maximum(); 471 } 472 473 /*ARGSUSED*/ 474 static int 475 page_mem_config_pre_del( 476 void *arg, 477 pgcnt_t delta_pages) 478 { 479 pgcnt_t nv; 480 481 nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages); 482 set_max_page_get(total_pages - nv); 483 return (0); 484 } 485 486 /*ARGSUSED*/ 487 static void 488 page_mem_config_post_del( 489 void *arg, 490 pgcnt_t delta_pages, 491 int cancelled) 492 { 493 pgcnt_t nv; 494 495 nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages); 496 set_max_page_get(total_pages - nv); 497 if (!cancelled) 498 init_pages_pp_maximum(); 499 } 500 501 static kphysm_setup_vector_t page_mem_config_vec = { 502 KPHYSM_SETUP_VECTOR_VERSION, 503 page_mem_config_post_add, 504 page_mem_config_pre_del, 505 page_mem_config_post_del, 506 }; 507 508 static void 509 page_init_mem_config(void) 510 { 511 int ret; 512 513 ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL); 514 ASSERT(ret == 0); 515 } 516 517 /* 518 * Evenly spread out the PCF counters for large free pages 519 */ 520 static void 521 page_free_large_ctr(pgcnt_t npages) 522 { 523 static struct pcf *p = pcf; 524 pgcnt_t lump; 525 526 freemem += npages; 527 528 lump = roundup(npages, pcf_fanout) / pcf_fanout; 529 530 while (npages > 0) { 531 532 ASSERT(!p->pcf_block); 533 534 if (lump < npages) { 535 p->pcf_count += (uint_t)lump; 536 npages -= lump; 537 } else { 538 p->pcf_count += (uint_t)npages; 539 npages = 0; 540 } 541 542 ASSERT(!p->pcf_wait); 543 544 if (++p > &pcf[pcf_fanout - 1]) 545 p = pcf; 546 } 547 548 ASSERT(npages == 0); 549 } 550 551 /* 552 * Add a physical chunk of memory to the system free lists during startup. 553 * Platform specific startup() allocates the memory for the page structs. 554 * 555 * num - number of page structures 556 * base - page number (pfn) to be associated with the first page. 557 * 558 * Since we are doing this during startup (ie. single threaded), we will 559 * use shortcut routines to avoid any locking overhead while putting all 560 * these pages on the freelists. 561 * 562 * NOTE: Any changes performed to page_free(), must also be performed to 563 * add_physmem() since this is how we initialize all page_t's at 564 * boot time. 565 */ 566 void 567 add_physmem( 568 page_t *pp, 569 pgcnt_t num, 570 pfn_t pnum) 571 { 572 page_t *root = NULL; 573 uint_t szc = page_num_pagesizes() - 1; 574 pgcnt_t large = page_get_pagecnt(szc); 575 pgcnt_t cnt = 0; 576 577 TRACE_2(TR_FAC_VM, TR_PAGE_INIT, 578 "add_physmem:pp %p num %lu", pp, num); 579 580 /* 581 * Arbitrarily limit the max page_get request 582 * to 1/2 of the page structs we have. 583 */ 584 total_pages += num; 585 set_max_page_get(total_pages); 586 587 PLCNT_MODIFY_MAX(pnum, (long)num); 588 589 /* 590 * The physical space for the pages array 591 * representing ram pages has already been 592 * allocated. Here we initialize each lock 593 * in the page structure, and put each on 594 * the free list 595 */ 596 for (; num; pp++, pnum++, num--) { 597 598 /* 599 * this needs to fill in the page number 600 * and do any other arch specific initialization 601 */ 602 add_physmem_cb(pp, pnum); 603 604 pp->p_lckcnt = 0; 605 pp->p_cowcnt = 0; 606 pp->p_slckcnt = 0; 607 608 /* 609 * Initialize the page lock as unlocked, since nobody 610 * can see or access this page yet. 611 */ 612 pp->p_selock = 0; 613 614 /* 615 * Initialize IO lock 616 */ 617 page_iolock_init(pp); 618 619 /* 620 * initialize other fields in the page_t 621 */ 622 PP_SETFREE(pp); 623 page_clr_all_props(pp); 624 PP_SETAGED(pp); 625 pp->p_offset = (u_offset_t)-1; 626 pp->p_next = pp; 627 pp->p_prev = pp; 628 629 /* 630 * Simple case: System doesn't support large pages. 631 */ 632 if (szc == 0) { 633 pp->p_szc = 0; 634 page_free_at_startup(pp); 635 continue; 636 } 637 638 /* 639 * Handle unaligned pages, we collect them up onto 640 * the root page until we have a full large page. 641 */ 642 if (!IS_P2ALIGNED(pnum, large)) { 643 644 /* 645 * If not in a large page, 646 * just free as small page. 647 */ 648 if (root == NULL) { 649 pp->p_szc = 0; 650 page_free_at_startup(pp); 651 continue; 652 } 653 654 /* 655 * Link a constituent page into the large page. 656 */ 657 pp->p_szc = szc; 658 page_list_concat(&root, &pp); 659 660 /* 661 * When large page is fully formed, free it. 662 */ 663 if (++cnt == large) { 664 page_free_large_ctr(cnt); 665 page_list_add_pages(root, PG_LIST_ISINIT); 666 root = NULL; 667 cnt = 0; 668 } 669 continue; 670 } 671 672 /* 673 * At this point we have a page number which 674 * is aligned. We assert that we aren't already 675 * in a different large page. 676 */ 677 ASSERT(IS_P2ALIGNED(pnum, large)); 678 ASSERT(root == NULL && cnt == 0); 679 680 /* 681 * If insufficient number of pages left to form 682 * a large page, just free the small page. 683 */ 684 if (num < large) { 685 pp->p_szc = 0; 686 page_free_at_startup(pp); 687 continue; 688 } 689 690 /* 691 * Otherwise start a new large page. 692 */ 693 pp->p_szc = szc; 694 cnt++; 695 root = pp; 696 } 697 ASSERT(root == NULL && cnt == 0); 698 } 699 700 /* 701 * Find a page representing the specified [vp, offset]. 702 * If we find the page but it is intransit coming in, 703 * it will have an "exclusive" lock and we wait for 704 * the i/o to complete. A page found on the free list 705 * is always reclaimed and then locked. On success, the page 706 * is locked, its data is valid and it isn't on the free 707 * list, while a NULL is returned if the page doesn't exist. 708 */ 709 page_t * 710 page_lookup(vnode_t *vp, u_offset_t off, se_t se) 711 { 712 return (page_lookup_create(vp, off, se, NULL, NULL, 0)); 713 } 714 715 /* 716 * Find a page representing the specified [vp, offset]. 717 * We either return the one we found or, if passed in, 718 * create one with identity of [vp, offset] of the 719 * pre-allocated page. If we find existing page but it is 720 * intransit coming in, it will have an "exclusive" lock 721 * and we wait for the i/o to complete. A page found on 722 * the free list is always reclaimed and then locked. 723 * On success, the page is locked, its data is valid and 724 * it isn't on the free list, while a NULL is returned 725 * if the page doesn't exist and newpp is NULL; 726 */ 727 page_t * 728 page_lookup_create( 729 vnode_t *vp, 730 u_offset_t off, 731 se_t se, 732 page_t *newpp, 733 spgcnt_t *nrelocp, 734 int flags) 735 { 736 page_t *pp; 737 kmutex_t *phm; 738 ulong_t index; 739 uint_t hash_locked; 740 uint_t es; 741 742 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 743 VM_STAT_ADD(page_lookup_cnt[0]); 744 ASSERT(newpp ? PAGE_EXCL(newpp) : 1); 745 746 /* 747 * Acquire the appropriate page hash lock since 748 * we have to search the hash list. Pages that 749 * hash to this list can't change identity while 750 * this lock is held. 751 */ 752 hash_locked = 0; 753 index = PAGE_HASH_FUNC(vp, off); 754 phm = NULL; 755 top: 756 PAGE_HASH_SEARCH(index, pp, vp, off); 757 if (pp != NULL) { 758 VM_STAT_ADD(page_lookup_cnt[1]); 759 es = (newpp != NULL) ? 1 : 0; 760 es |= flags; 761 if (!hash_locked) { 762 VM_STAT_ADD(page_lookup_cnt[2]); 763 if (!page_try_reclaim_lock(pp, se, es)) { 764 /* 765 * On a miss, acquire the phm. Then 766 * next time, page_lock() will be called, 767 * causing a wait if the page is busy. 768 * just looping with page_trylock() would 769 * get pretty boring. 770 */ 771 VM_STAT_ADD(page_lookup_cnt[3]); 772 phm = PAGE_HASH_MUTEX(index); 773 mutex_enter(phm); 774 hash_locked = 1; 775 goto top; 776 } 777 } else { 778 VM_STAT_ADD(page_lookup_cnt[4]); 779 if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) { 780 VM_STAT_ADD(page_lookup_cnt[5]); 781 goto top; 782 } 783 } 784 785 /* 786 * Since `pp' is locked it can not change identity now. 787 * Reconfirm we locked the correct page. 788 * 789 * Both the p_vnode and p_offset *must* be cast volatile 790 * to force a reload of their values: The PAGE_HASH_SEARCH 791 * macro will have stuffed p_vnode and p_offset into 792 * registers before calling page_trylock(); another thread, 793 * actually holding the hash lock, could have changed the 794 * page's identity in memory, but our registers would not 795 * be changed, fooling the reconfirmation. If the hash 796 * lock was held during the search, the casting would 797 * not be needed. 798 */ 799 VM_STAT_ADD(page_lookup_cnt[6]); 800 if (((volatile struct vnode *)(pp->p_vnode) != vp) || 801 ((volatile u_offset_t)(pp->p_offset) != off)) { 802 VM_STAT_ADD(page_lookup_cnt[7]); 803 if (hash_locked) { 804 panic("page_lookup_create: lost page %p", 805 (void *)pp); 806 /*NOTREACHED*/ 807 } 808 page_unlock(pp); 809 phm = PAGE_HASH_MUTEX(index); 810 mutex_enter(phm); 811 hash_locked = 1; 812 goto top; 813 } 814 815 /* 816 * If page_trylock() was called, then pp may still be on 817 * the cachelist (can't be on the free list, it would not 818 * have been found in the search). If it is on the 819 * cachelist it must be pulled now. To pull the page from 820 * the cachelist, it must be exclusively locked. 821 * 822 * The other big difference between page_trylock() and 823 * page_lock(), is that page_lock() will pull the 824 * page from whatever free list (the cache list in this 825 * case) the page is on. If page_trylock() was used 826 * above, then we have to do the reclaim ourselves. 827 */ 828 if ((!hash_locked) && (PP_ISFREE(pp))) { 829 ASSERT(PP_ISAGED(pp) == 0); 830 VM_STAT_ADD(page_lookup_cnt[8]); 831 832 /* 833 * page_relcaim will insure that we 834 * have this page exclusively 835 */ 836 837 if (!page_reclaim(pp, NULL)) { 838 /* 839 * Page_reclaim dropped whatever lock 840 * we held. 841 */ 842 VM_STAT_ADD(page_lookup_cnt[9]); 843 phm = PAGE_HASH_MUTEX(index); 844 mutex_enter(phm); 845 hash_locked = 1; 846 goto top; 847 } else if (se == SE_SHARED && newpp == NULL) { 848 VM_STAT_ADD(page_lookup_cnt[10]); 849 page_downgrade(pp); 850 } 851 } 852 853 if (hash_locked) { 854 mutex_exit(phm); 855 } 856 857 if (newpp != NULL && pp->p_szc < newpp->p_szc && 858 PAGE_EXCL(pp) && nrelocp != NULL) { 859 ASSERT(nrelocp != NULL); 860 (void) page_relocate(&pp, &newpp, 1, 1, nrelocp, 861 NULL); 862 if (*nrelocp > 0) { 863 VM_STAT_COND_ADD(*nrelocp == 1, 864 page_lookup_cnt[11]); 865 VM_STAT_COND_ADD(*nrelocp > 1, 866 page_lookup_cnt[12]); 867 pp = newpp; 868 se = SE_EXCL; 869 } else { 870 if (se == SE_SHARED) { 871 page_downgrade(pp); 872 } 873 VM_STAT_ADD(page_lookup_cnt[13]); 874 } 875 } else if (newpp != NULL && nrelocp != NULL) { 876 if (PAGE_EXCL(pp) && se == SE_SHARED) { 877 page_downgrade(pp); 878 } 879 VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc, 880 page_lookup_cnt[14]); 881 VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc, 882 page_lookup_cnt[15]); 883 VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc, 884 page_lookup_cnt[16]); 885 } else if (newpp != NULL && PAGE_EXCL(pp)) { 886 se = SE_EXCL; 887 } 888 } else if (!hash_locked) { 889 VM_STAT_ADD(page_lookup_cnt[17]); 890 phm = PAGE_HASH_MUTEX(index); 891 mutex_enter(phm); 892 hash_locked = 1; 893 goto top; 894 } else if (newpp != NULL) { 895 /* 896 * If we have a preallocated page then 897 * insert it now and basically behave like 898 * page_create. 899 */ 900 VM_STAT_ADD(page_lookup_cnt[18]); 901 /* 902 * Since we hold the page hash mutex and 903 * just searched for this page, page_hashin 904 * had better not fail. If it does, that 905 * means some thread did not follow the 906 * page hash mutex rules. Panic now and 907 * get it over with. As usual, go down 908 * holding all the locks. 909 */ 910 ASSERT(MUTEX_HELD(phm)); 911 if (!page_hashin(newpp, vp, off, phm)) { 912 ASSERT(MUTEX_HELD(phm)); 913 panic("page_lookup_create: hashin failed %p %p %llx %p", 914 (void *)newpp, (void *)vp, off, (void *)phm); 915 /*NOTREACHED*/ 916 } 917 ASSERT(MUTEX_HELD(phm)); 918 mutex_exit(phm); 919 phm = NULL; 920 page_set_props(newpp, P_REF); 921 page_io_lock(newpp); 922 pp = newpp; 923 se = SE_EXCL; 924 } else { 925 VM_STAT_ADD(page_lookup_cnt[19]); 926 mutex_exit(phm); 927 } 928 929 ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1); 930 931 ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1); 932 933 return (pp); 934 } 935 936 /* 937 * Search the hash list for the page representing the 938 * specified [vp, offset] and return it locked. Skip 939 * free pages and pages that cannot be locked as requested. 940 * Used while attempting to kluster pages. 941 */ 942 page_t * 943 page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se) 944 { 945 page_t *pp; 946 kmutex_t *phm; 947 ulong_t index; 948 uint_t locked; 949 950 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 951 VM_STAT_ADD(page_lookup_nowait_cnt[0]); 952 953 index = PAGE_HASH_FUNC(vp, off); 954 PAGE_HASH_SEARCH(index, pp, vp, off); 955 locked = 0; 956 if (pp == NULL) { 957 top: 958 VM_STAT_ADD(page_lookup_nowait_cnt[1]); 959 locked = 1; 960 phm = PAGE_HASH_MUTEX(index); 961 mutex_enter(phm); 962 PAGE_HASH_SEARCH(index, pp, vp, off); 963 } 964 965 if (pp == NULL || PP_ISFREE(pp)) { 966 VM_STAT_ADD(page_lookup_nowait_cnt[2]); 967 pp = NULL; 968 } else { 969 if (!page_trylock(pp, se)) { 970 VM_STAT_ADD(page_lookup_nowait_cnt[3]); 971 pp = NULL; 972 } else { 973 VM_STAT_ADD(page_lookup_nowait_cnt[4]); 974 /* 975 * See the comment in page_lookup() 976 */ 977 if (((volatile struct vnode *)(pp->p_vnode) != vp) || 978 ((u_offset_t)(pp->p_offset) != off)) { 979 VM_STAT_ADD(page_lookup_nowait_cnt[5]); 980 if (locked) { 981 panic("page_lookup_nowait %p", 982 (void *)pp); 983 /*NOTREACHED*/ 984 } 985 page_unlock(pp); 986 goto top; 987 } 988 if (PP_ISFREE(pp)) { 989 VM_STAT_ADD(page_lookup_nowait_cnt[6]); 990 page_unlock(pp); 991 pp = NULL; 992 } 993 } 994 } 995 if (locked) { 996 VM_STAT_ADD(page_lookup_nowait_cnt[7]); 997 mutex_exit(phm); 998 } 999 1000 ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1); 1001 1002 return (pp); 1003 } 1004 1005 /* 1006 * Search the hash list for a page with the specified [vp, off] 1007 * that is known to exist and is already locked. This routine 1008 * is typically used by segment SOFTUNLOCK routines. 1009 */ 1010 page_t * 1011 page_find(vnode_t *vp, u_offset_t off) 1012 { 1013 page_t *pp; 1014 kmutex_t *phm; 1015 ulong_t index; 1016 1017 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 1018 VM_STAT_ADD(page_find_cnt); 1019 1020 index = PAGE_HASH_FUNC(vp, off); 1021 phm = PAGE_HASH_MUTEX(index); 1022 1023 mutex_enter(phm); 1024 PAGE_HASH_SEARCH(index, pp, vp, off); 1025 mutex_exit(phm); 1026 1027 ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr); 1028 return (pp); 1029 } 1030 1031 /* 1032 * Determine whether a page with the specified [vp, off] 1033 * currently exists in the system. Obviously this should 1034 * only be considered as a hint since nothing prevents the 1035 * page from disappearing or appearing immediately after 1036 * the return from this routine. Subsequently, we don't 1037 * even bother to lock the list. 1038 */ 1039 page_t * 1040 page_exists(vnode_t *vp, u_offset_t off) 1041 { 1042 page_t *pp; 1043 ulong_t index; 1044 1045 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 1046 VM_STAT_ADD(page_exists_cnt); 1047 1048 index = PAGE_HASH_FUNC(vp, off); 1049 PAGE_HASH_SEARCH(index, pp, vp, off); 1050 1051 return (pp); 1052 } 1053 1054 /* 1055 * Determine if physically contiguous pages exist for [vp, off] - [vp, off + 1056 * page_size(szc)) range. if they exist and ppa is not NULL fill ppa array 1057 * with these pages locked SHARED. If necessary reclaim pages from 1058 * freelist. Return 1 if contiguous pages exist and 0 otherwise. 1059 * 1060 * If we fail to lock pages still return 1 if pages exist and contiguous. 1061 * But in this case return value is just a hint. ppa array won't be filled. 1062 * Caller should initialize ppa[0] as NULL to distinguish return value. 1063 * 1064 * Returns 0 if pages don't exist or not physically contiguous. 1065 * 1066 * This routine doesn't work for anonymous(swapfs) pages. 1067 */ 1068 int 1069 page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[]) 1070 { 1071 pgcnt_t pages; 1072 pfn_t pfn; 1073 page_t *rootpp; 1074 pgcnt_t i; 1075 pgcnt_t j; 1076 u_offset_t save_off = off; 1077 ulong_t index; 1078 kmutex_t *phm; 1079 page_t *pp; 1080 uint_t pszc; 1081 int loopcnt = 0; 1082 1083 ASSERT(szc != 0); 1084 ASSERT(vp != NULL); 1085 ASSERT(!IS_SWAPFSVP(vp)); 1086 ASSERT(!VN_ISKAS(vp)); 1087 1088 again: 1089 if (++loopcnt > 3) { 1090 VM_STAT_ADD(page_exphcontg[0]); 1091 return (0); 1092 } 1093 1094 index = PAGE_HASH_FUNC(vp, off); 1095 phm = PAGE_HASH_MUTEX(index); 1096 1097 mutex_enter(phm); 1098 PAGE_HASH_SEARCH(index, pp, vp, off); 1099 mutex_exit(phm); 1100 1101 VM_STAT_ADD(page_exphcontg[1]); 1102 1103 if (pp == NULL) { 1104 VM_STAT_ADD(page_exphcontg[2]); 1105 return (0); 1106 } 1107 1108 pages = page_get_pagecnt(szc); 1109 rootpp = pp; 1110 pfn = rootpp->p_pagenum; 1111 1112 if ((pszc = pp->p_szc) >= szc && ppa != NULL) { 1113 VM_STAT_ADD(page_exphcontg[3]); 1114 if (!page_trylock(pp, SE_SHARED)) { 1115 VM_STAT_ADD(page_exphcontg[4]); 1116 return (1); 1117 } 1118 /* 1119 * Also check whether p_pagenum was modified by DR. 1120 */ 1121 if (pp->p_szc != pszc || pp->p_vnode != vp || 1122 pp->p_offset != off || pp->p_pagenum != pfn) { 1123 VM_STAT_ADD(page_exphcontg[5]); 1124 page_unlock(pp); 1125 off = save_off; 1126 goto again; 1127 } 1128 /* 1129 * szc was non zero and vnode and offset matched after we 1130 * locked the page it means it can't become free on us. 1131 */ 1132 ASSERT(!PP_ISFREE(pp)); 1133 if (!IS_P2ALIGNED(pfn, pages)) { 1134 page_unlock(pp); 1135 return (0); 1136 } 1137 ppa[0] = pp; 1138 pp++; 1139 off += PAGESIZE; 1140 pfn++; 1141 for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) { 1142 if (!page_trylock(pp, SE_SHARED)) { 1143 VM_STAT_ADD(page_exphcontg[6]); 1144 pp--; 1145 while (i-- > 0) { 1146 page_unlock(pp); 1147 pp--; 1148 } 1149 ppa[0] = NULL; 1150 return (1); 1151 } 1152 if (pp->p_szc != pszc) { 1153 VM_STAT_ADD(page_exphcontg[7]); 1154 page_unlock(pp); 1155 pp--; 1156 while (i-- > 0) { 1157 page_unlock(pp); 1158 pp--; 1159 } 1160 ppa[0] = NULL; 1161 off = save_off; 1162 goto again; 1163 } 1164 /* 1165 * szc the same as for previous already locked pages 1166 * with right identity. Since this page had correct 1167 * szc after we locked it can't get freed or destroyed 1168 * and therefore must have the expected identity. 1169 */ 1170 ASSERT(!PP_ISFREE(pp)); 1171 if (pp->p_vnode != vp || 1172 pp->p_offset != off) { 1173 panic("page_exists_physcontig: " 1174 "large page identity doesn't match"); 1175 } 1176 ppa[i] = pp; 1177 ASSERT(pp->p_pagenum == pfn); 1178 } 1179 VM_STAT_ADD(page_exphcontg[8]); 1180 ppa[pages] = NULL; 1181 return (1); 1182 } else if (pszc >= szc) { 1183 VM_STAT_ADD(page_exphcontg[9]); 1184 if (!IS_P2ALIGNED(pfn, pages)) { 1185 return (0); 1186 } 1187 return (1); 1188 } 1189 1190 if (!IS_P2ALIGNED(pfn, pages)) { 1191 VM_STAT_ADD(page_exphcontg[10]); 1192 return (0); 1193 } 1194 1195 if (page_numtomemseg_nolock(pfn) != 1196 page_numtomemseg_nolock(pfn + pages - 1)) { 1197 VM_STAT_ADD(page_exphcontg[11]); 1198 return (0); 1199 } 1200 1201 /* 1202 * We loop up 4 times across pages to promote page size. 1203 * We're extra cautious to promote page size atomically with respect 1204 * to everybody else. But we can probably optimize into 1 loop if 1205 * this becomes an issue. 1206 */ 1207 1208 for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) { 1209 if (!page_trylock(pp, SE_EXCL)) { 1210 VM_STAT_ADD(page_exphcontg[12]); 1211 break; 1212 } 1213 /* 1214 * Check whether p_pagenum was modified by DR. 1215 */ 1216 if (pp->p_pagenum != pfn) { 1217 page_unlock(pp); 1218 break; 1219 } 1220 if (pp->p_vnode != vp || 1221 pp->p_offset != off) { 1222 VM_STAT_ADD(page_exphcontg[13]); 1223 page_unlock(pp); 1224 break; 1225 } 1226 if (pp->p_szc >= szc) { 1227 ASSERT(i == 0); 1228 page_unlock(pp); 1229 off = save_off; 1230 goto again; 1231 } 1232 } 1233 1234 if (i != pages) { 1235 VM_STAT_ADD(page_exphcontg[14]); 1236 --pp; 1237 while (i-- > 0) { 1238 page_unlock(pp); 1239 --pp; 1240 } 1241 return (0); 1242 } 1243 1244 pp = rootpp; 1245 for (i = 0; i < pages; i++, pp++) { 1246 if (PP_ISFREE(pp)) { 1247 VM_STAT_ADD(page_exphcontg[15]); 1248 ASSERT(!PP_ISAGED(pp)); 1249 ASSERT(pp->p_szc == 0); 1250 if (!page_reclaim(pp, NULL)) { 1251 break; 1252 } 1253 } else { 1254 ASSERT(pp->p_szc < szc); 1255 VM_STAT_ADD(page_exphcontg[16]); 1256 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 1257 } 1258 } 1259 if (i < pages) { 1260 VM_STAT_ADD(page_exphcontg[17]); 1261 /* 1262 * page_reclaim failed because we were out of memory. 1263 * drop the rest of the locks and return because this page 1264 * must be already reallocated anyway. 1265 */ 1266 pp = rootpp; 1267 for (j = 0; j < pages; j++, pp++) { 1268 if (j != i) { 1269 page_unlock(pp); 1270 } 1271 } 1272 return (0); 1273 } 1274 1275 off = save_off; 1276 pp = rootpp; 1277 for (i = 0; i < pages; i++, pp++, off += PAGESIZE) { 1278 ASSERT(PAGE_EXCL(pp)); 1279 ASSERT(!PP_ISFREE(pp)); 1280 ASSERT(!hat_page_is_mapped(pp)); 1281 ASSERT(pp->p_vnode == vp); 1282 ASSERT(pp->p_offset == off); 1283 pp->p_szc = szc; 1284 } 1285 pp = rootpp; 1286 for (i = 0; i < pages; i++, pp++) { 1287 if (ppa == NULL) { 1288 page_unlock(pp); 1289 } else { 1290 ppa[i] = pp; 1291 page_downgrade(ppa[i]); 1292 } 1293 } 1294 if (ppa != NULL) { 1295 ppa[pages] = NULL; 1296 } 1297 VM_STAT_ADD(page_exphcontg[18]); 1298 ASSERT(vp->v_pages != NULL); 1299 return (1); 1300 } 1301 1302 /* 1303 * Determine whether a page with the specified [vp, off] 1304 * currently exists in the system and if so return its 1305 * size code. Obviously this should only be considered as 1306 * a hint since nothing prevents the page from disappearing 1307 * or appearing immediately after the return from this routine. 1308 */ 1309 int 1310 page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc) 1311 { 1312 page_t *pp; 1313 kmutex_t *phm; 1314 ulong_t index; 1315 int rc = 0; 1316 1317 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 1318 ASSERT(szc != NULL); 1319 VM_STAT_ADD(page_exists_forreal_cnt); 1320 1321 index = PAGE_HASH_FUNC(vp, off); 1322 phm = PAGE_HASH_MUTEX(index); 1323 1324 mutex_enter(phm); 1325 PAGE_HASH_SEARCH(index, pp, vp, off); 1326 if (pp != NULL) { 1327 *szc = pp->p_szc; 1328 rc = 1; 1329 } 1330 mutex_exit(phm); 1331 return (rc); 1332 } 1333 1334 /* wakeup threads waiting for pages in page_create_get_something() */ 1335 void 1336 wakeup_pcgs(void) 1337 { 1338 if (!CV_HAS_WAITERS(&pcgs_cv)) 1339 return; 1340 cv_broadcast(&pcgs_cv); 1341 } 1342 1343 /* 1344 * 'freemem' is used all over the kernel as an indication of how many 1345 * pages are free (either on the cache list or on the free page list) 1346 * in the system. In very few places is a really accurate 'freemem' 1347 * needed. To avoid contention of the lock protecting a the 1348 * single freemem, it was spread out into NCPU buckets. Set_freemem 1349 * sets freemem to the total of all NCPU buckets. It is called from 1350 * clock() on each TICK. 1351 */ 1352 void 1353 set_freemem() 1354 { 1355 struct pcf *p; 1356 ulong_t t; 1357 uint_t i; 1358 1359 t = 0; 1360 p = pcf; 1361 for (i = 0; i < pcf_fanout; i++) { 1362 t += p->pcf_count; 1363 p++; 1364 } 1365 freemem = t; 1366 1367 /* 1368 * Don't worry about grabbing mutex. It's not that 1369 * critical if we miss a tick or two. This is 1370 * where we wakeup possible delayers in 1371 * page_create_get_something(). 1372 */ 1373 wakeup_pcgs(); 1374 } 1375 1376 ulong_t 1377 get_freemem() 1378 { 1379 struct pcf *p; 1380 ulong_t t; 1381 uint_t i; 1382 1383 t = 0; 1384 p = pcf; 1385 for (i = 0; i < pcf_fanout; i++) { 1386 t += p->pcf_count; 1387 p++; 1388 } 1389 /* 1390 * We just calculated it, might as well set it. 1391 */ 1392 freemem = t; 1393 return (t); 1394 } 1395 1396 /* 1397 * Acquire all of the page cache & free (pcf) locks. 1398 */ 1399 void 1400 pcf_acquire_all() 1401 { 1402 struct pcf *p; 1403 uint_t i; 1404 1405 p = pcf; 1406 for (i = 0; i < pcf_fanout; i++) { 1407 mutex_enter(&p->pcf_lock); 1408 p++; 1409 } 1410 } 1411 1412 /* 1413 * Release all the pcf_locks. 1414 */ 1415 void 1416 pcf_release_all() 1417 { 1418 struct pcf *p; 1419 uint_t i; 1420 1421 p = pcf; 1422 for (i = 0; i < pcf_fanout; i++) { 1423 mutex_exit(&p->pcf_lock); 1424 p++; 1425 } 1426 } 1427 1428 /* 1429 * Inform the VM system that we need some pages freed up. 1430 * Calls must be symmetric, e.g.: 1431 * 1432 * page_needfree(100); 1433 * wait a bit; 1434 * page_needfree(-100); 1435 */ 1436 void 1437 page_needfree(spgcnt_t npages) 1438 { 1439 mutex_enter(&new_freemem_lock); 1440 needfree += npages; 1441 mutex_exit(&new_freemem_lock); 1442 } 1443 1444 /* 1445 * Throttle for page_create(): try to prevent freemem from dropping 1446 * below throttlefree. We can't provide a 100% guarantee because 1447 * KM_NOSLEEP allocations, page_reclaim(), and various other things 1448 * nibble away at the freelist. However, we can block all PG_WAIT 1449 * allocations until memory becomes available. The motivation is 1450 * that several things can fall apart when there's no free memory: 1451 * 1452 * (1) If pageout() needs memory to push a page, the system deadlocks. 1453 * 1454 * (2) By (broken) specification, timeout(9F) can neither fail nor 1455 * block, so it has no choice but to panic the system if it 1456 * cannot allocate a callout structure. 1457 * 1458 * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block; 1459 * it panics if it cannot allocate a callback structure. 1460 * 1461 * (4) Untold numbers of third-party drivers have not yet been hardened 1462 * against KM_NOSLEEP and/or allocb() failures; they simply assume 1463 * success and panic the system with a data fault on failure. 1464 * (The long-term solution to this particular problem is to ship 1465 * hostile fault-injecting DEBUG kernels with the DDK.) 1466 * 1467 * It is theoretically impossible to guarantee success of non-blocking 1468 * allocations, but in practice, this throttle is very hard to break. 1469 */ 1470 static int 1471 page_create_throttle(pgcnt_t npages, int flags) 1472 { 1473 ulong_t fm; 1474 uint_t i; 1475 pgcnt_t tf; /* effective value of throttlefree */ 1476 1477 /* 1478 * Normal priority allocations. 1479 */ 1480 if ((flags & (PG_WAIT | PG_NORMALPRI)) == PG_NORMALPRI) { 1481 ASSERT(!(flags & (PG_PANIC | PG_PUSHPAGE))); 1482 return (freemem >= npages + throttlefree); 1483 } 1484 1485 /* 1486 * Never deny pages when: 1487 * - it's a thread that cannot block [NOMEMWAIT()] 1488 * - the allocation cannot block and must not fail 1489 * - the allocation cannot block and is pageout dispensated 1490 */ 1491 if (NOMEMWAIT() || 1492 ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) || 1493 ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE)) 1494 return (1); 1495 1496 /* 1497 * If the allocation can't block, we look favorably upon it 1498 * unless we're below pageout_reserve. In that case we fail 1499 * the allocation because we want to make sure there are a few 1500 * pages available for pageout. 1501 */ 1502 if ((flags & PG_WAIT) == 0) 1503 return (freemem >= npages + pageout_reserve); 1504 1505 /* Calculate the effective throttlefree value */ 1506 tf = throttlefree - 1507 ((flags & PG_PUSHPAGE) ? pageout_reserve : 0); 1508 1509 cv_signal(&proc_pageout->p_cv); 1510 1511 for (;;) { 1512 fm = 0; 1513 pcf_acquire_all(); 1514 mutex_enter(&new_freemem_lock); 1515 for (i = 0; i < pcf_fanout; i++) { 1516 fm += pcf[i].pcf_count; 1517 pcf[i].pcf_wait++; 1518 mutex_exit(&pcf[i].pcf_lock); 1519 } 1520 freemem = fm; 1521 if (freemem >= npages + tf) { 1522 mutex_exit(&new_freemem_lock); 1523 break; 1524 } 1525 needfree += npages; 1526 freemem_wait++; 1527 cv_wait(&freemem_cv, &new_freemem_lock); 1528 freemem_wait--; 1529 needfree -= npages; 1530 mutex_exit(&new_freemem_lock); 1531 } 1532 return (1); 1533 } 1534 1535 /* 1536 * page_create_wait() is called to either coalesce pages from the 1537 * different pcf buckets or to wait because there simply are not 1538 * enough pages to satisfy the caller's request. 1539 * 1540 * Sadly, this is called from platform/vm/vm_machdep.c 1541 */ 1542 int 1543 page_create_wait(pgcnt_t npages, uint_t flags) 1544 { 1545 pgcnt_t total; 1546 uint_t i; 1547 struct pcf *p; 1548 1549 /* 1550 * Wait until there are enough free pages to satisfy our 1551 * entire request. 1552 * We set needfree += npages before prodding pageout, to make sure 1553 * it does real work when npages > lotsfree > freemem. 1554 */ 1555 VM_STAT_ADD(page_create_not_enough); 1556 1557 ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1); 1558 checkagain: 1559 if ((flags & PG_NORELOC) && 1560 kcage_freemem < kcage_throttlefree + npages) 1561 (void) kcage_create_throttle(npages, flags); 1562 1563 if (freemem < npages + throttlefree) 1564 if (!page_create_throttle(npages, flags)) 1565 return (0); 1566 1567 if (pcf_decrement_bucket(npages) || 1568 pcf_decrement_multiple(&total, npages, 0)) 1569 return (1); 1570 1571 /* 1572 * All of the pcf locks are held, there are not enough pages 1573 * to satisfy the request (npages < total). 1574 * Be sure to acquire the new_freemem_lock before dropping 1575 * the pcf locks. This prevents dropping wakeups in page_free(). 1576 * The order is always pcf_lock then new_freemem_lock. 1577 * 1578 * Since we hold all the pcf locks, it is a good time to set freemem. 1579 * 1580 * If the caller does not want to wait, return now. 1581 * Else turn the pageout daemon loose to find something 1582 * and wait till it does. 1583 * 1584 */ 1585 freemem = total; 1586 1587 if ((flags & PG_WAIT) == 0) { 1588 pcf_release_all(); 1589 1590 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM, 1591 "page_create_nomem:npages %ld freemem %ld", npages, freemem); 1592 return (0); 1593 } 1594 1595 ASSERT(proc_pageout != NULL); 1596 cv_signal(&proc_pageout->p_cv); 1597 1598 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START, 1599 "page_create_sleep_start: freemem %ld needfree %ld", 1600 freemem, needfree); 1601 1602 /* 1603 * We are going to wait. 1604 * We currently hold all of the pcf_locks, 1605 * get the new_freemem_lock (it protects freemem_wait), 1606 * before dropping the pcf_locks. 1607 */ 1608 mutex_enter(&new_freemem_lock); 1609 1610 p = pcf; 1611 for (i = 0; i < pcf_fanout; i++) { 1612 p->pcf_wait++; 1613 mutex_exit(&p->pcf_lock); 1614 p++; 1615 } 1616 1617 needfree += npages; 1618 freemem_wait++; 1619 1620 cv_wait(&freemem_cv, &new_freemem_lock); 1621 1622 freemem_wait--; 1623 needfree -= npages; 1624 1625 mutex_exit(&new_freemem_lock); 1626 1627 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END, 1628 "page_create_sleep_end: freemem %ld needfree %ld", 1629 freemem, needfree); 1630 1631 VM_STAT_ADD(page_create_not_enough_again); 1632 goto checkagain; 1633 } 1634 /* 1635 * A routine to do the opposite of page_create_wait(). 1636 */ 1637 void 1638 page_create_putback(spgcnt_t npages) 1639 { 1640 struct pcf *p; 1641 pgcnt_t lump; 1642 uint_t *which; 1643 1644 /* 1645 * When a contiguous lump is broken up, we have to 1646 * deal with lots of pages (min 64) so lets spread 1647 * the wealth around. 1648 */ 1649 lump = roundup(npages, pcf_fanout) / pcf_fanout; 1650 freemem += npages; 1651 1652 for (p = pcf; (npages > 0) && (p < &pcf[pcf_fanout]); p++) { 1653 which = &p->pcf_count; 1654 1655 mutex_enter(&p->pcf_lock); 1656 1657 if (p->pcf_block) { 1658 which = &p->pcf_reserve; 1659 } 1660 1661 if (lump < npages) { 1662 *which += (uint_t)lump; 1663 npages -= lump; 1664 } else { 1665 *which += (uint_t)npages; 1666 npages = 0; 1667 } 1668 1669 if (p->pcf_wait) { 1670 mutex_enter(&new_freemem_lock); 1671 /* 1672 * Check to see if some other thread 1673 * is actually waiting. Another bucket 1674 * may have woken it up by now. If there 1675 * are no waiters, then set our pcf_wait 1676 * count to zero to avoid coming in here 1677 * next time. 1678 */ 1679 if (freemem_wait) { 1680 if (npages > 1) { 1681 cv_broadcast(&freemem_cv); 1682 } else { 1683 cv_signal(&freemem_cv); 1684 } 1685 p->pcf_wait--; 1686 } else { 1687 p->pcf_wait = 0; 1688 } 1689 mutex_exit(&new_freemem_lock); 1690 } 1691 mutex_exit(&p->pcf_lock); 1692 } 1693 ASSERT(npages == 0); 1694 } 1695 1696 /* 1697 * A helper routine for page_create_get_something. 1698 * The indenting got to deep down there. 1699 * Unblock the pcf counters. Any pages freed after 1700 * pcf_block got set are moved to pcf_count and 1701 * wakeups (cv_broadcast() or cv_signal()) are done as needed. 1702 */ 1703 static void 1704 pcgs_unblock(void) 1705 { 1706 int i; 1707 struct pcf *p; 1708 1709 /* Update freemem while we're here. */ 1710 freemem = 0; 1711 p = pcf; 1712 for (i = 0; i < pcf_fanout; i++) { 1713 mutex_enter(&p->pcf_lock); 1714 ASSERT(p->pcf_count == 0); 1715 p->pcf_count = p->pcf_reserve; 1716 p->pcf_block = 0; 1717 freemem += p->pcf_count; 1718 if (p->pcf_wait) { 1719 mutex_enter(&new_freemem_lock); 1720 if (freemem_wait) { 1721 if (p->pcf_reserve > 1) { 1722 cv_broadcast(&freemem_cv); 1723 p->pcf_wait = 0; 1724 } else { 1725 cv_signal(&freemem_cv); 1726 p->pcf_wait--; 1727 } 1728 } else { 1729 p->pcf_wait = 0; 1730 } 1731 mutex_exit(&new_freemem_lock); 1732 } 1733 p->pcf_reserve = 0; 1734 mutex_exit(&p->pcf_lock); 1735 p++; 1736 } 1737 } 1738 1739 /* 1740 * Called from page_create_va() when both the cache and free lists 1741 * have been checked once. 1742 * 1743 * Either returns a page or panics since the accounting was done 1744 * way before we got here. 1745 * 1746 * We don't come here often, so leave the accounting on permanently. 1747 */ 1748 1749 #define MAX_PCGS 100 1750 1751 #ifdef DEBUG 1752 #define PCGS_TRIES 100 1753 #else /* DEBUG */ 1754 #define PCGS_TRIES 10 1755 #endif /* DEBUG */ 1756 1757 #ifdef VM_STATS 1758 uint_t pcgs_counts[PCGS_TRIES]; 1759 uint_t pcgs_too_many; 1760 uint_t pcgs_entered; 1761 uint_t pcgs_entered_noreloc; 1762 uint_t pcgs_locked; 1763 uint_t pcgs_cagelocked; 1764 #endif /* VM_STATS */ 1765 1766 static page_t * 1767 page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg, 1768 caddr_t vaddr, uint_t flags) 1769 { 1770 uint_t count; 1771 page_t *pp; 1772 uint_t locked, i; 1773 struct pcf *p; 1774 lgrp_t *lgrp; 1775 int cagelocked = 0; 1776 1777 VM_STAT_ADD(pcgs_entered); 1778 1779 /* 1780 * Tap any reserve freelists: if we fail now, we'll die 1781 * since the page(s) we're looking for have already been 1782 * accounted for. 1783 */ 1784 flags |= PG_PANIC; 1785 1786 if ((flags & PG_NORELOC) != 0) { 1787 VM_STAT_ADD(pcgs_entered_noreloc); 1788 /* 1789 * Requests for free pages from critical threads 1790 * such as pageout still won't throttle here, but 1791 * we must try again, to give the cageout thread 1792 * another chance to catch up. Since we already 1793 * accounted for the pages, we had better get them 1794 * this time. 1795 * 1796 * N.B. All non-critical threads acquire the pcgs_cagelock 1797 * to serialize access to the freelists. This implements a 1798 * turnstile-type synchornization to avoid starvation of 1799 * critical requests for PG_NORELOC memory by non-critical 1800 * threads: all non-critical threads must acquire a 'ticket' 1801 * before passing through, which entails making sure 1802 * kcage_freemem won't fall below minfree prior to grabbing 1803 * pages from the freelists. 1804 */ 1805 if (kcage_create_throttle(1, flags) == KCT_NONCRIT) { 1806 mutex_enter(&pcgs_cagelock); 1807 cagelocked = 1; 1808 VM_STAT_ADD(pcgs_cagelocked); 1809 } 1810 } 1811 1812 /* 1813 * Time to get serious. 1814 * We failed to get a `correctly colored' page from both the 1815 * free and cache lists. 1816 * We escalate in stage. 1817 * 1818 * First try both lists without worring about color. 1819 * 1820 * Then, grab all page accounting locks (ie. pcf[]) and 1821 * steal any pages that they have and set the pcf_block flag to 1822 * stop deletions from the lists. This will help because 1823 * a page can get added to the free list while we are looking 1824 * at the cache list, then another page could be added to the cache 1825 * list allowing the page on the free list to be removed as we 1826 * move from looking at the cache list to the free list. This 1827 * could happen over and over. We would never find the page 1828 * we have accounted for. 1829 * 1830 * Noreloc pages are a subset of the global (relocatable) page pool. 1831 * They are not tracked separately in the pcf bins, so it is 1832 * impossible to know when doing pcf accounting if the available 1833 * page(s) are noreloc pages or not. When looking for a noreloc page 1834 * it is quite easy to end up here even if the global (relocatable) 1835 * page pool has plenty of free pages but the noreloc pool is empty. 1836 * 1837 * When the noreloc pool is empty (or low), additional noreloc pages 1838 * are created by converting pages from the global page pool. This 1839 * process will stall during pcf accounting if the pcf bins are 1840 * already locked. Such is the case when a noreloc allocation is 1841 * looping here in page_create_get_something waiting for more noreloc 1842 * pages to appear. 1843 * 1844 * Short of adding a new field to the pcf bins to accurately track 1845 * the number of free noreloc pages, we instead do not grab the 1846 * pcgs_lock, do not set the pcf blocks and do not timeout when 1847 * allocating a noreloc page. This allows noreloc allocations to 1848 * loop without blocking global page pool allocations. 1849 * 1850 * NOTE: the behaviour of page_create_get_something has not changed 1851 * for the case of global page pool allocations. 1852 */ 1853 1854 flags &= ~PG_MATCH_COLOR; 1855 locked = 0; 1856 #if defined(__i386) || defined(__amd64) 1857 flags = page_create_update_flags_x86(flags); 1858 #endif 1859 1860 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 1861 1862 for (count = 0; kcage_on || count < MAX_PCGS; count++) { 1863 pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 1864 flags, lgrp); 1865 if (pp == NULL) { 1866 pp = page_get_cachelist(vp, off, seg, vaddr, 1867 flags, lgrp); 1868 } 1869 if (pp == NULL) { 1870 /* 1871 * Serialize. Don't fight with other pcgs(). 1872 */ 1873 if (!locked && (!kcage_on || !(flags & PG_NORELOC))) { 1874 mutex_enter(&pcgs_lock); 1875 VM_STAT_ADD(pcgs_locked); 1876 locked = 1; 1877 p = pcf; 1878 for (i = 0; i < pcf_fanout; i++) { 1879 mutex_enter(&p->pcf_lock); 1880 ASSERT(p->pcf_block == 0); 1881 p->pcf_block = 1; 1882 p->pcf_reserve = p->pcf_count; 1883 p->pcf_count = 0; 1884 mutex_exit(&p->pcf_lock); 1885 p++; 1886 } 1887 freemem = 0; 1888 } 1889 1890 if (count) { 1891 /* 1892 * Since page_free() puts pages on 1893 * a list then accounts for it, we 1894 * just have to wait for page_free() 1895 * to unlock any page it was working 1896 * with. The page_lock()-page_reclaim() 1897 * path falls in the same boat. 1898 * 1899 * We don't need to check on the 1900 * PG_WAIT flag, we have already 1901 * accounted for the page we are 1902 * looking for in page_create_va(). 1903 * 1904 * We just wait a moment to let any 1905 * locked pages on the lists free up, 1906 * then continue around and try again. 1907 * 1908 * Will be awakened by set_freemem(). 1909 */ 1910 mutex_enter(&pcgs_wait_lock); 1911 cv_wait(&pcgs_cv, &pcgs_wait_lock); 1912 mutex_exit(&pcgs_wait_lock); 1913 } 1914 } else { 1915 #ifdef VM_STATS 1916 if (count >= PCGS_TRIES) { 1917 VM_STAT_ADD(pcgs_too_many); 1918 } else { 1919 VM_STAT_ADD(pcgs_counts[count]); 1920 } 1921 #endif 1922 if (locked) { 1923 pcgs_unblock(); 1924 mutex_exit(&pcgs_lock); 1925 } 1926 if (cagelocked) 1927 mutex_exit(&pcgs_cagelock); 1928 return (pp); 1929 } 1930 } 1931 /* 1932 * we go down holding the pcf locks. 1933 */ 1934 panic("no %spage found %d", 1935 ((flags & PG_NORELOC) ? "non-reloc " : ""), count); 1936 /*NOTREACHED*/ 1937 } 1938 1939 /* 1940 * Create enough pages for "bytes" worth of data starting at 1941 * "off" in "vp". 1942 * 1943 * Where flag must be one of: 1944 * 1945 * PG_EXCL: Exclusive create (fail if any page already 1946 * exists in the page cache) which does not 1947 * wait for memory to become available. 1948 * 1949 * PG_WAIT: Non-exclusive create which can wait for 1950 * memory to become available. 1951 * 1952 * PG_PHYSCONTIG: Allocate physically contiguous pages. 1953 * (Not Supported) 1954 * 1955 * A doubly linked list of pages is returned to the caller. Each page 1956 * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock) 1957 * lock. 1958 * 1959 * Unable to change the parameters to page_create() in a minor release, 1960 * we renamed page_create() to page_create_va(), changed all known calls 1961 * from page_create() to page_create_va(), and created this wrapper. 1962 * 1963 * Upon a major release, we should break compatibility by deleting this 1964 * wrapper, and replacing all the strings "page_create_va", with "page_create". 1965 * 1966 * NOTE: There is a copy of this interface as page_create_io() in 1967 * i86/vm/vm_machdep.c. Any bugs fixed here should be applied 1968 * there. 1969 */ 1970 page_t * 1971 page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags) 1972 { 1973 caddr_t random_vaddr; 1974 struct seg kseg; 1975 1976 #ifdef DEBUG 1977 cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p", 1978 (void *)caller()); 1979 #endif 1980 1981 random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^ 1982 (uintptr_t)(off >> PAGESHIFT)); 1983 kseg.s_as = &kas; 1984 1985 return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr)); 1986 } 1987 1988 #ifdef DEBUG 1989 uint32_t pg_alloc_pgs_mtbf = 0; 1990 #endif 1991 1992 /* 1993 * Used for large page support. It will attempt to allocate 1994 * a large page(s) off the freelist. 1995 * 1996 * Returns non zero on failure. 1997 */ 1998 int 1999 page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr, 2000 page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz, int pgflags) 2001 { 2002 pgcnt_t npgs, curnpgs, totpgs; 2003 size_t pgsz; 2004 page_t *pplist = NULL, *pp; 2005 int err = 0; 2006 lgrp_t *lgrp; 2007 2008 ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1)); 2009 ASSERT(pgflags == 0 || pgflags == PG_LOCAL); 2010 2011 /* 2012 * Check if system heavily prefers local large pages over remote 2013 * on systems with multiple lgroups. 2014 */ 2015 if (lpg_alloc_prefer == LPAP_LOCAL && nlgrps > 1) { 2016 pgflags = PG_LOCAL; 2017 } 2018 2019 VM_STAT_ADD(alloc_pages[0]); 2020 2021 #ifdef DEBUG 2022 if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) { 2023 return (ENOMEM); 2024 } 2025 #endif 2026 2027 /* 2028 * One must be NULL but not both. 2029 * And one must be non NULL but not both. 2030 */ 2031 ASSERT(basepp != NULL || ppa != NULL); 2032 ASSERT(basepp == NULL || ppa == NULL); 2033 2034 #if defined(__i386) || defined(__amd64) 2035 while (page_chk_freelist(szc) == 0) { 2036 VM_STAT_ADD(alloc_pages[8]); 2037 if (anypgsz == 0 || --szc == 0) 2038 return (ENOMEM); 2039 } 2040 #endif 2041 2042 pgsz = page_get_pagesize(szc); 2043 totpgs = curnpgs = npgs = pgsz >> PAGESHIFT; 2044 2045 ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0); 2046 2047 (void) page_create_wait(npgs, PG_WAIT); 2048 2049 while (npgs && szc) { 2050 lgrp = lgrp_mem_choose(seg, addr, pgsz); 2051 if (pgflags == PG_LOCAL) { 2052 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2053 pgflags, lgrp); 2054 if (pp == NULL) { 2055 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2056 0, lgrp); 2057 } 2058 } else { 2059 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2060 0, lgrp); 2061 } 2062 if (pp != NULL) { 2063 VM_STAT_ADD(alloc_pages[1]); 2064 page_list_concat(&pplist, &pp); 2065 ASSERT(npgs >= curnpgs); 2066 npgs -= curnpgs; 2067 } else if (anypgsz) { 2068 VM_STAT_ADD(alloc_pages[2]); 2069 szc--; 2070 pgsz = page_get_pagesize(szc); 2071 curnpgs = pgsz >> PAGESHIFT; 2072 } else { 2073 VM_STAT_ADD(alloc_pages[3]); 2074 ASSERT(npgs == totpgs); 2075 page_create_putback(npgs); 2076 return (ENOMEM); 2077 } 2078 } 2079 if (szc == 0) { 2080 VM_STAT_ADD(alloc_pages[4]); 2081 ASSERT(npgs != 0); 2082 page_create_putback(npgs); 2083 err = ENOMEM; 2084 } else if (basepp != NULL) { 2085 ASSERT(npgs == 0); 2086 ASSERT(ppa == NULL); 2087 *basepp = pplist; 2088 } 2089 2090 npgs = totpgs - npgs; 2091 pp = pplist; 2092 2093 /* 2094 * Clear the free and age bits. Also if we were passed in a ppa then 2095 * fill it in with all the constituent pages from the large page. But 2096 * if we failed to allocate all the pages just free what we got. 2097 */ 2098 while (npgs != 0) { 2099 ASSERT(PP_ISFREE(pp)); 2100 ASSERT(PP_ISAGED(pp)); 2101 if (ppa != NULL || err != 0) { 2102 if (err == 0) { 2103 VM_STAT_ADD(alloc_pages[5]); 2104 PP_CLRFREE(pp); 2105 PP_CLRAGED(pp); 2106 page_sub(&pplist, pp); 2107 *ppa++ = pp; 2108 npgs--; 2109 } else { 2110 VM_STAT_ADD(alloc_pages[6]); 2111 ASSERT(pp->p_szc != 0); 2112 curnpgs = page_get_pagecnt(pp->p_szc); 2113 page_list_break(&pp, &pplist, curnpgs); 2114 page_list_add_pages(pp, 0); 2115 page_create_putback(curnpgs); 2116 ASSERT(npgs >= curnpgs); 2117 npgs -= curnpgs; 2118 } 2119 pp = pplist; 2120 } else { 2121 VM_STAT_ADD(alloc_pages[7]); 2122 PP_CLRFREE(pp); 2123 PP_CLRAGED(pp); 2124 pp = pp->p_next; 2125 npgs--; 2126 } 2127 } 2128 return (err); 2129 } 2130 2131 /* 2132 * Get a single large page off of the freelists, and set it up for use. 2133 * Number of bytes requested must be a supported page size. 2134 * 2135 * Note that this call may fail even if there is sufficient 2136 * memory available or PG_WAIT is set, so the caller must 2137 * be willing to fallback on page_create_va(), block and retry, 2138 * or fail the requester. 2139 */ 2140 page_t * 2141 page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2142 struct seg *seg, caddr_t vaddr, void *arg) 2143 { 2144 pgcnt_t npages; 2145 page_t *pp; 2146 page_t *rootpp; 2147 lgrp_t *lgrp; 2148 lgrp_id_t *lgrpid = (lgrp_id_t *)arg; 2149 2150 ASSERT(vp != NULL); 2151 2152 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2153 PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0); 2154 /* but no others */ 2155 2156 ASSERT((flags & PG_EXCL) == PG_EXCL); 2157 2158 npages = btop(bytes); 2159 2160 if (!kcage_on || panicstr) { 2161 /* 2162 * Cage is OFF, or we are single threaded in 2163 * panic, so make everything a RELOC request. 2164 */ 2165 flags &= ~PG_NORELOC; 2166 } 2167 2168 /* 2169 * Make sure there's adequate physical memory available. 2170 * Note: PG_WAIT is ignored here. 2171 */ 2172 if (freemem <= throttlefree + npages) { 2173 VM_STAT_ADD(page_create_large_cnt[1]); 2174 return (NULL); 2175 } 2176 2177 /* 2178 * If cage is on, dampen draw from cage when available 2179 * cage space is low. 2180 */ 2181 if ((flags & (PG_NORELOC | PG_WAIT)) == (PG_NORELOC | PG_WAIT) && 2182 kcage_freemem < kcage_throttlefree + npages) { 2183 2184 /* 2185 * The cage is on, the caller wants PG_NORELOC 2186 * pages and available cage memory is very low. 2187 * Call kcage_create_throttle() to attempt to 2188 * control demand on the cage. 2189 */ 2190 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) { 2191 VM_STAT_ADD(page_create_large_cnt[2]); 2192 return (NULL); 2193 } 2194 } 2195 2196 if (!pcf_decrement_bucket(npages) && 2197 !pcf_decrement_multiple(NULL, npages, 1)) { 2198 VM_STAT_ADD(page_create_large_cnt[4]); 2199 return (NULL); 2200 } 2201 2202 /* 2203 * This is where this function behaves fundamentally differently 2204 * than page_create_va(); since we're intending to map the page 2205 * with a single TTE, we have to get it as a physically contiguous 2206 * hardware pagesize chunk. If we can't, we fail. 2207 */ 2208 if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max && 2209 LGRP_EXISTS(lgrp_table[*lgrpid])) 2210 lgrp = lgrp_table[*lgrpid]; 2211 else 2212 lgrp = lgrp_mem_choose(seg, vaddr, bytes); 2213 2214 if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr, 2215 bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) { 2216 page_create_putback(npages); 2217 VM_STAT_ADD(page_create_large_cnt[5]); 2218 return (NULL); 2219 } 2220 2221 /* 2222 * if we got the page with the wrong mtype give it back this is a 2223 * workaround for CR 6249718. When CR 6249718 is fixed we never get 2224 * inside "if" and the workaround becomes just a nop 2225 */ 2226 if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) { 2227 page_list_add_pages(rootpp, 0); 2228 page_create_putback(npages); 2229 VM_STAT_ADD(page_create_large_cnt[6]); 2230 return (NULL); 2231 } 2232 2233 /* 2234 * If satisfying this request has left us with too little 2235 * memory, start the wheels turning to get some back. The 2236 * first clause of the test prevents waking up the pageout 2237 * daemon in situations where it would decide that there's 2238 * nothing to do. 2239 */ 2240 if (nscan < desscan && freemem < minfree) { 2241 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2242 "pageout_cv_signal:freemem %ld", freemem); 2243 cv_signal(&proc_pageout->p_cv); 2244 } 2245 2246 pp = rootpp; 2247 while (npages--) { 2248 ASSERT(PAGE_EXCL(pp)); 2249 ASSERT(pp->p_vnode == NULL); 2250 ASSERT(!hat_page_is_mapped(pp)); 2251 PP_CLRFREE(pp); 2252 PP_CLRAGED(pp); 2253 if (!page_hashin(pp, vp, off, NULL)) 2254 panic("page_create_large: hashin failed: page %p", 2255 (void *)pp); 2256 page_io_lock(pp); 2257 off += PAGESIZE; 2258 pp = pp->p_next; 2259 } 2260 2261 VM_STAT_ADD(page_create_large_cnt[0]); 2262 return (rootpp); 2263 } 2264 2265 page_t * 2266 page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2267 struct seg *seg, caddr_t vaddr) 2268 { 2269 page_t *plist = NULL; 2270 pgcnt_t npages; 2271 pgcnt_t found_on_free = 0; 2272 pgcnt_t pages_req; 2273 page_t *npp = NULL; 2274 struct pcf *p; 2275 lgrp_t *lgrp; 2276 2277 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START, 2278 "page_create_start:vp %p off %llx bytes %lu flags %x", 2279 vp, off, bytes, flags); 2280 2281 ASSERT(bytes != 0 && vp != NULL); 2282 2283 if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) { 2284 panic("page_create: invalid flags"); 2285 /*NOTREACHED*/ 2286 } 2287 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2288 PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0); 2289 /* but no others */ 2290 2291 pages_req = npages = btopr(bytes); 2292 /* 2293 * Try to see whether request is too large to *ever* be 2294 * satisfied, in order to prevent deadlock. We arbitrarily 2295 * decide to limit maximum size requests to max_page_get. 2296 */ 2297 if (npages >= max_page_get) { 2298 if ((flags & PG_WAIT) == 0) { 2299 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG, 2300 "page_create_toobig:vp %p off %llx npages " 2301 "%lu max_page_get %lu", 2302 vp, off, npages, max_page_get); 2303 return (NULL); 2304 } else { 2305 cmn_err(CE_WARN, 2306 "Request for too much kernel memory " 2307 "(%lu bytes), will hang forever", bytes); 2308 for (;;) 2309 delay(1000000000); 2310 } 2311 } 2312 2313 if (!kcage_on || panicstr) { 2314 /* 2315 * Cage is OFF, or we are single threaded in 2316 * panic, so make everything a RELOC request. 2317 */ 2318 flags &= ~PG_NORELOC; 2319 } 2320 2321 if (freemem <= throttlefree + npages) 2322 if (!page_create_throttle(npages, flags)) 2323 return (NULL); 2324 2325 /* 2326 * If cage is on, dampen draw from cage when available 2327 * cage space is low. 2328 */ 2329 if ((flags & PG_NORELOC) && 2330 kcage_freemem < kcage_throttlefree + npages) { 2331 2332 /* 2333 * The cage is on, the caller wants PG_NORELOC 2334 * pages and available cage memory is very low. 2335 * Call kcage_create_throttle() to attempt to 2336 * control demand on the cage. 2337 */ 2338 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) 2339 return (NULL); 2340 } 2341 2342 VM_STAT_ADD(page_create_cnt[0]); 2343 2344 if (!pcf_decrement_bucket(npages)) { 2345 /* 2346 * Have to look harder. If npages is greater than 2347 * one, then we might have to coalesce the counters. 2348 * 2349 * Go wait. We come back having accounted 2350 * for the memory. 2351 */ 2352 VM_STAT_ADD(page_create_cnt[1]); 2353 if (!page_create_wait(npages, flags)) { 2354 VM_STAT_ADD(page_create_cnt[2]); 2355 return (NULL); 2356 } 2357 } 2358 2359 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS, 2360 "page_create_success:vp %p off %llx", vp, off); 2361 2362 /* 2363 * If satisfying this request has left us with too little 2364 * memory, start the wheels turning to get some back. The 2365 * first clause of the test prevents waking up the pageout 2366 * daemon in situations where it would decide that there's 2367 * nothing to do. 2368 */ 2369 if (nscan < desscan && freemem < minfree) { 2370 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2371 "pageout_cv_signal:freemem %ld", freemem); 2372 cv_signal(&proc_pageout->p_cv); 2373 } 2374 2375 /* 2376 * Loop around collecting the requested number of pages. 2377 * Most of the time, we have to `create' a new page. With 2378 * this in mind, pull the page off the free list before 2379 * getting the hash lock. This will minimize the hash 2380 * lock hold time, nesting, and the like. If it turns 2381 * out we don't need the page, we put it back at the end. 2382 */ 2383 while (npages--) { 2384 page_t *pp; 2385 kmutex_t *phm = NULL; 2386 ulong_t index; 2387 2388 index = PAGE_HASH_FUNC(vp, off); 2389 top: 2390 ASSERT(phm == NULL); 2391 ASSERT(index == PAGE_HASH_FUNC(vp, off)); 2392 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 2393 2394 if (npp == NULL) { 2395 /* 2396 * Try to get a page from the freelist (ie, 2397 * a page with no [vp, off] tag). If that 2398 * fails, use the cachelist. 2399 * 2400 * During the first attempt at both the free 2401 * and cache lists we try for the correct color. 2402 */ 2403 /* 2404 * XXXX-how do we deal with virtual indexed 2405 * caches and and colors? 2406 */ 2407 VM_STAT_ADD(page_create_cnt[4]); 2408 /* 2409 * Get lgroup to allocate next page of shared memory 2410 * from and use it to specify where to allocate 2411 * the physical memory 2412 */ 2413 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 2414 npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 2415 flags | PG_MATCH_COLOR, lgrp); 2416 if (npp == NULL) { 2417 npp = page_get_cachelist(vp, off, seg, 2418 vaddr, flags | PG_MATCH_COLOR, lgrp); 2419 if (npp == NULL) { 2420 npp = page_create_get_something(vp, 2421 off, seg, vaddr, 2422 flags & ~PG_MATCH_COLOR); 2423 } 2424 2425 if (PP_ISAGED(npp) == 0) { 2426 /* 2427 * Since this page came from the 2428 * cachelist, we must destroy the 2429 * old vnode association. 2430 */ 2431 page_hashout(npp, NULL); 2432 } 2433 } 2434 } 2435 2436 /* 2437 * We own this page! 2438 */ 2439 ASSERT(PAGE_EXCL(npp)); 2440 ASSERT(npp->p_vnode == NULL); 2441 ASSERT(!hat_page_is_mapped(npp)); 2442 PP_CLRFREE(npp); 2443 PP_CLRAGED(npp); 2444 2445 /* 2446 * Here we have a page in our hot little mits and are 2447 * just waiting to stuff it on the appropriate lists. 2448 * Get the mutex and check to see if it really does 2449 * not exist. 2450 */ 2451 phm = PAGE_HASH_MUTEX(index); 2452 mutex_enter(phm); 2453 PAGE_HASH_SEARCH(index, pp, vp, off); 2454 if (pp == NULL) { 2455 VM_STAT_ADD(page_create_new); 2456 pp = npp; 2457 npp = NULL; 2458 if (!page_hashin(pp, vp, off, phm)) { 2459 /* 2460 * Since we hold the page hash mutex and 2461 * just searched for this page, page_hashin 2462 * had better not fail. If it does, that 2463 * means somethread did not follow the 2464 * page hash mutex rules. Panic now and 2465 * get it over with. As usual, go down 2466 * holding all the locks. 2467 */ 2468 ASSERT(MUTEX_HELD(phm)); 2469 panic("page_create: " 2470 "hashin failed %p %p %llx %p", 2471 (void *)pp, (void *)vp, off, (void *)phm); 2472 /*NOTREACHED*/ 2473 } 2474 ASSERT(MUTEX_HELD(phm)); 2475 mutex_exit(phm); 2476 phm = NULL; 2477 2478 /* 2479 * Hat layer locking need not be done to set 2480 * the following bits since the page is not hashed 2481 * and was on the free list (i.e., had no mappings). 2482 * 2483 * Set the reference bit to protect 2484 * against immediate pageout 2485 * 2486 * XXXmh modify freelist code to set reference 2487 * bit so we don't have to do it here. 2488 */ 2489 page_set_props(pp, P_REF); 2490 found_on_free++; 2491 } else { 2492 VM_STAT_ADD(page_create_exists); 2493 if (flags & PG_EXCL) { 2494 /* 2495 * Found an existing page, and the caller 2496 * wanted all new pages. Undo all of the work 2497 * we have done. 2498 */ 2499 mutex_exit(phm); 2500 phm = NULL; 2501 while (plist != NULL) { 2502 pp = plist; 2503 page_sub(&plist, pp); 2504 page_io_unlock(pp); 2505 /* large pages should not end up here */ 2506 ASSERT(pp->p_szc == 0); 2507 /*LINTED: constant in conditional ctx*/ 2508 VN_DISPOSE(pp, B_INVAL, 0, kcred); 2509 } 2510 VM_STAT_ADD(page_create_found_one); 2511 goto fail; 2512 } 2513 ASSERT(flags & PG_WAIT); 2514 if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) { 2515 /* 2516 * Start all over again if we blocked trying 2517 * to lock the page. 2518 */ 2519 mutex_exit(phm); 2520 VM_STAT_ADD(page_create_page_lock_failed); 2521 phm = NULL; 2522 goto top; 2523 } 2524 mutex_exit(phm); 2525 phm = NULL; 2526 2527 if (PP_ISFREE(pp)) { 2528 ASSERT(PP_ISAGED(pp) == 0); 2529 VM_STAT_ADD(pagecnt.pc_get_cache); 2530 page_list_sub(pp, PG_CACHE_LIST); 2531 PP_CLRFREE(pp); 2532 found_on_free++; 2533 } 2534 } 2535 2536 /* 2537 * Got a page! It is locked. Acquire the i/o 2538 * lock since we are going to use the p_next and 2539 * p_prev fields to link the requested pages together. 2540 */ 2541 page_io_lock(pp); 2542 page_add(&plist, pp); 2543 plist = plist->p_next; 2544 off += PAGESIZE; 2545 vaddr += PAGESIZE; 2546 } 2547 2548 ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1); 2549 fail: 2550 if (npp != NULL) { 2551 /* 2552 * Did not need this page after all. 2553 * Put it back on the free list. 2554 */ 2555 VM_STAT_ADD(page_create_putbacks); 2556 PP_SETFREE(npp); 2557 PP_SETAGED(npp); 2558 npp->p_offset = (u_offset_t)-1; 2559 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL); 2560 page_unlock(npp); 2561 2562 } 2563 2564 ASSERT(pages_req >= found_on_free); 2565 2566 { 2567 uint_t overshoot = (uint_t)(pages_req - found_on_free); 2568 2569 if (overshoot) { 2570 VM_STAT_ADD(page_create_overshoot); 2571 p = &pcf[PCF_INDEX()]; 2572 mutex_enter(&p->pcf_lock); 2573 if (p->pcf_block) { 2574 p->pcf_reserve += overshoot; 2575 } else { 2576 p->pcf_count += overshoot; 2577 if (p->pcf_wait) { 2578 mutex_enter(&new_freemem_lock); 2579 if (freemem_wait) { 2580 cv_signal(&freemem_cv); 2581 p->pcf_wait--; 2582 } else { 2583 p->pcf_wait = 0; 2584 } 2585 mutex_exit(&new_freemem_lock); 2586 } 2587 } 2588 mutex_exit(&p->pcf_lock); 2589 /* freemem is approximate, so this test OK */ 2590 if (!p->pcf_block) 2591 freemem += overshoot; 2592 } 2593 } 2594 2595 return (plist); 2596 } 2597 2598 /* 2599 * One or more constituent pages of this large page has been marked 2600 * toxic. Simply demote the large page to PAGESIZE pages and let 2601 * page_free() handle it. This routine should only be called by 2602 * large page free routines (page_free_pages() and page_destroy_pages(). 2603 * All pages are locked SE_EXCL and have already been marked free. 2604 */ 2605 static void 2606 page_free_toxic_pages(page_t *rootpp) 2607 { 2608 page_t *tpp; 2609 pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc); 2610 uint_t szc = rootpp->p_szc; 2611 2612 for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) { 2613 ASSERT(tpp->p_szc == szc); 2614 ASSERT((PAGE_EXCL(tpp) && 2615 !page_iolock_assert(tpp)) || panicstr); 2616 tpp->p_szc = 0; 2617 } 2618 2619 while (rootpp != NULL) { 2620 tpp = rootpp; 2621 page_sub(&rootpp, tpp); 2622 ASSERT(PP_ISFREE(tpp)); 2623 PP_CLRFREE(tpp); 2624 page_free(tpp, 1); 2625 } 2626 } 2627 2628 /* 2629 * Put page on the "free" list. 2630 * The free list is really two lists maintained by 2631 * the PSM of whatever machine we happen to be on. 2632 */ 2633 void 2634 page_free(page_t *pp, int dontneed) 2635 { 2636 struct pcf *p; 2637 uint_t pcf_index; 2638 2639 ASSERT((PAGE_EXCL(pp) && 2640 !page_iolock_assert(pp)) || panicstr); 2641 2642 if (PP_ISFREE(pp)) { 2643 panic("page_free: page %p is free", (void *)pp); 2644 } 2645 2646 if (pp->p_szc != 0) { 2647 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 2648 PP_ISKAS(pp)) { 2649 panic("page_free: anon or kernel " 2650 "or no vnode large page %p", (void *)pp); 2651 } 2652 page_demote_vp_pages(pp); 2653 ASSERT(pp->p_szc == 0); 2654 } 2655 2656 /* 2657 * The page_struct_lock need not be acquired to examine these 2658 * fields since the page has an "exclusive" lock. 2659 */ 2660 if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 2661 pp->p_slckcnt != 0) { 2662 panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d " 2663 "slckcnt = %d", (void *)pp, page_pptonum(pp), pp->p_lckcnt, 2664 pp->p_cowcnt, pp->p_slckcnt); 2665 /*NOTREACHED*/ 2666 } 2667 2668 ASSERT(!hat_page_getshare(pp)); 2669 2670 PP_SETFREE(pp); 2671 ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) || 2672 !hat_ismod(pp)); 2673 page_clr_all_props(pp); 2674 ASSERT(!hat_page_getshare(pp)); 2675 2676 /* 2677 * Now we add the page to the head of the free list. 2678 * But if this page is associated with a paged vnode 2679 * then we adjust the head forward so that the page is 2680 * effectively at the end of the list. 2681 */ 2682 if (pp->p_vnode == NULL) { 2683 /* 2684 * Page has no identity, put it on the free list. 2685 */ 2686 PP_SETAGED(pp); 2687 pp->p_offset = (u_offset_t)-1; 2688 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 2689 VM_STAT_ADD(pagecnt.pc_free_free); 2690 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2691 "page_free_free:pp %p", pp); 2692 } else { 2693 PP_CLRAGED(pp); 2694 2695 if (!dontneed || nopageage) { 2696 /* move it to the tail of the list */ 2697 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL); 2698 2699 VM_STAT_ADD(pagecnt.pc_free_cache); 2700 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL, 2701 "page_free_cache_tail:pp %p", pp); 2702 } else { 2703 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD); 2704 2705 VM_STAT_ADD(pagecnt.pc_free_dontneed); 2706 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD, 2707 "page_free_cache_head:pp %p", pp); 2708 } 2709 } 2710 page_unlock(pp); 2711 2712 /* 2713 * Now do the `freemem' accounting. 2714 */ 2715 pcf_index = PCF_INDEX(); 2716 p = &pcf[pcf_index]; 2717 2718 mutex_enter(&p->pcf_lock); 2719 if (p->pcf_block) { 2720 p->pcf_reserve += 1; 2721 } else { 2722 p->pcf_count += 1; 2723 if (p->pcf_wait) { 2724 mutex_enter(&new_freemem_lock); 2725 /* 2726 * Check to see if some other thread 2727 * is actually waiting. Another bucket 2728 * may have woken it up by now. If there 2729 * are no waiters, then set our pcf_wait 2730 * count to zero to avoid coming in here 2731 * next time. Also, since only one page 2732 * was put on the free list, just wake 2733 * up one waiter. 2734 */ 2735 if (freemem_wait) { 2736 cv_signal(&freemem_cv); 2737 p->pcf_wait--; 2738 } else { 2739 p->pcf_wait = 0; 2740 } 2741 mutex_exit(&new_freemem_lock); 2742 } 2743 } 2744 mutex_exit(&p->pcf_lock); 2745 2746 /* freemem is approximate, so this test OK */ 2747 if (!p->pcf_block) 2748 freemem += 1; 2749 } 2750 2751 /* 2752 * Put page on the "free" list during intial startup. 2753 * This happens during initial single threaded execution. 2754 */ 2755 void 2756 page_free_at_startup(page_t *pp) 2757 { 2758 struct pcf *p; 2759 uint_t pcf_index; 2760 2761 page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT); 2762 VM_STAT_ADD(pagecnt.pc_free_free); 2763 2764 /* 2765 * Now do the `freemem' accounting. 2766 */ 2767 pcf_index = PCF_INDEX(); 2768 p = &pcf[pcf_index]; 2769 2770 ASSERT(p->pcf_block == 0); 2771 ASSERT(p->pcf_wait == 0); 2772 p->pcf_count += 1; 2773 2774 /* freemem is approximate, so this is OK */ 2775 freemem += 1; 2776 } 2777 2778 void 2779 page_free_pages(page_t *pp) 2780 { 2781 page_t *tpp, *rootpp = NULL; 2782 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 2783 pgcnt_t i; 2784 uint_t szc = pp->p_szc; 2785 2786 VM_STAT_ADD(pagecnt.pc_free_pages); 2787 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2788 "page_free_free:pp %p", pp); 2789 2790 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 2791 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 2792 panic("page_free_pages: not root page %p", (void *)pp); 2793 /*NOTREACHED*/ 2794 } 2795 2796 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 2797 ASSERT((PAGE_EXCL(tpp) && 2798 !page_iolock_assert(tpp)) || panicstr); 2799 if (PP_ISFREE(tpp)) { 2800 panic("page_free_pages: page %p is free", (void *)tpp); 2801 /*NOTREACHED*/ 2802 } 2803 if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 || 2804 tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) { 2805 panic("page_free_pages %p", (void *)tpp); 2806 /*NOTREACHED*/ 2807 } 2808 2809 ASSERT(!hat_page_getshare(tpp)); 2810 ASSERT(tpp->p_vnode == NULL); 2811 ASSERT(tpp->p_szc == szc); 2812 2813 PP_SETFREE(tpp); 2814 page_clr_all_props(tpp); 2815 PP_SETAGED(tpp); 2816 tpp->p_offset = (u_offset_t)-1; 2817 ASSERT(tpp->p_next == tpp); 2818 ASSERT(tpp->p_prev == tpp); 2819 page_list_concat(&rootpp, &tpp); 2820 } 2821 ASSERT(rootpp == pp); 2822 2823 page_list_add_pages(rootpp, 0); 2824 page_create_putback(pgcnt); 2825 } 2826 2827 int free_pages = 1; 2828 2829 /* 2830 * This routine attempts to return pages to the cachelist via page_release(). 2831 * It does not *have* to be successful in all cases, since the pageout scanner 2832 * will catch any pages it misses. It does need to be fast and not introduce 2833 * too much overhead. 2834 * 2835 * If a page isn't found on the unlocked sweep of the page_hash bucket, we 2836 * don't lock and retry. This is ok, since the page scanner will eventually 2837 * find any page we miss in free_vp_pages(). 2838 */ 2839 void 2840 free_vp_pages(vnode_t *vp, u_offset_t off, size_t len) 2841 { 2842 page_t *pp; 2843 u_offset_t eoff; 2844 extern int swap_in_range(vnode_t *, u_offset_t, size_t); 2845 2846 eoff = off + len; 2847 2848 if (free_pages == 0) 2849 return; 2850 if (swap_in_range(vp, off, len)) 2851 return; 2852 2853 for (; off < eoff; off += PAGESIZE) { 2854 2855 /* 2856 * find the page using a fast, but inexact search. It'll be OK 2857 * if a few pages slip through the cracks here. 2858 */ 2859 pp = page_exists(vp, off); 2860 2861 /* 2862 * If we didn't find the page (it may not exist), the page 2863 * is free, looks still in use (shared), or we can't lock it, 2864 * just give up. 2865 */ 2866 if (pp == NULL || 2867 PP_ISFREE(pp) || 2868 page_share_cnt(pp) > 0 || 2869 !page_trylock(pp, SE_EXCL)) 2870 continue; 2871 2872 /* 2873 * Once we have locked pp, verify that it's still the 2874 * correct page and not already free 2875 */ 2876 ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL)); 2877 if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) { 2878 page_unlock(pp); 2879 continue; 2880 } 2881 2882 /* 2883 * try to release the page... 2884 */ 2885 (void) page_release(pp, 1); 2886 } 2887 } 2888 2889 /* 2890 * Reclaim the given page from the free list. 2891 * If pp is part of a large pages, only the given constituent page is reclaimed 2892 * and the large page it belonged to will be demoted. This can only happen 2893 * if the page is not on the cachelist. 2894 * 2895 * Returns 1 on success or 0 on failure. 2896 * 2897 * The page is unlocked if it can't be reclaimed (when freemem == 0). 2898 * If `lock' is non-null, it will be dropped and re-acquired if 2899 * the routine must wait while freemem is 0. 2900 * 2901 * As it turns out, boot_getpages() does this. It picks a page, 2902 * based on where OBP mapped in some address, gets its pfn, searches 2903 * the memsegs, locks the page, then pulls it off the free list! 2904 */ 2905 int 2906 page_reclaim(page_t *pp, kmutex_t *lock) 2907 { 2908 struct pcf *p; 2909 struct cpu *cpup; 2910 int enough; 2911 uint_t i; 2912 2913 ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1); 2914 ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp)); 2915 2916 /* 2917 * If `freemem' is 0, we cannot reclaim this page from the 2918 * freelist, so release every lock we might hold: the page, 2919 * and the `lock' before blocking. 2920 * 2921 * The only way `freemem' can become 0 while there are pages 2922 * marked free (have their p->p_free bit set) is when the 2923 * system is low on memory and doing a page_create(). In 2924 * order to guarantee that once page_create() starts acquiring 2925 * pages it will be able to get all that it needs since `freemem' 2926 * was decreased by the requested amount. So, we need to release 2927 * this page, and let page_create() have it. 2928 * 2929 * Since `freemem' being zero is not supposed to happen, just 2930 * use the usual hash stuff as a starting point. If that bucket 2931 * is empty, then assume the worst, and start at the beginning 2932 * of the pcf array. If we always start at the beginning 2933 * when acquiring more than one pcf lock, there won't be any 2934 * deadlock problems. 2935 */ 2936 2937 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */ 2938 2939 if (freemem <= throttlefree && !page_create_throttle(1l, 0)) { 2940 pcf_acquire_all(); 2941 goto page_reclaim_nomem; 2942 } 2943 2944 enough = pcf_decrement_bucket(1); 2945 2946 if (!enough) { 2947 VM_STAT_ADD(page_reclaim_zero); 2948 /* 2949 * Check again. Its possible that some other thread 2950 * could have been right behind us, and added one 2951 * to a list somewhere. Acquire each of the pcf locks 2952 * until we find a page. 2953 */ 2954 p = pcf; 2955 for (i = 0; i < pcf_fanout; i++) { 2956 mutex_enter(&p->pcf_lock); 2957 if (p->pcf_count >= 1) { 2958 p->pcf_count -= 1; 2959 /* 2960 * freemem is not protected by any lock. Thus, 2961 * we cannot have any assertion containing 2962 * freemem here. 2963 */ 2964 freemem -= 1; 2965 enough = 1; 2966 break; 2967 } 2968 p++; 2969 } 2970 2971 if (!enough) { 2972 page_reclaim_nomem: 2973 /* 2974 * We really can't have page `pp'. 2975 * Time for the no-memory dance with 2976 * page_free(). This is just like 2977 * page_create_wait(). Plus the added 2978 * attraction of releasing whatever mutex 2979 * we held when we were called with in `lock'. 2980 * Page_unlock() will wakeup any thread 2981 * waiting around for this page. 2982 */ 2983 if (lock) { 2984 VM_STAT_ADD(page_reclaim_zero_locked); 2985 mutex_exit(lock); 2986 } 2987 page_unlock(pp); 2988 2989 /* 2990 * get this before we drop all the pcf locks. 2991 */ 2992 mutex_enter(&new_freemem_lock); 2993 2994 p = pcf; 2995 for (i = 0; i < pcf_fanout; i++) { 2996 p->pcf_wait++; 2997 mutex_exit(&p->pcf_lock); 2998 p++; 2999 } 3000 3001 freemem_wait++; 3002 cv_wait(&freemem_cv, &new_freemem_lock); 3003 freemem_wait--; 3004 3005 mutex_exit(&new_freemem_lock); 3006 3007 if (lock) { 3008 mutex_enter(lock); 3009 } 3010 return (0); 3011 } 3012 3013 /* 3014 * The pcf accounting has been done, 3015 * though none of the pcf_wait flags have been set, 3016 * drop the locks and continue on. 3017 */ 3018 while (p >= pcf) { 3019 mutex_exit(&p->pcf_lock); 3020 p--; 3021 } 3022 } 3023 3024 3025 VM_STAT_ADD(pagecnt.pc_reclaim); 3026 3027 /* 3028 * page_list_sub will handle the case where pp is a large page. 3029 * It's possible that the page was promoted while on the freelist 3030 */ 3031 if (PP_ISAGED(pp)) { 3032 page_list_sub(pp, PG_FREE_LIST); 3033 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE, 3034 "page_reclaim_free:pp %p", pp); 3035 } else { 3036 page_list_sub(pp, PG_CACHE_LIST); 3037 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE, 3038 "page_reclaim_cache:pp %p", pp); 3039 } 3040 3041 /* 3042 * clear the p_free & p_age bits since this page is no longer 3043 * on the free list. Notice that there was a brief time where 3044 * a page is marked as free, but is not on the list. 3045 * 3046 * Set the reference bit to protect against immediate pageout. 3047 */ 3048 PP_CLRFREE(pp); 3049 PP_CLRAGED(pp); 3050 page_set_props(pp, P_REF); 3051 3052 CPU_STATS_ENTER_K(); 3053 cpup = CPU; /* get cpup now that CPU cannot change */ 3054 CPU_STATS_ADDQ(cpup, vm, pgrec, 1); 3055 CPU_STATS_ADDQ(cpup, vm, pgfrec, 1); 3056 CPU_STATS_EXIT_K(); 3057 ASSERT(pp->p_szc == 0); 3058 3059 return (1); 3060 } 3061 3062 /* 3063 * Destroy identity of the page and put it back on 3064 * the page free list. Assumes that the caller has 3065 * acquired the "exclusive" lock on the page. 3066 */ 3067 void 3068 page_destroy(page_t *pp, int dontfree) 3069 { 3070 ASSERT((PAGE_EXCL(pp) && 3071 !page_iolock_assert(pp)) || panicstr); 3072 ASSERT(pp->p_slckcnt == 0 || panicstr); 3073 3074 if (pp->p_szc != 0) { 3075 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 3076 PP_ISKAS(pp)) { 3077 panic("page_destroy: anon or kernel or no vnode " 3078 "large page %p", (void *)pp); 3079 } 3080 page_demote_vp_pages(pp); 3081 ASSERT(pp->p_szc == 0); 3082 } 3083 3084 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp); 3085 3086 /* 3087 * Unload translations, if any, then hash out the 3088 * page to erase its identity. 3089 */ 3090 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3091 page_hashout(pp, NULL); 3092 3093 if (!dontfree) { 3094 /* 3095 * Acquire the "freemem_lock" for availrmem. 3096 * The page_struct_lock need not be acquired for lckcnt 3097 * and cowcnt since the page has an "exclusive" lock. 3098 * We are doing a modified version of page_pp_unlock here. 3099 */ 3100 if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) { 3101 if (pp->p_lckcnt != 0) { 3102 /* 3103 * Page has not been unlocked via 3104 * page_pp_unlock(). Therefore 3105 * anon_swap_restore() is called to unlock 3106 * swap for this page so its swap can be 3107 * unreserved. 3108 */ 3109 anon_swap_restore(1); 3110 mutex_enter(&pages_locked_lock); 3111 pages_locked--; 3112 mutex_exit(&pages_locked_lock); 3113 pp->p_lckcnt = 0; 3114 } 3115 if (pp->p_cowcnt != 0) { 3116 anon_swap_restore(pp->p_cowcnt); 3117 mutex_enter(&pages_locked_lock); 3118 pages_locked -= pp->p_cowcnt; 3119 mutex_exit(&pages_locked_lock); 3120 pp->p_cowcnt = 0; 3121 } 3122 } 3123 /* 3124 * Put the page on the "free" list. 3125 */ 3126 page_free(pp, 0); 3127 } 3128 } 3129 3130 void 3131 page_destroy_pages(page_t *pp) 3132 { 3133 3134 page_t *tpp, *rootpp = NULL; 3135 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 3136 pgcnt_t i, pglcks = 0; 3137 uint_t szc = pp->p_szc; 3138 3139 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 3140 3141 VM_STAT_ADD(pagecnt.pc_destroy_pages); 3142 3143 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp); 3144 3145 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 3146 panic("page_destroy_pages: not root page %p", (void *)pp); 3147 /*NOTREACHED*/ 3148 } 3149 3150 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 3151 ASSERT((PAGE_EXCL(tpp) && 3152 !page_iolock_assert(tpp)) || panicstr); 3153 ASSERT(tpp->p_slckcnt == 0 || panicstr); 3154 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 3155 page_hashout(tpp, NULL); 3156 ASSERT(tpp->p_offset == (u_offset_t)-1); 3157 if (tpp->p_lckcnt != 0) { 3158 pglcks++; 3159 tpp->p_lckcnt = 0; 3160 } else if (tpp->p_cowcnt != 0) { 3161 pglcks += tpp->p_cowcnt; 3162 tpp->p_cowcnt = 0; 3163 } 3164 ASSERT(!hat_page_getshare(tpp)); 3165 ASSERT(tpp->p_vnode == NULL); 3166 ASSERT(tpp->p_szc == szc); 3167 3168 PP_SETFREE(tpp); 3169 page_clr_all_props(tpp); 3170 PP_SETAGED(tpp); 3171 ASSERT(tpp->p_next == tpp); 3172 ASSERT(tpp->p_prev == tpp); 3173 page_list_concat(&rootpp, &tpp); 3174 } 3175 3176 ASSERT(rootpp == pp); 3177 if (pglcks != 0) { 3178 mutex_enter(&freemem_lock); 3179 availrmem += pglcks; 3180 mutex_exit(&freemem_lock); 3181 } 3182 3183 page_list_add_pages(rootpp, 0); 3184 page_create_putback(pgcnt); 3185 } 3186 3187 /* 3188 * Similar to page_destroy(), but destroys pages which are 3189 * locked and known to be on the page free list. Since 3190 * the page is known to be free and locked, no one can access 3191 * it. 3192 * 3193 * Also, the number of free pages does not change. 3194 */ 3195 void 3196 page_destroy_free(page_t *pp) 3197 { 3198 ASSERT(PAGE_EXCL(pp)); 3199 ASSERT(PP_ISFREE(pp)); 3200 ASSERT(pp->p_vnode); 3201 ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0); 3202 ASSERT(!hat_page_is_mapped(pp)); 3203 ASSERT(PP_ISAGED(pp) == 0); 3204 ASSERT(pp->p_szc == 0); 3205 3206 VM_STAT_ADD(pagecnt.pc_destroy_free); 3207 page_list_sub(pp, PG_CACHE_LIST); 3208 3209 page_hashout(pp, NULL); 3210 ASSERT(pp->p_vnode == NULL); 3211 ASSERT(pp->p_offset == (u_offset_t)-1); 3212 ASSERT(pp->p_hash == NULL); 3213 3214 PP_SETAGED(pp); 3215 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 3216 page_unlock(pp); 3217 3218 mutex_enter(&new_freemem_lock); 3219 if (freemem_wait) { 3220 cv_signal(&freemem_cv); 3221 } 3222 mutex_exit(&new_freemem_lock); 3223 } 3224 3225 /* 3226 * Rename the page "opp" to have an identity specified 3227 * by [vp, off]. If a page already exists with this name 3228 * it is locked and destroyed. Note that the page's 3229 * translations are not unloaded during the rename. 3230 * 3231 * This routine is used by the anon layer to "steal" the 3232 * original page and is not unlike destroying a page and 3233 * creating a new page using the same page frame. 3234 * 3235 * XXX -- Could deadlock if caller 1 tries to rename A to B while 3236 * caller 2 tries to rename B to A. 3237 */ 3238 void 3239 page_rename(page_t *opp, vnode_t *vp, u_offset_t off) 3240 { 3241 page_t *pp; 3242 int olckcnt = 0; 3243 int ocowcnt = 0; 3244 kmutex_t *phm; 3245 ulong_t index; 3246 3247 ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp)); 3248 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3249 ASSERT(PP_ISFREE(opp) == 0); 3250 3251 VM_STAT_ADD(page_rename_count); 3252 3253 TRACE_3(TR_FAC_VM, TR_PAGE_RENAME, 3254 "page rename:pp %p vp %p off %llx", opp, vp, off); 3255 3256 /* 3257 * CacheFS may call page_rename for a large NFS page 3258 * when both CacheFS and NFS mount points are used 3259 * by applications. Demote this large page before 3260 * renaming it, to ensure that there are no "partial" 3261 * large pages left lying around. 3262 */ 3263 if (opp->p_szc != 0) { 3264 vnode_t *ovp = opp->p_vnode; 3265 ASSERT(ovp != NULL); 3266 ASSERT(!IS_SWAPFSVP(ovp)); 3267 ASSERT(!VN_ISKAS(ovp)); 3268 page_demote_vp_pages(opp); 3269 ASSERT(opp->p_szc == 0); 3270 } 3271 3272 page_hashout(opp, NULL); 3273 PP_CLRAGED(opp); 3274 3275 /* 3276 * Acquire the appropriate page hash lock, since 3277 * we're going to rename the page. 3278 */ 3279 index = PAGE_HASH_FUNC(vp, off); 3280 phm = PAGE_HASH_MUTEX(index); 3281 mutex_enter(phm); 3282 top: 3283 /* 3284 * Look for an existing page with this name and destroy it if found. 3285 * By holding the page hash lock all the way to the page_hashin() 3286 * call, we are assured that no page can be created with this 3287 * identity. In the case when the phm lock is dropped to undo any 3288 * hat layer mappings, the existing page is held with an "exclusive" 3289 * lock, again preventing another page from being created with 3290 * this identity. 3291 */ 3292 PAGE_HASH_SEARCH(index, pp, vp, off); 3293 if (pp != NULL) { 3294 VM_STAT_ADD(page_rename_exists); 3295 3296 /* 3297 * As it turns out, this is one of only two places where 3298 * page_lock() needs to hold the passed in lock in the 3299 * successful case. In all of the others, the lock could 3300 * be dropped as soon as the attempt is made to lock 3301 * the page. It is tempting to add yet another arguement, 3302 * PL_KEEP or PL_DROP, to let page_lock know what to do. 3303 */ 3304 if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) { 3305 /* 3306 * Went to sleep because the page could not 3307 * be locked. We were woken up when the page 3308 * was unlocked, or when the page was destroyed. 3309 * In either case, `phm' was dropped while we 3310 * slept. Hence we should not just roar through 3311 * this loop. 3312 */ 3313 goto top; 3314 } 3315 3316 /* 3317 * If an existing page is a large page, then demote 3318 * it to ensure that no "partial" large pages are 3319 * "created" after page_rename. An existing page 3320 * can be a CacheFS page, and can't belong to swapfs. 3321 */ 3322 if (hat_page_is_mapped(pp)) { 3323 /* 3324 * Unload translations. Since we hold the 3325 * exclusive lock on this page, the page 3326 * can not be changed while we drop phm. 3327 * This is also not a lock protocol violation, 3328 * but rather the proper way to do things. 3329 */ 3330 mutex_exit(phm); 3331 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3332 if (pp->p_szc != 0) { 3333 ASSERT(!IS_SWAPFSVP(vp)); 3334 ASSERT(!VN_ISKAS(vp)); 3335 page_demote_vp_pages(pp); 3336 ASSERT(pp->p_szc == 0); 3337 } 3338 mutex_enter(phm); 3339 } else if (pp->p_szc != 0) { 3340 ASSERT(!IS_SWAPFSVP(vp)); 3341 ASSERT(!VN_ISKAS(vp)); 3342 mutex_exit(phm); 3343 page_demote_vp_pages(pp); 3344 ASSERT(pp->p_szc == 0); 3345 mutex_enter(phm); 3346 } 3347 page_hashout(pp, phm); 3348 } 3349 /* 3350 * Hash in the page with the new identity. 3351 */ 3352 if (!page_hashin(opp, vp, off, phm)) { 3353 /* 3354 * We were holding phm while we searched for [vp, off] 3355 * and only dropped phm if we found and locked a page. 3356 * If we can't create this page now, then some thing 3357 * is really broken. 3358 */ 3359 panic("page_rename: Can't hash in page: %p", (void *)pp); 3360 /*NOTREACHED*/ 3361 } 3362 3363 ASSERT(MUTEX_HELD(phm)); 3364 mutex_exit(phm); 3365 3366 /* 3367 * Now that we have dropped phm, lets get around to finishing up 3368 * with pp. 3369 */ 3370 if (pp != NULL) { 3371 ASSERT(!hat_page_is_mapped(pp)); 3372 /* for now large pages should not end up here */ 3373 ASSERT(pp->p_szc == 0); 3374 /* 3375 * Save the locks for transfer to the new page and then 3376 * clear them so page_free doesn't think they're important. 3377 * The page_struct_lock need not be acquired for lckcnt and 3378 * cowcnt since the page has an "exclusive" lock. 3379 */ 3380 olckcnt = pp->p_lckcnt; 3381 ocowcnt = pp->p_cowcnt; 3382 pp->p_lckcnt = pp->p_cowcnt = 0; 3383 3384 /* 3385 * Put the page on the "free" list after we drop 3386 * the lock. The less work under the lock the better. 3387 */ 3388 /*LINTED: constant in conditional context*/ 3389 VN_DISPOSE(pp, B_FREE, 0, kcred); 3390 } 3391 3392 /* 3393 * Transfer the lock count from the old page (if any). 3394 * The page_struct_lock need not be acquired for lckcnt and 3395 * cowcnt since the page has an "exclusive" lock. 3396 */ 3397 opp->p_lckcnt += olckcnt; 3398 opp->p_cowcnt += ocowcnt; 3399 } 3400 3401 /* 3402 * low level routine to add page `pp' to the hash and vp chains for [vp, offset] 3403 * 3404 * Pages are normally inserted at the start of a vnode's v_pages list. 3405 * If the vnode is VMODSORT and the page is modified, it goes at the end. 3406 * This can happen when a modified page is relocated for DR. 3407 * 3408 * Returns 1 on success and 0 on failure. 3409 */ 3410 static int 3411 page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset) 3412 { 3413 page_t **listp; 3414 page_t *tp; 3415 ulong_t index; 3416 3417 ASSERT(PAGE_EXCL(pp)); 3418 ASSERT(vp != NULL); 3419 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3420 3421 /* 3422 * Be sure to set these up before the page is inserted on the hash 3423 * list. As soon as the page is placed on the list some other 3424 * thread might get confused and wonder how this page could 3425 * possibly hash to this list. 3426 */ 3427 pp->p_vnode = vp; 3428 pp->p_offset = offset; 3429 3430 /* 3431 * record if this page is on a swap vnode 3432 */ 3433 if ((vp->v_flag & VISSWAP) != 0) 3434 PP_SETSWAP(pp); 3435 3436 index = PAGE_HASH_FUNC(vp, offset); 3437 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index))); 3438 listp = &page_hash[index]; 3439 3440 /* 3441 * If this page is already hashed in, fail this attempt to add it. 3442 */ 3443 for (tp = *listp; tp != NULL; tp = tp->p_hash) { 3444 if (tp->p_vnode == vp && tp->p_offset == offset) { 3445 pp->p_vnode = NULL; 3446 pp->p_offset = (u_offset_t)(-1); 3447 return (0); 3448 } 3449 } 3450 pp->p_hash = *listp; 3451 *listp = pp; 3452 3453 /* 3454 * Add the page to the vnode's list of pages 3455 */ 3456 if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp)) 3457 listp = &vp->v_pages->p_vpprev->p_vpnext; 3458 else 3459 listp = &vp->v_pages; 3460 3461 page_vpadd(listp, pp); 3462 3463 return (1); 3464 } 3465 3466 /* 3467 * Add page `pp' to both the hash and vp chains for [vp, offset]. 3468 * 3469 * Returns 1 on success and 0 on failure. 3470 * If hold is passed in, it is not dropped. 3471 */ 3472 int 3473 page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold) 3474 { 3475 kmutex_t *phm = NULL; 3476 kmutex_t *vphm; 3477 int rc; 3478 3479 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3480 ASSERT(pp->p_fsdata == 0 || panicstr); 3481 3482 TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN, 3483 "page_hashin:pp %p vp %p offset %llx", 3484 pp, vp, offset); 3485 3486 VM_STAT_ADD(hashin_count); 3487 3488 if (hold != NULL) 3489 phm = hold; 3490 else { 3491 VM_STAT_ADD(hashin_not_held); 3492 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset)); 3493 mutex_enter(phm); 3494 } 3495 3496 vphm = page_vnode_mutex(vp); 3497 mutex_enter(vphm); 3498 rc = page_do_hashin(pp, vp, offset); 3499 mutex_exit(vphm); 3500 if (hold == NULL) 3501 mutex_exit(phm); 3502 if (rc == 0) 3503 VM_STAT_ADD(hashin_already); 3504 return (rc); 3505 } 3506 3507 /* 3508 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3509 * All mutexes must be held 3510 */ 3511 static void 3512 page_do_hashout(page_t *pp) 3513 { 3514 page_t **hpp; 3515 page_t *hp; 3516 vnode_t *vp = pp->p_vnode; 3517 3518 ASSERT(vp != NULL); 3519 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3520 3521 /* 3522 * First, take pp off of its hash chain. 3523 */ 3524 hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)]; 3525 3526 for (;;) { 3527 hp = *hpp; 3528 if (hp == pp) 3529 break; 3530 if (hp == NULL) { 3531 panic("page_do_hashout"); 3532 /*NOTREACHED*/ 3533 } 3534 hpp = &hp->p_hash; 3535 } 3536 *hpp = pp->p_hash; 3537 3538 /* 3539 * Now remove it from its associated vnode. 3540 */ 3541 if (vp->v_pages) 3542 page_vpsub(&vp->v_pages, pp); 3543 3544 pp->p_hash = NULL; 3545 page_clr_all_props(pp); 3546 PP_CLRSWAP(pp); 3547 pp->p_vnode = NULL; 3548 pp->p_offset = (u_offset_t)-1; 3549 pp->p_fsdata = 0; 3550 } 3551 3552 /* 3553 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3554 * 3555 * When `phm' is non-NULL it contains the address of the mutex protecting the 3556 * hash list pp is on. It is not dropped. 3557 */ 3558 void 3559 page_hashout(page_t *pp, kmutex_t *phm) 3560 { 3561 vnode_t *vp; 3562 ulong_t index; 3563 kmutex_t *nphm; 3564 kmutex_t *vphm; 3565 kmutex_t *sep; 3566 3567 ASSERT(phm != NULL ? MUTEX_HELD(phm) : 1); 3568 ASSERT(pp->p_vnode != NULL); 3569 ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr); 3570 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode))); 3571 3572 vp = pp->p_vnode; 3573 3574 TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT, 3575 "page_hashout:pp %p vp %p", pp, vp); 3576 3577 /* Kernel probe */ 3578 TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */, 3579 tnf_opaque, vnode, vp, 3580 tnf_offset, offset, pp->p_offset); 3581 3582 /* 3583 * 3584 */ 3585 VM_STAT_ADD(hashout_count); 3586 index = PAGE_HASH_FUNC(vp, pp->p_offset); 3587 if (phm == NULL) { 3588 VM_STAT_ADD(hashout_not_held); 3589 nphm = PAGE_HASH_MUTEX(index); 3590 mutex_enter(nphm); 3591 } 3592 ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1); 3593 3594 3595 /* 3596 * grab page vnode mutex and remove it... 3597 */ 3598 vphm = page_vnode_mutex(vp); 3599 mutex_enter(vphm); 3600 3601 page_do_hashout(pp); 3602 3603 mutex_exit(vphm); 3604 if (phm == NULL) 3605 mutex_exit(nphm); 3606 3607 /* 3608 * Wake up processes waiting for this page. The page's 3609 * identity has been changed, and is probably not the 3610 * desired page any longer. 3611 */ 3612 sep = page_se_mutex(pp); 3613 mutex_enter(sep); 3614 pp->p_selock &= ~SE_EWANTED; 3615 if (CV_HAS_WAITERS(&pp->p_cv)) 3616 cv_broadcast(&pp->p_cv); 3617 mutex_exit(sep); 3618 } 3619 3620 /* 3621 * Add the page to the front of a linked list of pages 3622 * using the p_next & p_prev pointers for the list. 3623 * The caller is responsible for protecting the list pointers. 3624 */ 3625 void 3626 page_add(page_t **ppp, page_t *pp) 3627 { 3628 ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3629 3630 page_add_common(ppp, pp); 3631 } 3632 3633 3634 3635 /* 3636 * Common code for page_add() and mach_page_add() 3637 */ 3638 void 3639 page_add_common(page_t **ppp, page_t *pp) 3640 { 3641 if (*ppp == NULL) { 3642 pp->p_next = pp->p_prev = pp; 3643 } else { 3644 pp->p_next = *ppp; 3645 pp->p_prev = (*ppp)->p_prev; 3646 (*ppp)->p_prev = pp; 3647 pp->p_prev->p_next = pp; 3648 } 3649 *ppp = pp; 3650 } 3651 3652 3653 /* 3654 * Remove this page from a linked list of pages 3655 * using the p_next & p_prev pointers for the list. 3656 * 3657 * The caller is responsible for protecting the list pointers. 3658 */ 3659 void 3660 page_sub(page_t **ppp, page_t *pp) 3661 { 3662 ASSERT((PP_ISFREE(pp)) ? 1 : 3663 (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3664 3665 if (*ppp == NULL || pp == NULL) { 3666 panic("page_sub: bad arg(s): pp %p, *ppp %p", 3667 (void *)pp, (void *)(*ppp)); 3668 /*NOTREACHED*/ 3669 } 3670 3671 page_sub_common(ppp, pp); 3672 } 3673 3674 3675 /* 3676 * Common code for page_sub() and mach_page_sub() 3677 */ 3678 void 3679 page_sub_common(page_t **ppp, page_t *pp) 3680 { 3681 if (*ppp == pp) 3682 *ppp = pp->p_next; /* go to next page */ 3683 3684 if (*ppp == pp) 3685 *ppp = NULL; /* page list is gone */ 3686 else { 3687 pp->p_prev->p_next = pp->p_next; 3688 pp->p_next->p_prev = pp->p_prev; 3689 } 3690 pp->p_prev = pp->p_next = pp; /* make pp a list of one */ 3691 } 3692 3693 3694 /* 3695 * Break page list cppp into two lists with npages in the first list. 3696 * The tail is returned in nppp. 3697 */ 3698 void 3699 page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages) 3700 { 3701 page_t *s1pp = *oppp; 3702 page_t *s2pp; 3703 page_t *e1pp, *e2pp; 3704 long n = 0; 3705 3706 if (s1pp == NULL) { 3707 *nppp = NULL; 3708 return; 3709 } 3710 if (npages == 0) { 3711 *nppp = s1pp; 3712 *oppp = NULL; 3713 return; 3714 } 3715 for (n = 0, s2pp = *oppp; n < npages; n++) { 3716 s2pp = s2pp->p_next; 3717 } 3718 /* Fix head and tail of new lists */ 3719 e1pp = s2pp->p_prev; 3720 e2pp = s1pp->p_prev; 3721 s1pp->p_prev = e1pp; 3722 e1pp->p_next = s1pp; 3723 s2pp->p_prev = e2pp; 3724 e2pp->p_next = s2pp; 3725 3726 /* second list empty */ 3727 if (s2pp == s1pp) { 3728 *oppp = s1pp; 3729 *nppp = NULL; 3730 } else { 3731 *oppp = s1pp; 3732 *nppp = s2pp; 3733 } 3734 } 3735 3736 /* 3737 * Concatenate page list nppp onto the end of list ppp. 3738 */ 3739 void 3740 page_list_concat(page_t **ppp, page_t **nppp) 3741 { 3742 page_t *s1pp, *s2pp, *e1pp, *e2pp; 3743 3744 if (*nppp == NULL) { 3745 return; 3746 } 3747 if (*ppp == NULL) { 3748 *ppp = *nppp; 3749 return; 3750 } 3751 s1pp = *ppp; 3752 e1pp = s1pp->p_prev; 3753 s2pp = *nppp; 3754 e2pp = s2pp->p_prev; 3755 s1pp->p_prev = e2pp; 3756 e2pp->p_next = s1pp; 3757 e1pp->p_next = s2pp; 3758 s2pp->p_prev = e1pp; 3759 } 3760 3761 /* 3762 * return the next page in the page list 3763 */ 3764 page_t * 3765 page_list_next(page_t *pp) 3766 { 3767 return (pp->p_next); 3768 } 3769 3770 3771 /* 3772 * Add the page to the front of the linked list of pages 3773 * using p_vpnext/p_vpprev pointers for the list. 3774 * 3775 * The caller is responsible for protecting the lists. 3776 */ 3777 void 3778 page_vpadd(page_t **ppp, page_t *pp) 3779 { 3780 if (*ppp == NULL) { 3781 pp->p_vpnext = pp->p_vpprev = pp; 3782 } else { 3783 pp->p_vpnext = *ppp; 3784 pp->p_vpprev = (*ppp)->p_vpprev; 3785 (*ppp)->p_vpprev = pp; 3786 pp->p_vpprev->p_vpnext = pp; 3787 } 3788 *ppp = pp; 3789 } 3790 3791 /* 3792 * Remove this page from the linked list of pages 3793 * using p_vpnext/p_vpprev pointers for the list. 3794 * 3795 * The caller is responsible for protecting the lists. 3796 */ 3797 void 3798 page_vpsub(page_t **ppp, page_t *pp) 3799 { 3800 if (*ppp == NULL || pp == NULL) { 3801 panic("page_vpsub: bad arg(s): pp %p, *ppp %p", 3802 (void *)pp, (void *)(*ppp)); 3803 /*NOTREACHED*/ 3804 } 3805 3806 if (*ppp == pp) 3807 *ppp = pp->p_vpnext; /* go to next page */ 3808 3809 if (*ppp == pp) 3810 *ppp = NULL; /* page list is gone */ 3811 else { 3812 pp->p_vpprev->p_vpnext = pp->p_vpnext; 3813 pp->p_vpnext->p_vpprev = pp->p_vpprev; 3814 } 3815 pp->p_vpprev = pp->p_vpnext = pp; /* make pp a list of one */ 3816 } 3817 3818 /* 3819 * Lock a physical page into memory "long term". Used to support "lock 3820 * in memory" functions. Accepts the page to be locked, and a cow variable 3821 * to indicate whether a the lock will travel to the new page during 3822 * a potential copy-on-write. 3823 */ 3824 int 3825 page_pp_lock( 3826 page_t *pp, /* page to be locked */ 3827 int cow, /* cow lock */ 3828 int kernel) /* must succeed -- ignore checking */ 3829 { 3830 int r = 0; /* result -- assume failure */ 3831 3832 ASSERT(PAGE_LOCKED(pp)); 3833 3834 page_struct_lock(pp); 3835 /* 3836 * Acquire the "freemem_lock" for availrmem. 3837 */ 3838 if (cow) { 3839 if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 3840 if (!anon_swap_adjust(1, pages_pp_maximum, 0)) { 3841 mutex_enter(&pages_locked_lock); 3842 pages_locked++; 3843 mutex_exit(&pages_locked_lock); 3844 r = 1; 3845 if (++pp->p_cowcnt == 3846 (ushort_t)PAGE_LOCK_MAXIMUM) { 3847 cmn_err(CE_WARN, 3848 "COW lock limit on pfn 0x%lx", 3849 page_pptonum(pp)); 3850 } 3851 } 3852 } 3853 } else { 3854 if (pp->p_lckcnt) { 3855 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 3856 r = 1; 3857 if (++pp->p_lckcnt == 3858 (ushort_t)PAGE_LOCK_MAXIMUM) { 3859 cmn_err(CE_WARN, "Page lock limit " 3860 "reached on pfn 0x%lx", 3861 page_pptonum(pp)); 3862 } 3863 } 3864 } else { 3865 if (kernel) { 3866 /* availrmem accounting done by caller */ 3867 ++pp->p_lckcnt; 3868 r = 1; 3869 } else { 3870 if (!anon_swap_adjust(1, pages_pp_maximum, 0)) { 3871 mutex_enter(&pages_locked_lock); 3872 pages_locked++; 3873 mutex_exit(&pages_locked_lock); 3874 ++pp->p_lckcnt; 3875 r = 1; 3876 } 3877 } 3878 } 3879 } 3880 page_struct_unlock(pp); 3881 return (r); 3882 } 3883 3884 /* 3885 * Decommit a lock on a physical page frame. Account for cow locks if 3886 * appropriate. 3887 */ 3888 void 3889 page_pp_unlock( 3890 page_t *pp, /* page to be unlocked */ 3891 int cow, /* expect cow lock */ 3892 int kernel) /* this was a kernel lock */ 3893 { 3894 ASSERT(PAGE_LOCKED(pp)); 3895 3896 page_struct_lock(pp); 3897 /* 3898 * Acquire the "freemem_lock" for availrmem. 3899 * If cowcnt or lcknt is already 0 do nothing; i.e., we 3900 * could be called to unlock even if nothing is locked. This could 3901 * happen if locked file pages were truncated (removing the lock) 3902 * and the file was grown again and new pages faulted in; the new 3903 * pages are unlocked but the segment still thinks they're locked. 3904 */ 3905 if (cow) { 3906 if (pp->p_cowcnt) { 3907 anon_swap_restore(1); 3908 pp->p_cowcnt--; 3909 mutex_enter(&pages_locked_lock); 3910 pages_locked--; 3911 mutex_exit(&pages_locked_lock); 3912 } 3913 } else { 3914 if (pp->p_lckcnt && --pp->p_lckcnt == 0) { 3915 if (!kernel) { 3916 anon_swap_restore(1); 3917 mutex_enter(&pages_locked_lock); 3918 pages_locked--; 3919 mutex_exit(&pages_locked_lock); 3920 } 3921 } 3922 } 3923 page_struct_unlock(pp); 3924 } 3925 3926 /* 3927 * This routine reserves availrmem for npages; 3928 * flags: KM_NOSLEEP or KM_SLEEP 3929 * returns 1 on success or 0 on failure 3930 */ 3931 int 3932 page_resv(pgcnt_t npages, uint_t flags) 3933 { 3934 mutex_enter(&freemem_lock); 3935 while (availrmem < tune.t_minarmem + npages) { 3936 if (flags & KM_NOSLEEP) { 3937 mutex_exit(&freemem_lock); 3938 return (0); 3939 } 3940 mutex_exit(&freemem_lock); 3941 page_needfree(npages); 3942 kmem_reap(); 3943 delay(hz >> 2); 3944 page_needfree(-(spgcnt_t)npages); 3945 mutex_enter(&freemem_lock); 3946 } 3947 availrmem -= npages; 3948 mutex_exit(&freemem_lock); 3949 return (1); 3950 } 3951 3952 /* 3953 * This routine unreserves availrmem for npages; 3954 */ 3955 void 3956 page_unresv(pgcnt_t npages) 3957 { 3958 mutex_enter(&freemem_lock); 3959 availrmem += npages; 3960 mutex_exit(&freemem_lock); 3961 } 3962 3963 /* 3964 * See Statement at the beginning of segvn_lockop() regarding 3965 * the way we handle cowcnts and lckcnts. 3966 * 3967 * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage 3968 * that breaks COW has PROT_WRITE. 3969 * 3970 * Note that, we may also break COW in case we are softlocking 3971 * on read access during physio; 3972 * in this softlock case, the vpage may not have PROT_WRITE. 3973 * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp' 3974 * if the vpage doesn't have PROT_WRITE. 3975 * 3976 * This routine is never called if we are stealing a page 3977 * in anon_private. 3978 * 3979 * The caller subtracted from availrmem for read only mapping. 3980 * if lckcnt is 1 increment availrmem. 3981 */ 3982 void 3983 page_pp_useclaim( 3984 page_t *opp, /* original page frame losing lock */ 3985 page_t *npp, /* new page frame gaining lock */ 3986 uint_t write_perm) /* set if vpage has PROT_WRITE */ 3987 { 3988 int payback = 0; 3989 int nidx, oidx; 3990 3991 ASSERT(PAGE_LOCKED(opp)); 3992 ASSERT(PAGE_LOCKED(npp)); 3993 3994 /* 3995 * Since we have two pages we probably have two locks. We need to take 3996 * them in a defined order to avoid deadlocks. It's also possible they 3997 * both hash to the same lock in which case this is a non-issue. 3998 */ 3999 nidx = PAGE_LLOCK_HASH(PP_PAGEROOT(npp)); 4000 oidx = PAGE_LLOCK_HASH(PP_PAGEROOT(opp)); 4001 if (nidx < oidx) { 4002 page_struct_lock(npp); 4003 page_struct_lock(opp); 4004 } else if (oidx < nidx) { 4005 page_struct_lock(opp); 4006 page_struct_lock(npp); 4007 } else { /* The pages hash to the same lock */ 4008 page_struct_lock(npp); 4009 } 4010 4011 ASSERT(npp->p_cowcnt == 0); 4012 ASSERT(npp->p_lckcnt == 0); 4013 4014 /* Don't use claim if nothing is locked (see page_pp_unlock above) */ 4015 if ((write_perm && opp->p_cowcnt != 0) || 4016 (!write_perm && opp->p_lckcnt != 0)) { 4017 4018 if (write_perm) { 4019 npp->p_cowcnt++; 4020 ASSERT(opp->p_cowcnt != 0); 4021 opp->p_cowcnt--; 4022 } else { 4023 4024 ASSERT(opp->p_lckcnt != 0); 4025 4026 /* 4027 * We didn't need availrmem decremented if p_lckcnt on 4028 * original page is 1. Here, we are unlocking 4029 * read-only copy belonging to original page and 4030 * are locking a copy belonging to new page. 4031 */ 4032 if (opp->p_lckcnt == 1) 4033 payback = 1; 4034 4035 npp->p_lckcnt++; 4036 opp->p_lckcnt--; 4037 } 4038 } 4039 if (payback) { 4040 mutex_enter(&freemem_lock); 4041 availrmem++; 4042 pages_useclaim--; 4043 mutex_exit(&freemem_lock); 4044 } 4045 4046 if (nidx < oidx) { 4047 page_struct_unlock(opp); 4048 page_struct_unlock(npp); 4049 } else if (oidx < nidx) { 4050 page_struct_unlock(npp); 4051 page_struct_unlock(opp); 4052 } else { /* The pages hash to the same lock */ 4053 page_struct_unlock(npp); 4054 } 4055 } 4056 4057 /* 4058 * Simple claim adjust functions -- used to support changes in 4059 * claims due to changes in access permissions. Used by segvn_setprot(). 4060 */ 4061 int 4062 page_addclaim(page_t *pp) 4063 { 4064 int r = 0; /* result */ 4065 4066 ASSERT(PAGE_LOCKED(pp)); 4067 4068 page_struct_lock(pp); 4069 ASSERT(pp->p_lckcnt != 0); 4070 4071 if (pp->p_lckcnt == 1) { 4072 if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4073 --pp->p_lckcnt; 4074 r = 1; 4075 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4076 cmn_err(CE_WARN, 4077 "COW lock limit reached on pfn 0x%lx", 4078 page_pptonum(pp)); 4079 } 4080 } 4081 } else { 4082 mutex_enter(&freemem_lock); 4083 if ((availrmem > pages_pp_maximum) && 4084 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 4085 --availrmem; 4086 ++pages_claimed; 4087 mutex_exit(&freemem_lock); 4088 --pp->p_lckcnt; 4089 r = 1; 4090 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4091 cmn_err(CE_WARN, 4092 "COW lock limit reached on pfn 0x%lx", 4093 page_pptonum(pp)); 4094 } 4095 } else 4096 mutex_exit(&freemem_lock); 4097 } 4098 page_struct_unlock(pp); 4099 return (r); 4100 } 4101 4102 int 4103 page_subclaim(page_t *pp) 4104 { 4105 int r = 0; 4106 4107 ASSERT(PAGE_LOCKED(pp)); 4108 4109 page_struct_lock(pp); 4110 ASSERT(pp->p_cowcnt != 0); 4111 4112 if (pp->p_lckcnt) { 4113 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4114 r = 1; 4115 /* 4116 * for availrmem 4117 */ 4118 mutex_enter(&freemem_lock); 4119 availrmem++; 4120 pages_claimed--; 4121 mutex_exit(&freemem_lock); 4122 4123 pp->p_cowcnt--; 4124 4125 if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4126 cmn_err(CE_WARN, 4127 "Page lock limit reached on pfn 0x%lx", 4128 page_pptonum(pp)); 4129 } 4130 } 4131 } else { 4132 r = 1; 4133 pp->p_cowcnt--; 4134 pp->p_lckcnt++; 4135 } 4136 page_struct_unlock(pp); 4137 return (r); 4138 } 4139 4140 /* 4141 * Variant of page_addclaim(), where ppa[] contains the pages of a single large 4142 * page. 4143 */ 4144 int 4145 page_addclaim_pages(page_t **ppa) 4146 { 4147 pgcnt_t lckpgs = 0, pg_idx; 4148 4149 VM_STAT_ADD(pagecnt.pc_addclaim_pages); 4150 4151 /* 4152 * Only need to take the page struct lock on the large page root. 4153 */ 4154 page_struct_lock(ppa[0]); 4155 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4156 4157 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4158 ASSERT(ppa[pg_idx]->p_lckcnt != 0); 4159 if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4160 page_struct_unlock(ppa[0]); 4161 return (0); 4162 } 4163 if (ppa[pg_idx]->p_lckcnt > 1) 4164 lckpgs++; 4165 } 4166 4167 if (lckpgs != 0) { 4168 mutex_enter(&freemem_lock); 4169 if (availrmem >= pages_pp_maximum + lckpgs) { 4170 availrmem -= lckpgs; 4171 pages_claimed += lckpgs; 4172 } else { 4173 mutex_exit(&freemem_lock); 4174 page_struct_unlock(ppa[0]); 4175 return (0); 4176 } 4177 mutex_exit(&freemem_lock); 4178 } 4179 4180 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4181 ppa[pg_idx]->p_lckcnt--; 4182 ppa[pg_idx]->p_cowcnt++; 4183 } 4184 page_struct_unlock(ppa[0]); 4185 return (1); 4186 } 4187 4188 /* 4189 * Variant of page_subclaim(), where ppa[] contains the pages of a single large 4190 * page. 4191 */ 4192 int 4193 page_subclaim_pages(page_t **ppa) 4194 { 4195 pgcnt_t ulckpgs = 0, pg_idx; 4196 4197 VM_STAT_ADD(pagecnt.pc_subclaim_pages); 4198 4199 /* 4200 * Only need to take the page struct lock on the large page root. 4201 */ 4202 page_struct_lock(ppa[0]); 4203 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4204 4205 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4206 ASSERT(ppa[pg_idx]->p_cowcnt != 0); 4207 if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4208 page_struct_unlock(ppa[0]); 4209 return (0); 4210 } 4211 if (ppa[pg_idx]->p_lckcnt != 0) 4212 ulckpgs++; 4213 } 4214 4215 if (ulckpgs != 0) { 4216 mutex_enter(&freemem_lock); 4217 availrmem += ulckpgs; 4218 pages_claimed -= ulckpgs; 4219 mutex_exit(&freemem_lock); 4220 } 4221 4222 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4223 ppa[pg_idx]->p_cowcnt--; 4224 ppa[pg_idx]->p_lckcnt++; 4225 4226 } 4227 page_struct_unlock(ppa[0]); 4228 return (1); 4229 } 4230 4231 page_t * 4232 page_numtopp(pfn_t pfnum, se_t se) 4233 { 4234 page_t *pp; 4235 4236 retry: 4237 pp = page_numtopp_nolock(pfnum); 4238 if (pp == NULL) { 4239 return ((page_t *)NULL); 4240 } 4241 4242 /* 4243 * Acquire the appropriate lock on the page. 4244 */ 4245 while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) { 4246 if (page_pptonum(pp) != pfnum) 4247 goto retry; 4248 continue; 4249 } 4250 4251 if (page_pptonum(pp) != pfnum) { 4252 page_unlock(pp); 4253 goto retry; 4254 } 4255 4256 return (pp); 4257 } 4258 4259 page_t * 4260 page_numtopp_noreclaim(pfn_t pfnum, se_t se) 4261 { 4262 page_t *pp; 4263 4264 retry: 4265 pp = page_numtopp_nolock(pfnum); 4266 if (pp == NULL) { 4267 return ((page_t *)NULL); 4268 } 4269 4270 /* 4271 * Acquire the appropriate lock on the page. 4272 */ 4273 while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) { 4274 if (page_pptonum(pp) != pfnum) 4275 goto retry; 4276 continue; 4277 } 4278 4279 if (page_pptonum(pp) != pfnum) { 4280 page_unlock(pp); 4281 goto retry; 4282 } 4283 4284 return (pp); 4285 } 4286 4287 /* 4288 * This routine is like page_numtopp, but will only return page structs 4289 * for pages which are ok for loading into hardware using the page struct. 4290 */ 4291 page_t * 4292 page_numtopp_nowait(pfn_t pfnum, se_t se) 4293 { 4294 page_t *pp; 4295 4296 retry: 4297 pp = page_numtopp_nolock(pfnum); 4298 if (pp == NULL) { 4299 return ((page_t *)NULL); 4300 } 4301 4302 /* 4303 * Try to acquire the appropriate lock on the page. 4304 */ 4305 if (PP_ISFREE(pp)) 4306 pp = NULL; 4307 else { 4308 if (!page_trylock(pp, se)) 4309 pp = NULL; 4310 else { 4311 if (page_pptonum(pp) != pfnum) { 4312 page_unlock(pp); 4313 goto retry; 4314 } 4315 if (PP_ISFREE(pp)) { 4316 page_unlock(pp); 4317 pp = NULL; 4318 } 4319 } 4320 } 4321 return (pp); 4322 } 4323 4324 #define SYNC_PROGRESS_NPAGES 1000 4325 4326 /* 4327 * Returns a count of dirty pages that are in the process 4328 * of being written out. If 'cleanit' is set, try to push the page. 4329 */ 4330 pgcnt_t 4331 page_busy(int cleanit) 4332 { 4333 page_t *page0 = page_first(); 4334 page_t *pp = page0; 4335 pgcnt_t nppbusy = 0; 4336 int counter = 0; 4337 u_offset_t off; 4338 4339 do { 4340 vnode_t *vp = pp->p_vnode; 4341 4342 /* 4343 * Reset the sync timeout. The page list is very long 4344 * on large memory systems. 4345 */ 4346 if (++counter > SYNC_PROGRESS_NPAGES) { 4347 counter = 0; 4348 vfs_syncprogress(); 4349 } 4350 4351 /* 4352 * A page is a candidate for syncing if it is: 4353 * 4354 * (a) On neither the freelist nor the cachelist 4355 * (b) Hashed onto a vnode 4356 * (c) Not a kernel page 4357 * (d) Dirty 4358 * (e) Not part of a swapfile 4359 * (f) a page which belongs to a real vnode; eg has a non-null 4360 * v_vfsp pointer. 4361 * (g) Backed by a filesystem which doesn't have a 4362 * stubbed-out sync operation 4363 */ 4364 if (!PP_ISFREE(pp) && vp != NULL && !VN_ISKAS(vp) && 4365 hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL && 4366 vfs_can_sync(vp->v_vfsp)) { 4367 nppbusy++; 4368 4369 if (!cleanit) 4370 continue; 4371 if (!page_trylock(pp, SE_EXCL)) 4372 continue; 4373 4374 if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) || 4375 pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 4376 !(hat_pagesync(pp, 4377 HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) { 4378 page_unlock(pp); 4379 continue; 4380 } 4381 off = pp->p_offset; 4382 VN_HOLD(vp); 4383 page_unlock(pp); 4384 (void) VOP_PUTPAGE(vp, off, PAGESIZE, 4385 B_ASYNC | B_FREE, kcred, NULL); 4386 VN_RELE(vp); 4387 } 4388 } while ((pp = page_next(pp)) != page0); 4389 4390 vfs_syncprogress(); 4391 return (nppbusy); 4392 } 4393 4394 void page_invalidate_pages(void); 4395 4396 /* 4397 * callback handler to vm sub-system 4398 * 4399 * callers make sure no recursive entries to this func. 4400 */ 4401 /*ARGSUSED*/ 4402 boolean_t 4403 callb_vm_cpr(void *arg, int code) 4404 { 4405 if (code == CB_CODE_CPR_CHKPT) 4406 page_invalidate_pages(); 4407 return (B_TRUE); 4408 } 4409 4410 /* 4411 * Invalidate all pages of the system. 4412 * It shouldn't be called until all user page activities are all stopped. 4413 */ 4414 void 4415 page_invalidate_pages() 4416 { 4417 page_t *pp; 4418 page_t *page0; 4419 pgcnt_t nbusypages; 4420 int retry = 0; 4421 const int MAXRETRIES = 4; 4422 top: 4423 /* 4424 * Flush dirty pages and destroy the clean ones. 4425 */ 4426 nbusypages = 0; 4427 4428 pp = page0 = page_first(); 4429 do { 4430 struct vnode *vp; 4431 u_offset_t offset; 4432 int mod; 4433 4434 /* 4435 * skip the page if it has no vnode or the page associated 4436 * with the kernel vnode or prom allocated kernel mem. 4437 */ 4438 if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp)) 4439 continue; 4440 4441 /* 4442 * skip the page which is already free invalidated. 4443 */ 4444 if (PP_ISFREE(pp) && PP_ISAGED(pp)) 4445 continue; 4446 4447 /* 4448 * skip pages that are already locked or can't be "exclusively" 4449 * locked or are already free. After we lock the page, check 4450 * the free and age bits again to be sure it's not destroyed 4451 * yet. 4452 * To achieve max. parallelization, we use page_trylock instead 4453 * of page_lock so that we don't get block on individual pages 4454 * while we have thousands of other pages to process. 4455 */ 4456 if (!page_trylock(pp, SE_EXCL)) { 4457 nbusypages++; 4458 continue; 4459 } else if (PP_ISFREE(pp)) { 4460 if (!PP_ISAGED(pp)) { 4461 page_destroy_free(pp); 4462 } else { 4463 page_unlock(pp); 4464 } 4465 continue; 4466 } 4467 /* 4468 * Is this page involved in some I/O? shared? 4469 * 4470 * The page_struct_lock need not be acquired to 4471 * examine these fields since the page has an 4472 * "exclusive" lock. 4473 */ 4474 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 4475 page_unlock(pp); 4476 continue; 4477 } 4478 4479 if (vp->v_type == VCHR) { 4480 panic("vp->v_type == VCHR"); 4481 /*NOTREACHED*/ 4482 } 4483 4484 if (!page_try_demote_pages(pp)) { 4485 page_unlock(pp); 4486 continue; 4487 } 4488 4489 /* 4490 * Check the modified bit. Leave the bits alone in hardware 4491 * (they will be modified if we do the putpage). 4492 */ 4493 mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) 4494 & P_MOD); 4495 if (mod) { 4496 offset = pp->p_offset; 4497 /* 4498 * Hold the vnode before releasing the page lock 4499 * to prevent it from being freed and re-used by 4500 * some other thread. 4501 */ 4502 VN_HOLD(vp); 4503 page_unlock(pp); 4504 /* 4505 * No error return is checked here. Callers such as 4506 * cpr deals with the dirty pages at the dump time 4507 * if this putpage fails. 4508 */ 4509 (void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL, 4510 kcred, NULL); 4511 VN_RELE(vp); 4512 } else { 4513 /*LINTED: constant in conditional context*/ 4514 VN_DISPOSE(pp, B_INVAL, 0, kcred); 4515 } 4516 } while ((pp = page_next(pp)) != page0); 4517 if (nbusypages && retry++ < MAXRETRIES) { 4518 delay(1); 4519 goto top; 4520 } 4521 } 4522 4523 /* 4524 * Replace the page "old" with the page "new" on the page hash and vnode lists 4525 * 4526 * the replacement must be done in place, ie the equivalent sequence: 4527 * 4528 * vp = old->p_vnode; 4529 * off = old->p_offset; 4530 * page_do_hashout(old) 4531 * page_do_hashin(new, vp, off) 4532 * 4533 * doesn't work, since 4534 * 1) if old is the only page on the vnode, the v_pages list has a window 4535 * where it looks empty. This will break file system assumptions. 4536 * and 4537 * 2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list. 4538 */ 4539 static void 4540 page_do_relocate_hash(page_t *new, page_t *old) 4541 { 4542 page_t **hash_list; 4543 vnode_t *vp = old->p_vnode; 4544 kmutex_t *sep; 4545 4546 ASSERT(PAGE_EXCL(old)); 4547 ASSERT(PAGE_EXCL(new)); 4548 ASSERT(vp != NULL); 4549 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 4550 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset)))); 4551 4552 /* 4553 * First find old page on the page hash list 4554 */ 4555 hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)]; 4556 4557 for (;;) { 4558 if (*hash_list == old) 4559 break; 4560 if (*hash_list == NULL) { 4561 panic("page_do_hashout"); 4562 /*NOTREACHED*/ 4563 } 4564 hash_list = &(*hash_list)->p_hash; 4565 } 4566 4567 /* 4568 * update new and replace old with new on the page hash list 4569 */ 4570 new->p_vnode = old->p_vnode; 4571 new->p_offset = old->p_offset; 4572 new->p_hash = old->p_hash; 4573 *hash_list = new; 4574 4575 if ((new->p_vnode->v_flag & VISSWAP) != 0) 4576 PP_SETSWAP(new); 4577 4578 /* 4579 * replace old with new on the vnode's page list 4580 */ 4581 if (old->p_vpnext == old) { 4582 new->p_vpnext = new; 4583 new->p_vpprev = new; 4584 } else { 4585 new->p_vpnext = old->p_vpnext; 4586 new->p_vpprev = old->p_vpprev; 4587 new->p_vpnext->p_vpprev = new; 4588 new->p_vpprev->p_vpnext = new; 4589 } 4590 if (vp->v_pages == old) 4591 vp->v_pages = new; 4592 4593 /* 4594 * clear out the old page 4595 */ 4596 old->p_hash = NULL; 4597 old->p_vpnext = NULL; 4598 old->p_vpprev = NULL; 4599 old->p_vnode = NULL; 4600 PP_CLRSWAP(old); 4601 old->p_offset = (u_offset_t)-1; 4602 page_clr_all_props(old); 4603 4604 /* 4605 * Wake up processes waiting for this page. The page's 4606 * identity has been changed, and is probably not the 4607 * desired page any longer. 4608 */ 4609 sep = page_se_mutex(old); 4610 mutex_enter(sep); 4611 old->p_selock &= ~SE_EWANTED; 4612 if (CV_HAS_WAITERS(&old->p_cv)) 4613 cv_broadcast(&old->p_cv); 4614 mutex_exit(sep); 4615 } 4616 4617 /* 4618 * This function moves the identity of page "pp_old" to page "pp_new". 4619 * Both pages must be locked on entry. "pp_new" is free, has no identity, 4620 * and need not be hashed out from anywhere. 4621 */ 4622 void 4623 page_relocate_hash(page_t *pp_new, page_t *pp_old) 4624 { 4625 vnode_t *vp = pp_old->p_vnode; 4626 u_offset_t off = pp_old->p_offset; 4627 kmutex_t *phm, *vphm; 4628 4629 /* 4630 * Rehash two pages 4631 */ 4632 ASSERT(PAGE_EXCL(pp_old)); 4633 ASSERT(PAGE_EXCL(pp_new)); 4634 ASSERT(vp != NULL); 4635 ASSERT(pp_new->p_vnode == NULL); 4636 4637 /* 4638 * hashout then hashin while holding the mutexes 4639 */ 4640 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off)); 4641 mutex_enter(phm); 4642 vphm = page_vnode_mutex(vp); 4643 mutex_enter(vphm); 4644 4645 page_do_relocate_hash(pp_new, pp_old); 4646 4647 /* The following comment preserved from page_flip(). */ 4648 pp_new->p_fsdata = pp_old->p_fsdata; 4649 pp_old->p_fsdata = 0; 4650 mutex_exit(vphm); 4651 mutex_exit(phm); 4652 4653 /* 4654 * The page_struct_lock need not be acquired for lckcnt and 4655 * cowcnt since the page has an "exclusive" lock. 4656 */ 4657 ASSERT(pp_new->p_lckcnt == 0); 4658 ASSERT(pp_new->p_cowcnt == 0); 4659 pp_new->p_lckcnt = pp_old->p_lckcnt; 4660 pp_new->p_cowcnt = pp_old->p_cowcnt; 4661 pp_old->p_lckcnt = pp_old->p_cowcnt = 0; 4662 4663 } 4664 4665 /* 4666 * Helper routine used to lock all remaining members of a 4667 * large page. The caller is responsible for passing in a locked 4668 * pp. If pp is a large page, then it succeeds in locking all the 4669 * remaining constituent pages or it returns with only the 4670 * original page locked. 4671 * 4672 * Returns 1 on success, 0 on failure. 4673 * 4674 * If success is returned this routine guarantees p_szc for all constituent 4675 * pages of a large page pp belongs to can't change. To achieve this we 4676 * recheck szc of pp after locking all constituent pages and retry if szc 4677 * changed (it could only decrease). Since hat_page_demote() needs an EXCL 4678 * lock on one of constituent pages it can't be running after all constituent 4679 * pages are locked. hat_page_demote() with a lock on a constituent page 4680 * outside of this large page (i.e. pp belonged to a larger large page) is 4681 * already done with all constituent pages of pp since the root's p_szc is 4682 * changed last. Therefore no need to synchronize with hat_page_demote() that 4683 * locked a constituent page outside of pp's current large page. 4684 */ 4685 #ifdef DEBUG 4686 uint32_t gpg_trylock_mtbf = 0; 4687 #endif 4688 4689 int 4690 group_page_trylock(page_t *pp, se_t se) 4691 { 4692 page_t *tpp; 4693 pgcnt_t npgs, i, j; 4694 uint_t pszc = pp->p_szc; 4695 4696 #ifdef DEBUG 4697 if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) { 4698 return (0); 4699 } 4700 #endif 4701 4702 if (pp != PP_GROUPLEADER(pp, pszc)) { 4703 return (0); 4704 } 4705 4706 retry: 4707 ASSERT(PAGE_LOCKED_SE(pp, se)); 4708 ASSERT(!PP_ISFREE(pp)); 4709 if (pszc == 0) { 4710 return (1); 4711 } 4712 npgs = page_get_pagecnt(pszc); 4713 tpp = pp + 1; 4714 for (i = 1; i < npgs; i++, tpp++) { 4715 if (!page_trylock(tpp, se)) { 4716 tpp = pp + 1; 4717 for (j = 1; j < i; j++, tpp++) { 4718 page_unlock(tpp); 4719 } 4720 return (0); 4721 } 4722 } 4723 if (pp->p_szc != pszc) { 4724 ASSERT(pp->p_szc < pszc); 4725 ASSERT(pp->p_vnode != NULL && !PP_ISKAS(pp) && 4726 !IS_SWAPFSVP(pp->p_vnode)); 4727 tpp = pp + 1; 4728 for (i = 1; i < npgs; i++, tpp++) { 4729 page_unlock(tpp); 4730 } 4731 pszc = pp->p_szc; 4732 goto retry; 4733 } 4734 return (1); 4735 } 4736 4737 void 4738 group_page_unlock(page_t *pp) 4739 { 4740 page_t *tpp; 4741 pgcnt_t npgs, i; 4742 4743 ASSERT(PAGE_LOCKED(pp)); 4744 ASSERT(!PP_ISFREE(pp)); 4745 ASSERT(pp == PP_PAGEROOT(pp)); 4746 npgs = page_get_pagecnt(pp->p_szc); 4747 for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) { 4748 page_unlock(tpp); 4749 } 4750 } 4751 4752 /* 4753 * returns 4754 * 0 : on success and *nrelocp is number of relocated PAGESIZE pages 4755 * ERANGE : this is not a base page 4756 * EBUSY : failure to get locks on the page/pages 4757 * ENOMEM : failure to obtain replacement pages 4758 * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel 4759 * EIO : An error occurred while trying to copy the page data 4760 * 4761 * Return with all constituent members of target and replacement 4762 * SE_EXCL locked. It is the callers responsibility to drop the 4763 * locks. 4764 */ 4765 int 4766 do_page_relocate( 4767 page_t **target, 4768 page_t **replacement, 4769 int grouplock, 4770 spgcnt_t *nrelocp, 4771 lgrp_t *lgrp) 4772 { 4773 page_t *first_repl; 4774 page_t *repl; 4775 page_t *targ; 4776 page_t *pl = NULL; 4777 uint_t ppattr; 4778 pfn_t pfn, repl_pfn; 4779 uint_t szc; 4780 spgcnt_t npgs, i; 4781 int repl_contig = 0; 4782 uint_t flags = 0; 4783 spgcnt_t dofree = 0; 4784 4785 *nrelocp = 0; 4786 4787 #if defined(__sparc) 4788 /* 4789 * We need to wait till OBP has completed 4790 * its boot-time handoff of its resources to the kernel 4791 * before we allow page relocation 4792 */ 4793 if (page_relocate_ready == 0) { 4794 return (EAGAIN); 4795 } 4796 #endif 4797 4798 /* 4799 * If this is not a base page, 4800 * just return with 0x0 pages relocated. 4801 */ 4802 targ = *target; 4803 ASSERT(PAGE_EXCL(targ)); 4804 ASSERT(!PP_ISFREE(targ)); 4805 szc = targ->p_szc; 4806 ASSERT(szc < mmu_page_sizes); 4807 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4808 pfn = targ->p_pagenum; 4809 if (pfn != PFN_BASE(pfn, szc)) { 4810 VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]); 4811 return (ERANGE); 4812 } 4813 4814 if ((repl = *replacement) != NULL && repl->p_szc >= szc) { 4815 repl_pfn = repl->p_pagenum; 4816 if (repl_pfn != PFN_BASE(repl_pfn, szc)) { 4817 VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]); 4818 return (ERANGE); 4819 } 4820 repl_contig = 1; 4821 } 4822 4823 /* 4824 * We must lock all members of this large page or we cannot 4825 * relocate any part of it. 4826 */ 4827 if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) { 4828 VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]); 4829 return (EBUSY); 4830 } 4831 4832 /* 4833 * reread szc it could have been decreased before 4834 * group_page_trylock() was done. 4835 */ 4836 szc = targ->p_szc; 4837 ASSERT(szc < mmu_page_sizes); 4838 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4839 ASSERT(pfn == PFN_BASE(pfn, szc)); 4840 4841 npgs = page_get_pagecnt(targ->p_szc); 4842 4843 if (repl == NULL) { 4844 dofree = npgs; /* Size of target page in MMU pages */ 4845 if (!page_create_wait(dofree, 0)) { 4846 if (grouplock != 0) { 4847 group_page_unlock(targ); 4848 } 4849 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4850 return (ENOMEM); 4851 } 4852 4853 /* 4854 * seg kmem pages require that the target and replacement 4855 * page be the same pagesize. 4856 */ 4857 flags = (VN_ISKAS(targ->p_vnode)) ? PGR_SAMESZC : 0; 4858 repl = page_get_replacement_page(targ, lgrp, flags); 4859 if (repl == NULL) { 4860 if (grouplock != 0) { 4861 group_page_unlock(targ); 4862 } 4863 page_create_putback(dofree); 4864 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4865 return (ENOMEM); 4866 } 4867 } 4868 #ifdef DEBUG 4869 else { 4870 ASSERT(PAGE_LOCKED(repl)); 4871 } 4872 #endif /* DEBUG */ 4873 4874 #if defined(__sparc) 4875 /* 4876 * Let hat_page_relocate() complete the relocation if it's kernel page 4877 */ 4878 if (VN_ISKAS(targ->p_vnode)) { 4879 *replacement = repl; 4880 if (hat_page_relocate(target, replacement, nrelocp) != 0) { 4881 if (grouplock != 0) { 4882 group_page_unlock(targ); 4883 } 4884 if (dofree) { 4885 *replacement = NULL; 4886 page_free_replacement_page(repl); 4887 page_create_putback(dofree); 4888 } 4889 VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]); 4890 return (EAGAIN); 4891 } 4892 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 4893 return (0); 4894 } 4895 #else 4896 #if defined(lint) 4897 dofree = dofree; 4898 #endif 4899 #endif 4900 4901 first_repl = repl; 4902 4903 for (i = 0; i < npgs; i++) { 4904 ASSERT(PAGE_EXCL(targ)); 4905 ASSERT(targ->p_slckcnt == 0); 4906 ASSERT(repl->p_slckcnt == 0); 4907 4908 (void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD); 4909 4910 ASSERT(hat_page_getshare(targ) == 0); 4911 ASSERT(!PP_ISFREE(targ)); 4912 ASSERT(targ->p_pagenum == (pfn + i)); 4913 ASSERT(repl_contig == 0 || 4914 repl->p_pagenum == (repl_pfn + i)); 4915 4916 /* 4917 * Copy the page contents and attributes then 4918 * relocate the page in the page hash. 4919 */ 4920 if (ppcopy(targ, repl) == 0) { 4921 targ = *target; 4922 repl = first_repl; 4923 VM_STAT_ADD(vmm_vmstats.ppr_copyfail); 4924 if (grouplock != 0) { 4925 group_page_unlock(targ); 4926 } 4927 if (dofree) { 4928 *replacement = NULL; 4929 page_free_replacement_page(repl); 4930 page_create_putback(dofree); 4931 } 4932 return (EIO); 4933 } 4934 4935 targ++; 4936 if (repl_contig != 0) { 4937 repl++; 4938 } else { 4939 repl = repl->p_next; 4940 } 4941 } 4942 4943 repl = first_repl; 4944 targ = *target; 4945 4946 for (i = 0; i < npgs; i++) { 4947 ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO)); 4948 page_clr_all_props(repl); 4949 page_set_props(repl, ppattr); 4950 page_relocate_hash(repl, targ); 4951 4952 ASSERT(hat_page_getshare(targ) == 0); 4953 ASSERT(hat_page_getshare(repl) == 0); 4954 /* 4955 * Now clear the props on targ, after the 4956 * page_relocate_hash(), they no longer 4957 * have any meaning. 4958 */ 4959 page_clr_all_props(targ); 4960 ASSERT(targ->p_next == targ); 4961 ASSERT(targ->p_prev == targ); 4962 page_list_concat(&pl, &targ); 4963 4964 targ++; 4965 if (repl_contig != 0) { 4966 repl++; 4967 } else { 4968 repl = repl->p_next; 4969 } 4970 } 4971 /* assert that we have come full circle with repl */ 4972 ASSERT(repl_contig == 1 || first_repl == repl); 4973 4974 *target = pl; 4975 if (*replacement == NULL) { 4976 ASSERT(first_repl == repl); 4977 *replacement = repl; 4978 } 4979 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 4980 *nrelocp = npgs; 4981 return (0); 4982 } 4983 /* 4984 * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated. 4985 */ 4986 int 4987 page_relocate( 4988 page_t **target, 4989 page_t **replacement, 4990 int grouplock, 4991 int freetarget, 4992 spgcnt_t *nrelocp, 4993 lgrp_t *lgrp) 4994 { 4995 spgcnt_t ret; 4996 4997 /* do_page_relocate returns 0 on success or errno value */ 4998 ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp); 4999 5000 if (ret != 0 || freetarget == 0) { 5001 return (ret); 5002 } 5003 if (*nrelocp == 1) { 5004 ASSERT(*target != NULL); 5005 page_free(*target, 1); 5006 } else { 5007 page_t *tpp = *target; 5008 uint_t szc = tpp->p_szc; 5009 pgcnt_t npgs = page_get_pagecnt(szc); 5010 ASSERT(npgs > 1); 5011 ASSERT(szc != 0); 5012 do { 5013 ASSERT(PAGE_EXCL(tpp)); 5014 ASSERT(!hat_page_is_mapped(tpp)); 5015 ASSERT(tpp->p_szc == szc); 5016 PP_SETFREE(tpp); 5017 PP_SETAGED(tpp); 5018 npgs--; 5019 } while ((tpp = tpp->p_next) != *target); 5020 ASSERT(npgs == 0); 5021 page_list_add_pages(*target, 0); 5022 npgs = page_get_pagecnt(szc); 5023 page_create_putback(npgs); 5024 } 5025 return (ret); 5026 } 5027 5028 /* 5029 * it is up to the caller to deal with pcf accounting. 5030 */ 5031 void 5032 page_free_replacement_page(page_t *pplist) 5033 { 5034 page_t *pp; 5035 5036 while (pplist != NULL) { 5037 /* 5038 * pp_targ is a linked list. 5039 */ 5040 pp = pplist; 5041 if (pp->p_szc == 0) { 5042 page_sub(&pplist, pp); 5043 page_clr_all_props(pp); 5044 PP_SETFREE(pp); 5045 PP_SETAGED(pp); 5046 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 5047 page_unlock(pp); 5048 VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]); 5049 } else { 5050 spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc); 5051 page_t *tpp; 5052 page_list_break(&pp, &pplist, curnpgs); 5053 tpp = pp; 5054 do { 5055 ASSERT(PAGE_EXCL(tpp)); 5056 ASSERT(!hat_page_is_mapped(tpp)); 5057 page_clr_all_props(tpp); 5058 PP_SETFREE(tpp); 5059 PP_SETAGED(tpp); 5060 } while ((tpp = tpp->p_next) != pp); 5061 page_list_add_pages(pp, 0); 5062 VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]); 5063 } 5064 } 5065 } 5066 5067 /* 5068 * Relocate target to non-relocatable replacement page. 5069 */ 5070 int 5071 page_relocate_cage(page_t **target, page_t **replacement) 5072 { 5073 page_t *tpp, *rpp; 5074 spgcnt_t pgcnt, npgs; 5075 int result; 5076 5077 tpp = *target; 5078 5079 ASSERT(PAGE_EXCL(tpp)); 5080 ASSERT(tpp->p_szc == 0); 5081 5082 pgcnt = btop(page_get_pagesize(tpp->p_szc)); 5083 5084 do { 5085 (void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC); 5086 rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC); 5087 if (rpp == NULL) { 5088 page_create_putback(pgcnt); 5089 kcage_cageout_wakeup(); 5090 } 5091 } while (rpp == NULL); 5092 5093 ASSERT(PP_ISNORELOC(rpp)); 5094 5095 result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL); 5096 5097 if (result == 0) { 5098 *replacement = rpp; 5099 if (pgcnt != npgs) 5100 panic("page_relocate_cage: partial relocation"); 5101 } 5102 5103 return (result); 5104 } 5105 5106 /* 5107 * Release the page lock on a page, place on cachelist 5108 * tail if no longer mapped. Caller can let us know if 5109 * the page is known to be clean. 5110 */ 5111 int 5112 page_release(page_t *pp, int checkmod) 5113 { 5114 int status; 5115 5116 ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) && 5117 (pp->p_vnode != NULL)); 5118 5119 if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) && 5120 ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) && 5121 pp->p_lckcnt == 0 && pp->p_cowcnt == 0 && 5122 !hat_page_is_mapped(pp)) { 5123 5124 /* 5125 * If page is modified, unlock it 5126 * 5127 * (p_nrm & P_MOD) bit has the latest stuff because: 5128 * (1) We found that this page doesn't have any mappings 5129 * _after_ holding SE_EXCL and 5130 * (2) We didn't drop SE_EXCL lock after the check in (1) 5131 */ 5132 if (checkmod && hat_ismod(pp)) { 5133 page_unlock(pp); 5134 status = PGREL_MOD; 5135 } else { 5136 /*LINTED: constant in conditional context*/ 5137 VN_DISPOSE(pp, B_FREE, 0, kcred); 5138 status = PGREL_CLEAN; 5139 } 5140 } else { 5141 page_unlock(pp); 5142 status = PGREL_NOTREL; 5143 } 5144 return (status); 5145 } 5146 5147 /* 5148 * Given a constituent page, try to demote the large page on the freelist. 5149 * 5150 * Returns nonzero if the page could be demoted successfully. Returns with 5151 * the constituent page still locked. 5152 */ 5153 int 5154 page_try_demote_free_pages(page_t *pp) 5155 { 5156 page_t *rootpp = pp; 5157 pfn_t pfn = page_pptonum(pp); 5158 spgcnt_t npgs; 5159 uint_t szc = pp->p_szc; 5160 5161 ASSERT(PP_ISFREE(pp)); 5162 ASSERT(PAGE_EXCL(pp)); 5163 5164 /* 5165 * Adjust rootpp and lock it, if `pp' is not the base 5166 * constituent page. 5167 */ 5168 npgs = page_get_pagecnt(pp->p_szc); 5169 if (npgs == 1) { 5170 return (0); 5171 } 5172 5173 if (!IS_P2ALIGNED(pfn, npgs)) { 5174 pfn = P2ALIGN(pfn, npgs); 5175 rootpp = page_numtopp_nolock(pfn); 5176 } 5177 5178 if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) { 5179 return (0); 5180 } 5181 5182 if (rootpp->p_szc != szc) { 5183 if (pp != rootpp) 5184 page_unlock(rootpp); 5185 return (0); 5186 } 5187 5188 page_demote_free_pages(rootpp); 5189 5190 if (pp != rootpp) 5191 page_unlock(rootpp); 5192 5193 ASSERT(PP_ISFREE(pp)); 5194 ASSERT(PAGE_EXCL(pp)); 5195 return (1); 5196 } 5197 5198 /* 5199 * Given a constituent page, try to demote the large page. 5200 * 5201 * Returns nonzero if the page could be demoted successfully. Returns with 5202 * the constituent page still locked. 5203 */ 5204 int 5205 page_try_demote_pages(page_t *pp) 5206 { 5207 page_t *tpp, *rootpp = pp; 5208 pfn_t pfn = page_pptonum(pp); 5209 spgcnt_t i, npgs; 5210 uint_t szc = pp->p_szc; 5211 vnode_t *vp = pp->p_vnode; 5212 5213 ASSERT(PAGE_EXCL(pp)); 5214 5215 VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]); 5216 5217 if (pp->p_szc == 0) { 5218 VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]); 5219 return (1); 5220 } 5221 5222 if (vp != NULL && !IS_SWAPFSVP(vp) && !VN_ISKAS(vp)) { 5223 VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]); 5224 page_demote_vp_pages(pp); 5225 ASSERT(pp->p_szc == 0); 5226 return (1); 5227 } 5228 5229 /* 5230 * Adjust rootpp if passed in is not the base 5231 * constituent page. 5232 */ 5233 npgs = page_get_pagecnt(pp->p_szc); 5234 ASSERT(npgs > 1); 5235 if (!IS_P2ALIGNED(pfn, npgs)) { 5236 pfn = P2ALIGN(pfn, npgs); 5237 rootpp = page_numtopp_nolock(pfn); 5238 VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]); 5239 ASSERT(rootpp->p_vnode != NULL); 5240 ASSERT(rootpp->p_szc == szc); 5241 } 5242 5243 /* 5244 * We can't demote kernel pages since we can't hat_unload() 5245 * the mappings. 5246 */ 5247 if (VN_ISKAS(rootpp->p_vnode)) 5248 return (0); 5249 5250 /* 5251 * Attempt to lock all constituent pages except the page passed 5252 * in since it's already locked. 5253 */ 5254 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5255 ASSERT(!PP_ISFREE(tpp)); 5256 ASSERT(tpp->p_vnode != NULL); 5257 5258 if (tpp != pp && !page_trylock(tpp, SE_EXCL)) 5259 break; 5260 ASSERT(tpp->p_szc == rootpp->p_szc); 5261 ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i); 5262 } 5263 5264 /* 5265 * If we failed to lock them all then unlock what we have 5266 * locked so far and bail. 5267 */ 5268 if (i < npgs) { 5269 tpp = rootpp; 5270 while (i-- > 0) { 5271 if (tpp != pp) 5272 page_unlock(tpp); 5273 tpp++; 5274 } 5275 VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]); 5276 return (0); 5277 } 5278 5279 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5280 ASSERT(PAGE_EXCL(tpp)); 5281 ASSERT(tpp->p_slckcnt == 0); 5282 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 5283 tpp->p_szc = 0; 5284 } 5285 5286 /* 5287 * Unlock all pages except the page passed in. 5288 */ 5289 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5290 ASSERT(!hat_page_is_mapped(tpp)); 5291 if (tpp != pp) 5292 page_unlock(tpp); 5293 } 5294 5295 VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]); 5296 return (1); 5297 } 5298 5299 /* 5300 * Called by page_free() and page_destroy() to demote the page size code 5301 * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non zero 5302 * p_szc on free list, neither can we just clear p_szc of a single page_t 5303 * within a large page since it will break other code that relies on p_szc 5304 * being the same for all page_t's of a large page). Anonymous pages should 5305 * never end up here because anon_map_getpages() cannot deal with p_szc 5306 * changes after a single constituent page is locked. While anonymous or 5307 * kernel large pages are demoted or freed the entire large page at a time 5308 * with all constituent pages locked EXCL for the file system pages we 5309 * have to be able to demote a large page (i.e. decrease all constituent pages 5310 * p_szc) with only just an EXCL lock on one of constituent pages. The reason 5311 * we can easily deal with anonymous page demotion the entire large page at a 5312 * time is that those operation originate at address space level and concern 5313 * the entire large page region with actual demotion only done when pages are 5314 * not shared with any other processes (therefore we can always get EXCL lock 5315 * on all anonymous constituent pages after clearing segment page 5316 * cache). However file system pages can be truncated or invalidated at a 5317 * PAGESIZE level from the file system side and end up in page_free() or 5318 * page_destroy() (we also allow only part of the large page to be SOFTLOCKed 5319 * and therefore pageout should be able to demote a large page by EXCL locking 5320 * any constituent page that is not under SOFTLOCK). In those cases we cannot 5321 * rely on being able to lock EXCL all constituent pages. 5322 * 5323 * To prevent szc changes on file system pages one has to lock all constituent 5324 * pages at least SHARED (or call page_szc_lock()). The only subsystem that 5325 * doesn't rely on locking all constituent pages (or using page_szc_lock()) to 5326 * prevent szc changes is hat layer that uses its own page level mlist 5327 * locks. hat assumes that szc doesn't change after mlist lock for a page is 5328 * taken. Therefore we need to change szc under hat level locks if we only 5329 * have an EXCL lock on a single constituent page and hat still references any 5330 * of constituent pages. (Note we can't "ignore" hat layer by simply 5331 * hat_pageunload() all constituent pages without having EXCL locks on all of 5332 * constituent pages). We use hat_page_demote() call to safely demote szc of 5333 * all constituent pages under hat locks when we only have an EXCL lock on one 5334 * of constituent pages. 5335 * 5336 * This routine calls page_szc_lock() before calling hat_page_demote() to 5337 * allow segvn in one special case not to lock all constituent pages SHARED 5338 * before calling hat_memload_array() that relies on p_szc not changing even 5339 * before hat level mlist lock is taken. In that case segvn uses 5340 * page_szc_lock() to prevent hat_page_demote() changing p_szc values. 5341 * 5342 * Anonymous or kernel page demotion still has to lock all pages exclusively 5343 * and do hat_pageunload() on all constituent pages before demoting the page 5344 * therefore there's no need for anonymous or kernel page demotion to use 5345 * hat_page_demote() mechanism. 5346 * 5347 * hat_page_demote() removes all large mappings that map pp and then decreases 5348 * p_szc starting from the last constituent page of the large page. By working 5349 * from the tail of a large page in pfn decreasing order allows one looking at 5350 * the root page to know that hat_page_demote() is done for root's szc area. 5351 * e.g. if a root page has szc 1 one knows it only has to lock all constituent 5352 * pages within szc 1 area to prevent szc changes because hat_page_demote() 5353 * that started on this page when it had szc > 1 is done for this szc 1 area. 5354 * 5355 * We are guaranteed that all constituent pages of pp's large page belong to 5356 * the same vnode with the consecutive offsets increasing in the direction of 5357 * the pfn i.e. the identity of constituent pages can't change until their 5358 * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove 5359 * large mappings to pp even though we don't lock any constituent page except 5360 * pp (i.e. we won't unload e.g. kernel locked page). 5361 */ 5362 static void 5363 page_demote_vp_pages(page_t *pp) 5364 { 5365 kmutex_t *mtx; 5366 5367 ASSERT(PAGE_EXCL(pp)); 5368 ASSERT(!PP_ISFREE(pp)); 5369 ASSERT(pp->p_vnode != NULL); 5370 ASSERT(!IS_SWAPFSVP(pp->p_vnode)); 5371 ASSERT(!PP_ISKAS(pp)); 5372 5373 VM_STAT_ADD(pagecnt.pc_demote_pages[0]); 5374 5375 mtx = page_szc_lock(pp); 5376 if (mtx != NULL) { 5377 hat_page_demote(pp); 5378 mutex_exit(mtx); 5379 } 5380 ASSERT(pp->p_szc == 0); 5381 } 5382 5383 /* 5384 * Mark any existing pages for migration in the given range 5385 */ 5386 void 5387 page_mark_migrate(struct seg *seg, caddr_t addr, size_t len, 5388 struct anon_map *amp, ulong_t anon_index, vnode_t *vp, 5389 u_offset_t vnoff, int rflag) 5390 { 5391 struct anon *ap; 5392 vnode_t *curvp; 5393 lgrp_t *from; 5394 pgcnt_t nlocked; 5395 u_offset_t off; 5396 pfn_t pfn; 5397 size_t pgsz; 5398 size_t segpgsz; 5399 pgcnt_t pages; 5400 uint_t pszc; 5401 page_t *pp0, *pp; 5402 caddr_t va; 5403 ulong_t an_idx; 5404 anon_sync_obj_t cookie; 5405 5406 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5407 5408 /* 5409 * Don't do anything if don't need to do lgroup optimizations 5410 * on this system 5411 */ 5412 if (!lgrp_optimizations()) 5413 return; 5414 5415 /* 5416 * Align address and length to (potentially large) page boundary 5417 */ 5418 segpgsz = page_get_pagesize(seg->s_szc); 5419 addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz); 5420 if (rflag) 5421 len = P2ROUNDUP(len, segpgsz); 5422 5423 /* 5424 * Do one (large) page at a time 5425 */ 5426 va = addr; 5427 while (va < addr + len) { 5428 /* 5429 * Lookup (root) page for vnode and offset corresponding to 5430 * this virtual address 5431 * Try anonmap first since there may be copy-on-write 5432 * pages, but initialize vnode pointer and offset using 5433 * vnode arguments just in case there isn't an amp. 5434 */ 5435 curvp = vp; 5436 off = vnoff + va - seg->s_base; 5437 if (amp) { 5438 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5439 an_idx = anon_index + seg_page(seg, va); 5440 anon_array_enter(amp, an_idx, &cookie); 5441 ap = anon_get_ptr(amp->ahp, an_idx); 5442 if (ap) 5443 swap_xlate(ap, &curvp, &off); 5444 anon_array_exit(&cookie); 5445 ANON_LOCK_EXIT(&->a_rwlock); 5446 } 5447 5448 pp = NULL; 5449 if (curvp) 5450 pp = page_lookup(curvp, off, SE_SHARED); 5451 5452 /* 5453 * If there isn't a page at this virtual address, 5454 * skip to next page 5455 */ 5456 if (pp == NULL) { 5457 va += PAGESIZE; 5458 continue; 5459 } 5460 5461 /* 5462 * Figure out which lgroup this page is in for kstats 5463 */ 5464 pfn = page_pptonum(pp); 5465 from = lgrp_pfn_to_lgrp(pfn); 5466 5467 /* 5468 * Get page size, and round up and skip to next page boundary 5469 * if unaligned address 5470 */ 5471 pszc = pp->p_szc; 5472 pgsz = page_get_pagesize(pszc); 5473 pages = btop(pgsz); 5474 if (!IS_P2ALIGNED(va, pgsz) || 5475 !IS_P2ALIGNED(pfn, pages) || 5476 pgsz > segpgsz) { 5477 pgsz = MIN(pgsz, segpgsz); 5478 page_unlock(pp); 5479 pages = btop(P2END((uintptr_t)va, pgsz) - 5480 (uintptr_t)va); 5481 va = (caddr_t)P2END((uintptr_t)va, pgsz); 5482 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, pages); 5483 continue; 5484 } 5485 5486 /* 5487 * Upgrade to exclusive lock on page 5488 */ 5489 if (!page_tryupgrade(pp)) { 5490 page_unlock(pp); 5491 va += pgsz; 5492 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 5493 btop(pgsz)); 5494 continue; 5495 } 5496 5497 pp0 = pp++; 5498 nlocked = 1; 5499 5500 /* 5501 * Lock constituent pages if this is large page 5502 */ 5503 if (pages > 1) { 5504 /* 5505 * Lock all constituents except root page, since it 5506 * should be locked already. 5507 */ 5508 for (; nlocked < pages; nlocked++) { 5509 if (!page_trylock(pp, SE_EXCL)) { 5510 break; 5511 } 5512 if (PP_ISFREE(pp) || 5513 pp->p_szc != pszc) { 5514 /* 5515 * hat_page_demote() raced in with us. 5516 */ 5517 ASSERT(!IS_SWAPFSVP(curvp)); 5518 page_unlock(pp); 5519 break; 5520 } 5521 pp++; 5522 } 5523 } 5524 5525 /* 5526 * If all constituent pages couldn't be locked, 5527 * unlock pages locked so far and skip to next page. 5528 */ 5529 if (nlocked < pages) { 5530 while (pp0 < pp) { 5531 page_unlock(pp0++); 5532 } 5533 va += pgsz; 5534 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 5535 btop(pgsz)); 5536 continue; 5537 } 5538 5539 /* 5540 * hat_page_demote() can no longer happen 5541 * since last cons page had the right p_szc after 5542 * all cons pages were locked. all cons pages 5543 * should now have the same p_szc. 5544 */ 5545 5546 /* 5547 * All constituent pages locked successfully, so mark 5548 * large page for migration and unload the mappings of 5549 * constituent pages, so a fault will occur on any part of the 5550 * large page 5551 */ 5552 PP_SETMIGRATE(pp0); 5553 while (pp0 < pp) { 5554 (void) hat_pageunload(pp0, HAT_FORCE_PGUNLOAD); 5555 ASSERT(hat_page_getshare(pp0) == 0); 5556 page_unlock(pp0++); 5557 } 5558 lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked); 5559 5560 va += pgsz; 5561 } 5562 } 5563 5564 /* 5565 * Migrate any pages that have been marked for migration in the given range 5566 */ 5567 void 5568 page_migrate( 5569 struct seg *seg, 5570 caddr_t addr, 5571 page_t **ppa, 5572 pgcnt_t npages) 5573 { 5574 lgrp_t *from; 5575 lgrp_t *to; 5576 page_t *newpp; 5577 page_t *pp; 5578 pfn_t pfn; 5579 size_t pgsz; 5580 spgcnt_t page_cnt; 5581 spgcnt_t i; 5582 uint_t pszc; 5583 5584 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5585 5586 while (npages > 0) { 5587 pp = *ppa; 5588 pszc = pp->p_szc; 5589 pgsz = page_get_pagesize(pszc); 5590 page_cnt = btop(pgsz); 5591 5592 /* 5593 * Check to see whether this page is marked for migration 5594 * 5595 * Assume that root page of large page is marked for 5596 * migration and none of the other constituent pages 5597 * are marked. This really simplifies clearing the 5598 * migrate bit by not having to clear it from each 5599 * constituent page. 5600 * 5601 * note we don't want to relocate an entire large page if 5602 * someone is only using one subpage. 5603 */ 5604 if (npages < page_cnt) 5605 break; 5606 5607 /* 5608 * Is it marked for migration? 5609 */ 5610 if (!PP_ISMIGRATE(pp)) 5611 goto next; 5612 5613 /* 5614 * Determine lgroups that page is being migrated between 5615 */ 5616 pfn = page_pptonum(pp); 5617 if (!IS_P2ALIGNED(pfn, page_cnt)) { 5618 break; 5619 } 5620 from = lgrp_pfn_to_lgrp(pfn); 5621 to = lgrp_mem_choose(seg, addr, pgsz); 5622 5623 /* 5624 * Need to get exclusive lock's to migrate 5625 */ 5626 for (i = 0; i < page_cnt; i++) { 5627 ASSERT(PAGE_LOCKED(ppa[i])); 5628 if (page_pptonum(ppa[i]) != pfn + i || 5629 ppa[i]->p_szc != pszc) { 5630 break; 5631 } 5632 if (!page_tryupgrade(ppa[i])) { 5633 lgrp_stat_add(from->lgrp_id, 5634 LGRP_PM_FAIL_LOCK_PGS, 5635 page_cnt); 5636 break; 5637 } 5638 5639 /* 5640 * Check to see whether we are trying to migrate 5641 * page to lgroup where it is allocated already. 5642 * If so, clear the migrate bit and skip to next 5643 * page. 5644 */ 5645 if (i == 0 && to == from) { 5646 PP_CLRMIGRATE(ppa[0]); 5647 page_downgrade(ppa[0]); 5648 goto next; 5649 } 5650 } 5651 5652 /* 5653 * If all constituent pages couldn't be locked, 5654 * unlock pages locked so far and skip to next page. 5655 */ 5656 if (i != page_cnt) { 5657 while (--i != -1) { 5658 page_downgrade(ppa[i]); 5659 } 5660 goto next; 5661 } 5662 5663 (void) page_create_wait(page_cnt, PG_WAIT); 5664 newpp = page_get_replacement_page(pp, to, PGR_SAMESZC); 5665 if (newpp == NULL) { 5666 page_create_putback(page_cnt); 5667 for (i = 0; i < page_cnt; i++) { 5668 page_downgrade(ppa[i]); 5669 } 5670 lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS, 5671 page_cnt); 5672 goto next; 5673 } 5674 ASSERT(newpp->p_szc == pszc); 5675 /* 5676 * Clear migrate bit and relocate page 5677 */ 5678 PP_CLRMIGRATE(pp); 5679 if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) { 5680 panic("page_migrate: page_relocate failed"); 5681 } 5682 ASSERT(page_cnt * PAGESIZE == pgsz); 5683 5684 /* 5685 * Keep stats for number of pages migrated from and to 5686 * each lgroup 5687 */ 5688 lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt); 5689 lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt); 5690 /* 5691 * update the page_t array we were passed in and 5692 * unlink constituent pages of a large page. 5693 */ 5694 for (i = 0; i < page_cnt; ++i, ++pp) { 5695 ASSERT(PAGE_EXCL(newpp)); 5696 ASSERT(newpp->p_szc == pszc); 5697 ppa[i] = newpp; 5698 pp = newpp; 5699 page_sub(&newpp, pp); 5700 page_downgrade(pp); 5701 } 5702 ASSERT(newpp == NULL); 5703 next: 5704 addr += pgsz; 5705 ppa += page_cnt; 5706 npages -= page_cnt; 5707 } 5708 } 5709 5710 ulong_t mem_waiters = 0; 5711 ulong_t max_count = 20; 5712 #define MAX_DELAY 0x1ff 5713 5714 /* 5715 * Check if enough memory is available to proceed. 5716 * Depending on system configuration and how much memory is 5717 * reserved for swap we need to check against two variables. 5718 * e.g. on systems with little physical swap availrmem can be 5719 * more reliable indicator of how much memory is available. 5720 * On systems with large phys swap freemem can be better indicator. 5721 * If freemem drops below threshold level don't return an error 5722 * immediately but wake up pageout to free memory and block. 5723 * This is done number of times. If pageout is not able to free 5724 * memory within certain time return an error. 5725 * The same applies for availrmem but kmem_reap is used to 5726 * free memory. 5727 */ 5728 int 5729 page_mem_avail(pgcnt_t npages) 5730 { 5731 ulong_t count; 5732 5733 #if defined(__i386) 5734 if (freemem > desfree + npages && 5735 availrmem > swapfs_reserve + npages && 5736 btop(vmem_size(heap_arena, VMEM_FREE)) > tune.t_minarmem + 5737 npages) 5738 return (1); 5739 #else 5740 if (freemem > desfree + npages && 5741 availrmem > swapfs_reserve + npages) 5742 return (1); 5743 #endif 5744 5745 count = max_count; 5746 atomic_add_long(&mem_waiters, 1); 5747 5748 while (freemem < desfree + npages && --count) { 5749 cv_signal(&proc_pageout->p_cv); 5750 if (delay_sig(hz + (mem_waiters & MAX_DELAY))) { 5751 atomic_add_long(&mem_waiters, -1); 5752 return (0); 5753 } 5754 } 5755 if (count == 0) { 5756 atomic_add_long(&mem_waiters, -1); 5757 return (0); 5758 } 5759 5760 count = max_count; 5761 while (availrmem < swapfs_reserve + npages && --count) { 5762 kmem_reap(); 5763 if (delay_sig(hz + (mem_waiters & MAX_DELAY))) { 5764 atomic_add_long(&mem_waiters, -1); 5765 return (0); 5766 } 5767 } 5768 atomic_add_long(&mem_waiters, -1); 5769 if (count == 0) 5770 return (0); 5771 5772 #if defined(__i386) 5773 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 5774 tune.t_minarmem + npages) 5775 return (0); 5776 #endif 5777 return (1); 5778 } 5779 5780 /* 5781 * Reclaim/reserve availrmem for npages. 5782 * If there is not enough memory start reaping seg, kmem caches. 5783 * Start pageout scanner (via page_needfree()). 5784 * Exit after max_cnt tries regardless of how much memory has been released. 5785 * Note: There is no guarantee that any availrmem will be freed as 5786 * this memory typically is locked (kernel heap) or reserved for swap. 5787 * Also due to memory fragmentation kmem allocator may not be able 5788 * to free any memory (single user allocated buffer will prevent 5789 * freeing slab or a page). 5790 */ 5791 int 5792 page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust, int max_cnt) 5793 { 5794 int i = 0; 5795 int ret = 0; 5796 pgcnt_t deficit; 5797 pgcnt_t old_availrmem; 5798 5799 mutex_enter(&freemem_lock); 5800 old_availrmem = availrmem - 1; 5801 while ((availrmem < tune.t_minarmem + npages + epages) && 5802 (old_availrmem < availrmem) && (i++ < max_cnt)) { 5803 old_availrmem = availrmem; 5804 deficit = tune.t_minarmem + npages + epages - availrmem; 5805 mutex_exit(&freemem_lock); 5806 page_needfree(deficit); 5807 kmem_reap(); 5808 delay(hz); 5809 page_needfree(-(spgcnt_t)deficit); 5810 mutex_enter(&freemem_lock); 5811 } 5812 5813 if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) { 5814 availrmem -= npages; 5815 ret = 1; 5816 } 5817 5818 mutex_exit(&freemem_lock); 5819 5820 return (ret); 5821 } 5822 5823 /* 5824 * Search the memory segments to locate the desired page. Within a 5825 * segment, pages increase linearly with one page structure per 5826 * physical page frame (size PAGESIZE). The search begins 5827 * with the segment that was accessed last, to take advantage of locality. 5828 * If the hint misses, we start from the beginning of the sorted memseg list 5829 */ 5830 5831 5832 /* 5833 * Some data structures for pfn to pp lookup. 5834 */ 5835 ulong_t mhash_per_slot; 5836 struct memseg *memseg_hash[N_MEM_SLOTS]; 5837 5838 page_t * 5839 page_numtopp_nolock(pfn_t pfnum) 5840 { 5841 struct memseg *seg; 5842 page_t *pp; 5843 vm_cpu_data_t *vc; 5844 5845 /* 5846 * We need to disable kernel preemption while referencing the 5847 * cpu_vm_data field in order to prevent us from being switched to 5848 * another cpu and trying to reference it after it has been freed. 5849 * This will keep us on cpu and prevent it from being removed while 5850 * we are still on it. 5851 * 5852 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg 5853 * which is being resued by DR who will flush those references 5854 * before modifying the reused memseg. See memseg_cpu_vm_flush(). 5855 */ 5856 kpreempt_disable(); 5857 vc = CPU->cpu_vm_data; 5858 ASSERT(vc != NULL); 5859 5860 MEMSEG_STAT_INCR(nsearch); 5861 5862 /* Try last winner first */ 5863 if (((seg = vc->vc_pnum_memseg) != NULL) && 5864 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5865 MEMSEG_STAT_INCR(nlastwon); 5866 pp = seg->pages + (pfnum - seg->pages_base); 5867 if (pp->p_pagenum == pfnum) { 5868 kpreempt_enable(); 5869 return ((page_t *)pp); 5870 } 5871 } 5872 5873 /* Else Try hash */ 5874 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 5875 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5876 MEMSEG_STAT_INCR(nhashwon); 5877 vc->vc_pnum_memseg = seg; 5878 pp = seg->pages + (pfnum - seg->pages_base); 5879 if (pp->p_pagenum == pfnum) { 5880 kpreempt_enable(); 5881 return ((page_t *)pp); 5882 } 5883 } 5884 5885 /* Else Brute force */ 5886 for (seg = memsegs; seg != NULL; seg = seg->next) { 5887 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5888 vc->vc_pnum_memseg = seg; 5889 pp = seg->pages + (pfnum - seg->pages_base); 5890 if (pp->p_pagenum == pfnum) { 5891 kpreempt_enable(); 5892 return ((page_t *)pp); 5893 } 5894 } 5895 } 5896 vc->vc_pnum_memseg = NULL; 5897 kpreempt_enable(); 5898 MEMSEG_STAT_INCR(nnotfound); 5899 return ((page_t *)NULL); 5900 5901 } 5902 5903 struct memseg * 5904 page_numtomemseg_nolock(pfn_t pfnum) 5905 { 5906 struct memseg *seg; 5907 page_t *pp; 5908 5909 /* 5910 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg 5911 * which is being resued by DR who will flush those references 5912 * before modifying the reused memseg. See memseg_cpu_vm_flush(). 5913 */ 5914 kpreempt_disable(); 5915 /* Try hash */ 5916 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 5917 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5918 pp = seg->pages + (pfnum - seg->pages_base); 5919 if (pp->p_pagenum == pfnum) { 5920 kpreempt_enable(); 5921 return (seg); 5922 } 5923 } 5924 5925 /* Else Brute force */ 5926 for (seg = memsegs; seg != NULL; seg = seg->next) { 5927 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5928 pp = seg->pages + (pfnum - seg->pages_base); 5929 if (pp->p_pagenum == pfnum) { 5930 kpreempt_enable(); 5931 return (seg); 5932 } 5933 } 5934 } 5935 kpreempt_enable(); 5936 return ((struct memseg *)NULL); 5937 } 5938 5939 /* 5940 * Given a page and a count return the page struct that is 5941 * n structs away from the current one in the global page 5942 * list. 5943 * 5944 * This function wraps to the first page upon 5945 * reaching the end of the memseg list. 5946 */ 5947 page_t * 5948 page_nextn(page_t *pp, ulong_t n) 5949 { 5950 struct memseg *seg; 5951 page_t *ppn; 5952 vm_cpu_data_t *vc; 5953 5954 /* 5955 * We need to disable kernel preemption while referencing the 5956 * cpu_vm_data field in order to prevent us from being switched to 5957 * another cpu and trying to reference it after it has been freed. 5958 * This will keep us on cpu and prevent it from being removed while 5959 * we are still on it. 5960 * 5961 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg 5962 * which is being resued by DR who will flush those references 5963 * before modifying the reused memseg. See memseg_cpu_vm_flush(). 5964 */ 5965 kpreempt_disable(); 5966 vc = (vm_cpu_data_t *)CPU->cpu_vm_data; 5967 5968 ASSERT(vc != NULL); 5969 5970 if (((seg = vc->vc_pnext_memseg) == NULL) || 5971 (seg->pages_base == seg->pages_end) || 5972 !(pp >= seg->pages && pp < seg->epages)) { 5973 5974 for (seg = memsegs; seg; seg = seg->next) { 5975 if (pp >= seg->pages && pp < seg->epages) 5976 break; 5977 } 5978 5979 if (seg == NULL) { 5980 /* Memory delete got in, return something valid. */ 5981 /* TODO: fix me. */ 5982 seg = memsegs; 5983 pp = seg->pages; 5984 } 5985 } 5986 5987 /* check for wraparound - possible if n is large */ 5988 while ((ppn = (pp + n)) >= seg->epages || ppn < pp) { 5989 n -= seg->epages - pp; 5990 seg = seg->next; 5991 if (seg == NULL) 5992 seg = memsegs; 5993 pp = seg->pages; 5994 } 5995 vc->vc_pnext_memseg = seg; 5996 kpreempt_enable(); 5997 return (ppn); 5998 } 5999 6000 /* 6001 * Initialize for a loop using page_next_scan_large(). 6002 */ 6003 page_t * 6004 page_next_scan_init(void **cookie) 6005 { 6006 ASSERT(cookie != NULL); 6007 *cookie = (void *)memsegs; 6008 return ((page_t *)memsegs->pages); 6009 } 6010 6011 /* 6012 * Return the next page in a scan of page_t's, assuming we want 6013 * to skip over sub-pages within larger page sizes. 6014 * 6015 * The cookie is used to keep track of the current memseg. 6016 */ 6017 page_t * 6018 page_next_scan_large( 6019 page_t *pp, 6020 ulong_t *n, 6021 void **cookie) 6022 { 6023 struct memseg *seg = (struct memseg *)*cookie; 6024 page_t *new_pp; 6025 ulong_t cnt; 6026 pfn_t pfn; 6027 6028 6029 /* 6030 * get the count of page_t's to skip based on the page size 6031 */ 6032 ASSERT(pp != NULL); 6033 if (pp->p_szc == 0) { 6034 cnt = 1; 6035 } else { 6036 pfn = page_pptonum(pp); 6037 cnt = page_get_pagecnt(pp->p_szc); 6038 cnt -= pfn & (cnt - 1); 6039 } 6040 *n += cnt; 6041 new_pp = pp + cnt; 6042 6043 /* 6044 * Catch if we went past the end of the current memory segment. If so, 6045 * just move to the next segment with pages. 6046 */ 6047 if (new_pp >= seg->epages || seg->pages_base == seg->pages_end) { 6048 do { 6049 seg = seg->next; 6050 if (seg == NULL) 6051 seg = memsegs; 6052 } while (seg->pages_base == seg->pages_end); 6053 new_pp = seg->pages; 6054 *cookie = (void *)seg; 6055 } 6056 6057 return (new_pp); 6058 } 6059 6060 6061 /* 6062 * Returns next page in list. Note: this function wraps 6063 * to the first page in the list upon reaching the end 6064 * of the list. Callers should be aware of this fact. 6065 */ 6066 6067 /* We should change this be a #define */ 6068 6069 page_t * 6070 page_next(page_t *pp) 6071 { 6072 return (page_nextn(pp, 1)); 6073 } 6074 6075 page_t * 6076 page_first() 6077 { 6078 return ((page_t *)memsegs->pages); 6079 } 6080 6081 6082 /* 6083 * This routine is called at boot with the initial memory configuration 6084 * and when memory is added or removed. 6085 */ 6086 void 6087 build_pfn_hash() 6088 { 6089 pfn_t cur; 6090 pgcnt_t index; 6091 struct memseg *pseg; 6092 int i; 6093 6094 /* 6095 * Clear memseg_hash array. 6096 * Since memory add/delete is designed to operate concurrently 6097 * with normal operation, the hash rebuild must be able to run 6098 * concurrently with page_numtopp_nolock(). To support this 6099 * functionality, assignments to memseg_hash array members must 6100 * be done atomically. 6101 * 6102 * NOTE: bzero() does not currently guarantee this for kernel 6103 * threads, and cannot be used here. 6104 */ 6105 for (i = 0; i < N_MEM_SLOTS; i++) 6106 memseg_hash[i] = NULL; 6107 6108 hat_kpm_mseghash_clear(N_MEM_SLOTS); 6109 6110 /* 6111 * Physmax is the last valid pfn. 6112 */ 6113 mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT; 6114 for (pseg = memsegs; pseg != NULL; pseg = pseg->next) { 6115 index = MEMSEG_PFN_HASH(pseg->pages_base); 6116 cur = pseg->pages_base; 6117 do { 6118 if (index >= N_MEM_SLOTS) 6119 index = MEMSEG_PFN_HASH(cur); 6120 6121 if (memseg_hash[index] == NULL || 6122 memseg_hash[index]->pages_base > pseg->pages_base) { 6123 memseg_hash[index] = pseg; 6124 hat_kpm_mseghash_update(index, pseg); 6125 } 6126 cur += mhash_per_slot; 6127 index++; 6128 } while (cur < pseg->pages_end); 6129 } 6130 } 6131 6132 /* 6133 * Return the pagenum for the pp 6134 */ 6135 pfn_t 6136 page_pptonum(page_t *pp) 6137 { 6138 return (pp->p_pagenum); 6139 } 6140 6141 /* 6142 * interface to the referenced and modified etc bits 6143 * in the PSM part of the page struct 6144 * when no locking is desired. 6145 */ 6146 void 6147 page_set_props(page_t *pp, uint_t flags) 6148 { 6149 ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0); 6150 pp->p_nrm |= (uchar_t)flags; 6151 } 6152 6153 void 6154 page_clr_all_props(page_t *pp) 6155 { 6156 pp->p_nrm = 0; 6157 } 6158 6159 /* 6160 * Clear p_lckcnt and p_cowcnt, adjusting freemem if required. 6161 */ 6162 int 6163 page_clear_lck_cow(page_t *pp, int adjust) 6164 { 6165 int f_amount; 6166 6167 ASSERT(PAGE_EXCL(pp)); 6168 6169 /* 6170 * The page_struct_lock need not be acquired here since 6171 * we require the caller hold the page exclusively locked. 6172 */ 6173 f_amount = 0; 6174 if (pp->p_lckcnt) { 6175 f_amount = 1; 6176 pp->p_lckcnt = 0; 6177 } 6178 if (pp->p_cowcnt) { 6179 f_amount += pp->p_cowcnt; 6180 pp->p_cowcnt = 0; 6181 } 6182 6183 if (adjust && f_amount) { 6184 mutex_enter(&freemem_lock); 6185 availrmem += f_amount; 6186 mutex_exit(&freemem_lock); 6187 } 6188 6189 return (f_amount); 6190 } 6191 6192 /* 6193 * The following functions is called from free_vp_pages() 6194 * for an inexact estimate of a newly free'd page... 6195 */ 6196 ulong_t 6197 page_share_cnt(page_t *pp) 6198 { 6199 return (hat_page_getshare(pp)); 6200 } 6201 6202 int 6203 page_isshared(page_t *pp) 6204 { 6205 return (hat_page_checkshare(pp, 1)); 6206 } 6207 6208 int 6209 page_isfree(page_t *pp) 6210 { 6211 return (PP_ISFREE(pp)); 6212 } 6213 6214 int 6215 page_isref(page_t *pp) 6216 { 6217 return (hat_page_getattr(pp, P_REF)); 6218 } 6219 6220 int 6221 page_ismod(page_t *pp) 6222 { 6223 return (hat_page_getattr(pp, P_MOD)); 6224 } 6225 6226 /* 6227 * The following code all currently relates to the page capture logic: 6228 * 6229 * This logic is used for cases where there is a desire to claim a certain 6230 * physical page in the system for the caller. As it may not be possible 6231 * to capture the page immediately, the p_toxic bits are used in the page 6232 * structure to indicate that someone wants to capture this page. When the 6233 * page gets unlocked, the toxic flag will be noted and an attempt to capture 6234 * the page will be made. If it is successful, the original callers callback 6235 * will be called with the page to do with it what they please. 6236 * 6237 * There is also an async thread which wakes up to attempt to capture 6238 * pages occasionally which have the capture bit set. All of the pages which 6239 * need to be captured asynchronously have been inserted into the 6240 * page_capture_hash and thus this thread walks that hash list. Items in the 6241 * hash have an expiration time so this thread handles that as well by removing 6242 * the item from the hash if it has expired. 6243 * 6244 * Some important things to note are: 6245 * - if the PR_CAPTURE bit is set on a page, then the page is in the 6246 * page_capture_hash. The page_capture_hash_head.pchh_mutex is needed 6247 * to set and clear this bit, and while the lock is held is the only time 6248 * you can add or remove an entry from the hash. 6249 * - the PR_CAPTURE bit can only be set and cleared while holding the 6250 * page_capture_hash_head.pchh_mutex 6251 * - the t_flag field of the thread struct is used with the T_CAPTURING 6252 * flag to prevent recursion while dealing with large pages. 6253 * - pages which need to be retired never expire on the page_capture_hash. 6254 */ 6255 6256 static void page_capture_thread(void); 6257 static kthread_t *pc_thread_id; 6258 kcondvar_t pc_cv; 6259 static kmutex_t pc_thread_mutex; 6260 static clock_t pc_thread_shortwait; 6261 static clock_t pc_thread_longwait; 6262 static int pc_thread_retry; 6263 6264 struct page_capture_callback pc_cb[PC_NUM_CALLBACKS]; 6265 6266 /* Note that this is a circular linked list */ 6267 typedef struct page_capture_hash_bucket { 6268 page_t *pp; 6269 uchar_t szc; 6270 uchar_t pri; 6271 uint_t flags; 6272 clock_t expires; /* lbolt at which this request expires. */ 6273 void *datap; /* Cached data passed in for callback */ 6274 struct page_capture_hash_bucket *next; 6275 struct page_capture_hash_bucket *prev; 6276 } page_capture_hash_bucket_t; 6277 6278 #define PC_PRI_HI 0 /* capture now */ 6279 #define PC_PRI_LO 1 /* capture later */ 6280 #define PC_NUM_PRI 2 6281 6282 #define PAGE_CAPTURE_PRIO(pp) (PP_ISRAF(pp) ? PC_PRI_LO : PC_PRI_HI) 6283 6284 6285 /* 6286 * Each hash bucket will have it's own mutex and two lists which are: 6287 * active (0): represents requests which have not been processed by 6288 * the page_capture async thread yet. 6289 * walked (1): represents requests which have been processed by the 6290 * page_capture async thread within it's given walk of this bucket. 6291 * 6292 * These are all needed so that we can synchronize all async page_capture 6293 * events. When the async thread moves to a new bucket, it will append the 6294 * walked list to the active list and walk each item one at a time, moving it 6295 * from the active list to the walked list. Thus if there is an async request 6296 * outstanding for a given page, it will always be in one of the two lists. 6297 * New requests will always be added to the active list. 6298 * If we were not able to capture a page before the request expired, we'd free 6299 * up the request structure which would indicate to page_capture that there is 6300 * no longer a need for the given page, and clear the PR_CAPTURE flag if 6301 * possible. 6302 */ 6303 typedef struct page_capture_hash_head { 6304 kmutex_t pchh_mutex; 6305 uint_t num_pages[PC_NUM_PRI]; 6306 page_capture_hash_bucket_t lists[2]; /* sentinel nodes */ 6307 } page_capture_hash_head_t; 6308 6309 #ifdef DEBUG 6310 #define NUM_PAGE_CAPTURE_BUCKETS 4 6311 #else 6312 #define NUM_PAGE_CAPTURE_BUCKETS 64 6313 #endif 6314 6315 page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS]; 6316 6317 /* for now use a very simple hash based upon the size of a page struct */ 6318 #define PAGE_CAPTURE_HASH(pp) \ 6319 ((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1))) 6320 6321 extern pgcnt_t swapfs_minfree; 6322 6323 int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap); 6324 6325 /* 6326 * a callback function is required for page capture requests. 6327 */ 6328 void 6329 page_capture_register_callback(uint_t index, clock_t duration, 6330 int (*cb_func)(page_t *, void *, uint_t)) 6331 { 6332 ASSERT(pc_cb[index].cb_active == 0); 6333 ASSERT(cb_func != NULL); 6334 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 6335 pc_cb[index].duration = duration; 6336 pc_cb[index].cb_func = cb_func; 6337 pc_cb[index].cb_active = 1; 6338 rw_exit(&pc_cb[index].cb_rwlock); 6339 } 6340 6341 void 6342 page_capture_unregister_callback(uint_t index) 6343 { 6344 int i, j; 6345 struct page_capture_hash_bucket *bp1; 6346 struct page_capture_hash_bucket *bp2; 6347 struct page_capture_hash_bucket *head = NULL; 6348 uint_t flags = (1 << index); 6349 6350 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 6351 ASSERT(pc_cb[index].cb_active == 1); 6352 pc_cb[index].duration = 0; /* Paranoia */ 6353 pc_cb[index].cb_func = NULL; /* Paranoia */ 6354 pc_cb[index].cb_active = 0; 6355 rw_exit(&pc_cb[index].cb_rwlock); 6356 6357 /* 6358 * Just move all the entries to a private list which we can walk 6359 * through without the need to hold any locks. 6360 * No more requests can get added to the hash lists for this consumer 6361 * as the cb_active field for the callback has been cleared. 6362 */ 6363 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 6364 mutex_enter(&page_capture_hash[i].pchh_mutex); 6365 for (j = 0; j < 2; j++) { 6366 bp1 = page_capture_hash[i].lists[j].next; 6367 /* walk through all but first (sentinel) element */ 6368 while (bp1 != &page_capture_hash[i].lists[j]) { 6369 bp2 = bp1; 6370 if (bp2->flags & flags) { 6371 bp1 = bp2->next; 6372 bp1->prev = bp2->prev; 6373 bp2->prev->next = bp1; 6374 bp2->next = head; 6375 head = bp2; 6376 /* 6377 * Clear the PR_CAPTURE bit as we 6378 * hold appropriate locks here. 6379 */ 6380 page_clrtoxic(head->pp, PR_CAPTURE); 6381 page_capture_hash[i]. 6382 num_pages[bp2->pri]--; 6383 continue; 6384 } 6385 bp1 = bp1->next; 6386 } 6387 } 6388 mutex_exit(&page_capture_hash[i].pchh_mutex); 6389 } 6390 6391 while (head != NULL) { 6392 bp1 = head; 6393 head = head->next; 6394 kmem_free(bp1, sizeof (*bp1)); 6395 } 6396 } 6397 6398 6399 /* 6400 * Find pp in the active list and move it to the walked list if it 6401 * exists. 6402 * Note that most often pp should be at the front of the active list 6403 * as it is currently used and thus there is no other sort of optimization 6404 * being done here as this is a linked list data structure. 6405 * Returns 1 on successful move or 0 if page could not be found. 6406 */ 6407 static int 6408 page_capture_move_to_walked(page_t *pp) 6409 { 6410 page_capture_hash_bucket_t *bp; 6411 int index; 6412 6413 index = PAGE_CAPTURE_HASH(pp); 6414 6415 mutex_enter(&page_capture_hash[index].pchh_mutex); 6416 bp = page_capture_hash[index].lists[0].next; 6417 while (bp != &page_capture_hash[index].lists[0]) { 6418 if (bp->pp == pp) { 6419 /* Remove from old list */ 6420 bp->next->prev = bp->prev; 6421 bp->prev->next = bp->next; 6422 6423 /* Add to new list */ 6424 bp->next = page_capture_hash[index].lists[1].next; 6425 bp->prev = &page_capture_hash[index].lists[1]; 6426 page_capture_hash[index].lists[1].next = bp; 6427 bp->next->prev = bp; 6428 6429 /* 6430 * There is a small probability of page on a free 6431 * list being retired while being allocated 6432 * and before P_RAF is set on it. The page may 6433 * end up marked as high priority request instead 6434 * of low priority request. 6435 * If P_RAF page is not marked as low priority request 6436 * change it to low priority request. 6437 */ 6438 page_capture_hash[index].num_pages[bp->pri]--; 6439 bp->pri = PAGE_CAPTURE_PRIO(pp); 6440 page_capture_hash[index].num_pages[bp->pri]++; 6441 mutex_exit(&page_capture_hash[index].pchh_mutex); 6442 return (1); 6443 } 6444 bp = bp->next; 6445 } 6446 mutex_exit(&page_capture_hash[index].pchh_mutex); 6447 return (0); 6448 } 6449 6450 /* 6451 * Add a new entry to the page capture hash. The only case where a new 6452 * entry is not added is when the page capture consumer is no longer registered. 6453 * In this case, we'll silently not add the page to the hash. We know that 6454 * page retire will always be registered for the case where we are currently 6455 * unretiring a page and thus there are no conflicts. 6456 */ 6457 static void 6458 page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap) 6459 { 6460 page_capture_hash_bucket_t *bp1; 6461 page_capture_hash_bucket_t *bp2; 6462 int index; 6463 int cb_index; 6464 int i; 6465 uchar_t pri; 6466 #ifdef DEBUG 6467 page_capture_hash_bucket_t *tp1; 6468 int l; 6469 #endif 6470 6471 ASSERT(!(flags & CAPTURE_ASYNC)); 6472 6473 bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP); 6474 6475 bp1->pp = pp; 6476 bp1->szc = szc; 6477 bp1->flags = flags; 6478 bp1->datap = datap; 6479 6480 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6481 if ((flags >> cb_index) & 1) { 6482 break; 6483 } 6484 } 6485 6486 ASSERT(cb_index != PC_NUM_CALLBACKS); 6487 6488 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6489 if (pc_cb[cb_index].cb_active) { 6490 if (pc_cb[cb_index].duration == -1) { 6491 bp1->expires = (clock_t)-1; 6492 } else { 6493 bp1->expires = ddi_get_lbolt() + 6494 pc_cb[cb_index].duration; 6495 } 6496 } else { 6497 /* There's no callback registered so don't add to the hash */ 6498 rw_exit(&pc_cb[cb_index].cb_rwlock); 6499 kmem_free(bp1, sizeof (*bp1)); 6500 return; 6501 } 6502 6503 index = PAGE_CAPTURE_HASH(pp); 6504 6505 /* 6506 * Only allow capture flag to be modified under this mutex. 6507 * Prevents multiple entries for same page getting added. 6508 */ 6509 mutex_enter(&page_capture_hash[index].pchh_mutex); 6510 6511 /* 6512 * if not already on the hash, set capture bit and add to the hash 6513 */ 6514 if (!(pp->p_toxic & PR_CAPTURE)) { 6515 #ifdef DEBUG 6516 /* Check for duplicate entries */ 6517 for (l = 0; l < 2; l++) { 6518 tp1 = page_capture_hash[index].lists[l].next; 6519 while (tp1 != &page_capture_hash[index].lists[l]) { 6520 if (tp1->pp == pp) { 6521 panic("page pp 0x%p already on hash " 6522 "at 0x%p\n", 6523 (void *)pp, (void *)tp1); 6524 } 6525 tp1 = tp1->next; 6526 } 6527 } 6528 6529 #endif 6530 page_settoxic(pp, PR_CAPTURE); 6531 pri = PAGE_CAPTURE_PRIO(pp); 6532 bp1->pri = pri; 6533 bp1->next = page_capture_hash[index].lists[0].next; 6534 bp1->prev = &page_capture_hash[index].lists[0]; 6535 bp1->next->prev = bp1; 6536 page_capture_hash[index].lists[0].next = bp1; 6537 page_capture_hash[index].num_pages[pri]++; 6538 if (flags & CAPTURE_RETIRE) { 6539 page_retire_incr_pend_count(datap); 6540 } 6541 mutex_exit(&page_capture_hash[index].pchh_mutex); 6542 rw_exit(&pc_cb[cb_index].cb_rwlock); 6543 cv_signal(&pc_cv); 6544 return; 6545 } 6546 6547 /* 6548 * A page retire request will replace any other request. 6549 * A second physmem request which is for a different process than 6550 * the currently registered one will be dropped as there is 6551 * no way to hold the private data for both calls. 6552 * In the future, once there are more callers, this will have to 6553 * be worked out better as there needs to be private storage for 6554 * at least each type of caller (maybe have datap be an array of 6555 * *void's so that we can index based upon callers index). 6556 */ 6557 6558 /* walk hash list to update expire time */ 6559 for (i = 0; i < 2; i++) { 6560 bp2 = page_capture_hash[index].lists[i].next; 6561 while (bp2 != &page_capture_hash[index].lists[i]) { 6562 if (bp2->pp == pp) { 6563 if (flags & CAPTURE_RETIRE) { 6564 if (!(bp2->flags & CAPTURE_RETIRE)) { 6565 page_retire_incr_pend_count( 6566 datap); 6567 bp2->flags = flags; 6568 bp2->expires = bp1->expires; 6569 bp2->datap = datap; 6570 } 6571 } else { 6572 ASSERT(flags & CAPTURE_PHYSMEM); 6573 if (!(bp2->flags & CAPTURE_RETIRE) && 6574 (datap == bp2->datap)) { 6575 bp2->expires = bp1->expires; 6576 } 6577 } 6578 mutex_exit(&page_capture_hash[index]. 6579 pchh_mutex); 6580 rw_exit(&pc_cb[cb_index].cb_rwlock); 6581 kmem_free(bp1, sizeof (*bp1)); 6582 return; 6583 } 6584 bp2 = bp2->next; 6585 } 6586 } 6587 6588 /* 6589 * the PR_CAPTURE flag is protected by the page_capture_hash mutexes 6590 * and thus it either has to be set or not set and can't change 6591 * while holding the mutex above. 6592 */ 6593 panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n", 6594 (void *)pp); 6595 } 6596 6597 /* 6598 * We have a page in our hands, lets try and make it ours by turning 6599 * it into a clean page like it had just come off the freelists. 6600 * 6601 * Returns 0 on success, with the page still EXCL locked. 6602 * On failure, the page will be unlocked, and returns EAGAIN 6603 */ 6604 static int 6605 page_capture_clean_page(page_t *pp) 6606 { 6607 page_t *newpp; 6608 int skip_unlock = 0; 6609 spgcnt_t count; 6610 page_t *tpp; 6611 int ret = 0; 6612 int extra; 6613 6614 ASSERT(PAGE_EXCL(pp)); 6615 ASSERT(!PP_RETIRED(pp)); 6616 ASSERT(curthread->t_flag & T_CAPTURING); 6617 6618 if (PP_ISFREE(pp)) { 6619 if (!page_reclaim(pp, NULL)) { 6620 skip_unlock = 1; 6621 ret = EAGAIN; 6622 goto cleanup; 6623 } 6624 ASSERT(pp->p_szc == 0); 6625 if (pp->p_vnode != NULL) { 6626 /* 6627 * Since this page came from the 6628 * cachelist, we must destroy the 6629 * old vnode association. 6630 */ 6631 page_hashout(pp, NULL); 6632 } 6633 goto cleanup; 6634 } 6635 6636 /* 6637 * If we know page_relocate will fail, skip it 6638 * It could still fail due to a UE on another page but we 6639 * can't do anything about that. 6640 */ 6641 if (pp->p_toxic & PR_UE) { 6642 goto skip_relocate; 6643 } 6644 6645 /* 6646 * It's possible that pages can not have a vnode as fsflush comes 6647 * through and cleans up these pages. It's ugly but that's how it is. 6648 */ 6649 if (pp->p_vnode == NULL) { 6650 goto skip_relocate; 6651 } 6652 6653 /* 6654 * Page was not free, so lets try to relocate it. 6655 * page_relocate only works with root pages, so if this is not a root 6656 * page, we need to demote it to try and relocate it. 6657 * Unfortunately this is the best we can do right now. 6658 */ 6659 newpp = NULL; 6660 if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) { 6661 if (page_try_demote_pages(pp) == 0) { 6662 ret = EAGAIN; 6663 goto cleanup; 6664 } 6665 } 6666 ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL); 6667 if (ret == 0) { 6668 page_t *npp; 6669 /* unlock the new page(s) */ 6670 while (count-- > 0) { 6671 ASSERT(newpp != NULL); 6672 npp = newpp; 6673 page_sub(&newpp, npp); 6674 page_unlock(npp); 6675 } 6676 ASSERT(newpp == NULL); 6677 /* 6678 * Check to see if the page we have is too large. 6679 * If so, demote it freeing up the extra pages. 6680 */ 6681 if (pp->p_szc > 0) { 6682 /* For now demote extra pages to szc == 0 */ 6683 extra = page_get_pagecnt(pp->p_szc) - 1; 6684 while (extra > 0) { 6685 tpp = pp->p_next; 6686 page_sub(&pp, tpp); 6687 tpp->p_szc = 0; 6688 page_free(tpp, 1); 6689 extra--; 6690 } 6691 /* Make sure to set our page to szc 0 as well */ 6692 ASSERT(pp->p_next == pp && pp->p_prev == pp); 6693 pp->p_szc = 0; 6694 } 6695 goto cleanup; 6696 } else if (ret == EIO) { 6697 ret = EAGAIN; 6698 goto cleanup; 6699 } else { 6700 /* 6701 * Need to reset return type as we failed to relocate the page 6702 * but that does not mean that some of the next steps will not 6703 * work. 6704 */ 6705 ret = 0; 6706 } 6707 6708 skip_relocate: 6709 6710 if (pp->p_szc > 0) { 6711 if (page_try_demote_pages(pp) == 0) { 6712 ret = EAGAIN; 6713 goto cleanup; 6714 } 6715 } 6716 6717 ASSERT(pp->p_szc == 0); 6718 6719 if (hat_ismod(pp)) { 6720 ret = EAGAIN; 6721 goto cleanup; 6722 } 6723 if (PP_ISKAS(pp)) { 6724 ret = EAGAIN; 6725 goto cleanup; 6726 } 6727 if (pp->p_lckcnt || pp->p_cowcnt) { 6728 ret = EAGAIN; 6729 goto cleanup; 6730 } 6731 6732 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 6733 ASSERT(!hat_page_is_mapped(pp)); 6734 6735 if (hat_ismod(pp)) { 6736 /* 6737 * This is a semi-odd case as the page is now modified but not 6738 * mapped as we just unloaded the mappings above. 6739 */ 6740 ret = EAGAIN; 6741 goto cleanup; 6742 } 6743 if (pp->p_vnode != NULL) { 6744 page_hashout(pp, NULL); 6745 } 6746 6747 /* 6748 * At this point, the page should be in a clean state and 6749 * we can do whatever we want with it. 6750 */ 6751 6752 cleanup: 6753 if (ret != 0) { 6754 if (!skip_unlock) { 6755 page_unlock(pp); 6756 } 6757 } else { 6758 ASSERT(pp->p_szc == 0); 6759 ASSERT(PAGE_EXCL(pp)); 6760 6761 pp->p_next = pp; 6762 pp->p_prev = pp; 6763 } 6764 return (ret); 6765 } 6766 6767 /* 6768 * Various callers of page_trycapture() can have different restrictions upon 6769 * what memory they have access to. 6770 * Returns 0 on success, with the following error codes on failure: 6771 * EPERM - The requested page is long term locked, and thus repeated 6772 * requests to capture this page will likely fail. 6773 * ENOMEM - There was not enough free memory in the system to safely 6774 * map the requested page. 6775 * ENOENT - The requested page was inside the kernel cage, and the 6776 * PHYSMEM_CAGE flag was not set. 6777 */ 6778 int 6779 page_capture_pre_checks(page_t *pp, uint_t flags) 6780 { 6781 ASSERT(pp != NULL); 6782 6783 #if defined(__sparc) 6784 if (pp->p_vnode == &promvp) { 6785 return (EPERM); 6786 } 6787 6788 if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE) && 6789 (flags & CAPTURE_PHYSMEM)) { 6790 return (ENOENT); 6791 } 6792 6793 if (PP_ISNORELOCKERNEL(pp)) { 6794 return (EPERM); 6795 } 6796 #else 6797 if (PP_ISKAS(pp)) { 6798 return (EPERM); 6799 } 6800 #endif /* __sparc */ 6801 6802 /* only physmem currently has the restrictions checked below */ 6803 if (!(flags & CAPTURE_PHYSMEM)) { 6804 return (0); 6805 } 6806 6807 if (availrmem < swapfs_minfree) { 6808 /* 6809 * We won't try to capture this page as we are 6810 * running low on memory. 6811 */ 6812 return (ENOMEM); 6813 } 6814 return (0); 6815 } 6816 6817 /* 6818 * Once we have a page in our mits, go ahead and complete the capture 6819 * operation. 6820 * Returns 1 on failure where page is no longer needed 6821 * Returns 0 on success 6822 * Returns -1 if there was a transient failure. 6823 * Failure cases must release the SE_EXCL lock on pp (usually via page_free). 6824 */ 6825 int 6826 page_capture_take_action(page_t *pp, uint_t flags, void *datap) 6827 { 6828 int cb_index; 6829 int ret = 0; 6830 page_capture_hash_bucket_t *bp1; 6831 page_capture_hash_bucket_t *bp2; 6832 int index; 6833 int found = 0; 6834 int i; 6835 6836 ASSERT(PAGE_EXCL(pp)); 6837 ASSERT(curthread->t_flag & T_CAPTURING); 6838 6839 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6840 if ((flags >> cb_index) & 1) { 6841 break; 6842 } 6843 } 6844 ASSERT(cb_index < PC_NUM_CALLBACKS); 6845 6846 /* 6847 * Remove the entry from the page_capture hash, but don't free it yet 6848 * as we may need to put it back. 6849 * Since we own the page at this point in time, we should find it 6850 * in the hash if this is an ASYNC call. If we don't it's likely 6851 * that the page_capture_async() thread decided that this request 6852 * had expired, in which case we just continue on. 6853 */ 6854 if (flags & CAPTURE_ASYNC) { 6855 6856 index = PAGE_CAPTURE_HASH(pp); 6857 6858 mutex_enter(&page_capture_hash[index].pchh_mutex); 6859 for (i = 0; i < 2 && !found; i++) { 6860 bp1 = page_capture_hash[index].lists[i].next; 6861 while (bp1 != &page_capture_hash[index].lists[i]) { 6862 if (bp1->pp == pp) { 6863 bp1->next->prev = bp1->prev; 6864 bp1->prev->next = bp1->next; 6865 page_capture_hash[index]. 6866 num_pages[bp1->pri]--; 6867 page_clrtoxic(pp, PR_CAPTURE); 6868 found = 1; 6869 break; 6870 } 6871 bp1 = bp1->next; 6872 } 6873 } 6874 mutex_exit(&page_capture_hash[index].pchh_mutex); 6875 } 6876 6877 /* Synchronize with the unregister func. */ 6878 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6879 if (!pc_cb[cb_index].cb_active) { 6880 page_free(pp, 1); 6881 rw_exit(&pc_cb[cb_index].cb_rwlock); 6882 if (found) { 6883 kmem_free(bp1, sizeof (*bp1)); 6884 } 6885 return (1); 6886 } 6887 6888 /* 6889 * We need to remove the entry from the page capture hash and turn off 6890 * the PR_CAPTURE bit before calling the callback. We'll need to cache 6891 * the entry here, and then based upon the return value, cleanup 6892 * appropriately or re-add it to the hash, making sure that someone else 6893 * hasn't already done so. 6894 * It should be rare for the callback to fail and thus it's ok for 6895 * the failure path to be a bit complicated as the success path is 6896 * cleaner and the locking rules are easier to follow. 6897 */ 6898 6899 ret = pc_cb[cb_index].cb_func(pp, datap, flags); 6900 6901 rw_exit(&pc_cb[cb_index].cb_rwlock); 6902 6903 /* 6904 * If this was an ASYNC request, we need to cleanup the hash if the 6905 * callback was successful or if the request was no longer valid. 6906 * For non-ASYNC requests, we return failure to map and the caller 6907 * will take care of adding the request to the hash. 6908 * Note also that the callback itself is responsible for the page 6909 * at this point in time in terms of locking ... The most common 6910 * case for the failure path should just be a page_free. 6911 */ 6912 if (ret >= 0) { 6913 if (found) { 6914 if (bp1->flags & CAPTURE_RETIRE) { 6915 page_retire_decr_pend_count(datap); 6916 } 6917 kmem_free(bp1, sizeof (*bp1)); 6918 } 6919 return (ret); 6920 } 6921 if (!found) { 6922 return (ret); 6923 } 6924 6925 ASSERT(flags & CAPTURE_ASYNC); 6926 6927 /* 6928 * Check for expiration time first as we can just free it up if it's 6929 * expired. 6930 */ 6931 if (ddi_get_lbolt() > bp1->expires && bp1->expires != -1) { 6932 kmem_free(bp1, sizeof (*bp1)); 6933 return (ret); 6934 } 6935 6936 /* 6937 * The callback failed and there used to be an entry in the hash for 6938 * this page, so we need to add it back to the hash. 6939 */ 6940 mutex_enter(&page_capture_hash[index].pchh_mutex); 6941 if (!(pp->p_toxic & PR_CAPTURE)) { 6942 /* just add bp1 back to head of walked list */ 6943 page_settoxic(pp, PR_CAPTURE); 6944 bp1->next = page_capture_hash[index].lists[1].next; 6945 bp1->prev = &page_capture_hash[index].lists[1]; 6946 bp1->next->prev = bp1; 6947 bp1->pri = PAGE_CAPTURE_PRIO(pp); 6948 page_capture_hash[index].lists[1].next = bp1; 6949 page_capture_hash[index].num_pages[bp1->pri]++; 6950 mutex_exit(&page_capture_hash[index].pchh_mutex); 6951 return (ret); 6952 } 6953 6954 /* 6955 * Otherwise there was a new capture request added to list 6956 * Need to make sure that our original data is represented if 6957 * appropriate. 6958 */ 6959 for (i = 0; i < 2; i++) { 6960 bp2 = page_capture_hash[index].lists[i].next; 6961 while (bp2 != &page_capture_hash[index].lists[i]) { 6962 if (bp2->pp == pp) { 6963 if (bp1->flags & CAPTURE_RETIRE) { 6964 if (!(bp2->flags & CAPTURE_RETIRE)) { 6965 bp2->szc = bp1->szc; 6966 bp2->flags = bp1->flags; 6967 bp2->expires = bp1->expires; 6968 bp2->datap = bp1->datap; 6969 } 6970 } else { 6971 ASSERT(bp1->flags & CAPTURE_PHYSMEM); 6972 if (!(bp2->flags & CAPTURE_RETIRE)) { 6973 bp2->szc = bp1->szc; 6974 bp2->flags = bp1->flags; 6975 bp2->expires = bp1->expires; 6976 bp2->datap = bp1->datap; 6977 } 6978 } 6979 page_capture_hash[index].num_pages[bp2->pri]--; 6980 bp2->pri = PAGE_CAPTURE_PRIO(pp); 6981 page_capture_hash[index].num_pages[bp2->pri]++; 6982 mutex_exit(&page_capture_hash[index]. 6983 pchh_mutex); 6984 kmem_free(bp1, sizeof (*bp1)); 6985 return (ret); 6986 } 6987 bp2 = bp2->next; 6988 } 6989 } 6990 panic("PR_CAPTURE set but not on hash for pp 0x%p\n", (void *)pp); 6991 /*NOTREACHED*/ 6992 } 6993 6994 /* 6995 * Try to capture the given page for the caller specified in the flags 6996 * parameter. The page will either be captured and handed over to the 6997 * appropriate callback, or will be queued up in the page capture hash 6998 * to be captured asynchronously. 6999 * If the current request is due to an async capture, the page must be 7000 * exclusively locked before calling this function. 7001 * Currently szc must be 0 but in the future this should be expandable to 7002 * other page sizes. 7003 * Returns 0 on success, with the following error codes on failure: 7004 * EPERM - The requested page is long term locked, and thus repeated 7005 * requests to capture this page will likely fail. 7006 * ENOMEM - There was not enough free memory in the system to safely 7007 * map the requested page. 7008 * ENOENT - The requested page was inside the kernel cage, and the 7009 * CAPTURE_GET_CAGE flag was not set. 7010 * EAGAIN - The requested page could not be capturead at this point in 7011 * time but future requests will likely work. 7012 * EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED flag 7013 * was not set. 7014 */ 7015 int 7016 page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 7017 { 7018 int ret; 7019 int cb_index; 7020 7021 if (flags & CAPTURE_ASYNC) { 7022 ASSERT(PAGE_EXCL(pp)); 7023 goto async; 7024 } 7025 7026 /* Make sure there's enough availrmem ... */ 7027 ret = page_capture_pre_checks(pp, flags); 7028 if (ret != 0) { 7029 return (ret); 7030 } 7031 7032 if (!page_trylock(pp, SE_EXCL)) { 7033 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 7034 if ((flags >> cb_index) & 1) { 7035 break; 7036 } 7037 } 7038 ASSERT(cb_index < PC_NUM_CALLBACKS); 7039 ret = EAGAIN; 7040 /* Special case for retired pages */ 7041 if (PP_RETIRED(pp)) { 7042 if (flags & CAPTURE_GET_RETIRED) { 7043 if (!page_unretire_pp(pp, PR_UNR_TEMP)) { 7044 /* 7045 * Need to set capture bit and add to 7046 * hash so that the page will be 7047 * retired when freed. 7048 */ 7049 page_capture_add_hash(pp, szc, 7050 CAPTURE_RETIRE, NULL); 7051 ret = 0; 7052 goto own_page; 7053 } 7054 } else { 7055 return (EBUSY); 7056 } 7057 } 7058 page_capture_add_hash(pp, szc, flags, datap); 7059 return (ret); 7060 } 7061 7062 async: 7063 ASSERT(PAGE_EXCL(pp)); 7064 7065 /* Need to check for physmem async requests that availrmem is sane */ 7066 if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) == 7067 (CAPTURE_ASYNC | CAPTURE_PHYSMEM) && 7068 (availrmem < swapfs_minfree)) { 7069 page_unlock(pp); 7070 return (ENOMEM); 7071 } 7072 7073 ret = page_capture_clean_page(pp); 7074 7075 if (ret != 0) { 7076 /* We failed to get the page, so lets add it to the hash */ 7077 if (!(flags & CAPTURE_ASYNC)) { 7078 page_capture_add_hash(pp, szc, flags, datap); 7079 } 7080 return (ret); 7081 } 7082 7083 own_page: 7084 ASSERT(PAGE_EXCL(pp)); 7085 ASSERT(pp->p_szc == 0); 7086 7087 /* Call the callback */ 7088 ret = page_capture_take_action(pp, flags, datap); 7089 7090 if (ret == 0) { 7091 return (0); 7092 } 7093 7094 /* 7095 * Note that in the failure cases from page_capture_take_action, the 7096 * EXCL lock will have already been dropped. 7097 */ 7098 if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) { 7099 page_capture_add_hash(pp, szc, flags, datap); 7100 } 7101 return (EAGAIN); 7102 } 7103 7104 int 7105 page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 7106 { 7107 int ret; 7108 7109 curthread->t_flag |= T_CAPTURING; 7110 ret = page_itrycapture(pp, szc, flags, datap); 7111 curthread->t_flag &= ~T_CAPTURING; /* xor works as we know its set */ 7112 return (ret); 7113 } 7114 7115 /* 7116 * When unlocking a page which has the PR_CAPTURE bit set, this routine 7117 * gets called to try and capture the page. 7118 */ 7119 void 7120 page_unlock_capture(page_t *pp) 7121 { 7122 page_capture_hash_bucket_t *bp; 7123 int index; 7124 int i; 7125 uint_t szc; 7126 uint_t flags = 0; 7127 void *datap; 7128 kmutex_t *mp; 7129 extern vnode_t retired_pages; 7130 7131 /* 7132 * We need to protect against a possible deadlock here where we own 7133 * the vnode page hash mutex and want to acquire it again as there 7134 * are locations in the code, where we unlock a page while holding 7135 * the mutex which can lead to the page being captured and eventually 7136 * end up here. As we may be hashing out the old page and hashing into 7137 * the retire vnode, we need to make sure we don't own them. 7138 * Other callbacks who do hash operations also need to make sure that 7139 * before they hashin to a vnode that they do not currently own the 7140 * vphm mutex otherwise there will be a panic. 7141 */ 7142 if (mutex_owned(page_vnode_mutex(&retired_pages))) { 7143 page_unlock_nocapture(pp); 7144 return; 7145 } 7146 if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) { 7147 page_unlock_nocapture(pp); 7148 return; 7149 } 7150 7151 index = PAGE_CAPTURE_HASH(pp); 7152 7153 mp = &page_capture_hash[index].pchh_mutex; 7154 mutex_enter(mp); 7155 for (i = 0; i < 2; i++) { 7156 bp = page_capture_hash[index].lists[i].next; 7157 while (bp != &page_capture_hash[index].lists[i]) { 7158 if (bp->pp == pp) { 7159 szc = bp->szc; 7160 flags = bp->flags | CAPTURE_ASYNC; 7161 datap = bp->datap; 7162 mutex_exit(mp); 7163 (void) page_trycapture(pp, szc, flags, datap); 7164 return; 7165 } 7166 bp = bp->next; 7167 } 7168 } 7169 7170 /* Failed to find page in hash so clear flags and unlock it. */ 7171 page_clrtoxic(pp, PR_CAPTURE); 7172 page_unlock(pp); 7173 7174 mutex_exit(mp); 7175 } 7176 7177 void 7178 page_capture_init() 7179 { 7180 int i; 7181 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7182 page_capture_hash[i].lists[0].next = 7183 &page_capture_hash[i].lists[0]; 7184 page_capture_hash[i].lists[0].prev = 7185 &page_capture_hash[i].lists[0]; 7186 page_capture_hash[i].lists[1].next = 7187 &page_capture_hash[i].lists[1]; 7188 page_capture_hash[i].lists[1].prev = 7189 &page_capture_hash[i].lists[1]; 7190 } 7191 7192 pc_thread_shortwait = 23 * hz; 7193 pc_thread_longwait = 1201 * hz; 7194 pc_thread_retry = 3; 7195 mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL); 7196 cv_init(&pc_cv, NULL, CV_DEFAULT, NULL); 7197 pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0, 7198 TS_RUN, minclsyspri); 7199 } 7200 7201 /* 7202 * It is necessary to scrub any failing pages prior to reboot in order to 7203 * prevent a latent error trap from occurring on the next boot. 7204 */ 7205 void 7206 page_retire_mdboot() 7207 { 7208 page_t *pp; 7209 int i, j; 7210 page_capture_hash_bucket_t *bp; 7211 uchar_t pri; 7212 7213 /* walk lists looking for pages to scrub */ 7214 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7215 for (pri = 0; pri < PC_NUM_PRI; pri++) { 7216 if (page_capture_hash[i].num_pages[pri] != 0) { 7217 break; 7218 } 7219 } 7220 if (pri == PC_NUM_PRI) 7221 continue; 7222 7223 mutex_enter(&page_capture_hash[i].pchh_mutex); 7224 7225 for (j = 0; j < 2; j++) { 7226 bp = page_capture_hash[i].lists[j].next; 7227 while (bp != &page_capture_hash[i].lists[j]) { 7228 pp = bp->pp; 7229 if (PP_TOXIC(pp)) { 7230 if (page_trylock(pp, SE_EXCL)) { 7231 PP_CLRFREE(pp); 7232 pagescrub(pp, 0, PAGESIZE); 7233 page_unlock(pp); 7234 } 7235 } 7236 bp = bp->next; 7237 } 7238 } 7239 mutex_exit(&page_capture_hash[i].pchh_mutex); 7240 } 7241 } 7242 7243 /* 7244 * Walk the page_capture_hash trying to capture pages and also cleanup old 7245 * entries which have expired. 7246 */ 7247 void 7248 page_capture_async() 7249 { 7250 page_t *pp; 7251 int i; 7252 int ret; 7253 page_capture_hash_bucket_t *bp1, *bp2; 7254 uint_t szc; 7255 uint_t flags; 7256 void *datap; 7257 uchar_t pri; 7258 7259 /* If there are outstanding pages to be captured, get to work */ 7260 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7261 for (pri = 0; pri < PC_NUM_PRI; pri++) { 7262 if (page_capture_hash[i].num_pages[pri] != 0) 7263 break; 7264 } 7265 if (pri == PC_NUM_PRI) 7266 continue; 7267 7268 /* Append list 1 to list 0 and then walk through list 0 */ 7269 mutex_enter(&page_capture_hash[i].pchh_mutex); 7270 bp1 = &page_capture_hash[i].lists[1]; 7271 bp2 = bp1->next; 7272 if (bp1 != bp2) { 7273 bp1->prev->next = page_capture_hash[i].lists[0].next; 7274 bp2->prev = &page_capture_hash[i].lists[0]; 7275 page_capture_hash[i].lists[0].next->prev = bp1->prev; 7276 page_capture_hash[i].lists[0].next = bp2; 7277 bp1->next = bp1; 7278 bp1->prev = bp1; 7279 } 7280 7281 /* list[1] will be empty now */ 7282 7283 bp1 = page_capture_hash[i].lists[0].next; 7284 while (bp1 != &page_capture_hash[i].lists[0]) { 7285 /* Check expiration time */ 7286 if ((ddi_get_lbolt() > bp1->expires && 7287 bp1->expires != -1) || 7288 page_deleted(bp1->pp)) { 7289 page_capture_hash[i].lists[0].next = bp1->next; 7290 bp1->next->prev = 7291 &page_capture_hash[i].lists[0]; 7292 page_capture_hash[i].num_pages[bp1->pri]--; 7293 7294 /* 7295 * We can safely remove the PR_CAPTURE bit 7296 * without holding the EXCL lock on the page 7297 * as the PR_CAPTURE bit requres that the 7298 * page_capture_hash[].pchh_mutex be held 7299 * to modify it. 7300 */ 7301 page_clrtoxic(bp1->pp, PR_CAPTURE); 7302 mutex_exit(&page_capture_hash[i].pchh_mutex); 7303 kmem_free(bp1, sizeof (*bp1)); 7304 mutex_enter(&page_capture_hash[i].pchh_mutex); 7305 bp1 = page_capture_hash[i].lists[0].next; 7306 continue; 7307 } 7308 pp = bp1->pp; 7309 szc = bp1->szc; 7310 flags = bp1->flags; 7311 datap = bp1->datap; 7312 mutex_exit(&page_capture_hash[i].pchh_mutex); 7313 if (page_trylock(pp, SE_EXCL)) { 7314 ret = page_trycapture(pp, szc, 7315 flags | CAPTURE_ASYNC, datap); 7316 } else { 7317 ret = 1; /* move to walked hash */ 7318 } 7319 7320 if (ret != 0) { 7321 /* Move to walked hash */ 7322 (void) page_capture_move_to_walked(pp); 7323 } 7324 mutex_enter(&page_capture_hash[i].pchh_mutex); 7325 bp1 = page_capture_hash[i].lists[0].next; 7326 } 7327 7328 mutex_exit(&page_capture_hash[i].pchh_mutex); 7329 } 7330 } 7331 7332 /* 7333 * This function is called by the page_capture_thread, and is needed in 7334 * in order to initiate aio cleanup, so that pages used in aio 7335 * will be unlocked and subsequently retired by page_capture_thread. 7336 */ 7337 static int 7338 do_aio_cleanup(void) 7339 { 7340 proc_t *procp; 7341 int (*aio_cleanup_dr_delete_memory)(proc_t *); 7342 int cleaned = 0; 7343 7344 if (modload("sys", "kaio") == -1) { 7345 cmn_err(CE_WARN, "do_aio_cleanup: cannot load kaio"); 7346 return (0); 7347 } 7348 /* 7349 * We use the aio_cleanup_dr_delete_memory function to 7350 * initiate the actual clean up; this function will wake 7351 * up the per-process aio_cleanup_thread. 7352 */ 7353 aio_cleanup_dr_delete_memory = (int (*)(proc_t *)) 7354 modgetsymvalue("aio_cleanup_dr_delete_memory", 0); 7355 if (aio_cleanup_dr_delete_memory == NULL) { 7356 cmn_err(CE_WARN, 7357 "aio_cleanup_dr_delete_memory not found in kaio"); 7358 return (0); 7359 } 7360 mutex_enter(&pidlock); 7361 for (procp = practive; (procp != NULL); procp = procp->p_next) { 7362 mutex_enter(&procp->p_lock); 7363 if (procp->p_aio != NULL) { 7364 /* cleanup proc's outstanding kaio */ 7365 cleaned += (*aio_cleanup_dr_delete_memory)(procp); 7366 } 7367 mutex_exit(&procp->p_lock); 7368 } 7369 mutex_exit(&pidlock); 7370 return (cleaned); 7371 } 7372 7373 /* 7374 * helper function for page_capture_thread 7375 */ 7376 static void 7377 page_capture_handle_outstanding(void) 7378 { 7379 int ntry; 7380 7381 /* Reap pages before attempting capture pages */ 7382 kmem_reap(); 7383 7384 if ((page_retire_pend_count() > page_retire_pend_kas_count()) && 7385 hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 7386 /* 7387 * Note: Purging only for platforms that support 7388 * ISM hat_pageunload() - mainly SPARC. On x86/x64 7389 * platforms ISM pages SE_SHARED locked until destroyed. 7390 */ 7391 7392 /* disable and purge seg_pcache */ 7393 (void) seg_p_disable(); 7394 for (ntry = 0; ntry < pc_thread_retry; ntry++) { 7395 if (!page_retire_pend_count()) 7396 break; 7397 if (do_aio_cleanup()) { 7398 /* 7399 * allow the apps cleanup threads 7400 * to run 7401 */ 7402 delay(pc_thread_shortwait); 7403 } 7404 page_capture_async(); 7405 } 7406 /* reenable seg_pcache */ 7407 seg_p_enable(); 7408 7409 /* completed what can be done. break out */ 7410 return; 7411 } 7412 7413 /* 7414 * For kernel pages and/or unsupported HAT_DYNAMIC_ISM_UNMAP, reap 7415 * and then attempt to capture. 7416 */ 7417 seg_preap(); 7418 page_capture_async(); 7419 } 7420 7421 /* 7422 * The page_capture_thread loops forever, looking to see if there are 7423 * pages still waiting to be captured. 7424 */ 7425 static void 7426 page_capture_thread(void) 7427 { 7428 callb_cpr_t c; 7429 int i; 7430 int high_pri_pages; 7431 int low_pri_pages; 7432 clock_t timeout; 7433 7434 CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr, "page_capture"); 7435 7436 mutex_enter(&pc_thread_mutex); 7437 for (;;) { 7438 high_pri_pages = 0; 7439 low_pri_pages = 0; 7440 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7441 high_pri_pages += 7442 page_capture_hash[i].num_pages[PC_PRI_HI]; 7443 low_pri_pages += 7444 page_capture_hash[i].num_pages[PC_PRI_LO]; 7445 } 7446 7447 timeout = pc_thread_longwait; 7448 if (high_pri_pages != 0) { 7449 timeout = pc_thread_shortwait; 7450 page_capture_handle_outstanding(); 7451 } else if (low_pri_pages != 0) { 7452 page_capture_async(); 7453 } 7454 CALLB_CPR_SAFE_BEGIN(&c); 7455 (void) cv_reltimedwait(&pc_cv, &pc_thread_mutex, 7456 timeout, TR_CLOCK_TICK); 7457 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex); 7458 } 7459 /*NOTREACHED*/ 7460 } 7461 /* 7462 * Attempt to locate a bucket that has enough pages to satisfy the request. 7463 * The initial check is done without the lock to avoid unneeded contention. 7464 * The function returns 1 if enough pages were found, else 0 if it could not 7465 * find enough pages in a bucket. 7466 */ 7467 static int 7468 pcf_decrement_bucket(pgcnt_t npages) 7469 { 7470 struct pcf *p; 7471 struct pcf *q; 7472 int i; 7473 7474 p = &pcf[PCF_INDEX()]; 7475 q = &pcf[pcf_fanout]; 7476 for (i = 0; i < pcf_fanout; i++) { 7477 if (p->pcf_count > npages) { 7478 /* 7479 * a good one to try. 7480 */ 7481 mutex_enter(&p->pcf_lock); 7482 if (p->pcf_count > npages) { 7483 p->pcf_count -= (uint_t)npages; 7484 /* 7485 * freemem is not protected by any lock. 7486 * Thus, we cannot have any assertion 7487 * containing freemem here. 7488 */ 7489 freemem -= npages; 7490 mutex_exit(&p->pcf_lock); 7491 return (1); 7492 } 7493 mutex_exit(&p->pcf_lock); 7494 } 7495 p++; 7496 if (p >= q) { 7497 p = pcf; 7498 } 7499 } 7500 return (0); 7501 } 7502 7503 /* 7504 * Arguments: 7505 * pcftotal_ret: If the value is not NULL and we have walked all the 7506 * buckets but did not find enough pages then it will 7507 * be set to the total number of pages in all the pcf 7508 * buckets. 7509 * npages: Is the number of pages we have been requested to 7510 * find. 7511 * unlock: If set to 0 we will leave the buckets locked if the 7512 * requested number of pages are not found. 7513 * 7514 * Go and try to satisfy the page request from any number of buckets. 7515 * This can be a very expensive operation as we have to lock the buckets 7516 * we are checking (and keep them locked), starting at bucket 0. 7517 * 7518 * The function returns 1 if enough pages were found, else 0 if it could not 7519 * find enough pages in the buckets. 7520 * 7521 */ 7522 static int 7523 pcf_decrement_multiple(pgcnt_t *pcftotal_ret, pgcnt_t npages, int unlock) 7524 { 7525 struct pcf *p; 7526 pgcnt_t pcftotal; 7527 int i; 7528 7529 p = pcf; 7530 /* try to collect pages from several pcf bins */ 7531 for (pcftotal = 0, i = 0; i < pcf_fanout; i++) { 7532 mutex_enter(&p->pcf_lock); 7533 pcftotal += p->pcf_count; 7534 if (pcftotal >= npages) { 7535 /* 7536 * Wow! There are enough pages laying around 7537 * to satisfy the request. Do the accounting, 7538 * drop the locks we acquired, and go back. 7539 * 7540 * freemem is not protected by any lock. So, 7541 * we cannot have any assertion containing 7542 * freemem. 7543 */ 7544 freemem -= npages; 7545 while (p >= pcf) { 7546 if (p->pcf_count <= npages) { 7547 npages -= p->pcf_count; 7548 p->pcf_count = 0; 7549 } else { 7550 p->pcf_count -= (uint_t)npages; 7551 npages = 0; 7552 } 7553 mutex_exit(&p->pcf_lock); 7554 p--; 7555 } 7556 ASSERT(npages == 0); 7557 return (1); 7558 } 7559 p++; 7560 } 7561 if (unlock) { 7562 /* failed to collect pages - release the locks */ 7563 while (--p >= pcf) { 7564 mutex_exit(&p->pcf_lock); 7565 } 7566 } 7567 if (pcftotal_ret != NULL) 7568 *pcftotal_ret = pcftotal; 7569 return (0); 7570 } 7571