1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */ 27 /* All Rights Reserved */ 28 29 /* 30 * University Copyright- Copyright (c) 1982, 1986, 1988 31 * The Regents of the University of California 32 * All Rights Reserved 33 * 34 * University Acknowledgment- Portions of this document are derived from 35 * software developed by the University of California, Berkeley, and its 36 * contributors. 37 */ 38 39 /* 40 * VM - physical page management. 41 */ 42 43 #include <sys/types.h> 44 #include <sys/t_lock.h> 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/errno.h> 48 #include <sys/time.h> 49 #include <sys/vnode.h> 50 #include <sys/vm.h> 51 #include <sys/vtrace.h> 52 #include <sys/swap.h> 53 #include <sys/cmn_err.h> 54 #include <sys/tuneable.h> 55 #include <sys/sysmacros.h> 56 #include <sys/cpuvar.h> 57 #include <sys/callb.h> 58 #include <sys/debug.h> 59 #include <sys/tnf_probe.h> 60 #include <sys/condvar_impl.h> 61 #include <sys/mem_config.h> 62 #include <sys/mem_cage.h> 63 #include <sys/kmem.h> 64 #include <sys/atomic.h> 65 #include <sys/strlog.h> 66 #include <sys/mman.h> 67 #include <sys/ontrap.h> 68 #include <sys/lgrp.h> 69 #include <sys/vfs.h> 70 71 #include <vm/hat.h> 72 #include <vm/anon.h> 73 #include <vm/page.h> 74 #include <vm/seg.h> 75 #include <vm/pvn.h> 76 #include <vm/seg_kmem.h> 77 #include <vm/vm_dep.h> 78 #include <sys/vm_usage.h> 79 #include <fs/fs_subr.h> 80 #include <sys/ddi.h> 81 #include <sys/modctl.h> 82 83 static int nopageage = 0; 84 85 static pgcnt_t max_page_get; /* max page_get request size in pages */ 86 pgcnt_t total_pages = 0; /* total number of pages (used by /proc) */ 87 88 /* 89 * freemem_lock protects all freemem variables: 90 * availrmem. Also this lock protects the globals which track the 91 * availrmem changes for accurate kernel footprint calculation. 92 * See below for an explanation of these 93 * globals. 94 */ 95 kmutex_t freemem_lock; 96 pgcnt_t availrmem; 97 pgcnt_t availrmem_initial; 98 99 /* 100 * These globals track availrmem changes to get a more accurate 101 * estimate of tke kernel size. Historically pp_kernel is used for 102 * kernel size and is based on availrmem. But availrmem is adjusted for 103 * locked pages in the system not just for kernel locked pages. 104 * These new counters will track the pages locked through segvn and 105 * by explicit user locking. 106 * 107 * pages_locked : How many pages are locked because of user specified 108 * locking through mlock or plock. 
109 * 110 * pages_useclaim,pages_claimed : These two variables track the 111 * claim adjustments because of the protection changes on a segvn segment. 112 * 113 * All these globals are protected by the same lock which protects availrmem. 114 */ 115 pgcnt_t pages_locked = 0; 116 pgcnt_t pages_useclaim = 0; 117 pgcnt_t pages_claimed = 0; 118 119 120 /* 121 * new_freemem_lock protects freemem, freemem_wait & freemem_cv. 122 */ 123 static kmutex_t new_freemem_lock; 124 static uint_t freemem_wait; /* someone waiting for freemem */ 125 static kcondvar_t freemem_cv; 126 127 /* 128 * The logical page free list is maintained as two lists, the 'free' 129 * and the 'cache' lists. 130 * The free list contains those pages that should be reused first. 131 * 132 * The implementation of the lists is machine dependent. 133 * page_get_freelist(), page_get_cachelist(), 134 * page_list_sub(), and page_list_add() 135 * form the interface to the machine dependent implementation. 136 * 137 * Pages with p_free set are on the cache list. 138 * Pages with p_free and p_age set are on the free list, 139 * 140 * A page may be locked while on either list. 141 */ 142 143 /* 144 * free list accounting stuff. 145 * 146 * 147 * Spread out the value for the number of pages on the 148 * page free and page cache lists. If there is just one 149 * value, then it must be under just one lock. 150 * The lock contention and cache traffic are a real bother. 151 * 152 * When we acquire and then drop a single pcf lock 153 * we can start in the middle of the array of pcf structures. 154 * If we acquire more than one pcf lock at a time, we need to 155 * start at the front to avoid deadlocking. 156 * 157 * pcf_count holds the number of pages in each pool. 158 * 159 * pcf_block is set when page_create_get_something() has asked the 160 * PSM page freelist and page cachelist routines without specifying 161 * a color and nothing came back. This is used to block anything 162 * else from moving pages from one list to the other while the 163 * lists are searched again. If a page is freeed while pcf_block is 164 * set, then pcf_reserve is incremented. pcgs_unblock() takes care 165 * of clearning pcf_block, doing the wakeups, etc. 166 */ 167 168 #define MAX_PCF_FANOUT NCPU 169 static uint_t pcf_fanout = 1; /* Will get changed at boot time */ 170 static uint_t pcf_fanout_mask = 0; 171 172 struct pcf { 173 kmutex_t pcf_lock; /* protects the structure */ 174 uint_t pcf_count; /* page count */ 175 uint_t pcf_wait; /* number of waiters */ 176 uint_t pcf_block; /* pcgs flag to page_free() */ 177 uint_t pcf_reserve; /* pages freed after pcf_block set */ 178 uint_t pcf_fill[10]; /* to line up on the caches */ 179 }; 180 181 /* 182 * PCF_INDEX hash needs to be dynamic (every so often the hash changes where 183 * it will hash the cpu to). This is done to prevent a drain condition 184 * from happening. This drain condition will occur when pcf_count decrement 185 * occurs on cpu A and the increment of pcf_count always occurs on cpu B. An 186 * example of this shows up with device interrupts. The dma buffer is allocated 187 * by the cpu requesting the IO thus the pcf_count is decremented based on that. 188 * When the memory is returned by the interrupt thread, the pcf_count will be 189 * incremented based on the cpu servicing the interrupt. 
190 */ 191 static struct pcf pcf[MAX_PCF_FANOUT]; 192 #define PCF_INDEX() ((int)(((long)CPU->cpu_seqid) + \ 193 (randtick() >> 24)) & (pcf_fanout_mask)) 194 195 static int pcf_decrement_bucket(pgcnt_t); 196 static int pcf_decrement_multiple(pgcnt_t *, pgcnt_t, int); 197 198 kmutex_t pcgs_lock; /* serializes page_create_get_ */ 199 kmutex_t pcgs_cagelock; /* serializes NOSLEEP cage allocs */ 200 kmutex_t pcgs_wait_lock; /* used for delay in pcgs */ 201 static kcondvar_t pcgs_cv; /* cv for delay in pcgs */ 202 203 #ifdef VM_STATS 204 205 /* 206 * No locks, but so what, they are only statistics. 207 */ 208 209 static struct page_tcnt { 210 int pc_free_cache; /* free's into cache list */ 211 int pc_free_dontneed; /* free's with dontneed */ 212 int pc_free_pageout; /* free's from pageout */ 213 int pc_free_free; /* free's into free list */ 214 int pc_free_pages; /* free's into large page free list */ 215 int pc_destroy_pages; /* large page destroy's */ 216 int pc_get_cache; /* get's from cache list */ 217 int pc_get_free; /* get's from free list */ 218 int pc_reclaim; /* reclaim's */ 219 int pc_abortfree; /* abort's of free pages */ 220 int pc_find_hit; /* find's that find page */ 221 int pc_find_miss; /* find's that don't find page */ 222 int pc_destroy_free; /* # of free pages destroyed */ 223 #define PC_HASH_CNT (4*PAGE_HASHAVELEN) 224 int pc_find_hashlen[PC_HASH_CNT+1]; 225 int pc_addclaim_pages; 226 int pc_subclaim_pages; 227 int pc_free_replacement_page[2]; 228 int pc_try_demote_pages[6]; 229 int pc_demote_pages[2]; 230 } pagecnt; 231 232 uint_t hashin_count; 233 uint_t hashin_not_held; 234 uint_t hashin_already; 235 236 uint_t hashout_count; 237 uint_t hashout_not_held; 238 239 uint_t page_create_count; 240 uint_t page_create_not_enough; 241 uint_t page_create_not_enough_again; 242 uint_t page_create_zero; 243 uint_t page_create_hashout; 244 uint_t page_create_page_lock_failed; 245 uint_t page_create_trylock_failed; 246 uint_t page_create_found_one; 247 uint_t page_create_hashin_failed; 248 uint_t page_create_dropped_phm; 249 250 uint_t page_create_new; 251 uint_t page_create_exists; 252 uint_t page_create_putbacks; 253 uint_t page_create_overshoot; 254 255 uint_t page_reclaim_zero; 256 uint_t page_reclaim_zero_locked; 257 258 uint_t page_rename_exists; 259 uint_t page_rename_count; 260 261 uint_t page_lookup_cnt[20]; 262 uint_t page_lookup_nowait_cnt[10]; 263 uint_t page_find_cnt; 264 uint_t page_exists_cnt; 265 uint_t page_exists_forreal_cnt; 266 uint_t page_lookup_dev_cnt; 267 uint_t get_cachelist_cnt; 268 uint_t page_create_cnt[10]; 269 uint_t alloc_pages[9]; 270 uint_t page_exphcontg[19]; 271 uint_t page_create_large_cnt[10]; 272 273 /* 274 * Collects statistics. 
275 */ 276 #define PAGE_HASH_SEARCH(index, pp, vp, off) { \ 277 uint_t mylen = 0; \ 278 \ 279 for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash, mylen++) { \ 280 if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \ 281 break; \ 282 } \ 283 if ((pp) != NULL) \ 284 pagecnt.pc_find_hit++; \ 285 else \ 286 pagecnt.pc_find_miss++; \ 287 if (mylen > PC_HASH_CNT) \ 288 mylen = PC_HASH_CNT; \ 289 pagecnt.pc_find_hashlen[mylen]++; \ 290 } 291 292 #else /* VM_STATS */ 293 294 /* 295 * Don't collect statistics 296 */ 297 #define PAGE_HASH_SEARCH(index, pp, vp, off) { \ 298 for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \ 299 if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \ 300 break; \ 301 } \ 302 } 303 304 #endif /* VM_STATS */ 305 306 307 308 #ifdef DEBUG 309 #define MEMSEG_SEARCH_STATS 310 #endif 311 312 #ifdef MEMSEG_SEARCH_STATS 313 struct memseg_stats { 314 uint_t nsearch; 315 uint_t nlastwon; 316 uint_t nhashwon; 317 uint_t nnotfound; 318 } memseg_stats; 319 320 #define MEMSEG_STAT_INCR(v) \ 321 atomic_add_32(&memseg_stats.v, 1) 322 #else 323 #define MEMSEG_STAT_INCR(x) 324 #endif 325 326 struct memseg *memsegs; /* list of memory segments */ 327 328 /* 329 * /etc/system tunable to control large page allocation hueristic. 330 * 331 * Setting to LPAP_LOCAL will heavily prefer the local lgroup over remote lgroup 332 * for large page allocation requests. If a large page is not readily 333 * avaliable on the local freelists we will go through additional effort 334 * to create a large page, potentially moving smaller pages around to coalesce 335 * larger pages in the local lgroup. 336 * Default value of LPAP_DEFAULT will go to remote freelists if large pages 337 * are not readily available in the local lgroup. 338 */ 339 enum lpap { 340 LPAP_DEFAULT, /* default large page allocation policy */ 341 LPAP_LOCAL /* local large page allocation policy */ 342 }; 343 344 enum lpap lpg_alloc_prefer = LPAP_DEFAULT; 345 346 static void page_init_mem_config(void); 347 static int page_do_hashin(page_t *, vnode_t *, u_offset_t); 348 static void page_do_hashout(page_t *); 349 static void page_capture_init(); 350 int page_capture_take_action(page_t *, uint_t, void *); 351 352 static void page_demote_vp_pages(page_t *); 353 354 355 void 356 pcf_init(void) 357 358 { 359 if (boot_ncpus != -1) { 360 pcf_fanout = boot_ncpus; 361 } else { 362 pcf_fanout = max_ncpus; 363 } 364 #ifdef sun4v 365 /* 366 * Force at least 4 buckets if possible for sun4v. 367 */ 368 pcf_fanout = MAX(pcf_fanout, 4); 369 #endif /* sun4v */ 370 371 /* 372 * Round up to the nearest power of 2. 373 */ 374 pcf_fanout = MIN(pcf_fanout, MAX_PCF_FANOUT); 375 if (!ISP2(pcf_fanout)) { 376 pcf_fanout = 1 << highbit(pcf_fanout); 377 378 if (pcf_fanout > MAX_PCF_FANOUT) { 379 pcf_fanout = 1 << (highbit(MAX_PCF_FANOUT) - 1); 380 } 381 } 382 pcf_fanout_mask = pcf_fanout - 1; 383 } 384 385 /* 386 * vm subsystem related initialization 387 */ 388 void 389 vm_init(void) 390 { 391 boolean_t callb_vm_cpr(void *, int); 392 393 (void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm"); 394 page_init_mem_config(); 395 page_retire_init(); 396 vm_usage_init(); 397 page_capture_init(); 398 } 399 400 /* 401 * This function is called at startup and when memory is added or deleted. 
402 */ 403 void 404 init_pages_pp_maximum() 405 { 406 static pgcnt_t p_min; 407 static pgcnt_t pages_pp_maximum_startup; 408 static pgcnt_t avrmem_delta; 409 static int init_done; 410 static int user_set; /* true if set in /etc/system */ 411 412 if (init_done == 0) { 413 414 /* If the user specified a value, save it */ 415 if (pages_pp_maximum != 0) { 416 user_set = 1; 417 pages_pp_maximum_startup = pages_pp_maximum; 418 } 419 420 /* 421 * Setting of pages_pp_maximum is based first time 422 * on the value of availrmem just after the start-up 423 * allocations. To preserve this relationship at run 424 * time, use a delta from availrmem_initial. 425 */ 426 ASSERT(availrmem_initial >= availrmem); 427 avrmem_delta = availrmem_initial - availrmem; 428 429 /* The allowable floor of pages_pp_maximum */ 430 p_min = tune.t_minarmem + 100; 431 432 /* Make sure we don't come through here again. */ 433 init_done = 1; 434 } 435 /* 436 * Determine pages_pp_maximum, the number of currently available 437 * pages (availrmem) that can't be `locked'. If not set by 438 * the user, we set it to 4% of the currently available memory 439 * plus 4MB. 440 * But we also insist that it be greater than tune.t_minarmem; 441 * otherwise a process could lock down a lot of memory, get swapped 442 * out, and never have enough to get swapped back in. 443 */ 444 if (user_set) 445 pages_pp_maximum = pages_pp_maximum_startup; 446 else 447 pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25) 448 + btop(4 * 1024 * 1024); 449 450 if (pages_pp_maximum <= p_min) { 451 pages_pp_maximum = p_min; 452 } 453 } 454 455 void 456 set_max_page_get(pgcnt_t target_total_pages) 457 { 458 max_page_get = target_total_pages / 2; 459 } 460 461 static pgcnt_t pending_delete; 462 463 /*ARGSUSED*/ 464 static void 465 page_mem_config_post_add( 466 void *arg, 467 pgcnt_t delta_pages) 468 { 469 set_max_page_get(total_pages - pending_delete); 470 init_pages_pp_maximum(); 471 } 472 473 /*ARGSUSED*/ 474 static int 475 page_mem_config_pre_del( 476 void *arg, 477 pgcnt_t delta_pages) 478 { 479 pgcnt_t nv; 480 481 nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages); 482 set_max_page_get(total_pages - nv); 483 return (0); 484 } 485 486 /*ARGSUSED*/ 487 static void 488 page_mem_config_post_del( 489 void *arg, 490 pgcnt_t delta_pages, 491 int cancelled) 492 { 493 pgcnt_t nv; 494 495 nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages); 496 set_max_page_get(total_pages - nv); 497 if (!cancelled) 498 init_pages_pp_maximum(); 499 } 500 501 static kphysm_setup_vector_t page_mem_config_vec = { 502 KPHYSM_SETUP_VECTOR_VERSION, 503 page_mem_config_post_add, 504 page_mem_config_pre_del, 505 page_mem_config_post_del, 506 }; 507 508 static void 509 page_init_mem_config(void) 510 { 511 int ret; 512 513 ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL); 514 ASSERT(ret == 0); 515 } 516 517 /* 518 * Evenly spread out the PCF counters for large free pages 519 */ 520 static void 521 page_free_large_ctr(pgcnt_t npages) 522 { 523 static struct pcf *p = pcf; 524 pgcnt_t lump; 525 526 freemem += npages; 527 528 lump = roundup(npages, pcf_fanout) / pcf_fanout; 529 530 while (npages > 0) { 531 532 ASSERT(!p->pcf_block); 533 534 if (lump < npages) { 535 p->pcf_count += (uint_t)lump; 536 npages -= lump; 537 } else { 538 p->pcf_count += (uint_t)npages; 539 npages = 0; 540 } 541 542 ASSERT(!p->pcf_wait); 543 544 if (++p > &pcf[pcf_fanout - 1]) 545 p = pcf; 546 } 547 548 ASSERT(npages == 0); 549 } 550 551 /* 552 * Add a physical chunk of 
memory to the system free lists during startup. 553 * Platform specific startup() allocates the memory for the page structs. 554 * 555 * num - number of page structures 556 * base - page number (pfn) to be associated with the first page. 557 * 558 * Since we are doing this during startup (ie. single threaded), we will 559 * use shortcut routines to avoid any locking overhead while putting all 560 * these pages on the freelists. 561 * 562 * NOTE: Any changes performed to page_free(), must also be performed to 563 * add_physmem() since this is how we initialize all page_t's at 564 * boot time. 565 */ 566 void 567 add_physmem( 568 page_t *pp, 569 pgcnt_t num, 570 pfn_t pnum) 571 { 572 page_t *root = NULL; 573 uint_t szc = page_num_pagesizes() - 1; 574 pgcnt_t large = page_get_pagecnt(szc); 575 pgcnt_t cnt = 0; 576 577 TRACE_2(TR_FAC_VM, TR_PAGE_INIT, 578 "add_physmem:pp %p num %lu", pp, num); 579 580 /* 581 * Arbitrarily limit the max page_get request 582 * to 1/2 of the page structs we have. 583 */ 584 total_pages += num; 585 set_max_page_get(total_pages); 586 587 PLCNT_MODIFY_MAX(pnum, (long)num); 588 589 /* 590 * The physical space for the pages array 591 * representing ram pages has already been 592 * allocated. Here we initialize each lock 593 * in the page structure, and put each on 594 * the free list 595 */ 596 for (; num; pp++, pnum++, num--) { 597 598 /* 599 * this needs to fill in the page number 600 * and do any other arch specific initialization 601 */ 602 add_physmem_cb(pp, pnum); 603 604 pp->p_lckcnt = 0; 605 pp->p_cowcnt = 0; 606 pp->p_slckcnt = 0; 607 608 /* 609 * Initialize the page lock as unlocked, since nobody 610 * can see or access this page yet. 611 */ 612 pp->p_selock = 0; 613 614 /* 615 * Initialize IO lock 616 */ 617 page_iolock_init(pp); 618 619 /* 620 * initialize other fields in the page_t 621 */ 622 PP_SETFREE(pp); 623 page_clr_all_props(pp); 624 PP_SETAGED(pp); 625 pp->p_offset = (u_offset_t)-1; 626 pp->p_next = pp; 627 pp->p_prev = pp; 628 629 /* 630 * Simple case: System doesn't support large pages. 631 */ 632 if (szc == 0) { 633 pp->p_szc = 0; 634 page_free_at_startup(pp); 635 continue; 636 } 637 638 /* 639 * Handle unaligned pages, we collect them up onto 640 * the root page until we have a full large page. 641 */ 642 if (!IS_P2ALIGNED(pnum, large)) { 643 644 /* 645 * If not in a large page, 646 * just free as small page. 647 */ 648 if (root == NULL) { 649 pp->p_szc = 0; 650 page_free_at_startup(pp); 651 continue; 652 } 653 654 /* 655 * Link a constituent page into the large page. 656 */ 657 pp->p_szc = szc; 658 page_list_concat(&root, &pp); 659 660 /* 661 * When large page is fully formed, free it. 662 */ 663 if (++cnt == large) { 664 page_free_large_ctr(cnt); 665 page_list_add_pages(root, PG_LIST_ISINIT); 666 root = NULL; 667 cnt = 0; 668 } 669 continue; 670 } 671 672 /* 673 * At this point we have a page number which 674 * is aligned. We assert that we aren't already 675 * in a different large page. 676 */ 677 ASSERT(IS_P2ALIGNED(pnum, large)); 678 ASSERT(root == NULL && cnt == 0); 679 680 /* 681 * If insufficient number of pages left to form 682 * a large page, just free the small page. 683 */ 684 if (num < large) { 685 pp->p_szc = 0; 686 page_free_at_startup(pp); 687 continue; 688 } 689 690 /* 691 * Otherwise start a new large page. 692 */ 693 pp->p_szc = szc; 694 cnt++; 695 root = pp; 696 } 697 ASSERT(root == NULL && cnt == 0); 698 } 699 700 /* 701 * Find a page representing the specified [vp, offset]. 
702 * If we find the page but it is intransit coming in, 703 * it will have an "exclusive" lock and we wait for 704 * the i/o to complete. A page found on the free list 705 * is always reclaimed and then locked. On success, the page 706 * is locked, its data is valid and it isn't on the free 707 * list, while a NULL is returned if the page doesn't exist. 708 */ 709 page_t * 710 page_lookup(vnode_t *vp, u_offset_t off, se_t se) 711 { 712 return (page_lookup_create(vp, off, se, NULL, NULL, 0)); 713 } 714 715 /* 716 * Find a page representing the specified [vp, offset]. 717 * We either return the one we found or, if passed in, 718 * create one with identity of [vp, offset] of the 719 * pre-allocated page. If we find existing page but it is 720 * intransit coming in, it will have an "exclusive" lock 721 * and we wait for the i/o to complete. A page found on 722 * the free list is always reclaimed and then locked. 723 * On success, the page is locked, its data is valid and 724 * it isn't on the free list, while a NULL is returned 725 * if the page doesn't exist and newpp is NULL; 726 */ 727 page_t * 728 page_lookup_create( 729 vnode_t *vp, 730 u_offset_t off, 731 se_t se, 732 page_t *newpp, 733 spgcnt_t *nrelocp, 734 int flags) 735 { 736 page_t *pp; 737 kmutex_t *phm; 738 ulong_t index; 739 uint_t hash_locked; 740 uint_t es; 741 742 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 743 VM_STAT_ADD(page_lookup_cnt[0]); 744 ASSERT(newpp ? PAGE_EXCL(newpp) : 1); 745 746 /* 747 * Acquire the appropriate page hash lock since 748 * we have to search the hash list. Pages that 749 * hash to this list can't change identity while 750 * this lock is held. 751 */ 752 hash_locked = 0; 753 index = PAGE_HASH_FUNC(vp, off); 754 phm = NULL; 755 top: 756 PAGE_HASH_SEARCH(index, pp, vp, off); 757 if (pp != NULL) { 758 VM_STAT_ADD(page_lookup_cnt[1]); 759 es = (newpp != NULL) ? 1 : 0; 760 es |= flags; 761 if (!hash_locked) { 762 VM_STAT_ADD(page_lookup_cnt[2]); 763 if (!page_try_reclaim_lock(pp, se, es)) { 764 /* 765 * On a miss, acquire the phm. Then 766 * next time, page_lock() will be called, 767 * causing a wait if the page is busy. 768 * just looping with page_trylock() would 769 * get pretty boring. 770 */ 771 VM_STAT_ADD(page_lookup_cnt[3]); 772 phm = PAGE_HASH_MUTEX(index); 773 mutex_enter(phm); 774 hash_locked = 1; 775 goto top; 776 } 777 } else { 778 VM_STAT_ADD(page_lookup_cnt[4]); 779 if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) { 780 VM_STAT_ADD(page_lookup_cnt[5]); 781 goto top; 782 } 783 } 784 785 /* 786 * Since `pp' is locked it can not change identity now. 787 * Reconfirm we locked the correct page. 788 * 789 * Both the p_vnode and p_offset *must* be cast volatile 790 * to force a reload of their values: The PAGE_HASH_SEARCH 791 * macro will have stuffed p_vnode and p_offset into 792 * registers before calling page_trylock(); another thread, 793 * actually holding the hash lock, could have changed the 794 * page's identity in memory, but our registers would not 795 * be changed, fooling the reconfirmation. If the hash 796 * lock was held during the search, the casting would 797 * not be needed. 
798 */ 799 VM_STAT_ADD(page_lookup_cnt[6]); 800 if (((volatile struct vnode *)(pp->p_vnode) != vp) || 801 ((volatile u_offset_t)(pp->p_offset) != off)) { 802 VM_STAT_ADD(page_lookup_cnt[7]); 803 if (hash_locked) { 804 panic("page_lookup_create: lost page %p", 805 (void *)pp); 806 /*NOTREACHED*/ 807 } 808 page_unlock(pp); 809 phm = PAGE_HASH_MUTEX(index); 810 mutex_enter(phm); 811 hash_locked = 1; 812 goto top; 813 } 814 815 /* 816 * If page_trylock() was called, then pp may still be on 817 * the cachelist (can't be on the free list, it would not 818 * have been found in the search). If it is on the 819 * cachelist it must be pulled now. To pull the page from 820 * the cachelist, it must be exclusively locked. 821 * 822 * The other big difference between page_trylock() and 823 * page_lock(), is that page_lock() will pull the 824 * page from whatever free list (the cache list in this 825 * case) the page is on. If page_trylock() was used 826 * above, then we have to do the reclaim ourselves. 827 */ 828 if ((!hash_locked) && (PP_ISFREE(pp))) { 829 ASSERT(PP_ISAGED(pp) == 0); 830 VM_STAT_ADD(page_lookup_cnt[8]); 831 832 /* 833 * page_relcaim will insure that we 834 * have this page exclusively 835 */ 836 837 if (!page_reclaim(pp, NULL)) { 838 /* 839 * Page_reclaim dropped whatever lock 840 * we held. 841 */ 842 VM_STAT_ADD(page_lookup_cnt[9]); 843 phm = PAGE_HASH_MUTEX(index); 844 mutex_enter(phm); 845 hash_locked = 1; 846 goto top; 847 } else if (se == SE_SHARED && newpp == NULL) { 848 VM_STAT_ADD(page_lookup_cnt[10]); 849 page_downgrade(pp); 850 } 851 } 852 853 if (hash_locked) { 854 mutex_exit(phm); 855 } 856 857 if (newpp != NULL && pp->p_szc < newpp->p_szc && 858 PAGE_EXCL(pp) && nrelocp != NULL) { 859 ASSERT(nrelocp != NULL); 860 (void) page_relocate(&pp, &newpp, 1, 1, nrelocp, 861 NULL); 862 if (*nrelocp > 0) { 863 VM_STAT_COND_ADD(*nrelocp == 1, 864 page_lookup_cnt[11]); 865 VM_STAT_COND_ADD(*nrelocp > 1, 866 page_lookup_cnt[12]); 867 pp = newpp; 868 se = SE_EXCL; 869 } else { 870 if (se == SE_SHARED) { 871 page_downgrade(pp); 872 } 873 VM_STAT_ADD(page_lookup_cnt[13]); 874 } 875 } else if (newpp != NULL && nrelocp != NULL) { 876 if (PAGE_EXCL(pp) && se == SE_SHARED) { 877 page_downgrade(pp); 878 } 879 VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc, 880 page_lookup_cnt[14]); 881 VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc, 882 page_lookup_cnt[15]); 883 VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc, 884 page_lookup_cnt[16]); 885 } else if (newpp != NULL && PAGE_EXCL(pp)) { 886 se = SE_EXCL; 887 } 888 } else if (!hash_locked) { 889 VM_STAT_ADD(page_lookup_cnt[17]); 890 phm = PAGE_HASH_MUTEX(index); 891 mutex_enter(phm); 892 hash_locked = 1; 893 goto top; 894 } else if (newpp != NULL) { 895 /* 896 * If we have a preallocated page then 897 * insert it now and basically behave like 898 * page_create. 899 */ 900 VM_STAT_ADD(page_lookup_cnt[18]); 901 /* 902 * Since we hold the page hash mutex and 903 * just searched for this page, page_hashin 904 * had better not fail. If it does, that 905 * means some thread did not follow the 906 * page hash mutex rules. Panic now and 907 * get it over with. As usual, go down 908 * holding all the locks. 
909 */ 910 ASSERT(MUTEX_HELD(phm)); 911 if (!page_hashin(newpp, vp, off, phm)) { 912 ASSERT(MUTEX_HELD(phm)); 913 panic("page_lookup_create: hashin failed %p %p %llx %p", 914 (void *)newpp, (void *)vp, off, (void *)phm); 915 /*NOTREACHED*/ 916 } 917 ASSERT(MUTEX_HELD(phm)); 918 mutex_exit(phm); 919 phm = NULL; 920 page_set_props(newpp, P_REF); 921 page_io_lock(newpp); 922 pp = newpp; 923 se = SE_EXCL; 924 } else { 925 VM_STAT_ADD(page_lookup_cnt[19]); 926 mutex_exit(phm); 927 } 928 929 ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1); 930 931 ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1); 932 933 return (pp); 934 } 935 936 /* 937 * Search the hash list for the page representing the 938 * specified [vp, offset] and return it locked. Skip 939 * free pages and pages that cannot be locked as requested. 940 * Used while attempting to kluster pages. 941 */ 942 page_t * 943 page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se) 944 { 945 page_t *pp; 946 kmutex_t *phm; 947 ulong_t index; 948 uint_t locked; 949 950 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 951 VM_STAT_ADD(page_lookup_nowait_cnt[0]); 952 953 index = PAGE_HASH_FUNC(vp, off); 954 PAGE_HASH_SEARCH(index, pp, vp, off); 955 locked = 0; 956 if (pp == NULL) { 957 top: 958 VM_STAT_ADD(page_lookup_nowait_cnt[1]); 959 locked = 1; 960 phm = PAGE_HASH_MUTEX(index); 961 mutex_enter(phm); 962 PAGE_HASH_SEARCH(index, pp, vp, off); 963 } 964 965 if (pp == NULL || PP_ISFREE(pp)) { 966 VM_STAT_ADD(page_lookup_nowait_cnt[2]); 967 pp = NULL; 968 } else { 969 if (!page_trylock(pp, se)) { 970 VM_STAT_ADD(page_lookup_nowait_cnt[3]); 971 pp = NULL; 972 } else { 973 VM_STAT_ADD(page_lookup_nowait_cnt[4]); 974 /* 975 * See the comment in page_lookup() 976 */ 977 if (((volatile struct vnode *)(pp->p_vnode) != vp) || 978 ((u_offset_t)(pp->p_offset) != off)) { 979 VM_STAT_ADD(page_lookup_nowait_cnt[5]); 980 if (locked) { 981 panic("page_lookup_nowait %p", 982 (void *)pp); 983 /*NOTREACHED*/ 984 } 985 page_unlock(pp); 986 goto top; 987 } 988 if (PP_ISFREE(pp)) { 989 VM_STAT_ADD(page_lookup_nowait_cnt[6]); 990 page_unlock(pp); 991 pp = NULL; 992 } 993 } 994 } 995 if (locked) { 996 VM_STAT_ADD(page_lookup_nowait_cnt[7]); 997 mutex_exit(phm); 998 } 999 1000 ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1); 1001 1002 return (pp); 1003 } 1004 1005 /* 1006 * Search the hash list for a page with the specified [vp, off] 1007 * that is known to exist and is already locked. This routine 1008 * is typically used by segment SOFTUNLOCK routines. 1009 */ 1010 page_t * 1011 page_find(vnode_t *vp, u_offset_t off) 1012 { 1013 page_t *pp; 1014 kmutex_t *phm; 1015 ulong_t index; 1016 1017 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 1018 VM_STAT_ADD(page_find_cnt); 1019 1020 index = PAGE_HASH_FUNC(vp, off); 1021 phm = PAGE_HASH_MUTEX(index); 1022 1023 mutex_enter(phm); 1024 PAGE_HASH_SEARCH(index, pp, vp, off); 1025 mutex_exit(phm); 1026 1027 ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr); 1028 return (pp); 1029 } 1030 1031 /* 1032 * Determine whether a page with the specified [vp, off] 1033 * currently exists in the system. Obviously this should 1034 * only be considered as a hint since nothing prevents the 1035 * page from disappearing or appearing immediately after 1036 * the return from this routine. Subsequently, we don't 1037 * even bother to lock the list. 
1038 */ 1039 page_t * 1040 page_exists(vnode_t *vp, u_offset_t off) 1041 { 1042 page_t *pp; 1043 ulong_t index; 1044 1045 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 1046 VM_STAT_ADD(page_exists_cnt); 1047 1048 index = PAGE_HASH_FUNC(vp, off); 1049 PAGE_HASH_SEARCH(index, pp, vp, off); 1050 1051 return (pp); 1052 } 1053 1054 /* 1055 * Determine if physically contiguous pages exist for [vp, off] - [vp, off + 1056 * page_size(szc)) range. if they exist and ppa is not NULL fill ppa array 1057 * with these pages locked SHARED. If necessary reclaim pages from 1058 * freelist. Return 1 if contiguous pages exist and 0 otherwise. 1059 * 1060 * If we fail to lock pages still return 1 if pages exist and contiguous. 1061 * But in this case return value is just a hint. ppa array won't be filled. 1062 * Caller should initialize ppa[0] as NULL to distinguish return value. 1063 * 1064 * Returns 0 if pages don't exist or not physically contiguous. 1065 * 1066 * This routine doesn't work for anonymous(swapfs) pages. 1067 */ 1068 int 1069 page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[]) 1070 { 1071 pgcnt_t pages; 1072 pfn_t pfn; 1073 page_t *rootpp; 1074 pgcnt_t i; 1075 pgcnt_t j; 1076 u_offset_t save_off = off; 1077 ulong_t index; 1078 kmutex_t *phm; 1079 page_t *pp; 1080 uint_t pszc; 1081 int loopcnt = 0; 1082 1083 ASSERT(szc != 0); 1084 ASSERT(vp != NULL); 1085 ASSERT(!IS_SWAPFSVP(vp)); 1086 ASSERT(!VN_ISKAS(vp)); 1087 1088 again: 1089 if (++loopcnt > 3) { 1090 VM_STAT_ADD(page_exphcontg[0]); 1091 return (0); 1092 } 1093 1094 index = PAGE_HASH_FUNC(vp, off); 1095 phm = PAGE_HASH_MUTEX(index); 1096 1097 mutex_enter(phm); 1098 PAGE_HASH_SEARCH(index, pp, vp, off); 1099 mutex_exit(phm); 1100 1101 VM_STAT_ADD(page_exphcontg[1]); 1102 1103 if (pp == NULL) { 1104 VM_STAT_ADD(page_exphcontg[2]); 1105 return (0); 1106 } 1107 1108 pages = page_get_pagecnt(szc); 1109 rootpp = pp; 1110 pfn = rootpp->p_pagenum; 1111 1112 if ((pszc = pp->p_szc) >= szc && ppa != NULL) { 1113 VM_STAT_ADD(page_exphcontg[3]); 1114 if (!page_trylock(pp, SE_SHARED)) { 1115 VM_STAT_ADD(page_exphcontg[4]); 1116 return (1); 1117 } 1118 /* 1119 * Also check whether p_pagenum was modified by DR. 1120 */ 1121 if (pp->p_szc != pszc || pp->p_vnode != vp || 1122 pp->p_offset != off || pp->p_pagenum != pfn) { 1123 VM_STAT_ADD(page_exphcontg[5]); 1124 page_unlock(pp); 1125 off = save_off; 1126 goto again; 1127 } 1128 /* 1129 * szc was non zero and vnode and offset matched after we 1130 * locked the page it means it can't become free on us. 1131 */ 1132 ASSERT(!PP_ISFREE(pp)); 1133 if (!IS_P2ALIGNED(pfn, pages)) { 1134 page_unlock(pp); 1135 return (0); 1136 } 1137 ppa[0] = pp; 1138 pp++; 1139 off += PAGESIZE; 1140 pfn++; 1141 for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) { 1142 if (!page_trylock(pp, SE_SHARED)) { 1143 VM_STAT_ADD(page_exphcontg[6]); 1144 pp--; 1145 while (i-- > 0) { 1146 page_unlock(pp); 1147 pp--; 1148 } 1149 ppa[0] = NULL; 1150 return (1); 1151 } 1152 if (pp->p_szc != pszc) { 1153 VM_STAT_ADD(page_exphcontg[7]); 1154 page_unlock(pp); 1155 pp--; 1156 while (i-- > 0) { 1157 page_unlock(pp); 1158 pp--; 1159 } 1160 ppa[0] = NULL; 1161 off = save_off; 1162 goto again; 1163 } 1164 /* 1165 * szc the same as for previous already locked pages 1166 * with right identity. Since this page had correct 1167 * szc after we locked it can't get freed or destroyed 1168 * and therefore must have the expected identity. 
1169 */ 1170 ASSERT(!PP_ISFREE(pp)); 1171 if (pp->p_vnode != vp || 1172 pp->p_offset != off) { 1173 panic("page_exists_physcontig: " 1174 "large page identity doesn't match"); 1175 } 1176 ppa[i] = pp; 1177 ASSERT(pp->p_pagenum == pfn); 1178 } 1179 VM_STAT_ADD(page_exphcontg[8]); 1180 ppa[pages] = NULL; 1181 return (1); 1182 } else if (pszc >= szc) { 1183 VM_STAT_ADD(page_exphcontg[9]); 1184 if (!IS_P2ALIGNED(pfn, pages)) { 1185 return (0); 1186 } 1187 return (1); 1188 } 1189 1190 if (!IS_P2ALIGNED(pfn, pages)) { 1191 VM_STAT_ADD(page_exphcontg[10]); 1192 return (0); 1193 } 1194 1195 if (page_numtomemseg_nolock(pfn) != 1196 page_numtomemseg_nolock(pfn + pages - 1)) { 1197 VM_STAT_ADD(page_exphcontg[11]); 1198 return (0); 1199 } 1200 1201 /* 1202 * We loop up 4 times across pages to promote page size. 1203 * We're extra cautious to promote page size atomically with respect 1204 * to everybody else. But we can probably optimize into 1 loop if 1205 * this becomes an issue. 1206 */ 1207 1208 for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) { 1209 if (!page_trylock(pp, SE_EXCL)) { 1210 VM_STAT_ADD(page_exphcontg[12]); 1211 break; 1212 } 1213 /* 1214 * Check whether p_pagenum was modified by DR. 1215 */ 1216 if (pp->p_pagenum != pfn) { 1217 page_unlock(pp); 1218 break; 1219 } 1220 if (pp->p_vnode != vp || 1221 pp->p_offset != off) { 1222 VM_STAT_ADD(page_exphcontg[13]); 1223 page_unlock(pp); 1224 break; 1225 } 1226 if (pp->p_szc >= szc) { 1227 ASSERT(i == 0); 1228 page_unlock(pp); 1229 off = save_off; 1230 goto again; 1231 } 1232 } 1233 1234 if (i != pages) { 1235 VM_STAT_ADD(page_exphcontg[14]); 1236 --pp; 1237 while (i-- > 0) { 1238 page_unlock(pp); 1239 --pp; 1240 } 1241 return (0); 1242 } 1243 1244 pp = rootpp; 1245 for (i = 0; i < pages; i++, pp++) { 1246 if (PP_ISFREE(pp)) { 1247 VM_STAT_ADD(page_exphcontg[15]); 1248 ASSERT(!PP_ISAGED(pp)); 1249 ASSERT(pp->p_szc == 0); 1250 if (!page_reclaim(pp, NULL)) { 1251 break; 1252 } 1253 } else { 1254 ASSERT(pp->p_szc < szc); 1255 VM_STAT_ADD(page_exphcontg[16]); 1256 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 1257 } 1258 } 1259 if (i < pages) { 1260 VM_STAT_ADD(page_exphcontg[17]); 1261 /* 1262 * page_reclaim failed because we were out of memory. 1263 * drop the rest of the locks and return because this page 1264 * must be already reallocated anyway. 1265 */ 1266 pp = rootpp; 1267 for (j = 0; j < pages; j++, pp++) { 1268 if (j != i) { 1269 page_unlock(pp); 1270 } 1271 } 1272 return (0); 1273 } 1274 1275 off = save_off; 1276 pp = rootpp; 1277 for (i = 0; i < pages; i++, pp++, off += PAGESIZE) { 1278 ASSERT(PAGE_EXCL(pp)); 1279 ASSERT(!PP_ISFREE(pp)); 1280 ASSERT(!hat_page_is_mapped(pp)); 1281 ASSERT(pp->p_vnode == vp); 1282 ASSERT(pp->p_offset == off); 1283 pp->p_szc = szc; 1284 } 1285 pp = rootpp; 1286 for (i = 0; i < pages; i++, pp++) { 1287 if (ppa == NULL) { 1288 page_unlock(pp); 1289 } else { 1290 ppa[i] = pp; 1291 page_downgrade(ppa[i]); 1292 } 1293 } 1294 if (ppa != NULL) { 1295 ppa[pages] = NULL; 1296 } 1297 VM_STAT_ADD(page_exphcontg[18]); 1298 ASSERT(vp->v_pages != NULL); 1299 return (1); 1300 } 1301 1302 /* 1303 * Determine whether a page with the specified [vp, off] 1304 * currently exists in the system and if so return its 1305 * size code. Obviously this should only be considered as 1306 * a hint since nothing prevents the page from disappearing 1307 * or appearing immediately after the return from this routine. 
1308 */ 1309 int 1310 page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc) 1311 { 1312 page_t *pp; 1313 kmutex_t *phm; 1314 ulong_t index; 1315 int rc = 0; 1316 1317 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 1318 ASSERT(szc != NULL); 1319 VM_STAT_ADD(page_exists_forreal_cnt); 1320 1321 index = PAGE_HASH_FUNC(vp, off); 1322 phm = PAGE_HASH_MUTEX(index); 1323 1324 mutex_enter(phm); 1325 PAGE_HASH_SEARCH(index, pp, vp, off); 1326 if (pp != NULL) { 1327 *szc = pp->p_szc; 1328 rc = 1; 1329 } 1330 mutex_exit(phm); 1331 return (rc); 1332 } 1333 1334 /* wakeup threads waiting for pages in page_create_get_something() */ 1335 void 1336 wakeup_pcgs(void) 1337 { 1338 if (!CV_HAS_WAITERS(&pcgs_cv)) 1339 return; 1340 cv_broadcast(&pcgs_cv); 1341 } 1342 1343 /* 1344 * 'freemem' is used all over the kernel as an indication of how many 1345 * pages are free (either on the cache list or on the free page list) 1346 * in the system. In very few places is a really accurate 'freemem' 1347 * needed. To avoid contention of the lock protecting a the 1348 * single freemem, it was spread out into NCPU buckets. Set_freemem 1349 * sets freemem to the total of all NCPU buckets. It is called from 1350 * clock() on each TICK. 1351 */ 1352 void 1353 set_freemem() 1354 { 1355 struct pcf *p; 1356 ulong_t t; 1357 uint_t i; 1358 1359 t = 0; 1360 p = pcf; 1361 for (i = 0; i < pcf_fanout; i++) { 1362 t += p->pcf_count; 1363 p++; 1364 } 1365 freemem = t; 1366 1367 /* 1368 * Don't worry about grabbing mutex. It's not that 1369 * critical if we miss a tick or two. This is 1370 * where we wakeup possible delayers in 1371 * page_create_get_something(). 1372 */ 1373 wakeup_pcgs(); 1374 } 1375 1376 ulong_t 1377 get_freemem() 1378 { 1379 struct pcf *p; 1380 ulong_t t; 1381 uint_t i; 1382 1383 t = 0; 1384 p = pcf; 1385 for (i = 0; i < pcf_fanout; i++) { 1386 t += p->pcf_count; 1387 p++; 1388 } 1389 /* 1390 * We just calculated it, might as well set it. 1391 */ 1392 freemem = t; 1393 return (t); 1394 } 1395 1396 /* 1397 * Acquire all of the page cache & free (pcf) locks. 1398 */ 1399 void 1400 pcf_acquire_all() 1401 { 1402 struct pcf *p; 1403 uint_t i; 1404 1405 p = pcf; 1406 for (i = 0; i < pcf_fanout; i++) { 1407 mutex_enter(&p->pcf_lock); 1408 p++; 1409 } 1410 } 1411 1412 /* 1413 * Release all the pcf_locks. 1414 */ 1415 void 1416 pcf_release_all() 1417 { 1418 struct pcf *p; 1419 uint_t i; 1420 1421 p = pcf; 1422 for (i = 0; i < pcf_fanout; i++) { 1423 mutex_exit(&p->pcf_lock); 1424 p++; 1425 } 1426 } 1427 1428 /* 1429 * Inform the VM system that we need some pages freed up. 1430 * Calls must be symmetric, e.g.: 1431 * 1432 * page_needfree(100); 1433 * wait a bit; 1434 * page_needfree(-100); 1435 */ 1436 void 1437 page_needfree(spgcnt_t npages) 1438 { 1439 mutex_enter(&new_freemem_lock); 1440 needfree += npages; 1441 mutex_exit(&new_freemem_lock); 1442 } 1443 1444 /* 1445 * Throttle for page_create(): try to prevent freemem from dropping 1446 * below throttlefree. We can't provide a 100% guarantee because 1447 * KM_NOSLEEP allocations, page_reclaim(), and various other things 1448 * nibble away at the freelist. However, we can block all PG_WAIT 1449 * allocations until memory becomes available. The motivation is 1450 * that several things can fall apart when there's no free memory: 1451 * 1452 * (1) If pageout() needs memory to push a page, the system deadlocks. 
1453 * 1454 * (2) By (broken) specification, timeout(9F) can neither fail nor 1455 * block, so it has no choice but to panic the system if it 1456 * cannot allocate a callout structure. 1457 * 1458 * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block; 1459 * it panics if it cannot allocate a callback structure. 1460 * 1461 * (4) Untold numbers of third-party drivers have not yet been hardened 1462 * against KM_NOSLEEP and/or allocb() failures; they simply assume 1463 * success and panic the system with a data fault on failure. 1464 * (The long-term solution to this particular problem is to ship 1465 * hostile fault-injecting DEBUG kernels with the DDK.) 1466 * 1467 * It is theoretically impossible to guarantee success of non-blocking 1468 * allocations, but in practice, this throttle is very hard to break. 1469 */ 1470 static int 1471 page_create_throttle(pgcnt_t npages, int flags) 1472 { 1473 ulong_t fm; 1474 uint_t i; 1475 pgcnt_t tf; /* effective value of throttlefree */ 1476 1477 /* 1478 * Never deny pages when: 1479 * - it's a thread that cannot block [NOMEMWAIT()] 1480 * - the allocation cannot block and must not fail 1481 * - the allocation cannot block and is pageout dispensated 1482 */ 1483 if (NOMEMWAIT() || 1484 ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) || 1485 ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE)) 1486 return (1); 1487 1488 /* 1489 * If the allocation can't block, we look favorably upon it 1490 * unless we're below pageout_reserve. In that case we fail 1491 * the allocation because we want to make sure there are a few 1492 * pages available for pageout. 1493 */ 1494 if ((flags & PG_WAIT) == 0) 1495 return (freemem >= npages + pageout_reserve); 1496 1497 /* Calculate the effective throttlefree value */ 1498 tf = throttlefree - 1499 ((flags & PG_PUSHPAGE) ? pageout_reserve : 0); 1500 1501 cv_signal(&proc_pageout->p_cv); 1502 1503 for (;;) { 1504 fm = 0; 1505 pcf_acquire_all(); 1506 mutex_enter(&new_freemem_lock); 1507 for (i = 0; i < pcf_fanout; i++) { 1508 fm += pcf[i].pcf_count; 1509 pcf[i].pcf_wait++; 1510 mutex_exit(&pcf[i].pcf_lock); 1511 } 1512 freemem = fm; 1513 if (freemem >= npages + tf) { 1514 mutex_exit(&new_freemem_lock); 1515 break; 1516 } 1517 needfree += npages; 1518 freemem_wait++; 1519 cv_wait(&freemem_cv, &new_freemem_lock); 1520 freemem_wait--; 1521 needfree -= npages; 1522 mutex_exit(&new_freemem_lock); 1523 } 1524 return (1); 1525 } 1526 1527 /* 1528 * page_create_wait() is called to either coalesce pages from the 1529 * different pcf buckets or to wait because there simply are not 1530 * enough pages to satisfy the caller's request. 1531 * 1532 * Sadly, this is called from platform/vm/vm_machdep.c 1533 */ 1534 int 1535 page_create_wait(pgcnt_t npages, uint_t flags) 1536 { 1537 pgcnt_t total; 1538 uint_t i; 1539 struct pcf *p; 1540 1541 /* 1542 * Wait until there are enough free pages to satisfy our 1543 * entire request. 1544 * We set needfree += npages before prodding pageout, to make sure 1545 * it does real work when npages > lotsfree > freemem. 1546 */ 1547 VM_STAT_ADD(page_create_not_enough); 1548 1549 ASSERT(!kcage_on ? 
!(flags & PG_NORELOC) : 1); 1550 checkagain: 1551 if ((flags & PG_NORELOC) && 1552 kcage_freemem < kcage_throttlefree + npages) 1553 (void) kcage_create_throttle(npages, flags); 1554 1555 if (freemem < npages + throttlefree) 1556 if (!page_create_throttle(npages, flags)) 1557 return (0); 1558 1559 if (pcf_decrement_bucket(npages) || 1560 pcf_decrement_multiple(&total, npages, 0)) 1561 return (1); 1562 1563 /* 1564 * All of the pcf locks are held, there are not enough pages 1565 * to satisfy the request (npages < total). 1566 * Be sure to acquire the new_freemem_lock before dropping 1567 * the pcf locks. This prevents dropping wakeups in page_free(). 1568 * The order is always pcf_lock then new_freemem_lock. 1569 * 1570 * Since we hold all the pcf locks, it is a good time to set freemem. 1571 * 1572 * If the caller does not want to wait, return now. 1573 * Else turn the pageout daemon loose to find something 1574 * and wait till it does. 1575 * 1576 */ 1577 freemem = total; 1578 1579 if ((flags & PG_WAIT) == 0) { 1580 pcf_release_all(); 1581 1582 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM, 1583 "page_create_nomem:npages %ld freemem %ld", npages, freemem); 1584 return (0); 1585 } 1586 1587 ASSERT(proc_pageout != NULL); 1588 cv_signal(&proc_pageout->p_cv); 1589 1590 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START, 1591 "page_create_sleep_start: freemem %ld needfree %ld", 1592 freemem, needfree); 1593 1594 /* 1595 * We are going to wait. 1596 * We currently hold all of the pcf_locks, 1597 * get the new_freemem_lock (it protects freemem_wait), 1598 * before dropping the pcf_locks. 1599 */ 1600 mutex_enter(&new_freemem_lock); 1601 1602 p = pcf; 1603 for (i = 0; i < pcf_fanout; i++) { 1604 p->pcf_wait++; 1605 mutex_exit(&p->pcf_lock); 1606 p++; 1607 } 1608 1609 needfree += npages; 1610 freemem_wait++; 1611 1612 cv_wait(&freemem_cv, &new_freemem_lock); 1613 1614 freemem_wait--; 1615 needfree -= npages; 1616 1617 mutex_exit(&new_freemem_lock); 1618 1619 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END, 1620 "page_create_sleep_end: freemem %ld needfree %ld", 1621 freemem, needfree); 1622 1623 VM_STAT_ADD(page_create_not_enough_again); 1624 goto checkagain; 1625 } 1626 /* 1627 * A routine to do the opposite of page_create_wait(). 1628 */ 1629 void 1630 page_create_putback(spgcnt_t npages) 1631 { 1632 struct pcf *p; 1633 pgcnt_t lump; 1634 uint_t *which; 1635 1636 /* 1637 * When a contiguous lump is broken up, we have to 1638 * deal with lots of pages (min 64) so lets spread 1639 * the wealth around. 1640 */ 1641 lump = roundup(npages, pcf_fanout) / pcf_fanout; 1642 freemem += npages; 1643 1644 for (p = pcf; (npages > 0) && (p < &pcf[pcf_fanout]); p++) { 1645 which = &p->pcf_count; 1646 1647 mutex_enter(&p->pcf_lock); 1648 1649 if (p->pcf_block) { 1650 which = &p->pcf_reserve; 1651 } 1652 1653 if (lump < npages) { 1654 *which += (uint_t)lump; 1655 npages -= lump; 1656 } else { 1657 *which += (uint_t)npages; 1658 npages = 0; 1659 } 1660 1661 if (p->pcf_wait) { 1662 mutex_enter(&new_freemem_lock); 1663 /* 1664 * Check to see if some other thread 1665 * is actually waiting. Another bucket 1666 * may have woken it up by now. If there 1667 * are no waiters, then set our pcf_wait 1668 * count to zero to avoid coming in here 1669 * next time. 
1670 */ 1671 if (freemem_wait) { 1672 if (npages > 1) { 1673 cv_broadcast(&freemem_cv); 1674 } else { 1675 cv_signal(&freemem_cv); 1676 } 1677 p->pcf_wait--; 1678 } else { 1679 p->pcf_wait = 0; 1680 } 1681 mutex_exit(&new_freemem_lock); 1682 } 1683 mutex_exit(&p->pcf_lock); 1684 } 1685 ASSERT(npages == 0); 1686 } 1687 1688 /* 1689 * A helper routine for page_create_get_something. 1690 * The indenting got to deep down there. 1691 * Unblock the pcf counters. Any pages freed after 1692 * pcf_block got set are moved to pcf_count and 1693 * wakeups (cv_broadcast() or cv_signal()) are done as needed. 1694 */ 1695 static void 1696 pcgs_unblock(void) 1697 { 1698 int i; 1699 struct pcf *p; 1700 1701 /* Update freemem while we're here. */ 1702 freemem = 0; 1703 p = pcf; 1704 for (i = 0; i < pcf_fanout; i++) { 1705 mutex_enter(&p->pcf_lock); 1706 ASSERT(p->pcf_count == 0); 1707 p->pcf_count = p->pcf_reserve; 1708 p->pcf_block = 0; 1709 freemem += p->pcf_count; 1710 if (p->pcf_wait) { 1711 mutex_enter(&new_freemem_lock); 1712 if (freemem_wait) { 1713 if (p->pcf_reserve > 1) { 1714 cv_broadcast(&freemem_cv); 1715 p->pcf_wait = 0; 1716 } else { 1717 cv_signal(&freemem_cv); 1718 p->pcf_wait--; 1719 } 1720 } else { 1721 p->pcf_wait = 0; 1722 } 1723 mutex_exit(&new_freemem_lock); 1724 } 1725 p->pcf_reserve = 0; 1726 mutex_exit(&p->pcf_lock); 1727 p++; 1728 } 1729 } 1730 1731 /* 1732 * Called from page_create_va() when both the cache and free lists 1733 * have been checked once. 1734 * 1735 * Either returns a page or panics since the accounting was done 1736 * way before we got here. 1737 * 1738 * We don't come here often, so leave the accounting on permanently. 1739 */ 1740 1741 #define MAX_PCGS 100 1742 1743 #ifdef DEBUG 1744 #define PCGS_TRIES 100 1745 #else /* DEBUG */ 1746 #define PCGS_TRIES 10 1747 #endif /* DEBUG */ 1748 1749 #ifdef VM_STATS 1750 uint_t pcgs_counts[PCGS_TRIES]; 1751 uint_t pcgs_too_many; 1752 uint_t pcgs_entered; 1753 uint_t pcgs_entered_noreloc; 1754 uint_t pcgs_locked; 1755 uint_t pcgs_cagelocked; 1756 #endif /* VM_STATS */ 1757 1758 static page_t * 1759 page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg, 1760 caddr_t vaddr, uint_t flags) 1761 { 1762 uint_t count; 1763 page_t *pp; 1764 uint_t locked, i; 1765 struct pcf *p; 1766 lgrp_t *lgrp; 1767 int cagelocked = 0; 1768 1769 VM_STAT_ADD(pcgs_entered); 1770 1771 /* 1772 * Tap any reserve freelists: if we fail now, we'll die 1773 * since the page(s) we're looking for have already been 1774 * accounted for. 1775 */ 1776 flags |= PG_PANIC; 1777 1778 if ((flags & PG_NORELOC) != 0) { 1779 VM_STAT_ADD(pcgs_entered_noreloc); 1780 /* 1781 * Requests for free pages from critical threads 1782 * such as pageout still won't throttle here, but 1783 * we must try again, to give the cageout thread 1784 * another chance to catch up. Since we already 1785 * accounted for the pages, we had better get them 1786 * this time. 1787 * 1788 * N.B. All non-critical threads acquire the pcgs_cagelock 1789 * to serialize access to the freelists. This implements a 1790 * turnstile-type synchornization to avoid starvation of 1791 * critical requests for PG_NORELOC memory by non-critical 1792 * threads: all non-critical threads must acquire a 'ticket' 1793 * before passing through, which entails making sure 1794 * kcage_freemem won't fall below minfree prior to grabbing 1795 * pages from the freelists. 
1796 */ 1797 if (kcage_create_throttle(1, flags) == KCT_NONCRIT) { 1798 mutex_enter(&pcgs_cagelock); 1799 cagelocked = 1; 1800 VM_STAT_ADD(pcgs_cagelocked); 1801 } 1802 } 1803 1804 /* 1805 * Time to get serious. 1806 * We failed to get a `correctly colored' page from both the 1807 * free and cache lists. 1808 * We escalate in stage. 1809 * 1810 * First try both lists without worring about color. 1811 * 1812 * Then, grab all page accounting locks (ie. pcf[]) and 1813 * steal any pages that they have and set the pcf_block flag to 1814 * stop deletions from the lists. This will help because 1815 * a page can get added to the free list while we are looking 1816 * at the cache list, then another page could be added to the cache 1817 * list allowing the page on the free list to be removed as we 1818 * move from looking at the cache list to the free list. This 1819 * could happen over and over. We would never find the page 1820 * we have accounted for. 1821 * 1822 * Noreloc pages are a subset of the global (relocatable) page pool. 1823 * They are not tracked separately in the pcf bins, so it is 1824 * impossible to know when doing pcf accounting if the available 1825 * page(s) are noreloc pages or not. When looking for a noreloc page 1826 * it is quite easy to end up here even if the global (relocatable) 1827 * page pool has plenty of free pages but the noreloc pool is empty. 1828 * 1829 * When the noreloc pool is empty (or low), additional noreloc pages 1830 * are created by converting pages from the global page pool. This 1831 * process will stall during pcf accounting if the pcf bins are 1832 * already locked. Such is the case when a noreloc allocation is 1833 * looping here in page_create_get_something waiting for more noreloc 1834 * pages to appear. 1835 * 1836 * Short of adding a new field to the pcf bins to accurately track 1837 * the number of free noreloc pages, we instead do not grab the 1838 * pcgs_lock, do not set the pcf blocks and do not timeout when 1839 * allocating a noreloc page. This allows noreloc allocations to 1840 * loop without blocking global page pool allocations. 1841 * 1842 * NOTE: the behaviour of page_create_get_something has not changed 1843 * for the case of global page pool allocations. 1844 */ 1845 1846 flags &= ~PG_MATCH_COLOR; 1847 locked = 0; 1848 #if defined(__i386) || defined(__amd64) 1849 flags = page_create_update_flags_x86(flags); 1850 #endif 1851 1852 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 1853 1854 for (count = 0; kcage_on || count < MAX_PCGS; count++) { 1855 pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 1856 flags, lgrp); 1857 if (pp == NULL) { 1858 pp = page_get_cachelist(vp, off, seg, vaddr, 1859 flags, lgrp); 1860 } 1861 if (pp == NULL) { 1862 /* 1863 * Serialize. Don't fight with other pcgs(). 1864 */ 1865 if (!locked && (!kcage_on || !(flags & PG_NORELOC))) { 1866 mutex_enter(&pcgs_lock); 1867 VM_STAT_ADD(pcgs_locked); 1868 locked = 1; 1869 p = pcf; 1870 for (i = 0; i < pcf_fanout; i++) { 1871 mutex_enter(&p->pcf_lock); 1872 ASSERT(p->pcf_block == 0); 1873 p->pcf_block = 1; 1874 p->pcf_reserve = p->pcf_count; 1875 p->pcf_count = 0; 1876 mutex_exit(&p->pcf_lock); 1877 p++; 1878 } 1879 freemem = 0; 1880 } 1881 1882 if (count) { 1883 /* 1884 * Since page_free() puts pages on 1885 * a list then accounts for it, we 1886 * just have to wait for page_free() 1887 * to unlock any page it was working 1888 * with. The page_lock()-page_reclaim() 1889 * path falls in the same boat. 
1890 * 1891 * We don't need to check on the 1892 * PG_WAIT flag, we have already 1893 * accounted for the page we are 1894 * looking for in page_create_va(). 1895 * 1896 * We just wait a moment to let any 1897 * locked pages on the lists free up, 1898 * then continue around and try again. 1899 * 1900 * Will be awakened by set_freemem(). 1901 */ 1902 mutex_enter(&pcgs_wait_lock); 1903 cv_wait(&pcgs_cv, &pcgs_wait_lock); 1904 mutex_exit(&pcgs_wait_lock); 1905 } 1906 } else { 1907 #ifdef VM_STATS 1908 if (count >= PCGS_TRIES) { 1909 VM_STAT_ADD(pcgs_too_many); 1910 } else { 1911 VM_STAT_ADD(pcgs_counts[count]); 1912 } 1913 #endif 1914 if (locked) { 1915 pcgs_unblock(); 1916 mutex_exit(&pcgs_lock); 1917 } 1918 if (cagelocked) 1919 mutex_exit(&pcgs_cagelock); 1920 return (pp); 1921 } 1922 } 1923 /* 1924 * we go down holding the pcf locks. 1925 */ 1926 panic("no %spage found %d", 1927 ((flags & PG_NORELOC) ? "non-reloc " : ""), count); 1928 /*NOTREACHED*/ 1929 } 1930 1931 /* 1932 * Create enough pages for "bytes" worth of data starting at 1933 * "off" in "vp". 1934 * 1935 * Where flag must be one of: 1936 * 1937 * PG_EXCL: Exclusive create (fail if any page already 1938 * exists in the page cache) which does not 1939 * wait for memory to become available. 1940 * 1941 * PG_WAIT: Non-exclusive create which can wait for 1942 * memory to become available. 1943 * 1944 * PG_PHYSCONTIG: Allocate physically contiguous pages. 1945 * (Not Supported) 1946 * 1947 * A doubly linked list of pages is returned to the caller. Each page 1948 * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock) 1949 * lock. 1950 * 1951 * Unable to change the parameters to page_create() in a minor release, 1952 * we renamed page_create() to page_create_va(), changed all known calls 1953 * from page_create() to page_create_va(), and created this wrapper. 1954 * 1955 * Upon a major release, we should break compatibility by deleting this 1956 * wrapper, and replacing all the strings "page_create_va", with "page_create". 1957 * 1958 * NOTE: There is a copy of this interface as page_create_io() in 1959 * i86/vm/vm_machdep.c. Any bugs fixed here should be applied 1960 * there. 1961 */ 1962 page_t * 1963 page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags) 1964 { 1965 caddr_t random_vaddr; 1966 struct seg kseg; 1967 1968 #ifdef DEBUG 1969 cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p", 1970 (void *)caller()); 1971 #endif 1972 1973 random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^ 1974 (uintptr_t)(off >> PAGESHIFT)); 1975 kseg.s_as = &kas; 1976 1977 return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr)); 1978 } 1979 1980 #ifdef DEBUG 1981 uint32_t pg_alloc_pgs_mtbf = 0; 1982 #endif 1983 1984 /* 1985 * Used for large page support. It will attempt to allocate 1986 * a large page(s) off the freelist. 1987 * 1988 * Returns non zero on failure. 1989 */ 1990 int 1991 page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr, 1992 page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz, int pgflags) 1993 { 1994 pgcnt_t npgs, curnpgs, totpgs; 1995 size_t pgsz; 1996 page_t *pplist = NULL, *pp; 1997 int err = 0; 1998 lgrp_t *lgrp; 1999 2000 ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1)); 2001 ASSERT(pgflags == 0 || pgflags == PG_LOCAL); 2002 2003 /* 2004 * Check if system heavily prefers local large pages over remote 2005 * on systems with multiple lgroups. 
2006 */ 2007 if (lpg_alloc_prefer == LPAP_LOCAL && nlgrps > 1) { 2008 pgflags = PG_LOCAL; 2009 } 2010 2011 VM_STAT_ADD(alloc_pages[0]); 2012 2013 #ifdef DEBUG 2014 if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) { 2015 return (ENOMEM); 2016 } 2017 #endif 2018 2019 /* 2020 * One must be NULL but not both. 2021 * And one must be non NULL but not both. 2022 */ 2023 ASSERT(basepp != NULL || ppa != NULL); 2024 ASSERT(basepp == NULL || ppa == NULL); 2025 2026 #if defined(__i386) || defined(__amd64) 2027 while (page_chk_freelist(szc) == 0) { 2028 VM_STAT_ADD(alloc_pages[8]); 2029 if (anypgsz == 0 || --szc == 0) 2030 return (ENOMEM); 2031 } 2032 #endif 2033 2034 pgsz = page_get_pagesize(szc); 2035 totpgs = curnpgs = npgs = pgsz >> PAGESHIFT; 2036 2037 ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0); 2038 2039 (void) page_create_wait(npgs, PG_WAIT); 2040 2041 while (npgs && szc) { 2042 lgrp = lgrp_mem_choose(seg, addr, pgsz); 2043 if (pgflags == PG_LOCAL) { 2044 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2045 pgflags, lgrp); 2046 if (pp == NULL) { 2047 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2048 0, lgrp); 2049 } 2050 } else { 2051 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2052 0, lgrp); 2053 } 2054 if (pp != NULL) { 2055 VM_STAT_ADD(alloc_pages[1]); 2056 page_list_concat(&pplist, &pp); 2057 ASSERT(npgs >= curnpgs); 2058 npgs -= curnpgs; 2059 } else if (anypgsz) { 2060 VM_STAT_ADD(alloc_pages[2]); 2061 szc--; 2062 pgsz = page_get_pagesize(szc); 2063 curnpgs = pgsz >> PAGESHIFT; 2064 } else { 2065 VM_STAT_ADD(alloc_pages[3]); 2066 ASSERT(npgs == totpgs); 2067 page_create_putback(npgs); 2068 return (ENOMEM); 2069 } 2070 } 2071 if (szc == 0) { 2072 VM_STAT_ADD(alloc_pages[4]); 2073 ASSERT(npgs != 0); 2074 page_create_putback(npgs); 2075 err = ENOMEM; 2076 } else if (basepp != NULL) { 2077 ASSERT(npgs == 0); 2078 ASSERT(ppa == NULL); 2079 *basepp = pplist; 2080 } 2081 2082 npgs = totpgs - npgs; 2083 pp = pplist; 2084 2085 /* 2086 * Clear the free and age bits. Also if we were passed in a ppa then 2087 * fill it in with all the constituent pages from the large page. But 2088 * if we failed to allocate all the pages just free what we got. 2089 */ 2090 while (npgs != 0) { 2091 ASSERT(PP_ISFREE(pp)); 2092 ASSERT(PP_ISAGED(pp)); 2093 if (ppa != NULL || err != 0) { 2094 if (err == 0) { 2095 VM_STAT_ADD(alloc_pages[5]); 2096 PP_CLRFREE(pp); 2097 PP_CLRAGED(pp); 2098 page_sub(&pplist, pp); 2099 *ppa++ = pp; 2100 npgs--; 2101 } else { 2102 VM_STAT_ADD(alloc_pages[6]); 2103 ASSERT(pp->p_szc != 0); 2104 curnpgs = page_get_pagecnt(pp->p_szc); 2105 page_list_break(&pp, &pplist, curnpgs); 2106 page_list_add_pages(pp, 0); 2107 page_create_putback(curnpgs); 2108 ASSERT(npgs >= curnpgs); 2109 npgs -= curnpgs; 2110 } 2111 pp = pplist; 2112 } else { 2113 VM_STAT_ADD(alloc_pages[7]); 2114 PP_CLRFREE(pp); 2115 PP_CLRAGED(pp); 2116 pp = pp->p_next; 2117 npgs--; 2118 } 2119 } 2120 return (err); 2121 } 2122 2123 /* 2124 * Get a single large page off of the freelists, and set it up for use. 2125 * Number of bytes requested must be a supported page size. 2126 * 2127 * Note that this call may fail even if there is sufficient 2128 * memory available or PG_WAIT is set, so the caller must 2129 * be willing to fallback on page_create_va(), block and retry, 2130 * or fail the requester. 
2131 */ 2132 page_t * 2133 page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2134 struct seg *seg, caddr_t vaddr, void *arg) 2135 { 2136 pgcnt_t npages; 2137 page_t *pp; 2138 page_t *rootpp; 2139 lgrp_t *lgrp; 2140 lgrp_id_t *lgrpid = (lgrp_id_t *)arg; 2141 2142 ASSERT(vp != NULL); 2143 2144 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2145 PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0); 2146 /* but no others */ 2147 2148 ASSERT((flags & PG_EXCL) == PG_EXCL); 2149 2150 npages = btop(bytes); 2151 2152 if (!kcage_on || panicstr) { 2153 /* 2154 * Cage is OFF, or we are single threaded in 2155 * panic, so make everything a RELOC request. 2156 */ 2157 flags &= ~PG_NORELOC; 2158 } 2159 2160 /* 2161 * Make sure there's adequate physical memory available. 2162 * Note: PG_WAIT is ignored here. 2163 */ 2164 if (freemem <= throttlefree + npages) { 2165 VM_STAT_ADD(page_create_large_cnt[1]); 2166 return (NULL); 2167 } 2168 2169 /* 2170 * If cage is on, dampen draw from cage when available 2171 * cage space is low. 2172 */ 2173 if ((flags & (PG_NORELOC | PG_WAIT)) == (PG_NORELOC | PG_WAIT) && 2174 kcage_freemem < kcage_throttlefree + npages) { 2175 2176 /* 2177 * The cage is on, the caller wants PG_NORELOC 2178 * pages and available cage memory is very low. 2179 * Call kcage_create_throttle() to attempt to 2180 * control demand on the cage. 2181 */ 2182 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) { 2183 VM_STAT_ADD(page_create_large_cnt[2]); 2184 return (NULL); 2185 } 2186 } 2187 2188 if (!pcf_decrement_bucket(npages) && 2189 !pcf_decrement_multiple(NULL, npages, 1)) { 2190 VM_STAT_ADD(page_create_large_cnt[4]); 2191 return (NULL); 2192 } 2193 2194 /* 2195 * This is where this function behaves fundamentally differently 2196 * than page_create_va(); since we're intending to map the page 2197 * with a single TTE, we have to get it as a physically contiguous 2198 * hardware pagesize chunk. If we can't, we fail. 2199 */ 2200 if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max && 2201 LGRP_EXISTS(lgrp_table[*lgrpid])) 2202 lgrp = lgrp_table[*lgrpid]; 2203 else 2204 lgrp = lgrp_mem_choose(seg, vaddr, bytes); 2205 2206 if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr, 2207 bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) { 2208 page_create_putback(npages); 2209 VM_STAT_ADD(page_create_large_cnt[5]); 2210 return (NULL); 2211 } 2212 2213 /* 2214 * if we got the page with the wrong mtype give it back this is a 2215 * workaround for CR 6249718. When CR 6249718 is fixed we never get 2216 * inside "if" and the workaround becomes just a nop 2217 */ 2218 if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) { 2219 page_list_add_pages(rootpp, 0); 2220 page_create_putback(npages); 2221 VM_STAT_ADD(page_create_large_cnt[6]); 2222 return (NULL); 2223 } 2224 2225 /* 2226 * If satisfying this request has left us with too little 2227 * memory, start the wheels turning to get some back. The 2228 * first clause of the test prevents waking up the pageout 2229 * daemon in situations where it would decide that there's 2230 * nothing to do. 
2231 */ 2232 if (nscan < desscan && freemem < minfree) { 2233 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2234 "pageout_cv_signal:freemem %ld", freemem); 2235 cv_signal(&proc_pageout->p_cv); 2236 } 2237 2238 pp = rootpp; 2239 while (npages--) { 2240 ASSERT(PAGE_EXCL(pp)); 2241 ASSERT(pp->p_vnode == NULL); 2242 ASSERT(!hat_page_is_mapped(pp)); 2243 PP_CLRFREE(pp); 2244 PP_CLRAGED(pp); 2245 if (!page_hashin(pp, vp, off, NULL)) 2246 panic("page_create_large: hashin failed: page %p", 2247 (void *)pp); 2248 page_io_lock(pp); 2249 off += PAGESIZE; 2250 pp = pp->p_next; 2251 } 2252 2253 VM_STAT_ADD(page_create_large_cnt[0]); 2254 return (rootpp); 2255 } 2256 2257 page_t * 2258 page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2259 struct seg *seg, caddr_t vaddr) 2260 { 2261 page_t *plist = NULL; 2262 pgcnt_t npages; 2263 pgcnt_t found_on_free = 0; 2264 pgcnt_t pages_req; 2265 page_t *npp = NULL; 2266 struct pcf *p; 2267 lgrp_t *lgrp; 2268 2269 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START, 2270 "page_create_start:vp %p off %llx bytes %lu flags %x", 2271 vp, off, bytes, flags); 2272 2273 ASSERT(bytes != 0 && vp != NULL); 2274 2275 if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) { 2276 panic("page_create: invalid flags"); 2277 /*NOTREACHED*/ 2278 } 2279 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2280 PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0); 2281 /* but no others */ 2282 2283 pages_req = npages = btopr(bytes); 2284 /* 2285 * Try to see whether request is too large to *ever* be 2286 * satisfied, in order to prevent deadlock. We arbitrarily 2287 * decide to limit maximum size requests to max_page_get. 2288 */ 2289 if (npages >= max_page_get) { 2290 if ((flags & PG_WAIT) == 0) { 2291 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG, 2292 "page_create_toobig:vp %p off %llx npages " 2293 "%lu max_page_get %lu", 2294 vp, off, npages, max_page_get); 2295 return (NULL); 2296 } else { 2297 cmn_err(CE_WARN, 2298 "Request for too much kernel memory " 2299 "(%lu bytes), will hang forever", bytes); 2300 for (;;) 2301 delay(1000000000); 2302 } 2303 } 2304 2305 if (!kcage_on || panicstr) { 2306 /* 2307 * Cage is OFF, or we are single threaded in 2308 * panic, so make everything a RELOC request. 2309 */ 2310 flags &= ~PG_NORELOC; 2311 } 2312 2313 if (freemem <= throttlefree + npages) 2314 if (!page_create_throttle(npages, flags)) 2315 return (NULL); 2316 2317 /* 2318 * If cage is on, dampen draw from cage when available 2319 * cage space is low. 2320 */ 2321 if ((flags & PG_NORELOC) && 2322 kcage_freemem < kcage_throttlefree + npages) { 2323 2324 /* 2325 * The cage is on, the caller wants PG_NORELOC 2326 * pages and available cage memory is very low. 2327 * Call kcage_create_throttle() to attempt to 2328 * control demand on the cage. 2329 */ 2330 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) 2331 return (NULL); 2332 } 2333 2334 VM_STAT_ADD(page_create_cnt[0]); 2335 2336 if (!pcf_decrement_bucket(npages)) { 2337 /* 2338 * Have to look harder. If npages is greater than 2339 * one, then we might have to coalesce the counters. 2340 * 2341 * Go wait. We come back having accounted 2342 * for the memory. 2343 */ 2344 VM_STAT_ADD(page_create_cnt[1]); 2345 if (!page_create_wait(npages, flags)) { 2346 VM_STAT_ADD(page_create_cnt[2]); 2347 return (NULL); 2348 } 2349 } 2350 2351 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS, 2352 "page_create_success:vp %p off %llx", vp, off); 2353 2354 /* 2355 * If satisfying this request has left us with too little 2356 * memory, start the wheels turning to get some back. 
The 2357 * first clause of the test prevents waking up the pageout 2358 * daemon in situations where it would decide that there's 2359 * nothing to do. 2360 */ 2361 if (nscan < desscan && freemem < minfree) { 2362 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2363 "pageout_cv_signal:freemem %ld", freemem); 2364 cv_signal(&proc_pageout->p_cv); 2365 } 2366 2367 /* 2368 * Loop around collecting the requested number of pages. 2369 * Most of the time, we have to `create' a new page. With 2370 * this in mind, pull the page off the free list before 2371 * getting the hash lock. This will minimize the hash 2372 * lock hold time, nesting, and the like. If it turns 2373 * out we don't need the page, we put it back at the end. 2374 */ 2375 while (npages--) { 2376 page_t *pp; 2377 kmutex_t *phm = NULL; 2378 ulong_t index; 2379 2380 index = PAGE_HASH_FUNC(vp, off); 2381 top: 2382 ASSERT(phm == NULL); 2383 ASSERT(index == PAGE_HASH_FUNC(vp, off)); 2384 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 2385 2386 if (npp == NULL) { 2387 /* 2388 * Try to get a page from the freelist (ie, 2389 * a page with no [vp, off] tag). If that 2390 * fails, use the cachelist. 2391 * 2392 * During the first attempt at both the free 2393 * and cache lists we try for the correct color. 2394 */ 2395 /* 2396 * XXXX-how do we deal with virtual indexed 2397 * caches and and colors? 2398 */ 2399 VM_STAT_ADD(page_create_cnt[4]); 2400 /* 2401 * Get lgroup to allocate next page of shared memory 2402 * from and use it to specify where to allocate 2403 * the physical memory 2404 */ 2405 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 2406 npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 2407 flags | PG_MATCH_COLOR, lgrp); 2408 if (npp == NULL) { 2409 npp = page_get_cachelist(vp, off, seg, 2410 vaddr, flags | PG_MATCH_COLOR, lgrp); 2411 if (npp == NULL) { 2412 npp = page_create_get_something(vp, 2413 off, seg, vaddr, 2414 flags & ~PG_MATCH_COLOR); 2415 } 2416 2417 if (PP_ISAGED(npp) == 0) { 2418 /* 2419 * Since this page came from the 2420 * cachelist, we must destroy the 2421 * old vnode association. 2422 */ 2423 page_hashout(npp, NULL); 2424 } 2425 } 2426 } 2427 2428 /* 2429 * We own this page! 2430 */ 2431 ASSERT(PAGE_EXCL(npp)); 2432 ASSERT(npp->p_vnode == NULL); 2433 ASSERT(!hat_page_is_mapped(npp)); 2434 PP_CLRFREE(npp); 2435 PP_CLRAGED(npp); 2436 2437 /* 2438 * Here we have a page in our hot little mits and are 2439 * just waiting to stuff it on the appropriate lists. 2440 * Get the mutex and check to see if it really does 2441 * not exist. 2442 */ 2443 phm = PAGE_HASH_MUTEX(index); 2444 mutex_enter(phm); 2445 PAGE_HASH_SEARCH(index, pp, vp, off); 2446 if (pp == NULL) { 2447 VM_STAT_ADD(page_create_new); 2448 pp = npp; 2449 npp = NULL; 2450 if (!page_hashin(pp, vp, off, phm)) { 2451 /* 2452 * Since we hold the page hash mutex and 2453 * just searched for this page, page_hashin 2454 * had better not fail. If it does, that 2455 * means somethread did not follow the 2456 * page hash mutex rules. Panic now and 2457 * get it over with. As usual, go down 2458 * holding all the locks. 2459 */ 2460 ASSERT(MUTEX_HELD(phm)); 2461 panic("page_create: " 2462 "hashin failed %p %p %llx %p", 2463 (void *)pp, (void *)vp, off, (void *)phm); 2464 /*NOTREACHED*/ 2465 } 2466 ASSERT(MUTEX_HELD(phm)); 2467 mutex_exit(phm); 2468 phm = NULL; 2469 2470 /* 2471 * Hat layer locking need not be done to set 2472 * the following bits since the page is not hashed 2473 * and was on the free list (i.e., had no mappings). 
2474 * 2475 * Set the reference bit to protect 2476 * against immediate pageout 2477 * 2478 * XXXmh modify freelist code to set reference 2479 * bit so we don't have to do it here. 2480 */ 2481 page_set_props(pp, P_REF); 2482 found_on_free++; 2483 } else { 2484 VM_STAT_ADD(page_create_exists); 2485 if (flags & PG_EXCL) { 2486 /* 2487 * Found an existing page, and the caller 2488 * wanted all new pages. Undo all of the work 2489 * we have done. 2490 */ 2491 mutex_exit(phm); 2492 phm = NULL; 2493 while (plist != NULL) { 2494 pp = plist; 2495 page_sub(&plist, pp); 2496 page_io_unlock(pp); 2497 /* large pages should not end up here */ 2498 ASSERT(pp->p_szc == 0); 2499 /*LINTED: constant in conditional ctx*/ 2500 VN_DISPOSE(pp, B_INVAL, 0, kcred); 2501 } 2502 VM_STAT_ADD(page_create_found_one); 2503 goto fail; 2504 } 2505 ASSERT(flags & PG_WAIT); 2506 if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) { 2507 /* 2508 * Start all over again if we blocked trying 2509 * to lock the page. 2510 */ 2511 mutex_exit(phm); 2512 VM_STAT_ADD(page_create_page_lock_failed); 2513 phm = NULL; 2514 goto top; 2515 } 2516 mutex_exit(phm); 2517 phm = NULL; 2518 2519 if (PP_ISFREE(pp)) { 2520 ASSERT(PP_ISAGED(pp) == 0); 2521 VM_STAT_ADD(pagecnt.pc_get_cache); 2522 page_list_sub(pp, PG_CACHE_LIST); 2523 PP_CLRFREE(pp); 2524 found_on_free++; 2525 } 2526 } 2527 2528 /* 2529 * Got a page! It is locked. Acquire the i/o 2530 * lock since we are going to use the p_next and 2531 * p_prev fields to link the requested pages together. 2532 */ 2533 page_io_lock(pp); 2534 page_add(&plist, pp); 2535 plist = plist->p_next; 2536 off += PAGESIZE; 2537 vaddr += PAGESIZE; 2538 } 2539 2540 ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1); 2541 fail: 2542 if (npp != NULL) { 2543 /* 2544 * Did not need this page after all. 2545 * Put it back on the free list. 2546 */ 2547 VM_STAT_ADD(page_create_putbacks); 2548 PP_SETFREE(npp); 2549 PP_SETAGED(npp); 2550 npp->p_offset = (u_offset_t)-1; 2551 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL); 2552 page_unlock(npp); 2553 2554 } 2555 2556 ASSERT(pages_req >= found_on_free); 2557 2558 { 2559 uint_t overshoot = (uint_t)(pages_req - found_on_free); 2560 2561 if (overshoot) { 2562 VM_STAT_ADD(page_create_overshoot); 2563 p = &pcf[PCF_INDEX()]; 2564 mutex_enter(&p->pcf_lock); 2565 if (p->pcf_block) { 2566 p->pcf_reserve += overshoot; 2567 } else { 2568 p->pcf_count += overshoot; 2569 if (p->pcf_wait) { 2570 mutex_enter(&new_freemem_lock); 2571 if (freemem_wait) { 2572 cv_signal(&freemem_cv); 2573 p->pcf_wait--; 2574 } else { 2575 p->pcf_wait = 0; 2576 } 2577 mutex_exit(&new_freemem_lock); 2578 } 2579 } 2580 mutex_exit(&p->pcf_lock); 2581 /* freemem is approximate, so this test OK */ 2582 if (!p->pcf_block) 2583 freemem += overshoot; 2584 } 2585 } 2586 2587 return (plist); 2588 } 2589 2590 /* 2591 * One or more constituent pages of this large page has been marked 2592 * toxic. Simply demote the large page to PAGESIZE pages and let 2593 * page_free() handle it. This routine should only be called by 2594 * large page free routines (page_free_pages() and page_destroy_pages(). 2595 * All pages are locked SE_EXCL and have already been marked free. 
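 *
 * (The code below first clears p_szc on every constituent page, then
 * walks the list handing each page to page_free() as an ordinary
 * PAGESIZE page.)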
2596 */ 2597 static void 2598 page_free_toxic_pages(page_t *rootpp) 2599 { 2600 page_t *tpp; 2601 pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc); 2602 uint_t szc = rootpp->p_szc; 2603 2604 for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) { 2605 ASSERT(tpp->p_szc == szc); 2606 ASSERT((PAGE_EXCL(tpp) && 2607 !page_iolock_assert(tpp)) || panicstr); 2608 tpp->p_szc = 0; 2609 } 2610 2611 while (rootpp != NULL) { 2612 tpp = rootpp; 2613 page_sub(&rootpp, tpp); 2614 ASSERT(PP_ISFREE(tpp)); 2615 PP_CLRFREE(tpp); 2616 page_free(tpp, 1); 2617 } 2618 } 2619 2620 /* 2621 * Put page on the "free" list. 2622 * The free list is really two lists maintained by 2623 * the PSM of whatever machine we happen to be on. 2624 */ 2625 void 2626 page_free(page_t *pp, int dontneed) 2627 { 2628 struct pcf *p; 2629 uint_t pcf_index; 2630 2631 ASSERT((PAGE_EXCL(pp) && 2632 !page_iolock_assert(pp)) || panicstr); 2633 2634 if (PP_ISFREE(pp)) { 2635 panic("page_free: page %p is free", (void *)pp); 2636 } 2637 2638 if (pp->p_szc != 0) { 2639 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 2640 PP_ISKAS(pp)) { 2641 panic("page_free: anon or kernel " 2642 "or no vnode large page %p", (void *)pp); 2643 } 2644 page_demote_vp_pages(pp); 2645 ASSERT(pp->p_szc == 0); 2646 } 2647 2648 /* 2649 * The page_struct_lock need not be acquired to examine these 2650 * fields since the page has an "exclusive" lock. 2651 */ 2652 if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 2653 pp->p_slckcnt != 0) { 2654 panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d " 2655 "slckcnt = %d", (void *)pp, page_pptonum(pp), pp->p_lckcnt, 2656 pp->p_cowcnt, pp->p_slckcnt); 2657 /*NOTREACHED*/ 2658 } 2659 2660 ASSERT(!hat_page_getshare(pp)); 2661 2662 PP_SETFREE(pp); 2663 ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) || 2664 !hat_ismod(pp)); 2665 page_clr_all_props(pp); 2666 ASSERT(!hat_page_getshare(pp)); 2667 2668 /* 2669 * Now we add the page to the head of the free list. 2670 * But if this page is associated with a paged vnode 2671 * then we adjust the head forward so that the page is 2672 * effectively at the end of the list. 2673 */ 2674 if (pp->p_vnode == NULL) { 2675 /* 2676 * Page has no identity, put it on the free list. 2677 */ 2678 PP_SETAGED(pp); 2679 pp->p_offset = (u_offset_t)-1; 2680 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 2681 VM_STAT_ADD(pagecnt.pc_free_free); 2682 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2683 "page_free_free:pp %p", pp); 2684 } else { 2685 PP_CLRAGED(pp); 2686 2687 if (!dontneed || nopageage) { 2688 /* move it to the tail of the list */ 2689 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL); 2690 2691 VM_STAT_ADD(pagecnt.pc_free_cache); 2692 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL, 2693 "page_free_cache_tail:pp %p", pp); 2694 } else { 2695 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD); 2696 2697 VM_STAT_ADD(pagecnt.pc_free_dontneed); 2698 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD, 2699 "page_free_cache_head:pp %p", pp); 2700 } 2701 } 2702 page_unlock(pp); 2703 2704 /* 2705 * Now do the `freemem' accounting. 2706 */ 2707 pcf_index = PCF_INDEX(); 2708 p = &pcf[pcf_index]; 2709 2710 mutex_enter(&p->pcf_lock); 2711 if (p->pcf_block) { 2712 p->pcf_reserve += 1; 2713 } else { 2714 p->pcf_count += 1; 2715 if (p->pcf_wait) { 2716 mutex_enter(&new_freemem_lock); 2717 /* 2718 * Check to see if some other thread 2719 * is actually waiting. Another bucket 2720 * may have woken it up by now. 
If there 2721 * are no waiters, then set our pcf_wait 2722 * count to zero to avoid coming in here 2723 * next time. Also, since only one page 2724 * was put on the free list, just wake 2725 * up one waiter. 2726 */ 2727 if (freemem_wait) { 2728 cv_signal(&freemem_cv); 2729 p->pcf_wait--; 2730 } else { 2731 p->pcf_wait = 0; 2732 } 2733 mutex_exit(&new_freemem_lock); 2734 } 2735 } 2736 mutex_exit(&p->pcf_lock); 2737 2738 /* freemem is approximate, so this test OK */ 2739 if (!p->pcf_block) 2740 freemem += 1; 2741 } 2742 2743 /* 2744 * Put page on the "free" list during intial startup. 2745 * This happens during initial single threaded execution. 2746 */ 2747 void 2748 page_free_at_startup(page_t *pp) 2749 { 2750 struct pcf *p; 2751 uint_t pcf_index; 2752 2753 page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT); 2754 VM_STAT_ADD(pagecnt.pc_free_free); 2755 2756 /* 2757 * Now do the `freemem' accounting. 2758 */ 2759 pcf_index = PCF_INDEX(); 2760 p = &pcf[pcf_index]; 2761 2762 ASSERT(p->pcf_block == 0); 2763 ASSERT(p->pcf_wait == 0); 2764 p->pcf_count += 1; 2765 2766 /* freemem is approximate, so this is OK */ 2767 freemem += 1; 2768 } 2769 2770 void 2771 page_free_pages(page_t *pp) 2772 { 2773 page_t *tpp, *rootpp = NULL; 2774 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 2775 pgcnt_t i; 2776 uint_t szc = pp->p_szc; 2777 2778 VM_STAT_ADD(pagecnt.pc_free_pages); 2779 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2780 "page_free_free:pp %p", pp); 2781 2782 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 2783 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 2784 panic("page_free_pages: not root page %p", (void *)pp); 2785 /*NOTREACHED*/ 2786 } 2787 2788 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 2789 ASSERT((PAGE_EXCL(tpp) && 2790 !page_iolock_assert(tpp)) || panicstr); 2791 if (PP_ISFREE(tpp)) { 2792 panic("page_free_pages: page %p is free", (void *)tpp); 2793 /*NOTREACHED*/ 2794 } 2795 if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 || 2796 tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) { 2797 panic("page_free_pages %p", (void *)tpp); 2798 /*NOTREACHED*/ 2799 } 2800 2801 ASSERT(!hat_page_getshare(tpp)); 2802 ASSERT(tpp->p_vnode == NULL); 2803 ASSERT(tpp->p_szc == szc); 2804 2805 PP_SETFREE(tpp); 2806 page_clr_all_props(tpp); 2807 PP_SETAGED(tpp); 2808 tpp->p_offset = (u_offset_t)-1; 2809 ASSERT(tpp->p_next == tpp); 2810 ASSERT(tpp->p_prev == tpp); 2811 page_list_concat(&rootpp, &tpp); 2812 } 2813 ASSERT(rootpp == pp); 2814 2815 page_list_add_pages(rootpp, 0); 2816 page_create_putback(pgcnt); 2817 } 2818 2819 int free_pages = 1; 2820 2821 /* 2822 * This routine attempts to return pages to the cachelist via page_release(). 2823 * It does not *have* to be successful in all cases, since the pageout scanner 2824 * will catch any pages it misses. It does need to be fast and not introduce 2825 * too much overhead. 2826 * 2827 * If a page isn't found on the unlocked sweep of the page_hash bucket, we 2828 * don't lock and retry. This is ok, since the page scanner will eventually 2829 * find any page we miss in free_vp_pages(). 2830 */ 2831 void 2832 free_vp_pages(vnode_t *vp, u_offset_t off, size_t len) 2833 { 2834 page_t *pp; 2835 u_offset_t eoff; 2836 extern int swap_in_range(vnode_t *, u_offset_t, size_t); 2837 2838 eoff = off + len; 2839 2840 if (free_pages == 0) 2841 return; 2842 if (swap_in_range(vp, off, len)) 2843 return; 2844 2845 for (; off < eoff; off += PAGESIZE) { 2846 2847 /* 2848 * find the page using a fast, but inexact search. 
It'll be OK 2849 * if a few pages slip through the cracks here. 2850 */ 2851 pp = page_exists(vp, off); 2852 2853 /* 2854 * If we didn't find the page (it may not exist), the page 2855 * is free, looks still in use (shared), or we can't lock it, 2856 * just give up. 2857 */ 2858 if (pp == NULL || 2859 PP_ISFREE(pp) || 2860 page_share_cnt(pp) > 0 || 2861 !page_trylock(pp, SE_EXCL)) 2862 continue; 2863 2864 /* 2865 * Once we have locked pp, verify that it's still the 2866 * correct page and not already free 2867 */ 2868 ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL)); 2869 if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) { 2870 page_unlock(pp); 2871 continue; 2872 } 2873 2874 /* 2875 * try to release the page... 2876 */ 2877 (void) page_release(pp, 1); 2878 } 2879 } 2880 2881 /* 2882 * Reclaim the given page from the free list. 2883 * If pp is part of a large pages, only the given constituent page is reclaimed 2884 * and the large page it belonged to will be demoted. This can only happen 2885 * if the page is not on the cachelist. 2886 * 2887 * Returns 1 on success or 0 on failure. 2888 * 2889 * The page is unlocked if it can't be reclaimed (when freemem == 0). 2890 * If `lock' is non-null, it will be dropped and re-acquired if 2891 * the routine must wait while freemem is 0. 2892 * 2893 * As it turns out, boot_getpages() does this. It picks a page, 2894 * based on where OBP mapped in some address, gets its pfn, searches 2895 * the memsegs, locks the page, then pulls it off the free list! 2896 */ 2897 int 2898 page_reclaim(page_t *pp, kmutex_t *lock) 2899 { 2900 struct pcf *p; 2901 struct cpu *cpup; 2902 int enough; 2903 uint_t i; 2904 2905 ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1); 2906 ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp)); 2907 2908 /* 2909 * If `freemem' is 0, we cannot reclaim this page from the 2910 * freelist, so release every lock we might hold: the page, 2911 * and the `lock' before blocking. 2912 * 2913 * The only way `freemem' can become 0 while there are pages 2914 * marked free (have their p->p_free bit set) is when the 2915 * system is low on memory and doing a page_create(). In 2916 * order to guarantee that once page_create() starts acquiring 2917 * pages it will be able to get all that it needs since `freemem' 2918 * was decreased by the requested amount. So, we need to release 2919 * this page, and let page_create() have it. 2920 * 2921 * Since `freemem' being zero is not supposed to happen, just 2922 * use the usual hash stuff as a starting point. If that bucket 2923 * is empty, then assume the worst, and start at the beginning 2924 * of the pcf array. If we always start at the beginning 2925 * when acquiring more than one pcf lock, there won't be any 2926 * deadlock problems. 2927 */ 2928 2929 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */ 2930 2931 if (freemem <= throttlefree && !page_create_throttle(1l, 0)) { 2932 pcf_acquire_all(); 2933 goto page_reclaim_nomem; 2934 } 2935 2936 enough = pcf_decrement_bucket(1); 2937 2938 if (!enough) { 2939 VM_STAT_ADD(page_reclaim_zero); 2940 /* 2941 * Check again. Its possible that some other thread 2942 * could have been right behind us, and added one 2943 * to a list somewhere. Acquire each of the pcf locks 2944 * until we find a page. 2945 */ 2946 p = pcf; 2947 for (i = 0; i < pcf_fanout; i++) { 2948 mutex_enter(&p->pcf_lock); 2949 if (p->pcf_count >= 1) { 2950 p->pcf_count -= 1; 2951 /* 2952 * freemem is not protected by any lock. Thus, 2953 * we cannot have any assertion containing 2954 * freemem here. 
2955 */ 2956 freemem -= 1; 2957 enough = 1; 2958 break; 2959 } 2960 p++; 2961 } 2962 2963 if (!enough) { 2964 page_reclaim_nomem: 2965 /* 2966 * We really can't have page `pp'. 2967 * Time for the no-memory dance with 2968 * page_free(). This is just like 2969 * page_create_wait(). Plus the added 2970 * attraction of releasing whatever mutex 2971 * we held when we were called with in `lock'. 2972 * Page_unlock() will wakeup any thread 2973 * waiting around for this page. 2974 */ 2975 if (lock) { 2976 VM_STAT_ADD(page_reclaim_zero_locked); 2977 mutex_exit(lock); 2978 } 2979 page_unlock(pp); 2980 2981 /* 2982 * get this before we drop all the pcf locks. 2983 */ 2984 mutex_enter(&new_freemem_lock); 2985 2986 p = pcf; 2987 for (i = 0; i < pcf_fanout; i++) { 2988 p->pcf_wait++; 2989 mutex_exit(&p->pcf_lock); 2990 p++; 2991 } 2992 2993 freemem_wait++; 2994 cv_wait(&freemem_cv, &new_freemem_lock); 2995 freemem_wait--; 2996 2997 mutex_exit(&new_freemem_lock); 2998 2999 if (lock) { 3000 mutex_enter(lock); 3001 } 3002 return (0); 3003 } 3004 3005 /* 3006 * The pcf accounting has been done, 3007 * though none of the pcf_wait flags have been set, 3008 * drop the locks and continue on. 3009 */ 3010 while (p >= pcf) { 3011 mutex_exit(&p->pcf_lock); 3012 p--; 3013 } 3014 } 3015 3016 3017 VM_STAT_ADD(pagecnt.pc_reclaim); 3018 3019 /* 3020 * page_list_sub will handle the case where pp is a large page. 3021 * It's possible that the page was promoted while on the freelist 3022 */ 3023 if (PP_ISAGED(pp)) { 3024 page_list_sub(pp, PG_FREE_LIST); 3025 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE, 3026 "page_reclaim_free:pp %p", pp); 3027 } else { 3028 page_list_sub(pp, PG_CACHE_LIST); 3029 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE, 3030 "page_reclaim_cache:pp %p", pp); 3031 } 3032 3033 /* 3034 * clear the p_free & p_age bits since this page is no longer 3035 * on the free list. Notice that there was a brief time where 3036 * a page is marked as free, but is not on the list. 3037 * 3038 * Set the reference bit to protect against immediate pageout. 3039 */ 3040 PP_CLRFREE(pp); 3041 PP_CLRAGED(pp); 3042 page_set_props(pp, P_REF); 3043 3044 CPU_STATS_ENTER_K(); 3045 cpup = CPU; /* get cpup now that CPU cannot change */ 3046 CPU_STATS_ADDQ(cpup, vm, pgrec, 1); 3047 CPU_STATS_ADDQ(cpup, vm, pgfrec, 1); 3048 CPU_STATS_EXIT_K(); 3049 ASSERT(pp->p_szc == 0); 3050 3051 return (1); 3052 } 3053 3054 /* 3055 * Destroy identity of the page and put it back on 3056 * the page free list. Assumes that the caller has 3057 * acquired the "exclusive" lock on the page. 3058 */ 3059 void 3060 page_destroy(page_t *pp, int dontfree) 3061 { 3062 ASSERT((PAGE_EXCL(pp) && 3063 !page_iolock_assert(pp)) || panicstr); 3064 ASSERT(pp->p_slckcnt == 0 || panicstr); 3065 3066 if (pp->p_szc != 0) { 3067 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 3068 PP_ISKAS(pp)) { 3069 panic("page_destroy: anon or kernel or no vnode " 3070 "large page %p", (void *)pp); 3071 } 3072 page_demote_vp_pages(pp); 3073 ASSERT(pp->p_szc == 0); 3074 } 3075 3076 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp); 3077 3078 /* 3079 * Unload translations, if any, then hash out the 3080 * page to erase its identity. 3081 */ 3082 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3083 page_hashout(pp, NULL); 3084 3085 if (!dontfree) { 3086 /* 3087 * Acquire the "freemem_lock" for availrmem. 3088 * The page_struct_lock need not be acquired for lckcnt 3089 * and cowcnt since the page has an "exclusive" lock. 
3090 * We are doing a modified version of page_pp_unlock here. 3091 */ 3092 if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) { 3093 mutex_enter(&freemem_lock); 3094 if (pp->p_lckcnt != 0) { 3095 availrmem++; 3096 pages_locked--; 3097 pp->p_lckcnt = 0; 3098 } 3099 if (pp->p_cowcnt != 0) { 3100 availrmem += pp->p_cowcnt; 3101 pages_locked -= pp->p_cowcnt; 3102 pp->p_cowcnt = 0; 3103 } 3104 mutex_exit(&freemem_lock); 3105 } 3106 /* 3107 * Put the page on the "free" list. 3108 */ 3109 page_free(pp, 0); 3110 } 3111 } 3112 3113 void 3114 page_destroy_pages(page_t *pp) 3115 { 3116 3117 page_t *tpp, *rootpp = NULL; 3118 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 3119 pgcnt_t i, pglcks = 0; 3120 uint_t szc = pp->p_szc; 3121 3122 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 3123 3124 VM_STAT_ADD(pagecnt.pc_destroy_pages); 3125 3126 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp); 3127 3128 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 3129 panic("page_destroy_pages: not root page %p", (void *)pp); 3130 /*NOTREACHED*/ 3131 } 3132 3133 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 3134 ASSERT((PAGE_EXCL(tpp) && 3135 !page_iolock_assert(tpp)) || panicstr); 3136 ASSERT(tpp->p_slckcnt == 0 || panicstr); 3137 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 3138 page_hashout(tpp, NULL); 3139 ASSERT(tpp->p_offset == (u_offset_t)-1); 3140 if (tpp->p_lckcnt != 0) { 3141 pglcks++; 3142 tpp->p_lckcnt = 0; 3143 } else if (tpp->p_cowcnt != 0) { 3144 pglcks += tpp->p_cowcnt; 3145 tpp->p_cowcnt = 0; 3146 } 3147 ASSERT(!hat_page_getshare(tpp)); 3148 ASSERT(tpp->p_vnode == NULL); 3149 ASSERT(tpp->p_szc == szc); 3150 3151 PP_SETFREE(tpp); 3152 page_clr_all_props(tpp); 3153 PP_SETAGED(tpp); 3154 ASSERT(tpp->p_next == tpp); 3155 ASSERT(tpp->p_prev == tpp); 3156 page_list_concat(&rootpp, &tpp); 3157 } 3158 3159 ASSERT(rootpp == pp); 3160 if (pglcks != 0) { 3161 mutex_enter(&freemem_lock); 3162 availrmem += pglcks; 3163 mutex_exit(&freemem_lock); 3164 } 3165 3166 page_list_add_pages(rootpp, 0); 3167 page_create_putback(pgcnt); 3168 } 3169 3170 /* 3171 * Similar to page_destroy(), but destroys pages which are 3172 * locked and known to be on the page free list. Since 3173 * the page is known to be free and locked, no one can access 3174 * it. 3175 * 3176 * Also, the number of free pages does not change. 3177 */ 3178 void 3179 page_destroy_free(page_t *pp) 3180 { 3181 ASSERT(PAGE_EXCL(pp)); 3182 ASSERT(PP_ISFREE(pp)); 3183 ASSERT(pp->p_vnode); 3184 ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0); 3185 ASSERT(!hat_page_is_mapped(pp)); 3186 ASSERT(PP_ISAGED(pp) == 0); 3187 ASSERT(pp->p_szc == 0); 3188 3189 VM_STAT_ADD(pagecnt.pc_destroy_free); 3190 page_list_sub(pp, PG_CACHE_LIST); 3191 3192 page_hashout(pp, NULL); 3193 ASSERT(pp->p_vnode == NULL); 3194 ASSERT(pp->p_offset == (u_offset_t)-1); 3195 ASSERT(pp->p_hash == NULL); 3196 3197 PP_SETAGED(pp); 3198 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 3199 page_unlock(pp); 3200 3201 mutex_enter(&new_freemem_lock); 3202 if (freemem_wait) { 3203 cv_signal(&freemem_cv); 3204 } 3205 mutex_exit(&new_freemem_lock); 3206 } 3207 3208 /* 3209 * Rename the page "opp" to have an identity specified 3210 * by [vp, off]. If a page already exists with this name 3211 * it is locked and destroyed. Note that the page's 3212 * translations are not unloaded during the rename. 
3213 * 3214 * This routine is used by the anon layer to "steal" the 3215 * original page and is not unlike destroying a page and 3216 * creating a new page using the same page frame. 3217 * 3218 * XXX -- Could deadlock if caller 1 tries to rename A to B while 3219 * caller 2 tries to rename B to A. 3220 */ 3221 void 3222 page_rename(page_t *opp, vnode_t *vp, u_offset_t off) 3223 { 3224 page_t *pp; 3225 int olckcnt = 0; 3226 int ocowcnt = 0; 3227 kmutex_t *phm; 3228 ulong_t index; 3229 3230 ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp)); 3231 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3232 ASSERT(PP_ISFREE(opp) == 0); 3233 3234 VM_STAT_ADD(page_rename_count); 3235 3236 TRACE_3(TR_FAC_VM, TR_PAGE_RENAME, 3237 "page rename:pp %p vp %p off %llx", opp, vp, off); 3238 3239 /* 3240 * CacheFS may call page_rename for a large NFS page 3241 * when both CacheFS and NFS mount points are used 3242 * by applications. Demote this large page before 3243 * renaming it, to ensure that there are no "partial" 3244 * large pages left lying around. 3245 */ 3246 if (opp->p_szc != 0) { 3247 vnode_t *ovp = opp->p_vnode; 3248 ASSERT(ovp != NULL); 3249 ASSERT(!IS_SWAPFSVP(ovp)); 3250 ASSERT(!VN_ISKAS(ovp)); 3251 page_demote_vp_pages(opp); 3252 ASSERT(opp->p_szc == 0); 3253 } 3254 3255 page_hashout(opp, NULL); 3256 PP_CLRAGED(opp); 3257 3258 /* 3259 * Acquire the appropriate page hash lock, since 3260 * we're going to rename the page. 3261 */ 3262 index = PAGE_HASH_FUNC(vp, off); 3263 phm = PAGE_HASH_MUTEX(index); 3264 mutex_enter(phm); 3265 top: 3266 /* 3267 * Look for an existing page with this name and destroy it if found. 3268 * By holding the page hash lock all the way to the page_hashin() 3269 * call, we are assured that no page can be created with this 3270 * identity. In the case when the phm lock is dropped to undo any 3271 * hat layer mappings, the existing page is held with an "exclusive" 3272 * lock, again preventing another page from being created with 3273 * this identity. 3274 */ 3275 PAGE_HASH_SEARCH(index, pp, vp, off); 3276 if (pp != NULL) { 3277 VM_STAT_ADD(page_rename_exists); 3278 3279 /* 3280 * As it turns out, this is one of only two places where 3281 * page_lock() needs to hold the passed in lock in the 3282 * successful case. In all of the others, the lock could 3283 * be dropped as soon as the attempt is made to lock 3284 * the page. It is tempting to add yet another arguement, 3285 * PL_KEEP or PL_DROP, to let page_lock know what to do. 3286 */ 3287 if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) { 3288 /* 3289 * Went to sleep because the page could not 3290 * be locked. We were woken up when the page 3291 * was unlocked, or when the page was destroyed. 3292 * In either case, `phm' was dropped while we 3293 * slept. Hence we should not just roar through 3294 * this loop. 3295 */ 3296 goto top; 3297 } 3298 3299 /* 3300 * If an existing page is a large page, then demote 3301 * it to ensure that no "partial" large pages are 3302 * "created" after page_rename. An existing page 3303 * can be a CacheFS page, and can't belong to swapfs. 3304 */ 3305 if (hat_page_is_mapped(pp)) { 3306 /* 3307 * Unload translations. Since we hold the 3308 * exclusive lock on this page, the page 3309 * can not be changed while we drop phm. 3310 * This is also not a lock protocol violation, 3311 * but rather the proper way to do things. 
3312 */ 3313 mutex_exit(phm); 3314 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3315 if (pp->p_szc != 0) { 3316 ASSERT(!IS_SWAPFSVP(vp)); 3317 ASSERT(!VN_ISKAS(vp)); 3318 page_demote_vp_pages(pp); 3319 ASSERT(pp->p_szc == 0); 3320 } 3321 mutex_enter(phm); 3322 } else if (pp->p_szc != 0) { 3323 ASSERT(!IS_SWAPFSVP(vp)); 3324 ASSERT(!VN_ISKAS(vp)); 3325 mutex_exit(phm); 3326 page_demote_vp_pages(pp); 3327 ASSERT(pp->p_szc == 0); 3328 mutex_enter(phm); 3329 } 3330 page_hashout(pp, phm); 3331 } 3332 /* 3333 * Hash in the page with the new identity. 3334 */ 3335 if (!page_hashin(opp, vp, off, phm)) { 3336 /* 3337 * We were holding phm while we searched for [vp, off] 3338 * and only dropped phm if we found and locked a page. 3339 * If we can't create this page now, then some thing 3340 * is really broken. 3341 */ 3342 panic("page_rename: Can't hash in page: %p", (void *)pp); 3343 /*NOTREACHED*/ 3344 } 3345 3346 ASSERT(MUTEX_HELD(phm)); 3347 mutex_exit(phm); 3348 3349 /* 3350 * Now that we have dropped phm, lets get around to finishing up 3351 * with pp. 3352 */ 3353 if (pp != NULL) { 3354 ASSERT(!hat_page_is_mapped(pp)); 3355 /* for now large pages should not end up here */ 3356 ASSERT(pp->p_szc == 0); 3357 /* 3358 * Save the locks for transfer to the new page and then 3359 * clear them so page_free doesn't think they're important. 3360 * The page_struct_lock need not be acquired for lckcnt and 3361 * cowcnt since the page has an "exclusive" lock. 3362 */ 3363 olckcnt = pp->p_lckcnt; 3364 ocowcnt = pp->p_cowcnt; 3365 pp->p_lckcnt = pp->p_cowcnt = 0; 3366 3367 /* 3368 * Put the page on the "free" list after we drop 3369 * the lock. The less work under the lock the better. 3370 */ 3371 /*LINTED: constant in conditional context*/ 3372 VN_DISPOSE(pp, B_FREE, 0, kcred); 3373 } 3374 3375 /* 3376 * Transfer the lock count from the old page (if any). 3377 * The page_struct_lock need not be acquired for lckcnt and 3378 * cowcnt since the page has an "exclusive" lock. 3379 */ 3380 opp->p_lckcnt += olckcnt; 3381 opp->p_cowcnt += ocowcnt; 3382 } 3383 3384 /* 3385 * low level routine to add page `pp' to the hash and vp chains for [vp, offset] 3386 * 3387 * Pages are normally inserted at the start of a vnode's v_pages list. 3388 * If the vnode is VMODSORT and the page is modified, it goes at the end. 3389 * This can happen when a modified page is relocated for DR. 3390 * 3391 * Returns 1 on success and 0 on failure. 3392 */ 3393 static int 3394 page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset) 3395 { 3396 page_t **listp; 3397 page_t *tp; 3398 ulong_t index; 3399 3400 ASSERT(PAGE_EXCL(pp)); 3401 ASSERT(vp != NULL); 3402 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3403 3404 /* 3405 * Be sure to set these up before the page is inserted on the hash 3406 * list. As soon as the page is placed on the list some other 3407 * thread might get confused and wonder how this page could 3408 * possibly hash to this list. 3409 */ 3410 pp->p_vnode = vp; 3411 pp->p_offset = offset; 3412 3413 /* 3414 * record if this page is on a swap vnode 3415 */ 3416 if ((vp->v_flag & VISSWAP) != 0) 3417 PP_SETSWAP(pp); 3418 3419 index = PAGE_HASH_FUNC(vp, offset); 3420 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index))); 3421 listp = &page_hash[index]; 3422 3423 /* 3424 * If this page is already hashed in, fail this attempt to add it. 
3425 */ 3426 for (tp = *listp; tp != NULL; tp = tp->p_hash) { 3427 if (tp->p_vnode == vp && tp->p_offset == offset) { 3428 pp->p_vnode = NULL; 3429 pp->p_offset = (u_offset_t)(-1); 3430 return (0); 3431 } 3432 } 3433 pp->p_hash = *listp; 3434 *listp = pp; 3435 3436 /* 3437 * Add the page to the vnode's list of pages 3438 */ 3439 if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp)) 3440 listp = &vp->v_pages->p_vpprev->p_vpnext; 3441 else 3442 listp = &vp->v_pages; 3443 3444 page_vpadd(listp, pp); 3445 3446 return (1); 3447 } 3448 3449 /* 3450 * Add page `pp' to both the hash and vp chains for [vp, offset]. 3451 * 3452 * Returns 1 on success and 0 on failure. 3453 * If hold is passed in, it is not dropped. 3454 */ 3455 int 3456 page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold) 3457 { 3458 kmutex_t *phm = NULL; 3459 kmutex_t *vphm; 3460 int rc; 3461 3462 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3463 ASSERT(pp->p_fsdata == 0 || panicstr); 3464 3465 TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN, 3466 "page_hashin:pp %p vp %p offset %llx", 3467 pp, vp, offset); 3468 3469 VM_STAT_ADD(hashin_count); 3470 3471 if (hold != NULL) 3472 phm = hold; 3473 else { 3474 VM_STAT_ADD(hashin_not_held); 3475 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset)); 3476 mutex_enter(phm); 3477 } 3478 3479 vphm = page_vnode_mutex(vp); 3480 mutex_enter(vphm); 3481 rc = page_do_hashin(pp, vp, offset); 3482 mutex_exit(vphm); 3483 if (hold == NULL) 3484 mutex_exit(phm); 3485 if (rc == 0) 3486 VM_STAT_ADD(hashin_already); 3487 return (rc); 3488 } 3489 3490 /* 3491 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3492 * All mutexes must be held 3493 */ 3494 static void 3495 page_do_hashout(page_t *pp) 3496 { 3497 page_t **hpp; 3498 page_t *hp; 3499 vnode_t *vp = pp->p_vnode; 3500 3501 ASSERT(vp != NULL); 3502 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3503 3504 /* 3505 * First, take pp off of its hash chain. 3506 */ 3507 hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)]; 3508 3509 for (;;) { 3510 hp = *hpp; 3511 if (hp == pp) 3512 break; 3513 if (hp == NULL) { 3514 panic("page_do_hashout"); 3515 /*NOTREACHED*/ 3516 } 3517 hpp = &hp->p_hash; 3518 } 3519 *hpp = pp->p_hash; 3520 3521 /* 3522 * Now remove it from its associated vnode. 3523 */ 3524 if (vp->v_pages) 3525 page_vpsub(&vp->v_pages, pp); 3526 3527 pp->p_hash = NULL; 3528 page_clr_all_props(pp); 3529 PP_CLRSWAP(pp); 3530 pp->p_vnode = NULL; 3531 pp->p_offset = (u_offset_t)-1; 3532 pp->p_fsdata = 0; 3533 } 3534 3535 /* 3536 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3537 * 3538 * When `phm' is non-NULL it contains the address of the mutex protecting the 3539 * hash list pp is on. It is not dropped. 3540 */ 3541 void 3542 page_hashout(page_t *pp, kmutex_t *phm) 3543 { 3544 vnode_t *vp; 3545 ulong_t index; 3546 kmutex_t *nphm; 3547 kmutex_t *vphm; 3548 kmutex_t *sep; 3549 3550 ASSERT(phm != NULL ? 
MUTEX_HELD(phm) : 1); 3551 ASSERT(pp->p_vnode != NULL); 3552 ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr); 3553 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode))); 3554 3555 vp = pp->p_vnode; 3556 3557 TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT, 3558 "page_hashout:pp %p vp %p", pp, vp); 3559 3560 /* Kernel probe */ 3561 TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */, 3562 tnf_opaque, vnode, vp, 3563 tnf_offset, offset, pp->p_offset); 3564 3565 /* 3566 * 3567 */ 3568 VM_STAT_ADD(hashout_count); 3569 index = PAGE_HASH_FUNC(vp, pp->p_offset); 3570 if (phm == NULL) { 3571 VM_STAT_ADD(hashout_not_held); 3572 nphm = PAGE_HASH_MUTEX(index); 3573 mutex_enter(nphm); 3574 } 3575 ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1); 3576 3577 3578 /* 3579 * grab page vnode mutex and remove it... 3580 */ 3581 vphm = page_vnode_mutex(vp); 3582 mutex_enter(vphm); 3583 3584 page_do_hashout(pp); 3585 3586 mutex_exit(vphm); 3587 if (phm == NULL) 3588 mutex_exit(nphm); 3589 3590 /* 3591 * Wake up processes waiting for this page. The page's 3592 * identity has been changed, and is probably not the 3593 * desired page any longer. 3594 */ 3595 sep = page_se_mutex(pp); 3596 mutex_enter(sep); 3597 pp->p_selock &= ~SE_EWANTED; 3598 if (CV_HAS_WAITERS(&pp->p_cv)) 3599 cv_broadcast(&pp->p_cv); 3600 mutex_exit(sep); 3601 } 3602 3603 /* 3604 * Add the page to the front of a linked list of pages 3605 * using the p_next & p_prev pointers for the list. 3606 * The caller is responsible for protecting the list pointers. 3607 */ 3608 void 3609 page_add(page_t **ppp, page_t *pp) 3610 { 3611 ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3612 3613 page_add_common(ppp, pp); 3614 } 3615 3616 3617 3618 /* 3619 * Common code for page_add() and mach_page_add() 3620 */ 3621 void 3622 page_add_common(page_t **ppp, page_t *pp) 3623 { 3624 if (*ppp == NULL) { 3625 pp->p_next = pp->p_prev = pp; 3626 } else { 3627 pp->p_next = *ppp; 3628 pp->p_prev = (*ppp)->p_prev; 3629 (*ppp)->p_prev = pp; 3630 pp->p_prev->p_next = pp; 3631 } 3632 *ppp = pp; 3633 } 3634 3635 3636 /* 3637 * Remove this page from a linked list of pages 3638 * using the p_next & p_prev pointers for the list. 3639 * 3640 * The caller is responsible for protecting the list pointers. 3641 */ 3642 void 3643 page_sub(page_t **ppp, page_t *pp) 3644 { 3645 ASSERT((PP_ISFREE(pp)) ? 1 : 3646 (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3647 3648 if (*ppp == NULL || pp == NULL) { 3649 panic("page_sub: bad arg(s): pp %p, *ppp %p", 3650 (void *)pp, (void *)(*ppp)); 3651 /*NOTREACHED*/ 3652 } 3653 3654 page_sub_common(ppp, pp); 3655 } 3656 3657 3658 /* 3659 * Common code for page_sub() and mach_page_sub() 3660 */ 3661 void 3662 page_sub_common(page_t **ppp, page_t *pp) 3663 { 3664 if (*ppp == pp) 3665 *ppp = pp->p_next; /* go to next page */ 3666 3667 if (*ppp == pp) 3668 *ppp = NULL; /* page list is gone */ 3669 else { 3670 pp->p_prev->p_next = pp->p_next; 3671 pp->p_next->p_prev = pp->p_prev; 3672 } 3673 pp->p_prev = pp->p_next = pp; /* make pp a list of one */ 3674 } 3675 3676 3677 /* 3678 * Break page list cppp into two lists with npages in the first list. 3679 * The tail is returned in nppp. 
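 *
 * For example (an illustrative sketch; `plist' and `tail' are just
 * hypothetical locals): if `plist' holds eight pages, then
 *
 *	page_list_break(&plist, &tail, 3);
 *
 * leaves the first three pages on `plist' and returns the remaining
 * five on `tail'.  With npages == 0 the entire list moves to `tail';
 * if npages covers the whole list, `tail' comes back NULL.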
3680 */ 3681 void 3682 page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages) 3683 { 3684 page_t *s1pp = *oppp; 3685 page_t *s2pp; 3686 page_t *e1pp, *e2pp; 3687 long n = 0; 3688 3689 if (s1pp == NULL) { 3690 *nppp = NULL; 3691 return; 3692 } 3693 if (npages == 0) { 3694 *nppp = s1pp; 3695 *oppp = NULL; 3696 return; 3697 } 3698 for (n = 0, s2pp = *oppp; n < npages; n++) { 3699 s2pp = s2pp->p_next; 3700 } 3701 /* Fix head and tail of new lists */ 3702 e1pp = s2pp->p_prev; 3703 e2pp = s1pp->p_prev; 3704 s1pp->p_prev = e1pp; 3705 e1pp->p_next = s1pp; 3706 s2pp->p_prev = e2pp; 3707 e2pp->p_next = s2pp; 3708 3709 /* second list empty */ 3710 if (s2pp == s1pp) { 3711 *oppp = s1pp; 3712 *nppp = NULL; 3713 } else { 3714 *oppp = s1pp; 3715 *nppp = s2pp; 3716 } 3717 } 3718 3719 /* 3720 * Concatenate page list nppp onto the end of list ppp. 3721 */ 3722 void 3723 page_list_concat(page_t **ppp, page_t **nppp) 3724 { 3725 page_t *s1pp, *s2pp, *e1pp, *e2pp; 3726 3727 if (*nppp == NULL) { 3728 return; 3729 } 3730 if (*ppp == NULL) { 3731 *ppp = *nppp; 3732 return; 3733 } 3734 s1pp = *ppp; 3735 e1pp = s1pp->p_prev; 3736 s2pp = *nppp; 3737 e2pp = s2pp->p_prev; 3738 s1pp->p_prev = e2pp; 3739 e2pp->p_next = s1pp; 3740 e1pp->p_next = s2pp; 3741 s2pp->p_prev = e1pp; 3742 } 3743 3744 /* 3745 * return the next page in the page list 3746 */ 3747 page_t * 3748 page_list_next(page_t *pp) 3749 { 3750 return (pp->p_next); 3751 } 3752 3753 3754 /* 3755 * Add the page to the front of the linked list of pages 3756 * using p_vpnext/p_vpprev pointers for the list. 3757 * 3758 * The caller is responsible for protecting the lists. 3759 */ 3760 void 3761 page_vpadd(page_t **ppp, page_t *pp) 3762 { 3763 if (*ppp == NULL) { 3764 pp->p_vpnext = pp->p_vpprev = pp; 3765 } else { 3766 pp->p_vpnext = *ppp; 3767 pp->p_vpprev = (*ppp)->p_vpprev; 3768 (*ppp)->p_vpprev = pp; 3769 pp->p_vpprev->p_vpnext = pp; 3770 } 3771 *ppp = pp; 3772 } 3773 3774 /* 3775 * Remove this page from the linked list of pages 3776 * using p_vpnext/p_vpprev pointers for the list. 3777 * 3778 * The caller is responsible for protecting the lists. 3779 */ 3780 void 3781 page_vpsub(page_t **ppp, page_t *pp) 3782 { 3783 if (*ppp == NULL || pp == NULL) { 3784 panic("page_vpsub: bad arg(s): pp %p, *ppp %p", 3785 (void *)pp, (void *)(*ppp)); 3786 /*NOTREACHED*/ 3787 } 3788 3789 if (*ppp == pp) 3790 *ppp = pp->p_vpnext; /* go to next page */ 3791 3792 if (*ppp == pp) 3793 *ppp = NULL; /* page list is gone */ 3794 else { 3795 pp->p_vpprev->p_vpnext = pp->p_vpnext; 3796 pp->p_vpnext->p_vpprev = pp->p_vpprev; 3797 } 3798 pp->p_vpprev = pp->p_vpnext = pp; /* make pp a list of one */ 3799 } 3800 3801 /* 3802 * Lock a physical page into memory "long term". Used to support "lock 3803 * in memory" functions. Accepts the page to be locked, and a cow variable 3804 * to indicate whether a the lock will travel to the new page during 3805 * a potential copy-on-write. 3806 */ 3807 int 3808 page_pp_lock( 3809 page_t *pp, /* page to be locked */ 3810 int cow, /* cow lock */ 3811 int kernel) /* must succeed -- ignore checking */ 3812 { 3813 int r = 0; /* result -- assume failure */ 3814 3815 ASSERT(PAGE_LOCKED(pp)); 3816 3817 page_struct_lock(pp); 3818 /* 3819 * Acquire the "freemem_lock" for availrmem. 
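	 * Each COW lock charges availrmem (and pages_locked); an
	 * ordinary lock charges them only on the 0 -> 1 transition of
	 * p_lckcnt, and kernel callers do that accounting themselves.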
3820 */ 3821 if (cow) { 3822 mutex_enter(&freemem_lock); 3823 if ((availrmem > pages_pp_maximum) && 3824 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 3825 availrmem--; 3826 pages_locked++; 3827 mutex_exit(&freemem_lock); 3828 r = 1; 3829 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 3830 cmn_err(CE_WARN, 3831 "COW lock limit reached on pfn 0x%lx", 3832 page_pptonum(pp)); 3833 } 3834 } else 3835 mutex_exit(&freemem_lock); 3836 } else { 3837 if (pp->p_lckcnt) { 3838 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 3839 r = 1; 3840 if (++pp->p_lckcnt == 3841 (ushort_t)PAGE_LOCK_MAXIMUM) { 3842 cmn_err(CE_WARN, "Page lock limit " 3843 "reached on pfn 0x%lx", 3844 page_pptonum(pp)); 3845 } 3846 } 3847 } else { 3848 if (kernel) { 3849 /* availrmem accounting done by caller */ 3850 ++pp->p_lckcnt; 3851 r = 1; 3852 } else { 3853 mutex_enter(&freemem_lock); 3854 if (availrmem > pages_pp_maximum) { 3855 availrmem--; 3856 pages_locked++; 3857 ++pp->p_lckcnt; 3858 r = 1; 3859 } 3860 mutex_exit(&freemem_lock); 3861 } 3862 } 3863 } 3864 page_struct_unlock(pp); 3865 return (r); 3866 } 3867 3868 /* 3869 * Decommit a lock on a physical page frame. Account for cow locks if 3870 * appropriate. 3871 */ 3872 void 3873 page_pp_unlock( 3874 page_t *pp, /* page to be unlocked */ 3875 int cow, /* expect cow lock */ 3876 int kernel) /* this was a kernel lock */ 3877 { 3878 ASSERT(PAGE_LOCKED(pp)); 3879 3880 page_struct_lock(pp); 3881 /* 3882 * Acquire the "freemem_lock" for availrmem. 3883 * If cowcnt or lcknt is already 0 do nothing; i.e., we 3884 * could be called to unlock even if nothing is locked. This could 3885 * happen if locked file pages were truncated (removing the lock) 3886 * and the file was grown again and new pages faulted in; the new 3887 * pages are unlocked but the segment still thinks they're locked. 3888 */ 3889 if (cow) { 3890 if (pp->p_cowcnt) { 3891 mutex_enter(&freemem_lock); 3892 pp->p_cowcnt--; 3893 availrmem++; 3894 pages_locked--; 3895 mutex_exit(&freemem_lock); 3896 } 3897 } else { 3898 if (pp->p_lckcnt && --pp->p_lckcnt == 0) { 3899 if (!kernel) { 3900 mutex_enter(&freemem_lock); 3901 availrmem++; 3902 pages_locked--; 3903 mutex_exit(&freemem_lock); 3904 } 3905 } 3906 } 3907 page_struct_unlock(pp); 3908 } 3909 3910 /* 3911 * This routine reserves availrmem for npages; 3912 * flags: KM_NOSLEEP or KM_SLEEP 3913 * returns 1 on success or 0 on failure 3914 */ 3915 int 3916 page_resv(pgcnt_t npages, uint_t flags) 3917 { 3918 mutex_enter(&freemem_lock); 3919 while (availrmem < tune.t_minarmem + npages) { 3920 if (flags & KM_NOSLEEP) { 3921 mutex_exit(&freemem_lock); 3922 return (0); 3923 } 3924 mutex_exit(&freemem_lock); 3925 page_needfree(npages); 3926 kmem_reap(); 3927 delay(hz >> 2); 3928 page_needfree(-(spgcnt_t)npages); 3929 mutex_enter(&freemem_lock); 3930 } 3931 availrmem -= npages; 3932 mutex_exit(&freemem_lock); 3933 return (1); 3934 } 3935 3936 /* 3937 * This routine unreserves availrmem for npages; 3938 */ 3939 void 3940 page_unresv(pgcnt_t npages) 3941 { 3942 mutex_enter(&freemem_lock); 3943 availrmem += npages; 3944 mutex_exit(&freemem_lock); 3945 } 3946 3947 /* 3948 * See Statement at the beginning of segvn_lockop() regarding 3949 * the way we handle cowcnts and lckcnts. 3950 * 3951 * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage 3952 * that breaks COW has PROT_WRITE. 3953 * 3954 * Note that, we may also break COW in case we are softlocking 3955 * on read access during physio; 3956 * in this softlock case, the vpage may not have PROT_WRITE. 
3957 * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp' 3958 * if the vpage doesn't have PROT_WRITE. 3959 * 3960 * This routine is never called if we are stealing a page 3961 * in anon_private. 3962 * 3963 * The caller subtracted from availrmem for read only mapping. 3964 * if lckcnt is 1 increment availrmem. 3965 */ 3966 void 3967 page_pp_useclaim( 3968 page_t *opp, /* original page frame losing lock */ 3969 page_t *npp, /* new page frame gaining lock */ 3970 uint_t write_perm) /* set if vpage has PROT_WRITE */ 3971 { 3972 int payback = 0; 3973 3974 ASSERT(PAGE_LOCKED(opp)); 3975 ASSERT(PAGE_LOCKED(npp)); 3976 3977 page_struct_lock(opp); 3978 3979 ASSERT(npp->p_cowcnt == 0); 3980 ASSERT(npp->p_lckcnt == 0); 3981 3982 /* Don't use claim if nothing is locked (see page_pp_unlock above) */ 3983 if ((write_perm && opp->p_cowcnt != 0) || 3984 (!write_perm && opp->p_lckcnt != 0)) { 3985 3986 if (write_perm) { 3987 npp->p_cowcnt++; 3988 ASSERT(opp->p_cowcnt != 0); 3989 opp->p_cowcnt--; 3990 } else { 3991 3992 ASSERT(opp->p_lckcnt != 0); 3993 3994 /* 3995 * We didn't need availrmem decremented if p_lckcnt on 3996 * original page is 1. Here, we are unlocking 3997 * read-only copy belonging to original page and 3998 * are locking a copy belonging to new page. 3999 */ 4000 if (opp->p_lckcnt == 1) 4001 payback = 1; 4002 4003 npp->p_lckcnt++; 4004 opp->p_lckcnt--; 4005 } 4006 } 4007 if (payback) { 4008 mutex_enter(&freemem_lock); 4009 availrmem++; 4010 pages_useclaim--; 4011 mutex_exit(&freemem_lock); 4012 } 4013 page_struct_unlock(opp); 4014 } 4015 4016 /* 4017 * Simple claim adjust functions -- used to support changes in 4018 * claims due to changes in access permissions. Used by segvn_setprot(). 4019 */ 4020 int 4021 page_addclaim(page_t *pp) 4022 { 4023 int r = 0; /* result */ 4024 4025 ASSERT(PAGE_LOCKED(pp)); 4026 4027 page_struct_lock(pp); 4028 ASSERT(pp->p_lckcnt != 0); 4029 4030 if (pp->p_lckcnt == 1) { 4031 if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4032 --pp->p_lckcnt; 4033 r = 1; 4034 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4035 cmn_err(CE_WARN, 4036 "COW lock limit reached on pfn 0x%lx", 4037 page_pptonum(pp)); 4038 } 4039 } 4040 } else { 4041 mutex_enter(&freemem_lock); 4042 if ((availrmem > pages_pp_maximum) && 4043 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 4044 --availrmem; 4045 ++pages_claimed; 4046 mutex_exit(&freemem_lock); 4047 --pp->p_lckcnt; 4048 r = 1; 4049 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4050 cmn_err(CE_WARN, 4051 "COW lock limit reached on pfn 0x%lx", 4052 page_pptonum(pp)); 4053 } 4054 } else 4055 mutex_exit(&freemem_lock); 4056 } 4057 page_struct_unlock(pp); 4058 return (r); 4059 } 4060 4061 int 4062 page_subclaim(page_t *pp) 4063 { 4064 int r = 0; 4065 4066 ASSERT(PAGE_LOCKED(pp)); 4067 4068 page_struct_lock(pp); 4069 ASSERT(pp->p_cowcnt != 0); 4070 4071 if (pp->p_lckcnt) { 4072 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4073 r = 1; 4074 /* 4075 * for availrmem 4076 */ 4077 mutex_enter(&freemem_lock); 4078 availrmem++; 4079 pages_claimed--; 4080 mutex_exit(&freemem_lock); 4081 4082 pp->p_cowcnt--; 4083 4084 if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4085 cmn_err(CE_WARN, 4086 "Page lock limit reached on pfn 0x%lx", 4087 page_pptonum(pp)); 4088 } 4089 } 4090 } else { 4091 r = 1; 4092 pp->p_cowcnt--; 4093 pp->p_lckcnt++; 4094 } 4095 page_struct_unlock(pp); 4096 return (r); 4097 } 4098 4099 int 4100 page_addclaim_pages(page_t **ppa) 4101 { 4102 4103 pgcnt_t lckpgs = 0, pg_idx; 4104 4105 
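	/*
	 * Two passes under page_llock: the first verifies that every
	 * page in ppa[] can take another COW claim and counts how many
	 * need an availrmem adjustment (those with p_lckcnt > 1); only
	 * if the whole batch can succeed does the second pass move a
	 * count from p_lckcnt to p_cowcnt on each page.
	 */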
VM_STAT_ADD(pagecnt.pc_addclaim_pages); 4106 4107 mutex_enter(&page_llock); 4108 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4109 4110 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4111 ASSERT(ppa[pg_idx]->p_lckcnt != 0); 4112 if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4113 mutex_exit(&page_llock); 4114 return (0); 4115 } 4116 if (ppa[pg_idx]->p_lckcnt > 1) 4117 lckpgs++; 4118 } 4119 4120 if (lckpgs != 0) { 4121 mutex_enter(&freemem_lock); 4122 if (availrmem >= pages_pp_maximum + lckpgs) { 4123 availrmem -= lckpgs; 4124 pages_claimed += lckpgs; 4125 } else { 4126 mutex_exit(&freemem_lock); 4127 mutex_exit(&page_llock); 4128 return (0); 4129 } 4130 mutex_exit(&freemem_lock); 4131 } 4132 4133 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4134 ppa[pg_idx]->p_lckcnt--; 4135 ppa[pg_idx]->p_cowcnt++; 4136 } 4137 mutex_exit(&page_llock); 4138 return (1); 4139 } 4140 4141 int 4142 page_subclaim_pages(page_t **ppa) 4143 { 4144 pgcnt_t ulckpgs = 0, pg_idx; 4145 4146 VM_STAT_ADD(pagecnt.pc_subclaim_pages); 4147 4148 mutex_enter(&page_llock); 4149 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4150 4151 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4152 ASSERT(ppa[pg_idx]->p_cowcnt != 0); 4153 if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4154 mutex_exit(&page_llock); 4155 return (0); 4156 } 4157 if (ppa[pg_idx]->p_lckcnt != 0) 4158 ulckpgs++; 4159 } 4160 4161 if (ulckpgs != 0) { 4162 mutex_enter(&freemem_lock); 4163 availrmem += ulckpgs; 4164 pages_claimed -= ulckpgs; 4165 mutex_exit(&freemem_lock); 4166 } 4167 4168 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4169 ppa[pg_idx]->p_cowcnt--; 4170 ppa[pg_idx]->p_lckcnt++; 4171 4172 } 4173 mutex_exit(&page_llock); 4174 return (1); 4175 } 4176 4177 page_t * 4178 page_numtopp(pfn_t pfnum, se_t se) 4179 { 4180 page_t *pp; 4181 4182 retry: 4183 pp = page_numtopp_nolock(pfnum); 4184 if (pp == NULL) { 4185 return ((page_t *)NULL); 4186 } 4187 4188 /* 4189 * Acquire the appropriate lock on the page. 4190 */ 4191 while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) { 4192 if (page_pptonum(pp) != pfnum) 4193 goto retry; 4194 continue; 4195 } 4196 4197 if (page_pptonum(pp) != pfnum) { 4198 page_unlock(pp); 4199 goto retry; 4200 } 4201 4202 return (pp); 4203 } 4204 4205 page_t * 4206 page_numtopp_noreclaim(pfn_t pfnum, se_t se) 4207 { 4208 page_t *pp; 4209 4210 retry: 4211 pp = page_numtopp_nolock(pfnum); 4212 if (pp == NULL) { 4213 return ((page_t *)NULL); 4214 } 4215 4216 /* 4217 * Acquire the appropriate lock on the page. 4218 */ 4219 while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) { 4220 if (page_pptonum(pp) != pfnum) 4221 goto retry; 4222 continue; 4223 } 4224 4225 if (page_pptonum(pp) != pfnum) { 4226 page_unlock(pp); 4227 goto retry; 4228 } 4229 4230 return (pp); 4231 } 4232 4233 /* 4234 * This routine is like page_numtopp, but will only return page structs 4235 * for pages which are ok for loading into hardware using the page struct. 4236 */ 4237 page_t * 4238 page_numtopp_nowait(pfn_t pfnum, se_t se) 4239 { 4240 page_t *pp; 4241 4242 retry: 4243 pp = page_numtopp_nolock(pfnum); 4244 if (pp == NULL) { 4245 return ((page_t *)NULL); 4246 } 4247 4248 /* 4249 * Try to acquire the appropriate lock on the page. 
4250 */ 4251 if (PP_ISFREE(pp)) 4252 pp = NULL; 4253 else { 4254 if (!page_trylock(pp, se)) 4255 pp = NULL; 4256 else { 4257 if (page_pptonum(pp) != pfnum) { 4258 page_unlock(pp); 4259 goto retry; 4260 } 4261 if (PP_ISFREE(pp)) { 4262 page_unlock(pp); 4263 pp = NULL; 4264 } 4265 } 4266 } 4267 return (pp); 4268 } 4269 4270 #define SYNC_PROGRESS_NPAGES 1000 4271 4272 /* 4273 * Returns a count of dirty pages that are in the process 4274 * of being written out. If 'cleanit' is set, try to push the page. 4275 */ 4276 pgcnt_t 4277 page_busy(int cleanit) 4278 { 4279 page_t *page0 = page_first(); 4280 page_t *pp = page0; 4281 pgcnt_t nppbusy = 0; 4282 int counter = 0; 4283 u_offset_t off; 4284 4285 do { 4286 vnode_t *vp = pp->p_vnode; 4287 4288 /* 4289 * Reset the sync timeout. The page list is very long 4290 * on large memory systems. 4291 */ 4292 if (++counter > SYNC_PROGRESS_NPAGES) { 4293 counter = 0; 4294 vfs_syncprogress(); 4295 } 4296 4297 /* 4298 * A page is a candidate for syncing if it is: 4299 * 4300 * (a) On neither the freelist nor the cachelist 4301 * (b) Hashed onto a vnode 4302 * (c) Not a kernel page 4303 * (d) Dirty 4304 * (e) Not part of a swapfile 4305 * (f) a page which belongs to a real vnode; eg has a non-null 4306 * v_vfsp pointer. 4307 * (g) Backed by a filesystem which doesn't have a 4308 * stubbed-out sync operation 4309 */ 4310 if (!PP_ISFREE(pp) && vp != NULL && !VN_ISKAS(vp) && 4311 hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL && 4312 vfs_can_sync(vp->v_vfsp)) { 4313 nppbusy++; 4314 4315 if (!cleanit) 4316 continue; 4317 if (!page_trylock(pp, SE_EXCL)) 4318 continue; 4319 4320 if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) || 4321 pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 4322 !(hat_pagesync(pp, 4323 HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) { 4324 page_unlock(pp); 4325 continue; 4326 } 4327 off = pp->p_offset; 4328 VN_HOLD(vp); 4329 page_unlock(pp); 4330 (void) VOP_PUTPAGE(vp, off, PAGESIZE, 4331 B_ASYNC | B_FREE, kcred, NULL); 4332 VN_RELE(vp); 4333 } 4334 } while ((pp = page_next(pp)) != page0); 4335 4336 vfs_syncprogress(); 4337 return (nppbusy); 4338 } 4339 4340 void page_invalidate_pages(void); 4341 4342 /* 4343 * callback handler to vm sub-system 4344 * 4345 * callers make sure no recursive entries to this func. 4346 */ 4347 /*ARGSUSED*/ 4348 boolean_t 4349 callb_vm_cpr(void *arg, int code) 4350 { 4351 if (code == CB_CODE_CPR_CHKPT) 4352 page_invalidate_pages(); 4353 return (B_TRUE); 4354 } 4355 4356 /* 4357 * Invalidate all pages of the system. 4358 * It shouldn't be called until all user page activities are all stopped. 4359 */ 4360 void 4361 page_invalidate_pages() 4362 { 4363 page_t *pp; 4364 page_t *page0; 4365 pgcnt_t nbusypages; 4366 int retry = 0; 4367 const int MAXRETRIES = 4; 4368 top: 4369 /* 4370 * Flush dirty pages and destroy the clean ones. 4371 */ 4372 nbusypages = 0; 4373 4374 pp = page0 = page_first(); 4375 do { 4376 struct vnode *vp; 4377 u_offset_t offset; 4378 int mod; 4379 4380 /* 4381 * skip the page if it has no vnode or the page associated 4382 * with the kernel vnode or prom allocated kernel mem. 4383 */ 4384 if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp)) 4385 continue; 4386 4387 /* 4388 * skip the page which is already free invalidated. 4389 */ 4390 if (PP_ISFREE(pp) && PP_ISAGED(pp)) 4391 continue; 4392 4393 /* 4394 * skip pages that are already locked or can't be "exclusively" 4395 * locked or are already free. 
After we lock the page, check 4396 * the free and age bits again to be sure it's not destroyed 4397 * yet. 4398 * To achieve max. parallelization, we use page_trylock instead 4399 * of page_lock so that we don't get blocked on individual pages 4400 * while we have thousands of other pages to process. 4401 */ 4402 if (!page_trylock(pp, SE_EXCL)) { 4403 nbusypages++; 4404 continue; 4405 } else if (PP_ISFREE(pp)) { 4406 if (!PP_ISAGED(pp)) { 4407 page_destroy_free(pp); 4408 } else { 4409 page_unlock(pp); 4410 } 4411 continue; 4412 } 4413 /* 4414 * Is this page involved in some I/O? shared? 4415 * 4416 * The page_struct_lock need not be acquired to 4417 * examine these fields since the page has an 4418 * "exclusive" lock. 4419 */ 4420 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 4421 page_unlock(pp); 4422 continue; 4423 } 4424 4425 if (vp->v_type == VCHR) { 4426 panic("vp->v_type == VCHR"); 4427 /*NOTREACHED*/ 4428 } 4429 4430 if (!page_try_demote_pages(pp)) { 4431 page_unlock(pp); 4432 continue; 4433 } 4434 4435 /* 4436 * Check the modified bit. Leave the bits alone in hardware 4437 * (they will be modified if we do the putpage). 4438 */ 4439 mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) 4440 & P_MOD); 4441 if (mod) { 4442 offset = pp->p_offset; 4443 /* 4444 * Hold the vnode before releasing the page lock 4445 * to prevent it from being freed and re-used by 4446 * some other thread. 4447 */ 4448 VN_HOLD(vp); 4449 page_unlock(pp); 4450 /* 4451 * No error return is checked here. Callers such as 4452 * cpr deal with the dirty pages at dump time 4453 * if this putpage fails. 4454 */ 4455 (void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL, 4456 kcred, NULL); 4457 VN_RELE(vp); 4458 } else { 4459 /*LINTED: constant in conditional context*/ 4460 VN_DISPOSE(pp, B_INVAL, 0, kcred); 4461 } 4462 } while ((pp = page_next(pp)) != page0); 4463 if (nbusypages && retry++ < MAXRETRIES) { 4464 delay(1); 4465 goto top; 4466 } 4467 } 4468 4469 /* 4470 * Replace the page "old" with the page "new" on the page hash and vnode lists 4471 * 4472 * the replacement must be done in place, i.e. the equivalent sequence: 4473 * 4474 * vp = old->p_vnode; 4475 * off = old->p_offset; 4476 * page_do_hashout(old) 4477 * page_do_hashin(new, vp, off) 4478 * 4479 * doesn't work, since 4480 * 1) if old is the only page on the vnode, the v_pages list has a window 4481 * where it looks empty. This will break file system assumptions. 4482 * and 4483 * 2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list.
4484 */ 4485 static void 4486 page_do_relocate_hash(page_t *new, page_t *old) 4487 { 4488 page_t **hash_list; 4489 vnode_t *vp = old->p_vnode; 4490 kmutex_t *sep; 4491 4492 ASSERT(PAGE_EXCL(old)); 4493 ASSERT(PAGE_EXCL(new)); 4494 ASSERT(vp != NULL); 4495 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 4496 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset)))); 4497 4498 /* 4499 * First find old page on the page hash list 4500 */ 4501 hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)]; 4502 4503 for (;;) { 4504 if (*hash_list == old) 4505 break; 4506 if (*hash_list == NULL) { 4507 panic("page_do_hashout"); 4508 /*NOTREACHED*/ 4509 } 4510 hash_list = &(*hash_list)->p_hash; 4511 } 4512 4513 /* 4514 * update new and replace old with new on the page hash list 4515 */ 4516 new->p_vnode = old->p_vnode; 4517 new->p_offset = old->p_offset; 4518 new->p_hash = old->p_hash; 4519 *hash_list = new; 4520 4521 if ((new->p_vnode->v_flag & VISSWAP) != 0) 4522 PP_SETSWAP(new); 4523 4524 /* 4525 * replace old with new on the vnode's page list 4526 */ 4527 if (old->p_vpnext == old) { 4528 new->p_vpnext = new; 4529 new->p_vpprev = new; 4530 } else { 4531 new->p_vpnext = old->p_vpnext; 4532 new->p_vpprev = old->p_vpprev; 4533 new->p_vpnext->p_vpprev = new; 4534 new->p_vpprev->p_vpnext = new; 4535 } 4536 if (vp->v_pages == old) 4537 vp->v_pages = new; 4538 4539 /* 4540 * clear out the old page 4541 */ 4542 old->p_hash = NULL; 4543 old->p_vpnext = NULL; 4544 old->p_vpprev = NULL; 4545 old->p_vnode = NULL; 4546 PP_CLRSWAP(old); 4547 old->p_offset = (u_offset_t)-1; 4548 page_clr_all_props(old); 4549 4550 /* 4551 * Wake up processes waiting for this page. The page's 4552 * identity has been changed, and is probably not the 4553 * desired page any longer. 4554 */ 4555 sep = page_se_mutex(old); 4556 mutex_enter(sep); 4557 old->p_selock &= ~SE_EWANTED; 4558 if (CV_HAS_WAITERS(&old->p_cv)) 4559 cv_broadcast(&old->p_cv); 4560 mutex_exit(sep); 4561 } 4562 4563 /* 4564 * This function moves the identity of page "pp_old" to page "pp_new". 4565 * Both pages must be locked on entry. "pp_new" is free, has no identity, 4566 * and need not be hashed out from anywhere. 4567 */ 4568 void 4569 page_relocate_hash(page_t *pp_new, page_t *pp_old) 4570 { 4571 vnode_t *vp = pp_old->p_vnode; 4572 u_offset_t off = pp_old->p_offset; 4573 kmutex_t *phm, *vphm; 4574 4575 /* 4576 * Rehash two pages 4577 */ 4578 ASSERT(PAGE_EXCL(pp_old)); 4579 ASSERT(PAGE_EXCL(pp_new)); 4580 ASSERT(vp != NULL); 4581 ASSERT(pp_new->p_vnode == NULL); 4582 4583 /* 4584 * hashout then hashin while holding the mutexes 4585 */ 4586 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off)); 4587 mutex_enter(phm); 4588 vphm = page_vnode_mutex(vp); 4589 mutex_enter(vphm); 4590 4591 page_do_relocate_hash(pp_new, pp_old); 4592 4593 /* The following comment preserved from page_flip(). */ 4594 pp_new->p_fsdata = pp_old->p_fsdata; 4595 pp_old->p_fsdata = 0; 4596 mutex_exit(vphm); 4597 mutex_exit(phm); 4598 4599 /* 4600 * The page_struct_lock need not be acquired for lckcnt and 4601 * cowcnt since the page has an "exclusive" lock. 4602 */ 4603 ASSERT(pp_new->p_lckcnt == 0); 4604 ASSERT(pp_new->p_cowcnt == 0); 4605 pp_new->p_lckcnt = pp_old->p_lckcnt; 4606 pp_new->p_cowcnt = pp_old->p_cowcnt; 4607 pp_old->p_lckcnt = pp_old->p_cowcnt = 0; 4608 4609 } 4610 4611 /* 4612 * Helper routine used to lock all remaining members of a 4613 * large page. The caller is responsible for passing in a locked 4614 * pp. 
If pp is a large page, then it succeeds in locking all the 4615 * remaining constituent pages or it returns with only the 4616 * original page locked. 4617 * 4618 * Returns 1 on success, 0 on failure. 4619 * 4620 * If success is returned this routine guarantees p_szc for all constituent 4621 * pages of a large page pp belongs to can't change. To achieve this we 4622 * recheck szc of pp after locking all constituent pages and retry if szc 4623 * changed (it could only decrease). Since hat_page_demote() needs an EXCL 4624 * lock on one of constituent pages it can't be running after all constituent 4625 * pages are locked. hat_page_demote() with a lock on a constituent page 4626 * outside of this large page (i.e. pp belonged to a larger large page) is 4627 * already done with all constituent pages of pp since the root's p_szc is 4628 * changed last. Therefore no need to synchronize with hat_page_demote() that 4629 * locked a constituent page outside of pp's current large page. 4630 */ 4631 #ifdef DEBUG 4632 uint32_t gpg_trylock_mtbf = 0; 4633 #endif 4634 4635 int 4636 group_page_trylock(page_t *pp, se_t se) 4637 { 4638 page_t *tpp; 4639 pgcnt_t npgs, i, j; 4640 uint_t pszc = pp->p_szc; 4641 4642 #ifdef DEBUG 4643 if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) { 4644 return (0); 4645 } 4646 #endif 4647 4648 if (pp != PP_GROUPLEADER(pp, pszc)) { 4649 return (0); 4650 } 4651 4652 retry: 4653 ASSERT(PAGE_LOCKED_SE(pp, se)); 4654 ASSERT(!PP_ISFREE(pp)); 4655 if (pszc == 0) { 4656 return (1); 4657 } 4658 npgs = page_get_pagecnt(pszc); 4659 tpp = pp + 1; 4660 for (i = 1; i < npgs; i++, tpp++) { 4661 if (!page_trylock(tpp, se)) { 4662 tpp = pp + 1; 4663 for (j = 1; j < i; j++, tpp++) { 4664 page_unlock(tpp); 4665 } 4666 return (0); 4667 } 4668 } 4669 if (pp->p_szc != pszc) { 4670 ASSERT(pp->p_szc < pszc); 4671 ASSERT(pp->p_vnode != NULL && !PP_ISKAS(pp) && 4672 !IS_SWAPFSVP(pp->p_vnode)); 4673 tpp = pp + 1; 4674 for (i = 1; i < npgs; i++, tpp++) { 4675 page_unlock(tpp); 4676 } 4677 pszc = pp->p_szc; 4678 goto retry; 4679 } 4680 return (1); 4681 } 4682 4683 void 4684 group_page_unlock(page_t *pp) 4685 { 4686 page_t *tpp; 4687 pgcnt_t npgs, i; 4688 4689 ASSERT(PAGE_LOCKED(pp)); 4690 ASSERT(!PP_ISFREE(pp)); 4691 ASSERT(pp == PP_PAGEROOT(pp)); 4692 npgs = page_get_pagecnt(pp->p_szc); 4693 for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) { 4694 page_unlock(tpp); 4695 } 4696 } 4697 4698 /* 4699 * returns 4700 * 0 : on success and *nrelocp is number of relocated PAGESIZE pages 4701 * ERANGE : this is not a base page 4702 * EBUSY : failure to get locks on the page/pages 4703 * ENOMEM : failure to obtain replacement pages 4704 * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel 4705 * EIO : An error occurred while trying to copy the page data 4706 * 4707 * Return with all constituent members of target and replacement 4708 * SE_EXCL locked. It is the callers responsibility to drop the 4709 * locks. 
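 *
 * A minimal sketch of the common calling pattern (illustrative only; it
 * assumes the caller already holds the target SE_EXCL and wants this
 * routine to allocate the replacement itself by passing NULL):
 *
 *	page_t *repl = NULL;
 *	spgcnt_t nreloc;
 *
 *	ret = do_page_relocate(&targ, &repl, 1, &nreloc, NULL);
 *
 * On success *target heads the list of old constituent pages, *replacement
 * the relocated copies, and the caller must drop all of those SE_EXCL locks.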
4710 */ 4711 int 4712 do_page_relocate( 4713 page_t **target, 4714 page_t **replacement, 4715 int grouplock, 4716 spgcnt_t *nrelocp, 4717 lgrp_t *lgrp) 4718 { 4719 page_t *first_repl; 4720 page_t *repl; 4721 page_t *targ; 4722 page_t *pl = NULL; 4723 uint_t ppattr; 4724 pfn_t pfn, repl_pfn; 4725 uint_t szc; 4726 spgcnt_t npgs, i; 4727 int repl_contig = 0; 4728 uint_t flags = 0; 4729 spgcnt_t dofree = 0; 4730 4731 *nrelocp = 0; 4732 4733 #if defined(__sparc) 4734 /* 4735 * We need to wait till OBP has completed 4736 * its boot-time handoff of its resources to the kernel 4737 * before we allow page relocation 4738 */ 4739 if (page_relocate_ready == 0) { 4740 return (EAGAIN); 4741 } 4742 #endif 4743 4744 /* 4745 * If this is not a base page, 4746 * just return with 0x0 pages relocated. 4747 */ 4748 targ = *target; 4749 ASSERT(PAGE_EXCL(targ)); 4750 ASSERT(!PP_ISFREE(targ)); 4751 szc = targ->p_szc; 4752 ASSERT(szc < mmu_page_sizes); 4753 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4754 pfn = targ->p_pagenum; 4755 if (pfn != PFN_BASE(pfn, szc)) { 4756 VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]); 4757 return (ERANGE); 4758 } 4759 4760 if ((repl = *replacement) != NULL && repl->p_szc >= szc) { 4761 repl_pfn = repl->p_pagenum; 4762 if (repl_pfn != PFN_BASE(repl_pfn, szc)) { 4763 VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]); 4764 return (ERANGE); 4765 } 4766 repl_contig = 1; 4767 } 4768 4769 /* 4770 * We must lock all members of this large page or we cannot 4771 * relocate any part of it. 4772 */ 4773 if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) { 4774 VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]); 4775 return (EBUSY); 4776 } 4777 4778 /* 4779 * reread szc it could have been decreased before 4780 * group_page_trylock() was done. 4781 */ 4782 szc = targ->p_szc; 4783 ASSERT(szc < mmu_page_sizes); 4784 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4785 ASSERT(pfn == PFN_BASE(pfn, szc)); 4786 4787 npgs = page_get_pagecnt(targ->p_szc); 4788 4789 if (repl == NULL) { 4790 dofree = npgs; /* Size of target page in MMU pages */ 4791 if (!page_create_wait(dofree, 0)) { 4792 if (grouplock != 0) { 4793 group_page_unlock(targ); 4794 } 4795 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4796 return (ENOMEM); 4797 } 4798 4799 /* 4800 * seg kmem pages require that the target and replacement 4801 * page be the same pagesize. 4802 */ 4803 flags = (VN_ISKAS(targ->p_vnode)) ? 
PGR_SAMESZC : 0; 4804 repl = page_get_replacement_page(targ, lgrp, flags); 4805 if (repl == NULL) { 4806 if (grouplock != 0) { 4807 group_page_unlock(targ); 4808 } 4809 page_create_putback(dofree); 4810 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4811 return (ENOMEM); 4812 } 4813 } 4814 #ifdef DEBUG 4815 else { 4816 ASSERT(PAGE_LOCKED(repl)); 4817 } 4818 #endif /* DEBUG */ 4819 4820 #if defined(__sparc) 4821 /* 4822 * Let hat_page_relocate() complete the relocation if it's kernel page 4823 */ 4824 if (VN_ISKAS(targ->p_vnode)) { 4825 *replacement = repl; 4826 if (hat_page_relocate(target, replacement, nrelocp) != 0) { 4827 if (grouplock != 0) { 4828 group_page_unlock(targ); 4829 } 4830 if (dofree) { 4831 *replacement = NULL; 4832 page_free_replacement_page(repl); 4833 page_create_putback(dofree); 4834 } 4835 VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]); 4836 return (EAGAIN); 4837 } 4838 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 4839 return (0); 4840 } 4841 #else 4842 #if defined(lint) 4843 dofree = dofree; 4844 #endif 4845 #endif 4846 4847 first_repl = repl; 4848 4849 for (i = 0; i < npgs; i++) { 4850 ASSERT(PAGE_EXCL(targ)); 4851 ASSERT(targ->p_slckcnt == 0); 4852 ASSERT(repl->p_slckcnt == 0); 4853 4854 (void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD); 4855 4856 ASSERT(hat_page_getshare(targ) == 0); 4857 ASSERT(!PP_ISFREE(targ)); 4858 ASSERT(targ->p_pagenum == (pfn + i)); 4859 ASSERT(repl_contig == 0 || 4860 repl->p_pagenum == (repl_pfn + i)); 4861 4862 /* 4863 * Copy the page contents and attributes then 4864 * relocate the page in the page hash. 4865 */ 4866 if (ppcopy(targ, repl) == 0) { 4867 targ = *target; 4868 repl = first_repl; 4869 VM_STAT_ADD(vmm_vmstats.ppr_copyfail); 4870 if (grouplock != 0) { 4871 group_page_unlock(targ); 4872 } 4873 if (dofree) { 4874 *replacement = NULL; 4875 page_free_replacement_page(repl); 4876 page_create_putback(dofree); 4877 } 4878 return (EIO); 4879 } 4880 4881 targ++; 4882 if (repl_contig != 0) { 4883 repl++; 4884 } else { 4885 repl = repl->p_next; 4886 } 4887 } 4888 4889 repl = first_repl; 4890 targ = *target; 4891 4892 for (i = 0; i < npgs; i++) { 4893 ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO)); 4894 page_clr_all_props(repl); 4895 page_set_props(repl, ppattr); 4896 page_relocate_hash(repl, targ); 4897 4898 ASSERT(hat_page_getshare(targ) == 0); 4899 ASSERT(hat_page_getshare(repl) == 0); 4900 /* 4901 * Now clear the props on targ, after the 4902 * page_relocate_hash(), they no longer 4903 * have any meaning. 4904 */ 4905 page_clr_all_props(targ); 4906 ASSERT(targ->p_next == targ); 4907 ASSERT(targ->p_prev == targ); 4908 page_list_concat(&pl, &targ); 4909 4910 targ++; 4911 if (repl_contig != 0) { 4912 repl++; 4913 } else { 4914 repl = repl->p_next; 4915 } 4916 } 4917 /* assert that we have come full circle with repl */ 4918 ASSERT(repl_contig == 1 || first_repl == repl); 4919 4920 *target = pl; 4921 if (*replacement == NULL) { 4922 ASSERT(first_repl == repl); 4923 *replacement = repl; 4924 } 4925 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 4926 *nrelocp = npgs; 4927 return (0); 4928 } 4929 /* 4930 * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated. 
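 *
 * With freetarget != 0 the old constituent pages are also returned to the
 * free list on success.  page_relocate_cage() and page_migrate() later in
 * this file show the usual call, e.g.:
 *
 *	result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL);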
4931 */ 4932 int 4933 page_relocate( 4934 page_t **target, 4935 page_t **replacement, 4936 int grouplock, 4937 int freetarget, 4938 spgcnt_t *nrelocp, 4939 lgrp_t *lgrp) 4940 { 4941 spgcnt_t ret; 4942 4943 /* do_page_relocate returns 0 on success or errno value */ 4944 ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp); 4945 4946 if (ret != 0 || freetarget == 0) { 4947 return (ret); 4948 } 4949 if (*nrelocp == 1) { 4950 ASSERT(*target != NULL); 4951 page_free(*target, 1); 4952 } else { 4953 page_t *tpp = *target; 4954 uint_t szc = tpp->p_szc; 4955 pgcnt_t npgs = page_get_pagecnt(szc); 4956 ASSERT(npgs > 1); 4957 ASSERT(szc != 0); 4958 do { 4959 ASSERT(PAGE_EXCL(tpp)); 4960 ASSERT(!hat_page_is_mapped(tpp)); 4961 ASSERT(tpp->p_szc == szc); 4962 PP_SETFREE(tpp); 4963 PP_SETAGED(tpp); 4964 npgs--; 4965 } while ((tpp = tpp->p_next) != *target); 4966 ASSERT(npgs == 0); 4967 page_list_add_pages(*target, 0); 4968 npgs = page_get_pagecnt(szc); 4969 page_create_putback(npgs); 4970 } 4971 return (ret); 4972 } 4973 4974 /* 4975 * it is up to the caller to deal with pcf accounting. 4976 */ 4977 void 4978 page_free_replacement_page(page_t *pplist) 4979 { 4980 page_t *pp; 4981 4982 while (pplist != NULL) { 4983 /* 4984 * pp_targ is a linked list. 4985 */ 4986 pp = pplist; 4987 if (pp->p_szc == 0) { 4988 page_sub(&pplist, pp); 4989 page_clr_all_props(pp); 4990 PP_SETFREE(pp); 4991 PP_SETAGED(pp); 4992 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 4993 page_unlock(pp); 4994 VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]); 4995 } else { 4996 spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc); 4997 page_t *tpp; 4998 page_list_break(&pp, &pplist, curnpgs); 4999 tpp = pp; 5000 do { 5001 ASSERT(PAGE_EXCL(tpp)); 5002 ASSERT(!hat_page_is_mapped(tpp)); 5003 page_clr_all_props(tpp); 5004 PP_SETFREE(tpp); 5005 PP_SETAGED(tpp); 5006 } while ((tpp = tpp->p_next) != pp); 5007 page_list_add_pages(pp, 0); 5008 VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]); 5009 } 5010 } 5011 } 5012 5013 /* 5014 * Relocate target to non-relocatable replacement page. 5015 */ 5016 int 5017 page_relocate_cage(page_t **target, page_t **replacement) 5018 { 5019 page_t *tpp, *rpp; 5020 spgcnt_t pgcnt, npgs; 5021 int result; 5022 5023 tpp = *target; 5024 5025 ASSERT(PAGE_EXCL(tpp)); 5026 ASSERT(tpp->p_szc == 0); 5027 5028 pgcnt = btop(page_get_pagesize(tpp->p_szc)); 5029 5030 do { 5031 (void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC); 5032 rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC); 5033 if (rpp == NULL) { 5034 page_create_putback(pgcnt); 5035 kcage_cageout_wakeup(); 5036 } 5037 } while (rpp == NULL); 5038 5039 ASSERT(PP_ISNORELOC(rpp)); 5040 5041 result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL); 5042 5043 if (result == 0) { 5044 *replacement = rpp; 5045 if (pgcnt != npgs) 5046 panic("page_relocate_cage: partial relocation"); 5047 } 5048 5049 return (result); 5050 } 5051 5052 /* 5053 * Release the page lock on a page, place on cachelist 5054 * tail if no longer mapped. Caller can let us know if 5055 * the page is known to be clean. 
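 *
 * Returns PGREL_CLEAN if the page was freed to the cachelist, PGREL_MOD if
 * it was only unlocked because it is still dirty, and PGREL_NOTREL if it
 * could not be released at all.  Illustrative only (saved vp/off are
 * assumed): a caller that wants dirty pages pushed rather than kept might
 * follow up with
 *
 *	if (page_release(pp, 1) == PGREL_MOD)
 *		(void) VOP_PUTPAGE(vp, off, PAGESIZE, B_ASYNC | B_FREE,
 *		    kcred, NULL);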
5056 */ 5057 int 5058 page_release(page_t *pp, int checkmod) 5059 { 5060 int status; 5061 5062 ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) && 5063 (pp->p_vnode != NULL)); 5064 5065 if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) && 5066 ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) && 5067 pp->p_lckcnt == 0 && pp->p_cowcnt == 0 && 5068 !hat_page_is_mapped(pp)) { 5069 5070 /* 5071 * If page is modified, unlock it 5072 * 5073 * (p_nrm & P_MOD) bit has the latest stuff because: 5074 * (1) We found that this page doesn't have any mappings 5075 * _after_ holding SE_EXCL and 5076 * (2) We didn't drop SE_EXCL lock after the check in (1) 5077 */ 5078 if (checkmod && hat_ismod(pp)) { 5079 page_unlock(pp); 5080 status = PGREL_MOD; 5081 } else { 5082 /*LINTED: constant in conditional context*/ 5083 VN_DISPOSE(pp, B_FREE, 0, kcred); 5084 status = PGREL_CLEAN; 5085 } 5086 } else { 5087 page_unlock(pp); 5088 status = PGREL_NOTREL; 5089 } 5090 return (status); 5091 } 5092 5093 /* 5094 * Given a constituent page, try to demote the large page on the freelist. 5095 * 5096 * Returns nonzero if the page could be demoted successfully. Returns with 5097 * the constituent page still locked. 5098 */ 5099 int 5100 page_try_demote_free_pages(page_t *pp) 5101 { 5102 page_t *rootpp = pp; 5103 pfn_t pfn = page_pptonum(pp); 5104 spgcnt_t npgs; 5105 uint_t szc = pp->p_szc; 5106 5107 ASSERT(PP_ISFREE(pp)); 5108 ASSERT(PAGE_EXCL(pp)); 5109 5110 /* 5111 * Adjust rootpp and lock it, if `pp' is not the base 5112 * constituent page. 5113 */ 5114 npgs = page_get_pagecnt(pp->p_szc); 5115 if (npgs == 1) { 5116 return (0); 5117 } 5118 5119 if (!IS_P2ALIGNED(pfn, npgs)) { 5120 pfn = P2ALIGN(pfn, npgs); 5121 rootpp = page_numtopp_nolock(pfn); 5122 } 5123 5124 if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) { 5125 return (0); 5126 } 5127 5128 if (rootpp->p_szc != szc) { 5129 if (pp != rootpp) 5130 page_unlock(rootpp); 5131 return (0); 5132 } 5133 5134 page_demote_free_pages(rootpp); 5135 5136 if (pp != rootpp) 5137 page_unlock(rootpp); 5138 5139 ASSERT(PP_ISFREE(pp)); 5140 ASSERT(PAGE_EXCL(pp)); 5141 return (1); 5142 } 5143 5144 /* 5145 * Given a constituent page, try to demote the large page. 5146 * 5147 * Returns nonzero if the page could be demoted successfully. Returns with 5148 * the constituent page still locked. 5149 */ 5150 int 5151 page_try_demote_pages(page_t *pp) 5152 { 5153 page_t *tpp, *rootpp = pp; 5154 pfn_t pfn = page_pptonum(pp); 5155 spgcnt_t i, npgs; 5156 uint_t szc = pp->p_szc; 5157 vnode_t *vp = pp->p_vnode; 5158 5159 ASSERT(PAGE_EXCL(pp)); 5160 5161 VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]); 5162 5163 if (pp->p_szc == 0) { 5164 VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]); 5165 return (1); 5166 } 5167 5168 if (vp != NULL && !IS_SWAPFSVP(vp) && !VN_ISKAS(vp)) { 5169 VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]); 5170 page_demote_vp_pages(pp); 5171 ASSERT(pp->p_szc == 0); 5172 return (1); 5173 } 5174 5175 /* 5176 * Adjust rootpp if passed in is not the base 5177 * constituent page. 5178 */ 5179 npgs = page_get_pagecnt(pp->p_szc); 5180 ASSERT(npgs > 1); 5181 if (!IS_P2ALIGNED(pfn, npgs)) { 5182 pfn = P2ALIGN(pfn, npgs); 5183 rootpp = page_numtopp_nolock(pfn); 5184 VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]); 5185 ASSERT(rootpp->p_vnode != NULL); 5186 ASSERT(rootpp->p_szc == szc); 5187 } 5188 5189 /* 5190 * We can't demote kernel pages since we can't hat_unload() 5191 * the mappings. 
5192 */ 5193 if (VN_ISKAS(rootpp->p_vnode)) 5194 return (0); 5195 5196 /* 5197 * Attempt to lock all constituent pages except the page passed 5198 * in since it's already locked. 5199 */ 5200 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5201 ASSERT(!PP_ISFREE(tpp)); 5202 ASSERT(tpp->p_vnode != NULL); 5203 5204 if (tpp != pp && !page_trylock(tpp, SE_EXCL)) 5205 break; 5206 ASSERT(tpp->p_szc == rootpp->p_szc); 5207 ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i); 5208 } 5209 5210 /* 5211 * If we failed to lock them all then unlock what we have 5212 * locked so far and bail. 5213 */ 5214 if (i < npgs) { 5215 tpp = rootpp; 5216 while (i-- > 0) { 5217 if (tpp != pp) 5218 page_unlock(tpp); 5219 tpp++; 5220 } 5221 VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]); 5222 return (0); 5223 } 5224 5225 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5226 ASSERT(PAGE_EXCL(tpp)); 5227 ASSERT(tpp->p_slckcnt == 0); 5228 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 5229 tpp->p_szc = 0; 5230 } 5231 5232 /* 5233 * Unlock all pages except the page passed in. 5234 */ 5235 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5236 ASSERT(!hat_page_is_mapped(tpp)); 5237 if (tpp != pp) 5238 page_unlock(tpp); 5239 } 5240 5241 VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]); 5242 return (1); 5243 } 5244 5245 /* 5246 * Called by page_free() and page_destroy() to demote the page size code 5247 * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non zero 5248 * p_szc on free list, neither can we just clear p_szc of a single page_t 5249 * within a large page since it will break other code that relies on p_szc 5250 * being the same for all page_t's of a large page). Anonymous pages should 5251 * never end up here because anon_map_getpages() cannot deal with p_szc 5252 * changes after a single constituent page is locked. While anonymous or 5253 * kernel large pages are demoted or freed the entire large page at a time 5254 * with all constituent pages locked EXCL for the file system pages we 5255 * have to be able to demote a large page (i.e. decrease all constituent pages 5256 * p_szc) with only just an EXCL lock on one of constituent pages. The reason 5257 * we can easily deal with anonymous page demotion the entire large page at a 5258 * time is that those operation originate at address space level and concern 5259 * the entire large page region with actual demotion only done when pages are 5260 * not shared with any other processes (therefore we can always get EXCL lock 5261 * on all anonymous constituent pages after clearing segment page 5262 * cache). However file system pages can be truncated or invalidated at a 5263 * PAGESIZE level from the file system side and end up in page_free() or 5264 * page_destroy() (we also allow only part of the large page to be SOFTLOCKed 5265 * and therefore pageout should be able to demote a large page by EXCL locking 5266 * any constituent page that is not under SOFTLOCK). In those cases we cannot 5267 * rely on being able to lock EXCL all constituent pages. 5268 * 5269 * To prevent szc changes on file system pages one has to lock all constituent 5270 * pages at least SHARED (or call page_szc_lock()). The only subsystem that 5271 * doesn't rely on locking all constituent pages (or using page_szc_lock()) to 5272 * prevent szc changes is hat layer that uses its own page level mlist 5273 * locks. hat assumes that szc doesn't change after mlist lock for a page is 5274 * taken. 
Therefore we need to change szc under hat level locks if we only 5275 * have an EXCL lock on a single constituent page and hat still references any 5276 * of constituent pages. (Note we can't "ignore" the hat layer by simply 5277 * calling hat_pageunload() on all constituent pages without having EXCL locks on all of 5278 * the constituent pages). We use the hat_page_demote() call to safely demote szc of 5279 * all constituent pages under hat locks when we only have an EXCL lock on one 5280 * of constituent pages. 5281 * 5282 * This routine calls page_szc_lock() before calling hat_page_demote() to 5283 * allow segvn in one special case not to lock all constituent pages SHARED 5284 * before calling hat_memload_array() that relies on p_szc not changing even 5285 * before hat level mlist lock is taken. In that case segvn uses 5286 * page_szc_lock() to prevent hat_page_demote() changing p_szc values. 5287 * 5288 * Anonymous or kernel page demotion still has to lock all pages exclusively 5289 * and do hat_pageunload() on all constituent pages before demoting the page; 5290 * therefore there's no need for anonymous or kernel page demotion to use 5291 * the hat_page_demote() mechanism. 5292 * 5293 * hat_page_demote() removes all large mappings that map pp and then decreases 5294 * p_szc starting from the last constituent page of the large page. Working 5295 * from the tail of a large page in decreasing pfn order allows one looking at 5296 * the root page to know that hat_page_demote() is done for root's szc area. 5297 * E.g. if a root page has szc 1, one knows one only has to lock all constituent 5298 * pages within the szc 1 area to prevent szc changes because hat_page_demote() 5299 * that started on this page when it had szc > 1 is done for this szc 1 area. 5300 * 5301 * We are guaranteed that all constituent pages of pp's large page belong to 5302 * the same vnode with the consecutive offsets increasing in the direction of 5303 * the pfn, i.e. the identity of constituent pages can't change until their 5304 * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove 5305 * large mappings to pp even though we don't lock any constituent page except 5306 * pp (i.e. we won't unload e.g. a kernel locked page).
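 *
 * A condensed sketch of the rule this implies for code that needs p_szc of
 * a file system page to stay constant (illustrative only): either lock
 * every constituent page at least SHARED, or bracket the access with
 *
 *	mtx = page_szc_lock(pp);
 *	... use pp->p_szc ...
 *	if (mtx != NULL)
 *		mutex_exit(mtx);
 *
 * page_demote_vp_pages() below takes the same page_szc_lock() before it
 * calls hat_page_demote(), which is what makes this safe.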
5307 */ 5308 static void 5309 page_demote_vp_pages(page_t *pp) 5310 { 5311 kmutex_t *mtx; 5312 5313 ASSERT(PAGE_EXCL(pp)); 5314 ASSERT(!PP_ISFREE(pp)); 5315 ASSERT(pp->p_vnode != NULL); 5316 ASSERT(!IS_SWAPFSVP(pp->p_vnode)); 5317 ASSERT(!PP_ISKAS(pp)); 5318 5319 VM_STAT_ADD(pagecnt.pc_demote_pages[0]); 5320 5321 mtx = page_szc_lock(pp); 5322 if (mtx != NULL) { 5323 hat_page_demote(pp); 5324 mutex_exit(mtx); 5325 } 5326 ASSERT(pp->p_szc == 0); 5327 } 5328 5329 /* 5330 * Mark any existing pages for migration in the given range 5331 */ 5332 void 5333 page_mark_migrate(struct seg *seg, caddr_t addr, size_t len, 5334 struct anon_map *amp, ulong_t anon_index, vnode_t *vp, 5335 u_offset_t vnoff, int rflag) 5336 { 5337 struct anon *ap; 5338 vnode_t *curvp; 5339 lgrp_t *from; 5340 pgcnt_t i; 5341 pgcnt_t nlocked; 5342 u_offset_t off; 5343 pfn_t pfn; 5344 size_t pgsz; 5345 size_t segpgsz; 5346 pgcnt_t pages; 5347 uint_t pszc; 5348 page_t **ppa; 5349 pgcnt_t ppa_nentries; 5350 page_t *pp; 5351 caddr_t va; 5352 ulong_t an_idx; 5353 anon_sync_obj_t cookie; 5354 5355 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5356 5357 /* 5358 * Don't do anything if don't need to do lgroup optimizations 5359 * on this system 5360 */ 5361 if (!lgrp_optimizations()) 5362 return; 5363 5364 /* 5365 * Align address and length to (potentially large) page boundary 5366 */ 5367 segpgsz = page_get_pagesize(seg->s_szc); 5368 addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz); 5369 if (rflag) 5370 len = P2ROUNDUP(len, segpgsz); 5371 5372 /* 5373 * Allocate page array to accommodate largest page size 5374 */ 5375 pgsz = page_get_pagesize(page_num_pagesizes() - 1); 5376 ppa_nentries = btop(pgsz); 5377 ppa = kmem_zalloc(ppa_nentries * sizeof (page_t *), KM_SLEEP); 5378 5379 /* 5380 * Do one (large) page at a time 5381 */ 5382 va = addr; 5383 while (va < addr + len) { 5384 /* 5385 * Lookup (root) page for vnode and offset corresponding to 5386 * this virtual address 5387 * Try anonmap first since there may be copy-on-write 5388 * pages, but initialize vnode pointer and offset using 5389 * vnode arguments just in case there isn't an amp. 
5390 */ 5391 curvp = vp; 5392 off = vnoff + va - seg->s_base; 5393 if (amp) { 5394 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5395 an_idx = anon_index + seg_page(seg, va); 5396 anon_array_enter(amp, an_idx, &cookie); 5397 ap = anon_get_ptr(amp->ahp, an_idx); 5398 if (ap) 5399 swap_xlate(ap, &curvp, &off); 5400 anon_array_exit(&cookie); 5401 ANON_LOCK_EXIT(&->a_rwlock); 5402 } 5403 5404 pp = NULL; 5405 if (curvp) 5406 pp = page_lookup(curvp, off, SE_SHARED); 5407 5408 /* 5409 * If there isn't a page at this virtual address, 5410 * skip to next page 5411 */ 5412 if (pp == NULL) { 5413 va += PAGESIZE; 5414 continue; 5415 } 5416 5417 /* 5418 * Figure out which lgroup this page is in for kstats 5419 */ 5420 pfn = page_pptonum(pp); 5421 from = lgrp_pfn_to_lgrp(pfn); 5422 5423 /* 5424 * Get page size, and round up and skip to next page boundary 5425 * if unaligned address 5426 */ 5427 pszc = pp->p_szc; 5428 pgsz = page_get_pagesize(pszc); 5429 pages = btop(pgsz); 5430 if (!IS_P2ALIGNED(va, pgsz) || 5431 !IS_P2ALIGNED(pfn, pages) || 5432 pgsz > segpgsz) { 5433 pgsz = MIN(pgsz, segpgsz); 5434 page_unlock(pp); 5435 i = btop(P2END((uintptr_t)va, pgsz) - 5436 (uintptr_t)va); 5437 va = (caddr_t)P2END((uintptr_t)va, pgsz); 5438 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, i); 5439 continue; 5440 } 5441 5442 /* 5443 * Upgrade to exclusive lock on page 5444 */ 5445 if (!page_tryupgrade(pp)) { 5446 page_unlock(pp); 5447 va += pgsz; 5448 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 5449 btop(pgsz)); 5450 continue; 5451 } 5452 5453 /* 5454 * Remember pages locked exclusively and how many 5455 */ 5456 ppa[0] = pp; 5457 nlocked = 1; 5458 5459 /* 5460 * Lock constituent pages if this is large page 5461 */ 5462 if (pages > 1) { 5463 /* 5464 * Lock all constituents except root page, since it 5465 * should be locked already. 5466 */ 5467 for (i = 1; i < pages; i++) { 5468 pp++; 5469 if (!page_trylock(pp, SE_EXCL)) { 5470 break; 5471 } 5472 if (PP_ISFREE(pp) || 5473 pp->p_szc != pszc) { 5474 /* 5475 * hat_page_demote() raced in with us. 5476 */ 5477 ASSERT(!IS_SWAPFSVP(curvp)); 5478 page_unlock(pp); 5479 break; 5480 } 5481 ppa[nlocked] = pp; 5482 nlocked++; 5483 } 5484 } 5485 5486 /* 5487 * If all constituent pages couldn't be locked, 5488 * unlock pages locked so far and skip to next page. 5489 */ 5490 if (nlocked != pages) { 5491 for (i = 0; i < nlocked; i++) 5492 page_unlock(ppa[i]); 5493 va += pgsz; 5494 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 5495 btop(pgsz)); 5496 continue; 5497 } 5498 5499 /* 5500 * hat_page_demote() can no longer happen 5501 * since last cons page had the right p_szc after 5502 * all cons pages were locked. all cons pages 5503 * should now have the same p_szc. 
5504 */ 5505 5506 /* 5507 * All constituent pages locked successfully, so mark 5508 * large page for migration and unload the mappings of 5509 * constituent pages, so a fault will occur on any part of the 5510 * large page 5511 */ 5512 PP_SETMIGRATE(ppa[0]); 5513 for (i = 0; i < nlocked; i++) { 5514 pp = ppa[i]; 5515 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 5516 ASSERT(hat_page_getshare(pp) == 0); 5517 page_unlock(pp); 5518 } 5519 lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked); 5520 5521 va += pgsz; 5522 } 5523 kmem_free(ppa, ppa_nentries * sizeof (page_t *)); 5524 } 5525 5526 /* 5527 * Migrate any pages that have been marked for migration in the given range 5528 */ 5529 void 5530 page_migrate( 5531 struct seg *seg, 5532 caddr_t addr, 5533 page_t **ppa, 5534 pgcnt_t npages) 5535 { 5536 lgrp_t *from; 5537 lgrp_t *to; 5538 page_t *newpp; 5539 page_t *pp; 5540 pfn_t pfn; 5541 size_t pgsz; 5542 spgcnt_t page_cnt; 5543 spgcnt_t i; 5544 uint_t pszc; 5545 5546 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5547 5548 while (npages > 0) { 5549 pp = *ppa; 5550 pszc = pp->p_szc; 5551 pgsz = page_get_pagesize(pszc); 5552 page_cnt = btop(pgsz); 5553 5554 /* 5555 * Check to see whether this page is marked for migration 5556 * 5557 * Assume that root page of large page is marked for 5558 * migration and none of the other constituent pages 5559 * are marked. This really simplifies clearing the 5560 * migrate bit by not having to clear it from each 5561 * constituent page. 5562 * 5563 * note we don't want to relocate an entire large page if 5564 * someone is only using one subpage. 5565 */ 5566 if (npages < page_cnt) 5567 break; 5568 5569 /* 5570 * Is it marked for migration? 5571 */ 5572 if (!PP_ISMIGRATE(pp)) 5573 goto next; 5574 5575 /* 5576 * Determine lgroups that page is being migrated between 5577 */ 5578 pfn = page_pptonum(pp); 5579 if (!IS_P2ALIGNED(pfn, page_cnt)) { 5580 break; 5581 } 5582 from = lgrp_pfn_to_lgrp(pfn); 5583 to = lgrp_mem_choose(seg, addr, pgsz); 5584 5585 /* 5586 * Need to get exclusive lock's to migrate 5587 */ 5588 for (i = 0; i < page_cnt; i++) { 5589 ASSERT(PAGE_LOCKED(ppa[i])); 5590 if (page_pptonum(ppa[i]) != pfn + i || 5591 ppa[i]->p_szc != pszc) { 5592 break; 5593 } 5594 if (!page_tryupgrade(ppa[i])) { 5595 lgrp_stat_add(from->lgrp_id, 5596 LGRP_PM_FAIL_LOCK_PGS, 5597 page_cnt); 5598 break; 5599 } 5600 5601 /* 5602 * Check to see whether we are trying to migrate 5603 * page to lgroup where it is allocated already. 5604 * If so, clear the migrate bit and skip to next 5605 * page. 5606 */ 5607 if (i == 0 && to == from) { 5608 PP_CLRMIGRATE(ppa[0]); 5609 page_downgrade(ppa[0]); 5610 goto next; 5611 } 5612 } 5613 5614 /* 5615 * If all constituent pages couldn't be locked, 5616 * unlock pages locked so far and skip to next page. 
5617 */ 5618 if (i != page_cnt) { 5619 while (--i != -1) { 5620 page_downgrade(ppa[i]); 5621 } 5622 goto next; 5623 } 5624 5625 (void) page_create_wait(page_cnt, PG_WAIT); 5626 newpp = page_get_replacement_page(pp, to, PGR_SAMESZC); 5627 if (newpp == NULL) { 5628 page_create_putback(page_cnt); 5629 for (i = 0; i < page_cnt; i++) { 5630 page_downgrade(ppa[i]); 5631 } 5632 lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS, 5633 page_cnt); 5634 goto next; 5635 } 5636 ASSERT(newpp->p_szc == pszc); 5637 /* 5638 * Clear migrate bit and relocate page 5639 */ 5640 PP_CLRMIGRATE(pp); 5641 if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) { 5642 panic("page_migrate: page_relocate failed"); 5643 } 5644 ASSERT(page_cnt * PAGESIZE == pgsz); 5645 5646 /* 5647 * Keep stats for number of pages migrated from and to 5648 * each lgroup 5649 */ 5650 lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt); 5651 lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt); 5652 /* 5653 * update the page_t array we were passed in and 5654 * unlink constituent pages of a large page. 5655 */ 5656 for (i = 0; i < page_cnt; ++i, ++pp) { 5657 ASSERT(PAGE_EXCL(newpp)); 5658 ASSERT(newpp->p_szc == pszc); 5659 ppa[i] = newpp; 5660 pp = newpp; 5661 page_sub(&newpp, pp); 5662 page_downgrade(pp); 5663 } 5664 ASSERT(newpp == NULL); 5665 next: 5666 addr += pgsz; 5667 ppa += page_cnt; 5668 npages -= page_cnt; 5669 } 5670 } 5671 5672 ulong_t mem_waiters = 0; 5673 ulong_t max_count = 20; 5674 #define MAX_DELAY 0x1ff 5675 5676 /* 5677 * Check if enough memory is available to proceed. 5678 * Depending on system configuration and how much memory is 5679 * reserved for swap we need to check against two variables. 5680 * E.g. on systems with little physical swap availrmem can be 5681 * a more reliable indicator of how much memory is available. 5682 * On systems with large phys swap freemem can be a better indicator. 5683 * If freemem drops below the threshold level don't return an error 5684 * immediately but wake up pageout to free memory and block. 5685 * This is done a number of times. If pageout is not able to free 5686 * memory within a certain time return an error. 5687 * The same applies for availrmem, but kmem_reap() is used to 5688 * free memory.
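 *
 * Illustrative use only (not lifted from a particular caller): a path that
 * is about to consume npages might gate itself with
 *
 *	if (!page_mem_avail(npages))
 *		return (ENOMEM);
 *
 * relying on this routine to have already woken pageout, run kmem_reap()
 * and waited a bounded time before giving up and returning 0.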
5689 */ 5690 int 5691 page_mem_avail(pgcnt_t npages) 5692 { 5693 ulong_t count; 5694 5695 #if defined(__i386) 5696 if (freemem > desfree + npages && 5697 availrmem > swapfs_reserve + npages && 5698 btop(vmem_size(heap_arena, VMEM_FREE)) > tune.t_minarmem + 5699 npages) 5700 return (1); 5701 #else 5702 if (freemem > desfree + npages && 5703 availrmem > swapfs_reserve + npages) 5704 return (1); 5705 #endif 5706 5707 count = max_count; 5708 atomic_add_long(&mem_waiters, 1); 5709 5710 while (freemem < desfree + npages && --count) { 5711 cv_signal(&proc_pageout->p_cv); 5712 if (delay_sig(hz + (mem_waiters & MAX_DELAY))) { 5713 atomic_add_long(&mem_waiters, -1); 5714 return (0); 5715 } 5716 } 5717 if (count == 0) { 5718 atomic_add_long(&mem_waiters, -1); 5719 return (0); 5720 } 5721 5722 count = max_count; 5723 while (availrmem < swapfs_reserve + npages && --count) { 5724 kmem_reap(); 5725 if (delay_sig(hz + (mem_waiters & MAX_DELAY))) { 5726 atomic_add_long(&mem_waiters, -1); 5727 return (0); 5728 } 5729 } 5730 atomic_add_long(&mem_waiters, -1); 5731 if (count == 0) 5732 return (0); 5733 5734 #if defined(__i386) 5735 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 5736 tune.t_minarmem + npages) 5737 return (0); 5738 #endif 5739 return (1); 5740 } 5741 5742 #define MAX_CNT 60 /* max num of iterations */ 5743 /* 5744 * Reclaim/reserve availrmem for npages. 5745 * If there is not enough memory start reaping seg, kmem caches. 5746 * Start pageout scanner (via page_needfree()). 5747 * Exit after ~ MAX_CNT s regardless of how much memory has been released. 5748 * Note: There is no guarantee that any availrmem will be freed as 5749 * this memory typically is locked (kernel heap) or reserved for swap. 5750 * Also due to memory fragmentation kmem allocator may not be able 5751 * to free any memory (single user allocated buffer will prevent 5752 * freeing slab or a page). 5753 */ 5754 int 5755 page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust) 5756 { 5757 int i = 0; 5758 int ret = 0; 5759 pgcnt_t deficit; 5760 pgcnt_t old_availrmem; 5761 5762 mutex_enter(&freemem_lock); 5763 old_availrmem = availrmem - 1; 5764 while ((availrmem < tune.t_minarmem + npages + epages) && 5765 (old_availrmem < availrmem) && (i++ < MAX_CNT)) { 5766 old_availrmem = availrmem; 5767 deficit = tune.t_minarmem + npages + epages - availrmem; 5768 mutex_exit(&freemem_lock); 5769 page_needfree(deficit); 5770 kmem_reap(); 5771 delay(hz); 5772 page_needfree(-(spgcnt_t)deficit); 5773 mutex_enter(&freemem_lock); 5774 } 5775 5776 if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) { 5777 availrmem -= npages; 5778 ret = 1; 5779 } 5780 5781 mutex_exit(&freemem_lock); 5782 5783 return (ret); 5784 } 5785 5786 /* 5787 * Search the memory segments to locate the desired page. Within a 5788 * segment, pages increase linearly with one page structure per 5789 * physical page frame (size PAGESIZE). The search begins 5790 * with the segment that was accessed last, to take advantage of locality. 5791 * If the hint misses, we start from the beginning of the sorted memseg list 5792 */ 5793 5794 5795 /* 5796 * Some data structures for pfn to pp lookup. 
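 *
 * Each memseg_hash[] slot covers a range of mhash_per_slot pfns and caches
 * the lowest-based memseg that overlaps that range (see build_pfn_hash()
 * below).  The hash is only a hint: page_numtopp_nolock() falls back to a
 * linear walk of the memseg list when the hashed memseg does not contain
 * the pfn.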
5797 */ 5798 ulong_t mhash_per_slot; 5799 struct memseg *memseg_hash[N_MEM_SLOTS]; 5800 5801 page_t * 5802 page_numtopp_nolock(pfn_t pfnum) 5803 { 5804 struct memseg *seg; 5805 page_t *pp; 5806 vm_cpu_data_t *vc; 5807 5808 /* 5809 * We need to disable kernel preemption while referencing the 5810 * cpu_vm_data field in order to prevent us from being switched to 5811 * another cpu and trying to reference it after it has been freed. 5812 * This will keep us on cpu and prevent it from being removed while 5813 * we are still on it. 5814 * 5815 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg 5816 * which is being resued by DR who will flush those references 5817 * before modifying the reused memseg. See memseg_cpu_vm_flush(). 5818 */ 5819 kpreempt_disable(); 5820 vc = CPU->cpu_vm_data; 5821 ASSERT(vc != NULL); 5822 5823 MEMSEG_STAT_INCR(nsearch); 5824 5825 /* Try last winner first */ 5826 if (((seg = vc->vc_pnum_memseg) != NULL) && 5827 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5828 MEMSEG_STAT_INCR(nlastwon); 5829 pp = seg->pages + (pfnum - seg->pages_base); 5830 if (pp->p_pagenum == pfnum) { 5831 kpreempt_enable(); 5832 return ((page_t *)pp); 5833 } 5834 } 5835 5836 /* Else Try hash */ 5837 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 5838 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5839 MEMSEG_STAT_INCR(nhashwon); 5840 vc->vc_pnum_memseg = seg; 5841 pp = seg->pages + (pfnum - seg->pages_base); 5842 if (pp->p_pagenum == pfnum) { 5843 kpreempt_enable(); 5844 return ((page_t *)pp); 5845 } 5846 } 5847 5848 /* Else Brute force */ 5849 for (seg = memsegs; seg != NULL; seg = seg->next) { 5850 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5851 vc->vc_pnum_memseg = seg; 5852 pp = seg->pages + (pfnum - seg->pages_base); 5853 if (pp->p_pagenum == pfnum) { 5854 kpreempt_enable(); 5855 return ((page_t *)pp); 5856 } 5857 } 5858 } 5859 vc->vc_pnum_memseg = NULL; 5860 kpreempt_enable(); 5861 MEMSEG_STAT_INCR(nnotfound); 5862 return ((page_t *)NULL); 5863 5864 } 5865 5866 struct memseg * 5867 page_numtomemseg_nolock(pfn_t pfnum) 5868 { 5869 struct memseg *seg; 5870 page_t *pp; 5871 5872 /* 5873 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg 5874 * which is being resued by DR who will flush those references 5875 * before modifying the reused memseg. See memseg_cpu_vm_flush(). 5876 */ 5877 kpreempt_disable(); 5878 /* Try hash */ 5879 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 5880 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5881 pp = seg->pages + (pfnum - seg->pages_base); 5882 if (pp->p_pagenum == pfnum) { 5883 kpreempt_enable(); 5884 return (seg); 5885 } 5886 } 5887 5888 /* Else Brute force */ 5889 for (seg = memsegs; seg != NULL; seg = seg->next) { 5890 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5891 pp = seg->pages + (pfnum - seg->pages_base); 5892 if (pp->p_pagenum == pfnum) { 5893 kpreempt_enable(); 5894 return (seg); 5895 } 5896 } 5897 } 5898 kpreempt_enable(); 5899 return ((struct memseg *)NULL); 5900 } 5901 5902 /* 5903 * Given a page and a count return the page struct that is 5904 * n structs away from the current one in the global page 5905 * list. 5906 * 5907 * This function wraps to the first page upon 5908 * reaching the end of the memseg list. 
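 *
 * The usual full-scan idiom built on top of page_first()/page_next() (see
 * page_busy() and page_invalidate_pages() earlier in this file) is
 *
 *	pp = page0 = page_first();
 *	do {
 *		... examine pp ...
 *	} while ((pp = page_next(pp)) != page0);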
5909 */ 5910 page_t * 5911 page_nextn(page_t *pp, ulong_t n) 5912 { 5913 struct memseg *seg; 5914 page_t *ppn; 5915 vm_cpu_data_t *vc; 5916 5917 /* 5918 * We need to disable kernel preemption while referencing the 5919 * cpu_vm_data field in order to prevent us from being switched to 5920 * another cpu and trying to reference it after it has been freed. 5921 * This will keep us on cpu and prevent it from being removed while 5922 * we are still on it. 5923 * 5924 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg 5925 * which is being resued by DR who will flush those references 5926 * before modifying the reused memseg. See memseg_cpu_vm_flush(). 5927 */ 5928 kpreempt_disable(); 5929 vc = (vm_cpu_data_t *)CPU->cpu_vm_data; 5930 5931 ASSERT(vc != NULL); 5932 5933 if (((seg = vc->vc_pnext_memseg) == NULL) || 5934 (seg->pages_base == seg->pages_end) || 5935 !(pp >= seg->pages && pp < seg->epages)) { 5936 5937 for (seg = memsegs; seg; seg = seg->next) { 5938 if (pp >= seg->pages && pp < seg->epages) 5939 break; 5940 } 5941 5942 if (seg == NULL) { 5943 /* Memory delete got in, return something valid. */ 5944 /* TODO: fix me. */ 5945 seg = memsegs; 5946 pp = seg->pages; 5947 } 5948 } 5949 5950 /* check for wraparound - possible if n is large */ 5951 while ((ppn = (pp + n)) >= seg->epages || ppn < pp) { 5952 n -= seg->epages - pp; 5953 seg = seg->next; 5954 if (seg == NULL) 5955 seg = memsegs; 5956 pp = seg->pages; 5957 } 5958 vc->vc_pnext_memseg = seg; 5959 kpreempt_enable(); 5960 return (ppn); 5961 } 5962 5963 /* 5964 * Initialize for a loop using page_next_scan_large(). 5965 */ 5966 page_t * 5967 page_next_scan_init(void **cookie) 5968 { 5969 ASSERT(cookie != NULL); 5970 *cookie = (void *)memsegs; 5971 return ((page_t *)memsegs->pages); 5972 } 5973 5974 /* 5975 * Return the next page in a scan of page_t's, assuming we want 5976 * to skip over sub-pages within larger page sizes. 5977 * 5978 * The cookie is used to keep track of the current memseg. 5979 */ 5980 page_t * 5981 page_next_scan_large( 5982 page_t *pp, 5983 ulong_t *n, 5984 void **cookie) 5985 { 5986 struct memseg *seg = (struct memseg *)*cookie; 5987 page_t *new_pp; 5988 ulong_t cnt; 5989 pfn_t pfn; 5990 5991 5992 /* 5993 * get the count of page_t's to skip based on the page size 5994 */ 5995 ASSERT(pp != NULL); 5996 if (pp->p_szc == 0) { 5997 cnt = 1; 5998 } else { 5999 pfn = page_pptonum(pp); 6000 cnt = page_get_pagecnt(pp->p_szc); 6001 cnt -= pfn & (cnt - 1); 6002 } 6003 *n += cnt; 6004 new_pp = pp + cnt; 6005 6006 /* 6007 * Catch if we went past the end of the current memory segment. If so, 6008 * just move to the next segment with pages. 6009 */ 6010 if (new_pp >= seg->epages || seg->pages_base == seg->pages_end) { 6011 do { 6012 seg = seg->next; 6013 if (seg == NULL) 6014 seg = memsegs; 6015 } while (seg->pages_base == seg->pages_end); 6016 new_pp = seg->pages; 6017 *cookie = (void *)seg; 6018 } 6019 6020 return (new_pp); 6021 } 6022 6023 6024 /* 6025 * Returns next page in list. Note: this function wraps 6026 * to the first page in the list upon reaching the end 6027 * of the list. Callers should be aware of this fact. 6028 */ 6029 6030 /* We should change this be a #define */ 6031 6032 page_t * 6033 page_next(page_t *pp) 6034 { 6035 return (page_nextn(pp, 1)); 6036 } 6037 6038 page_t * 6039 page_first() 6040 { 6041 return ((page_t *)memsegs->pages); 6042 } 6043 6044 6045 /* 6046 * This routine is called at boot with the initial memory configuration 6047 * and when memory is added or removed. 
6048 */ 6049 void 6050 build_pfn_hash() 6051 { 6052 pfn_t cur; 6053 pgcnt_t index; 6054 struct memseg *pseg; 6055 int i; 6056 6057 /* 6058 * Clear memseg_hash array. 6059 * Since memory add/delete is designed to operate concurrently 6060 * with normal operation, the hash rebuild must be able to run 6061 * concurrently with page_numtopp_nolock(). To support this 6062 * functionality, assignments to memseg_hash array members must 6063 * be done atomically. 6064 * 6065 * NOTE: bzero() does not currently guarantee this for kernel 6066 * threads, and cannot be used here. 6067 */ 6068 for (i = 0; i < N_MEM_SLOTS; i++) 6069 memseg_hash[i] = NULL; 6070 6071 hat_kpm_mseghash_clear(N_MEM_SLOTS); 6072 6073 /* 6074 * Physmax is the last valid pfn. 6075 */ 6076 mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT; 6077 for (pseg = memsegs; pseg != NULL; pseg = pseg->next) { 6078 index = MEMSEG_PFN_HASH(pseg->pages_base); 6079 cur = pseg->pages_base; 6080 do { 6081 if (index >= N_MEM_SLOTS) 6082 index = MEMSEG_PFN_HASH(cur); 6083 6084 if (memseg_hash[index] == NULL || 6085 memseg_hash[index]->pages_base > pseg->pages_base) { 6086 memseg_hash[index] = pseg; 6087 hat_kpm_mseghash_update(index, pseg); 6088 } 6089 cur += mhash_per_slot; 6090 index++; 6091 } while (cur < pseg->pages_end); 6092 } 6093 } 6094 6095 /* 6096 * Return the pagenum for the pp 6097 */ 6098 pfn_t 6099 page_pptonum(page_t *pp) 6100 { 6101 return (pp->p_pagenum); 6102 } 6103 6104 /* 6105 * interface to the referenced and modified etc bits 6106 * in the PSM part of the page struct 6107 * when no locking is desired. 6108 */ 6109 void 6110 page_set_props(page_t *pp, uint_t flags) 6111 { 6112 ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0); 6113 pp->p_nrm |= (uchar_t)flags; 6114 } 6115 6116 void 6117 page_clr_all_props(page_t *pp) 6118 { 6119 pp->p_nrm = 0; 6120 } 6121 6122 /* 6123 * Clear p_lckcnt and p_cowcnt, adjusting freemem if required. 6124 */ 6125 int 6126 page_clear_lck_cow(page_t *pp, int adjust) 6127 { 6128 int f_amount; 6129 6130 ASSERT(PAGE_EXCL(pp)); 6131 6132 /* 6133 * The page_struct_lock need not be acquired here since 6134 * we require the caller hold the page exclusively locked. 6135 */ 6136 f_amount = 0; 6137 if (pp->p_lckcnt) { 6138 f_amount = 1; 6139 pp->p_lckcnt = 0; 6140 } 6141 if (pp->p_cowcnt) { 6142 f_amount += pp->p_cowcnt; 6143 pp->p_cowcnt = 0; 6144 } 6145 6146 if (adjust && f_amount) { 6147 mutex_enter(&freemem_lock); 6148 availrmem += f_amount; 6149 mutex_exit(&freemem_lock); 6150 } 6151 6152 return (f_amount); 6153 } 6154 6155 /* 6156 * The following functions is called from free_vp_pages() 6157 * for an inexact estimate of a newly free'd page... 6158 */ 6159 ulong_t 6160 page_share_cnt(page_t *pp) 6161 { 6162 return (hat_page_getshare(pp)); 6163 } 6164 6165 int 6166 page_isshared(page_t *pp) 6167 { 6168 return (hat_page_checkshare(pp, 1)); 6169 } 6170 6171 int 6172 page_isfree(page_t *pp) 6173 { 6174 return (PP_ISFREE(pp)); 6175 } 6176 6177 int 6178 page_isref(page_t *pp) 6179 { 6180 return (hat_page_getattr(pp, P_REF)); 6181 } 6182 6183 int 6184 page_ismod(page_t *pp) 6185 { 6186 return (hat_page_getattr(pp, P_MOD)); 6187 } 6188 6189 /* 6190 * The following code all currently relates to the page capture logic: 6191 * 6192 * This logic is used for cases where there is a desire to claim a certain 6193 * physical page in the system for the caller. 
As it may not be possible 6194 * to capture the page immediately, the p_toxic bits are used in the page 6195 * structure to indicate that someone wants to capture this page. When the 6196 * page gets unlocked, the toxic flag will be noted and an attempt to capture 6197 * the page will be made. If it is successful, the original caller's callback 6198 * will be called with the page to do with it what they please. 6199 * 6200 * There is also an async thread which wakes up occasionally to attempt to 6201 * capture pages which have the capture bit set. All of the pages which 6202 * need to be captured asynchronously have been inserted into the 6203 * page_capture_hash and thus this thread walks that hash list. Items in the 6204 * hash have an expiration time so this thread handles that as well by removing 6205 * the item from the hash if it has expired. 6206 * 6207 * Some important things to note are: 6208 * - if the PR_CAPTURE bit is set on a page, then the page is in the 6209 * page_capture_hash. The page_capture_hash_head.pchh_mutex is needed 6210 * to set and clear this bit, and only while that lock is held can an 6211 * entry be added to or removed from the hash. 6212 * - the PR_CAPTURE bit can only be set and cleared while holding the 6213 * page_capture_hash_head.pchh_mutex 6214 * - the t_flag field of the thread struct is used with the T_CAPTURING 6215 * flag to prevent recursion while dealing with large pages. 6216 * - pages which need to be retired never expire on the page_capture_hash. 6217 */ 6218 6219 static void page_capture_thread(void); 6220 static kthread_t *pc_thread_id; 6221 kcondvar_t pc_cv; 6222 static kmutex_t pc_thread_mutex; 6223 static clock_t pc_thread_shortwait; 6224 static clock_t pc_thread_longwait; 6225 static int pc_thread_retry; 6226 6227 struct page_capture_callback pc_cb[PC_NUM_CALLBACKS]; 6228 6229 /* Note that this is a circular linked list */ 6230 typedef struct page_capture_hash_bucket { 6231 page_t *pp; 6232 uint_t szc; 6233 uint_t flags; 6234 clock_t expires; /* lbolt at which this request expires. */ 6235 void *datap; /* Cached data passed in for callback */ 6236 struct page_capture_hash_bucket *next; 6237 struct page_capture_hash_bucket *prev; 6238 } page_capture_hash_bucket_t; 6239 6240 /* 6241 * Each hash bucket will have its own mutex and two lists which are: 6242 * active (0): represents requests which have not been processed by 6243 * the page_capture async thread yet. 6244 * walked (1): represents requests which have been processed by the 6245 * page_capture async thread within its given walk of this bucket. 6246 * 6247 * These are all needed so that we can synchronize all async page_capture 6248 * events. When the async thread moves to a new bucket, it will append the 6249 * walked list to the active list and walk each item one at a time, moving it 6250 * from the active list to the walked list. Thus if there is an async request 6251 * outstanding for a given page, it will always be in one of the two lists. 6252 * New requests will always be added to the active list. 6253 * If we are not able to capture a page before the request expires, we free 6254 * up the request structure, which indicates to page_capture that there is 6255 * no longer a need for the given page, and clear the PR_CAPTURE flag if 6256 * possible.
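 *
 * Schematically, one pass of the async thread over a bucket therefore looks
 * like (condensed from the description above; page_capture_thread() is the
 * authoritative version):
 *
 *	splice the walked list back onto the active list;
 *	for each request on the active list:
 *		move it to the walked list;
 *		drop it if it has expired, otherwise attempt the capture;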
6257 */ 6258 typedef struct page_capture_hash_head { 6259 kmutex_t pchh_mutex; 6260 uint_t num_pages; 6261 page_capture_hash_bucket_t lists[2]; /* sentinel nodes */ 6262 } page_capture_hash_head_t; 6263 6264 #ifdef DEBUG 6265 #define NUM_PAGE_CAPTURE_BUCKETS 4 6266 #else 6267 #define NUM_PAGE_CAPTURE_BUCKETS 64 6268 #endif 6269 6270 page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS]; 6271 6272 /* for now use a very simple hash based upon the size of a page struct */ 6273 #define PAGE_CAPTURE_HASH(pp) \ 6274 ((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1))) 6275 6276 extern pgcnt_t swapfs_minfree; 6277 6278 int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap); 6279 6280 /* 6281 * a callback function is required for page capture requests. 6282 */ 6283 void 6284 page_capture_register_callback(uint_t index, clock_t duration, 6285 int (*cb_func)(page_t *, void *, uint_t)) 6286 { 6287 ASSERT(pc_cb[index].cb_active == 0); 6288 ASSERT(cb_func != NULL); 6289 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 6290 pc_cb[index].duration = duration; 6291 pc_cb[index].cb_func = cb_func; 6292 pc_cb[index].cb_active = 1; 6293 rw_exit(&pc_cb[index].cb_rwlock); 6294 } 6295 6296 void 6297 page_capture_unregister_callback(uint_t index) 6298 { 6299 int i, j; 6300 struct page_capture_hash_bucket *bp1; 6301 struct page_capture_hash_bucket *bp2; 6302 struct page_capture_hash_bucket *head = NULL; 6303 uint_t flags = (1 << index); 6304 6305 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 6306 ASSERT(pc_cb[index].cb_active == 1); 6307 pc_cb[index].duration = 0; /* Paranoia */ 6308 pc_cb[index].cb_func = NULL; /* Paranoia */ 6309 pc_cb[index].cb_active = 0; 6310 rw_exit(&pc_cb[index].cb_rwlock); 6311 6312 /* 6313 * Just move all the entries to a private list which we can walk 6314 * through without the need to hold any locks. 6315 * No more requests can get added to the hash lists for this consumer 6316 * as the cb_active field for the callback has been cleared. 6317 */ 6318 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 6319 mutex_enter(&page_capture_hash[i].pchh_mutex); 6320 for (j = 0; j < 2; j++) { 6321 bp1 = page_capture_hash[i].lists[j].next; 6322 /* walk through all but first (sentinel) element */ 6323 while (bp1 != &page_capture_hash[i].lists[j]) { 6324 bp2 = bp1; 6325 if (bp2->flags & flags) { 6326 bp1 = bp2->next; 6327 bp1->prev = bp2->prev; 6328 bp2->prev->next = bp1; 6329 bp2->next = head; 6330 head = bp2; 6331 /* 6332 * Clear the PR_CAPTURE bit as we 6333 * hold appropriate locks here. 6334 */ 6335 page_clrtoxic(head->pp, PR_CAPTURE); 6336 page_capture_hash[i].num_pages--; 6337 continue; 6338 } 6339 bp1 = bp1->next; 6340 } 6341 } 6342 mutex_exit(&page_capture_hash[i].pchh_mutex); 6343 } 6344 6345 while (head != NULL) { 6346 bp1 = head; 6347 head = head->next; 6348 kmem_free(bp1, sizeof (*bp1)); 6349 } 6350 } 6351 6352 6353 /* 6354 * Find pp in the active list and move it to the walked list if it 6355 * exists. 6356 * Note that most often pp should be at the front of the active list 6357 * as it is currently used and thus there is no other sort of optimization 6358 * being done here as this is a linked list data structure. 6359 * Returns 1 on successful move or 0 if page could not be found. 
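 * Called from page_capture_async() when a capture attempt fails or the page
 * lock cannot be taken, so that the entry is revisited on the next walk of
 * this bucket.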
6360 */ 6361 static int 6362 page_capture_move_to_walked(page_t *pp) 6363 { 6364 page_capture_hash_bucket_t *bp; 6365 int index; 6366 6367 index = PAGE_CAPTURE_HASH(pp); 6368 6369 mutex_enter(&page_capture_hash[index].pchh_mutex); 6370 bp = page_capture_hash[index].lists[0].next; 6371 while (bp != &page_capture_hash[index].lists[0]) { 6372 if (bp->pp == pp) { 6373 /* Remove from old list */ 6374 bp->next->prev = bp->prev; 6375 bp->prev->next = bp->next; 6376 6377 /* Add to new list */ 6378 bp->next = page_capture_hash[index].lists[1].next; 6379 bp->prev = &page_capture_hash[index].lists[1]; 6380 page_capture_hash[index].lists[1].next = bp; 6381 bp->next->prev = bp; 6382 mutex_exit(&page_capture_hash[index].pchh_mutex); 6383 6384 return (1); 6385 } 6386 bp = bp->next; 6387 } 6388 mutex_exit(&page_capture_hash[index].pchh_mutex); 6389 return (0); 6390 } 6391 6392 /* 6393 * Add a new entry to the page capture hash. The only case where a new 6394 * entry is not added is when the page capture consumer is no longer registered. 6395 * In this case, we'll silently not add the page to the hash. We know that 6396 * page retire will always be registered for the case where we are currently 6397 * unretiring a page and thus there are no conflicts. 6398 */ 6399 static void 6400 page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap) 6401 { 6402 page_capture_hash_bucket_t *bp1; 6403 page_capture_hash_bucket_t *bp2; 6404 int index; 6405 int cb_index; 6406 int i; 6407 #ifdef DEBUG 6408 page_capture_hash_bucket_t *tp1; 6409 int l; 6410 #endif 6411 6412 ASSERT(!(flags & CAPTURE_ASYNC)); 6413 6414 bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP); 6415 6416 bp1->pp = pp; 6417 bp1->szc = szc; 6418 bp1->flags = flags; 6419 bp1->datap = datap; 6420 6421 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6422 if ((flags >> cb_index) & 1) { 6423 break; 6424 } 6425 } 6426 6427 ASSERT(cb_index != PC_NUM_CALLBACKS); 6428 6429 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6430 if (pc_cb[cb_index].cb_active) { 6431 if (pc_cb[cb_index].duration == -1) { 6432 bp1->expires = (clock_t)-1; 6433 } else { 6434 bp1->expires = ddi_get_lbolt() + 6435 pc_cb[cb_index].duration; 6436 } 6437 } else { 6438 /* There's no callback registered so don't add to the hash */ 6439 rw_exit(&pc_cb[cb_index].cb_rwlock); 6440 kmem_free(bp1, sizeof (*bp1)); 6441 return; 6442 } 6443 6444 index = PAGE_CAPTURE_HASH(pp); 6445 6446 /* 6447 * Only allow capture flag to be modified under this mutex. 6448 * Prevents multiple entries for same page getting added. 
6449 */ 6450 mutex_enter(&page_capture_hash[index].pchh_mutex); 6451 6452 /* 6453 * if not already on the hash, set capture bit and add to the hash 6454 */ 6455 if (!(pp->p_toxic & PR_CAPTURE)) { 6456 #ifdef DEBUG 6457 /* Check for duplicate entries */ 6458 for (l = 0; l < 2; l++) { 6459 tp1 = page_capture_hash[index].lists[l].next; 6460 while (tp1 != &page_capture_hash[index].lists[l]) { 6461 if (tp1->pp == pp) { 6462 panic("page pp 0x%p already on hash " 6463 "at 0x%p\n", 6464 (void *)pp, (void *)tp1); 6465 } 6466 tp1 = tp1->next; 6467 } 6468 } 6469 6470 #endif 6471 page_settoxic(pp, PR_CAPTURE); 6472 bp1->next = page_capture_hash[index].lists[0].next; 6473 bp1->prev = &page_capture_hash[index].lists[0]; 6474 bp1->next->prev = bp1; 6475 page_capture_hash[index].lists[0].next = bp1; 6476 page_capture_hash[index].num_pages++; 6477 if (flags & CAPTURE_RETIRE) { 6478 page_retire_incr_pend_count(datap); 6479 } 6480 mutex_exit(&page_capture_hash[index].pchh_mutex); 6481 rw_exit(&pc_cb[cb_index].cb_rwlock); 6482 cv_signal(&pc_cv); 6483 return; 6484 } 6485 6486 /* 6487 * A page retire request will replace any other request. 6488 * A second physmem request which is for a different process than 6489 * the currently registered one will be dropped as there is 6490 * no way to hold the private data for both calls. 6491 * In the future, once there are more callers, this will have to 6492 * be worked out better as there needs to be private storage for 6493 * at least each type of caller (maybe have datap be an array of 6494 * *void's so that we can index based upon callers index). 6495 */ 6496 6497 /* walk hash list to update expire time */ 6498 for (i = 0; i < 2; i++) { 6499 bp2 = page_capture_hash[index].lists[i].next; 6500 while (bp2 != &page_capture_hash[index].lists[i]) { 6501 if (bp2->pp == pp) { 6502 if (flags & CAPTURE_RETIRE) { 6503 if (!(bp2->flags & CAPTURE_RETIRE)) { 6504 page_retire_incr_pend_count( 6505 datap); 6506 bp2->flags = flags; 6507 bp2->expires = bp1->expires; 6508 bp2->datap = datap; 6509 } 6510 } else { 6511 ASSERT(flags & CAPTURE_PHYSMEM); 6512 if (!(bp2->flags & CAPTURE_RETIRE) && 6513 (datap == bp2->datap)) { 6514 bp2->expires = bp1->expires; 6515 } 6516 } 6517 mutex_exit(&page_capture_hash[index]. 6518 pchh_mutex); 6519 rw_exit(&pc_cb[cb_index].cb_rwlock); 6520 kmem_free(bp1, sizeof (*bp1)); 6521 return; 6522 } 6523 bp2 = bp2->next; 6524 } 6525 } 6526 6527 /* 6528 * the PR_CAPTURE flag is protected by the page_capture_hash mutexes 6529 * and thus it either has to be set or not set and can't change 6530 * while holding the mutex above. 6531 */ 6532 panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n", 6533 (void *)pp); 6534 } 6535 6536 /* 6537 * We have a page in our hands, lets try and make it ours by turning 6538 * it into a clean page like it had just come off the freelists. 6539 * 6540 * Returns 0 on success, with the page still EXCL locked. 
6541 * On failure, the page will be unlocked, and returns EAGAIN 6542 */ 6543 static int 6544 page_capture_clean_page(page_t *pp) 6545 { 6546 page_t *newpp; 6547 int skip_unlock = 0; 6548 spgcnt_t count; 6549 page_t *tpp; 6550 int ret = 0; 6551 int extra; 6552 6553 ASSERT(PAGE_EXCL(pp)); 6554 ASSERT(!PP_RETIRED(pp)); 6555 ASSERT(curthread->t_flag & T_CAPTURING); 6556 6557 if (PP_ISFREE(pp)) { 6558 if (!page_reclaim(pp, NULL)) { 6559 skip_unlock = 1; 6560 ret = EAGAIN; 6561 goto cleanup; 6562 } 6563 ASSERT(pp->p_szc == 0); 6564 if (pp->p_vnode != NULL) { 6565 /* 6566 * Since this page came from the 6567 * cachelist, we must destroy the 6568 * old vnode association. 6569 */ 6570 page_hashout(pp, NULL); 6571 } 6572 goto cleanup; 6573 } 6574 6575 /* 6576 * If we know page_relocate will fail, skip it 6577 * It could still fail due to a UE on another page but we 6578 * can't do anything about that. 6579 */ 6580 if (pp->p_toxic & PR_UE) { 6581 goto skip_relocate; 6582 } 6583 6584 /* 6585 * It's possible that pages can not have a vnode as fsflush comes 6586 * through and cleans up these pages. It's ugly but that's how it is. 6587 */ 6588 if (pp->p_vnode == NULL) { 6589 goto skip_relocate; 6590 } 6591 6592 /* 6593 * Page was not free, so lets try to relocate it. 6594 * page_relocate only works with root pages, so if this is not a root 6595 * page, we need to demote it to try and relocate it. 6596 * Unfortunately this is the best we can do right now. 6597 */ 6598 newpp = NULL; 6599 if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) { 6600 if (page_try_demote_pages(pp) == 0) { 6601 ret = EAGAIN; 6602 goto cleanup; 6603 } 6604 } 6605 ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL); 6606 if (ret == 0) { 6607 page_t *npp; 6608 /* unlock the new page(s) */ 6609 while (count-- > 0) { 6610 ASSERT(newpp != NULL); 6611 npp = newpp; 6612 page_sub(&newpp, npp); 6613 page_unlock(npp); 6614 } 6615 ASSERT(newpp == NULL); 6616 /* 6617 * Check to see if the page we have is too large. 6618 * If so, demote it freeing up the extra pages. 6619 */ 6620 if (pp->p_szc > 0) { 6621 /* For now demote extra pages to szc == 0 */ 6622 extra = page_get_pagecnt(pp->p_szc) - 1; 6623 while (extra > 0) { 6624 tpp = pp->p_next; 6625 page_sub(&pp, tpp); 6626 tpp->p_szc = 0; 6627 page_free(tpp, 1); 6628 extra--; 6629 } 6630 /* Make sure to set our page to szc 0 as well */ 6631 ASSERT(pp->p_next == pp && pp->p_prev == pp); 6632 pp->p_szc = 0; 6633 } 6634 goto cleanup; 6635 } else if (ret == EIO) { 6636 ret = EAGAIN; 6637 goto cleanup; 6638 } else { 6639 /* 6640 * Need to reset return type as we failed to relocate the page 6641 * but that does not mean that some of the next steps will not 6642 * work. 6643 */ 6644 ret = 0; 6645 } 6646 6647 skip_relocate: 6648 6649 if (pp->p_szc > 0) { 6650 if (page_try_demote_pages(pp) == 0) { 6651 ret = EAGAIN; 6652 goto cleanup; 6653 } 6654 } 6655 6656 ASSERT(pp->p_szc == 0); 6657 6658 if (hat_ismod(pp)) { 6659 ret = EAGAIN; 6660 goto cleanup; 6661 } 6662 if (PP_ISKAS(pp)) { 6663 ret = EAGAIN; 6664 goto cleanup; 6665 } 6666 if (pp->p_lckcnt || pp->p_cowcnt) { 6667 ret = EAGAIN; 6668 goto cleanup; 6669 } 6670 6671 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 6672 ASSERT(!hat_page_is_mapped(pp)); 6673 6674 if (hat_ismod(pp)) { 6675 /* 6676 * This is a semi-odd case as the page is now modified but not 6677 * mapped as we just unloaded the mappings above. 
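		 * (hat_pageunload() folds the hardware mod bit back into the
		 * page as it tears down the mappings, which is how a page can
		 * show up as modified here with no mappings left.)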
6678 */ 6679 ret = EAGAIN; 6680 goto cleanup; 6681 } 6682 if (pp->p_vnode != NULL) { 6683 page_hashout(pp, NULL); 6684 } 6685 6686 /* 6687 * At this point, the page should be in a clean state and 6688 * we can do whatever we want with it. 6689 */ 6690 6691 cleanup: 6692 if (ret != 0) { 6693 if (!skip_unlock) { 6694 page_unlock(pp); 6695 } 6696 } else { 6697 ASSERT(pp->p_szc == 0); 6698 ASSERT(PAGE_EXCL(pp)); 6699 6700 pp->p_next = pp; 6701 pp->p_prev = pp; 6702 } 6703 return (ret); 6704 } 6705 6706 /* 6707 * Various callers of page_trycapture() can have different restrictions upon 6708 * what memory they have access to. 6709 * Returns 0 on success, with the following error codes on failure: 6710 * EPERM - The requested page is long term locked, and thus repeated 6711 * requests to capture this page will likely fail. 6712 * ENOMEM - There was not enough free memory in the system to safely 6713 * map the requested page. 6714 * ENOENT - The requested page was inside the kernel cage, and the 6715 * PHYSMEM_CAGE flag was not set. 6716 */ 6717 int 6718 page_capture_pre_checks(page_t *pp, uint_t flags) 6719 { 6720 ASSERT(pp != NULL); 6721 6722 #if defined(__sparc) 6723 if (pp->p_vnode == &promvp) { 6724 return (EPERM); 6725 } 6726 6727 if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE) && 6728 (flags & CAPTURE_PHYSMEM)) { 6729 return (ENOENT); 6730 } 6731 6732 if (PP_ISNORELOCKERNEL(pp)) { 6733 return (EPERM); 6734 } 6735 #else 6736 if (PP_ISKAS(pp)) { 6737 return (EPERM); 6738 } 6739 #endif /* __sparc */ 6740 6741 /* only physmem currently has the restrictions checked below */ 6742 if (!(flags & CAPTURE_PHYSMEM)) { 6743 return (0); 6744 } 6745 6746 if (availrmem < swapfs_minfree) { 6747 /* 6748 * We won't try to capture this page as we are 6749 * running low on memory. 6750 */ 6751 return (ENOMEM); 6752 } 6753 return (0); 6754 } 6755 6756 /* 6757 * Once we have a page in our mits, go ahead and complete the capture 6758 * operation. 6759 * Returns 1 on failure where page is no longer needed 6760 * Returns 0 on success 6761 * Returns -1 if there was a transient failure. 6762 * Failure cases must release the SE_EXCL lock on pp (usually via page_free). 6763 */ 6764 int 6765 page_capture_take_action(page_t *pp, uint_t flags, void *datap) 6766 { 6767 int cb_index; 6768 int ret = 0; 6769 page_capture_hash_bucket_t *bp1; 6770 page_capture_hash_bucket_t *bp2; 6771 int index; 6772 int found = 0; 6773 int i; 6774 6775 ASSERT(PAGE_EXCL(pp)); 6776 ASSERT(curthread->t_flag & T_CAPTURING); 6777 6778 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6779 if ((flags >> cb_index) & 1) { 6780 break; 6781 } 6782 } 6783 ASSERT(cb_index < PC_NUM_CALLBACKS); 6784 6785 /* 6786 * Remove the entry from the page_capture hash, but don't free it yet 6787 * as we may need to put it back. 6788 * Since we own the page at this point in time, we should find it 6789 * in the hash if this is an ASYNC call. If we don't it's likely 6790 * that the page_capture_async() thread decided that this request 6791 * had expired, in which case we just continue on. 
6792 */ 6793 if (flags & CAPTURE_ASYNC) { 6794 6795 index = PAGE_CAPTURE_HASH(pp); 6796 6797 mutex_enter(&page_capture_hash[index].pchh_mutex); 6798 for (i = 0; i < 2 && !found; i++) { 6799 bp1 = page_capture_hash[index].lists[i].next; 6800 while (bp1 != &page_capture_hash[index].lists[i]) { 6801 if (bp1->pp == pp) { 6802 bp1->next->prev = bp1->prev; 6803 bp1->prev->next = bp1->next; 6804 page_capture_hash[index].num_pages--; 6805 page_clrtoxic(pp, PR_CAPTURE); 6806 found = 1; 6807 break; 6808 } 6809 bp1 = bp1->next; 6810 } 6811 } 6812 mutex_exit(&page_capture_hash[index].pchh_mutex); 6813 } 6814 6815 /* Synchronize with the unregister func. */ 6816 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6817 if (!pc_cb[cb_index].cb_active) { 6818 page_free(pp, 1); 6819 rw_exit(&pc_cb[cb_index].cb_rwlock); 6820 if (found) { 6821 kmem_free(bp1, sizeof (*bp1)); 6822 } 6823 return (1); 6824 } 6825 6826 /* 6827 * We need to remove the entry from the page capture hash and turn off 6828 * the PR_CAPTURE bit before calling the callback. We'll need to cache 6829 * the entry here, and then based upon the return value, cleanup 6830 * appropriately or re-add it to the hash, making sure that someone else 6831 * hasn't already done so. 6832 * It should be rare for the callback to fail and thus it's ok for 6833 * the failure path to be a bit complicated as the success path is 6834 * cleaner and the locking rules are easier to follow. 6835 */ 6836 6837 ret = pc_cb[cb_index].cb_func(pp, datap, flags); 6838 6839 rw_exit(&pc_cb[cb_index].cb_rwlock); 6840 6841 /* 6842 * If this was an ASYNC request, we need to cleanup the hash if the 6843 * callback was successful or if the request was no longer valid. 6844 * For non-ASYNC requests, we return failure to map and the caller 6845 * will take care of adding the request to the hash. 6846 * Note also that the callback itself is responsible for the page 6847 * at this point in time in terms of locking ... The most common 6848 * case for the failure path should just be a page_free. 6849 */ 6850 if (ret >= 0) { 6851 if (found) { 6852 if (bp1->flags & CAPTURE_RETIRE) { 6853 page_retire_decr_pend_count(datap); 6854 } 6855 kmem_free(bp1, sizeof (*bp1)); 6856 } 6857 return (ret); 6858 } 6859 if (!found) { 6860 return (ret); 6861 } 6862 6863 ASSERT(flags & CAPTURE_ASYNC); 6864 6865 /* 6866 * Check for expiration time first as we can just free it up if it's 6867 * expired. 6868 */ 6869 if (ddi_get_lbolt() > bp1->expires && bp1->expires != -1) { 6870 kmem_free(bp1, sizeof (*bp1)); 6871 return (ret); 6872 } 6873 6874 /* 6875 * The callback failed and there used to be an entry in the hash for 6876 * this page, so we need to add it back to the hash. 6877 */ 6878 mutex_enter(&page_capture_hash[index].pchh_mutex); 6879 if (!(pp->p_toxic & PR_CAPTURE)) { 6880 /* just add bp1 back to head of walked list */ 6881 page_settoxic(pp, PR_CAPTURE); 6882 bp1->next = page_capture_hash[index].lists[1].next; 6883 bp1->prev = &page_capture_hash[index].lists[1]; 6884 bp1->next->prev = bp1; 6885 page_capture_hash[index].lists[1].next = bp1; 6886 page_capture_hash[index].num_pages++; 6887 mutex_exit(&page_capture_hash[index].pchh_mutex); 6888 return (ret); 6889 } 6890 6891 /* 6892 * Otherwise there was a new capture request added to list 6893 * Need to make sure that our original data is represented if 6894 * appropriate. 
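	 * (A new entry can appear here because page_capture_add_hash() may
	 * run as soon as we dropped the hash mutex to call the callback.)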
6895 */ 6896 for (i = 0; i < 2; i++) { 6897 bp2 = page_capture_hash[index].lists[i].next; 6898 while (bp2 != &page_capture_hash[index].lists[i]) { 6899 if (bp2->pp == pp) { 6900 if (bp1->flags & CAPTURE_RETIRE) { 6901 if (!(bp2->flags & CAPTURE_RETIRE)) { 6902 bp2->szc = bp1->szc; 6903 bp2->flags = bp1->flags; 6904 bp2->expires = bp1->expires; 6905 bp2->datap = bp1->datap; 6906 } 6907 } else { 6908 ASSERT(bp1->flags & CAPTURE_PHYSMEM); 6909 if (!(bp2->flags & CAPTURE_RETIRE)) { 6910 bp2->szc = bp1->szc; 6911 bp2->flags = bp1->flags; 6912 bp2->expires = bp1->expires; 6913 bp2->datap = bp1->datap; 6914 } 6915 } 6916 mutex_exit(&page_capture_hash[index]. 6917 pchh_mutex); 6918 kmem_free(bp1, sizeof (*bp1)); 6919 return (ret); 6920 } 6921 bp2 = bp2->next; 6922 } 6923 } 6924 panic("PR_CAPTURE set but not on hash for pp 0x%p\n", (void *)pp); 6925 /*NOTREACHED*/ 6926 } 6927 6928 /* 6929 * Try to capture the given page for the caller specified in the flags 6930 * parameter. The page will either be captured and handed over to the 6931 * appropriate callback, or will be queued up in the page capture hash 6932 * to be captured asynchronously. 6933 * If the current request is due to an async capture, the page must be 6934 * exclusively locked before calling this function. 6935 * Currently szc must be 0 but in the future this should be expandable to 6936 * other page sizes. 6937 * Returns 0 on success, with the following error codes on failure: 6938 * EPERM - The requested page is long term locked, and thus repeated 6939 * requests to capture this page will likely fail. 6940 * ENOMEM - There was not enough free memory in the system to safely 6941 * map the requested page. 6942 * ENOENT - The requested page was inside the kernel cage, and the 6943 * CAPTURE_GET_CAGE flag was not set. 6944 * EAGAIN - The requested page could not be capturead at this point in 6945 * time but future requests will likely work. 6946 * EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED flag 6947 * was not set. 6948 */ 6949 int 6950 page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 6951 { 6952 int ret; 6953 int cb_index; 6954 6955 if (flags & CAPTURE_ASYNC) { 6956 ASSERT(PAGE_EXCL(pp)); 6957 goto async; 6958 } 6959 6960 /* Make sure there's enough availrmem ... */ 6961 ret = page_capture_pre_checks(pp, flags); 6962 if (ret != 0) { 6963 return (ret); 6964 } 6965 6966 if (!page_trylock(pp, SE_EXCL)) { 6967 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6968 if ((flags >> cb_index) & 1) { 6969 break; 6970 } 6971 } 6972 ASSERT(cb_index < PC_NUM_CALLBACKS); 6973 ret = EAGAIN; 6974 /* Special case for retired pages */ 6975 if (PP_RETIRED(pp)) { 6976 if (flags & CAPTURE_GET_RETIRED) { 6977 if (!page_unretire_pp(pp, PR_UNR_TEMP)) { 6978 /* 6979 * Need to set capture bit and add to 6980 * hash so that the page will be 6981 * retired when freed. 
6982 */ 6983 page_capture_add_hash(pp, szc, 6984 CAPTURE_RETIRE, NULL); 6985 ret = 0; 6986 goto own_page; 6987 } 6988 } else { 6989 return (EBUSY); 6990 } 6991 } 6992 page_capture_add_hash(pp, szc, flags, datap); 6993 return (ret); 6994 } 6995 6996 async: 6997 ASSERT(PAGE_EXCL(pp)); 6998 6999 /* Need to check for physmem async requests that availrmem is sane */ 7000 if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) == 7001 (CAPTURE_ASYNC | CAPTURE_PHYSMEM) && 7002 (availrmem < swapfs_minfree)) { 7003 page_unlock(pp); 7004 return (ENOMEM); 7005 } 7006 7007 ret = page_capture_clean_page(pp); 7008 7009 if (ret != 0) { 7010 /* We failed to get the page, so lets add it to the hash */ 7011 if (!(flags & CAPTURE_ASYNC)) { 7012 page_capture_add_hash(pp, szc, flags, datap); 7013 } 7014 return (ret); 7015 } 7016 7017 own_page: 7018 ASSERT(PAGE_EXCL(pp)); 7019 ASSERT(pp->p_szc == 0); 7020 7021 /* Call the callback */ 7022 ret = page_capture_take_action(pp, flags, datap); 7023 7024 if (ret == 0) { 7025 return (0); 7026 } 7027 7028 /* 7029 * Note that in the failure cases from page_capture_take_action, the 7030 * EXCL lock will have already been dropped. 7031 */ 7032 if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) { 7033 page_capture_add_hash(pp, szc, flags, datap); 7034 } 7035 return (EAGAIN); 7036 } 7037 7038 int 7039 page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 7040 { 7041 int ret; 7042 7043 curthread->t_flag |= T_CAPTURING; 7044 ret = page_itrycapture(pp, szc, flags, datap); 7045 curthread->t_flag &= ~T_CAPTURING; /* xor works as we know its set */ 7046 return (ret); 7047 } 7048 7049 /* 7050 * When unlocking a page which has the PR_CAPTURE bit set, this routine 7051 * gets called to try and capture the page. 7052 */ 7053 void 7054 page_unlock_capture(page_t *pp) 7055 { 7056 page_capture_hash_bucket_t *bp; 7057 int index; 7058 int i; 7059 uint_t szc; 7060 uint_t flags = 0; 7061 void *datap; 7062 kmutex_t *mp; 7063 extern vnode_t retired_pages; 7064 7065 /* 7066 * We need to protect against a possible deadlock here where we own 7067 * the vnode page hash mutex and want to acquire it again as there 7068 * are locations in the code, where we unlock a page while holding 7069 * the mutex which can lead to the page being captured and eventually 7070 * end up here. As we may be hashing out the old page and hashing into 7071 * the retire vnode, we need to make sure we don't own them. 7072 * Other callbacks who do hash operations also need to make sure that 7073 * before they hashin to a vnode that they do not currently own the 7074 * vphm mutex otherwise there will be a panic. 7075 */ 7076 if (mutex_owned(page_vnode_mutex(&retired_pages))) { 7077 page_unlock_nocapture(pp); 7078 return; 7079 } 7080 if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) { 7081 page_unlock_nocapture(pp); 7082 return; 7083 } 7084 7085 index = PAGE_CAPTURE_HASH(pp); 7086 7087 mp = &page_capture_hash[index].pchh_mutex; 7088 mutex_enter(mp); 7089 for (i = 0; i < 2; i++) { 7090 bp = page_capture_hash[index].lists[i].next; 7091 while (bp != &page_capture_hash[index].lists[i]) { 7092 if (bp->pp == pp) { 7093 szc = bp->szc; 7094 flags = bp->flags | CAPTURE_ASYNC; 7095 datap = bp->datap; 7096 mutex_exit(mp); 7097 (void) page_trycapture(pp, szc, flags, datap); 7098 return; 7099 } 7100 bp = bp->next; 7101 } 7102 } 7103 7104 /* Failed to find page in hash so clear flags and unlock it. 
*/ 7105 page_clrtoxic(pp, PR_CAPTURE); 7106 page_unlock(pp); 7107 7108 mutex_exit(mp); 7109 } 7110 7111 void 7112 page_capture_init() 7113 { 7114 int i; 7115 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7116 page_capture_hash[i].lists[0].next = 7117 &page_capture_hash[i].lists[0]; 7118 page_capture_hash[i].lists[0].prev = 7119 &page_capture_hash[i].lists[0]; 7120 page_capture_hash[i].lists[1].next = 7121 &page_capture_hash[i].lists[1]; 7122 page_capture_hash[i].lists[1].prev = 7123 &page_capture_hash[i].lists[1]; 7124 } 7125 7126 pc_thread_shortwait = 23 * hz; 7127 pc_thread_longwait = 1201 * hz; 7128 pc_thread_retry = 3; 7129 mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL); 7130 cv_init(&pc_cv, NULL, CV_DEFAULT, NULL); 7131 pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0, 7132 TS_RUN, minclsyspri); 7133 } 7134 7135 /* 7136 * It is necessary to scrub any failing pages prior to reboot in order to 7137 * prevent a latent error trap from occurring on the next boot. 7138 */ 7139 void 7140 page_retire_mdboot() 7141 { 7142 page_t *pp; 7143 int i, j; 7144 page_capture_hash_bucket_t *bp; 7145 7146 /* walk lists looking for pages to scrub */ 7147 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7148 if (page_capture_hash[i].num_pages == 0) 7149 continue; 7150 7151 mutex_enter(&page_capture_hash[i].pchh_mutex); 7152 7153 for (j = 0; j < 2; j++) { 7154 bp = page_capture_hash[i].lists[j].next; 7155 while (bp != &page_capture_hash[i].lists[j]) { 7156 pp = bp->pp; 7157 if (PP_TOXIC(pp)) { 7158 if (page_trylock(pp, SE_EXCL)) { 7159 PP_CLRFREE(pp); 7160 pagescrub(pp, 0, PAGESIZE); 7161 page_unlock(pp); 7162 } 7163 } 7164 bp = bp->next; 7165 } 7166 } 7167 mutex_exit(&page_capture_hash[i].pchh_mutex); 7168 } 7169 } 7170 7171 /* 7172 * Walk the page_capture_hash trying to capture pages and also cleanup old 7173 * entries which have expired. 7174 */ 7175 void 7176 page_capture_async() 7177 { 7178 page_t *pp; 7179 int i; 7180 int ret; 7181 page_capture_hash_bucket_t *bp1, *bp2; 7182 uint_t szc; 7183 uint_t flags; 7184 void *datap; 7185 7186 /* If there are outstanding pages to be captured, get to work */ 7187 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7188 if (page_capture_hash[i].num_pages == 0) 7189 continue; 7190 /* Append list 1 to list 0 and then walk through list 0 */ 7191 mutex_enter(&page_capture_hash[i].pchh_mutex); 7192 bp1 = &page_capture_hash[i].lists[1]; 7193 bp2 = bp1->next; 7194 if (bp1 != bp2) { 7195 bp1->prev->next = page_capture_hash[i].lists[0].next; 7196 bp2->prev = &page_capture_hash[i].lists[0]; 7197 page_capture_hash[i].lists[0].next->prev = bp1->prev; 7198 page_capture_hash[i].lists[0].next = bp2; 7199 bp1->next = bp1; 7200 bp1->prev = bp1; 7201 } 7202 7203 /* list[1] will be empty now */ 7204 7205 bp1 = page_capture_hash[i].lists[0].next; 7206 while (bp1 != &page_capture_hash[i].lists[0]) { 7207 /* Check expiration time */ 7208 if ((ddi_get_lbolt() > bp1->expires && 7209 bp1->expires != -1) || 7210 page_deleted(bp1->pp)) { 7211 page_capture_hash[i].lists[0].next = bp1->next; 7212 bp1->next->prev = 7213 &page_capture_hash[i].lists[0]; 7214 page_capture_hash[i].num_pages--; 7215 7216 /* 7217 * We can safely remove the PR_CAPTURE bit 7218 * without holding the EXCL lock on the page 7219 * as the PR_CAPTURE bit requres that the 7220 * page_capture_hash[].pchh_mutex be held 7221 * to modify it. 
7222 				 */
7223 				page_clrtoxic(bp1->pp, PR_CAPTURE);
7224 				mutex_exit(&page_capture_hash[i].pchh_mutex);
7225 				kmem_free(bp1, sizeof (*bp1));
7226 				mutex_enter(&page_capture_hash[i].pchh_mutex);
7227 				bp1 = page_capture_hash[i].lists[0].next;
7228 				continue;
7229 			}
7230 			pp = bp1->pp;
7231 			szc = bp1->szc;
7232 			flags = bp1->flags;
7233 			datap = bp1->datap;
7234 			mutex_exit(&page_capture_hash[i].pchh_mutex);
7235 			if (page_trylock(pp, SE_EXCL)) {
7236 				ret = page_trycapture(pp, szc,
7237 				    flags | CAPTURE_ASYNC, datap);
7238 			} else {
7239 				ret = 1;	/* move to walked hash */
7240 			}
7241
7242 			if (ret != 0) {
7243 				/* Move to walked hash */
7244 				(void) page_capture_move_to_walked(pp);
7245 			}
7246 			mutex_enter(&page_capture_hash[i].pchh_mutex);
7247 			bp1 = page_capture_hash[i].lists[0].next;
7248 		}
7249
7250 		mutex_exit(&page_capture_hash[i].pchh_mutex);
7251 	}
7252 }
7253
7254 /*
7255 * This function is called by the page_capture_thread, and is needed
7256 * in order to initiate aio cleanup, so that pages used in aio
7257 * will be unlocked and subsequently retired by page_capture_thread.
7258 */
7259 static int
7260 do_aio_cleanup(void)
7261 {
7262 	proc_t *procp;
7263 	int (*aio_cleanup_dr_delete_memory)(proc_t *);
7264 	int cleaned = 0;
7265
7266 	if (modload("sys", "kaio") == -1) {
7267 		cmn_err(CE_WARN, "do_aio_cleanup: cannot load kaio");
7268 		return (0);
7269 	}
7270 	/*
7271 	 * We use the aio_cleanup_dr_delete_memory function to
7272 	 * initiate the actual clean up; this function will wake
7273 	 * up the per-process aio_cleanup_thread.
7274 	 */
7275 	aio_cleanup_dr_delete_memory = (int (*)(proc_t *))
7276 	    modgetsymvalue("aio_cleanup_dr_delete_memory", 0);
7277 	if (aio_cleanup_dr_delete_memory == NULL) {
7278 		cmn_err(CE_WARN,
7279 		    "aio_cleanup_dr_delete_memory not found in kaio");
7280 		return (0);
7281 	}
7282 	mutex_enter(&pidlock);
7283 	for (procp = practive; (procp != NULL); procp = procp->p_next) {
7284 		mutex_enter(&procp->p_lock);
7285 		if (procp->p_aio != NULL) {
7286 			/* cleanup proc's outstanding kaio */
7287 			cleaned += (*aio_cleanup_dr_delete_memory)(procp);
7288 		}
7289 		mutex_exit(&procp->p_lock);
7290 	}
7291 	mutex_exit(&pidlock);
7292 	return (cleaned);
7293 }
7294
7295 /*
7296 * Helper function for page_capture_thread.
7297 */
7298 static void
7299 page_capture_handle_outstanding(void)
7300 {
7301 	int ntry;
7302
7303 	/* Reap pages before attempting to capture pages */
7304 	kmem_reap();
7305
7306 	if ((page_retire_pend_count() > page_retire_pend_kas_count()) &&
7307 	    hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
7308 		/*
7309 		 * Note: Purging only for platforms that support
7310 		 * ISM hat_pageunload() - mainly SPARC. On x86/x64
7311 		 * platforms ISM pages are SE_SHARED locked until destroyed.
7312 		 */
7313
7314 		/* disable and purge seg_pcache */
7315 		(void) seg_p_disable();
7316 		for (ntry = 0; ntry < pc_thread_retry; ntry++) {
7317 			if (!page_retire_pend_count())
7318 				break;
7319 			if (do_aio_cleanup()) {
7320 				/*
7321 				 * allow the apps' cleanup threads
7322 				 * to run
7323 				 */
7324 				delay(pc_thread_shortwait);
7325 			}
7326 			page_capture_async();
7327 		}
7328 		/* reenable seg_pcache */
7329 		seg_p_enable();
7330
7331 		/* completed what can be done. break out */
7332 		return;
7333 	}
7334
7335 	/*
7336 	 * For kernel pages and/or unsupported HAT_DYNAMIC_ISM_UNMAP, reap
7337 	 * and then attempt to capture.
7338 	 */
7339 	seg_preap();
7340 	page_capture_async();
7341 }
7342
7343 /*
7344 * The page_capture_thread loops forever, looking to see if there are
7345 * pages still waiting to be captured.
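 * It is kicked via pc_cv when new requests are queued, and otherwise polls
 * every pc_thread_shortwait ticks while requests are outstanding, or every
 * pc_thread_longwait ticks when the hash is empty.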
7346 */ 7347 static void 7348 page_capture_thread(void) 7349 { 7350 callb_cpr_t c; 7351 int outstanding; 7352 int i; 7353 7354 CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr, "page_capture"); 7355 7356 mutex_enter(&pc_thread_mutex); 7357 for (;;) { 7358 outstanding = 0; 7359 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) 7360 outstanding += page_capture_hash[i].num_pages; 7361 if (outstanding) { 7362 page_capture_handle_outstanding(); 7363 CALLB_CPR_SAFE_BEGIN(&c); 7364 (void) cv_reltimedwait(&pc_cv, &pc_thread_mutex, 7365 pc_thread_shortwait, TR_CLOCK_TICK); 7366 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex); 7367 } else { 7368 CALLB_CPR_SAFE_BEGIN(&c); 7369 (void) cv_reltimedwait(&pc_cv, &pc_thread_mutex, 7370 pc_thread_longwait, TR_CLOCK_TICK); 7371 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex); 7372 } 7373 } 7374 /*NOTREACHED*/ 7375 } 7376 /* 7377 * Attempt to locate a bucket that has enough pages to satisfy the request. 7378 * The initial check is done without the lock to avoid unneeded contention. 7379 * The function returns 1 if enough pages were found, else 0 if it could not 7380 * find enough pages in a bucket. 7381 */ 7382 static int 7383 pcf_decrement_bucket(pgcnt_t npages) 7384 { 7385 struct pcf *p; 7386 struct pcf *q; 7387 int i; 7388 7389 p = &pcf[PCF_INDEX()]; 7390 q = &pcf[pcf_fanout]; 7391 for (i = 0; i < pcf_fanout; i++) { 7392 if (p->pcf_count > npages) { 7393 /* 7394 * a good one to try. 7395 */ 7396 mutex_enter(&p->pcf_lock); 7397 if (p->pcf_count > npages) { 7398 p->pcf_count -= (uint_t)npages; 7399 /* 7400 * freemem is not protected by any lock. 7401 * Thus, we cannot have any assertion 7402 * containing freemem here. 7403 */ 7404 freemem -= npages; 7405 mutex_exit(&p->pcf_lock); 7406 return (1); 7407 } 7408 mutex_exit(&p->pcf_lock); 7409 } 7410 p++; 7411 if (p >= q) { 7412 p = pcf; 7413 } 7414 } 7415 return (0); 7416 } 7417 7418 /* 7419 * Arguments: 7420 * pcftotal_ret: If the value is not NULL and we have walked all the 7421 * buckets but did not find enough pages then it will 7422 * be set to the total number of pages in all the pcf 7423 * buckets. 7424 * npages: Is the number of pages we have been requested to 7425 * find. 7426 * unlock: If set to 0 we will leave the buckets locked if the 7427 * requested number of pages are not found. 7428 * 7429 * Go and try to satisfy the page request from any number of buckets. 7430 * This can be a very expensive operation as we have to lock the buckets 7431 * we are checking (and keep them locked), starting at bucket 0. 7432 * 7433 * The function returns 1 if enough pages were found, else 0 if it could not 7434 * find enough pages in the buckets. 7435 * 7436 */ 7437 static int 7438 pcf_decrement_multiple(pgcnt_t *pcftotal_ret, pgcnt_t npages, int unlock) 7439 { 7440 struct pcf *p; 7441 pgcnt_t pcftotal; 7442 int i; 7443 7444 p = pcf; 7445 /* try to collect pages from several pcf bins */ 7446 for (pcftotal = 0, i = 0; i < pcf_fanout; i++) { 7447 mutex_enter(&p->pcf_lock); 7448 pcftotal += p->pcf_count; 7449 if (pcftotal >= npages) { 7450 /* 7451 * Wow! There are enough pages laying around 7452 * to satisfy the request. Do the accounting, 7453 * drop the locks we acquired, and go back. 7454 * 7455 * freemem is not protected by any lock. So, 7456 * we cannot have any assertion containing 7457 * freemem. 
7458 */ 7459 freemem -= npages; 7460 while (p >= pcf) { 7461 if (p->pcf_count <= npages) { 7462 npages -= p->pcf_count; 7463 p->pcf_count = 0; 7464 } else { 7465 p->pcf_count -= (uint_t)npages; 7466 npages = 0; 7467 } 7468 mutex_exit(&p->pcf_lock); 7469 p--; 7470 } 7471 ASSERT(npages == 0); 7472 return (1); 7473 } 7474 p++; 7475 } 7476 if (unlock) { 7477 /* failed to collect pages - release the locks */ 7478 while (--p >= pcf) { 7479 mutex_exit(&p->pcf_lock); 7480 } 7481 } 7482 if (pcftotal_ret != NULL) 7483 *pcftotal_ret = pcftotal; 7484 return (0); 7485 } 7486
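
/*
 * Illustrative sketch only: a hypothetical caller (example_take_pages() is
 * not part of this file) combining the two helpers above, trying the cheap
 * single-bucket path before falling back to the multi-bucket path, which
 * locks buckets starting at pcf[0].
 *
 *	static int
 *	example_take_pages(pgcnt_t npages)
 *	{
 *		pgcnt_t pcftotal;
 *
 *		if (pcf_decrement_bucket(npages))
 *			return (1);	(fast path: one bucket had enough)
 *
 *		if (pcf_decrement_multiple(&pcftotal, npages, 1))
 *			return (1);	(slow path: draws from several buckets)
 *
 *		return (0);		(not enough memory; pcftotal holds the
 *					 total that was available)
 *	}
 */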