1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */ 27 /* All Rights Reserved */ 28 29 /* 30 * University Copyright- Copyright (c) 1982, 1986, 1988 31 * The Regents of the University of California 32 * All Rights Reserved 33 * 34 * University Acknowledgment- Portions of this document are derived from 35 * software developed by the University of California, Berkeley, and its 36 * contributors. 37 */ 38 39 #pragma ident "%Z%%M% %I% %E% SMI" 40 41 /* 42 * VM - physical page management. 43 */ 44 45 #include <sys/types.h> 46 #include <sys/t_lock.h> 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/errno.h> 50 #include <sys/time.h> 51 #include <sys/vnode.h> 52 #include <sys/vm.h> 53 #include <sys/vtrace.h> 54 #include <sys/swap.h> 55 #include <sys/cmn_err.h> 56 #include <sys/tuneable.h> 57 #include <sys/sysmacros.h> 58 #include <sys/cpuvar.h> 59 #include <sys/callb.h> 60 #include <sys/debug.h> 61 #include <sys/tnf_probe.h> 62 #include <sys/condvar_impl.h> 63 #include <sys/mem_config.h> 64 #include <sys/mem_cage.h> 65 #include <sys/kmem.h> 66 #include <sys/atomic.h> 67 #include <sys/strlog.h> 68 #include <sys/mman.h> 69 #include <sys/ontrap.h> 70 #include <sys/lgrp.h> 71 #include <sys/vfs.h> 72 73 #include <vm/hat.h> 74 #include <vm/anon.h> 75 #include <vm/page.h> 76 #include <vm/seg.h> 77 #include <vm/pvn.h> 78 #include <vm/seg_kmem.h> 79 #include <vm/vm_dep.h> 80 #include <sys/vm_usage.h> 81 #include <fs/fs_subr.h> 82 #include <sys/ddi.h> 83 #include <sys/modctl.h> 84 85 static int nopageage = 0; 86 87 static pgcnt_t max_page_get; /* max page_get request size in pages */ 88 pgcnt_t total_pages = 0; /* total number of pages (used by /proc) */ 89 90 /* 91 * freemem_lock protects all freemem variables: 92 * availrmem. Also this lock protects the globals which track the 93 * availrmem changes for accurate kernel footprint calculation. 94 * See below for an explanation of these 95 * globals. 96 */ 97 kmutex_t freemem_lock; 98 pgcnt_t availrmem; 99 pgcnt_t availrmem_initial; 100 101 /* 102 * These globals track availrmem changes to get a more accurate 103 * estimate of the kernel size. Historically pp_kernel is used for 104 * kernel size and is based on availrmem. But availrmem is adjusted for 105 * locked pages in the system not just for kernel locked pages. 106 * These new counters will track the pages locked through segvn and 107 * by explicit user locking. 108 * 109 * pages_locked : How many pages are locked because of user specified 110 * locking through mlock or plock. 
111 * 112 * pages_useclaim,pages_claimed : These two variables track the 113 * claim adjustments because of the protection changes on a segvn segment. 114 * 115 * All these globals are protected by the same lock which protects availrmem. 116 */ 117 pgcnt_t pages_locked = 0; 118 pgcnt_t pages_useclaim = 0; 119 pgcnt_t pages_claimed = 0; 120 121 122 /* 123 * new_freemem_lock protects freemem, freemem_wait & freemem_cv. 124 */ 125 static kmutex_t new_freemem_lock; 126 static uint_t freemem_wait; /* someone waiting for freemem */ 127 static kcondvar_t freemem_cv; 128 129 /* 130 * The logical page free list is maintained as two lists, the 'free' 131 * and the 'cache' lists. 132 * The free list contains those pages that should be reused first. 133 * 134 * The implementation of the lists is machine dependent. 135 * page_get_freelist(), page_get_cachelist(), 136 * page_list_sub(), and page_list_add() 137 * form the interface to the machine dependent implementation. 138 * 139 * Pages with p_free set are on the cache list. 140 * Pages with p_free and p_age set are on the free list. 141 * 142 * A page may be locked while on either list. 143 */ 144 145 /* 146 * free list accounting stuff. 147 * 148 * 149 * Spread out the value for the number of pages on the 150 * page free and page cache lists. If there is just one 151 * value, then it must be under just one lock. 152 * The lock contention and cache traffic are a real bother. 153 * 154 * When we acquire and then drop a single pcf lock 155 * we can start in the middle of the array of pcf structures. 156 * If we acquire more than one pcf lock at a time, we need to 157 * start at the front to avoid deadlocking. 158 * 159 * pcf_count holds the number of pages in each pool. 160 * 161 * pcf_block is set when page_create_get_something() has asked the 162 * PSM page freelist and page cachelist routines without specifying 163 * a color and nothing came back. This is used to block anything 164 * else from moving pages from one list to the other while the 165 * lists are searched again. If a page is freed while pcf_block is 166 * set, then pcf_reserve is incremented. pcgs_unblock() takes care 167 * of clearing pcf_block, doing the wakeups, etc. 168 */ 169 170 #if NCPU <= 4 171 #define PAD 2 172 #define PCF_FANOUT 4 173 static uint_t pcf_mask = PCF_FANOUT - 1; 174 #else 175 #define PAD 10 176 #ifdef sun4v 177 #define PCF_FANOUT 32 178 #else 179 #define PCF_FANOUT 128 180 #endif 181 static uint_t pcf_mask = PCF_FANOUT - 1; 182 #endif 183 184 struct pcf { 185 kmutex_t pcf_lock; /* protects the structure */ 186 uint_t pcf_count; /* page count */ 187 uint_t pcf_wait; /* number of waiters */ 188 uint_t pcf_block; /* pcgs flag to page_free() */ 189 uint_t pcf_reserve; /* pages freed after pcf_block set */ 190 uint_t pcf_fill[PAD]; /* to line up on the caches */ 191 }; 192 193 static struct pcf pcf[PCF_FANOUT]; 194 #define PCF_INDEX() ((CPU->cpu_id) & (pcf_mask)) 195 196 kmutex_t pcgs_lock; /* serializes page_create_get_ */ 197 kmutex_t pcgs_cagelock; /* serializes NOSLEEP cage allocs */ 198 kmutex_t pcgs_wait_lock; /* used for delay in pcgs */ 199 static kcondvar_t pcgs_cv; /* cv for delay in pcgs */ 200 201 #ifdef VM_STATS 202 203 /* 204 * No locks, but so what, they are only statistics. 
205 */ 206 207 static struct page_tcnt { 208 int pc_free_cache; /* free's into cache list */ 209 int pc_free_dontneed; /* free's with dontneed */ 210 int pc_free_pageout; /* free's from pageout */ 211 int pc_free_free; /* free's into free list */ 212 int pc_free_pages; /* free's into large page free list */ 213 int pc_destroy_pages; /* large page destroy's */ 214 int pc_get_cache; /* get's from cache list */ 215 int pc_get_free; /* get's from free list */ 216 int pc_reclaim; /* reclaim's */ 217 int pc_abortfree; /* abort's of free pages */ 218 int pc_find_hit; /* find's that find page */ 219 int pc_find_miss; /* find's that don't find page */ 220 int pc_destroy_free; /* # of free pages destroyed */ 221 #define PC_HASH_CNT (4*PAGE_HASHAVELEN) 222 int pc_find_hashlen[PC_HASH_CNT+1]; 223 int pc_addclaim_pages; 224 int pc_subclaim_pages; 225 int pc_free_replacement_page[2]; 226 int pc_try_demote_pages[6]; 227 int pc_demote_pages[2]; 228 } pagecnt; 229 230 uint_t hashin_count; 231 uint_t hashin_not_held; 232 uint_t hashin_already; 233 234 uint_t hashout_count; 235 uint_t hashout_not_held; 236 237 uint_t page_create_count; 238 uint_t page_create_not_enough; 239 uint_t page_create_not_enough_again; 240 uint_t page_create_zero; 241 uint_t page_create_hashout; 242 uint_t page_create_page_lock_failed; 243 uint_t page_create_trylock_failed; 244 uint_t page_create_found_one; 245 uint_t page_create_hashin_failed; 246 uint_t page_create_dropped_phm; 247 248 uint_t page_create_new; 249 uint_t page_create_exists; 250 uint_t page_create_putbacks; 251 uint_t page_create_overshoot; 252 253 uint_t page_reclaim_zero; 254 uint_t page_reclaim_zero_locked; 255 256 uint_t page_rename_exists; 257 uint_t page_rename_count; 258 259 uint_t page_lookup_cnt[20]; 260 uint_t page_lookup_nowait_cnt[10]; 261 uint_t page_find_cnt; 262 uint_t page_exists_cnt; 263 uint_t page_exists_forreal_cnt; 264 uint_t page_lookup_dev_cnt; 265 uint_t get_cachelist_cnt; 266 uint_t page_create_cnt[10]; 267 uint_t alloc_pages[9]; 268 uint_t page_exphcontg[19]; 269 uint_t page_create_large_cnt[10]; 270 271 /* 272 * Collects statistics. 273 */ 274 #define PAGE_HASH_SEARCH(index, pp, vp, off) { \ 275 uint_t mylen = 0; \ 276 \ 277 for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash, mylen++) { \ 278 if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \ 279 break; \ 280 } \ 281 if ((pp) != NULL) \ 282 pagecnt.pc_find_hit++; \ 283 else \ 284 pagecnt.pc_find_miss++; \ 285 if (mylen > PC_HASH_CNT) \ 286 mylen = PC_HASH_CNT; \ 287 pagecnt.pc_find_hashlen[mylen]++; \ 288 } 289 290 #else /* VM_STATS */ 291 292 /* 293 * Don't collect statistics 294 */ 295 #define PAGE_HASH_SEARCH(index, pp, vp, off) { \ 296 for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \ 297 if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \ 298 break; \ 299 } \ 300 } 301 302 #endif /* VM_STATS */ 303 304 305 306 #ifdef DEBUG 307 #define MEMSEG_SEARCH_STATS 308 #endif 309 310 #ifdef MEMSEG_SEARCH_STATS 311 struct memseg_stats { 312 uint_t nsearch; 313 uint_t nlastwon; 314 uint_t nhashwon; 315 uint_t nnotfound; 316 } memseg_stats; 317 318 #define MEMSEG_STAT_INCR(v) \ 319 atomic_add_32(&memseg_stats.v, 1) 320 #else 321 #define MEMSEG_STAT_INCR(x) 322 #endif 323 324 struct memseg *memsegs; /* list of memory segments */ 325 326 /* 327 * /etc/system tunable to control large page allocation hueristic. 328 * 329 * Setting to LPAP_LOCAL will heavily prefer the local lgroup over remote lgroup 330 * for large page allocation requests. 
If a large page is not readily 331 * available on the local freelists we will go through additional effort 332 * to create a large page, potentially moving smaller pages around to coalesce 333 * larger pages in the local lgroup. 334 * Default value of LPAP_DEFAULT will go to remote freelists if large pages 335 * are not readily available in the local lgroup. 336 */ 337 enum lpap { 338 LPAP_DEFAULT, /* default large page allocation policy */ 339 LPAP_LOCAL /* local large page allocation policy */ 340 }; 341 342 enum lpap lpg_alloc_prefer = LPAP_DEFAULT; 343 344 static void page_init_mem_config(void); 345 static int page_do_hashin(page_t *, vnode_t *, u_offset_t); 346 static void page_do_hashout(page_t *); 347 static void page_capture_init(); 348 int page_capture_take_action(page_t *, uint_t, void *); 349 350 static void page_demote_vp_pages(page_t *); 351 352 /* 353 * vm subsystem related initialization 354 */ 355 void 356 vm_init(void) 357 { 358 boolean_t callb_vm_cpr(void *, int); 359 360 (void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm"); 361 page_init_mem_config(); 362 page_retire_init(); 363 vm_usage_init(); 364 page_capture_init(); 365 } 366 367 /* 368 * This function is called at startup and when memory is added or deleted. 369 */ 370 void 371 init_pages_pp_maximum() 372 { 373 static pgcnt_t p_min; 374 static pgcnt_t pages_pp_maximum_startup; 375 static pgcnt_t avrmem_delta; 376 static int init_done; 377 static int user_set; /* true if set in /etc/system */ 378 379 if (init_done == 0) { 380 381 /* If the user specified a value, save it */ 382 if (pages_pp_maximum != 0) { 383 user_set = 1; 384 pages_pp_maximum_startup = pages_pp_maximum; 385 } 386 387 /* 388 * Setting of pages_pp_maximum is based first time 389 * on the value of availrmem just after the start-up 390 * allocations. To preserve this relationship at run 391 * time, use a delta from availrmem_initial. 392 */ 393 ASSERT(availrmem_initial >= availrmem); 394 avrmem_delta = availrmem_initial - availrmem; 395 396 /* The allowable floor of pages_pp_maximum */ 397 p_min = tune.t_minarmem + 100; 398 399 /* Make sure we don't come through here again. */ 400 init_done = 1; 401 } 402 /* 403 * Determine pages_pp_maximum, the number of currently available 404 * pages (availrmem) that can't be `locked'. If not set by 405 * the user, we set it to 4% of the currently available memory 406 * plus 4MB. 407 * But we also insist that it be greater than tune.t_minarmem; 408 * otherwise a process could lock down a lot of memory, get swapped 409 * out, and never have enough to get swapped back in. 
410 */ 411 if (user_set) 412 pages_pp_maximum = pages_pp_maximum_startup; 413 else 414 pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25) 415 + btop(4 * 1024 * 1024); 416 417 if (pages_pp_maximum <= p_min) { 418 pages_pp_maximum = p_min; 419 } 420 } 421 422 void 423 set_max_page_get(pgcnt_t target_total_pages) 424 { 425 max_page_get = target_total_pages / 2; 426 } 427 428 static pgcnt_t pending_delete; 429 430 /*ARGSUSED*/ 431 static void 432 page_mem_config_post_add( 433 void *arg, 434 pgcnt_t delta_pages) 435 { 436 set_max_page_get(total_pages - pending_delete); 437 init_pages_pp_maximum(); 438 } 439 440 /*ARGSUSED*/ 441 static int 442 page_mem_config_pre_del( 443 void *arg, 444 pgcnt_t delta_pages) 445 { 446 pgcnt_t nv; 447 448 nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages); 449 set_max_page_get(total_pages - nv); 450 return (0); 451 } 452 453 /*ARGSUSED*/ 454 static void 455 page_mem_config_post_del( 456 void *arg, 457 pgcnt_t delta_pages, 458 int cancelled) 459 { 460 pgcnt_t nv; 461 462 nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages); 463 set_max_page_get(total_pages - nv); 464 if (!cancelled) 465 init_pages_pp_maximum(); 466 } 467 468 static kphysm_setup_vector_t page_mem_config_vec = { 469 KPHYSM_SETUP_VECTOR_VERSION, 470 page_mem_config_post_add, 471 page_mem_config_pre_del, 472 page_mem_config_post_del, 473 }; 474 475 static void 476 page_init_mem_config(void) 477 { 478 int ret; 479 480 ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL); 481 ASSERT(ret == 0); 482 } 483 484 /* 485 * Evenly spread out the PCF counters for large free pages 486 */ 487 static void 488 page_free_large_ctr(pgcnt_t npages) 489 { 490 static struct pcf *p = pcf; 491 pgcnt_t lump; 492 493 freemem += npages; 494 495 lump = roundup(npages, PCF_FANOUT) / PCF_FANOUT; 496 497 while (npages > 0) { 498 499 ASSERT(!p->pcf_block); 500 501 if (lump < npages) { 502 p->pcf_count += (uint_t)lump; 503 npages -= lump; 504 } else { 505 p->pcf_count += (uint_t)npages; 506 npages = 0; 507 } 508 509 ASSERT(!p->pcf_wait); 510 511 if (++p > &pcf[PCF_FANOUT - 1]) 512 p = pcf; 513 } 514 515 ASSERT(npages == 0); 516 } 517 518 /* 519 * Add a physical chunk of memory to the system free lists during startup. 520 * Platform specific startup() allocates the memory for the page structs. 521 * 522 * num - number of page structures 523 * base - page number (pfn) to be associated with the first page. 524 * 525 * Since we are doing this during startup (ie. single threaded), we will 526 * use shortcut routines to avoid any locking overhead while putting all 527 * these pages on the freelists. 528 * 529 * NOTE: Any changes performed to page_free(), must also be performed to 530 * add_physmem() since this is how we initialize all page_t's at 531 * boot time. 532 */ 533 void 534 add_physmem( 535 page_t *pp, 536 pgcnt_t num, 537 pfn_t pnum) 538 { 539 page_t *root = NULL; 540 uint_t szc = page_num_pagesizes() - 1; 541 pgcnt_t large = page_get_pagecnt(szc); 542 pgcnt_t cnt = 0; 543 544 TRACE_2(TR_FAC_VM, TR_PAGE_INIT, 545 "add_physmem:pp %p num %lu", pp, num); 546 547 /* 548 * Arbitrarily limit the max page_get request 549 * to 1/2 of the page structs we have. 550 */ 551 total_pages += num; 552 set_max_page_get(total_pages); 553 554 PLCNT_MODIFY_MAX(pnum, (long)num); 555 556 /* 557 * The physical space for the pages array 558 * representing ram pages has already been 559 * allocated. 
Here we initialize each lock 560 * in the page structure, and put each on 561 * the free list 562 */ 563 for (; num; pp++, pnum++, num--) { 564 565 /* 566 * this needs to fill in the page number 567 * and do any other arch specific initialization 568 */ 569 add_physmem_cb(pp, pnum); 570 571 pp->p_lckcnt = 0; 572 pp->p_cowcnt = 0; 573 pp->p_slckcnt = 0; 574 575 /* 576 * Initialize the page lock as unlocked, since nobody 577 * can see or access this page yet. 578 */ 579 pp->p_selock = 0; 580 581 /* 582 * Initialize IO lock 583 */ 584 page_iolock_init(pp); 585 586 /* 587 * initialize other fields in the page_t 588 */ 589 PP_SETFREE(pp); 590 page_clr_all_props(pp); 591 PP_SETAGED(pp); 592 pp->p_offset = (u_offset_t)-1; 593 pp->p_next = pp; 594 pp->p_prev = pp; 595 596 /* 597 * Simple case: System doesn't support large pages. 598 */ 599 if (szc == 0) { 600 pp->p_szc = 0; 601 page_free_at_startup(pp); 602 continue; 603 } 604 605 /* 606 * Handle unaligned pages, we collect them up onto 607 * the root page until we have a full large page. 608 */ 609 if (!IS_P2ALIGNED(pnum, large)) { 610 611 /* 612 * If not in a large page, 613 * just free as small page. 614 */ 615 if (root == NULL) { 616 pp->p_szc = 0; 617 page_free_at_startup(pp); 618 continue; 619 } 620 621 /* 622 * Link a constituent page into the large page. 623 */ 624 pp->p_szc = szc; 625 page_list_concat(&root, &pp); 626 627 /* 628 * When large page is fully formed, free it. 629 */ 630 if (++cnt == large) { 631 page_free_large_ctr(cnt); 632 page_list_add_pages(root, PG_LIST_ISINIT); 633 root = NULL; 634 cnt = 0; 635 } 636 continue; 637 } 638 639 /* 640 * At this point we have a page number which 641 * is aligned. We assert that we aren't already 642 * in a different large page. 643 */ 644 ASSERT(IS_P2ALIGNED(pnum, large)); 645 ASSERT(root == NULL && cnt == 0); 646 647 /* 648 * If insufficient number of pages left to form 649 * a large page, just free the small page. 650 */ 651 if (num < large) { 652 pp->p_szc = 0; 653 page_free_at_startup(pp); 654 continue; 655 } 656 657 /* 658 * Otherwise start a new large page. 659 */ 660 pp->p_szc = szc; 661 cnt++; 662 root = pp; 663 } 664 ASSERT(root == NULL && cnt == 0); 665 } 666 667 /* 668 * Find a page representing the specified [vp, offset]. 669 * If we find the page but it is intransit coming in, 670 * it will have an "exclusive" lock and we wait for 671 * the i/o to complete. A page found on the free list 672 * is always reclaimed and then locked. On success, the page 673 * is locked, its data is valid and it isn't on the free 674 * list, while a NULL is returned if the page doesn't exist. 675 */ 676 page_t * 677 page_lookup(vnode_t *vp, u_offset_t off, se_t se) 678 { 679 return (page_lookup_create(vp, off, se, NULL, NULL, 0)); 680 } 681 682 /* 683 * Find a page representing the specified [vp, offset]. 684 * We either return the one we found or, if passed in, 685 * create one with identity of [vp, offset] of the 686 * pre-allocated page. If we find existing page but it is 687 * intransit coming in, it will have an "exclusive" lock 688 * and we wait for the i/o to complete. A page found on 689 * the free list is always reclaimed and then locked. 
690 * On success, the page is locked, its data is valid and 691 * it isn't on the free list, while a NULL is returned 692 * if the page doesn't exist and newpp is NULL; 693 */ 694 page_t * 695 page_lookup_create( 696 vnode_t *vp, 697 u_offset_t off, 698 se_t se, 699 page_t *newpp, 700 spgcnt_t *nrelocp, 701 int flags) 702 { 703 page_t *pp; 704 kmutex_t *phm; 705 ulong_t index; 706 uint_t hash_locked; 707 uint_t es; 708 709 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 710 VM_STAT_ADD(page_lookup_cnt[0]); 711 ASSERT(newpp ? PAGE_EXCL(newpp) : 1); 712 713 /* 714 * Acquire the appropriate page hash lock since 715 * we have to search the hash list. Pages that 716 * hash to this list can't change identity while 717 * this lock is held. 718 */ 719 hash_locked = 0; 720 index = PAGE_HASH_FUNC(vp, off); 721 phm = NULL; 722 top: 723 PAGE_HASH_SEARCH(index, pp, vp, off); 724 if (pp != NULL) { 725 VM_STAT_ADD(page_lookup_cnt[1]); 726 es = (newpp != NULL) ? 1 : 0; 727 es |= flags; 728 if (!hash_locked) { 729 VM_STAT_ADD(page_lookup_cnt[2]); 730 if (!page_try_reclaim_lock(pp, se, es)) { 731 /* 732 * On a miss, acquire the phm. Then 733 * next time, page_lock() will be called, 734 * causing a wait if the page is busy. 735 * just looping with page_trylock() would 736 * get pretty boring. 737 */ 738 VM_STAT_ADD(page_lookup_cnt[3]); 739 phm = PAGE_HASH_MUTEX(index); 740 mutex_enter(phm); 741 hash_locked = 1; 742 goto top; 743 } 744 } else { 745 VM_STAT_ADD(page_lookup_cnt[4]); 746 if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) { 747 VM_STAT_ADD(page_lookup_cnt[5]); 748 goto top; 749 } 750 } 751 752 /* 753 * Since `pp' is locked it can not change identity now. 754 * Reconfirm we locked the correct page. 755 * 756 * Both the p_vnode and p_offset *must* be cast volatile 757 * to force a reload of their values: The PAGE_HASH_SEARCH 758 * macro will have stuffed p_vnode and p_offset into 759 * registers before calling page_trylock(); another thread, 760 * actually holding the hash lock, could have changed the 761 * page's identity in memory, but our registers would not 762 * be changed, fooling the reconfirmation. If the hash 763 * lock was held during the search, the casting would 764 * not be needed. 765 */ 766 VM_STAT_ADD(page_lookup_cnt[6]); 767 if (((volatile struct vnode *)(pp->p_vnode) != vp) || 768 ((volatile u_offset_t)(pp->p_offset) != off)) { 769 VM_STAT_ADD(page_lookup_cnt[7]); 770 if (hash_locked) { 771 panic("page_lookup_create: lost page %p", 772 (void *)pp); 773 /*NOTREACHED*/ 774 } 775 page_unlock(pp); 776 phm = PAGE_HASH_MUTEX(index); 777 mutex_enter(phm); 778 hash_locked = 1; 779 goto top; 780 } 781 782 /* 783 * If page_trylock() was called, then pp may still be on 784 * the cachelist (can't be on the free list, it would not 785 * have been found in the search). If it is on the 786 * cachelist it must be pulled now. To pull the page from 787 * the cachelist, it must be exclusively locked. 788 * 789 * The other big difference between page_trylock() and 790 * page_lock(), is that page_lock() will pull the 791 * page from whatever free list (the cache list in this 792 * case) the page is on. If page_trylock() was used 793 * above, then we have to do the reclaim ourselves. 794 */ 795 if ((!hash_locked) && (PP_ISFREE(pp))) { 796 ASSERT(PP_ISAGED(pp) == 0); 797 VM_STAT_ADD(page_lookup_cnt[8]); 798 799 /* 800 * page_reclaim will ensure that we 801 * have this page exclusively 802 */ 803 804 if (!page_reclaim(pp, NULL)) { 805 /* 806 * Page_reclaim dropped whatever lock 807 * we held. 
808 */ 809 VM_STAT_ADD(page_lookup_cnt[9]); 810 phm = PAGE_HASH_MUTEX(index); 811 mutex_enter(phm); 812 hash_locked = 1; 813 goto top; 814 } else if (se == SE_SHARED && newpp == NULL) { 815 VM_STAT_ADD(page_lookup_cnt[10]); 816 page_downgrade(pp); 817 } 818 } 819 820 if (hash_locked) { 821 mutex_exit(phm); 822 } 823 824 if (newpp != NULL && pp->p_szc < newpp->p_szc && 825 PAGE_EXCL(pp) && nrelocp != NULL) { 826 ASSERT(nrelocp != NULL); 827 (void) page_relocate(&pp, &newpp, 1, 1, nrelocp, 828 NULL); 829 if (*nrelocp > 0) { 830 VM_STAT_COND_ADD(*nrelocp == 1, 831 page_lookup_cnt[11]); 832 VM_STAT_COND_ADD(*nrelocp > 1, 833 page_lookup_cnt[12]); 834 pp = newpp; 835 se = SE_EXCL; 836 } else { 837 if (se == SE_SHARED) { 838 page_downgrade(pp); 839 } 840 VM_STAT_ADD(page_lookup_cnt[13]); 841 } 842 } else if (newpp != NULL && nrelocp != NULL) { 843 if (PAGE_EXCL(pp) && se == SE_SHARED) { 844 page_downgrade(pp); 845 } 846 VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc, 847 page_lookup_cnt[14]); 848 VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc, 849 page_lookup_cnt[15]); 850 VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc, 851 page_lookup_cnt[16]); 852 } else if (newpp != NULL && PAGE_EXCL(pp)) { 853 se = SE_EXCL; 854 } 855 } else if (!hash_locked) { 856 VM_STAT_ADD(page_lookup_cnt[17]); 857 phm = PAGE_HASH_MUTEX(index); 858 mutex_enter(phm); 859 hash_locked = 1; 860 goto top; 861 } else if (newpp != NULL) { 862 /* 863 * If we have a preallocated page then 864 * insert it now and basically behave like 865 * page_create. 866 */ 867 VM_STAT_ADD(page_lookup_cnt[18]); 868 /* 869 * Since we hold the page hash mutex and 870 * just searched for this page, page_hashin 871 * had better not fail. If it does, that 872 * means some thread did not follow the 873 * page hash mutex rules. Panic now and 874 * get it over with. As usual, go down 875 * holding all the locks. 876 */ 877 ASSERT(MUTEX_HELD(phm)); 878 if (!page_hashin(newpp, vp, off, phm)) { 879 ASSERT(MUTEX_HELD(phm)); 880 panic("page_lookup_create: hashin failed %p %p %llx %p", 881 (void *)newpp, (void *)vp, off, (void *)phm); 882 /*NOTREACHED*/ 883 } 884 ASSERT(MUTEX_HELD(phm)); 885 mutex_exit(phm); 886 phm = NULL; 887 page_set_props(newpp, P_REF); 888 page_io_lock(newpp); 889 pp = newpp; 890 se = SE_EXCL; 891 } else { 892 VM_STAT_ADD(page_lookup_cnt[19]); 893 mutex_exit(phm); 894 } 895 896 ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1); 897 898 ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1); 899 900 return (pp); 901 } 902 903 /* 904 * Search the hash list for the page representing the 905 * specified [vp, offset] and return it locked. Skip 906 * free pages and pages that cannot be locked as requested. 907 * Used while attempting to kluster pages. 
908 */ 909 page_t * 910 page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se) 911 { 912 page_t *pp; 913 kmutex_t *phm; 914 ulong_t index; 915 uint_t locked; 916 917 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 918 VM_STAT_ADD(page_lookup_nowait_cnt[0]); 919 920 index = PAGE_HASH_FUNC(vp, off); 921 PAGE_HASH_SEARCH(index, pp, vp, off); 922 locked = 0; 923 if (pp == NULL) { 924 top: 925 VM_STAT_ADD(page_lookup_nowait_cnt[1]); 926 locked = 1; 927 phm = PAGE_HASH_MUTEX(index); 928 mutex_enter(phm); 929 PAGE_HASH_SEARCH(index, pp, vp, off); 930 } 931 932 if (pp == NULL || PP_ISFREE(pp)) { 933 VM_STAT_ADD(page_lookup_nowait_cnt[2]); 934 pp = NULL; 935 } else { 936 if (!page_trylock(pp, se)) { 937 VM_STAT_ADD(page_lookup_nowait_cnt[3]); 938 pp = NULL; 939 } else { 940 VM_STAT_ADD(page_lookup_nowait_cnt[4]); 941 /* 942 * See the comment in page_lookup() 943 */ 944 if (((volatile struct vnode *)(pp->p_vnode) != vp) || 945 ((u_offset_t)(pp->p_offset) != off)) { 946 VM_STAT_ADD(page_lookup_nowait_cnt[5]); 947 if (locked) { 948 panic("page_lookup_nowait %p", 949 (void *)pp); 950 /*NOTREACHED*/ 951 } 952 page_unlock(pp); 953 goto top; 954 } 955 if (PP_ISFREE(pp)) { 956 VM_STAT_ADD(page_lookup_nowait_cnt[6]); 957 page_unlock(pp); 958 pp = NULL; 959 } 960 } 961 } 962 if (locked) { 963 VM_STAT_ADD(page_lookup_nowait_cnt[7]); 964 mutex_exit(phm); 965 } 966 967 ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1); 968 969 return (pp); 970 } 971 972 /* 973 * Search the hash list for a page with the specified [vp, off] 974 * that is known to exist and is already locked. This routine 975 * is typically used by segment SOFTUNLOCK routines. 976 */ 977 page_t * 978 page_find(vnode_t *vp, u_offset_t off) 979 { 980 page_t *pp; 981 kmutex_t *phm; 982 ulong_t index; 983 984 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 985 VM_STAT_ADD(page_find_cnt); 986 987 index = PAGE_HASH_FUNC(vp, off); 988 phm = PAGE_HASH_MUTEX(index); 989 990 mutex_enter(phm); 991 PAGE_HASH_SEARCH(index, pp, vp, off); 992 mutex_exit(phm); 993 994 ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr); 995 return (pp); 996 } 997 998 /* 999 * Determine whether a page with the specified [vp, off] 1000 * currently exists in the system. Obviously this should 1001 * only be considered as a hint since nothing prevents the 1002 * page from disappearing or appearing immediately after 1003 * the return from this routine. Subsequently, we don't 1004 * even bother to lock the list. 1005 */ 1006 page_t * 1007 page_exists(vnode_t *vp, u_offset_t off) 1008 { 1009 page_t *pp; 1010 ulong_t index; 1011 1012 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 1013 VM_STAT_ADD(page_exists_cnt); 1014 1015 index = PAGE_HASH_FUNC(vp, off); 1016 PAGE_HASH_SEARCH(index, pp, vp, off); 1017 1018 return (pp); 1019 } 1020 1021 /* 1022 * Determine if physically contiguous pages exist for [vp, off] - [vp, off + 1023 * page_size(szc)) range. if they exist and ppa is not NULL fill ppa array 1024 * with these pages locked SHARED. If necessary reclaim pages from 1025 * freelist. Return 1 if contiguous pages exist and 0 otherwise. 1026 * 1027 * If we fail to lock pages still return 1 if pages exist and contiguous. 1028 * But in this case return value is just a hint. ppa array won't be filled. 1029 * Caller should initialize ppa[0] as NULL to distinguish return value. 1030 * 1031 * Returns 0 if pages don't exist or not physically contiguous. 1032 * 1033 * This routine doesn't work for anonymous(swapfs) pages. 
1034 */ 1035 int 1036 page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[]) 1037 { 1038 pgcnt_t pages; 1039 pfn_t pfn; 1040 page_t *rootpp; 1041 pgcnt_t i; 1042 pgcnt_t j; 1043 u_offset_t save_off = off; 1044 ulong_t index; 1045 kmutex_t *phm; 1046 page_t *pp; 1047 uint_t pszc; 1048 int loopcnt = 0; 1049 1050 ASSERT(szc != 0); 1051 ASSERT(vp != NULL); 1052 ASSERT(!IS_SWAPFSVP(vp)); 1053 ASSERT(!VN_ISKAS(vp)); 1054 1055 again: 1056 if (++loopcnt > 3) { 1057 VM_STAT_ADD(page_exphcontg[0]); 1058 return (0); 1059 } 1060 1061 index = PAGE_HASH_FUNC(vp, off); 1062 phm = PAGE_HASH_MUTEX(index); 1063 1064 mutex_enter(phm); 1065 PAGE_HASH_SEARCH(index, pp, vp, off); 1066 mutex_exit(phm); 1067 1068 VM_STAT_ADD(page_exphcontg[1]); 1069 1070 if (pp == NULL) { 1071 VM_STAT_ADD(page_exphcontg[2]); 1072 return (0); 1073 } 1074 1075 pages = page_get_pagecnt(szc); 1076 rootpp = pp; 1077 pfn = rootpp->p_pagenum; 1078 1079 if ((pszc = pp->p_szc) >= szc && ppa != NULL) { 1080 VM_STAT_ADD(page_exphcontg[3]); 1081 if (!page_trylock(pp, SE_SHARED)) { 1082 VM_STAT_ADD(page_exphcontg[4]); 1083 return (1); 1084 } 1085 if (pp->p_szc != pszc || pp->p_vnode != vp || 1086 pp->p_offset != off) { 1087 VM_STAT_ADD(page_exphcontg[5]); 1088 page_unlock(pp); 1089 off = save_off; 1090 goto again; 1091 } 1092 /* 1093 * szc was non zero and vnode and offset matched after we 1094 * locked the page it means it can't become free on us. 1095 */ 1096 ASSERT(!PP_ISFREE(pp)); 1097 if (!IS_P2ALIGNED(pfn, pages)) { 1098 page_unlock(pp); 1099 return (0); 1100 } 1101 ppa[0] = pp; 1102 pp++; 1103 off += PAGESIZE; 1104 pfn++; 1105 for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) { 1106 if (!page_trylock(pp, SE_SHARED)) { 1107 VM_STAT_ADD(page_exphcontg[6]); 1108 pp--; 1109 while (i-- > 0) { 1110 page_unlock(pp); 1111 pp--; 1112 } 1113 ppa[0] = NULL; 1114 return (1); 1115 } 1116 if (pp->p_szc != pszc) { 1117 VM_STAT_ADD(page_exphcontg[7]); 1118 page_unlock(pp); 1119 pp--; 1120 while (i-- > 0) { 1121 page_unlock(pp); 1122 pp--; 1123 } 1124 ppa[0] = NULL; 1125 off = save_off; 1126 goto again; 1127 } 1128 /* 1129 * szc the same as for previous already locked pages 1130 * with right identity. Since this page had correct 1131 * szc after we locked it can't get freed or destroyed 1132 * and therefore must have the expected identity. 1133 */ 1134 ASSERT(!PP_ISFREE(pp)); 1135 if (pp->p_vnode != vp || 1136 pp->p_offset != off) { 1137 panic("page_exists_physcontig: " 1138 "large page identity doesn't match"); 1139 } 1140 ppa[i] = pp; 1141 ASSERT(pp->p_pagenum == pfn); 1142 } 1143 VM_STAT_ADD(page_exphcontg[8]); 1144 ppa[pages] = NULL; 1145 return (1); 1146 } else if (pszc >= szc) { 1147 VM_STAT_ADD(page_exphcontg[9]); 1148 if (!IS_P2ALIGNED(pfn, pages)) { 1149 return (0); 1150 } 1151 return (1); 1152 } 1153 1154 if (!IS_P2ALIGNED(pfn, pages)) { 1155 VM_STAT_ADD(page_exphcontg[10]); 1156 return (0); 1157 } 1158 1159 if (page_numtomemseg_nolock(pfn) != 1160 page_numtomemseg_nolock(pfn + pages - 1)) { 1161 VM_STAT_ADD(page_exphcontg[11]); 1162 return (0); 1163 } 1164 1165 /* 1166 * We loop up 4 times across pages to promote page size. 1167 * We're extra cautious to promote page size atomically with respect 1168 * to everybody else. But we can probably optimize into 1 loop if 1169 * this becomes an issue. 
1170 */ 1171 1172 for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) { 1173 ASSERT(pp->p_pagenum == pfn); 1174 if (!page_trylock(pp, SE_EXCL)) { 1175 VM_STAT_ADD(page_exphcontg[12]); 1176 break; 1177 } 1178 if (pp->p_vnode != vp || 1179 pp->p_offset != off) { 1180 VM_STAT_ADD(page_exphcontg[13]); 1181 page_unlock(pp); 1182 break; 1183 } 1184 if (pp->p_szc >= szc) { 1185 ASSERT(i == 0); 1186 page_unlock(pp); 1187 off = save_off; 1188 goto again; 1189 } 1190 } 1191 1192 if (i != pages) { 1193 VM_STAT_ADD(page_exphcontg[14]); 1194 --pp; 1195 while (i-- > 0) { 1196 page_unlock(pp); 1197 --pp; 1198 } 1199 return (0); 1200 } 1201 1202 pp = rootpp; 1203 for (i = 0; i < pages; i++, pp++) { 1204 if (PP_ISFREE(pp)) { 1205 VM_STAT_ADD(page_exphcontg[15]); 1206 ASSERT(!PP_ISAGED(pp)); 1207 ASSERT(pp->p_szc == 0); 1208 if (!page_reclaim(pp, NULL)) { 1209 break; 1210 } 1211 } else { 1212 ASSERT(pp->p_szc < szc); 1213 VM_STAT_ADD(page_exphcontg[16]); 1214 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 1215 } 1216 } 1217 if (i < pages) { 1218 VM_STAT_ADD(page_exphcontg[17]); 1219 /* 1220 * page_reclaim failed because we were out of memory. 1221 * drop the rest of the locks and return because this page 1222 * must be already reallocated anyway. 1223 */ 1224 pp = rootpp; 1225 for (j = 0; j < pages; j++, pp++) { 1226 if (j != i) { 1227 page_unlock(pp); 1228 } 1229 } 1230 return (0); 1231 } 1232 1233 off = save_off; 1234 pp = rootpp; 1235 for (i = 0; i < pages; i++, pp++, off += PAGESIZE) { 1236 ASSERT(PAGE_EXCL(pp)); 1237 ASSERT(!PP_ISFREE(pp)); 1238 ASSERT(!hat_page_is_mapped(pp)); 1239 ASSERT(pp->p_vnode == vp); 1240 ASSERT(pp->p_offset == off); 1241 pp->p_szc = szc; 1242 } 1243 pp = rootpp; 1244 for (i = 0; i < pages; i++, pp++) { 1245 if (ppa == NULL) { 1246 page_unlock(pp); 1247 } else { 1248 ppa[i] = pp; 1249 page_downgrade(ppa[i]); 1250 } 1251 } 1252 if (ppa != NULL) { 1253 ppa[pages] = NULL; 1254 } 1255 VM_STAT_ADD(page_exphcontg[18]); 1256 ASSERT(vp->v_pages != NULL); 1257 return (1); 1258 } 1259 1260 /* 1261 * Determine whether a page with the specified [vp, off] 1262 * currently exists in the system and if so return its 1263 * size code. Obviously this should only be considered as 1264 * a hint since nothing prevents the page from disappearing 1265 * or appearing immediately after the return from this routine. 1266 */ 1267 int 1268 page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc) 1269 { 1270 page_t *pp; 1271 kmutex_t *phm; 1272 ulong_t index; 1273 int rc = 0; 1274 1275 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 1276 ASSERT(szc != NULL); 1277 VM_STAT_ADD(page_exists_forreal_cnt); 1278 1279 index = PAGE_HASH_FUNC(vp, off); 1280 phm = PAGE_HASH_MUTEX(index); 1281 1282 mutex_enter(phm); 1283 PAGE_HASH_SEARCH(index, pp, vp, off); 1284 if (pp != NULL) { 1285 *szc = pp->p_szc; 1286 rc = 1; 1287 } 1288 mutex_exit(phm); 1289 return (rc); 1290 } 1291 1292 /* wakeup threads waiting for pages in page_create_get_something() */ 1293 void 1294 wakeup_pcgs(void) 1295 { 1296 if (!CV_HAS_WAITERS(&pcgs_cv)) 1297 return; 1298 cv_broadcast(&pcgs_cv); 1299 } 1300 1301 /* 1302 * 'freemem' is used all over the kernel as an indication of how many 1303 * pages are free (either on the cache list or on the free page list) 1304 * in the system. In very few places is a really accurate 'freemem' 1305 * needed. To avoid contention of the lock protecting a the 1306 * single freemem, it was spread out into NCPU buckets. Set_freemem 1307 * sets freemem to the total of all NCPU buckets. 
It is called from 1308 * clock() on each TICK. 1309 */ 1310 void 1311 set_freemem() 1312 { 1313 struct pcf *p; 1314 ulong_t t; 1315 uint_t i; 1316 1317 t = 0; 1318 p = pcf; 1319 for (i = 0; i < PCF_FANOUT; i++) { 1320 t += p->pcf_count; 1321 p++; 1322 } 1323 freemem = t; 1324 1325 /* 1326 * Don't worry about grabbing mutex. It's not that 1327 * critical if we miss a tick or two. This is 1328 * where we wakeup possible delayers in 1329 * page_create_get_something(). 1330 */ 1331 wakeup_pcgs(); 1332 } 1333 1334 ulong_t 1335 get_freemem() 1336 { 1337 struct pcf *p; 1338 ulong_t t; 1339 uint_t i; 1340 1341 t = 0; 1342 p = pcf; 1343 for (i = 0; i < PCF_FANOUT; i++) { 1344 t += p->pcf_count; 1345 p++; 1346 } 1347 /* 1348 * We just calculated it, might as well set it. 1349 */ 1350 freemem = t; 1351 return (t); 1352 } 1353 1354 /* 1355 * Acquire all of the page cache & free (pcf) locks. 1356 */ 1357 void 1358 pcf_acquire_all() 1359 { 1360 struct pcf *p; 1361 uint_t i; 1362 1363 p = pcf; 1364 for (i = 0; i < PCF_FANOUT; i++) { 1365 mutex_enter(&p->pcf_lock); 1366 p++; 1367 } 1368 } 1369 1370 /* 1371 * Release all the pcf_locks. 1372 */ 1373 void 1374 pcf_release_all() 1375 { 1376 struct pcf *p; 1377 uint_t i; 1378 1379 p = pcf; 1380 for (i = 0; i < PCF_FANOUT; i++) { 1381 mutex_exit(&p->pcf_lock); 1382 p++; 1383 } 1384 } 1385 1386 /* 1387 * Inform the VM system that we need some pages freed up. 1388 * Calls must be symmetric, e.g.: 1389 * 1390 * page_needfree(100); 1391 * wait a bit; 1392 * page_needfree(-100); 1393 */ 1394 void 1395 page_needfree(spgcnt_t npages) 1396 { 1397 mutex_enter(&new_freemem_lock); 1398 needfree += npages; 1399 mutex_exit(&new_freemem_lock); 1400 } 1401 1402 /* 1403 * Throttle for page_create(): try to prevent freemem from dropping 1404 * below throttlefree. We can't provide a 100% guarantee because 1405 * KM_NOSLEEP allocations, page_reclaim(), and various other things 1406 * nibble away at the freelist. However, we can block all PG_WAIT 1407 * allocations until memory becomes available. The motivation is 1408 * that several things can fall apart when there's no free memory: 1409 * 1410 * (1) If pageout() needs memory to push a page, the system deadlocks. 1411 * 1412 * (2) By (broken) specification, timeout(9F) can neither fail nor 1413 * block, so it has no choice but to panic the system if it 1414 * cannot allocate a callout structure. 1415 * 1416 * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block; 1417 * it panics if it cannot allocate a callback structure. 1418 * 1419 * (4) Untold numbers of third-party drivers have not yet been hardened 1420 * against KM_NOSLEEP and/or allocb() failures; they simply assume 1421 * success and panic the system with a data fault on failure. 1422 * (The long-term solution to this particular problem is to ship 1423 * hostile fault-injecting DEBUG kernels with the DDK.) 1424 * 1425 * It is theoretically impossible to guarantee success of non-blocking 1426 * allocations, but in practice, this throttle is very hard to break. 
1427 */ 1428 static int 1429 page_create_throttle(pgcnt_t npages, int flags) 1430 { 1431 ulong_t fm; 1432 uint_t i; 1433 pgcnt_t tf; /* effective value of throttlefree */ 1434 1435 /* 1436 * Never deny pages when: 1437 * - it's a thread that cannot block [NOMEMWAIT()] 1438 * - the allocation cannot block and must not fail 1439 * - the allocation cannot block and is pageout dispensated 1440 */ 1441 if (NOMEMWAIT() || 1442 ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) || 1443 ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE)) 1444 return (1); 1445 1446 /* 1447 * If the allocation can't block, we look favorably upon it 1448 * unless we're below pageout_reserve. In that case we fail 1449 * the allocation because we want to make sure there are a few 1450 * pages available for pageout. 1451 */ 1452 if ((flags & PG_WAIT) == 0) 1453 return (freemem >= npages + pageout_reserve); 1454 1455 /* Calculate the effective throttlefree value */ 1456 tf = throttlefree - 1457 ((flags & PG_PUSHPAGE) ? pageout_reserve : 0); 1458 1459 cv_signal(&proc_pageout->p_cv); 1460 1461 for (;;) { 1462 fm = 0; 1463 pcf_acquire_all(); 1464 mutex_enter(&new_freemem_lock); 1465 for (i = 0; i < PCF_FANOUT; i++) { 1466 fm += pcf[i].pcf_count; 1467 pcf[i].pcf_wait++; 1468 mutex_exit(&pcf[i].pcf_lock); 1469 } 1470 freemem = fm; 1471 if (freemem >= npages + tf) { 1472 mutex_exit(&new_freemem_lock); 1473 break; 1474 } 1475 needfree += npages; 1476 freemem_wait++; 1477 cv_wait(&freemem_cv, &new_freemem_lock); 1478 freemem_wait--; 1479 needfree -= npages; 1480 mutex_exit(&new_freemem_lock); 1481 } 1482 return (1); 1483 } 1484 1485 /* 1486 * page_create_wait() is called to either coalesce pages from the 1487 * different pcf buckets or to wait because there simply are not 1488 * enough pages to satisfy the caller's request. 1489 * 1490 * Sadly, this is called from platform/vm/vm_machdep.c 1491 */ 1492 int 1493 page_create_wait(size_t npages, uint_t flags) 1494 { 1495 pgcnt_t total; 1496 uint_t i; 1497 struct pcf *p; 1498 1499 /* 1500 * Wait until there are enough free pages to satisfy our 1501 * entire request. 1502 * We set needfree += npages before prodding pageout, to make sure 1503 * it does real work when npages > lotsfree > freemem. 1504 */ 1505 VM_STAT_ADD(page_create_not_enough); 1506 1507 ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1); 1508 checkagain: 1509 if ((flags & PG_NORELOC) && 1510 kcage_freemem < kcage_throttlefree + npages) 1511 (void) kcage_create_throttle(npages, flags); 1512 1513 if (freemem < npages + throttlefree) 1514 if (!page_create_throttle(npages, flags)) 1515 return (0); 1516 1517 /* 1518 * Since page_create_va() looked at every 1519 * bucket, assume we are going to have to wait. 1520 * Get all of the pcf locks. 1521 */ 1522 total = 0; 1523 p = pcf; 1524 for (i = 0; i < PCF_FANOUT; i++) { 1525 mutex_enter(&p->pcf_lock); 1526 total += p->pcf_count; 1527 if (total >= npages) { 1528 /* 1529 * Wow! There are enough pages laying around 1530 * to satisfy the request. Do the accounting, 1531 * drop the locks we acquired, and go back. 1532 * 1533 * freemem is not protected by any lock. So, 1534 * we cannot have any assertion containing 1535 * freemem. 
1536 */ 1537 freemem -= npages; 1538 1539 while (p >= pcf) { 1540 if (p->pcf_count <= npages) { 1541 npages -= p->pcf_count; 1542 p->pcf_count = 0; 1543 } else { 1544 p->pcf_count -= (uint_t)npages; 1545 npages = 0; 1546 } 1547 mutex_exit(&p->pcf_lock); 1548 p--; 1549 } 1550 ASSERT(npages == 0); 1551 return (1); 1552 } 1553 p++; 1554 } 1555 1556 /* 1557 * All of the pcf locks are held, there are not enough pages 1558 * to satisfy the request (npages < total). 1559 * Be sure to acquire the new_freemem_lock before dropping 1560 * the pcf locks. This prevents dropping wakeups in page_free(). 1561 * The order is always pcf_lock then new_freemem_lock. 1562 * 1563 * Since we hold all the pcf locks, it is a good time to set freemem. 1564 * 1565 * If the caller does not want to wait, return now. 1566 * Else turn the pageout daemon loose to find something 1567 * and wait till it does. 1568 * 1569 */ 1570 freemem = total; 1571 1572 if ((flags & PG_WAIT) == 0) { 1573 pcf_release_all(); 1574 1575 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM, 1576 "page_create_nomem:npages %ld freemem %ld", npages, freemem); 1577 return (0); 1578 } 1579 1580 ASSERT(proc_pageout != NULL); 1581 cv_signal(&proc_pageout->p_cv); 1582 1583 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START, 1584 "page_create_sleep_start: freemem %ld needfree %ld", 1585 freemem, needfree); 1586 1587 /* 1588 * We are going to wait. 1589 * We currently hold all of the pcf_locks, 1590 * get the new_freemem_lock (it protects freemem_wait), 1591 * before dropping the pcf_locks. 1592 */ 1593 mutex_enter(&new_freemem_lock); 1594 1595 p = pcf; 1596 for (i = 0; i < PCF_FANOUT; i++) { 1597 p->pcf_wait++; 1598 mutex_exit(&p->pcf_lock); 1599 p++; 1600 } 1601 1602 needfree += npages; 1603 freemem_wait++; 1604 1605 cv_wait(&freemem_cv, &new_freemem_lock); 1606 1607 freemem_wait--; 1608 needfree -= npages; 1609 1610 mutex_exit(&new_freemem_lock); 1611 1612 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END, 1613 "page_create_sleep_end: freemem %ld needfree %ld", 1614 freemem, needfree); 1615 1616 VM_STAT_ADD(page_create_not_enough_again); 1617 goto checkagain; 1618 } 1619 1620 /* 1621 * A routine to do the opposite of page_create_wait(). 1622 */ 1623 void 1624 page_create_putback(spgcnt_t npages) 1625 { 1626 struct pcf *p; 1627 pgcnt_t lump; 1628 uint_t *which; 1629 1630 /* 1631 * When a contiguous lump is broken up, we have to 1632 * deal with lots of pages (min 64) so lets spread 1633 * the wealth around. 1634 */ 1635 lump = roundup(npages, PCF_FANOUT) / PCF_FANOUT; 1636 freemem += npages; 1637 1638 for (p = pcf; (npages > 0) && (p < &pcf[PCF_FANOUT]); p++) { 1639 which = &p->pcf_count; 1640 1641 mutex_enter(&p->pcf_lock); 1642 1643 if (p->pcf_block) { 1644 which = &p->pcf_reserve; 1645 } 1646 1647 if (lump < npages) { 1648 *which += (uint_t)lump; 1649 npages -= lump; 1650 } else { 1651 *which += (uint_t)npages; 1652 npages = 0; 1653 } 1654 1655 if (p->pcf_wait) { 1656 mutex_enter(&new_freemem_lock); 1657 /* 1658 * Check to see if some other thread 1659 * is actually waiting. Another bucket 1660 * may have woken it up by now. If there 1661 * are no waiters, then set our pcf_wait 1662 * count to zero to avoid coming in here 1663 * next time. 
1664 */ 1665 if (freemem_wait) { 1666 if (npages > 1) { 1667 cv_broadcast(&freemem_cv); 1668 } else { 1669 cv_signal(&freemem_cv); 1670 } 1671 p->pcf_wait--; 1672 } else { 1673 p->pcf_wait = 0; 1674 } 1675 mutex_exit(&new_freemem_lock); 1676 } 1677 mutex_exit(&p->pcf_lock); 1678 } 1679 ASSERT(npages == 0); 1680 } 1681 1682 /* 1683 * A helper routine for page_create_get_something. 1684 * The indenting got too deep down there. 1685 * Unblock the pcf counters. Any pages freed after 1686 * pcf_block got set are moved to pcf_count and 1687 * wakeups (cv_broadcast() or cv_signal()) are done as needed. 1688 */ 1689 static void 1690 pcgs_unblock(void) 1691 { 1692 int i; 1693 struct pcf *p; 1694 1695 /* Update freemem while we're here. */ 1696 freemem = 0; 1697 p = pcf; 1698 for (i = 0; i < PCF_FANOUT; i++) { 1699 mutex_enter(&p->pcf_lock); 1700 ASSERT(p->pcf_count == 0); 1701 p->pcf_count = p->pcf_reserve; 1702 p->pcf_block = 0; 1703 freemem += p->pcf_count; 1704 if (p->pcf_wait) { 1705 mutex_enter(&new_freemem_lock); 1706 if (freemem_wait) { 1707 if (p->pcf_reserve > 1) { 1708 cv_broadcast(&freemem_cv); 1709 p->pcf_wait = 0; 1710 } else { 1711 cv_signal(&freemem_cv); 1712 p->pcf_wait--; 1713 } 1714 } else { 1715 p->pcf_wait = 0; 1716 } 1717 mutex_exit(&new_freemem_lock); 1718 } 1719 p->pcf_reserve = 0; 1720 mutex_exit(&p->pcf_lock); 1721 p++; 1722 } 1723 } 1724 1725 /* 1726 * Called from page_create_va() when both the cache and free lists 1727 * have been checked once. 1728 * 1729 * Either returns a page or panics since the accounting was done 1730 * way before we got here. 1731 * 1732 * We don't come here often, so leave the accounting on permanently. 1733 */ 1734 1735 #define MAX_PCGS 100 1736 1737 #ifdef DEBUG 1738 #define PCGS_TRIES 100 1739 #else /* DEBUG */ 1740 #define PCGS_TRIES 10 1741 #endif /* DEBUG */ 1742 1743 #ifdef VM_STATS 1744 uint_t pcgs_counts[PCGS_TRIES]; 1745 uint_t pcgs_too_many; 1746 uint_t pcgs_entered; 1747 uint_t pcgs_entered_noreloc; 1748 uint_t pcgs_locked; 1749 uint_t pcgs_cagelocked; 1750 #endif /* VM_STATS */ 1751 1752 static page_t * 1753 page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg, 1754 caddr_t vaddr, uint_t flags) 1755 { 1756 uint_t count; 1757 page_t *pp; 1758 uint_t locked, i; 1759 struct pcf *p; 1760 lgrp_t *lgrp; 1761 int cagelocked = 0; 1762 1763 VM_STAT_ADD(pcgs_entered); 1764 1765 /* 1766 * Tap any reserve freelists: if we fail now, we'll die 1767 * since the page(s) we're looking for have already been 1768 * accounted for. 1769 */ 1770 flags |= PG_PANIC; 1771 1772 if ((flags & PG_NORELOC) != 0) { 1773 VM_STAT_ADD(pcgs_entered_noreloc); 1774 /* 1775 * Requests for free pages from critical threads 1776 * such as pageout still won't throttle here, but 1777 * we must try again, to give the cageout thread 1778 * another chance to catch up. Since we already 1779 * accounted for the pages, we had better get them 1780 * this time. 1781 * 1782 * N.B. All non-critical threads acquire the pcgs_cagelock 1783 * to serialize access to the freelists. This implements a 1784 * turnstile-type synchronization to avoid starvation of 1785 * critical requests for PG_NORELOC memory by non-critical 1786 * threads: all non-critical threads must acquire a 'ticket' 1787 * before passing through, which entails making sure 1788 * kcage_freemem won't fall below minfree prior to grabbing 1789 * pages from the freelists. 
1790 */ 1791 if (kcage_create_throttle(1, flags) == KCT_NONCRIT) { 1792 mutex_enter(&pcgs_cagelock); 1793 cagelocked = 1; 1794 VM_STAT_ADD(pcgs_cagelocked); 1795 } 1796 } 1797 1798 /* 1799 * Time to get serious. 1800 * We failed to get a `correctly colored' page from both the 1801 * free and cache lists. 1802 * We escalate in stage. 1803 * 1804 * First try both lists without worrying about color. 1805 * 1806 * Then, grab all page accounting locks (ie. pcf[]) and 1807 * steal any pages that they have and set the pcf_block flag to 1808 * stop deletions from the lists. This will help because 1809 * a page can get added to the free list while we are looking 1810 * at the cache list, then another page could be added to the cache 1811 * list allowing the page on the free list to be removed as we 1812 * move from looking at the cache list to the free list. This 1813 * could happen over and over. We would never find the page 1814 * we have accounted for. 1815 * 1816 * Noreloc pages are a subset of the global (relocatable) page pool. 1817 * They are not tracked separately in the pcf bins, so it is 1818 * impossible to know when doing pcf accounting if the available 1819 * page(s) are noreloc pages or not. When looking for a noreloc page 1820 * it is quite easy to end up here even if the global (relocatable) 1821 * page pool has plenty of free pages but the noreloc pool is empty. 1822 * 1823 * When the noreloc pool is empty (or low), additional noreloc pages 1824 * are created by converting pages from the global page pool. This 1825 * process will stall during pcf accounting if the pcf bins are 1826 * already locked. Such is the case when a noreloc allocation is 1827 * looping here in page_create_get_something waiting for more noreloc 1828 * pages to appear. 1829 * 1830 * Short of adding a new field to the pcf bins to accurately track 1831 * the number of free noreloc pages, we instead do not grab the 1832 * pcgs_lock, do not set the pcf blocks and do not timeout when 1833 * allocating a noreloc page. This allows noreloc allocations to 1834 * loop without blocking global page pool allocations. 1835 * 1836 * NOTE: the behaviour of page_create_get_something has not changed 1837 * for the case of global page pool allocations. 1838 */ 1839 1840 flags &= ~PG_MATCH_COLOR; 1841 locked = 0; 1842 #if defined(__i386) || defined(__amd64) 1843 flags = page_create_update_flags_x86(flags); 1844 #endif 1845 1846 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 1847 1848 for (count = 0; kcage_on || count < MAX_PCGS; count++) { 1849 pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 1850 flags, lgrp); 1851 if (pp == NULL) { 1852 pp = page_get_cachelist(vp, off, seg, vaddr, 1853 flags, lgrp); 1854 } 1855 if (pp == NULL) { 1856 /* 1857 * Serialize. Don't fight with other pcgs(). 1858 */ 1859 if (!locked && (!kcage_on || !(flags & PG_NORELOC))) { 1860 mutex_enter(&pcgs_lock); 1861 VM_STAT_ADD(pcgs_locked); 1862 locked = 1; 1863 p = pcf; 1864 for (i = 0; i < PCF_FANOUT; i++) { 1865 mutex_enter(&p->pcf_lock); 1866 ASSERT(p->pcf_block == 0); 1867 p->pcf_block = 1; 1868 p->pcf_reserve = p->pcf_count; 1869 p->pcf_count = 0; 1870 mutex_exit(&p->pcf_lock); 1871 p++; 1872 } 1873 freemem = 0; 1874 } 1875 1876 if (count) { 1877 /* 1878 * Since page_free() puts pages on 1879 * a list then accounts for it, we 1880 * just have to wait for page_free() 1881 * to unlock any page it was working 1882 * with. The page_lock()-page_reclaim() 1883 * path falls in the same boat. 
1884 * 1885 * We don't need to check on the 1886 * PG_WAIT flag, we have already 1887 * accounted for the page we are 1888 * looking for in page_create_va(). 1889 * 1890 * We just wait a moment to let any 1891 * locked pages on the lists free up, 1892 * then continue around and try again. 1893 * 1894 * Will be awakened by set_freemem(). 1895 */ 1896 mutex_enter(&pcgs_wait_lock); 1897 cv_wait(&pcgs_cv, &pcgs_wait_lock); 1898 mutex_exit(&pcgs_wait_lock); 1899 } 1900 } else { 1901 #ifdef VM_STATS 1902 if (count >= PCGS_TRIES) { 1903 VM_STAT_ADD(pcgs_too_many); 1904 } else { 1905 VM_STAT_ADD(pcgs_counts[count]); 1906 } 1907 #endif 1908 if (locked) { 1909 pcgs_unblock(); 1910 mutex_exit(&pcgs_lock); 1911 } 1912 if (cagelocked) 1913 mutex_exit(&pcgs_cagelock); 1914 return (pp); 1915 } 1916 } 1917 /* 1918 * we go down holding the pcf locks. 1919 */ 1920 panic("no %spage found %d", 1921 ((flags & PG_NORELOC) ? "non-reloc " : ""), count); 1922 /*NOTREACHED*/ 1923 } 1924 1925 /* 1926 * Create enough pages for "bytes" worth of data starting at 1927 * "off" in "vp". 1928 * 1929 * Where flag must be one of: 1930 * 1931 * PG_EXCL: Exclusive create (fail if any page already 1932 * exists in the page cache) which does not 1933 * wait for memory to become available. 1934 * 1935 * PG_WAIT: Non-exclusive create which can wait for 1936 * memory to become available. 1937 * 1938 * PG_PHYSCONTIG: Allocate physically contiguous pages. 1939 * (Not Supported) 1940 * 1941 * A doubly linked list of pages is returned to the caller. Each page 1942 * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock) 1943 * lock. 1944 * 1945 * Unable to change the parameters to page_create() in a minor release, 1946 * we renamed page_create() to page_create_va(), changed all known calls 1947 * from page_create() to page_create_va(), and created this wrapper. 1948 * 1949 * Upon a major release, we should break compatibility by deleting this 1950 * wrapper, and replacing all the strings "page_create_va", with "page_create". 1951 * 1952 * NOTE: There is a copy of this interface as page_create_io() in 1953 * i86/vm/vm_machdep.c. Any bugs fixed here should be applied 1954 * there. 1955 */ 1956 page_t * 1957 page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags) 1958 { 1959 caddr_t random_vaddr; 1960 struct seg kseg; 1961 1962 #ifdef DEBUG 1963 cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p", 1964 (void *)caller()); 1965 #endif 1966 1967 random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^ 1968 (uintptr_t)(off >> PAGESHIFT)); 1969 kseg.s_as = &kas; 1970 1971 return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr)); 1972 } 1973 1974 #ifdef DEBUG 1975 uint32_t pg_alloc_pgs_mtbf = 0; 1976 #endif 1977 1978 /* 1979 * Used for large page support. It will attempt to allocate 1980 * a large page(s) off the freelist. 1981 * 1982 * Returns non zero on failure. 1983 */ 1984 int 1985 page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr, 1986 page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz, int pgflags) 1987 { 1988 pgcnt_t npgs, curnpgs, totpgs; 1989 size_t pgsz; 1990 page_t *pplist = NULL, *pp; 1991 int err = 0; 1992 lgrp_t *lgrp; 1993 1994 ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1)); 1995 ASSERT(pgflags == 0 || pgflags == PG_LOCAL); 1996 1997 /* 1998 * Check if system heavily prefers local large pages over remote 1999 * on systems with multiple lgroups. 
2000 */ 2001 if (lpg_alloc_prefer == LPAP_LOCAL && nlgrps > 1) { 2002 pgflags = PG_LOCAL; 2003 } 2004 2005 VM_STAT_ADD(alloc_pages[0]); 2006 2007 #ifdef DEBUG 2008 if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) { 2009 return (ENOMEM); 2010 } 2011 #endif 2012 2013 /* 2014 * One must be NULL but not both. 2015 * And one must be non NULL but not both. 2016 */ 2017 ASSERT(basepp != NULL || ppa != NULL); 2018 ASSERT(basepp == NULL || ppa == NULL); 2019 2020 #if defined(__i386) || defined(__amd64) 2021 while (page_chk_freelist(szc) == 0) { 2022 VM_STAT_ADD(alloc_pages[8]); 2023 if (anypgsz == 0 || --szc == 0) 2024 return (ENOMEM); 2025 } 2026 #endif 2027 2028 pgsz = page_get_pagesize(szc); 2029 totpgs = curnpgs = npgs = pgsz >> PAGESHIFT; 2030 2031 ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0); 2032 2033 (void) page_create_wait(npgs, PG_WAIT); 2034 2035 while (npgs && szc) { 2036 lgrp = lgrp_mem_choose(seg, addr, pgsz); 2037 if (pgflags == PG_LOCAL) { 2038 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2039 pgflags, lgrp); 2040 if (pp == NULL) { 2041 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2042 0, lgrp); 2043 } 2044 } else { 2045 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2046 0, lgrp); 2047 } 2048 if (pp != NULL) { 2049 VM_STAT_ADD(alloc_pages[1]); 2050 page_list_concat(&pplist, &pp); 2051 ASSERT(npgs >= curnpgs); 2052 npgs -= curnpgs; 2053 } else if (anypgsz) { 2054 VM_STAT_ADD(alloc_pages[2]); 2055 szc--; 2056 pgsz = page_get_pagesize(szc); 2057 curnpgs = pgsz >> PAGESHIFT; 2058 } else { 2059 VM_STAT_ADD(alloc_pages[3]); 2060 ASSERT(npgs == totpgs); 2061 page_create_putback(npgs); 2062 return (ENOMEM); 2063 } 2064 } 2065 if (szc == 0) { 2066 VM_STAT_ADD(alloc_pages[4]); 2067 ASSERT(npgs != 0); 2068 page_create_putback(npgs); 2069 err = ENOMEM; 2070 } else if (basepp != NULL) { 2071 ASSERT(npgs == 0); 2072 ASSERT(ppa == NULL); 2073 *basepp = pplist; 2074 } 2075 2076 npgs = totpgs - npgs; 2077 pp = pplist; 2078 2079 /* 2080 * Clear the free and age bits. Also if we were passed in a ppa then 2081 * fill it in with all the constituent pages from the large page. But 2082 * if we failed to allocate all the pages just free what we got. 2083 */ 2084 while (npgs != 0) { 2085 ASSERT(PP_ISFREE(pp)); 2086 ASSERT(PP_ISAGED(pp)); 2087 if (ppa != NULL || err != 0) { 2088 if (err == 0) { 2089 VM_STAT_ADD(alloc_pages[5]); 2090 PP_CLRFREE(pp); 2091 PP_CLRAGED(pp); 2092 page_sub(&pplist, pp); 2093 *ppa++ = pp; 2094 npgs--; 2095 } else { 2096 VM_STAT_ADD(alloc_pages[6]); 2097 ASSERT(pp->p_szc != 0); 2098 curnpgs = page_get_pagecnt(pp->p_szc); 2099 page_list_break(&pp, &pplist, curnpgs); 2100 page_list_add_pages(pp, 0); 2101 page_create_putback(curnpgs); 2102 ASSERT(npgs >= curnpgs); 2103 npgs -= curnpgs; 2104 } 2105 pp = pplist; 2106 } else { 2107 VM_STAT_ADD(alloc_pages[7]); 2108 PP_CLRFREE(pp); 2109 PP_CLRAGED(pp); 2110 pp = pp->p_next; 2111 npgs--; 2112 } 2113 } 2114 return (err); 2115 } 2116 2117 /* 2118 * Get a single large page off of the freelists, and set it up for use. 2119 * Number of bytes requested must be a supported page size. 2120 * 2121 * Note that this call may fail even if there is sufficient 2122 * memory available or PG_WAIT is set, so the caller must 2123 * be willing to fallback on page_create_va(), block and retry, 2124 * or fail the requester. 
2125 */ 2126 page_t * 2127 page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2128 struct seg *seg, caddr_t vaddr, void *arg) 2129 { 2130 pgcnt_t npages, pcftotal; 2131 page_t *pp; 2132 page_t *rootpp; 2133 lgrp_t *lgrp; 2134 uint_t enough; 2135 uint_t pcf_index; 2136 uint_t i; 2137 struct pcf *p; 2138 struct pcf *q; 2139 lgrp_id_t *lgrpid = (lgrp_id_t *)arg; 2140 2141 ASSERT(vp != NULL); 2142 2143 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2144 PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0); 2145 /* but no others */ 2146 2147 ASSERT((flags & PG_EXCL) == PG_EXCL); 2148 2149 npages = btop(bytes); 2150 2151 if (!kcage_on || panicstr) { 2152 /* 2153 * Cage is OFF, or we are single threaded in 2154 * panic, so make everything a RELOC request. 2155 */ 2156 flags &= ~PG_NORELOC; 2157 } 2158 2159 /* 2160 * Make sure there's adequate physical memory available. 2161 * Note: PG_WAIT is ignored here. 2162 */ 2163 if (freemem <= throttlefree + npages) { 2164 VM_STAT_ADD(page_create_large_cnt[1]); 2165 return (NULL); 2166 } 2167 2168 /* 2169 * If cage is on, dampen draw from cage when available 2170 * cage space is low. 2171 */ 2172 if ((flags & (PG_NORELOC | PG_WAIT)) == (PG_NORELOC | PG_WAIT) && 2173 kcage_freemem < kcage_throttlefree + npages) { 2174 2175 /* 2176 * The cage is on, the caller wants PG_NORELOC 2177 * pages and available cage memory is very low. 2178 * Call kcage_create_throttle() to attempt to 2179 * control demand on the cage. 2180 */ 2181 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) { 2182 VM_STAT_ADD(page_create_large_cnt[2]); 2183 return (NULL); 2184 } 2185 } 2186 2187 enough = 0; 2188 pcf_index = PCF_INDEX(); 2189 p = &pcf[pcf_index]; 2190 q = &pcf[PCF_FANOUT]; 2191 for (pcftotal = 0, i = 0; i < PCF_FANOUT; i++) { 2192 if (p->pcf_count > npages) { 2193 /* 2194 * a good one to try. 2195 */ 2196 mutex_enter(&p->pcf_lock); 2197 if (p->pcf_count > npages) { 2198 p->pcf_count -= (uint_t)npages; 2199 /* 2200 * freemem is not protected by any lock. 2201 * Thus, we cannot have any assertion 2202 * containing freemem here. 2203 */ 2204 freemem -= npages; 2205 enough = 1; 2206 mutex_exit(&p->pcf_lock); 2207 break; 2208 } 2209 mutex_exit(&p->pcf_lock); 2210 } 2211 pcftotal += p->pcf_count; 2212 p++; 2213 if (p >= q) { 2214 p = pcf; 2215 } 2216 } 2217 2218 if (!enough) { 2219 /* If there isn't enough memory available, give up. */ 2220 if (pcftotal < npages) { 2221 VM_STAT_ADD(page_create_large_cnt[3]); 2222 return (NULL); 2223 } 2224 2225 /* try to collect pages from several pcf bins */ 2226 for (p = pcf, pcftotal = 0, i = 0; i < PCF_FANOUT; i++) { 2227 mutex_enter(&p->pcf_lock); 2228 pcftotal += p->pcf_count; 2229 if (pcftotal >= npages) { 2230 /* 2231 * Wow! There are enough pages laying around 2232 * to satisfy the request. Do the accounting, 2233 * drop the locks we acquired, and go back. 2234 * 2235 * freemem is not protected by any lock. So, 2236 * we cannot have any assertion containing 2237 * freemem. 
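 *
 * Note that the walk back down the array below (while p >= pcf)
 * both debits each bucket we locked and drops its pcf_lock, so no
 * pcf locks are held on the way out and exactly npages have been
 * taken in total.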
2238 */ 2239 pgcnt_t tpages = npages; 2240 freemem -= npages; 2241 while (p >= pcf) { 2242 if (p->pcf_count <= tpages) { 2243 tpages -= p->pcf_count; 2244 p->pcf_count = 0; 2245 } else { 2246 p->pcf_count -= (uint_t)tpages; 2247 tpages = 0; 2248 } 2249 mutex_exit(&p->pcf_lock); 2250 p--; 2251 } 2252 ASSERT(tpages == 0); 2253 break; 2254 } 2255 p++; 2256 } 2257 if (i == PCF_FANOUT) { 2258 /* failed to collect pages - release the locks */ 2259 while (--p >= pcf) { 2260 mutex_exit(&p->pcf_lock); 2261 } 2262 VM_STAT_ADD(page_create_large_cnt[4]); 2263 return (NULL); 2264 } 2265 } 2266 2267 /* 2268 * This is where this function behaves fundamentally differently 2269 * than page_create_va(); since we're intending to map the page 2270 * with a single TTE, we have to get it as a physically contiguous 2271 * hardware pagesize chunk. If we can't, we fail. 2272 */ 2273 if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max && 2274 LGRP_EXISTS(lgrp_table[*lgrpid])) 2275 lgrp = lgrp_table[*lgrpid]; 2276 else 2277 lgrp = lgrp_mem_choose(seg, vaddr, bytes); 2278 2279 if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr, 2280 bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) { 2281 page_create_putback(npages); 2282 VM_STAT_ADD(page_create_large_cnt[5]); 2283 return (NULL); 2284 } 2285 2286 /* 2287 * if we got the page with the wrong mtype give it back this is a 2288 * workaround for CR 6249718. When CR 6249718 is fixed we never get 2289 * inside "if" and the workaround becomes just a nop 2290 */ 2291 if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) { 2292 page_list_add_pages(rootpp, 0); 2293 page_create_putback(npages); 2294 VM_STAT_ADD(page_create_large_cnt[6]); 2295 return (NULL); 2296 } 2297 2298 /* 2299 * If satisfying this request has left us with too little 2300 * memory, start the wheels turning to get some back. The 2301 * first clause of the test prevents waking up the pageout 2302 * daemon in situations where it would decide that there's 2303 * nothing to do. 
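 *
 * (nscan is how far the pageout scanner has gotten in its current
 * pass and desscan is its target; once nscan reaches desscan the
 * scanner has already done the work this wakeup would ask of it.)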
2304 */ 2305 if (nscan < desscan && freemem < minfree) { 2306 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2307 "pageout_cv_signal:freemem %ld", freemem); 2308 cv_signal(&proc_pageout->p_cv); 2309 } 2310 2311 pp = rootpp; 2312 while (npages--) { 2313 ASSERT(PAGE_EXCL(pp)); 2314 ASSERT(pp->p_vnode == NULL); 2315 ASSERT(!hat_page_is_mapped(pp)); 2316 PP_CLRFREE(pp); 2317 PP_CLRAGED(pp); 2318 if (!page_hashin(pp, vp, off, NULL)) 2319 panic("page_create_large: hashin failed: page %p", 2320 (void *)pp); 2321 page_io_lock(pp); 2322 off += PAGESIZE; 2323 pp = pp->p_next; 2324 } 2325 2326 VM_STAT_ADD(page_create_large_cnt[0]); 2327 return (rootpp); 2328 } 2329 2330 page_t * 2331 page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2332 struct seg *seg, caddr_t vaddr) 2333 { 2334 page_t *plist = NULL; 2335 pgcnt_t npages; 2336 pgcnt_t found_on_free = 0; 2337 pgcnt_t pages_req; 2338 page_t *npp = NULL; 2339 uint_t enough; 2340 uint_t i; 2341 uint_t pcf_index; 2342 struct pcf *p; 2343 struct pcf *q; 2344 lgrp_t *lgrp; 2345 2346 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START, 2347 "page_create_start:vp %p off %llx bytes %lu flags %x", 2348 vp, off, bytes, flags); 2349 2350 ASSERT(bytes != 0 && vp != NULL); 2351 2352 if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) { 2353 panic("page_create: invalid flags"); 2354 /*NOTREACHED*/ 2355 } 2356 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2357 PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0); 2358 /* but no others */ 2359 2360 pages_req = npages = btopr(bytes); 2361 /* 2362 * Try to see whether request is too large to *ever* be 2363 * satisfied, in order to prevent deadlock. We arbitrarily 2364 * decide to limit maximum size requests to max_page_get. 2365 */ 2366 if (npages >= max_page_get) { 2367 if ((flags & PG_WAIT) == 0) { 2368 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG, 2369 "page_create_toobig:vp %p off %llx npages " 2370 "%lu max_page_get %lu", 2371 vp, off, npages, max_page_get); 2372 return (NULL); 2373 } else { 2374 cmn_err(CE_WARN, 2375 "Request for too much kernel memory " 2376 "(%lu bytes), will hang forever", bytes); 2377 for (;;) 2378 delay(1000000000); 2379 } 2380 } 2381 2382 if (!kcage_on || panicstr) { 2383 /* 2384 * Cage is OFF, or we are single threaded in 2385 * panic, so make everything a RELOC request. 2386 */ 2387 flags &= ~PG_NORELOC; 2388 } 2389 2390 if (freemem <= throttlefree + npages) 2391 if (!page_create_throttle(npages, flags)) 2392 return (NULL); 2393 2394 /* 2395 * If cage is on, dampen draw from cage when available 2396 * cage space is low. 2397 */ 2398 if ((flags & PG_NORELOC) && 2399 kcage_freemem < kcage_throttlefree + npages) { 2400 2401 /* 2402 * The cage is on, the caller wants PG_NORELOC 2403 * pages and available cage memory is very low. 2404 * Call kcage_create_throttle() to attempt to 2405 * control demand on the cage. 2406 */ 2407 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) 2408 return (NULL); 2409 } 2410 2411 VM_STAT_ADD(page_create_cnt[0]); 2412 2413 enough = 0; 2414 pcf_index = PCF_INDEX(); 2415 2416 p = &pcf[pcf_index]; 2417 q = &pcf[PCF_FANOUT]; 2418 for (i = 0; i < PCF_FANOUT; i++) { 2419 if (p->pcf_count > npages) { 2420 /* 2421 * a good one to try. 2422 */ 2423 mutex_enter(&p->pcf_lock); 2424 if (p->pcf_count > npages) { 2425 p->pcf_count -= (uint_t)npages; 2426 /* 2427 * freemem is not protected by any lock. 2428 * Thus, we cannot have any assertion 2429 * containing freemem here. 
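 *
 * When this branch is taken the single bucket selected by
 * PCF_INDEX() (hashed from the CPU id) has covered the whole
 * request, at most one pcf_lock was held at a time, and the
 * slower page_create_wait() path below is skipped entirely.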
2430 */ 2431 freemem -= npages; 2432 enough = 1; 2433 mutex_exit(&p->pcf_lock); 2434 break; 2435 } 2436 mutex_exit(&p->pcf_lock); 2437 } 2438 p++; 2439 if (p >= q) { 2440 p = pcf; 2441 } 2442 } 2443 2444 if (!enough) { 2445 /* 2446 * Have to look harder. If npages is greater than 2447 * one, then we might have to coalesce the counters. 2448 * 2449 * Go wait. We come back having accounted 2450 * for the memory. 2451 */ 2452 VM_STAT_ADD(page_create_cnt[1]); 2453 if (!page_create_wait(npages, flags)) { 2454 VM_STAT_ADD(page_create_cnt[2]); 2455 return (NULL); 2456 } 2457 } 2458 2459 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS, 2460 "page_create_success:vp %p off %llx", vp, off); 2461 2462 /* 2463 * If satisfying this request has left us with too little 2464 * memory, start the wheels turning to get some back. The 2465 * first clause of the test prevents waking up the pageout 2466 * daemon in situations where it would decide that there's 2467 * nothing to do. 2468 */ 2469 if (nscan < desscan && freemem < minfree) { 2470 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2471 "pageout_cv_signal:freemem %ld", freemem); 2472 cv_signal(&proc_pageout->p_cv); 2473 } 2474 2475 /* 2476 * Loop around collecting the requested number of pages. 2477 * Most of the time, we have to `create' a new page. With 2478 * this in mind, pull the page off the free list before 2479 * getting the hash lock. This will minimize the hash 2480 * lock hold time, nesting, and the like. If it turns 2481 * out we don't need the page, we put it back at the end. 2482 */ 2483 while (npages--) { 2484 page_t *pp; 2485 kmutex_t *phm = NULL; 2486 ulong_t index; 2487 2488 index = PAGE_HASH_FUNC(vp, off); 2489 top: 2490 ASSERT(phm == NULL); 2491 ASSERT(index == PAGE_HASH_FUNC(vp, off)); 2492 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 2493 2494 if (npp == NULL) { 2495 /* 2496 * Try to get a page from the freelist (ie, 2497 * a page with no [vp, off] tag). If that 2498 * fails, use the cachelist. 2499 * 2500 * During the first attempt at both the free 2501 * and cache lists we try for the correct color. 2502 */ 2503 /* 2504 * XXXX-how do we deal with virtual indexed 2505 * caches and and colors? 2506 */ 2507 VM_STAT_ADD(page_create_cnt[4]); 2508 /* 2509 * Get lgroup to allocate next page of shared memory 2510 * from and use it to specify where to allocate 2511 * the physical memory 2512 */ 2513 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 2514 npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 2515 flags | PG_MATCH_COLOR, lgrp); 2516 if (npp == NULL) { 2517 npp = page_get_cachelist(vp, off, seg, 2518 vaddr, flags | PG_MATCH_COLOR, lgrp); 2519 if (npp == NULL) { 2520 npp = page_create_get_something(vp, 2521 off, seg, vaddr, 2522 flags & ~PG_MATCH_COLOR); 2523 } 2524 2525 if (PP_ISAGED(npp) == 0) { 2526 /* 2527 * Since this page came from the 2528 * cachelist, we must destroy the 2529 * old vnode association. 2530 */ 2531 page_hashout(npp, NULL); 2532 } 2533 } 2534 } 2535 2536 /* 2537 * We own this page! 2538 */ 2539 ASSERT(PAGE_EXCL(npp)); 2540 ASSERT(npp->p_vnode == NULL); 2541 ASSERT(!hat_page_is_mapped(npp)); 2542 PP_CLRFREE(npp); 2543 PP_CLRAGED(npp); 2544 2545 /* 2546 * Here we have a page in our hot little mits and are 2547 * just waiting to stuff it on the appropriate lists. 2548 * Get the mutex and check to see if it really does 2549 * not exist. 
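 *
 * The protocol is: take the hash bucket mutex, search for
 * [vp, off], and only if nothing is found hash npp in under that
 * same mutex.  Holding phm across both the search and the
 * page_hashin() call is what makes PG_EXCL creation atomic with
 * respect to other creators of the same name.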
2550 */ 2551 phm = PAGE_HASH_MUTEX(index); 2552 mutex_enter(phm); 2553 PAGE_HASH_SEARCH(index, pp, vp, off); 2554 if (pp == NULL) { 2555 VM_STAT_ADD(page_create_new); 2556 pp = npp; 2557 npp = NULL; 2558 if (!page_hashin(pp, vp, off, phm)) { 2559 /* 2560 * Since we hold the page hash mutex and 2561 * just searched for this page, page_hashin 2562 * had better not fail. If it does, that 2563 * means somethread did not follow the 2564 * page hash mutex rules. Panic now and 2565 * get it over with. As usual, go down 2566 * holding all the locks. 2567 */ 2568 ASSERT(MUTEX_HELD(phm)); 2569 panic("page_create: " 2570 "hashin failed %p %p %llx %p", 2571 (void *)pp, (void *)vp, off, (void *)phm); 2572 /*NOTREACHED*/ 2573 } 2574 ASSERT(MUTEX_HELD(phm)); 2575 mutex_exit(phm); 2576 phm = NULL; 2577 2578 /* 2579 * Hat layer locking need not be done to set 2580 * the following bits since the page is not hashed 2581 * and was on the free list (i.e., had no mappings). 2582 * 2583 * Set the reference bit to protect 2584 * against immediate pageout 2585 * 2586 * XXXmh modify freelist code to set reference 2587 * bit so we don't have to do it here. 2588 */ 2589 page_set_props(pp, P_REF); 2590 found_on_free++; 2591 } else { 2592 VM_STAT_ADD(page_create_exists); 2593 if (flags & PG_EXCL) { 2594 /* 2595 * Found an existing page, and the caller 2596 * wanted all new pages. Undo all of the work 2597 * we have done. 2598 */ 2599 mutex_exit(phm); 2600 phm = NULL; 2601 while (plist != NULL) { 2602 pp = plist; 2603 page_sub(&plist, pp); 2604 page_io_unlock(pp); 2605 /* large pages should not end up here */ 2606 ASSERT(pp->p_szc == 0); 2607 /*LINTED: constant in conditional ctx*/ 2608 VN_DISPOSE(pp, B_INVAL, 0, kcred); 2609 } 2610 VM_STAT_ADD(page_create_found_one); 2611 goto fail; 2612 } 2613 ASSERT(flags & PG_WAIT); 2614 if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) { 2615 /* 2616 * Start all over again if we blocked trying 2617 * to lock the page. 2618 */ 2619 mutex_exit(phm); 2620 VM_STAT_ADD(page_create_page_lock_failed); 2621 phm = NULL; 2622 goto top; 2623 } 2624 mutex_exit(phm); 2625 phm = NULL; 2626 2627 if (PP_ISFREE(pp)) { 2628 ASSERT(PP_ISAGED(pp) == 0); 2629 VM_STAT_ADD(pagecnt.pc_get_cache); 2630 page_list_sub(pp, PG_CACHE_LIST); 2631 PP_CLRFREE(pp); 2632 found_on_free++; 2633 } 2634 } 2635 2636 /* 2637 * Got a page! It is locked. Acquire the i/o 2638 * lock since we are going to use the p_next and 2639 * p_prev fields to link the requested pages together. 2640 */ 2641 page_io_lock(pp); 2642 page_add(&plist, pp); 2643 plist = plist->p_next; 2644 off += PAGESIZE; 2645 vaddr += PAGESIZE; 2646 } 2647 2648 ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1); 2649 fail: 2650 if (npp != NULL) { 2651 /* 2652 * Did not need this page after all. 2653 * Put it back on the free list. 
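 *
 * It never acquired an identity, so it is marked aged, given a
 * p_offset of -1, and returned to the tail of the free list
 * (not the cache list).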
2654 */ 2655 VM_STAT_ADD(page_create_putbacks); 2656 PP_SETFREE(npp); 2657 PP_SETAGED(npp); 2658 npp->p_offset = (u_offset_t)-1; 2659 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL); 2660 page_unlock(npp); 2661 2662 } 2663 2664 ASSERT(pages_req >= found_on_free); 2665 2666 { 2667 uint_t overshoot = (uint_t)(pages_req - found_on_free); 2668 2669 if (overshoot) { 2670 VM_STAT_ADD(page_create_overshoot); 2671 p = &pcf[pcf_index]; 2672 mutex_enter(&p->pcf_lock); 2673 if (p->pcf_block) { 2674 p->pcf_reserve += overshoot; 2675 } else { 2676 p->pcf_count += overshoot; 2677 if (p->pcf_wait) { 2678 mutex_enter(&new_freemem_lock); 2679 if (freemem_wait) { 2680 cv_signal(&freemem_cv); 2681 p->pcf_wait--; 2682 } else { 2683 p->pcf_wait = 0; 2684 } 2685 mutex_exit(&new_freemem_lock); 2686 } 2687 } 2688 mutex_exit(&p->pcf_lock); 2689 /* freemem is approximate, so this test OK */ 2690 if (!p->pcf_block) 2691 freemem += overshoot; 2692 } 2693 } 2694 2695 return (plist); 2696 } 2697 2698 /* 2699 * One or more constituent pages of this large page has been marked 2700 * toxic. Simply demote the large page to PAGESIZE pages and let 2701 * page_free() handle it. This routine should only be called by 2702 * large page free routines (page_free_pages() and page_destroy_pages(). 2703 * All pages are locked SE_EXCL and have already been marked free. 2704 */ 2705 static void 2706 page_free_toxic_pages(page_t *rootpp) 2707 { 2708 page_t *tpp; 2709 pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc); 2710 uint_t szc = rootpp->p_szc; 2711 2712 for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) { 2713 ASSERT(tpp->p_szc == szc); 2714 ASSERT((PAGE_EXCL(tpp) && 2715 !page_iolock_assert(tpp)) || panicstr); 2716 tpp->p_szc = 0; 2717 } 2718 2719 while (rootpp != NULL) { 2720 tpp = rootpp; 2721 page_sub(&rootpp, tpp); 2722 ASSERT(PP_ISFREE(tpp)); 2723 PP_CLRFREE(tpp); 2724 page_free(tpp, 1); 2725 } 2726 } 2727 2728 /* 2729 * Put page on the "free" list. 2730 * The free list is really two lists maintained by 2731 * the PSM of whatever machine we happen to be on. 2732 */ 2733 void 2734 page_free(page_t *pp, int dontneed) 2735 { 2736 struct pcf *p; 2737 uint_t pcf_index; 2738 2739 ASSERT((PAGE_EXCL(pp) && 2740 !page_iolock_assert(pp)) || panicstr); 2741 2742 if (PP_ISFREE(pp)) { 2743 panic("page_free: page %p is free", (void *)pp); 2744 } 2745 2746 if (pp->p_szc != 0) { 2747 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 2748 PP_ISKAS(pp)) { 2749 panic("page_free: anon or kernel " 2750 "or no vnode large page %p", (void *)pp); 2751 } 2752 page_demote_vp_pages(pp); 2753 ASSERT(pp->p_szc == 0); 2754 } 2755 2756 /* 2757 * The page_struct_lock need not be acquired to examine these 2758 * fields since the page has an "exclusive" lock. 2759 */ 2760 if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 2761 pp->p_slckcnt != 0) { 2762 panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d " 2763 "slckcnt = %d", pp, page_pptonum(pp), pp->p_lckcnt, 2764 pp->p_cowcnt, pp->p_slckcnt); 2765 /*NOTREACHED*/ 2766 } 2767 2768 ASSERT(!hat_page_getshare(pp)); 2769 2770 PP_SETFREE(pp); 2771 ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) || 2772 !hat_ismod(pp)); 2773 page_clr_all_props(pp); 2774 ASSERT(!hat_page_getshare(pp)); 2775 2776 /* 2777 * Now we add the page to the head of the free list. 2778 * But if this page is associated with a paged vnode 2779 * then we adjust the head forward so that the page is 2780 * effectively at the end of the list. 
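 *
 * Concretely: identity-less pages go to the tail of the free
 * list, pages that still name a vnode go onto the cache list,
 * and the dontneed hint (unless nopageage overrides it) puts
 * them at the head of the cache list so they are reused sooner.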
2781 */ 2782 if (pp->p_vnode == NULL) { 2783 /* 2784 * Page has no identity, put it on the free list. 2785 */ 2786 PP_SETAGED(pp); 2787 pp->p_offset = (u_offset_t)-1; 2788 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 2789 VM_STAT_ADD(pagecnt.pc_free_free); 2790 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2791 "page_free_free:pp %p", pp); 2792 } else { 2793 PP_CLRAGED(pp); 2794 2795 if (!dontneed || nopageage) { 2796 /* move it to the tail of the list */ 2797 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL); 2798 2799 VM_STAT_ADD(pagecnt.pc_free_cache); 2800 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL, 2801 "page_free_cache_tail:pp %p", pp); 2802 } else { 2803 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD); 2804 2805 VM_STAT_ADD(pagecnt.pc_free_dontneed); 2806 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD, 2807 "page_free_cache_head:pp %p", pp); 2808 } 2809 } 2810 page_unlock(pp); 2811 2812 /* 2813 * Now do the `freemem' accounting. 2814 */ 2815 pcf_index = PCF_INDEX(); 2816 p = &pcf[pcf_index]; 2817 2818 mutex_enter(&p->pcf_lock); 2819 if (p->pcf_block) { 2820 p->pcf_reserve += 1; 2821 } else { 2822 p->pcf_count += 1; 2823 if (p->pcf_wait) { 2824 mutex_enter(&new_freemem_lock); 2825 /* 2826 * Check to see if some other thread 2827 * is actually waiting. Another bucket 2828 * may have woken it up by now. If there 2829 * are no waiters, then set our pcf_wait 2830 * count to zero to avoid coming in here 2831 * next time. Also, since only one page 2832 * was put on the free list, just wake 2833 * up one waiter. 2834 */ 2835 if (freemem_wait) { 2836 cv_signal(&freemem_cv); 2837 p->pcf_wait--; 2838 } else { 2839 p->pcf_wait = 0; 2840 } 2841 mutex_exit(&new_freemem_lock); 2842 } 2843 } 2844 mutex_exit(&p->pcf_lock); 2845 2846 /* freemem is approximate, so this test OK */ 2847 if (!p->pcf_block) 2848 freemem += 1; 2849 } 2850 2851 /* 2852 * Put page on the "free" list during intial startup. 2853 * This happens during initial single threaded execution. 2854 */ 2855 void 2856 page_free_at_startup(page_t *pp) 2857 { 2858 struct pcf *p; 2859 uint_t pcf_index; 2860 2861 page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT); 2862 VM_STAT_ADD(pagecnt.pc_free_free); 2863 2864 /* 2865 * Now do the `freemem' accounting. 
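 *
 * No pcf_lock is taken: we are still single threaded this early
 * in boot, which is also why the pcf_block and pcf_wait ASSERTs
 * below can be made without holding it.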
2866 */ 2867 pcf_index = PCF_INDEX(); 2868 p = &pcf[pcf_index]; 2869 2870 ASSERT(p->pcf_block == 0); 2871 ASSERT(p->pcf_wait == 0); 2872 p->pcf_count += 1; 2873 2874 /* freemem is approximate, so this is OK */ 2875 freemem += 1; 2876 } 2877 2878 void 2879 page_free_pages(page_t *pp) 2880 { 2881 page_t *tpp, *rootpp = NULL; 2882 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 2883 pgcnt_t i; 2884 uint_t szc = pp->p_szc; 2885 2886 VM_STAT_ADD(pagecnt.pc_free_pages); 2887 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2888 "page_free_free:pp %p", pp); 2889 2890 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 2891 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 2892 panic("page_free_pages: not root page %p", (void *)pp); 2893 /*NOTREACHED*/ 2894 } 2895 2896 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 2897 ASSERT((PAGE_EXCL(tpp) && 2898 !page_iolock_assert(tpp)) || panicstr); 2899 if (PP_ISFREE(tpp)) { 2900 panic("page_free_pages: page %p is free", (void *)tpp); 2901 /*NOTREACHED*/ 2902 } 2903 if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 || 2904 tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) { 2905 panic("page_free_pages %p", (void *)tpp); 2906 /*NOTREACHED*/ 2907 } 2908 2909 ASSERT(!hat_page_getshare(tpp)); 2910 ASSERT(tpp->p_vnode == NULL); 2911 ASSERT(tpp->p_szc == szc); 2912 2913 PP_SETFREE(tpp); 2914 page_clr_all_props(tpp); 2915 PP_SETAGED(tpp); 2916 tpp->p_offset = (u_offset_t)-1; 2917 ASSERT(tpp->p_next == tpp); 2918 ASSERT(tpp->p_prev == tpp); 2919 page_list_concat(&rootpp, &tpp); 2920 } 2921 ASSERT(rootpp == pp); 2922 2923 page_list_add_pages(rootpp, 0); 2924 page_create_putback(pgcnt); 2925 } 2926 2927 int free_pages = 1; 2928 2929 /* 2930 * This routine attempts to return pages to the cachelist via page_release(). 2931 * It does not *have* to be successful in all cases, since the pageout scanner 2932 * will catch any pages it misses. It does need to be fast and not introduce 2933 * too much overhead. 2934 * 2935 * If a page isn't found on the unlocked sweep of the page_hash bucket, we 2936 * don't lock and retry. This is ok, since the page scanner will eventually 2937 * find any page we miss in free_vp_pages(). 2938 */ 2939 void 2940 free_vp_pages(vnode_t *vp, u_offset_t off, size_t len) 2941 { 2942 page_t *pp; 2943 u_offset_t eoff; 2944 extern int swap_in_range(vnode_t *, u_offset_t, size_t); 2945 2946 eoff = off + len; 2947 2948 if (free_pages == 0) 2949 return; 2950 if (swap_in_range(vp, off, len)) 2951 return; 2952 2953 for (; off < eoff; off += PAGESIZE) { 2954 2955 /* 2956 * find the page using a fast, but inexact search. It'll be OK 2957 * if a few pages slip through the cracks here. 2958 */ 2959 pp = page_exists(vp, off); 2960 2961 /* 2962 * If we didn't find the page (it may not exist), the page 2963 * is free, looks still in use (shared), or we can't lock it, 2964 * just give up. 2965 */ 2966 if (pp == NULL || 2967 PP_ISFREE(pp) || 2968 page_share_cnt(pp) > 0 || 2969 !page_trylock(pp, SE_EXCL)) 2970 continue; 2971 2972 /* 2973 * Once we have locked pp, verify that it's still the 2974 * correct page and not already free 2975 */ 2976 ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL)); 2977 if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) { 2978 page_unlock(pp); 2979 continue; 2980 } 2981 2982 /* 2983 * try to release the page... 2984 */ 2985 (void) page_release(pp, 1); 2986 } 2987 } 2988 2989 /* 2990 * Reclaim the given page from the free list. 
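 * The caller must already hold the page SE_EXCL, and the page must
 * still be marked free when we are called.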
2991 * If pp is part of a large pages, only the given constituent page is reclaimed 2992 * and the large page it belonged to will be demoted. This can only happen 2993 * if the page is not on the cachelist. 2994 * 2995 * Returns 1 on success or 0 on failure. 2996 * 2997 * The page is unlocked if it can't be reclaimed (when freemem == 0). 2998 * If `lock' is non-null, it will be dropped and re-acquired if 2999 * the routine must wait while freemem is 0. 3000 * 3001 * As it turns out, boot_getpages() does this. It picks a page, 3002 * based on where OBP mapped in some address, gets its pfn, searches 3003 * the memsegs, locks the page, then pulls it off the free list! 3004 */ 3005 int 3006 page_reclaim(page_t *pp, kmutex_t *lock) 3007 { 3008 struct pcf *p; 3009 uint_t pcf_index; 3010 struct cpu *cpup; 3011 int enough; 3012 uint_t i; 3013 3014 ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1); 3015 ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp)); 3016 3017 /* 3018 * If `freemem' is 0, we cannot reclaim this page from the 3019 * freelist, so release every lock we might hold: the page, 3020 * and the `lock' before blocking. 3021 * 3022 * The only way `freemem' can become 0 while there are pages 3023 * marked free (have their p->p_free bit set) is when the 3024 * system is low on memory and doing a page_create(). In 3025 * order to guarantee that once page_create() starts acquiring 3026 * pages it will be able to get all that it needs since `freemem' 3027 * was decreased by the requested amount. So, we need to release 3028 * this page, and let page_create() have it. 3029 * 3030 * Since `freemem' being zero is not supposed to happen, just 3031 * use the usual hash stuff as a starting point. If that bucket 3032 * is empty, then assume the worst, and start at the beginning 3033 * of the pcf array. If we always start at the beginning 3034 * when acquiring more than one pcf lock, there won't be any 3035 * deadlock problems. 3036 */ 3037 3038 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */ 3039 3040 if (freemem <= throttlefree && !page_create_throttle(1l, 0)) { 3041 pcf_acquire_all(); 3042 goto page_reclaim_nomem; 3043 } 3044 3045 enough = 0; 3046 pcf_index = PCF_INDEX(); 3047 p = &pcf[pcf_index]; 3048 mutex_enter(&p->pcf_lock); 3049 if (p->pcf_count >= 1) { 3050 enough = 1; 3051 p->pcf_count--; 3052 } 3053 mutex_exit(&p->pcf_lock); 3054 3055 if (!enough) { 3056 VM_STAT_ADD(page_reclaim_zero); 3057 /* 3058 * Check again. Its possible that some other thread 3059 * could have been right behind us, and added one 3060 * to a list somewhere. Acquire each of the pcf locks 3061 * until we find a page. 3062 */ 3063 p = pcf; 3064 for (i = 0; i < PCF_FANOUT; i++) { 3065 mutex_enter(&p->pcf_lock); 3066 if (p->pcf_count >= 1) { 3067 p->pcf_count -= 1; 3068 enough = 1; 3069 break; 3070 } 3071 p++; 3072 } 3073 3074 if (!enough) { 3075 page_reclaim_nomem: 3076 /* 3077 * We really can't have page `pp'. 3078 * Time for the no-memory dance with 3079 * page_free(). This is just like 3080 * page_create_wait(). Plus the added 3081 * attraction of releasing whatever mutex 3082 * we held when we were called with in `lock'. 3083 * Page_unlock() will wakeup any thread 3084 * waiting around for this page. 3085 */ 3086 if (lock) { 3087 VM_STAT_ADD(page_reclaim_zero_locked); 3088 mutex_exit(lock); 3089 } 3090 page_unlock(pp); 3091 3092 /* 3093 * get this before we drop all the pcf locks. 
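 *
 * Acquiring new_freemem_lock while every pcf_lock is still held
 * closes the window in which page_free() could see pcf_wait set,
 * find freemem_wait still zero, and therefore skip the cv_signal()
 * we are about to sleep for.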
3094 */ 3095 mutex_enter(&new_freemem_lock); 3096 3097 p = pcf; 3098 for (i = 0; i < PCF_FANOUT; i++) { 3099 p->pcf_wait++; 3100 mutex_exit(&p->pcf_lock); 3101 p++; 3102 } 3103 3104 freemem_wait++; 3105 cv_wait(&freemem_cv, &new_freemem_lock); 3106 freemem_wait--; 3107 3108 mutex_exit(&new_freemem_lock); 3109 3110 if (lock) { 3111 mutex_enter(lock); 3112 } 3113 return (0); 3114 } 3115 3116 /* 3117 * The pcf accounting has been done, 3118 * though none of the pcf_wait flags have been set, 3119 * drop the locks and continue on. 3120 */ 3121 while (p >= pcf) { 3122 mutex_exit(&p->pcf_lock); 3123 p--; 3124 } 3125 } 3126 3127 /* 3128 * freemem is not protected by any lock. Thus, we cannot 3129 * have any assertion containing freemem here. 3130 */ 3131 freemem -= 1; 3132 3133 VM_STAT_ADD(pagecnt.pc_reclaim); 3134 3135 /* 3136 * page_list_sub will handle the case where pp is a large page. 3137 * It's possible that the page was promoted while on the freelist 3138 */ 3139 if (PP_ISAGED(pp)) { 3140 page_list_sub(pp, PG_FREE_LIST); 3141 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE, 3142 "page_reclaim_free:pp %p", pp); 3143 } else { 3144 page_list_sub(pp, PG_CACHE_LIST); 3145 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE, 3146 "page_reclaim_cache:pp %p", pp); 3147 } 3148 3149 /* 3150 * clear the p_free & p_age bits since this page is no longer 3151 * on the free list. Notice that there was a brief time where 3152 * a page is marked as free, but is not on the list. 3153 * 3154 * Set the reference bit to protect against immediate pageout. 3155 */ 3156 PP_CLRFREE(pp); 3157 PP_CLRAGED(pp); 3158 page_set_props(pp, P_REF); 3159 3160 CPU_STATS_ENTER_K(); 3161 cpup = CPU; /* get cpup now that CPU cannot change */ 3162 CPU_STATS_ADDQ(cpup, vm, pgrec, 1); 3163 CPU_STATS_ADDQ(cpup, vm, pgfrec, 1); 3164 CPU_STATS_EXIT_K(); 3165 ASSERT(pp->p_szc == 0); 3166 3167 return (1); 3168 } 3169 3170 /* 3171 * Destroy identity of the page and put it back on 3172 * the page free list. Assumes that the caller has 3173 * acquired the "exclusive" lock on the page. 3174 */ 3175 void 3176 page_destroy(page_t *pp, int dontfree) 3177 { 3178 ASSERT((PAGE_EXCL(pp) && 3179 !page_iolock_assert(pp)) || panicstr); 3180 ASSERT(pp->p_slckcnt == 0 || panicstr); 3181 3182 if (pp->p_szc != 0) { 3183 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 3184 PP_ISKAS(pp)) { 3185 panic("page_destroy: anon or kernel or no vnode " 3186 "large page %p", (void *)pp); 3187 } 3188 page_demote_vp_pages(pp); 3189 ASSERT(pp->p_szc == 0); 3190 } 3191 3192 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp); 3193 3194 /* 3195 * Unload translations, if any, then hash out the 3196 * page to erase its identity. 3197 */ 3198 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3199 page_hashout(pp, NULL); 3200 3201 if (!dontfree) { 3202 /* 3203 * Acquire the "freemem_lock" for availrmem. 3204 * The page_struct_lock need not be acquired for lckcnt 3205 * and cowcnt since the page has an "exclusive" lock. 3206 */ 3207 if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) { 3208 mutex_enter(&freemem_lock); 3209 if (pp->p_lckcnt != 0) { 3210 availrmem++; 3211 pp->p_lckcnt = 0; 3212 } 3213 if (pp->p_cowcnt != 0) { 3214 availrmem += pp->p_cowcnt; 3215 pp->p_cowcnt = 0; 3216 } 3217 mutex_exit(&freemem_lock); 3218 } 3219 /* 3220 * Put the page on the "free" list. 
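 *
 * The identity was erased by page_hashout() above, so
 * page_free() will put it on the free list proper rather than
 * the cache list.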
3221 */ 3222 page_free(pp, 0); 3223 } 3224 } 3225 3226 void 3227 page_destroy_pages(page_t *pp) 3228 { 3229 3230 page_t *tpp, *rootpp = NULL; 3231 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 3232 pgcnt_t i, pglcks = 0; 3233 uint_t szc = pp->p_szc; 3234 3235 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 3236 3237 VM_STAT_ADD(pagecnt.pc_destroy_pages); 3238 3239 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp); 3240 3241 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 3242 panic("page_destroy_pages: not root page %p", (void *)pp); 3243 /*NOTREACHED*/ 3244 } 3245 3246 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 3247 ASSERT((PAGE_EXCL(tpp) && 3248 !page_iolock_assert(tpp)) || panicstr); 3249 ASSERT(tpp->p_slckcnt == 0 || panicstr); 3250 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 3251 page_hashout(tpp, NULL); 3252 ASSERT(tpp->p_offset == (u_offset_t)-1); 3253 if (tpp->p_lckcnt != 0) { 3254 pglcks++; 3255 tpp->p_lckcnt = 0; 3256 } else if (tpp->p_cowcnt != 0) { 3257 pglcks += tpp->p_cowcnt; 3258 tpp->p_cowcnt = 0; 3259 } 3260 ASSERT(!hat_page_getshare(tpp)); 3261 ASSERT(tpp->p_vnode == NULL); 3262 ASSERT(tpp->p_szc == szc); 3263 3264 PP_SETFREE(tpp); 3265 page_clr_all_props(tpp); 3266 PP_SETAGED(tpp); 3267 ASSERT(tpp->p_next == tpp); 3268 ASSERT(tpp->p_prev == tpp); 3269 page_list_concat(&rootpp, &tpp); 3270 } 3271 3272 ASSERT(rootpp == pp); 3273 if (pglcks != 0) { 3274 mutex_enter(&freemem_lock); 3275 availrmem += pglcks; 3276 mutex_exit(&freemem_lock); 3277 } 3278 3279 page_list_add_pages(rootpp, 0); 3280 page_create_putback(pgcnt); 3281 } 3282 3283 /* 3284 * Similar to page_destroy(), but destroys pages which are 3285 * locked and known to be on the page free list. Since 3286 * the page is known to be free and locked, no one can access 3287 * it. 3288 * 3289 * Also, the number of free pages does not change. 3290 */ 3291 void 3292 page_destroy_free(page_t *pp) 3293 { 3294 ASSERT(PAGE_EXCL(pp)); 3295 ASSERT(PP_ISFREE(pp)); 3296 ASSERT(pp->p_vnode); 3297 ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0); 3298 ASSERT(!hat_page_is_mapped(pp)); 3299 ASSERT(PP_ISAGED(pp) == 0); 3300 ASSERT(pp->p_szc == 0); 3301 3302 VM_STAT_ADD(pagecnt.pc_destroy_free); 3303 page_list_sub(pp, PG_CACHE_LIST); 3304 3305 page_hashout(pp, NULL); 3306 ASSERT(pp->p_vnode == NULL); 3307 ASSERT(pp->p_offset == (u_offset_t)-1); 3308 ASSERT(pp->p_hash == NULL); 3309 3310 PP_SETAGED(pp); 3311 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 3312 page_unlock(pp); 3313 3314 mutex_enter(&new_freemem_lock); 3315 if (freemem_wait) { 3316 cv_signal(&freemem_cv); 3317 } 3318 mutex_exit(&new_freemem_lock); 3319 } 3320 3321 /* 3322 * Rename the page "opp" to have an identity specified 3323 * by [vp, off]. If a page already exists with this name 3324 * it is locked and destroyed. Note that the page's 3325 * translations are not unloaded during the rename. 3326 * 3327 * This routine is used by the anon layer to "steal" the 3328 * original page and is not unlike destroying a page and 3329 * creating a new page using the same page frame. 3330 * 3331 * XXX -- Could deadlock if caller 1 tries to rename A to B while 3332 * caller 2 tries to rename B to A. 
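 *
 * (Each renamer enters holding its own source page SE_EXCL and
 * then tries to lock the destination page SE_EXCL; two renames in
 * opposite directions can thus each end up holding the page the
 * other one is waiting for.)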
3333 */ 3334 void 3335 page_rename(page_t *opp, vnode_t *vp, u_offset_t off) 3336 { 3337 page_t *pp; 3338 int olckcnt = 0; 3339 int ocowcnt = 0; 3340 kmutex_t *phm; 3341 ulong_t index; 3342 3343 ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp)); 3344 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3345 ASSERT(PP_ISFREE(opp) == 0); 3346 3347 VM_STAT_ADD(page_rename_count); 3348 3349 TRACE_3(TR_FAC_VM, TR_PAGE_RENAME, 3350 "page rename:pp %p vp %p off %llx", opp, vp, off); 3351 3352 /* 3353 * CacheFS may call page_rename for a large NFS page 3354 * when both CacheFS and NFS mount points are used 3355 * by applications. Demote this large page before 3356 * renaming it, to ensure that there are no "partial" 3357 * large pages left lying around. 3358 */ 3359 if (opp->p_szc != 0) { 3360 vnode_t *ovp = opp->p_vnode; 3361 ASSERT(ovp != NULL); 3362 ASSERT(!IS_SWAPFSVP(ovp)); 3363 ASSERT(!VN_ISKAS(ovp)); 3364 page_demote_vp_pages(opp); 3365 ASSERT(opp->p_szc == 0); 3366 } 3367 3368 page_hashout(opp, NULL); 3369 PP_CLRAGED(opp); 3370 3371 /* 3372 * Acquire the appropriate page hash lock, since 3373 * we're going to rename the page. 3374 */ 3375 index = PAGE_HASH_FUNC(vp, off); 3376 phm = PAGE_HASH_MUTEX(index); 3377 mutex_enter(phm); 3378 top: 3379 /* 3380 * Look for an existing page with this name and destroy it if found. 3381 * By holding the page hash lock all the way to the page_hashin() 3382 * call, we are assured that no page can be created with this 3383 * identity. In the case when the phm lock is dropped to undo any 3384 * hat layer mappings, the existing page is held with an "exclusive" 3385 * lock, again preventing another page from being created with 3386 * this identity. 3387 */ 3388 PAGE_HASH_SEARCH(index, pp, vp, off); 3389 if (pp != NULL) { 3390 VM_STAT_ADD(page_rename_exists); 3391 3392 /* 3393 * As it turns out, this is one of only two places where 3394 * page_lock() needs to hold the passed in lock in the 3395 * successful case. In all of the others, the lock could 3396 * be dropped as soon as the attempt is made to lock 3397 * the page. It is tempting to add yet another arguement, 3398 * PL_KEEP or PL_DROP, to let page_lock know what to do. 3399 */ 3400 if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) { 3401 /* 3402 * Went to sleep because the page could not 3403 * be locked. We were woken up when the page 3404 * was unlocked, or when the page was destroyed. 3405 * In either case, `phm' was dropped while we 3406 * slept. Hence we should not just roar through 3407 * this loop. 3408 */ 3409 goto top; 3410 } 3411 3412 /* 3413 * If an existing page is a large page, then demote 3414 * it to ensure that no "partial" large pages are 3415 * "created" after page_rename. An existing page 3416 * can be a CacheFS page, and can't belong to swapfs. 3417 */ 3418 if (hat_page_is_mapped(pp)) { 3419 /* 3420 * Unload translations. Since we hold the 3421 * exclusive lock on this page, the page 3422 * can not be changed while we drop phm. 3423 * This is also not a lock protocol violation, 3424 * but rather the proper way to do things. 
3425 */ 3426 mutex_exit(phm); 3427 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3428 if (pp->p_szc != 0) { 3429 ASSERT(!IS_SWAPFSVP(vp)); 3430 ASSERT(!VN_ISKAS(vp)); 3431 page_demote_vp_pages(pp); 3432 ASSERT(pp->p_szc == 0); 3433 } 3434 mutex_enter(phm); 3435 } else if (pp->p_szc != 0) { 3436 ASSERT(!IS_SWAPFSVP(vp)); 3437 ASSERT(!VN_ISKAS(vp)); 3438 mutex_exit(phm); 3439 page_demote_vp_pages(pp); 3440 ASSERT(pp->p_szc == 0); 3441 mutex_enter(phm); 3442 } 3443 page_hashout(pp, phm); 3444 } 3445 /* 3446 * Hash in the page with the new identity. 3447 */ 3448 if (!page_hashin(opp, vp, off, phm)) { 3449 /* 3450 * We were holding phm while we searched for [vp, off] 3451 * and only dropped phm if we found and locked a page. 3452 * If we can't create this page now, then some thing 3453 * is really broken. 3454 */ 3455 panic("page_rename: Can't hash in page: %p", (void *)pp); 3456 /*NOTREACHED*/ 3457 } 3458 3459 ASSERT(MUTEX_HELD(phm)); 3460 mutex_exit(phm); 3461 3462 /* 3463 * Now that we have dropped phm, lets get around to finishing up 3464 * with pp. 3465 */ 3466 if (pp != NULL) { 3467 ASSERT(!hat_page_is_mapped(pp)); 3468 /* for now large pages should not end up here */ 3469 ASSERT(pp->p_szc == 0); 3470 /* 3471 * Save the locks for transfer to the new page and then 3472 * clear them so page_free doesn't think they're important. 3473 * The page_struct_lock need not be acquired for lckcnt and 3474 * cowcnt since the page has an "exclusive" lock. 3475 */ 3476 olckcnt = pp->p_lckcnt; 3477 ocowcnt = pp->p_cowcnt; 3478 pp->p_lckcnt = pp->p_cowcnt = 0; 3479 3480 /* 3481 * Put the page on the "free" list after we drop 3482 * the lock. The less work under the lock the better. 3483 */ 3484 /*LINTED: constant in conditional context*/ 3485 VN_DISPOSE(pp, B_FREE, 0, kcred); 3486 } 3487 3488 /* 3489 * Transfer the lock count from the old page (if any). 3490 * The page_struct_lock need not be acquired for lckcnt and 3491 * cowcnt since the page has an "exclusive" lock. 3492 */ 3493 opp->p_lckcnt += olckcnt; 3494 opp->p_cowcnt += ocowcnt; 3495 } 3496 3497 /* 3498 * low level routine to add page `pp' to the hash and vp chains for [vp, offset] 3499 * 3500 * Pages are normally inserted at the start of a vnode's v_pages list. 3501 * If the vnode is VMODSORT and the page is modified, it goes at the end. 3502 * This can happen when a modified page is relocated for DR. 3503 * 3504 * Returns 1 on success and 0 on failure. 3505 */ 3506 static int 3507 page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset) 3508 { 3509 page_t **listp; 3510 page_t *tp; 3511 ulong_t index; 3512 3513 ASSERT(PAGE_EXCL(pp)); 3514 ASSERT(vp != NULL); 3515 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3516 3517 /* 3518 * Be sure to set these up before the page is inserted on the hash 3519 * list. As soon as the page is placed on the list some other 3520 * thread might get confused and wonder how this page could 3521 * possibly hash to this list. 3522 */ 3523 pp->p_vnode = vp; 3524 pp->p_offset = offset; 3525 3526 /* 3527 * record if this page is on a swap vnode 3528 */ 3529 if ((vp->v_flag & VISSWAP) != 0) 3530 PP_SETSWAP(pp); 3531 3532 index = PAGE_HASH_FUNC(vp, offset); 3533 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index))); 3534 listp = &page_hash[index]; 3535 3536 /* 3537 * If this page is already hashed in, fail this attempt to add it. 
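 *
 * (On a duplicate, the p_vnode and p_offset assignments made above
 * are undone first, so the page goes back to the caller with no
 * identity at all.)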
3538 */ 3539 for (tp = *listp; tp != NULL; tp = tp->p_hash) { 3540 if (tp->p_vnode == vp && tp->p_offset == offset) { 3541 pp->p_vnode = NULL; 3542 pp->p_offset = (u_offset_t)(-1); 3543 return (0); 3544 } 3545 } 3546 pp->p_hash = *listp; 3547 *listp = pp; 3548 3549 /* 3550 * Add the page to the vnode's list of pages 3551 */ 3552 if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp)) 3553 listp = &vp->v_pages->p_vpprev->p_vpnext; 3554 else 3555 listp = &vp->v_pages; 3556 3557 page_vpadd(listp, pp); 3558 3559 return (1); 3560 } 3561 3562 /* 3563 * Add page `pp' to both the hash and vp chains for [vp, offset]. 3564 * 3565 * Returns 1 on success and 0 on failure. 3566 * If hold is passed in, it is not dropped. 3567 */ 3568 int 3569 page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold) 3570 { 3571 kmutex_t *phm = NULL; 3572 kmutex_t *vphm; 3573 int rc; 3574 3575 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3576 3577 TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN, 3578 "page_hashin:pp %p vp %p offset %llx", 3579 pp, vp, offset); 3580 3581 VM_STAT_ADD(hashin_count); 3582 3583 if (hold != NULL) 3584 phm = hold; 3585 else { 3586 VM_STAT_ADD(hashin_not_held); 3587 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset)); 3588 mutex_enter(phm); 3589 } 3590 3591 vphm = page_vnode_mutex(vp); 3592 mutex_enter(vphm); 3593 rc = page_do_hashin(pp, vp, offset); 3594 mutex_exit(vphm); 3595 if (hold == NULL) 3596 mutex_exit(phm); 3597 if (rc == 0) 3598 VM_STAT_ADD(hashin_already); 3599 return (rc); 3600 } 3601 3602 /* 3603 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3604 * All mutexes must be held 3605 */ 3606 static void 3607 page_do_hashout(page_t *pp) 3608 { 3609 page_t **hpp; 3610 page_t *hp; 3611 vnode_t *vp = pp->p_vnode; 3612 3613 ASSERT(vp != NULL); 3614 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3615 3616 /* 3617 * First, take pp off of its hash chain. 3618 */ 3619 hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)]; 3620 3621 for (;;) { 3622 hp = *hpp; 3623 if (hp == pp) 3624 break; 3625 if (hp == NULL) { 3626 panic("page_do_hashout"); 3627 /*NOTREACHED*/ 3628 } 3629 hpp = &hp->p_hash; 3630 } 3631 *hpp = pp->p_hash; 3632 3633 /* 3634 * Now remove it from its associated vnode. 3635 */ 3636 if (vp->v_pages) 3637 page_vpsub(&vp->v_pages, pp); 3638 3639 pp->p_hash = NULL; 3640 page_clr_all_props(pp); 3641 PP_CLRSWAP(pp); 3642 pp->p_vnode = NULL; 3643 pp->p_offset = (u_offset_t)-1; 3644 } 3645 3646 /* 3647 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3648 * 3649 * When `phm' is non-NULL it contains the address of the mutex protecting the 3650 * hash list pp is on. It is not dropped. 3651 */ 3652 void 3653 page_hashout(page_t *pp, kmutex_t *phm) 3654 { 3655 vnode_t *vp; 3656 ulong_t index; 3657 kmutex_t *nphm; 3658 kmutex_t *vphm; 3659 kmutex_t *sep; 3660 3661 ASSERT(phm != NULL ? 
MUTEX_HELD(phm) : 1); 3662 ASSERT(pp->p_vnode != NULL); 3663 ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr); 3664 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode))); 3665 3666 vp = pp->p_vnode; 3667 3668 TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT, 3669 "page_hashout:pp %p vp %p", pp, vp); 3670 3671 /* Kernel probe */ 3672 TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */, 3673 tnf_opaque, vnode, vp, 3674 tnf_offset, offset, pp->p_offset); 3675 3676 /* 3677 * 3678 */ 3679 VM_STAT_ADD(hashout_count); 3680 index = PAGE_HASH_FUNC(vp, pp->p_offset); 3681 if (phm == NULL) { 3682 VM_STAT_ADD(hashout_not_held); 3683 nphm = PAGE_HASH_MUTEX(index); 3684 mutex_enter(nphm); 3685 } 3686 ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1); 3687 3688 3689 /* 3690 * grab page vnode mutex and remove it... 3691 */ 3692 vphm = page_vnode_mutex(vp); 3693 mutex_enter(vphm); 3694 3695 page_do_hashout(pp); 3696 3697 mutex_exit(vphm); 3698 if (phm == NULL) 3699 mutex_exit(nphm); 3700 3701 /* 3702 * Wake up processes waiting for this page. The page's 3703 * identity has been changed, and is probably not the 3704 * desired page any longer. 3705 */ 3706 sep = page_se_mutex(pp); 3707 mutex_enter(sep); 3708 pp->p_selock &= ~SE_EWANTED; 3709 if (CV_HAS_WAITERS(&pp->p_cv)) 3710 cv_broadcast(&pp->p_cv); 3711 mutex_exit(sep); 3712 } 3713 3714 /* 3715 * Add the page to the front of a linked list of pages 3716 * using the p_next & p_prev pointers for the list. 3717 * The caller is responsible for protecting the list pointers. 3718 */ 3719 void 3720 page_add(page_t **ppp, page_t *pp) 3721 { 3722 ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3723 3724 page_add_common(ppp, pp); 3725 } 3726 3727 3728 3729 /* 3730 * Common code for page_add() and mach_page_add() 3731 */ 3732 void 3733 page_add_common(page_t **ppp, page_t *pp) 3734 { 3735 if (*ppp == NULL) { 3736 pp->p_next = pp->p_prev = pp; 3737 } else { 3738 pp->p_next = *ppp; 3739 pp->p_prev = (*ppp)->p_prev; 3740 (*ppp)->p_prev = pp; 3741 pp->p_prev->p_next = pp; 3742 } 3743 *ppp = pp; 3744 } 3745 3746 3747 /* 3748 * Remove this page from a linked list of pages 3749 * using the p_next & p_prev pointers for the list. 3750 * 3751 * The caller is responsible for protecting the list pointers. 3752 */ 3753 void 3754 page_sub(page_t **ppp, page_t *pp) 3755 { 3756 ASSERT((PP_ISFREE(pp)) ? 1 : 3757 (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3758 3759 if (*ppp == NULL || pp == NULL) { 3760 panic("page_sub: bad arg(s): pp %p, *ppp %p", 3761 (void *)pp, (void *)(*ppp)); 3762 /*NOTREACHED*/ 3763 } 3764 3765 page_sub_common(ppp, pp); 3766 } 3767 3768 3769 /* 3770 * Common code for page_sub() and mach_page_sub() 3771 */ 3772 void 3773 page_sub_common(page_t **ppp, page_t *pp) 3774 { 3775 if (*ppp == pp) 3776 *ppp = pp->p_next; /* go to next page */ 3777 3778 if (*ppp == pp) 3779 *ppp = NULL; /* page list is gone */ 3780 else { 3781 pp->p_prev->p_next = pp->p_next; 3782 pp->p_next->p_prev = pp->p_prev; 3783 } 3784 pp->p_prev = pp->p_next = pp; /* make pp a list of one */ 3785 } 3786 3787 3788 /* 3789 * Break page list cppp into two lists with npages in the first list. 3790 * The tail is returned in nppp. 
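 *
 * For example, breaking the circular list a-b-c-d-e with
 * npages == 2 leaves *oppp as the two page list a-b and returns
 * the remaining three page list c-d-e in *nppp.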
3791 */ 3792 void 3793 page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages) 3794 { 3795 page_t *s1pp = *oppp; 3796 page_t *s2pp; 3797 page_t *e1pp, *e2pp; 3798 long n = 0; 3799 3800 if (s1pp == NULL) { 3801 *nppp = NULL; 3802 return; 3803 } 3804 if (npages == 0) { 3805 *nppp = s1pp; 3806 *oppp = NULL; 3807 return; 3808 } 3809 for (n = 0, s2pp = *oppp; n < npages; n++) { 3810 s2pp = s2pp->p_next; 3811 } 3812 /* Fix head and tail of new lists */ 3813 e1pp = s2pp->p_prev; 3814 e2pp = s1pp->p_prev; 3815 s1pp->p_prev = e1pp; 3816 e1pp->p_next = s1pp; 3817 s2pp->p_prev = e2pp; 3818 e2pp->p_next = s2pp; 3819 3820 /* second list empty */ 3821 if (s2pp == s1pp) { 3822 *oppp = s1pp; 3823 *nppp = NULL; 3824 } else { 3825 *oppp = s1pp; 3826 *nppp = s2pp; 3827 } 3828 } 3829 3830 /* 3831 * Concatenate page list nppp onto the end of list ppp. 3832 */ 3833 void 3834 page_list_concat(page_t **ppp, page_t **nppp) 3835 { 3836 page_t *s1pp, *s2pp, *e1pp, *e2pp; 3837 3838 if (*nppp == NULL) { 3839 return; 3840 } 3841 if (*ppp == NULL) { 3842 *ppp = *nppp; 3843 return; 3844 } 3845 s1pp = *ppp; 3846 e1pp = s1pp->p_prev; 3847 s2pp = *nppp; 3848 e2pp = s2pp->p_prev; 3849 s1pp->p_prev = e2pp; 3850 e2pp->p_next = s1pp; 3851 e1pp->p_next = s2pp; 3852 s2pp->p_prev = e1pp; 3853 } 3854 3855 /* 3856 * return the next page in the page list 3857 */ 3858 page_t * 3859 page_list_next(page_t *pp) 3860 { 3861 return (pp->p_next); 3862 } 3863 3864 3865 /* 3866 * Add the page to the front of the linked list of pages 3867 * using p_vpnext/p_vpprev pointers for the list. 3868 * 3869 * The caller is responsible for protecting the lists. 3870 */ 3871 void 3872 page_vpadd(page_t **ppp, page_t *pp) 3873 { 3874 if (*ppp == NULL) { 3875 pp->p_vpnext = pp->p_vpprev = pp; 3876 } else { 3877 pp->p_vpnext = *ppp; 3878 pp->p_vpprev = (*ppp)->p_vpprev; 3879 (*ppp)->p_vpprev = pp; 3880 pp->p_vpprev->p_vpnext = pp; 3881 } 3882 *ppp = pp; 3883 } 3884 3885 /* 3886 * Remove this page from the linked list of pages 3887 * using p_vpnext/p_vpprev pointers for the list. 3888 * 3889 * The caller is responsible for protecting the lists. 3890 */ 3891 void 3892 page_vpsub(page_t **ppp, page_t *pp) 3893 { 3894 if (*ppp == NULL || pp == NULL) { 3895 panic("page_vpsub: bad arg(s): pp %p, *ppp %p", 3896 (void *)pp, (void *)(*ppp)); 3897 /*NOTREACHED*/ 3898 } 3899 3900 if (*ppp == pp) 3901 *ppp = pp->p_vpnext; /* go to next page */ 3902 3903 if (*ppp == pp) 3904 *ppp = NULL; /* page list is gone */ 3905 else { 3906 pp->p_vpprev->p_vpnext = pp->p_vpnext; 3907 pp->p_vpnext->p_vpprev = pp->p_vpprev; 3908 } 3909 pp->p_vpprev = pp->p_vpnext = pp; /* make pp a list of one */ 3910 } 3911 3912 /* 3913 * Lock a physical page into memory "long term". Used to support "lock 3914 * in memory" functions. Accepts the page to be locked, and a cow variable 3915 * to indicate whether a the lock will travel to the new page during 3916 * a potential copy-on-write. 3917 */ 3918 int 3919 page_pp_lock( 3920 page_t *pp, /* page to be locked */ 3921 int cow, /* cow lock */ 3922 int kernel) /* must succeed -- ignore checking */ 3923 { 3924 int r = 0; /* result -- assume failure */ 3925 3926 ASSERT(PAGE_LOCKED(pp)); 3927 3928 page_struct_lock(pp); 3929 /* 3930 * Acquire the "freemem_lock" for availrmem. 
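 *
 * Every cow lock is charged against availrmem individually, while
 * an ordinary lock is only charged when p_lckcnt goes from 0 to 1;
 * kernel callers are trusted to have done their own availrmem
 * accounting already.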
3931 */ 3932 if (cow) { 3933 mutex_enter(&freemem_lock); 3934 if ((availrmem > pages_pp_maximum) && 3935 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 3936 availrmem--; 3937 pages_locked++; 3938 mutex_exit(&freemem_lock); 3939 r = 1; 3940 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 3941 cmn_err(CE_WARN, 3942 "COW lock limit reached on pfn 0x%lx", 3943 page_pptonum(pp)); 3944 } 3945 } else 3946 mutex_exit(&freemem_lock); 3947 } else { 3948 if (pp->p_lckcnt) { 3949 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 3950 r = 1; 3951 if (++pp->p_lckcnt == 3952 (ushort_t)PAGE_LOCK_MAXIMUM) { 3953 cmn_err(CE_WARN, "Page lock limit " 3954 "reached on pfn 0x%lx", 3955 page_pptonum(pp)); 3956 } 3957 } 3958 } else { 3959 if (kernel) { 3960 /* availrmem accounting done by caller */ 3961 ++pp->p_lckcnt; 3962 r = 1; 3963 } else { 3964 mutex_enter(&freemem_lock); 3965 if (availrmem > pages_pp_maximum) { 3966 availrmem--; 3967 pages_locked++; 3968 ++pp->p_lckcnt; 3969 r = 1; 3970 } 3971 mutex_exit(&freemem_lock); 3972 } 3973 } 3974 } 3975 page_struct_unlock(pp); 3976 return (r); 3977 } 3978 3979 /* 3980 * Decommit a lock on a physical page frame. Account for cow locks if 3981 * appropriate. 3982 */ 3983 void 3984 page_pp_unlock( 3985 page_t *pp, /* page to be unlocked */ 3986 int cow, /* expect cow lock */ 3987 int kernel) /* this was a kernel lock */ 3988 { 3989 ASSERT(PAGE_LOCKED(pp)); 3990 3991 page_struct_lock(pp); 3992 /* 3993 * Acquire the "freemem_lock" for availrmem. 3994 * If cowcnt or lcknt is already 0 do nothing; i.e., we 3995 * could be called to unlock even if nothing is locked. This could 3996 * happen if locked file pages were truncated (removing the lock) 3997 * and the file was grown again and new pages faulted in; the new 3998 * pages are unlocked but the segment still thinks they're locked. 3999 */ 4000 if (cow) { 4001 if (pp->p_cowcnt) { 4002 mutex_enter(&freemem_lock); 4003 pp->p_cowcnt--; 4004 availrmem++; 4005 pages_locked--; 4006 mutex_exit(&freemem_lock); 4007 } 4008 } else { 4009 if (pp->p_lckcnt && --pp->p_lckcnt == 0) { 4010 if (!kernel) { 4011 mutex_enter(&freemem_lock); 4012 availrmem++; 4013 pages_locked--; 4014 mutex_exit(&freemem_lock); 4015 } 4016 } 4017 } 4018 page_struct_unlock(pp); 4019 } 4020 4021 /* 4022 * This routine reserves availrmem for npages; 4023 * flags: KM_NOSLEEP or KM_SLEEP 4024 * returns 1 on success or 0 on failure 4025 */ 4026 int 4027 page_resv(pgcnt_t npages, uint_t flags) 4028 { 4029 mutex_enter(&freemem_lock); 4030 while (availrmem < tune.t_minarmem + npages) { 4031 if (flags & KM_NOSLEEP) { 4032 mutex_exit(&freemem_lock); 4033 return (0); 4034 } 4035 mutex_exit(&freemem_lock); 4036 page_needfree(npages); 4037 kmem_reap(); 4038 delay(hz >> 2); 4039 page_needfree(-(spgcnt_t)npages); 4040 mutex_enter(&freemem_lock); 4041 } 4042 availrmem -= npages; 4043 mutex_exit(&freemem_lock); 4044 return (1); 4045 } 4046 4047 /* 4048 * This routine unreserves availrmem for npages; 4049 */ 4050 void 4051 page_unresv(pgcnt_t npages) 4052 { 4053 mutex_enter(&freemem_lock); 4054 availrmem += npages; 4055 mutex_exit(&freemem_lock); 4056 } 4057 4058 /* 4059 * See Statement at the beginning of segvn_lockop() regarding 4060 * the way we handle cowcnts and lckcnts. 4061 * 4062 * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage 4063 * that breaks COW has PROT_WRITE. 4064 * 4065 * Note that, we may also break COW in case we are softlocking 4066 * on read access during physio; 4067 * in this softlock case, the vpage may not have PROT_WRITE. 
4068 * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp' 4069 * if the vpage doesn't have PROT_WRITE. 4070 * 4071 * This routine is never called if we are stealing a page 4072 * in anon_private. 4073 * 4074 * The caller subtracted from availrmem for read only mapping. 4075 * if lckcnt is 1 increment availrmem. 4076 */ 4077 void 4078 page_pp_useclaim( 4079 page_t *opp, /* original page frame losing lock */ 4080 page_t *npp, /* new page frame gaining lock */ 4081 uint_t write_perm) /* set if vpage has PROT_WRITE */ 4082 { 4083 int payback = 0; 4084 4085 ASSERT(PAGE_LOCKED(opp)); 4086 ASSERT(PAGE_LOCKED(npp)); 4087 4088 page_struct_lock(opp); 4089 4090 ASSERT(npp->p_cowcnt == 0); 4091 ASSERT(npp->p_lckcnt == 0); 4092 4093 /* Don't use claim if nothing is locked (see page_pp_unlock above) */ 4094 if ((write_perm && opp->p_cowcnt != 0) || 4095 (!write_perm && opp->p_lckcnt != 0)) { 4096 4097 if (write_perm) { 4098 npp->p_cowcnt++; 4099 ASSERT(opp->p_cowcnt != 0); 4100 opp->p_cowcnt--; 4101 } else { 4102 4103 ASSERT(opp->p_lckcnt != 0); 4104 4105 /* 4106 * We didn't need availrmem decremented if p_lckcnt on 4107 * original page is 1. Here, we are unlocking 4108 * read-only copy belonging to original page and 4109 * are locking a copy belonging to new page. 4110 */ 4111 if (opp->p_lckcnt == 1) 4112 payback = 1; 4113 4114 npp->p_lckcnt++; 4115 opp->p_lckcnt--; 4116 } 4117 } 4118 if (payback) { 4119 mutex_enter(&freemem_lock); 4120 availrmem++; 4121 pages_useclaim--; 4122 mutex_exit(&freemem_lock); 4123 } 4124 page_struct_unlock(opp); 4125 } 4126 4127 /* 4128 * Simple claim adjust functions -- used to support changes in 4129 * claims due to changes in access permissions. Used by segvn_setprot(). 4130 */ 4131 int 4132 page_addclaim(page_t *pp) 4133 { 4134 int r = 0; /* result */ 4135 4136 ASSERT(PAGE_LOCKED(pp)); 4137 4138 page_struct_lock(pp); 4139 ASSERT(pp->p_lckcnt != 0); 4140 4141 if (pp->p_lckcnt == 1) { 4142 if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4143 --pp->p_lckcnt; 4144 r = 1; 4145 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4146 cmn_err(CE_WARN, 4147 "COW lock limit reached on pfn 0x%lx", 4148 page_pptonum(pp)); 4149 } 4150 } 4151 } else { 4152 mutex_enter(&freemem_lock); 4153 if ((availrmem > pages_pp_maximum) && 4154 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 4155 --availrmem; 4156 ++pages_claimed; 4157 mutex_exit(&freemem_lock); 4158 --pp->p_lckcnt; 4159 r = 1; 4160 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4161 cmn_err(CE_WARN, 4162 "COW lock limit reached on pfn 0x%lx", 4163 page_pptonum(pp)); 4164 } 4165 } else 4166 mutex_exit(&freemem_lock); 4167 } 4168 page_struct_unlock(pp); 4169 return (r); 4170 } 4171 4172 int 4173 page_subclaim(page_t *pp) 4174 { 4175 int r = 0; 4176 4177 ASSERT(PAGE_LOCKED(pp)); 4178 4179 page_struct_lock(pp); 4180 ASSERT(pp->p_cowcnt != 0); 4181 4182 if (pp->p_lckcnt) { 4183 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4184 r = 1; 4185 /* 4186 * for availrmem 4187 */ 4188 mutex_enter(&freemem_lock); 4189 availrmem++; 4190 pages_claimed--; 4191 mutex_exit(&freemem_lock); 4192 4193 pp->p_cowcnt--; 4194 4195 if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4196 cmn_err(CE_WARN, 4197 "Page lock limit reached on pfn 0x%lx", 4198 page_pptonum(pp)); 4199 } 4200 } 4201 } else { 4202 r = 1; 4203 pp->p_cowcnt--; 4204 pp->p_lckcnt++; 4205 } 4206 page_struct_unlock(pp); 4207 return (r); 4208 } 4209 4210 int 4211 page_addclaim_pages(page_t **ppa) 4212 { 4213 4214 pgcnt_t lckpgs = 0, pg_idx; 4215 4216 
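	/*
	 * Two passes under page_llock: first verify that every page in
	 * ppa can take another cow lock and count how many of them need
	 * an availrmem charge, then perform the lckcnt to cowcnt
	 * transfer only once the whole batch is known to succeed.
	 */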
VM_STAT_ADD(pagecnt.pc_addclaim_pages); 4217 4218 mutex_enter(&page_llock); 4219 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4220 4221 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4222 ASSERT(ppa[pg_idx]->p_lckcnt != 0); 4223 if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4224 mutex_exit(&page_llock); 4225 return (0); 4226 } 4227 if (ppa[pg_idx]->p_lckcnt > 1) 4228 lckpgs++; 4229 } 4230 4231 if (lckpgs != 0) { 4232 mutex_enter(&freemem_lock); 4233 if (availrmem >= pages_pp_maximum + lckpgs) { 4234 availrmem -= lckpgs; 4235 pages_claimed += lckpgs; 4236 } else { 4237 mutex_exit(&freemem_lock); 4238 mutex_exit(&page_llock); 4239 return (0); 4240 } 4241 mutex_exit(&freemem_lock); 4242 } 4243 4244 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4245 ppa[pg_idx]->p_lckcnt--; 4246 ppa[pg_idx]->p_cowcnt++; 4247 } 4248 mutex_exit(&page_llock); 4249 return (1); 4250 } 4251 4252 int 4253 page_subclaim_pages(page_t **ppa) 4254 { 4255 pgcnt_t ulckpgs = 0, pg_idx; 4256 4257 VM_STAT_ADD(pagecnt.pc_subclaim_pages); 4258 4259 mutex_enter(&page_llock); 4260 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4261 4262 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4263 ASSERT(ppa[pg_idx]->p_cowcnt != 0); 4264 if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4265 mutex_exit(&page_llock); 4266 return (0); 4267 } 4268 if (ppa[pg_idx]->p_lckcnt != 0) 4269 ulckpgs++; 4270 } 4271 4272 if (ulckpgs != 0) { 4273 mutex_enter(&freemem_lock); 4274 availrmem += ulckpgs; 4275 pages_claimed -= ulckpgs; 4276 mutex_exit(&freemem_lock); 4277 } 4278 4279 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4280 ppa[pg_idx]->p_cowcnt--; 4281 ppa[pg_idx]->p_lckcnt++; 4282 4283 } 4284 mutex_exit(&page_llock); 4285 return (1); 4286 } 4287 4288 page_t * 4289 page_numtopp(pfn_t pfnum, se_t se) 4290 { 4291 page_t *pp; 4292 4293 retry: 4294 pp = page_numtopp_nolock(pfnum); 4295 if (pp == NULL) { 4296 return ((page_t *)NULL); 4297 } 4298 4299 /* 4300 * Acquire the appropriate lock on the page. 4301 */ 4302 while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) { 4303 if (page_pptonum(pp) != pfnum) 4304 goto retry; 4305 continue; 4306 } 4307 4308 if (page_pptonum(pp) != pfnum) { 4309 page_unlock(pp); 4310 goto retry; 4311 } 4312 4313 return (pp); 4314 } 4315 4316 page_t * 4317 page_numtopp_noreclaim(pfn_t pfnum, se_t se) 4318 { 4319 page_t *pp; 4320 4321 retry: 4322 pp = page_numtopp_nolock(pfnum); 4323 if (pp == NULL) { 4324 return ((page_t *)NULL); 4325 } 4326 4327 /* 4328 * Acquire the appropriate lock on the page. 4329 */ 4330 while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) { 4331 if (page_pptonum(pp) != pfnum) 4332 goto retry; 4333 continue; 4334 } 4335 4336 if (page_pptonum(pp) != pfnum) { 4337 page_unlock(pp); 4338 goto retry; 4339 } 4340 4341 return (pp); 4342 } 4343 4344 /* 4345 * This routine is like page_numtopp, but will only return page structs 4346 * for pages which are ok for loading into hardware using the page struct. 4347 */ 4348 page_t * 4349 page_numtopp_nowait(pfn_t pfnum, se_t se) 4350 { 4351 page_t *pp; 4352 4353 retry: 4354 pp = page_numtopp_nolock(pfnum); 4355 if (pp == NULL) { 4356 return ((page_t *)NULL); 4357 } 4358 4359 /* 4360 * Try to acquire the appropriate lock on the page. 
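 *
 * Unlike page_numtopp(), this variant never blocks: free pages and
 * pages that cannot be trylocked are reported as NULL, and we only
 * loop back if the pfn to page_t association changed underneath us.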
4361 */ 4362 if (PP_ISFREE(pp)) 4363 pp = NULL; 4364 else { 4365 if (!page_trylock(pp, se)) 4366 pp = NULL; 4367 else { 4368 if (page_pptonum(pp) != pfnum) { 4369 page_unlock(pp); 4370 goto retry; 4371 } 4372 if (PP_ISFREE(pp)) { 4373 page_unlock(pp); 4374 pp = NULL; 4375 } 4376 } 4377 } 4378 return (pp); 4379 } 4380 4381 /* 4382 * Returns a count of dirty pages that are in the process 4383 * of being written out. If 'cleanit' is set, try to push the page. 4384 */ 4385 pgcnt_t 4386 page_busy(int cleanit) 4387 { 4388 page_t *page0 = page_first(); 4389 page_t *pp = page0; 4390 pgcnt_t nppbusy = 0; 4391 u_offset_t off; 4392 4393 do { 4394 vnode_t *vp = pp->p_vnode; 4395 4396 /* 4397 * A page is a candidate for syncing if it is: 4398 * 4399 * (a) On neither the freelist nor the cachelist 4400 * (b) Hashed onto a vnode 4401 * (c) Not a kernel page 4402 * (d) Dirty 4403 * (e) Not part of a swapfile 4404 * (f) a page which belongs to a real vnode; eg has a non-null 4405 * v_vfsp pointer. 4406 * (g) Backed by a filesystem which doesn't have a 4407 * stubbed-out sync operation 4408 */ 4409 if (!PP_ISFREE(pp) && vp != NULL && !VN_ISKAS(vp) && 4410 hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL && 4411 vfs_can_sync(vp->v_vfsp)) { 4412 nppbusy++; 4413 vfs_syncprogress(); 4414 4415 if (!cleanit) 4416 continue; 4417 if (!page_trylock(pp, SE_EXCL)) 4418 continue; 4419 4420 if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) || 4421 pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 4422 !(hat_pagesync(pp, 4423 HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) { 4424 page_unlock(pp); 4425 continue; 4426 } 4427 off = pp->p_offset; 4428 VN_HOLD(vp); 4429 page_unlock(pp); 4430 (void) VOP_PUTPAGE(vp, off, PAGESIZE, 4431 B_ASYNC | B_FREE, kcred, NULL); 4432 VN_RELE(vp); 4433 } 4434 } while ((pp = page_next(pp)) != page0); 4435 4436 return (nppbusy); 4437 } 4438 4439 void page_invalidate_pages(void); 4440 4441 /* 4442 * callback handler to vm sub-system 4443 * 4444 * callers make sure no recursive entries to this func. 4445 */ 4446 /*ARGSUSED*/ 4447 boolean_t 4448 callb_vm_cpr(void *arg, int code) 4449 { 4450 if (code == CB_CODE_CPR_CHKPT) 4451 page_invalidate_pages(); 4452 return (B_TRUE); 4453 } 4454 4455 /* 4456 * Invalidate all pages of the system. 4457 * It shouldn't be called until all user page activities are all stopped. 4458 */ 4459 void 4460 page_invalidate_pages() 4461 { 4462 page_t *pp; 4463 page_t *page0; 4464 pgcnt_t nbusypages; 4465 int retry = 0; 4466 const int MAXRETRIES = 4; 4467 #if defined(__sparc) 4468 extern struct vnode prom_ppages; 4469 #endif /* __sparc */ 4470 4471 top: 4472 /* 4473 * Flush dirty pages and destroy the clean ones. 4474 */ 4475 nbusypages = 0; 4476 4477 pp = page0 = page_first(); 4478 do { 4479 struct vnode *vp; 4480 u_offset_t offset; 4481 int mod; 4482 4483 /* 4484 * skip the page if it has no vnode or the page associated 4485 * with the kernel vnode or prom allocated kernel mem. 4486 */ 4487 #if defined(__sparc) 4488 if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp) || 4489 vp == &prom_ppages) 4490 #else /* x86 doesn't have prom or prom_ppage */ 4491 if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp)) 4492 #endif /* __sparc */ 4493 continue; 4494 4495 /* 4496 * skip the page which is already free invalidated. 4497 */ 4498 if (PP_ISFREE(pp) && PP_ISAGED(pp)) 4499 continue; 4500 4501 /* 4502 * skip pages that are already locked or can't be "exclusively" 4503 * locked or are already free. 
After we lock the page, check
4504 * the free and age bits again to be sure it's not destroyed
4505 * yet.
4506 * To achieve max. parallelization, we use page_trylock instead
4507 * of page_lock so that we don't get blocked on individual pages
4508 * while we have thousands of other pages to process.
4509 */
4510 if (!page_trylock(pp, SE_EXCL)) {
4511 nbusypages++;
4512 continue;
4513 } else if (PP_ISFREE(pp)) {
4514 if (!PP_ISAGED(pp)) {
4515 page_destroy_free(pp);
4516 } else {
4517 page_unlock(pp);
4518 }
4519 continue;
4520 }
4521 /*
4522 * Is this page involved in some I/O? shared?
4523 *
4524 * The page_struct_lock need not be acquired to
4525 * examine these fields since the page has an
4526 * "exclusive" lock.
4527 */
4528 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
4529 page_unlock(pp);
4530 continue;
4531 }
4532
4533 if (vp->v_type == VCHR) {
4534 panic("vp->v_type == VCHR");
4535 /*NOTREACHED*/
4536 }
4537
4538 if (!page_try_demote_pages(pp)) {
4539 page_unlock(pp);
4540 continue;
4541 }
4542
4543 /*
4544 * Check the modified bit. Leave the bits alone in hardware
4545 * (they will be modified if we do the putpage).
4546 */
4547 mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD)
4548 & P_MOD);
4549 if (mod) {
4550 offset = pp->p_offset;
4551 /*
4552 * Hold the vnode before releasing the page lock
4553 * to prevent it from being freed and re-used by
4554 * some other thread.
4555 */
4556 VN_HOLD(vp);
4557 page_unlock(pp);
4558 /*
4559 * No error return is checked here. Callers such as
4560 * cpr deal with the dirty pages at dump time
4561 * if this putpage fails.
4562 */
4563 (void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL,
4564 kcred, NULL);
4565 VN_RELE(vp);
4566 } else {
4567 page_destroy(pp, 0);
4568 }
4569 } while ((pp = page_next(pp)) != page0);
4570 if (nbusypages && retry++ < MAXRETRIES) {
4571 delay(1);
4572 goto top;
4573 }
4574 }
4575
4576 /*
4577 * Replace the page "old" with the page "new" on the page hash and vnode lists.
4578 *
4579 * The replacement must be done in place, i.e. the equivalent sequence:
4580 *
4581 * vp = old->p_vnode;
4582 * off = old->p_offset;
4583 * page_do_hashout(old)
4584 * page_do_hashin(new, vp, off)
4585 *
4586 * doesn't work, since
4587 * 1) if old is the only page on the vnode, the v_pages list has a window
4588 * where it looks empty. This will break file system assumptions.
4589 * and
4590 * 2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list.
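 *
 * Instead, the code below splices "new" directly into old's place on both
 * the hash chain and the vnode's v_pages list, so at all times one of the
 * two pages is visible and neither list ever appears empty.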
4591 */ 4592 static void 4593 page_do_relocate_hash(page_t *new, page_t *old) 4594 { 4595 page_t **hash_list; 4596 vnode_t *vp = old->p_vnode; 4597 kmutex_t *sep; 4598 4599 ASSERT(PAGE_EXCL(old)); 4600 ASSERT(PAGE_EXCL(new)); 4601 ASSERT(vp != NULL); 4602 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 4603 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset)))); 4604 4605 /* 4606 * First find old page on the page hash list 4607 */ 4608 hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)]; 4609 4610 for (;;) { 4611 if (*hash_list == old) 4612 break; 4613 if (*hash_list == NULL) { 4614 panic("page_do_hashout"); 4615 /*NOTREACHED*/ 4616 } 4617 hash_list = &(*hash_list)->p_hash; 4618 } 4619 4620 /* 4621 * update new and replace old with new on the page hash list 4622 */ 4623 new->p_vnode = old->p_vnode; 4624 new->p_offset = old->p_offset; 4625 new->p_hash = old->p_hash; 4626 *hash_list = new; 4627 4628 if ((new->p_vnode->v_flag & VISSWAP) != 0) 4629 PP_SETSWAP(new); 4630 4631 /* 4632 * replace old with new on the vnode's page list 4633 */ 4634 if (old->p_vpnext == old) { 4635 new->p_vpnext = new; 4636 new->p_vpprev = new; 4637 } else { 4638 new->p_vpnext = old->p_vpnext; 4639 new->p_vpprev = old->p_vpprev; 4640 new->p_vpnext->p_vpprev = new; 4641 new->p_vpprev->p_vpnext = new; 4642 } 4643 if (vp->v_pages == old) 4644 vp->v_pages = new; 4645 4646 /* 4647 * clear out the old page 4648 */ 4649 old->p_hash = NULL; 4650 old->p_vpnext = NULL; 4651 old->p_vpprev = NULL; 4652 old->p_vnode = NULL; 4653 PP_CLRSWAP(old); 4654 old->p_offset = (u_offset_t)-1; 4655 page_clr_all_props(old); 4656 4657 /* 4658 * Wake up processes waiting for this page. The page's 4659 * identity has been changed, and is probably not the 4660 * desired page any longer. 4661 */ 4662 sep = page_se_mutex(old); 4663 mutex_enter(sep); 4664 old->p_selock &= ~SE_EWANTED; 4665 if (CV_HAS_WAITERS(&old->p_cv)) 4666 cv_broadcast(&old->p_cv); 4667 mutex_exit(sep); 4668 } 4669 4670 /* 4671 * This function moves the identity of page "pp_old" to page "pp_new". 4672 * Both pages must be locked on entry. "pp_new" is free, has no identity, 4673 * and need not be hashed out from anywhere. 4674 */ 4675 void 4676 page_relocate_hash(page_t *pp_new, page_t *pp_old) 4677 { 4678 vnode_t *vp = pp_old->p_vnode; 4679 u_offset_t off = pp_old->p_offset; 4680 kmutex_t *phm, *vphm; 4681 4682 /* 4683 * Rehash two pages 4684 */ 4685 ASSERT(PAGE_EXCL(pp_old)); 4686 ASSERT(PAGE_EXCL(pp_new)); 4687 ASSERT(vp != NULL); 4688 ASSERT(pp_new->p_vnode == NULL); 4689 4690 /* 4691 * hashout then hashin while holding the mutexes 4692 */ 4693 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off)); 4694 mutex_enter(phm); 4695 vphm = page_vnode_mutex(vp); 4696 mutex_enter(vphm); 4697 4698 page_do_relocate_hash(pp_new, pp_old); 4699 4700 mutex_exit(vphm); 4701 mutex_exit(phm); 4702 4703 /* 4704 * The page_struct_lock need not be acquired for lckcnt and 4705 * cowcnt since the page has an "exclusive" lock. 4706 */ 4707 ASSERT(pp_new->p_lckcnt == 0); 4708 ASSERT(pp_new->p_cowcnt == 0); 4709 pp_new->p_lckcnt = pp_old->p_lckcnt; 4710 pp_new->p_cowcnt = pp_old->p_cowcnt; 4711 pp_old->p_lckcnt = pp_old->p_cowcnt = 0; 4712 4713 /* The following comment preserved from page_flip(). */ 4714 /* XXX - Do we need to protect fsdata? */ 4715 pp_new->p_fsdata = pp_old->p_fsdata; 4716 } 4717 4718 /* 4719 * Helper routine used to lock all remaining members of a 4720 * large page. The caller is responsible for passing in a locked 4721 * pp. 
If pp is a large page, then it succeeds in locking all the 4722 * remaining constituent pages or it returns with only the 4723 * original page locked. 4724 * 4725 * Returns 1 on success, 0 on failure. 4726 * 4727 * If success is returned this routine guarantees p_szc for all constituent 4728 * pages of a large page pp belongs to can't change. To achieve this we 4729 * recheck szc of pp after locking all constituent pages and retry if szc 4730 * changed (it could only decrease). Since hat_page_demote() needs an EXCL 4731 * lock on one of constituent pages it can't be running after all constituent 4732 * pages are locked. hat_page_demote() with a lock on a constituent page 4733 * outside of this large page (i.e. pp belonged to a larger large page) is 4734 * already done with all constituent pages of pp since the root's p_szc is 4735 * changed last. Therefore no need to synchronize with hat_page_demote() that 4736 * locked a constituent page outside of pp's current large page. 4737 */ 4738 #ifdef DEBUG 4739 uint32_t gpg_trylock_mtbf = 0; 4740 #endif 4741 4742 int 4743 group_page_trylock(page_t *pp, se_t se) 4744 { 4745 page_t *tpp; 4746 pgcnt_t npgs, i, j; 4747 uint_t pszc = pp->p_szc; 4748 4749 #ifdef DEBUG 4750 if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) { 4751 return (0); 4752 } 4753 #endif 4754 4755 if (pp != PP_GROUPLEADER(pp, pszc)) { 4756 return (0); 4757 } 4758 4759 retry: 4760 ASSERT(PAGE_LOCKED_SE(pp, se)); 4761 ASSERT(!PP_ISFREE(pp)); 4762 if (pszc == 0) { 4763 return (1); 4764 } 4765 npgs = page_get_pagecnt(pszc); 4766 tpp = pp + 1; 4767 for (i = 1; i < npgs; i++, tpp++) { 4768 if (!page_trylock(tpp, se)) { 4769 tpp = pp + 1; 4770 for (j = 1; j < i; j++, tpp++) { 4771 page_unlock(tpp); 4772 } 4773 return (0); 4774 } 4775 } 4776 if (pp->p_szc != pszc) { 4777 ASSERT(pp->p_szc < pszc); 4778 ASSERT(pp->p_vnode != NULL && !PP_ISKAS(pp) && 4779 !IS_SWAPFSVP(pp->p_vnode)); 4780 tpp = pp + 1; 4781 for (i = 1; i < npgs; i++, tpp++) { 4782 page_unlock(tpp); 4783 } 4784 pszc = pp->p_szc; 4785 goto retry; 4786 } 4787 return (1); 4788 } 4789 4790 void 4791 group_page_unlock(page_t *pp) 4792 { 4793 page_t *tpp; 4794 pgcnt_t npgs, i; 4795 4796 ASSERT(PAGE_LOCKED(pp)); 4797 ASSERT(!PP_ISFREE(pp)); 4798 ASSERT(pp == PP_PAGEROOT(pp)); 4799 npgs = page_get_pagecnt(pp->p_szc); 4800 for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) { 4801 page_unlock(tpp); 4802 } 4803 } 4804 4805 /* 4806 * returns 4807 * 0 : on success and *nrelocp is number of relocated PAGESIZE pages 4808 * ERANGE : this is not a base page 4809 * EBUSY : failure to get locks on the page/pages 4810 * ENOMEM : failure to obtain replacement pages 4811 * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel 4812 * EIO : An error occurred while trying to copy the page data 4813 * 4814 * Return with all constituent members of target and replacement 4815 * SE_EXCL locked. It is the callers responsibility to drop the 4816 * locks. 
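 *
 * Illustrative call sequence only (this caller is hypothetical, not code
 * from this file); targ must be the SE_EXCL locked base page of the large
 * page, and repl may be NULL to have replacement pages allocated here:
 *
 *	spgcnt_t nreloc;
 *	page_t *repl = NULL;
 *	int err = do_page_relocate(&targ, &repl, 1, &nreloc, NULL);
 *
 * On err == 0, targ and repl head the lists of relocated and replacement
 * pages, all still SE_EXCL locked; it is then up to the caller to unlock
 * or free them.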
4817 */ 4818 int 4819 do_page_relocate( 4820 page_t **target, 4821 page_t **replacement, 4822 int grouplock, 4823 spgcnt_t *nrelocp, 4824 lgrp_t *lgrp) 4825 { 4826 page_t *first_repl; 4827 page_t *repl; 4828 page_t *targ; 4829 page_t *pl = NULL; 4830 uint_t ppattr; 4831 pfn_t pfn, repl_pfn; 4832 uint_t szc; 4833 spgcnt_t npgs, i; 4834 int repl_contig = 0; 4835 uint_t flags = 0; 4836 spgcnt_t dofree = 0; 4837 4838 *nrelocp = 0; 4839 4840 #if defined(__sparc) 4841 /* 4842 * We need to wait till OBP has completed 4843 * its boot-time handoff of its resources to the kernel 4844 * before we allow page relocation 4845 */ 4846 if (page_relocate_ready == 0) { 4847 return (EAGAIN); 4848 } 4849 #endif 4850 4851 /* 4852 * If this is not a base page, 4853 * just return with 0x0 pages relocated. 4854 */ 4855 targ = *target; 4856 ASSERT(PAGE_EXCL(targ)); 4857 ASSERT(!PP_ISFREE(targ)); 4858 szc = targ->p_szc; 4859 ASSERT(szc < mmu_page_sizes); 4860 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4861 pfn = targ->p_pagenum; 4862 if (pfn != PFN_BASE(pfn, szc)) { 4863 VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]); 4864 return (ERANGE); 4865 } 4866 4867 if ((repl = *replacement) != NULL && repl->p_szc >= szc) { 4868 repl_pfn = repl->p_pagenum; 4869 if (repl_pfn != PFN_BASE(repl_pfn, szc)) { 4870 VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]); 4871 return (ERANGE); 4872 } 4873 repl_contig = 1; 4874 } 4875 4876 /* 4877 * We must lock all members of this large page or we cannot 4878 * relocate any part of it. 4879 */ 4880 if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) { 4881 VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]); 4882 return (EBUSY); 4883 } 4884 4885 /* 4886 * reread szc it could have been decreased before 4887 * group_page_trylock() was done. 4888 */ 4889 szc = targ->p_szc; 4890 ASSERT(szc < mmu_page_sizes); 4891 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4892 ASSERT(pfn == PFN_BASE(pfn, szc)); 4893 4894 npgs = page_get_pagecnt(targ->p_szc); 4895 4896 if (repl == NULL) { 4897 dofree = npgs; /* Size of target page in MMU pages */ 4898 if (!page_create_wait(dofree, 0)) { 4899 if (grouplock != 0) { 4900 group_page_unlock(targ); 4901 } 4902 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4903 return (ENOMEM); 4904 } 4905 4906 /* 4907 * seg kmem pages require that the target and replacement 4908 * page be the same pagesize. 4909 */ 4910 flags = (VN_ISKAS(targ->p_vnode)) ? 
PGR_SAMESZC : 0; 4911 repl = page_get_replacement_page(targ, lgrp, flags); 4912 if (repl == NULL) { 4913 if (grouplock != 0) { 4914 group_page_unlock(targ); 4915 } 4916 page_create_putback(dofree); 4917 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4918 return (ENOMEM); 4919 } 4920 } 4921 #ifdef DEBUG 4922 else { 4923 ASSERT(PAGE_LOCKED(repl)); 4924 } 4925 #endif /* DEBUG */ 4926 4927 #if defined(__sparc) 4928 /* 4929 * Let hat_page_relocate() complete the relocation if it's kernel page 4930 */ 4931 if (VN_ISKAS(targ->p_vnode)) { 4932 *replacement = repl; 4933 if (hat_page_relocate(target, replacement, nrelocp) != 0) { 4934 if (grouplock != 0) { 4935 group_page_unlock(targ); 4936 } 4937 if (dofree) { 4938 *replacement = NULL; 4939 page_free_replacement_page(repl); 4940 page_create_putback(dofree); 4941 } 4942 VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]); 4943 return (EAGAIN); 4944 } 4945 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 4946 return (0); 4947 } 4948 #else 4949 #if defined(lint) 4950 dofree = dofree; 4951 #endif 4952 #endif 4953 4954 first_repl = repl; 4955 4956 for (i = 0; i < npgs; i++) { 4957 ASSERT(PAGE_EXCL(targ)); 4958 ASSERT(targ->p_slckcnt == 0); 4959 ASSERT(repl->p_slckcnt == 0); 4960 4961 (void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD); 4962 4963 ASSERT(hat_page_getshare(targ) == 0); 4964 ASSERT(!PP_ISFREE(targ)); 4965 ASSERT(targ->p_pagenum == (pfn + i)); 4966 ASSERT(repl_contig == 0 || 4967 repl->p_pagenum == (repl_pfn + i)); 4968 4969 /* 4970 * Copy the page contents and attributes then 4971 * relocate the page in the page hash. 4972 */ 4973 if (ppcopy(targ, repl) == 0) { 4974 targ = *target; 4975 repl = first_repl; 4976 VM_STAT_ADD(vmm_vmstats.ppr_copyfail); 4977 if (grouplock != 0) { 4978 group_page_unlock(targ); 4979 } 4980 if (dofree) { 4981 *replacement = NULL; 4982 page_free_replacement_page(repl); 4983 page_create_putback(dofree); 4984 } 4985 return (EIO); 4986 } 4987 4988 targ++; 4989 if (repl_contig != 0) { 4990 repl++; 4991 } else { 4992 repl = repl->p_next; 4993 } 4994 } 4995 4996 repl = first_repl; 4997 targ = *target; 4998 4999 for (i = 0; i < npgs; i++) { 5000 ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO)); 5001 page_clr_all_props(repl); 5002 page_set_props(repl, ppattr); 5003 page_relocate_hash(repl, targ); 5004 5005 ASSERT(hat_page_getshare(targ) == 0); 5006 ASSERT(hat_page_getshare(repl) == 0); 5007 /* 5008 * Now clear the props on targ, after the 5009 * page_relocate_hash(), they no longer 5010 * have any meaning. 5011 */ 5012 page_clr_all_props(targ); 5013 ASSERT(targ->p_next == targ); 5014 ASSERT(targ->p_prev == targ); 5015 page_list_concat(&pl, &targ); 5016 5017 targ++; 5018 if (repl_contig != 0) { 5019 repl++; 5020 } else { 5021 repl = repl->p_next; 5022 } 5023 } 5024 /* assert that we have come full circle with repl */ 5025 ASSERT(repl_contig == 1 || first_repl == repl); 5026 5027 *target = pl; 5028 if (*replacement == NULL) { 5029 ASSERT(first_repl == repl); 5030 *replacement = repl; 5031 } 5032 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 5033 *nrelocp = npgs; 5034 return (0); 5035 } 5036 /* 5037 * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated. 
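 * When freetarget is nonzero the relocated target pages are freed here
 * instead of being handed back; see page_relocate_cage() below, which
 * calls this with grouplock == 0 and freetarget == 1.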
5038 */ 5039 int 5040 page_relocate( 5041 page_t **target, 5042 page_t **replacement, 5043 int grouplock, 5044 int freetarget, 5045 spgcnt_t *nrelocp, 5046 lgrp_t *lgrp) 5047 { 5048 spgcnt_t ret; 5049 5050 /* do_page_relocate returns 0 on success or errno value */ 5051 ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp); 5052 5053 if (ret != 0 || freetarget == 0) { 5054 return (ret); 5055 } 5056 if (*nrelocp == 1) { 5057 ASSERT(*target != NULL); 5058 page_free(*target, 1); 5059 } else { 5060 page_t *tpp = *target; 5061 uint_t szc = tpp->p_szc; 5062 pgcnt_t npgs = page_get_pagecnt(szc); 5063 ASSERT(npgs > 1); 5064 ASSERT(szc != 0); 5065 do { 5066 ASSERT(PAGE_EXCL(tpp)); 5067 ASSERT(!hat_page_is_mapped(tpp)); 5068 ASSERT(tpp->p_szc == szc); 5069 PP_SETFREE(tpp); 5070 PP_SETAGED(tpp); 5071 npgs--; 5072 } while ((tpp = tpp->p_next) != *target); 5073 ASSERT(npgs == 0); 5074 page_list_add_pages(*target, 0); 5075 npgs = page_get_pagecnt(szc); 5076 page_create_putback(npgs); 5077 } 5078 return (ret); 5079 } 5080 5081 /* 5082 * it is up to the caller to deal with pcf accounting. 5083 */ 5084 void 5085 page_free_replacement_page(page_t *pplist) 5086 { 5087 page_t *pp; 5088 5089 while (pplist != NULL) { 5090 /* 5091 * pp_targ is a linked list. 5092 */ 5093 pp = pplist; 5094 if (pp->p_szc == 0) { 5095 page_sub(&pplist, pp); 5096 page_clr_all_props(pp); 5097 PP_SETFREE(pp); 5098 PP_SETAGED(pp); 5099 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 5100 page_unlock(pp); 5101 VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]); 5102 } else { 5103 spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc); 5104 page_t *tpp; 5105 page_list_break(&pp, &pplist, curnpgs); 5106 tpp = pp; 5107 do { 5108 ASSERT(PAGE_EXCL(tpp)); 5109 ASSERT(!hat_page_is_mapped(tpp)); 5110 page_clr_all_props(pp); 5111 PP_SETFREE(tpp); 5112 PP_SETAGED(tpp); 5113 } while ((tpp = tpp->p_next) != pp); 5114 page_list_add_pages(pp, 0); 5115 VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]); 5116 } 5117 } 5118 } 5119 5120 /* 5121 * Relocate target to non-relocatable replacement page. 5122 */ 5123 int 5124 page_relocate_cage(page_t **target, page_t **replacement) 5125 { 5126 page_t *tpp, *rpp; 5127 spgcnt_t pgcnt, npgs; 5128 int result; 5129 5130 tpp = *target; 5131 5132 ASSERT(PAGE_EXCL(tpp)); 5133 ASSERT(tpp->p_szc == 0); 5134 5135 pgcnt = btop(page_get_pagesize(tpp->p_szc)); 5136 5137 do { 5138 (void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC); 5139 rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC); 5140 if (rpp == NULL) { 5141 page_create_putback(pgcnt); 5142 kcage_cageout_wakeup(); 5143 } 5144 } while (rpp == NULL); 5145 5146 ASSERT(PP_ISNORELOC(rpp)); 5147 5148 result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL); 5149 5150 if (result == 0) { 5151 *replacement = rpp; 5152 if (pgcnt != npgs) 5153 panic("page_relocate_cage: partial relocation"); 5154 } 5155 5156 return (result); 5157 } 5158 5159 /* 5160 * Release the page lock on a page, place on cachelist 5161 * tail if no longer mapped. Caller can let us know if 5162 * the page is known to be clean. 
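 *
 * Returns PGREL_CLEAN if the page was released to the cachelist,
 * PGREL_MOD if 'checkmod' was set and the page was still dirty (it is
 * only unlocked), or PGREL_NOTREL if it could not be released at all.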
5163 */ 5164 int 5165 page_release(page_t *pp, int checkmod) 5166 { 5167 int status; 5168 5169 ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) && 5170 (pp->p_vnode != NULL)); 5171 5172 if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) && 5173 ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) && 5174 pp->p_lckcnt == 0 && pp->p_cowcnt == 0 && 5175 !hat_page_is_mapped(pp)) { 5176 5177 /* 5178 * If page is modified, unlock it 5179 * 5180 * (p_nrm & P_MOD) bit has the latest stuff because: 5181 * (1) We found that this page doesn't have any mappings 5182 * _after_ holding SE_EXCL and 5183 * (2) We didn't drop SE_EXCL lock after the check in (1) 5184 */ 5185 if (checkmod && hat_ismod(pp)) { 5186 page_unlock(pp); 5187 status = PGREL_MOD; 5188 } else { 5189 /*LINTED: constant in conditional context*/ 5190 VN_DISPOSE(pp, B_FREE, 0, kcred); 5191 status = PGREL_CLEAN; 5192 } 5193 } else { 5194 page_unlock(pp); 5195 status = PGREL_NOTREL; 5196 } 5197 return (status); 5198 } 5199 5200 /* 5201 * Given a constituent page, try to demote the large page on the freelist. 5202 * 5203 * Returns nonzero if the page could be demoted successfully. Returns with 5204 * the constituent page still locked. 5205 */ 5206 int 5207 page_try_demote_free_pages(page_t *pp) 5208 { 5209 page_t *rootpp = pp; 5210 pfn_t pfn = page_pptonum(pp); 5211 spgcnt_t npgs; 5212 uint_t szc = pp->p_szc; 5213 5214 ASSERT(PP_ISFREE(pp)); 5215 ASSERT(PAGE_EXCL(pp)); 5216 5217 /* 5218 * Adjust rootpp and lock it, if `pp' is not the base 5219 * constituent page. 5220 */ 5221 npgs = page_get_pagecnt(pp->p_szc); 5222 if (npgs == 1) { 5223 return (0); 5224 } 5225 5226 if (!IS_P2ALIGNED(pfn, npgs)) { 5227 pfn = P2ALIGN(pfn, npgs); 5228 rootpp = page_numtopp_nolock(pfn); 5229 } 5230 5231 if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) { 5232 return (0); 5233 } 5234 5235 if (rootpp->p_szc != szc) { 5236 if (pp != rootpp) 5237 page_unlock(rootpp); 5238 return (0); 5239 } 5240 5241 page_demote_free_pages(rootpp); 5242 5243 if (pp != rootpp) 5244 page_unlock(rootpp); 5245 5246 ASSERT(PP_ISFREE(pp)); 5247 ASSERT(PAGE_EXCL(pp)); 5248 return (1); 5249 } 5250 5251 /* 5252 * Given a constituent page, try to demote the large page. 5253 * 5254 * Returns nonzero if the page could be demoted successfully. Returns with 5255 * the constituent page still locked. 5256 */ 5257 int 5258 page_try_demote_pages(page_t *pp) 5259 { 5260 page_t *tpp, *rootpp = pp; 5261 pfn_t pfn = page_pptonum(pp); 5262 spgcnt_t i, npgs; 5263 uint_t szc = pp->p_szc; 5264 vnode_t *vp = pp->p_vnode; 5265 5266 ASSERT(PAGE_EXCL(pp)); 5267 5268 VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]); 5269 5270 if (pp->p_szc == 0) { 5271 VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]); 5272 return (1); 5273 } 5274 5275 if (vp != NULL && !IS_SWAPFSVP(vp) && !VN_ISKAS(vp)) { 5276 VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]); 5277 page_demote_vp_pages(pp); 5278 ASSERT(pp->p_szc == 0); 5279 return (1); 5280 } 5281 5282 /* 5283 * Adjust rootpp if passed in is not the base 5284 * constituent page. 5285 */ 5286 npgs = page_get_pagecnt(pp->p_szc); 5287 ASSERT(npgs > 1); 5288 if (!IS_P2ALIGNED(pfn, npgs)) { 5289 pfn = P2ALIGN(pfn, npgs); 5290 rootpp = page_numtopp_nolock(pfn); 5291 VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]); 5292 ASSERT(rootpp->p_vnode != NULL); 5293 ASSERT(rootpp->p_szc == szc); 5294 } 5295 5296 /* 5297 * We can't demote kernel pages since we can't hat_unload() 5298 * the mappings. 
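	 * Just fail the demotion and leave the large page intact; kernel
	 * large pages are only demoted with all constituents locked EXCL.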
5299 */ 5300 if (VN_ISKAS(rootpp->p_vnode)) 5301 return (0); 5302 5303 /* 5304 * Attempt to lock all constituent pages except the page passed 5305 * in since it's already locked. 5306 */ 5307 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5308 ASSERT(!PP_ISFREE(tpp)); 5309 ASSERT(tpp->p_vnode != NULL); 5310 5311 if (tpp != pp && !page_trylock(tpp, SE_EXCL)) 5312 break; 5313 ASSERT(tpp->p_szc == rootpp->p_szc); 5314 ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i); 5315 } 5316 5317 /* 5318 * If we failed to lock them all then unlock what we have 5319 * locked so far and bail. 5320 */ 5321 if (i < npgs) { 5322 tpp = rootpp; 5323 while (i-- > 0) { 5324 if (tpp != pp) 5325 page_unlock(tpp); 5326 tpp++; 5327 } 5328 VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]); 5329 return (0); 5330 } 5331 5332 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5333 ASSERT(PAGE_EXCL(tpp)); 5334 ASSERT(tpp->p_slckcnt == 0); 5335 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 5336 tpp->p_szc = 0; 5337 } 5338 5339 /* 5340 * Unlock all pages except the page passed in. 5341 */ 5342 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5343 ASSERT(!hat_page_is_mapped(tpp)); 5344 if (tpp != pp) 5345 page_unlock(tpp); 5346 } 5347 5348 VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]); 5349 return (1); 5350 } 5351 5352 /* 5353 * Called by page_free() and page_destroy() to demote the page size code 5354 * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non zero 5355 * p_szc on free list, neither can we just clear p_szc of a single page_t 5356 * within a large page since it will break other code that relies on p_szc 5357 * being the same for all page_t's of a large page). Anonymous pages should 5358 * never end up here because anon_map_getpages() cannot deal with p_szc 5359 * changes after a single constituent page is locked. While anonymous or 5360 * kernel large pages are demoted or freed the entire large page at a time 5361 * with all constituent pages locked EXCL for the file system pages we 5362 * have to be able to demote a large page (i.e. decrease all constituent pages 5363 * p_szc) with only just an EXCL lock on one of constituent pages. The reason 5364 * we can easily deal with anonymous page demotion the entire large page at a 5365 * time is that those operation originate at address space level and concern 5366 * the entire large page region with actual demotion only done when pages are 5367 * not shared with any other processes (therefore we can always get EXCL lock 5368 * on all anonymous constituent pages after clearing segment page 5369 * cache). However file system pages can be truncated or invalidated at a 5370 * PAGESIZE level from the file system side and end up in page_free() or 5371 * page_destroy() (we also allow only part of the large page to be SOFTLOCKed 5372 * and therefore pageout should be able to demote a large page by EXCL locking 5373 * any constituent page that is not under SOFTLOCK). In those cases we cannot 5374 * rely on being able to lock EXCL all constituent pages. 5375 * 5376 * To prevent szc changes on file system pages one has to lock all constituent 5377 * pages at least SHARED (or call page_szc_lock()). The only subsystem that 5378 * doesn't rely on locking all constituent pages (or using page_szc_lock()) to 5379 * prevent szc changes is hat layer that uses its own page level mlist 5380 * locks. hat assumes that szc doesn't change after mlist lock for a page is 5381 * taken. 
Therefore we need to change szc under hat level locks if we only
5382 * have an EXCL lock on a single constituent page and hat still references any
5383 * of the constituent pages. (Note we can't "ignore" the hat layer by simply
5384 * doing hat_pageunload() on all constituent pages without having EXCL locks
5385 * on all of them). We use the hat_page_demote() call to safely demote szc of
5386 * all constituent pages under hat locks when we only have an EXCL lock on one
5387 * of the constituent pages.
5388 *
5389 * This routine calls page_szc_lock() before calling hat_page_demote() to
5390 * allow segvn in one special case not to lock all constituent pages SHARED
5391 * before calling hat_memload_array() that relies on p_szc not changing even
5392 * before the hat level mlist lock is taken. In that case segvn uses
5393 * page_szc_lock() to prevent hat_page_demote() changing p_szc values.
5394 *
5395 * Anonymous or kernel page demotion still has to lock all pages exclusively
5396 * and do hat_pageunload() on all constituent pages before demoting the page,
5397 * so there's no need for anonymous or kernel page demotion to use the
5398 * hat_page_demote() mechanism.
5399 *
5400 * hat_page_demote() removes all large mappings that map pp and then decreases
5401 * p_szc starting from the last constituent page of the large page. Working
5402 * from the tail of a large page in decreasing pfn order allows one looking at
5403 * the root page to know that hat_page_demote() is done for the root's szc area.
5404 * E.g. if a root page has szc 1, one knows it only has to lock all constituent
5405 * pages within the szc 1 area to prevent szc changes, because a hat_page_demote()
5406 * that started on this page when it had szc > 1 is done for this szc 1 area.
5407 *
5408 * We are guaranteed that all constituent pages of pp's large page belong to
5409 * the same vnode with consecutive offsets increasing in the direction of
5410 * the pfn, i.e. the identity of constituent pages can't change until their
5411 * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove
5412 * large mappings to pp even though we don't lock any constituent page except
5413 * pp (i.e. we won't unload, e.g., a kernel locked page).
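 *
 * To sum up: to keep p_szc from changing, hold at least a SHARED lock on
 * every constituent page or take page_szc_lock(); an EXCL lock on a single
 * constituent page only pins p_szc once any hat_page_demote() for that szc
 * area has finished.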
5414 */ 5415 static void 5416 page_demote_vp_pages(page_t *pp) 5417 { 5418 kmutex_t *mtx; 5419 5420 ASSERT(PAGE_EXCL(pp)); 5421 ASSERT(!PP_ISFREE(pp)); 5422 ASSERT(pp->p_vnode != NULL); 5423 ASSERT(!IS_SWAPFSVP(pp->p_vnode)); 5424 ASSERT(!PP_ISKAS(pp)); 5425 5426 VM_STAT_ADD(pagecnt.pc_demote_pages[0]); 5427 5428 mtx = page_szc_lock(pp); 5429 if (mtx != NULL) { 5430 hat_page_demote(pp); 5431 mutex_exit(mtx); 5432 } 5433 ASSERT(pp->p_szc == 0); 5434 } 5435 5436 /* 5437 * Mark any existing pages for migration in the given range 5438 */ 5439 void 5440 page_mark_migrate(struct seg *seg, caddr_t addr, size_t len, 5441 struct anon_map *amp, ulong_t anon_index, vnode_t *vp, 5442 u_offset_t vnoff, int rflag) 5443 { 5444 struct anon *ap; 5445 vnode_t *curvp; 5446 lgrp_t *from; 5447 pgcnt_t i; 5448 pgcnt_t nlocked; 5449 u_offset_t off; 5450 pfn_t pfn; 5451 size_t pgsz; 5452 size_t segpgsz; 5453 pgcnt_t pages; 5454 uint_t pszc; 5455 page_t **ppa; 5456 pgcnt_t ppa_nentries; 5457 page_t *pp; 5458 caddr_t va; 5459 ulong_t an_idx; 5460 anon_sync_obj_t cookie; 5461 5462 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5463 5464 /* 5465 * Don't do anything if don't need to do lgroup optimizations 5466 * on this system 5467 */ 5468 if (!lgrp_optimizations()) 5469 return; 5470 5471 /* 5472 * Align address and length to (potentially large) page boundary 5473 */ 5474 segpgsz = page_get_pagesize(seg->s_szc); 5475 addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz); 5476 if (rflag) 5477 len = P2ROUNDUP(len, segpgsz); 5478 5479 /* 5480 * Allocate page array to accommodate largest page size 5481 */ 5482 pgsz = page_get_pagesize(page_num_pagesizes() - 1); 5483 ppa_nentries = btop(pgsz); 5484 ppa = kmem_zalloc(ppa_nentries * sizeof (page_t *), KM_SLEEP); 5485 5486 /* 5487 * Do one (large) page at a time 5488 */ 5489 va = addr; 5490 while (va < addr + len) { 5491 /* 5492 * Lookup (root) page for vnode and offset corresponding to 5493 * this virtual address 5494 * Try anonmap first since there may be copy-on-write 5495 * pages, but initialize vnode pointer and offset using 5496 * vnode arguments just in case there isn't an amp. 
5497 */
5498 curvp = vp;
5499 off = vnoff + va - seg->s_base;
5500 if (amp) {
5501 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5502 an_idx = anon_index + seg_page(seg, va);
5503 anon_array_enter(amp, an_idx, &cookie);
5504 ap = anon_get_ptr(amp->ahp, an_idx);
5505 if (ap)
5506 swap_xlate(ap, &curvp, &off);
5507 anon_array_exit(&cookie);
5508 ANON_LOCK_EXIT(&amp->a_rwlock);
5509 }
5510
5511 pp = NULL;
5512 if (curvp)
5513 pp = page_lookup(curvp, off, SE_SHARED);
5514
5515 /*
5516 * If there isn't a page at this virtual address,
5517 * skip to next page
5518 */
5519 if (pp == NULL) {
5520 va += PAGESIZE;
5521 continue;
5522 }
5523
5524 /*
5525 * Figure out which lgroup this page is in for kstats
5526 */
5527 pfn = page_pptonum(pp);
5528 from = lgrp_pfn_to_lgrp(pfn);
5529
5530 /*
5531 * Get page size, and round up and skip to next page boundary
5532 * if unaligned address
5533 */
5534 pszc = pp->p_szc;
5535 pgsz = page_get_pagesize(pszc);
5536 pages = btop(pgsz);
5537 if (!IS_P2ALIGNED(va, pgsz) ||
5538 !IS_P2ALIGNED(pfn, pages) ||
5539 pgsz > segpgsz) {
5540 pgsz = MIN(pgsz, segpgsz);
5541 page_unlock(pp);
5542 i = btop(P2END((uintptr_t)va, pgsz) -
5543 (uintptr_t)va);
5544 va = (caddr_t)P2END((uintptr_t)va, pgsz);
5545 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, i);
5546 continue;
5547 }
5548
5549 /*
5550 * Upgrade to exclusive lock on page
5551 */
5552 if (!page_tryupgrade(pp)) {
5553 page_unlock(pp);
5554 va += pgsz;
5555 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
5556 btop(pgsz));
5557 continue;
5558 }
5559
5560 /*
5561 * Remember pages locked exclusively and how many
5562 */
5563 ppa[0] = pp;
5564 nlocked = 1;
5565
5566 /*
5567 * Lock constituent pages if this is large page
5568 */
5569 if (pages > 1) {
5570 /*
5571 * Lock all constituents except root page, since it
5572 * should be locked already.
5573 */
5574 for (i = 1; i < pages; i++) {
5575 pp++;
5576 if (!page_trylock(pp, SE_EXCL)) {
5577 break;
5578 }
5579 if (PP_ISFREE(pp) ||
5580 pp->p_szc != pszc) {
5581 /*
5582 * hat_page_demote() raced in with us.
5583 */
5584 ASSERT(!IS_SWAPFSVP(curvp));
5585 page_unlock(pp);
5586 break;
5587 }
5588 ppa[nlocked] = pp;
5589 nlocked++;
5590 }
5591 }
5592
5593 /*
5594 * If all constituent pages couldn't be locked,
5595 * unlock pages locked so far and skip to next page.
5596 */
5597 if (nlocked != pages) {
5598 for (i = 0; i < nlocked; i++)
5599 page_unlock(ppa[i]);
5600 va += pgsz;
5601 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
5602 btop(pgsz));
5603 continue;
5604 }
5605
5606 /*
5607 * hat_page_demote() can no longer happen
5608 * since last cons page had the right p_szc after
5609 * all cons pages were locked. all cons pages
5610 * should now have the same p_szc.
5611 */ 5612 5613 /* 5614 * All constituent pages locked successfully, so mark 5615 * large page for migration and unload the mappings of 5616 * constituent pages, so a fault will occur on any part of the 5617 * large page 5618 */ 5619 PP_SETMIGRATE(ppa[0]); 5620 for (i = 0; i < nlocked; i++) { 5621 pp = ppa[i]; 5622 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 5623 ASSERT(hat_page_getshare(pp) == 0); 5624 page_unlock(pp); 5625 } 5626 lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked); 5627 5628 va += pgsz; 5629 } 5630 kmem_free(ppa, ppa_nentries * sizeof (page_t *)); 5631 } 5632 5633 /* 5634 * Migrate any pages that have been marked for migration in the given range 5635 */ 5636 void 5637 page_migrate( 5638 struct seg *seg, 5639 caddr_t addr, 5640 page_t **ppa, 5641 pgcnt_t npages) 5642 { 5643 lgrp_t *from; 5644 lgrp_t *to; 5645 page_t *newpp; 5646 page_t *pp; 5647 pfn_t pfn; 5648 size_t pgsz; 5649 spgcnt_t page_cnt; 5650 spgcnt_t i; 5651 uint_t pszc; 5652 5653 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5654 5655 while (npages > 0) { 5656 pp = *ppa; 5657 pszc = pp->p_szc; 5658 pgsz = page_get_pagesize(pszc); 5659 page_cnt = btop(pgsz); 5660 5661 /* 5662 * Check to see whether this page is marked for migration 5663 * 5664 * Assume that root page of large page is marked for 5665 * migration and none of the other constituent pages 5666 * are marked. This really simplifies clearing the 5667 * migrate bit by not having to clear it from each 5668 * constituent page. 5669 * 5670 * note we don't want to relocate an entire large page if 5671 * someone is only using one subpage. 5672 */ 5673 if (npages < page_cnt) 5674 break; 5675 5676 /* 5677 * Is it marked for migration? 5678 */ 5679 if (!PP_ISMIGRATE(pp)) 5680 goto next; 5681 5682 /* 5683 * Determine lgroups that page is being migrated between 5684 */ 5685 pfn = page_pptonum(pp); 5686 if (!IS_P2ALIGNED(pfn, page_cnt)) { 5687 break; 5688 } 5689 from = lgrp_pfn_to_lgrp(pfn); 5690 to = lgrp_mem_choose(seg, addr, pgsz); 5691 5692 /* 5693 * Need to get exclusive lock's to migrate 5694 */ 5695 for (i = 0; i < page_cnt; i++) { 5696 ASSERT(PAGE_LOCKED(ppa[i])); 5697 if (page_pptonum(ppa[i]) != pfn + i || 5698 ppa[i]->p_szc != pszc) { 5699 break; 5700 } 5701 if (!page_tryupgrade(ppa[i])) { 5702 lgrp_stat_add(from->lgrp_id, 5703 LGRP_PM_FAIL_LOCK_PGS, 5704 page_cnt); 5705 break; 5706 } 5707 5708 /* 5709 * Check to see whether we are trying to migrate 5710 * page to lgroup where it is allocated already. 5711 * If so, clear the migrate bit and skip to next 5712 * page. 5713 */ 5714 if (i == 0 && to == from) { 5715 PP_CLRMIGRATE(ppa[0]); 5716 page_downgrade(ppa[0]); 5717 goto next; 5718 } 5719 } 5720 5721 /* 5722 * If all constituent pages couldn't be locked, 5723 * unlock pages locked so far and skip to next page. 
5724 */ 5725 if (i != page_cnt) { 5726 while (--i != -1) { 5727 page_downgrade(ppa[i]); 5728 } 5729 goto next; 5730 } 5731 5732 (void) page_create_wait(page_cnt, PG_WAIT); 5733 newpp = page_get_replacement_page(pp, to, PGR_SAMESZC); 5734 if (newpp == NULL) { 5735 page_create_putback(page_cnt); 5736 for (i = 0; i < page_cnt; i++) { 5737 page_downgrade(ppa[i]); 5738 } 5739 lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS, 5740 page_cnt); 5741 goto next; 5742 } 5743 ASSERT(newpp->p_szc == pszc); 5744 /* 5745 * Clear migrate bit and relocate page 5746 */ 5747 PP_CLRMIGRATE(pp); 5748 if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) { 5749 panic("page_migrate: page_relocate failed"); 5750 } 5751 ASSERT(page_cnt * PAGESIZE == pgsz); 5752 5753 /* 5754 * Keep stats for number of pages migrated from and to 5755 * each lgroup 5756 */ 5757 lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt); 5758 lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt); 5759 /* 5760 * update the page_t array we were passed in and 5761 * unlink constituent pages of a large page. 5762 */ 5763 for (i = 0; i < page_cnt; ++i, ++pp) { 5764 ASSERT(PAGE_EXCL(newpp)); 5765 ASSERT(newpp->p_szc == pszc); 5766 ppa[i] = newpp; 5767 pp = newpp; 5768 page_sub(&newpp, pp); 5769 page_downgrade(pp); 5770 } 5771 ASSERT(newpp == NULL); 5772 next: 5773 addr += pgsz; 5774 ppa += page_cnt; 5775 npages -= page_cnt; 5776 } 5777 } 5778 5779 ulong_t mem_waiters = 0; 5780 ulong_t max_count = 20; 5781 #define MAX_DELAY 0x1ff 5782 5783 /* 5784 * Check if enough memory is available to proceed. 5785 * Depending on system configuration and how much memory is 5786 * reserved for swap we need to check against two variables. 5787 * e.g. on systems with little physical swap availrmem can be 5788 * more reliable indicator of how much memory is available. 5789 * On systems with large phys swap freemem can be better indicator. 5790 * If freemem drops below threshold level don't return an error 5791 * immediately but wake up pageout to free memory and block. 5792 * This is done number of times. If pageout is not able to free 5793 * memory within certain time return an error. 5794 * The same applies for availrmem but kmem_reap is used to 5795 * free memory. 
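 *
 * Illustrative use only (the caller below is hypothetical): a consumer
 * about to tie down npages might do
 *
 *	if (!page_mem_avail(npages))
 *		return (ENOMEM);
 *
 * and rely on this routine to have poked pageout / kmem_reap and waited a
 * bounded amount of time before giving up and returning 0.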
5796 */ 5797 int 5798 page_mem_avail(pgcnt_t npages) 5799 { 5800 ulong_t count; 5801 5802 #if defined(__i386) 5803 if (freemem > desfree + npages && 5804 availrmem > swapfs_reserve + npages && 5805 btop(vmem_size(heap_arena, VMEM_FREE)) > tune.t_minarmem + 5806 npages) 5807 return (1); 5808 #else 5809 if (freemem > desfree + npages && 5810 availrmem > swapfs_reserve + npages) 5811 return (1); 5812 #endif 5813 5814 count = max_count; 5815 atomic_add_long(&mem_waiters, 1); 5816 5817 while (freemem < desfree + npages && --count) { 5818 cv_signal(&proc_pageout->p_cv); 5819 if (delay_sig(hz + (mem_waiters & MAX_DELAY))) { 5820 atomic_add_long(&mem_waiters, -1); 5821 return (0); 5822 } 5823 } 5824 if (count == 0) { 5825 atomic_add_long(&mem_waiters, -1); 5826 return (0); 5827 } 5828 5829 count = max_count; 5830 while (availrmem < swapfs_reserve + npages && --count) { 5831 kmem_reap(); 5832 if (delay_sig(hz + (mem_waiters & MAX_DELAY))) { 5833 atomic_add_long(&mem_waiters, -1); 5834 return (0); 5835 } 5836 } 5837 atomic_add_long(&mem_waiters, -1); 5838 if (count == 0) 5839 return (0); 5840 5841 #if defined(__i386) 5842 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 5843 tune.t_minarmem + npages) 5844 return (0); 5845 #endif 5846 return (1); 5847 } 5848 5849 #define MAX_CNT 60 /* max num of iterations */ 5850 /* 5851 * Reclaim/reserve availrmem for npages. 5852 * If there is not enough memory start reaping seg, kmem caches. 5853 * Start pageout scanner (via page_needfree()). 5854 * Exit after ~ MAX_CNT s regardless of how much memory has been released. 5855 * Note: There is no guarantee that any availrmem will be freed as 5856 * this memory typically is locked (kernel heap) or reserved for swap. 5857 * Also due to memory fragmentation kmem allocator may not be able 5858 * to free any memory (single user allocated buffer will prevent 5859 * freeing slab or a page). 5860 */ 5861 int 5862 page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust) 5863 { 5864 int i = 0; 5865 int ret = 0; 5866 pgcnt_t deficit; 5867 pgcnt_t old_availrmem; 5868 5869 mutex_enter(&freemem_lock); 5870 old_availrmem = availrmem - 1; 5871 while ((availrmem < tune.t_minarmem + npages + epages) && 5872 (old_availrmem < availrmem) && (i++ < MAX_CNT)) { 5873 old_availrmem = availrmem; 5874 deficit = tune.t_minarmem + npages + epages - availrmem; 5875 mutex_exit(&freemem_lock); 5876 page_needfree(deficit); 5877 kmem_reap(); 5878 delay(hz); 5879 page_needfree(-(spgcnt_t)deficit); 5880 mutex_enter(&freemem_lock); 5881 } 5882 5883 if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) { 5884 availrmem -= npages; 5885 ret = 1; 5886 } 5887 5888 mutex_exit(&freemem_lock); 5889 5890 return (ret); 5891 } 5892 5893 /* 5894 * Search the memory segments to locate the desired page. Within a 5895 * segment, pages increase linearly with one page structure per 5896 * physical page frame (size PAGESIZE). The search begins 5897 * with the segment that was accessed last, to take advantage of locality. 5898 * If the hint misses, we start from the beginning of the sorted memseg list 5899 */ 5900 5901 5902 /* 5903 * Some data structures for pfn to pp lookup. 
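 * memseg_hash[] maps a pfn (through MEMSEG_PFN_HASH()) to a likely memseg;
 * page_numtopp_nolock() below tries the per-CPU "last winner" memseg first,
 * then this hash, and only then walks the whole memsegs list.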
5904 */ 5905 ulong_t mhash_per_slot; 5906 struct memseg *memseg_hash[N_MEM_SLOTS]; 5907 5908 page_t * 5909 page_numtopp_nolock(pfn_t pfnum) 5910 { 5911 struct memseg *seg; 5912 page_t *pp; 5913 vm_cpu_data_t *vc = CPU->cpu_vm_data; 5914 5915 ASSERT(vc != NULL); 5916 5917 MEMSEG_STAT_INCR(nsearch); 5918 5919 /* Try last winner first */ 5920 if (((seg = vc->vc_pnum_memseg) != NULL) && 5921 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5922 MEMSEG_STAT_INCR(nlastwon); 5923 pp = seg->pages + (pfnum - seg->pages_base); 5924 if (pp->p_pagenum == pfnum) 5925 return ((page_t *)pp); 5926 } 5927 5928 /* Else Try hash */ 5929 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 5930 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5931 MEMSEG_STAT_INCR(nhashwon); 5932 vc->vc_pnum_memseg = seg; 5933 pp = seg->pages + (pfnum - seg->pages_base); 5934 if (pp->p_pagenum == pfnum) 5935 return ((page_t *)pp); 5936 } 5937 5938 /* Else Brute force */ 5939 for (seg = memsegs; seg != NULL; seg = seg->next) { 5940 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5941 vc->vc_pnum_memseg = seg; 5942 pp = seg->pages + (pfnum - seg->pages_base); 5943 return ((page_t *)pp); 5944 } 5945 } 5946 vc->vc_pnum_memseg = NULL; 5947 MEMSEG_STAT_INCR(nnotfound); 5948 return ((page_t *)NULL); 5949 5950 } 5951 5952 struct memseg * 5953 page_numtomemseg_nolock(pfn_t pfnum) 5954 { 5955 struct memseg *seg; 5956 page_t *pp; 5957 5958 /* Try hash */ 5959 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 5960 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5961 pp = seg->pages + (pfnum - seg->pages_base); 5962 if (pp->p_pagenum == pfnum) 5963 return (seg); 5964 } 5965 5966 /* Else Brute force */ 5967 for (seg = memsegs; seg != NULL; seg = seg->next) { 5968 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5969 return (seg); 5970 } 5971 } 5972 return ((struct memseg *)NULL); 5973 } 5974 5975 /* 5976 * Given a page and a count return the page struct that is 5977 * n structs away from the current one in the global page 5978 * list. 5979 * 5980 * This function wraps to the first page upon 5981 * reaching the end of the memseg list. 5982 */ 5983 page_t * 5984 page_nextn(page_t *pp, ulong_t n) 5985 { 5986 struct memseg *seg; 5987 page_t *ppn; 5988 vm_cpu_data_t *vc = (vm_cpu_data_t *)CPU->cpu_vm_data; 5989 5990 ASSERT(vc != NULL); 5991 5992 if (((seg = vc->vc_pnext_memseg) == NULL) || 5993 (seg->pages_base == seg->pages_end) || 5994 !(pp >= seg->pages && pp < seg->epages)) { 5995 5996 for (seg = memsegs; seg; seg = seg->next) { 5997 if (pp >= seg->pages && pp < seg->epages) 5998 break; 5999 } 6000 6001 if (seg == NULL) { 6002 /* Memory delete got in, return something valid. */ 6003 /* TODO: fix me. */ 6004 seg = memsegs; 6005 pp = seg->pages; 6006 } 6007 } 6008 6009 /* check for wraparound - possible if n is large */ 6010 while ((ppn = (pp + n)) >= seg->epages || ppn < pp) { 6011 n -= seg->epages - pp; 6012 seg = seg->next; 6013 if (seg == NULL) 6014 seg = memsegs; 6015 pp = seg->pages; 6016 } 6017 vc->vc_pnext_memseg = seg; 6018 return (ppn); 6019 } 6020 6021 /* 6022 * Initialize for a loop using page_next_scan_large(). 6023 */ 6024 page_t * 6025 page_next_scan_init(void **cookie) 6026 { 6027 ASSERT(cookie != NULL); 6028 *cookie = (void *)memsegs; 6029 return ((page_t *)memsegs->pages); 6030 } 6031 6032 /* 6033 * Return the next page in a scan of page_t's, assuming we want 6034 * to skip over sub-pages within larger page sizes. 
6035 * 6036 * The cookie is used to keep track of the current memseg. 6037 */ 6038 page_t * 6039 page_next_scan_large( 6040 page_t *pp, 6041 ulong_t *n, 6042 void **cookie) 6043 { 6044 struct memseg *seg = (struct memseg *)*cookie; 6045 page_t *new_pp; 6046 ulong_t cnt; 6047 pfn_t pfn; 6048 6049 6050 /* 6051 * get the count of page_t's to skip based on the page size 6052 */ 6053 ASSERT(pp != NULL); 6054 if (pp->p_szc == 0) { 6055 cnt = 1; 6056 } else { 6057 pfn = page_pptonum(pp); 6058 cnt = page_get_pagecnt(pp->p_szc); 6059 cnt -= pfn & (cnt - 1); 6060 } 6061 *n += cnt; 6062 new_pp = pp + cnt; 6063 6064 /* 6065 * Catch if we went past the end of the current memory segment. If so, 6066 * just move to the next segment with pages. 6067 */ 6068 if (new_pp >= seg->epages) { 6069 do { 6070 seg = seg->next; 6071 if (seg == NULL) 6072 seg = memsegs; 6073 } while (seg->pages == seg->epages); 6074 new_pp = seg->pages; 6075 *cookie = (void *)seg; 6076 } 6077 6078 return (new_pp); 6079 } 6080 6081 6082 /* 6083 * Returns next page in list. Note: this function wraps 6084 * to the first page in the list upon reaching the end 6085 * of the list. Callers should be aware of this fact. 6086 */ 6087 6088 /* We should change this be a #define */ 6089 6090 page_t * 6091 page_next(page_t *pp) 6092 { 6093 return (page_nextn(pp, 1)); 6094 } 6095 6096 page_t * 6097 page_first() 6098 { 6099 return ((page_t *)memsegs->pages); 6100 } 6101 6102 6103 /* 6104 * This routine is called at boot with the initial memory configuration 6105 * and when memory is added or removed. 6106 */ 6107 void 6108 build_pfn_hash() 6109 { 6110 pfn_t cur; 6111 pgcnt_t index; 6112 struct memseg *pseg; 6113 int i; 6114 6115 /* 6116 * Clear memseg_hash array. 6117 * Since memory add/delete is designed to operate concurrently 6118 * with normal operation, the hash rebuild must be able to run 6119 * concurrently with page_numtopp_nolock(). To support this 6120 * functionality, assignments to memseg_hash array members must 6121 * be done atomically. 6122 * 6123 * NOTE: bzero() does not currently guarantee this for kernel 6124 * threads, and cannot be used here. 6125 */ 6126 for (i = 0; i < N_MEM_SLOTS; i++) 6127 memseg_hash[i] = NULL; 6128 6129 hat_kpm_mseghash_clear(N_MEM_SLOTS); 6130 6131 /* 6132 * Physmax is the last valid pfn. 6133 */ 6134 mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT; 6135 for (pseg = memsegs; pseg != NULL; pseg = pseg->next) { 6136 index = MEMSEG_PFN_HASH(pseg->pages_base); 6137 cur = pseg->pages_base; 6138 do { 6139 if (index >= N_MEM_SLOTS) 6140 index = MEMSEG_PFN_HASH(cur); 6141 6142 if (memseg_hash[index] == NULL || 6143 memseg_hash[index]->pages_base > pseg->pages_base) { 6144 memseg_hash[index] = pseg; 6145 hat_kpm_mseghash_update(index, pseg); 6146 } 6147 cur += mhash_per_slot; 6148 index++; 6149 } while (cur < pseg->pages_end); 6150 } 6151 } 6152 6153 /* 6154 * Return the pagenum for the pp 6155 */ 6156 pfn_t 6157 page_pptonum(page_t *pp) 6158 { 6159 return (pp->p_pagenum); 6160 } 6161 6162 /* 6163 * interface to the referenced and modified etc bits 6164 * in the PSM part of the page struct 6165 * when no locking is desired. 6166 */ 6167 void 6168 page_set_props(page_t *pp, uint_t flags) 6169 { 6170 ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0); 6171 pp->p_nrm |= (uchar_t)flags; 6172 } 6173 6174 void 6175 page_clr_all_props(page_t *pp) 6176 { 6177 pp->p_nrm = 0; 6178 } 6179 6180 /* 6181 * Clear p_lckcnt and p_cowcnt, adjusting freemem if required. 
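 * Returns the number of lock counts cleared (one for any p_lckcnt plus all
 * of p_cowcnt); when 'adjust' is set that amount is credited back to
 * availrmem here, otherwise the return value lets the caller do its own
 * accounting.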
6182 */ 6183 int 6184 page_clear_lck_cow(page_t *pp, int adjust) 6185 { 6186 int f_amount; 6187 6188 ASSERT(PAGE_EXCL(pp)); 6189 6190 /* 6191 * The page_struct_lock need not be acquired here since 6192 * we require the caller hold the page exclusively locked. 6193 */ 6194 f_amount = 0; 6195 if (pp->p_lckcnt) { 6196 f_amount = 1; 6197 pp->p_lckcnt = 0; 6198 } 6199 if (pp->p_cowcnt) { 6200 f_amount += pp->p_cowcnt; 6201 pp->p_cowcnt = 0; 6202 } 6203 6204 if (adjust && f_amount) { 6205 mutex_enter(&freemem_lock); 6206 availrmem += f_amount; 6207 mutex_exit(&freemem_lock); 6208 } 6209 6210 return (f_amount); 6211 } 6212 6213 /* 6214 * The following functions is called from free_vp_pages() 6215 * for an inexact estimate of a newly free'd page... 6216 */ 6217 ulong_t 6218 page_share_cnt(page_t *pp) 6219 { 6220 return (hat_page_getshare(pp)); 6221 } 6222 6223 int 6224 page_isshared(page_t *pp) 6225 { 6226 return (hat_page_checkshare(pp, 1)); 6227 } 6228 6229 int 6230 page_isfree(page_t *pp) 6231 { 6232 return (PP_ISFREE(pp)); 6233 } 6234 6235 int 6236 page_isref(page_t *pp) 6237 { 6238 return (hat_page_getattr(pp, P_REF)); 6239 } 6240 6241 int 6242 page_ismod(page_t *pp) 6243 { 6244 return (hat_page_getattr(pp, P_MOD)); 6245 } 6246 6247 /* 6248 * The following code all currently relates to the page capture logic: 6249 * 6250 * This logic is used for cases where there is a desire to claim a certain 6251 * physical page in the system for the caller. As it may not be possible 6252 * to capture the page immediately, the p_toxic bits are used in the page 6253 * structure to indicate that someone wants to capture this page. When the 6254 * page gets unlocked, the toxic flag will be noted and an attempt to capture 6255 * the page will be made. If it is successful, the original callers callback 6256 * will be called with the page to do with it what they please. 6257 * 6258 * There is also an async thread which wakes up to attempt to capture 6259 * pages occasionally which have the capture bit set. All of the pages which 6260 * need to be captured asynchronously have been inserted into the 6261 * page_capture_hash and thus this thread walks that hash list. Items in the 6262 * hash have an expiration time so this thread handles that as well by removing 6263 * the item from the hash if it has expired. 6264 * 6265 * Some important things to note are: 6266 * - if the PR_CAPTURE bit is set on a page, then the page is in the 6267 * page_capture_hash. The page_capture_hash_head.pchh_mutex is needed 6268 * to set and clear this bit, and while the lock is held is the only time 6269 * you can add or remove an entry from the hash. 6270 * - the PR_CAPTURE bit can only be set and cleared while holding the 6271 * page_capture_hash_head.pchh_mutex 6272 * - the t_flag field of the thread struct is used with the T_CAPTURING 6273 * flag to prevent recursion while dealing with large pages. 6274 * - pages which need to be retired never expire on the page_capture_hash. 6275 */ 6276 6277 static void page_capture_thread(void); 6278 static kthread_t *pc_thread_id; 6279 kcondvar_t pc_cv; 6280 static kmutex_t pc_thread_mutex; 6281 static clock_t pc_thread_shortwait; 6282 static clock_t pc_thread_longwait; 6283 static int pc_thread_retry; 6284 6285 struct page_capture_callback pc_cb[PC_NUM_CALLBACKS]; 6286 6287 /* Note that this is a circular linked list */ 6288 typedef struct page_capture_hash_bucket { 6289 page_t *pp; 6290 uint_t szc; 6291 uint_t flags; 6292 clock_t expires; /* lbolt at which this request expires. 
*/ 6293 void *datap; /* Cached data passed in for callback */ 6294 struct page_capture_hash_bucket *next; 6295 struct page_capture_hash_bucket *prev; 6296 } page_capture_hash_bucket_t; 6297 6298 /* 6299 * Each hash bucket will have it's own mutex and two lists which are: 6300 * active (0): represents requests which have not been processed by 6301 * the page_capture async thread yet. 6302 * walked (1): represents requests which have been processed by the 6303 * page_capture async thread within it's given walk of this bucket. 6304 * 6305 * These are all needed so that we can synchronize all async page_capture 6306 * events. When the async thread moves to a new bucket, it will append the 6307 * walked list to the active list and walk each item one at a time, moving it 6308 * from the active list to the walked list. Thus if there is an async request 6309 * outstanding for a given page, it will always be in one of the two lists. 6310 * New requests will always be added to the active list. 6311 * If we were not able to capture a page before the request expired, we'd free 6312 * up the request structure which would indicate to page_capture that there is 6313 * no longer a need for the given page, and clear the PR_CAPTURE flag if 6314 * possible. 6315 */ 6316 typedef struct page_capture_hash_head { 6317 kmutex_t pchh_mutex; 6318 uint_t num_pages; 6319 page_capture_hash_bucket_t lists[2]; /* sentinel nodes */ 6320 } page_capture_hash_head_t; 6321 6322 #ifdef DEBUG 6323 #define NUM_PAGE_CAPTURE_BUCKETS 4 6324 #else 6325 #define NUM_PAGE_CAPTURE_BUCKETS 64 6326 #endif 6327 6328 page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS]; 6329 6330 /* for now use a very simple hash based upon the size of a page struct */ 6331 #define PAGE_CAPTURE_HASH(pp) \ 6332 ((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1))) 6333 6334 extern pgcnt_t swapfs_minfree; 6335 6336 int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap); 6337 6338 /* 6339 * a callback function is required for page capture requests. 6340 */ 6341 void 6342 page_capture_register_callback(uint_t index, clock_t duration, 6343 int (*cb_func)(page_t *, void *, uint_t)) 6344 { 6345 ASSERT(pc_cb[index].cb_active == 0); 6346 ASSERT(cb_func != NULL); 6347 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 6348 pc_cb[index].duration = duration; 6349 pc_cb[index].cb_func = cb_func; 6350 pc_cb[index].cb_active = 1; 6351 rw_exit(&pc_cb[index].cb_rwlock); 6352 } 6353 6354 void 6355 page_capture_unregister_callback(uint_t index) 6356 { 6357 int i, j; 6358 struct page_capture_hash_bucket *bp1; 6359 struct page_capture_hash_bucket *bp2; 6360 struct page_capture_hash_bucket *head = NULL; 6361 uint_t flags = (1 << index); 6362 6363 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 6364 ASSERT(pc_cb[index].cb_active == 1); 6365 pc_cb[index].duration = 0; /* Paranoia */ 6366 pc_cb[index].cb_func = NULL; /* Paranoia */ 6367 pc_cb[index].cb_active = 0; 6368 rw_exit(&pc_cb[index].cb_rwlock); 6369 6370 /* 6371 * Just move all the entries to a private list which we can walk 6372 * through without the need to hold any locks. 6373 * No more requests can get added to the hash lists for this consumer 6374 * as the cb_active field for the callback has been cleared. 
6375 */ 6376 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 6377 mutex_enter(&page_capture_hash[i].pchh_mutex); 6378 for (j = 0; j < 2; j++) { 6379 bp1 = page_capture_hash[i].lists[j].next; 6380 /* walk through all but first (sentinel) element */ 6381 while (bp1 != &page_capture_hash[i].lists[j]) { 6382 bp2 = bp1; 6383 if (bp2->flags & flags) { 6384 bp1 = bp2->next; 6385 bp1->prev = bp2->prev; 6386 bp2->prev->next = bp1; 6387 bp2->next = head; 6388 head = bp2; 6389 /* 6390 * Clear the PR_CAPTURE bit as we 6391 * hold appropriate locks here. 6392 */ 6393 page_clrtoxic(head->pp, PR_CAPTURE); 6394 page_capture_hash[i].num_pages--; 6395 continue; 6396 } 6397 bp1 = bp1->next; 6398 } 6399 } 6400 mutex_exit(&page_capture_hash[i].pchh_mutex); 6401 } 6402 6403 while (head != NULL) { 6404 bp1 = head; 6405 head = head->next; 6406 kmem_free(bp1, sizeof (*bp1)); 6407 } 6408 } 6409 6410 6411 /* 6412 * Find pp in the active list and move it to the walked list if it 6413 * exists. 6414 * Note that most often pp should be at the front of the active list 6415 * as it is currently used and thus there is no other sort of optimization 6416 * being done here as this is a linked list data structure. 6417 * Returns 1 on successful move or 0 if page could not be found. 6418 */ 6419 static int 6420 page_capture_move_to_walked(page_t *pp) 6421 { 6422 page_capture_hash_bucket_t *bp; 6423 int index; 6424 6425 index = PAGE_CAPTURE_HASH(pp); 6426 6427 mutex_enter(&page_capture_hash[index].pchh_mutex); 6428 bp = page_capture_hash[index].lists[0].next; 6429 while (bp != &page_capture_hash[index].lists[0]) { 6430 if (bp->pp == pp) { 6431 /* Remove from old list */ 6432 bp->next->prev = bp->prev; 6433 bp->prev->next = bp->next; 6434 6435 /* Add to new list */ 6436 bp->next = page_capture_hash[index].lists[1].next; 6437 bp->prev = &page_capture_hash[index].lists[1]; 6438 page_capture_hash[index].lists[1].next = bp; 6439 bp->next->prev = bp; 6440 mutex_exit(&page_capture_hash[index].pchh_mutex); 6441 6442 return (1); 6443 } 6444 bp = bp->next; 6445 } 6446 mutex_exit(&page_capture_hash[index].pchh_mutex); 6447 return (0); 6448 } 6449 6450 /* 6451 * Add a new entry to the page capture hash. The only case where a new 6452 * entry is not added is when the page capture consumer is no longer registered. 6453 * In this case, we'll silently not add the page to the hash. We know that 6454 * page retire will always be registered for the case where we are currently 6455 * unretiring a page and thus there are no conflicts. 
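* Note that the bucket entry is allocated with KM_SLEEP before the bucket's
* pchh_mutex is taken, so no allocation is done while that mutex is held.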
6456 */ 6457 static void 6458 page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap) 6459 { 6460 page_capture_hash_bucket_t *bp1; 6461 page_capture_hash_bucket_t *bp2; 6462 int index; 6463 int cb_index; 6464 int i; 6465 #ifdef DEBUG 6466 page_capture_hash_bucket_t *tp1; 6467 int l; 6468 #endif 6469 6470 ASSERT(!(flags & CAPTURE_ASYNC)); 6471 6472 bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP); 6473 6474 bp1->pp = pp; 6475 bp1->szc = szc; 6476 bp1->flags = flags; 6477 bp1->datap = datap; 6478 6479 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6480 if ((flags >> cb_index) & 1) { 6481 break; 6482 } 6483 } 6484 6485 ASSERT(cb_index != PC_NUM_CALLBACKS); 6486 6487 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6488 if (pc_cb[cb_index].cb_active) { 6489 if (pc_cb[cb_index].duration == -1) { 6490 bp1->expires = (clock_t)-1; 6491 } else { 6492 bp1->expires = lbolt + pc_cb[cb_index].duration; 6493 } 6494 } else { 6495 /* There's no callback registered so don't add to the hash */ 6496 rw_exit(&pc_cb[cb_index].cb_rwlock); 6497 kmem_free(bp1, sizeof (*bp1)); 6498 return; 6499 } 6500 6501 index = PAGE_CAPTURE_HASH(pp); 6502 6503 /* 6504 * Only allow capture flag to be modified under this mutex. 6505 * Prevents multiple entries for same page getting added. 6506 */ 6507 mutex_enter(&page_capture_hash[index].pchh_mutex); 6508 6509 /* 6510 * if not already on the hash, set capture bit and add to the hash 6511 */ 6512 if (!(pp->p_toxic & PR_CAPTURE)) { 6513 #ifdef DEBUG 6514 /* Check for duplicate entries */ 6515 for (l = 0; l < 2; l++) { 6516 tp1 = page_capture_hash[index].lists[l].next; 6517 while (tp1 != &page_capture_hash[index].lists[l]) { 6518 if (tp1->pp == pp) { 6519 panic("page pp 0x%p already on hash " 6520 "at 0x%p\n", pp, tp1); 6521 } 6522 tp1 = tp1->next; 6523 } 6524 } 6525 6526 #endif 6527 page_settoxic(pp, PR_CAPTURE); 6528 bp1->next = page_capture_hash[index].lists[0].next; 6529 bp1->prev = &page_capture_hash[index].lists[0]; 6530 bp1->next->prev = bp1; 6531 page_capture_hash[index].lists[0].next = bp1; 6532 page_capture_hash[index].num_pages++; 6533 if (flags & CAPTURE_RETIRE) { 6534 page_retire_incr_pend_count(); 6535 } 6536 mutex_exit(&page_capture_hash[index].pchh_mutex); 6537 rw_exit(&pc_cb[cb_index].cb_rwlock); 6538 cv_signal(&pc_cv); 6539 return; 6540 } 6541 6542 /* 6543 * A page retire request will replace any other request. 6544 * A second physmem request which is for a different process than 6545 * the currently registered one will be dropped as there is 6546 * no way to hold the private data for both calls. 6547 * In the future, once there are more callers, this will have to 6548 * be worked out better as there needs to be private storage for 6549 * at least each type of caller (maybe have datap be an array of 6550 * *void's so that we can index based upon callers index). 6551 */ 6552 6553 /* walk hash list to update expire time */ 6554 for (i = 0; i < 2; i++) { 6555 bp2 = page_capture_hash[index].lists[i].next; 6556 while (bp2 != &page_capture_hash[index].lists[i]) { 6557 if (bp2->pp == pp) { 6558 if (flags & CAPTURE_RETIRE) { 6559 if (!(bp2->flags & CAPTURE_RETIRE)) { 6560 page_retire_incr_pend_count(); 6561 bp2->flags = flags; 6562 bp2->expires = bp1->expires; 6563 bp2->datap = datap; 6564 } 6565 } else { 6566 ASSERT(flags & CAPTURE_PHYSMEM); 6567 if (!(bp2->flags & CAPTURE_RETIRE) && 6568 (datap == bp2->datap)) { 6569 bp2->expires = bp1->expires; 6570 } 6571 } 6572 mutex_exit(&page_capture_hash[index]. 
6573 pchh_mutex); 6574 rw_exit(&pc_cb[cb_index].cb_rwlock); 6575 kmem_free(bp1, sizeof (*bp1)); 6576 return; 6577 } 6578 bp2 = bp2->next; 6579 } 6580 } 6581 6582 /* 6583 * the PR_CAPTURE flag is protected by the page_capture_hash mutexes 6584 * and thus it either has to be set or not set and can't change 6585 * while holding the mutex above. 6586 */ 6587 panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n", pp); 6588 } 6589 6590 /* 6591 * We have a page in our hands, lets try and make it ours by turning 6592 * it into a clean page like it had just come off the freelists. 6593 * 6594 * Returns 0 on success, with the page still EXCL locked. 6595 * On failure, the page will be unlocked, and returns EAGAIN 6596 */ 6597 static int 6598 page_capture_clean_page(page_t *pp) 6599 { 6600 page_t *newpp; 6601 int skip_unlock = 0; 6602 spgcnt_t count; 6603 page_t *tpp; 6604 int ret = 0; 6605 int extra; 6606 6607 ASSERT(PAGE_EXCL(pp)); 6608 ASSERT(!PP_RETIRED(pp)); 6609 ASSERT(curthread->t_flag & T_CAPTURING); 6610 6611 if (PP_ISFREE(pp)) { 6612 if (!page_reclaim(pp, NULL)) { 6613 skip_unlock = 1; 6614 ret = EAGAIN; 6615 goto cleanup; 6616 } 6617 ASSERT(pp->p_szc == 0); 6618 if (pp->p_vnode != NULL) { 6619 /* 6620 * Since this page came from the 6621 * cachelist, we must destroy the 6622 * old vnode association. 6623 */ 6624 page_hashout(pp, NULL); 6625 } 6626 goto cleanup; 6627 } 6628 6629 /* 6630 * If we know page_relocate will fail, skip it 6631 * It could still fail due to a UE on another page but we 6632 * can't do anything about that. 6633 */ 6634 if (pp->p_toxic & PR_UE) { 6635 goto skip_relocate; 6636 } 6637 6638 /* 6639 * It's possible that pages can not have a vnode as fsflush comes 6640 * through and cleans up these pages. It's ugly but that's how it is. 6641 */ 6642 if (pp->p_vnode == NULL) { 6643 goto skip_relocate; 6644 } 6645 6646 /* 6647 * Page was not free, so lets try to relocate it. 6648 * page_relocate only works with root pages, so if this is not a root 6649 * page, we need to demote it to try and relocate it. 6650 * Unfortunately this is the best we can do right now. 6651 */ 6652 newpp = NULL; 6653 if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) { 6654 if (page_try_demote_pages(pp) == 0) { 6655 ret = EAGAIN; 6656 goto cleanup; 6657 } 6658 } 6659 ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL); 6660 if (ret == 0) { 6661 page_t *npp; 6662 /* unlock the new page(s) */ 6663 while (count-- > 0) { 6664 ASSERT(newpp != NULL); 6665 npp = newpp; 6666 page_sub(&newpp, npp); 6667 page_unlock(npp); 6668 } 6669 ASSERT(newpp == NULL); 6670 /* 6671 * Check to see if the page we have is too large. 6672 * If so, demote it freeing up the extra pages. 6673 */ 6674 if (pp->p_szc > 0) { 6675 /* For now demote extra pages to szc == 0 */ 6676 extra = page_get_pagecnt(pp->p_szc) - 1; 6677 while (extra > 0) { 6678 tpp = pp->p_next; 6679 page_sub(&pp, tpp); 6680 tpp->p_szc = 0; 6681 page_free(tpp, 1); 6682 extra--; 6683 } 6684 /* Make sure to set our page to szc 0 as well */ 6685 ASSERT(pp->p_next == pp && pp->p_prev == pp); 6686 pp->p_szc = 0; 6687 } 6688 goto cleanup; 6689 } else if (ret == EIO) { 6690 ret = EAGAIN; 6691 goto cleanup; 6692 } else { 6693 /* 6694 * Need to reset return type as we failed to relocate the page 6695 * but that does not mean that some of the next steps will not 6696 * work. 
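* (The demotion and hat_pageunload steps below may still leave us with a
* clean base page that can be captured.)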
6697 */ 6698 ret = 0; 6699 } 6700 6701 skip_relocate: 6702 6703 if (pp->p_szc > 0) { 6704 if (page_try_demote_pages(pp) == 0) { 6705 ret = EAGAIN; 6706 goto cleanup; 6707 } 6708 } 6709 6710 ASSERT(pp->p_szc == 0); 6711 6712 if (hat_ismod(pp)) { 6713 ret = EAGAIN; 6714 goto cleanup; 6715 } 6716 if (PP_ISKAS(pp)) { 6717 ret = EAGAIN; 6718 goto cleanup; 6719 } 6720 if (pp->p_lckcnt || pp->p_cowcnt) { 6721 ret = EAGAIN; 6722 goto cleanup; 6723 } 6724 6725 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 6726 ASSERT(!hat_page_is_mapped(pp)); 6727 6728 if (hat_ismod(pp)) { 6729 /* 6730 * This is a semi-odd case as the page is now modified but not 6731 * mapped as we just unloaded the mappings above. 6732 */ 6733 ret = EAGAIN; 6734 goto cleanup; 6735 } 6736 if (pp->p_vnode != NULL) { 6737 page_hashout(pp, NULL); 6738 } 6739 6740 /* 6741 * At this point, the page should be in a clean state and 6742 * we can do whatever we want with it. 6743 */ 6744 6745 cleanup: 6746 if (ret != 0) { 6747 if (!skip_unlock) { 6748 page_unlock(pp); 6749 } 6750 } else { 6751 ASSERT(pp->p_szc == 0); 6752 ASSERT(PAGE_EXCL(pp)); 6753 6754 pp->p_next = pp; 6755 pp->p_prev = pp; 6756 } 6757 return (ret); 6758 } 6759 6760 /* 6761 * Various callers of page_trycapture() can have different restrictions upon 6762 * what memory they have access to. 6763 * Returns 0 on success, with the following error codes on failure: 6764 * EPERM - The requested page is long term locked, and thus repeated 6765 * requests to capture this page will likely fail. 6766 * ENOMEM - There was not enough free memory in the system to safely 6767 * map the requested page. 6768 * ENOENT - The requested page was inside the kernel cage, and the 6769 * PHYSMEM_CAGE flag was not set. 6770 */ 6771 int 6772 page_capture_pre_checks(page_t *pp, uint_t flags) 6773 { 6774 #if defined(__sparc) 6775 extern struct vnode prom_ppages; 6776 #endif /* __sparc */ 6777 6778 ASSERT(pp != NULL); 6779 6780 #if defined(__sparc) 6781 if (pp->p_vnode == &prom_ppages) { 6782 return (EPERM); 6783 } 6784 6785 if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE) && 6786 (flags & CAPTURE_PHYSMEM)) { 6787 return (ENOENT); 6788 } 6789 6790 if (PP_ISNORELOCKERNEL(pp)) { 6791 return (EPERM); 6792 } 6793 #else 6794 if (PP_ISKAS(pp)) { 6795 return (EPERM); 6796 } 6797 #endif /* __sparc */ 6798 6799 /* only physmem currently has the restrictions checked below */ 6800 if (!(flags & CAPTURE_PHYSMEM)) { 6801 return (0); 6802 } 6803 6804 if (availrmem < swapfs_minfree) { 6805 /* 6806 * We won't try to capture this page as we are 6807 * running low on memory. 6808 */ 6809 return (ENOMEM); 6810 } 6811 return (0); 6812 } 6813 6814 /* 6815 * Once we have a page in our mits, go ahead and complete the capture 6816 * operation. 6817 * Returns 1 on failure where page is no longer needed 6818 * Returns 0 on success 6819 * Returns -1 if there was a transient failure. 6820 * Failure cases must release the SE_EXCL lock on pp (usually via page_free). 
6821 */ 6822 int 6823 page_capture_take_action(page_t *pp, uint_t flags, void *datap) 6824 { 6825 int cb_index; 6826 int ret = 0; 6827 page_capture_hash_bucket_t *bp1; 6828 page_capture_hash_bucket_t *bp2; 6829 int index; 6830 int found = 0; 6831 int i; 6832 6833 ASSERT(PAGE_EXCL(pp)); 6834 ASSERT(curthread->t_flag & T_CAPTURING); 6835 6836 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6837 if ((flags >> cb_index) & 1) { 6838 break; 6839 } 6840 } 6841 ASSERT(cb_index < PC_NUM_CALLBACKS); 6842 6843 /* 6844 * Remove the entry from the page_capture hash, but don't free it yet 6845 * as we may need to put it back. 6846 * Since we own the page at this point in time, we should find it 6847 * in the hash if this is an ASYNC call. If we don't it's likely 6848 * that the page_capture_async() thread decided that this request 6849 * had expired, in which case we just continue on. 6850 */ 6851 if (flags & CAPTURE_ASYNC) { 6852 6853 index = PAGE_CAPTURE_HASH(pp); 6854 6855 mutex_enter(&page_capture_hash[index].pchh_mutex); 6856 for (i = 0; i < 2 && !found; i++) { 6857 bp1 = page_capture_hash[index].lists[i].next; 6858 while (bp1 != &page_capture_hash[index].lists[i]) { 6859 if (bp1->pp == pp) { 6860 bp1->next->prev = bp1->prev; 6861 bp1->prev->next = bp1->next; 6862 page_capture_hash[index].num_pages--; 6863 page_clrtoxic(pp, PR_CAPTURE); 6864 found = 1; 6865 break; 6866 } 6867 bp1 = bp1->next; 6868 } 6869 } 6870 mutex_exit(&page_capture_hash[index].pchh_mutex); 6871 } 6872 6873 /* Synchronize with the unregister func. */ 6874 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6875 if (!pc_cb[cb_index].cb_active) { 6876 page_free(pp, 1); 6877 rw_exit(&pc_cb[cb_index].cb_rwlock); 6878 if (found) { 6879 kmem_free(bp1, sizeof (*bp1)); 6880 } 6881 return (1); 6882 } 6883 6884 /* 6885 * We need to remove the entry from the page capture hash and turn off 6886 * the PR_CAPTURE bit before calling the callback. We'll need to cache 6887 * the entry here, and then based upon the return value, cleanup 6888 * appropriately or re-add it to the hash, making sure that someone else 6889 * hasn't already done so. 6890 * It should be rare for the callback to fail and thus it's ok for 6891 * the failure path to be a bit complicated as the success path is 6892 * cleaner and the locking rules are easier to follow. 6893 */ 6894 6895 ret = pc_cb[cb_index].cb_func(pp, datap, flags); 6896 6897 rw_exit(&pc_cb[cb_index].cb_rwlock); 6898 6899 /* 6900 * If this was an ASYNC request, we need to cleanup the hash if the 6901 * callback was successful or if the request was no longer valid. 6902 * For non-ASYNC requests, we return failure to map and the caller 6903 * will take care of adding the request to the hash. 6904 * Note also that the callback itself is responsible for the page 6905 * at this point in time in terms of locking ... The most common 6906 * case for the failure path should just be a page_free. 6907 */ 6908 if (ret >= 0) { 6909 if (found) { 6910 if (bp1->flags & CAPTURE_RETIRE) { 6911 page_retire_decr_pend_count(); 6912 } 6913 kmem_free(bp1, sizeof (*bp1)); 6914 } 6915 return (ret); 6916 } 6917 if (!found) { 6918 return (ret); 6919 } 6920 6921 ASSERT(flags & CAPTURE_ASYNC); 6922 6923 /* 6924 * Check for expiration time first as we can just free it up if it's 6925 * expired. 
6926 */
6927 if (lbolt > bp1->expires && bp1->expires != -1) {
6928 kmem_free(bp1, sizeof (*bp1));
6929 return (ret);
6930 }
6931 
6932 /*
6933 * The callback failed and there used to be an entry in the hash for
6934 * this page, so we need to add it back to the hash.
6935 */
6936 mutex_enter(&page_capture_hash[index].pchh_mutex);
6937 if (!(pp->p_toxic & PR_CAPTURE)) {
6938 /* just add bp1 back to head of walked list */
6939 page_settoxic(pp, PR_CAPTURE);
6940 bp1->next = page_capture_hash[index].lists[1].next;
6941 bp1->prev = &page_capture_hash[index].lists[1];
6942 bp1->next->prev = bp1;
6943 page_capture_hash[index].lists[1].next = bp1;
6944 page_capture_hash[index].num_pages++;
6945 mutex_exit(&page_capture_hash[index].pchh_mutex);
6946 return (ret);
6947 }
6948 
6949 /*
6950 * Otherwise there was a new capture request added to the list.
6951 * Need to make sure that our original data is represented if
6952 * appropriate.
6953 */
6954 for (i = 0; i < 2; i++) {
6955 bp2 = page_capture_hash[index].lists[i].next;
6956 while (bp2 != &page_capture_hash[index].lists[i]) {
6957 if (bp2->pp == pp) {
6958 if (bp1->flags & CAPTURE_RETIRE) {
6959 if (!(bp2->flags & CAPTURE_RETIRE)) {
6960 bp2->szc = bp1->szc;
6961 bp2->flags = bp1->flags;
6962 bp2->expires = bp1->expires;
6963 bp2->datap = bp1->datap;
6964 }
6965 } else {
6966 ASSERT(bp1->flags & CAPTURE_PHYSMEM);
6967 if (!(bp2->flags & CAPTURE_RETIRE)) {
6968 bp2->szc = bp1->szc;
6969 bp2->flags = bp1->flags;
6970 bp2->expires = bp1->expires;
6971 bp2->datap = bp1->datap;
6972 }
6973 }
6974 mutex_exit(&page_capture_hash[index].
6975 pchh_mutex);
6976 kmem_free(bp1, sizeof (*bp1));
6977 return (ret);
6978 }
6979 bp2 = bp2->next;
6980 }
6981 }
6982 panic("PR_CAPTURE set but not on hash for pp 0x%p\n", pp);
6983 /*NOTREACHED*/
6984 }
6985 
6986 /*
6987 * Try to capture the given page for the caller specified in the flags
6988 * parameter. The page will either be captured and handed over to the
6989 * appropriate callback, or will be queued up in the page capture hash
6990 * to be captured asynchronously.
6991 * If the current request is due to an async capture, the page must be
6992 * exclusively locked before calling this function.
6993 * Currently szc must be 0 but in the future this should be expandable to
6994 * other page sizes.
6995 * Returns 0 on success, with the following error codes on failure:
6996 * EPERM - The requested page is long term locked, and thus repeated
6997 * requests to capture this page will likely fail.
6998 * ENOMEM - There was not enough free memory in the system to safely
6999 * map the requested page.
7000 * ENOENT - The requested page was inside the kernel cage, and the
7001 * CAPTURE_GET_CAGE flag was not set.
7002 * EAGAIN - The requested page could not be captured at this point in
7003 * time but future requests will likely work.
7004 * EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED flag
7005 * was not set.
7006 */
7007 int
7008 page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
7009 {
7010 int ret;
7011 int cb_index;
7012 
7013 if (flags & CAPTURE_ASYNC) {
7014 ASSERT(PAGE_EXCL(pp));
7015 goto async;
7016 }
7017 
7018 /* Make sure there's enough availrmem ...
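* (This applies to the synchronous path only; CAPTURE_ASYNC requests skip
* these checks and just re-validate availrmem for CAPTURE_PHYSMEM below.)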
*/ 7019 ret = page_capture_pre_checks(pp, flags); 7020 if (ret != 0) { 7021 return (ret); 7022 } 7023 7024 if (!page_trylock(pp, SE_EXCL)) { 7025 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 7026 if ((flags >> cb_index) & 1) { 7027 break; 7028 } 7029 } 7030 ASSERT(cb_index < PC_NUM_CALLBACKS); 7031 ret = EAGAIN; 7032 /* Special case for retired pages */ 7033 if (PP_RETIRED(pp)) { 7034 if (flags & CAPTURE_GET_RETIRED) { 7035 if (!page_unretire_pp(pp, PR_UNR_TEMP)) { 7036 /* 7037 * Need to set capture bit and add to 7038 * hash so that the page will be 7039 * retired when freed. 7040 */ 7041 page_capture_add_hash(pp, szc, 7042 CAPTURE_RETIRE, NULL); 7043 ret = 0; 7044 goto own_page; 7045 } 7046 } else { 7047 return (EBUSY); 7048 } 7049 } 7050 page_capture_add_hash(pp, szc, flags, datap); 7051 return (ret); 7052 } 7053 7054 async: 7055 ASSERT(PAGE_EXCL(pp)); 7056 7057 /* Need to check for physmem async requests that availrmem is sane */ 7058 if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) == 7059 (CAPTURE_ASYNC | CAPTURE_PHYSMEM) && 7060 (availrmem < swapfs_minfree)) { 7061 page_unlock(pp); 7062 return (ENOMEM); 7063 } 7064 7065 ret = page_capture_clean_page(pp); 7066 7067 if (ret != 0) { 7068 /* We failed to get the page, so lets add it to the hash */ 7069 if (!(flags & CAPTURE_ASYNC)) { 7070 page_capture_add_hash(pp, szc, flags, datap); 7071 } 7072 return (ret); 7073 } 7074 7075 own_page: 7076 ASSERT(PAGE_EXCL(pp)); 7077 ASSERT(pp->p_szc == 0); 7078 7079 /* Call the callback */ 7080 ret = page_capture_take_action(pp, flags, datap); 7081 7082 if (ret == 0) { 7083 return (0); 7084 } 7085 7086 /* 7087 * Note that in the failure cases from page_capture_take_action, the 7088 * EXCL lock will have already been dropped. 7089 */ 7090 if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) { 7091 page_capture_add_hash(pp, szc, flags, datap); 7092 } 7093 return (EAGAIN); 7094 } 7095 7096 int 7097 page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 7098 { 7099 int ret; 7100 7101 curthread->t_flag |= T_CAPTURING; 7102 ret = page_itrycapture(pp, szc, flags, datap); 7103 curthread->t_flag &= ~T_CAPTURING; /* xor works as we know its set */ 7104 return (ret); 7105 } 7106 7107 /* 7108 * When unlocking a page which has the PR_CAPTURE bit set, this routine 7109 * gets called to try and capture the page. 7110 */ 7111 void 7112 page_unlock_capture(page_t *pp) 7113 { 7114 page_capture_hash_bucket_t *bp; 7115 int index; 7116 int i; 7117 uint_t szc; 7118 uint_t flags = 0; 7119 void *datap; 7120 kmutex_t *mp; 7121 extern vnode_t retired_pages; 7122 7123 /* 7124 * We need to protect against a possible deadlock here where we own 7125 * the vnode page hash mutex and want to acquire it again as there 7126 * are locations in the code, where we unlock a page while holding 7127 * the mutex which can lead to the page being captured and eventually 7128 * end up here. As we may be hashing out the old page and hashing into 7129 * the retire vnode, we need to make sure we don't own them. 7130 * Other callbacks who do hash operations also need to make sure that 7131 * before they hashin to a vnode that they do not currently own the 7132 * vphm mutex otherwise there will be a panic. 
7133 */ 7134 if (mutex_owned(page_vnode_mutex(&retired_pages))) { 7135 page_unlock_nocapture(pp); 7136 return; 7137 } 7138 if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) { 7139 page_unlock_nocapture(pp); 7140 return; 7141 } 7142 7143 index = PAGE_CAPTURE_HASH(pp); 7144 7145 mp = &page_capture_hash[index].pchh_mutex; 7146 mutex_enter(mp); 7147 for (i = 0; i < 2; i++) { 7148 bp = page_capture_hash[index].lists[i].next; 7149 while (bp != &page_capture_hash[index].lists[i]) { 7150 if (bp->pp == pp) { 7151 szc = bp->szc; 7152 flags = bp->flags | CAPTURE_ASYNC; 7153 datap = bp->datap; 7154 mutex_exit(mp); 7155 (void) page_trycapture(pp, szc, flags, datap); 7156 return; 7157 } 7158 bp = bp->next; 7159 } 7160 } 7161 7162 /* Failed to find page in hash so clear flags and unlock it. */ 7163 page_clrtoxic(pp, PR_CAPTURE); 7164 page_unlock(pp); 7165 7166 mutex_exit(mp); 7167 } 7168 7169 void 7170 page_capture_init() 7171 { 7172 int i; 7173 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7174 page_capture_hash[i].lists[0].next = 7175 &page_capture_hash[i].lists[0]; 7176 page_capture_hash[i].lists[0].prev = 7177 &page_capture_hash[i].lists[0]; 7178 page_capture_hash[i].lists[1].next = 7179 &page_capture_hash[i].lists[1]; 7180 page_capture_hash[i].lists[1].prev = 7181 &page_capture_hash[i].lists[1]; 7182 } 7183 7184 pc_thread_shortwait = 23 * hz; 7185 pc_thread_longwait = 1201 * hz; 7186 pc_thread_retry = 3; 7187 mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL); 7188 cv_init(&pc_cv, NULL, CV_DEFAULT, NULL); 7189 pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0, 7190 TS_RUN, minclsyspri); 7191 } 7192 7193 /* 7194 * It is necessary to scrub any failing pages prior to reboot in order to 7195 * prevent a latent error trap from occurring on the next boot. 7196 */ 7197 void 7198 page_retire_mdboot() 7199 { 7200 page_t *pp; 7201 int i, j; 7202 page_capture_hash_bucket_t *bp; 7203 7204 /* walk lists looking for pages to scrub */ 7205 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7206 if (page_capture_hash[i].num_pages == 0) 7207 continue; 7208 7209 mutex_enter(&page_capture_hash[i].pchh_mutex); 7210 7211 for (j = 0; j < 2; j++) { 7212 bp = page_capture_hash[i].lists[j].next; 7213 while (bp != &page_capture_hash[i].lists[j]) { 7214 pp = bp->pp; 7215 if (!PP_ISKAS(pp) && PP_TOXIC(pp)) { 7216 pp->p_selock = -1; /* pacify ASSERTs */ 7217 PP_CLRFREE(pp); 7218 pagescrub(pp, 0, PAGESIZE); 7219 pp->p_selock = 0; 7220 } 7221 bp = bp->next; 7222 } 7223 } 7224 mutex_exit(&page_capture_hash[i].pchh_mutex); 7225 } 7226 } 7227 7228 /* 7229 * Walk the page_capture_hash trying to capture pages and also cleanup old 7230 * entries which have expired. 
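* Entries which cannot be captured on this pass are moved to the 'walked'
* list for their bucket so that they will be retried on a later pass.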
7231 */
7232 void
7233 page_capture_async()
7234 {
7235 page_t *pp;
7236 int i;
7237 int ret;
7238 page_capture_hash_bucket_t *bp1, *bp2;
7239 uint_t szc;
7240 uint_t flags;
7241 void *datap;
7242 
7243 /* If there are outstanding pages to be captured, get to work */
7244 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
7245 if (page_capture_hash[i].num_pages == 0)
7246 continue;
7247 /* Append list 1 to list 0 and then walk through list 0 */
7248 mutex_enter(&page_capture_hash[i].pchh_mutex);
7249 bp1 = &page_capture_hash[i].lists[1];
7250 bp2 = bp1->next;
7251 if (bp1 != bp2) {
7252 bp1->prev->next = page_capture_hash[i].lists[0].next;
7253 bp2->prev = &page_capture_hash[i].lists[0];
7254 page_capture_hash[i].lists[0].next->prev = bp1->prev;
7255 page_capture_hash[i].lists[0].next = bp2;
7256 bp1->next = bp1;
7257 bp1->prev = bp1;
7258 }
7259 
7260 /* list[1] will be empty now */
7261 
7262 bp1 = page_capture_hash[i].lists[0].next;
7263 while (bp1 != &page_capture_hash[i].lists[0]) {
7264 /* Check expiration time */
7265 if ((lbolt > bp1->expires && bp1->expires != -1) ||
7266 page_deleted(bp1->pp)) {
7267 page_capture_hash[i].lists[0].next = bp1->next;
7268 bp1->next->prev =
7269 &page_capture_hash[i].lists[0];
7270 page_capture_hash[i].num_pages--;
7271 
7272 /*
7273 * We can safely remove the PR_CAPTURE bit
7274 * without holding the EXCL lock on the page
7275 * as the PR_CAPTURE bit requires that the
7276 * page_capture_hash[].pchh_mutex be held
7277 * to modify it.
7278 */
7279 page_clrtoxic(bp1->pp, PR_CAPTURE);
7280 mutex_exit(&page_capture_hash[i].pchh_mutex);
7281 kmem_free(bp1, sizeof (*bp1));
7282 mutex_enter(&page_capture_hash[i].pchh_mutex);
7283 bp1 = page_capture_hash[i].lists[0].next;
7284 continue;
7285 }
7286 pp = bp1->pp;
7287 szc = bp1->szc;
7288 flags = bp1->flags;
7289 datap = bp1->datap;
7290 mutex_exit(&page_capture_hash[i].pchh_mutex);
7291 if (page_trylock(pp, SE_EXCL)) {
7292 ret = page_trycapture(pp, szc,
7293 flags | CAPTURE_ASYNC, datap);
7294 } else {
7295 ret = 1; /* move to walked hash */
7296 }
7297 
7298 if (ret != 0) {
7299 /* Move to walked hash */
7300 (void) page_capture_move_to_walked(pp);
7301 }
7302 mutex_enter(&page_capture_hash[i].pchh_mutex);
7303 bp1 = page_capture_hash[i].lists[0].next;
7304 }
7305 
7306 mutex_exit(&page_capture_hash[i].pchh_mutex);
7307 }
7308 }
7309 
7310 /*
7311 * This function is called by the page_capture_thread, and is needed
7312 * in order to initiate aio cleanup, so that pages used in aio
7313 * will be unlocked and subsequently retired by page_capture_thread.
7314 */
7315 static int
7316 do_aio_cleanup(void)
7317 {
7318 proc_t *procp;
7319 int (*aio_cleanup_dr_delete_memory)(proc_t *);
7320 int cleaned = 0;
7321 
7322 if (modload("sys", "kaio") == -1) {
7323 cmn_err(CE_WARN, "do_aio_cleanup: cannot load kaio");
7324 return (0);
7325 }
7326 /*
7327 * We use the aio_cleanup_dr_delete_memory function to
7328 * initiate the actual clean up; this function will wake
7329 * up the per-process aio_cleanup_thread.
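* The symbol is resolved at runtime with modgetsymvalue() since kaio is a
* loadable module; the modload() call above ensures it is present first.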
7330 */ 7331 aio_cleanup_dr_delete_memory = (int (*)(proc_t *)) 7332 modgetsymvalue("aio_cleanup_dr_delete_memory", 0); 7333 if (aio_cleanup_dr_delete_memory == NULL) { 7334 cmn_err(CE_WARN, 7335 "aio_cleanup_dr_delete_memory not found in kaio"); 7336 return (0); 7337 } 7338 mutex_enter(&pidlock); 7339 for (procp = practive; (procp != NULL); procp = procp->p_next) { 7340 mutex_enter(&procp->p_lock); 7341 if (procp->p_aio != NULL) { 7342 /* cleanup proc's outstanding kaio */ 7343 cleaned += (*aio_cleanup_dr_delete_memory)(procp); 7344 } 7345 mutex_exit(&procp->p_lock); 7346 } 7347 mutex_exit(&pidlock); 7348 return (cleaned); 7349 } 7350 7351 /* 7352 * helper function for page_capture_thread 7353 */ 7354 static void 7355 page_capture_handle_outstanding(void) 7356 { 7357 int ntry; 7358 7359 if (!page_retire_pend_count()) { 7360 /* 7361 * Do we really want to be this aggressive 7362 * for things other than page_retire? 7363 * Maybe have a counter for each callback 7364 * type to guide how aggressive we should 7365 * be here. Thus if there's at least one 7366 * page for page_retire we go ahead and reap 7367 * like this. 7368 */ 7369 kmem_reap(); 7370 seg_preap(); 7371 page_capture_async(); 7372 } else { 7373 /* 7374 * There are pages pending retirement, so 7375 * we reap prior to attempting to capture. 7376 */ 7377 kmem_reap(); 7378 7379 /* disable and purge seg_pcache */ 7380 (void) seg_p_disable(); 7381 for (ntry = 0; ntry < pc_thread_retry; ntry++) { 7382 if (!page_retire_pend_count()) 7383 break; 7384 if (do_aio_cleanup()) { 7385 /* 7386 * allow the apps cleanup threads 7387 * to run 7388 */ 7389 delay(pc_thread_shortwait); 7390 } 7391 page_capture_async(); 7392 } 7393 /* reenable seg_pcache */ 7394 seg_p_enable(); 7395 } 7396 } 7397 7398 /* 7399 * The page_capture_thread loops forever, looking to see if there are 7400 * pages still waiting to be captured. 7401 */ 7402 static void 7403 page_capture_thread(void) 7404 { 7405 callb_cpr_t c; 7406 int outstanding; 7407 int i; 7408 7409 CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr, "page_capture"); 7410 7411 mutex_enter(&pc_thread_mutex); 7412 for (;;) { 7413 outstanding = 0; 7414 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) 7415 outstanding += page_capture_hash[i].num_pages; 7416 if (outstanding) { 7417 page_capture_handle_outstanding(); 7418 CALLB_CPR_SAFE_BEGIN(&c); 7419 (void) cv_timedwait(&pc_cv, &pc_thread_mutex, 7420 lbolt + pc_thread_shortwait); 7421 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex); 7422 } else { 7423 CALLB_CPR_SAFE_BEGIN(&c); 7424 (void) cv_timedwait(&pc_cv, &pc_thread_mutex, 7425 lbolt + pc_thread_longwait); 7426 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex); 7427 } 7428 } 7429 /*NOTREACHED*/ 7430 } 7431