1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */ 27 /* All Rights Reserved */ 28 29 /* 30 * University Copyright- Copyright (c) 1982, 1986, 1988 31 * The Regents of the University of California 32 * All Rights Reserved 33 * 34 * University Acknowledgment- Portions of this document are derived from 35 * software developed by the University of California, Berkeley, and its 36 * contributors. 37 */ 38 39 #pragma ident "%Z%%M% %I% %E% SMI" 40 41 /* 42 * VM - physical page management. 43 */ 44 45 #include <sys/types.h> 46 #include <sys/t_lock.h> 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/errno.h> 50 #include <sys/time.h> 51 #include <sys/vnode.h> 52 #include <sys/vm.h> 53 #include <sys/vtrace.h> 54 #include <sys/swap.h> 55 #include <sys/cmn_err.h> 56 #include <sys/tuneable.h> 57 #include <sys/sysmacros.h> 58 #include <sys/cpuvar.h> 59 #include <sys/callb.h> 60 #include <sys/debug.h> 61 #include <sys/tnf_probe.h> 62 #include <sys/condvar_impl.h> 63 #include <sys/mem_config.h> 64 #include <sys/mem_cage.h> 65 #include <sys/kmem.h> 66 #include <sys/atomic.h> 67 #include <sys/strlog.h> 68 #include <sys/mman.h> 69 #include <sys/ontrap.h> 70 #include <sys/lgrp.h> 71 #include <sys/vfs.h> 72 73 #include <vm/hat.h> 74 #include <vm/anon.h> 75 #include <vm/page.h> 76 #include <vm/seg.h> 77 #include <vm/pvn.h> 78 #include <vm/seg_kmem.h> 79 #include <vm/vm_dep.h> 80 #include <sys/vm_usage.h> 81 #include <fs/fs_subr.h> 82 #include <sys/ddi.h> 83 #include <sys/modctl.h> 84 85 static int nopageage = 0; 86 87 static pgcnt_t max_page_get; /* max page_get request size in pages */ 88 pgcnt_t total_pages = 0; /* total number of pages (used by /proc) */ 89 90 /* 91 * freemem_lock protects all freemem variables: 92 * availrmem. Also this lock protects the globals which track the 93 * availrmem changes for accurate kernel footprint calculation. 94 * See below for an explanation of these 95 * globals. 96 */ 97 kmutex_t freemem_lock; 98 pgcnt_t availrmem; 99 pgcnt_t availrmem_initial; 100 101 /* 102 * These globals track availrmem changes to get a more accurate 103 * estimate of the kernel size. Historically pp_kernel is used for 104 * kernel size and is based on availrmem. But availrmem is adjusted for 105 * locked pages in the system not just for kernel locked pages. 106 * These new counters will track the pages locked through segvn and 107 * by explicit user locking. 108 * 109 * segvn_pages_locked : This keeps track on a global basis how many pages 110 * are currently locked because of I/O.
111 * 112 * pages_locked : How many pages are locked because of user specified 113 * locking through mlock or plock. 114 * 115 * pages_useclaim,pages_claimed : These two variables track the 116 * claim adjustments because of the protection changes on a segvn segment. 117 * 118 * All these globals are protected by the same lock which protects availrmem. 119 */ 120 pgcnt_t segvn_pages_locked; 121 pgcnt_t pages_locked; 122 pgcnt_t pages_useclaim; 123 pgcnt_t pages_claimed; 124 125 126 /* 127 * new_freemem_lock protects freemem, freemem_wait & freemem_cv. 128 */ 129 static kmutex_t new_freemem_lock; 130 static uint_t freemem_wait; /* someone waiting for freemem */ 131 static kcondvar_t freemem_cv; 132 133 /* 134 * The logical page free list is maintained as two lists, the 'free' 135 * and the 'cache' lists. 136 * The free list contains those pages that should be reused first. 137 * 138 * The implementation of the lists is machine dependent. 139 * page_get_freelist(), page_get_cachelist(), 140 * page_list_sub(), and page_list_add() 141 * form the interface to the machine dependent implementation. 142 * 143 * Pages with p_free set are on the cache list. 144 * Pages with p_free and p_age set are on the free list. 145 * 146 * A page may be locked while on either list. 147 */ 148 149 /* 150 * free list accounting stuff. 151 * 152 * 153 * Spread out the value for the number of pages on the 154 * page free and page cache lists. If there is just one 155 * value, then it must be under just one lock. 156 * The lock contention and cache traffic are a real bother. 157 * 158 * When we acquire and then drop a single pcf lock 159 * we can start in the middle of the array of pcf structures. 160 * If we acquire more than one pcf lock at a time, we need to 161 * start at the front to avoid deadlocking. 162 * 163 * pcf_count holds the number of pages in each pool. 164 * 165 * pcf_block is set when page_create_get_something() has asked the 166 * PSM page freelist and page cachelist routines without specifying 167 * a color and nothing came back. This is used to block anything 168 * else from moving pages from one list to the other while the 169 * lists are searched again. If a page is freed while pcf_block is 170 * set, then pcf_reserve is incremented. pcgs_unblock() takes care 171 * of clearing pcf_block, doing the wakeups, etc. 172 */ 173 174 #if NCPU <= 4 175 #define PAD 2 176 #define PCF_FANOUT 4 177 static uint_t pcf_mask = PCF_FANOUT - 1; 178 #else 179 #define PAD 10 180 #ifdef sun4v 181 #define PCF_FANOUT 32 182 #else 183 #define PCF_FANOUT 128 184 #endif 185 static uint_t pcf_mask = PCF_FANOUT - 1; 186 #endif 187 188 struct pcf { 189 kmutex_t pcf_lock; /* protects the structure */ 190 uint_t pcf_count; /* page count */ 191 uint_t pcf_wait; /* number of waiters */ 192 uint_t pcf_block; /* pcgs flag to page_free() */ 193 uint_t pcf_reserve; /* pages freed after pcf_block set */ 194 uint_t pcf_fill[PAD]; /* to line up on the caches */ 195 }; 196 197 static struct pcf pcf[PCF_FANOUT]; 198 #define PCF_INDEX() ((CPU->cpu_id) & (pcf_mask)) 199 200 kmutex_t pcgs_lock; /* serializes page_create_get_ */ 201 kmutex_t pcgs_cagelock; /* serializes NOSLEEP cage allocs */ 202 kmutex_t pcgs_wait_lock; /* used for delay in pcgs */ 203 static kcondvar_t pcgs_cv; /* cv for delay in pcgs */ 204 205 #ifdef VM_STATS 206 207 /* 208 * No locks, but so what, they are only statistics.
209 */ 210 211 static struct page_tcnt { 212 int pc_free_cache; /* free's into cache list */ 213 int pc_free_dontneed; /* free's with dontneed */ 214 int pc_free_pageout; /* free's from pageout */ 215 int pc_free_free; /* free's into free list */ 216 int pc_free_pages; /* free's into large page free list */ 217 int pc_destroy_pages; /* large page destroy's */ 218 int pc_get_cache; /* get's from cache list */ 219 int pc_get_free; /* get's from free list */ 220 int pc_reclaim; /* reclaim's */ 221 int pc_abortfree; /* abort's of free pages */ 222 int pc_find_hit; /* find's that find page */ 223 int pc_find_miss; /* find's that don't find page */ 224 int pc_destroy_free; /* # of free pages destroyed */ 225 #define PC_HASH_CNT (4*PAGE_HASHAVELEN) 226 int pc_find_hashlen[PC_HASH_CNT+1]; 227 int pc_addclaim_pages; 228 int pc_subclaim_pages; 229 int pc_free_replacement_page[2]; 230 int pc_try_demote_pages[6]; 231 int pc_demote_pages[2]; 232 } pagecnt; 233 234 uint_t hashin_count; 235 uint_t hashin_not_held; 236 uint_t hashin_already; 237 238 uint_t hashout_count; 239 uint_t hashout_not_held; 240 241 uint_t page_create_count; 242 uint_t page_create_not_enough; 243 uint_t page_create_not_enough_again; 244 uint_t page_create_zero; 245 uint_t page_create_hashout; 246 uint_t page_create_page_lock_failed; 247 uint_t page_create_trylock_failed; 248 uint_t page_create_found_one; 249 uint_t page_create_hashin_failed; 250 uint_t page_create_dropped_phm; 251 252 uint_t page_create_new; 253 uint_t page_create_exists; 254 uint_t page_create_putbacks; 255 uint_t page_create_overshoot; 256 257 uint_t page_reclaim_zero; 258 uint_t page_reclaim_zero_locked; 259 260 uint_t page_rename_exists; 261 uint_t page_rename_count; 262 263 uint_t page_lookup_cnt[20]; 264 uint_t page_lookup_nowait_cnt[10]; 265 uint_t page_find_cnt; 266 uint_t page_exists_cnt; 267 uint_t page_exists_forreal_cnt; 268 uint_t page_lookup_dev_cnt; 269 uint_t get_cachelist_cnt; 270 uint_t page_create_cnt[10]; 271 uint_t alloc_pages[9]; 272 uint_t page_exphcontg[19]; 273 uint_t page_create_large_cnt[10]; 274 275 /* 276 * Collects statistics. 277 */ 278 #define PAGE_HASH_SEARCH(index, pp, vp, off) { \ 279 uint_t mylen = 0; \ 280 \ 281 for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash, mylen++) { \ 282 if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \ 283 break; \ 284 } \ 285 if ((pp) != NULL) \ 286 pagecnt.pc_find_hit++; \ 287 else \ 288 pagecnt.pc_find_miss++; \ 289 if (mylen > PC_HASH_CNT) \ 290 mylen = PC_HASH_CNT; \ 291 pagecnt.pc_find_hashlen[mylen]++; \ 292 } 293 294 #else /* VM_STATS */ 295 296 /* 297 * Don't collect statistics 298 */ 299 #define PAGE_HASH_SEARCH(index, pp, vp, off) { \ 300 for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \ 301 if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \ 302 break; \ 303 } \ 304 } 305 306 #endif /* VM_STATS */ 307 308 309 310 #ifdef DEBUG 311 #define MEMSEG_SEARCH_STATS 312 #endif 313 314 #ifdef MEMSEG_SEARCH_STATS 315 struct memseg_stats { 316 uint_t nsearch; 317 uint_t nlastwon; 318 uint_t nhashwon; 319 uint_t nnotfound; 320 } memseg_stats; 321 322 #define MEMSEG_STAT_INCR(v) \ 323 atomic_add_32(&memseg_stats.v, 1) 324 #else 325 #define MEMSEG_STAT_INCR(x) 326 #endif 327 328 struct memseg *memsegs; /* list of memory segments */ 329 330 /* 331 * /etc/system tunable to control large page allocation heuristic. 332 * 333 * Setting to LPAP_LOCAL will heavily prefer the local lgroup over remote lgroup 334 * for large page allocation requests.
If a large page is not readily 335 * available on the local freelists we will go through additional effort 336 * to create a large page, potentially moving smaller pages around to coalesce 337 * larger pages in the local lgroup. 338 * Default value of LPAP_DEFAULT will go to remote freelists if large pages 339 * are not readily available in the local lgroup. 340 */ 341 enum lpap { 342 LPAP_DEFAULT, /* default large page allocation policy */ 343 LPAP_LOCAL /* local large page allocation policy */ 344 }; 345 346 enum lpap lpg_alloc_prefer = LPAP_DEFAULT; 347 348 static void page_init_mem_config(void); 349 static int page_do_hashin(page_t *, vnode_t *, u_offset_t); 350 static void page_do_hashout(page_t *); 351 static void page_capture_init(); 352 int page_capture_take_action(page_t *, uint_t, void *); 353 354 static void page_demote_vp_pages(page_t *); 355 356 /* 357 * vm subsystem related initialization 358 */ 359 void 360 vm_init(void) 361 { 362 boolean_t callb_vm_cpr(void *, int); 363 364 (void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm"); 365 page_init_mem_config(); 366 page_retire_init(); 367 vm_usage_init(); 368 page_capture_init(); 369 } 370 371 /* 372 * This function is called at startup and when memory is added or deleted. 373 */ 374 void 375 init_pages_pp_maximum() 376 { 377 static pgcnt_t p_min; 378 static pgcnt_t pages_pp_maximum_startup; 379 static pgcnt_t avrmem_delta; 380 static int init_done; 381 static int user_set; /* true if set in /etc/system */ 382 383 if (init_done == 0) { 384 385 /* If the user specified a value, save it */ 386 if (pages_pp_maximum != 0) { 387 user_set = 1; 388 pages_pp_maximum_startup = pages_pp_maximum; 389 } 390 391 /* 392 * Setting of pages_pp_maximum is based first time 393 * on the value of availrmem just after the start-up 394 * allocations. To preserve this relationship at run 395 * time, use a delta from availrmem_initial. 396 */ 397 ASSERT(availrmem_initial >= availrmem); 398 avrmem_delta = availrmem_initial - availrmem; 399 400 /* The allowable floor of pages_pp_maximum */ 401 p_min = tune.t_minarmem + 100; 402 403 /* Make sure we don't come through here again. */ 404 init_done = 1; 405 } 406 /* 407 * Determine pages_pp_maximum, the number of currently available 408 * pages (availrmem) that can't be `locked'. If not set by 409 * the user, we set it to 4% of the currently available memory 410 * plus 4MB. 411 * But we also insist that it be greater than tune.t_minarmem; 412 * otherwise a process could lock down a lot of memory, get swapped 413 * out, and never have enough to get swapped back in.
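 *
 * Illustrative arithmetic (not from the original source; assumes a
 * 4K page size): with roughly 250,000 pages of availrmem left after
 * startup, the default works out to 250,000 / 25 + btop(4MB), i.e.
 * 10,000 + 1,024 = 11,024 lockable pages, which is then raised to at
 * least tune.t_minarmem + 100 if it falls below that floor.
 *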
414 */ 415 if (user_set) 416 pages_pp_maximum = pages_pp_maximum_startup; 417 else 418 pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25) 419 + btop(4 * 1024 * 1024); 420 421 if (pages_pp_maximum <= p_min) { 422 pages_pp_maximum = p_min; 423 } 424 } 425 426 void 427 set_max_page_get(pgcnt_t target_total_pages) 428 { 429 max_page_get = target_total_pages / 2; 430 } 431 432 static pgcnt_t pending_delete; 433 434 /*ARGSUSED*/ 435 static void 436 page_mem_config_post_add( 437 void *arg, 438 pgcnt_t delta_pages) 439 { 440 set_max_page_get(total_pages - pending_delete); 441 init_pages_pp_maximum(); 442 } 443 444 /*ARGSUSED*/ 445 static int 446 page_mem_config_pre_del( 447 void *arg, 448 pgcnt_t delta_pages) 449 { 450 pgcnt_t nv; 451 452 nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages); 453 set_max_page_get(total_pages - nv); 454 return (0); 455 } 456 457 /*ARGSUSED*/ 458 static void 459 page_mem_config_post_del( 460 void *arg, 461 pgcnt_t delta_pages, 462 int cancelled) 463 { 464 pgcnt_t nv; 465 466 nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages); 467 set_max_page_get(total_pages - nv); 468 if (!cancelled) 469 init_pages_pp_maximum(); 470 } 471 472 static kphysm_setup_vector_t page_mem_config_vec = { 473 KPHYSM_SETUP_VECTOR_VERSION, 474 page_mem_config_post_add, 475 page_mem_config_pre_del, 476 page_mem_config_post_del, 477 }; 478 479 static void 480 page_init_mem_config(void) 481 { 482 int ret; 483 484 ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL); 485 ASSERT(ret == 0); 486 } 487 488 /* 489 * Evenly spread out the PCF counters for large free pages 490 */ 491 static void 492 page_free_large_ctr(pgcnt_t npages) 493 { 494 static struct pcf *p = pcf; 495 pgcnt_t lump; 496 497 freemem += npages; 498 499 lump = roundup(npages, PCF_FANOUT) / PCF_FANOUT; 500 501 while (npages > 0) { 502 503 ASSERT(!p->pcf_block); 504 505 if (lump < npages) { 506 p->pcf_count += (uint_t)lump; 507 npages -= lump; 508 } else { 509 p->pcf_count += (uint_t)npages; 510 npages = 0; 511 } 512 513 ASSERT(!p->pcf_wait); 514 515 if (++p > &pcf[PCF_FANOUT - 1]) 516 p = pcf; 517 } 518 519 ASSERT(npages == 0); 520 } 521 522 /* 523 * Add a physical chunk of memory to the system free lists during startup. 524 * Platform specific startup() allocates the memory for the page structs. 525 * 526 * num - number of page structures 527 * base - page number (pfn) to be associated with the first page. 528 * 529 * Since we are doing this during startup (ie. single threaded), we will 530 * use shortcut routines to avoid any locking overhead while putting all 531 * these pages on the freelists. 532 * 533 * NOTE: Any changes performed to page_free(), must also be performed to 534 * add_physmem() since this is how we initialize all page_t's at 535 * boot time. 536 */ 537 void 538 add_physmem( 539 page_t *pp, 540 pgcnt_t num, 541 pfn_t pnum) 542 { 543 page_t *root = NULL; 544 uint_t szc = page_num_pagesizes() - 1; 545 pgcnt_t large = page_get_pagecnt(szc); 546 pgcnt_t cnt = 0; 547 548 TRACE_2(TR_FAC_VM, TR_PAGE_INIT, 549 "add_physmem:pp %p num %lu", pp, num); 550 551 /* 552 * Arbitrarily limit the max page_get request 553 * to 1/2 of the page structs we have. 554 */ 555 total_pages += num; 556 set_max_page_get(total_pages); 557 558 PLCNT_MODIFY_MAX(pnum, (long)num); 559 560 /* 561 * The physical space for the pages array 562 * representing ram pages has already been 563 * allocated. 
Here we initialize each lock 564 * in the page structure, and put each on 565 * the free list 566 */ 567 for (; num; pp++, pnum++, num--) { 568 569 /* 570 * this needs to fill in the page number 571 * and do any other arch specific initialization 572 */ 573 add_physmem_cb(pp, pnum); 574 575 pp->p_lckcnt = 0; 576 pp->p_cowcnt = 0; 577 pp->p_slckcnt = 0; 578 579 /* 580 * Initialize the page lock as unlocked, since nobody 581 * can see or access this page yet. 582 */ 583 pp->p_selock = 0; 584 585 /* 586 * Initialize IO lock 587 */ 588 page_iolock_init(pp); 589 590 /* 591 * initialize other fields in the page_t 592 */ 593 PP_SETFREE(pp); 594 page_clr_all_props(pp); 595 PP_SETAGED(pp); 596 pp->p_offset = (u_offset_t)-1; 597 pp->p_next = pp; 598 pp->p_prev = pp; 599 600 /* 601 * Simple case: System doesn't support large pages. 602 */ 603 if (szc == 0) { 604 pp->p_szc = 0; 605 page_free_at_startup(pp); 606 continue; 607 } 608 609 /* 610 * Handle unaligned pages, we collect them up onto 611 * the root page until we have a full large page. 612 */ 613 if (!IS_P2ALIGNED(pnum, large)) { 614 615 /* 616 * If not in a large page, 617 * just free as small page. 618 */ 619 if (root == NULL) { 620 pp->p_szc = 0; 621 page_free_at_startup(pp); 622 continue; 623 } 624 625 /* 626 * Link a constituent page into the large page. 627 */ 628 pp->p_szc = szc; 629 page_list_concat(&root, &pp); 630 631 /* 632 * When large page is fully formed, free it. 633 */ 634 if (++cnt == large) { 635 page_free_large_ctr(cnt); 636 page_list_add_pages(root, PG_LIST_ISINIT); 637 root = NULL; 638 cnt = 0; 639 } 640 continue; 641 } 642 643 /* 644 * At this point we have a page number which 645 * is aligned. We assert that we aren't already 646 * in a different large page. 647 */ 648 ASSERT(IS_P2ALIGNED(pnum, large)); 649 ASSERT(root == NULL && cnt == 0); 650 651 /* 652 * If insufficient number of pages left to form 653 * a large page, just free the small page. 654 */ 655 if (num < large) { 656 pp->p_szc = 0; 657 page_free_at_startup(pp); 658 continue; 659 } 660 661 /* 662 * Otherwise start a new large page. 663 */ 664 pp->p_szc = szc; 665 cnt++; 666 root = pp; 667 } 668 ASSERT(root == NULL && cnt == 0); 669 } 670 671 /* 672 * Find a page representing the specified [vp, offset]. 673 * If we find the page but it is intransit coming in, 674 * it will have an "exclusive" lock and we wait for 675 * the i/o to complete. A page found on the free list 676 * is always reclaimed and then locked. On success, the page 677 * is locked, its data is valid and it isn't on the free 678 * list, while a NULL is returned if the page doesn't exist. 679 */ 680 page_t * 681 page_lookup(vnode_t *vp, u_offset_t off, se_t se) 682 { 683 return (page_lookup_create(vp, off, se, NULL, NULL, 0)); 684 } 685 686 /* 687 * Find a page representing the specified [vp, offset]. 688 * We either return the one we found or, if passed in, 689 * create one with identity of [vp, offset] of the 690 * pre-allocated page. If we find existing page but it is 691 * intransit coming in, it will have an "exclusive" lock 692 * and we wait for the i/o to complete. A page found on 693 * the free list is always reclaimed and then locked. 
694 * On success, the page is locked, its data is valid and 695 * it isn't on the free list, while a NULL is returned 696 * if the page doesn't exist and newpp is NULL. 697 */ 698 page_t * 699 page_lookup_create( 700 vnode_t *vp, 701 u_offset_t off, 702 se_t se, 703 page_t *newpp, 704 spgcnt_t *nrelocp, 705 int flags) 706 { 707 page_t *pp; 708 kmutex_t *phm; 709 ulong_t index; 710 uint_t hash_locked; 711 uint_t es; 712 713 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 714 VM_STAT_ADD(page_lookup_cnt[0]); 715 ASSERT(newpp ? PAGE_EXCL(newpp) : 1); 716 717 /* 718 * Acquire the appropriate page hash lock since 719 * we have to search the hash list. Pages that 720 * hash to this list can't change identity while 721 * this lock is held. 722 */ 723 hash_locked = 0; 724 index = PAGE_HASH_FUNC(vp, off); 725 phm = NULL; 726 top: 727 PAGE_HASH_SEARCH(index, pp, vp, off); 728 if (pp != NULL) { 729 VM_STAT_ADD(page_lookup_cnt[1]); 730 es = (newpp != NULL) ? 1 : 0; 731 es |= flags; 732 if (!hash_locked) { 733 VM_STAT_ADD(page_lookup_cnt[2]); 734 if (!page_try_reclaim_lock(pp, se, es)) { 735 /* 736 * On a miss, acquire the phm. Then 737 * next time, page_lock() will be called, 738 * causing a wait if the page is busy. 739 * just looping with page_trylock() would 740 * get pretty boring. 741 */ 742 VM_STAT_ADD(page_lookup_cnt[3]); 743 phm = PAGE_HASH_MUTEX(index); 744 mutex_enter(phm); 745 hash_locked = 1; 746 goto top; 747 } 748 } else { 749 VM_STAT_ADD(page_lookup_cnt[4]); 750 if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) { 751 VM_STAT_ADD(page_lookup_cnt[5]); 752 goto top; 753 } 754 } 755 756 /* 757 * Since `pp' is locked it can not change identity now. 758 * Reconfirm we locked the correct page. 759 * 760 * Both the p_vnode and p_offset *must* be cast volatile 761 * to force a reload of their values: The PAGE_HASH_SEARCH 762 * macro will have stuffed p_vnode and p_offset into 763 * registers before calling page_trylock(); another thread, 764 * actually holding the hash lock, could have changed the 765 * page's identity in memory, but our registers would not 766 * be changed, fooling the reconfirmation. If the hash 767 * lock was held during the search, the casting would 768 * not be needed. 769 */ 770 VM_STAT_ADD(page_lookup_cnt[6]); 771 if (((volatile struct vnode *)(pp->p_vnode) != vp) || 772 ((volatile u_offset_t)(pp->p_offset) != off)) { 773 VM_STAT_ADD(page_lookup_cnt[7]); 774 if (hash_locked) { 775 panic("page_lookup_create: lost page %p", 776 (void *)pp); 777 /*NOTREACHED*/ 778 } 779 page_unlock(pp); 780 phm = PAGE_HASH_MUTEX(index); 781 mutex_enter(phm); 782 hash_locked = 1; 783 goto top; 784 } 785 786 /* 787 * If page_trylock() was called, then pp may still be on 788 * the cachelist (can't be on the free list, it would not 789 * have been found in the search). If it is on the 790 * cachelist it must be pulled now. To pull the page from 791 * the cachelist, it must be exclusively locked. 792 * 793 * The other big difference between page_trylock() and 794 * page_lock(), is that page_lock() will pull the 795 * page from whatever free list (the cache list in this 796 * case) the page is on. If page_trylock() was used 797 * above, then we have to do the reclaim ourselves. 798 */ 799 if ((!hash_locked) && (PP_ISFREE(pp))) { 800 ASSERT(PP_ISAGED(pp) == 0); 801 VM_STAT_ADD(page_lookup_cnt[8]); 802 803 /* 804 * page_reclaim will ensure that we 805 * have this page exclusively 806 */ 807 808 if (!page_reclaim(pp, NULL)) { 809 /* 810 * Page_reclaim dropped whatever lock 811 * we held.
812 */ 813 VM_STAT_ADD(page_lookup_cnt[9]); 814 phm = PAGE_HASH_MUTEX(index); 815 mutex_enter(phm); 816 hash_locked = 1; 817 goto top; 818 } else if (se == SE_SHARED && newpp == NULL) { 819 VM_STAT_ADD(page_lookup_cnt[10]); 820 page_downgrade(pp); 821 } 822 } 823 824 if (hash_locked) { 825 mutex_exit(phm); 826 } 827 828 if (newpp != NULL && pp->p_szc < newpp->p_szc && 829 PAGE_EXCL(pp) && nrelocp != NULL) { 830 ASSERT(nrelocp != NULL); 831 (void) page_relocate(&pp, &newpp, 1, 1, nrelocp, 832 NULL); 833 if (*nrelocp > 0) { 834 VM_STAT_COND_ADD(*nrelocp == 1, 835 page_lookup_cnt[11]); 836 VM_STAT_COND_ADD(*nrelocp > 1, 837 page_lookup_cnt[12]); 838 pp = newpp; 839 se = SE_EXCL; 840 } else { 841 if (se == SE_SHARED) { 842 page_downgrade(pp); 843 } 844 VM_STAT_ADD(page_lookup_cnt[13]); 845 } 846 } else if (newpp != NULL && nrelocp != NULL) { 847 if (PAGE_EXCL(pp) && se == SE_SHARED) { 848 page_downgrade(pp); 849 } 850 VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc, 851 page_lookup_cnt[14]); 852 VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc, 853 page_lookup_cnt[15]); 854 VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc, 855 page_lookup_cnt[16]); 856 } else if (newpp != NULL && PAGE_EXCL(pp)) { 857 se = SE_EXCL; 858 } 859 } else if (!hash_locked) { 860 VM_STAT_ADD(page_lookup_cnt[17]); 861 phm = PAGE_HASH_MUTEX(index); 862 mutex_enter(phm); 863 hash_locked = 1; 864 goto top; 865 } else if (newpp != NULL) { 866 /* 867 * If we have a preallocated page then 868 * insert it now and basically behave like 869 * page_create. 870 */ 871 VM_STAT_ADD(page_lookup_cnt[18]); 872 /* 873 * Since we hold the page hash mutex and 874 * just searched for this page, page_hashin 875 * had better not fail. If it does, that 876 * means some thread did not follow the 877 * page hash mutex rules. Panic now and 878 * get it over with. As usual, go down 879 * holding all the locks. 880 */ 881 ASSERT(MUTEX_HELD(phm)); 882 if (!page_hashin(newpp, vp, off, phm)) { 883 ASSERT(MUTEX_HELD(phm)); 884 panic("page_lookup_create: hashin failed %p %p %llx %p", 885 (void *)newpp, (void *)vp, off, (void *)phm); 886 /*NOTREACHED*/ 887 } 888 ASSERT(MUTEX_HELD(phm)); 889 mutex_exit(phm); 890 phm = NULL; 891 page_set_props(newpp, P_REF); 892 page_io_lock(newpp); 893 pp = newpp; 894 se = SE_EXCL; 895 } else { 896 VM_STAT_ADD(page_lookup_cnt[19]); 897 mutex_exit(phm); 898 } 899 900 ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1); 901 902 ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1); 903 904 return (pp); 905 } 906 907 /* 908 * Search the hash list for the page representing the 909 * specified [vp, offset] and return it locked. Skip 910 * free pages and pages that cannot be locked as requested. 911 * Used while attempting to kluster pages. 
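 *
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a klustering loop might probe the neighbouring offset with
 *
 *	pp = page_lookup_nowait(vp, off + PAGESIZE, SE_SHARED);
 *	if (pp == NULL)
 *		stop extending the kluster;
 *
 * treating a NULL return simply as "do not kluster past this point".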
912 */ 913 page_t * 914 page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se) 915 { 916 page_t *pp; 917 kmutex_t *phm; 918 ulong_t index; 919 uint_t locked; 920 921 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 922 VM_STAT_ADD(page_lookup_nowait_cnt[0]); 923 924 index = PAGE_HASH_FUNC(vp, off); 925 PAGE_HASH_SEARCH(index, pp, vp, off); 926 locked = 0; 927 if (pp == NULL) { 928 top: 929 VM_STAT_ADD(page_lookup_nowait_cnt[1]); 930 locked = 1; 931 phm = PAGE_HASH_MUTEX(index); 932 mutex_enter(phm); 933 PAGE_HASH_SEARCH(index, pp, vp, off); 934 } 935 936 if (pp == NULL || PP_ISFREE(pp)) { 937 VM_STAT_ADD(page_lookup_nowait_cnt[2]); 938 pp = NULL; 939 } else { 940 if (!page_trylock(pp, se)) { 941 VM_STAT_ADD(page_lookup_nowait_cnt[3]); 942 pp = NULL; 943 } else { 944 VM_STAT_ADD(page_lookup_nowait_cnt[4]); 945 /* 946 * See the comment in page_lookup() 947 */ 948 if (((volatile struct vnode *)(pp->p_vnode) != vp) || 949 ((u_offset_t)(pp->p_offset) != off)) { 950 VM_STAT_ADD(page_lookup_nowait_cnt[5]); 951 if (locked) { 952 panic("page_lookup_nowait %p", 953 (void *)pp); 954 /*NOTREACHED*/ 955 } 956 page_unlock(pp); 957 goto top; 958 } 959 if (PP_ISFREE(pp)) { 960 VM_STAT_ADD(page_lookup_nowait_cnt[6]); 961 page_unlock(pp); 962 pp = NULL; 963 } 964 } 965 } 966 if (locked) { 967 VM_STAT_ADD(page_lookup_nowait_cnt[7]); 968 mutex_exit(phm); 969 } 970 971 ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1); 972 973 return (pp); 974 } 975 976 /* 977 * Search the hash list for a page with the specified [vp, off] 978 * that is known to exist and is already locked. This routine 979 * is typically used by segment SOFTUNLOCK routines. 980 */ 981 page_t * 982 page_find(vnode_t *vp, u_offset_t off) 983 { 984 page_t *pp; 985 kmutex_t *phm; 986 ulong_t index; 987 988 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 989 VM_STAT_ADD(page_find_cnt); 990 991 index = PAGE_HASH_FUNC(vp, off); 992 phm = PAGE_HASH_MUTEX(index); 993 994 mutex_enter(phm); 995 PAGE_HASH_SEARCH(index, pp, vp, off); 996 mutex_exit(phm); 997 998 ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr); 999 return (pp); 1000 } 1001 1002 /* 1003 * Determine whether a page with the specified [vp, off] 1004 * currently exists in the system. Obviously this should 1005 * only be considered as a hint since nothing prevents the 1006 * page from disappearing or appearing immediately after 1007 * the return from this routine. Subsequently, we don't 1008 * even bother to lock the list. 1009 */ 1010 page_t * 1011 page_exists(vnode_t *vp, u_offset_t off) 1012 { 1013 page_t *pp; 1014 ulong_t index; 1015 1016 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 1017 VM_STAT_ADD(page_exists_cnt); 1018 1019 index = PAGE_HASH_FUNC(vp, off); 1020 PAGE_HASH_SEARCH(index, pp, vp, off); 1021 1022 return (pp); 1023 } 1024 1025 /* 1026 * Determine if physically contiguous pages exist for [vp, off] - [vp, off + 1027 * page_size(szc)) range. if they exist and ppa is not NULL fill ppa array 1028 * with these pages locked SHARED. If necessary reclaim pages from 1029 * freelist. Return 1 if contiguous pages exist and 0 otherwise. 1030 * 1031 * If we fail to lock pages still return 1 if pages exist and contiguous. 1032 * But in this case return value is just a hint. ppa array won't be filled. 1033 * Caller should initialize ppa[0] as NULL to distinguish return value. 1034 * 1035 * Returns 0 if pages don't exist or not physically contiguous. 1036 * 1037 * This routine doesn't work for anonymous(swapfs) pages. 
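 *
 * Illustrative caller sketch (hypothetical; assumes ppa[] has room for
 * page_get_pagecnt(szc) + 1 entries):
 *
 *	ppa[0] = NULL;
 *	if (page_exists_physcontig(vp, off, szc, ppa) && ppa[0] != NULL) {
 *		use the constituent pages, which are held SE_SHARED,
 *		then page_unlock() each non-NULL ppa[i];
 *	}
 *
 * A return of 1 with ppa[0] still NULL means the pages appeared to
 * exist but could not all be locked, as described above.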
1038 */ 1039 int 1040 page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[]) 1041 { 1042 pgcnt_t pages; 1043 pfn_t pfn; 1044 page_t *rootpp; 1045 pgcnt_t i; 1046 pgcnt_t j; 1047 u_offset_t save_off = off; 1048 ulong_t index; 1049 kmutex_t *phm; 1050 page_t *pp; 1051 uint_t pszc; 1052 int loopcnt = 0; 1053 1054 ASSERT(szc != 0); 1055 ASSERT(vp != NULL); 1056 ASSERT(!IS_SWAPFSVP(vp)); 1057 ASSERT(!VN_ISKAS(vp)); 1058 1059 again: 1060 if (++loopcnt > 3) { 1061 VM_STAT_ADD(page_exphcontg[0]); 1062 return (0); 1063 } 1064 1065 index = PAGE_HASH_FUNC(vp, off); 1066 phm = PAGE_HASH_MUTEX(index); 1067 1068 mutex_enter(phm); 1069 PAGE_HASH_SEARCH(index, pp, vp, off); 1070 mutex_exit(phm); 1071 1072 VM_STAT_ADD(page_exphcontg[1]); 1073 1074 if (pp == NULL) { 1075 VM_STAT_ADD(page_exphcontg[2]); 1076 return (0); 1077 } 1078 1079 pages = page_get_pagecnt(szc); 1080 rootpp = pp; 1081 pfn = rootpp->p_pagenum; 1082 1083 if ((pszc = pp->p_szc) >= szc && ppa != NULL) { 1084 VM_STAT_ADD(page_exphcontg[3]); 1085 if (!page_trylock(pp, SE_SHARED)) { 1086 VM_STAT_ADD(page_exphcontg[4]); 1087 return (1); 1088 } 1089 if (pp->p_szc != pszc || pp->p_vnode != vp || 1090 pp->p_offset != off) { 1091 VM_STAT_ADD(page_exphcontg[5]); 1092 page_unlock(pp); 1093 off = save_off; 1094 goto again; 1095 } 1096 /* 1097 * szc was non zero and vnode and offset matched after we 1098 * locked the page it means it can't become free on us. 1099 */ 1100 ASSERT(!PP_ISFREE(pp)); 1101 if (!IS_P2ALIGNED(pfn, pages)) { 1102 page_unlock(pp); 1103 return (0); 1104 } 1105 ppa[0] = pp; 1106 pp++; 1107 off += PAGESIZE; 1108 pfn++; 1109 for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) { 1110 if (!page_trylock(pp, SE_SHARED)) { 1111 VM_STAT_ADD(page_exphcontg[6]); 1112 pp--; 1113 while (i-- > 0) { 1114 page_unlock(pp); 1115 pp--; 1116 } 1117 ppa[0] = NULL; 1118 return (1); 1119 } 1120 if (pp->p_szc != pszc) { 1121 VM_STAT_ADD(page_exphcontg[7]); 1122 page_unlock(pp); 1123 pp--; 1124 while (i-- > 0) { 1125 page_unlock(pp); 1126 pp--; 1127 } 1128 ppa[0] = NULL; 1129 off = save_off; 1130 goto again; 1131 } 1132 /* 1133 * szc the same as for previous already locked pages 1134 * with right identity. Since this page had correct 1135 * szc after we locked it can't get freed or destroyed 1136 * and therefore must have the expected identity. 1137 */ 1138 ASSERT(!PP_ISFREE(pp)); 1139 if (pp->p_vnode != vp || 1140 pp->p_offset != off) { 1141 panic("page_exists_physcontig: " 1142 "large page identity doesn't match"); 1143 } 1144 ppa[i] = pp; 1145 ASSERT(pp->p_pagenum == pfn); 1146 } 1147 VM_STAT_ADD(page_exphcontg[8]); 1148 ppa[pages] = NULL; 1149 return (1); 1150 } else if (pszc >= szc) { 1151 VM_STAT_ADD(page_exphcontg[9]); 1152 if (!IS_P2ALIGNED(pfn, pages)) { 1153 return (0); 1154 } 1155 return (1); 1156 } 1157 1158 if (!IS_P2ALIGNED(pfn, pages)) { 1159 VM_STAT_ADD(page_exphcontg[10]); 1160 return (0); 1161 } 1162 1163 if (page_numtomemseg_nolock(pfn) != 1164 page_numtomemseg_nolock(pfn + pages - 1)) { 1165 VM_STAT_ADD(page_exphcontg[11]); 1166 return (0); 1167 } 1168 1169 /* 1170 * We loop up 4 times across pages to promote page size. 1171 * We're extra cautious to promote page size atomically with respect 1172 * to everybody else. But we can probably optimize into 1 loop if 1173 * this becomes an issue. 
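 *
 * Roughly, the passes below are: (1) trylock each constituent page
 * SE_EXCL and verify its identity, (2) reclaim any free constituents
 * and unload existing mappings, (3) set p_szc on every constituent,
 * and (4) either fill ppa[] and downgrade to SE_SHARED, or simply
 * drop the locks when ppa is NULL.
 *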
1174 */ 1175 1176 for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) { 1177 ASSERT(pp->p_pagenum == pfn); 1178 if (!page_trylock(pp, SE_EXCL)) { 1179 VM_STAT_ADD(page_exphcontg[12]); 1180 break; 1181 } 1182 if (pp->p_vnode != vp || 1183 pp->p_offset != off) { 1184 VM_STAT_ADD(page_exphcontg[13]); 1185 page_unlock(pp); 1186 break; 1187 } 1188 if (pp->p_szc >= szc) { 1189 ASSERT(i == 0); 1190 page_unlock(pp); 1191 off = save_off; 1192 goto again; 1193 } 1194 } 1195 1196 if (i != pages) { 1197 VM_STAT_ADD(page_exphcontg[14]); 1198 --pp; 1199 while (i-- > 0) { 1200 page_unlock(pp); 1201 --pp; 1202 } 1203 return (0); 1204 } 1205 1206 pp = rootpp; 1207 for (i = 0; i < pages; i++, pp++) { 1208 if (PP_ISFREE(pp)) { 1209 VM_STAT_ADD(page_exphcontg[15]); 1210 ASSERT(!PP_ISAGED(pp)); 1211 ASSERT(pp->p_szc == 0); 1212 if (!page_reclaim(pp, NULL)) { 1213 break; 1214 } 1215 } else { 1216 ASSERT(pp->p_szc < szc); 1217 VM_STAT_ADD(page_exphcontg[16]); 1218 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 1219 } 1220 } 1221 if (i < pages) { 1222 VM_STAT_ADD(page_exphcontg[17]); 1223 /* 1224 * page_reclaim failed because we were out of memory. 1225 * drop the rest of the locks and return because this page 1226 * must be already reallocated anyway. 1227 */ 1228 pp = rootpp; 1229 for (j = 0; j < pages; j++, pp++) { 1230 if (j != i) { 1231 page_unlock(pp); 1232 } 1233 } 1234 return (0); 1235 } 1236 1237 off = save_off; 1238 pp = rootpp; 1239 for (i = 0; i < pages; i++, pp++, off += PAGESIZE) { 1240 ASSERT(PAGE_EXCL(pp)); 1241 ASSERT(!PP_ISFREE(pp)); 1242 ASSERT(!hat_page_is_mapped(pp)); 1243 ASSERT(pp->p_vnode == vp); 1244 ASSERT(pp->p_offset == off); 1245 pp->p_szc = szc; 1246 } 1247 pp = rootpp; 1248 for (i = 0; i < pages; i++, pp++) { 1249 if (ppa == NULL) { 1250 page_unlock(pp); 1251 } else { 1252 ppa[i] = pp; 1253 page_downgrade(ppa[i]); 1254 } 1255 } 1256 if (ppa != NULL) { 1257 ppa[pages] = NULL; 1258 } 1259 VM_STAT_ADD(page_exphcontg[18]); 1260 ASSERT(vp->v_pages != NULL); 1261 return (1); 1262 } 1263 1264 /* 1265 * Determine whether a page with the specified [vp, off] 1266 * currently exists in the system and if so return its 1267 * size code. Obviously this should only be considered as 1268 * a hint since nothing prevents the page from disappearing 1269 * or appearing immediately after the return from this routine. 1270 */ 1271 int 1272 page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc) 1273 { 1274 page_t *pp; 1275 kmutex_t *phm; 1276 ulong_t index; 1277 int rc = 0; 1278 1279 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 1280 ASSERT(szc != NULL); 1281 VM_STAT_ADD(page_exists_forreal_cnt); 1282 1283 index = PAGE_HASH_FUNC(vp, off); 1284 phm = PAGE_HASH_MUTEX(index); 1285 1286 mutex_enter(phm); 1287 PAGE_HASH_SEARCH(index, pp, vp, off); 1288 if (pp != NULL) { 1289 *szc = pp->p_szc; 1290 rc = 1; 1291 } 1292 mutex_exit(phm); 1293 return (rc); 1294 } 1295 1296 /* wakeup threads waiting for pages in page_create_get_something() */ 1297 void 1298 wakeup_pcgs(void) 1299 { 1300 if (!CV_HAS_WAITERS(&pcgs_cv)) 1301 return; 1302 cv_broadcast(&pcgs_cv); 1303 } 1304 1305 /* 1306 * 'freemem' is used all over the kernel as an indication of how many 1307 * pages are free (either on the cache list or on the free page list) 1308 * in the system. In very few places is a really accurate 'freemem' 1309 * needed. To avoid contention of the lock protecting the 1310 * single freemem, it was spread out into NCPU buckets. Set_freemem 1311 * sets freemem to the total of all NCPU buckets.
It is called from 1312 * clock() on each TICK. 1313 */ 1314 void 1315 set_freemem() 1316 { 1317 struct pcf *p; 1318 ulong_t t; 1319 uint_t i; 1320 1321 t = 0; 1322 p = pcf; 1323 for (i = 0; i < PCF_FANOUT; i++) { 1324 t += p->pcf_count; 1325 p++; 1326 } 1327 freemem = t; 1328 1329 /* 1330 * Don't worry about grabbing mutex. It's not that 1331 * critical if we miss a tick or two. This is 1332 * where we wakeup possible delayers in 1333 * page_create_get_something(). 1334 */ 1335 wakeup_pcgs(); 1336 } 1337 1338 ulong_t 1339 get_freemem() 1340 { 1341 struct pcf *p; 1342 ulong_t t; 1343 uint_t i; 1344 1345 t = 0; 1346 p = pcf; 1347 for (i = 0; i < PCF_FANOUT; i++) { 1348 t += p->pcf_count; 1349 p++; 1350 } 1351 /* 1352 * We just calculated it, might as well set it. 1353 */ 1354 freemem = t; 1355 return (t); 1356 } 1357 1358 /* 1359 * Acquire all of the page cache & free (pcf) locks. 1360 */ 1361 void 1362 pcf_acquire_all() 1363 { 1364 struct pcf *p; 1365 uint_t i; 1366 1367 p = pcf; 1368 for (i = 0; i < PCF_FANOUT; i++) { 1369 mutex_enter(&p->pcf_lock); 1370 p++; 1371 } 1372 } 1373 1374 /* 1375 * Release all the pcf_locks. 1376 */ 1377 void 1378 pcf_release_all() 1379 { 1380 struct pcf *p; 1381 uint_t i; 1382 1383 p = pcf; 1384 for (i = 0; i < PCF_FANOUT; i++) { 1385 mutex_exit(&p->pcf_lock); 1386 p++; 1387 } 1388 } 1389 1390 /* 1391 * Inform the VM system that we need some pages freed up. 1392 * Calls must be symmetric, e.g.: 1393 * 1394 * page_needfree(100); 1395 * wait a bit; 1396 * page_needfree(-100); 1397 */ 1398 void 1399 page_needfree(spgcnt_t npages) 1400 { 1401 mutex_enter(&new_freemem_lock); 1402 needfree += npages; 1403 mutex_exit(&new_freemem_lock); 1404 } 1405 1406 /* 1407 * Throttle for page_create(): try to prevent freemem from dropping 1408 * below throttlefree. We can't provide a 100% guarantee because 1409 * KM_NOSLEEP allocations, page_reclaim(), and various other things 1410 * nibble away at the freelist. However, we can block all PG_WAIT 1411 * allocations until memory becomes available. The motivation is 1412 * that several things can fall apart when there's no free memory: 1413 * 1414 * (1) If pageout() needs memory to push a page, the system deadlocks. 1415 * 1416 * (2) By (broken) specification, timeout(9F) can neither fail nor 1417 * block, so it has no choice but to panic the system if it 1418 * cannot allocate a callout structure. 1419 * 1420 * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block; 1421 * it panics if it cannot allocate a callback structure. 1422 * 1423 * (4) Untold numbers of third-party drivers have not yet been hardened 1424 * against KM_NOSLEEP and/or allocb() failures; they simply assume 1425 * success and panic the system with a data fault on failure. 1426 * (The long-term solution to this particular problem is to ship 1427 * hostile fault-injecting DEBUG kernels with the DDK.) 1428 * 1429 * It is theoretically impossible to guarantee success of non-blocking 1430 * allocations, but in practice, this throttle is very hard to break. 
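 *
 * A worked illustration with made-up numbers: if throttlefree were
 * 1,000 pages and pageout_reserve 500, a PG_WAIT request for npages
 * would block until freemem >= npages + 1,000, a PG_WAIT | PG_PUSHPAGE
 * request would only need freemem >= npages + 500, and a non-blocking
 * request would succeed as long as freemem >= npages + pageout_reserve.
 *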
1431 */ 1432 static int 1433 page_create_throttle(pgcnt_t npages, int flags) 1434 { 1435 ulong_t fm; 1436 uint_t i; 1437 pgcnt_t tf; /* effective value of throttlefree */ 1438 1439 /* 1440 * Never deny pages when: 1441 * - it's a thread that cannot block [NOMEMWAIT()] 1442 * - the allocation cannot block and must not fail 1443 * - the allocation cannot block and is pageout dispensated 1444 */ 1445 if (NOMEMWAIT() || 1446 ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) || 1447 ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE)) 1448 return (1); 1449 1450 /* 1451 * If the allocation can't block, we look favorably upon it 1452 * unless we're below pageout_reserve. In that case we fail 1453 * the allocation because we want to make sure there are a few 1454 * pages available for pageout. 1455 */ 1456 if ((flags & PG_WAIT) == 0) 1457 return (freemem >= npages + pageout_reserve); 1458 1459 /* Calculate the effective throttlefree value */ 1460 tf = throttlefree - 1461 ((flags & PG_PUSHPAGE) ? pageout_reserve : 0); 1462 1463 cv_signal(&proc_pageout->p_cv); 1464 1465 for (;;) { 1466 fm = 0; 1467 pcf_acquire_all(); 1468 mutex_enter(&new_freemem_lock); 1469 for (i = 0; i < PCF_FANOUT; i++) { 1470 fm += pcf[i].pcf_count; 1471 pcf[i].pcf_wait++; 1472 mutex_exit(&pcf[i].pcf_lock); 1473 } 1474 freemem = fm; 1475 if (freemem >= npages + tf) { 1476 mutex_exit(&new_freemem_lock); 1477 break; 1478 } 1479 needfree += npages; 1480 freemem_wait++; 1481 cv_wait(&freemem_cv, &new_freemem_lock); 1482 freemem_wait--; 1483 needfree -= npages; 1484 mutex_exit(&new_freemem_lock); 1485 } 1486 return (1); 1487 } 1488 1489 /* 1490 * page_create_wait() is called to either coalesce pages from the 1491 * different pcf buckets or to wait because there simply are not 1492 * enough pages to satisfy the caller's request. 1493 * 1494 * Sadly, this is called from platform/vm/vm_machdep.c 1495 */ 1496 int 1497 page_create_wait(size_t npages, uint_t flags) 1498 { 1499 pgcnt_t total; 1500 uint_t i; 1501 struct pcf *p; 1502 1503 /* 1504 * Wait until there are enough free pages to satisfy our 1505 * entire request. 1506 * We set needfree += npages before prodding pageout, to make sure 1507 * it does real work when npages > lotsfree > freemem. 1508 */ 1509 VM_STAT_ADD(page_create_not_enough); 1510 1511 ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1); 1512 checkagain: 1513 if ((flags & PG_NORELOC) && 1514 kcage_freemem < kcage_throttlefree + npages) 1515 (void) kcage_create_throttle(npages, flags); 1516 1517 if (freemem < npages + throttlefree) 1518 if (!page_create_throttle(npages, flags)) 1519 return (0); 1520 1521 /* 1522 * Since page_create_va() looked at every 1523 * bucket, assume we are going to have to wait. 1524 * Get all of the pcf locks. 1525 */ 1526 total = 0; 1527 p = pcf; 1528 for (i = 0; i < PCF_FANOUT; i++) { 1529 mutex_enter(&p->pcf_lock); 1530 total += p->pcf_count; 1531 if (total >= npages) { 1532 /* 1533 * Wow! There are enough pages laying around 1534 * to satisfy the request. Do the accounting, 1535 * drop the locks we acquired, and go back. 1536 * 1537 * freemem is not protected by any lock. So, 1538 * we cannot have any assertion containing 1539 * freemem. 
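 *
 * (The loop below then walks backwards from the current bucket
 * toward pcf[0], draining each bucket's pcf_count and dropping its
 * lock until the whole request has been accounted for.)
 *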
1540 */ 1541 freemem -= npages; 1542 1543 while (p >= pcf) { 1544 if (p->pcf_count <= npages) { 1545 npages -= p->pcf_count; 1546 p->pcf_count = 0; 1547 } else { 1548 p->pcf_count -= (uint_t)npages; 1549 npages = 0; 1550 } 1551 mutex_exit(&p->pcf_lock); 1552 p--; 1553 } 1554 ASSERT(npages == 0); 1555 return (1); 1556 } 1557 p++; 1558 } 1559 1560 /* 1561 * All of the pcf locks are held, there are not enough pages 1562 * to satisfy the request (npages < total). 1563 * Be sure to acquire the new_freemem_lock before dropping 1564 * the pcf locks. This prevents dropping wakeups in page_free(). 1565 * The order is always pcf_lock then new_freemem_lock. 1566 * 1567 * Since we hold all the pcf locks, it is a good time to set freemem. 1568 * 1569 * If the caller does not want to wait, return now. 1570 * Else turn the pageout daemon loose to find something 1571 * and wait till it does. 1572 * 1573 */ 1574 freemem = total; 1575 1576 if ((flags & PG_WAIT) == 0) { 1577 pcf_release_all(); 1578 1579 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM, 1580 "page_create_nomem:npages %ld freemem %ld", npages, freemem); 1581 return (0); 1582 } 1583 1584 ASSERT(proc_pageout != NULL); 1585 cv_signal(&proc_pageout->p_cv); 1586 1587 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START, 1588 "page_create_sleep_start: freemem %ld needfree %ld", 1589 freemem, needfree); 1590 1591 /* 1592 * We are going to wait. 1593 * We currently hold all of the pcf_locks, 1594 * get the new_freemem_lock (it protects freemem_wait), 1595 * before dropping the pcf_locks. 1596 */ 1597 mutex_enter(&new_freemem_lock); 1598 1599 p = pcf; 1600 for (i = 0; i < PCF_FANOUT; i++) { 1601 p->pcf_wait++; 1602 mutex_exit(&p->pcf_lock); 1603 p++; 1604 } 1605 1606 needfree += npages; 1607 freemem_wait++; 1608 1609 cv_wait(&freemem_cv, &new_freemem_lock); 1610 1611 freemem_wait--; 1612 needfree -= npages; 1613 1614 mutex_exit(&new_freemem_lock); 1615 1616 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END, 1617 "page_create_sleep_end: freemem %ld needfree %ld", 1618 freemem, needfree); 1619 1620 VM_STAT_ADD(page_create_not_enough_again); 1621 goto checkagain; 1622 } 1623 1624 /* 1625 * A routine to do the opposite of page_create_wait(). 1626 */ 1627 void 1628 page_create_putback(spgcnt_t npages) 1629 { 1630 struct pcf *p; 1631 pgcnt_t lump; 1632 uint_t *which; 1633 1634 /* 1635 * When a contiguous lump is broken up, we have to 1636 * deal with lots of pages (min 64) so lets spread 1637 * the wealth around. 1638 */ 1639 lump = roundup(npages, PCF_FANOUT) / PCF_FANOUT; 1640 freemem += npages; 1641 1642 for (p = pcf; (npages > 0) && (p < &pcf[PCF_FANOUT]); p++) { 1643 which = &p->pcf_count; 1644 1645 mutex_enter(&p->pcf_lock); 1646 1647 if (p->pcf_block) { 1648 which = &p->pcf_reserve; 1649 } 1650 1651 if (lump < npages) { 1652 *which += (uint_t)lump; 1653 npages -= lump; 1654 } else { 1655 *which += (uint_t)npages; 1656 npages = 0; 1657 } 1658 1659 if (p->pcf_wait) { 1660 mutex_enter(&new_freemem_lock); 1661 /* 1662 * Check to see if some other thread 1663 * is actually waiting. Another bucket 1664 * may have woken it up by now. If there 1665 * are no waiters, then set our pcf_wait 1666 * count to zero to avoid coming in here 1667 * next time. 
1668 */ 1669 if (freemem_wait) { 1670 if (npages > 1) { 1671 cv_broadcast(&freemem_cv); 1672 } else { 1673 cv_signal(&freemem_cv); 1674 } 1675 p->pcf_wait--; 1676 } else { 1677 p->pcf_wait = 0; 1678 } 1679 mutex_exit(&new_freemem_lock); 1680 } 1681 mutex_exit(&p->pcf_lock); 1682 } 1683 ASSERT(npages == 0); 1684 } 1685 1686 /* 1687 * A helper routine for page_create_get_something. 1688 * The indenting got too deep down there. 1689 * Unblock the pcf counters. Any pages freed after 1690 * pcf_block got set are moved to pcf_count and 1691 * wakeups (cv_broadcast() or cv_signal()) are done as needed. 1692 */ 1693 static void 1694 pcgs_unblock(void) 1695 { 1696 int i; 1697 struct pcf *p; 1698 1699 /* Update freemem while we're here. */ 1700 freemem = 0; 1701 p = pcf; 1702 for (i = 0; i < PCF_FANOUT; i++) { 1703 mutex_enter(&p->pcf_lock); 1704 ASSERT(p->pcf_count == 0); 1705 p->pcf_count = p->pcf_reserve; 1706 p->pcf_block = 0; 1707 freemem += p->pcf_count; 1708 if (p->pcf_wait) { 1709 mutex_enter(&new_freemem_lock); 1710 if (freemem_wait) { 1711 if (p->pcf_reserve > 1) { 1712 cv_broadcast(&freemem_cv); 1713 p->pcf_wait = 0; 1714 } else { 1715 cv_signal(&freemem_cv); 1716 p->pcf_wait--; 1717 } 1718 } else { 1719 p->pcf_wait = 0; 1720 } 1721 mutex_exit(&new_freemem_lock); 1722 } 1723 p->pcf_reserve = 0; 1724 mutex_exit(&p->pcf_lock); 1725 p++; 1726 } 1727 } 1728 1729 /* 1730 * Called from page_create_va() when both the cache and free lists 1731 * have been checked once. 1732 * 1733 * Either returns a page or panics since the accounting was done 1734 * way before we got here. 1735 * 1736 * We don't come here often, so leave the accounting on permanently. 1737 */ 1738 1739 #define MAX_PCGS 100 1740 1741 #ifdef DEBUG 1742 #define PCGS_TRIES 100 1743 #else /* DEBUG */ 1744 #define PCGS_TRIES 10 1745 #endif /* DEBUG */ 1746 1747 #ifdef VM_STATS 1748 uint_t pcgs_counts[PCGS_TRIES]; 1749 uint_t pcgs_too_many; 1750 uint_t pcgs_entered; 1751 uint_t pcgs_entered_noreloc; 1752 uint_t pcgs_locked; 1753 uint_t pcgs_cagelocked; 1754 #endif /* VM_STATS */ 1755 1756 static page_t * 1757 page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg, 1758 caddr_t vaddr, uint_t flags) 1759 { 1760 uint_t count; 1761 page_t *pp; 1762 uint_t locked, i; 1763 struct pcf *p; 1764 lgrp_t *lgrp; 1765 int cagelocked = 0; 1766 1767 VM_STAT_ADD(pcgs_entered); 1768 1769 /* 1770 * Tap any reserve freelists: if we fail now, we'll die 1771 * since the page(s) we're looking for have already been 1772 * accounted for. 1773 */ 1774 flags |= PG_PANIC; 1775 1776 if ((flags & PG_NORELOC) != 0) { 1777 VM_STAT_ADD(pcgs_entered_noreloc); 1778 /* 1779 * Requests for free pages from critical threads 1780 * such as pageout still won't throttle here, but 1781 * we must try again, to give the cageout thread 1782 * another chance to catch up. Since we already 1783 * accounted for the pages, we had better get them 1784 * this time. 1785 * 1786 * N.B. All non-critical threads acquire the pcgs_cagelock 1787 * to serialize access to the freelists. This implements a 1788 * turnstile-type synchronization to avoid starvation of 1789 * critical requests for PG_NORELOC memory by non-critical 1790 * threads: all non-critical threads must acquire a 'ticket' 1791 * before passing through, which entails making sure 1792 * kcage_freemem won't fall below minfree prior to grabbing 1793 * pages from the freelists.
1794 */ 1795 if (kcage_create_throttle(1, flags) == KCT_NONCRIT) { 1796 mutex_enter(&pcgs_cagelock); 1797 cagelocked = 1; 1798 VM_STAT_ADD(pcgs_cagelocked); 1799 } 1800 } 1801 1802 /* 1803 * Time to get serious. 1804 * We failed to get a `correctly colored' page from both the 1805 * free and cache lists. 1806 * We escalate in stage. 1807 * 1808 * First try both lists without worrying about color. 1809 * 1810 * Then, grab all page accounting locks (ie. pcf[]) and 1811 * steal any pages that they have and set the pcf_block flag to 1812 * stop deletions from the lists. This will help because 1813 * a page can get added to the free list while we are looking 1814 * at the cache list, then another page could be added to the cache 1815 * list allowing the page on the free list to be removed as we 1816 * move from looking at the cache list to the free list. This 1817 * could happen over and over. We would never find the page 1818 * we have accounted for. 1819 * 1820 * Noreloc pages are a subset of the global (relocatable) page pool. 1821 * They are not tracked separately in the pcf bins, so it is 1822 * impossible to know when doing pcf accounting if the available 1823 * page(s) are noreloc pages or not. When looking for a noreloc page 1824 * it is quite easy to end up here even if the global (relocatable) 1825 * page pool has plenty of free pages but the noreloc pool is empty. 1826 * 1827 * When the noreloc pool is empty (or low), additional noreloc pages 1828 * are created by converting pages from the global page pool. This 1829 * process will stall during pcf accounting if the pcf bins are 1830 * already locked. Such is the case when a noreloc allocation is 1831 * looping here in page_create_get_something waiting for more noreloc 1832 * pages to appear. 1833 * 1834 * Short of adding a new field to the pcf bins to accurately track 1835 * the number of free noreloc pages, we instead do not grab the 1836 * pcgs_lock, do not set the pcf blocks and do not timeout when 1837 * allocating a noreloc page. This allows noreloc allocations to 1838 * loop without blocking global page pool allocations. 1839 * 1840 * NOTE: the behaviour of page_create_get_something has not changed 1841 * for the case of global page pool allocations. 1842 */ 1843 1844 flags &= ~PG_MATCH_COLOR; 1845 locked = 0; 1846 #if defined(__i386) || defined(__amd64) 1847 flags = page_create_update_flags_x86(flags); 1848 #endif 1849 1850 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 1851 1852 for (count = 0; kcage_on || count < MAX_PCGS; count++) { 1853 pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 1854 flags, lgrp); 1855 if (pp == NULL) { 1856 pp = page_get_cachelist(vp, off, seg, vaddr, 1857 flags, lgrp); 1858 } 1859 if (pp == NULL) { 1860 /* 1861 * Serialize. Don't fight with other pcgs(). 1862 */ 1863 if (!locked && (!kcage_on || !(flags & PG_NORELOC))) { 1864 mutex_enter(&pcgs_lock); 1865 VM_STAT_ADD(pcgs_locked); 1866 locked = 1; 1867 p = pcf; 1868 for (i = 0; i < PCF_FANOUT; i++) { 1869 mutex_enter(&p->pcf_lock); 1870 ASSERT(p->pcf_block == 0); 1871 p->pcf_block = 1; 1872 p->pcf_reserve = p->pcf_count; 1873 p->pcf_count = 0; 1874 mutex_exit(&p->pcf_lock); 1875 p++; 1876 } 1877 freemem = 0; 1878 } 1879 1880 if (count) { 1881 /* 1882 * Since page_free() puts pages on 1883 * a list then accounts for it, we 1884 * just have to wait for page_free() 1885 * to unlock any page it was working 1886 * with. The page_lock()-page_reclaim() 1887 * path falls in the same boat.
1888 * 1889 * We don't need to check on the 1890 * PG_WAIT flag, we have already 1891 * accounted for the page we are 1892 * looking for in page_create_va(). 1893 * 1894 * We just wait a moment to let any 1895 * locked pages on the lists free up, 1896 * then continue around and try again. 1897 * 1898 * Will be awakened by set_freemem(). 1899 */ 1900 mutex_enter(&pcgs_wait_lock); 1901 cv_wait(&pcgs_cv, &pcgs_wait_lock); 1902 mutex_exit(&pcgs_wait_lock); 1903 } 1904 } else { 1905 #ifdef VM_STATS 1906 if (count >= PCGS_TRIES) { 1907 VM_STAT_ADD(pcgs_too_many); 1908 } else { 1909 VM_STAT_ADD(pcgs_counts[count]); 1910 } 1911 #endif 1912 if (locked) { 1913 pcgs_unblock(); 1914 mutex_exit(&pcgs_lock); 1915 } 1916 if (cagelocked) 1917 mutex_exit(&pcgs_cagelock); 1918 return (pp); 1919 } 1920 } 1921 /* 1922 * we go down holding the pcf locks. 1923 */ 1924 panic("no %spage found %d", 1925 ((flags & PG_NORELOC) ? "non-reloc " : ""), count); 1926 /*NOTREACHED*/ 1927 } 1928 1929 /* 1930 * Create enough pages for "bytes" worth of data starting at 1931 * "off" in "vp". 1932 * 1933 * Where flag must be one of: 1934 * 1935 * PG_EXCL: Exclusive create (fail if any page already 1936 * exists in the page cache) which does not 1937 * wait for memory to become available. 1938 * 1939 * PG_WAIT: Non-exclusive create which can wait for 1940 * memory to become available. 1941 * 1942 * PG_PHYSCONTIG: Allocate physically contiguous pages. 1943 * (Not Supported) 1944 * 1945 * A doubly linked list of pages is returned to the caller. Each page 1946 * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock) 1947 * lock. 1948 * 1949 * Unable to change the parameters to page_create() in a minor release, 1950 * we renamed page_create() to page_create_va(), changed all known calls 1951 * from page_create() to page_create_va(), and created this wrapper. 1952 * 1953 * Upon a major release, we should break compatibility by deleting this 1954 * wrapper, and replacing all the strings "page_create_va", with "page_create". 1955 * 1956 * NOTE: There is a copy of this interface as page_create_io() in 1957 * i86/vm/vm_machdep.c. Any bugs fixed here should be applied 1958 * there. 1959 */ 1960 page_t * 1961 page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags) 1962 { 1963 caddr_t random_vaddr; 1964 struct seg kseg; 1965 1966 #ifdef DEBUG 1967 cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p", 1968 (void *)caller()); 1969 #endif 1970 1971 random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^ 1972 (uintptr_t)(off >> PAGESHIFT)); 1973 kseg.s_as = &kas; 1974 1975 return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr)); 1976 } 1977 1978 #ifdef DEBUG 1979 uint32_t pg_alloc_pgs_mtbf = 0; 1980 #endif 1981 1982 /* 1983 * Used for large page support. It will attempt to allocate 1984 * a large page(s) off the freelist. 1985 * 1986 * Returns non zero on failure. 1987 */ 1988 int 1989 page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr, 1990 page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz, int pgflags) 1991 { 1992 pgcnt_t npgs, curnpgs, totpgs; 1993 size_t pgsz; 1994 page_t *pplist = NULL, *pp; 1995 int err = 0; 1996 lgrp_t *lgrp; 1997 1998 ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1)); 1999 ASSERT(pgflags == 0 || pgflags == PG_LOCAL); 2000 2001 /* 2002 * Check if system heavily prefers local large pages over remote 2003 * on systems with multiple lgroups. 
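 *
 * As noted at the definition of lpg_alloc_prefer above, this policy is
 * meant to be selected from /etc/system; assuming the usual syntax for
 * integer tunables, a line such as "set lpg_alloc_prefer = 1" would
 * request LPAP_LOCAL.
 *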
2004 */ 2005 if (lpg_alloc_prefer == LPAP_LOCAL && nlgrps > 1) { 2006 pgflags = PG_LOCAL; 2007 } 2008 2009 VM_STAT_ADD(alloc_pages[0]); 2010 2011 #ifdef DEBUG 2012 if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) { 2013 return (ENOMEM); 2014 } 2015 #endif 2016 2017 /* 2018 * One must be NULL but not both. 2019 * And one must be non NULL but not both. 2020 */ 2021 ASSERT(basepp != NULL || ppa != NULL); 2022 ASSERT(basepp == NULL || ppa == NULL); 2023 2024 #if defined(__i386) || defined(__amd64) 2025 while (page_chk_freelist(szc) == 0) { 2026 VM_STAT_ADD(alloc_pages[8]); 2027 if (anypgsz == 0 || --szc == 0) 2028 return (ENOMEM); 2029 } 2030 #endif 2031 2032 pgsz = page_get_pagesize(szc); 2033 totpgs = curnpgs = npgs = pgsz >> PAGESHIFT; 2034 2035 ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0); 2036 2037 (void) page_create_wait(npgs, PG_WAIT); 2038 2039 while (npgs && szc) { 2040 lgrp = lgrp_mem_choose(seg, addr, pgsz); 2041 if (pgflags == PG_LOCAL) { 2042 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2043 pgflags, lgrp); 2044 if (pp == NULL) { 2045 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2046 0, lgrp); 2047 } 2048 } else { 2049 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2050 0, lgrp); 2051 } 2052 if (pp != NULL) { 2053 VM_STAT_ADD(alloc_pages[1]); 2054 page_list_concat(&pplist, &pp); 2055 ASSERT(npgs >= curnpgs); 2056 npgs -= curnpgs; 2057 } else if (anypgsz) { 2058 VM_STAT_ADD(alloc_pages[2]); 2059 szc--; 2060 pgsz = page_get_pagesize(szc); 2061 curnpgs = pgsz >> PAGESHIFT; 2062 } else { 2063 VM_STAT_ADD(alloc_pages[3]); 2064 ASSERT(npgs == totpgs); 2065 page_create_putback(npgs); 2066 return (ENOMEM); 2067 } 2068 } 2069 if (szc == 0) { 2070 VM_STAT_ADD(alloc_pages[4]); 2071 ASSERT(npgs != 0); 2072 page_create_putback(npgs); 2073 err = ENOMEM; 2074 } else if (basepp != NULL) { 2075 ASSERT(npgs == 0); 2076 ASSERT(ppa == NULL); 2077 *basepp = pplist; 2078 } 2079 2080 npgs = totpgs - npgs; 2081 pp = pplist; 2082 2083 /* 2084 * Clear the free and age bits. Also if we were passed in a ppa then 2085 * fill it in with all the constituent pages from the large page. But 2086 * if we failed to allocate all the pages just free what we got. 2087 */ 2088 while (npgs != 0) { 2089 ASSERT(PP_ISFREE(pp)); 2090 ASSERT(PP_ISAGED(pp)); 2091 if (ppa != NULL || err != 0) { 2092 if (err == 0) { 2093 VM_STAT_ADD(alloc_pages[5]); 2094 PP_CLRFREE(pp); 2095 PP_CLRAGED(pp); 2096 page_sub(&pplist, pp); 2097 *ppa++ = pp; 2098 npgs--; 2099 } else { 2100 VM_STAT_ADD(alloc_pages[6]); 2101 ASSERT(pp->p_szc != 0); 2102 curnpgs = page_get_pagecnt(pp->p_szc); 2103 page_list_break(&pp, &pplist, curnpgs); 2104 page_list_add_pages(pp, 0); 2105 page_create_putback(curnpgs); 2106 ASSERT(npgs >= curnpgs); 2107 npgs -= curnpgs; 2108 } 2109 pp = pplist; 2110 } else { 2111 VM_STAT_ADD(alloc_pages[7]); 2112 PP_CLRFREE(pp); 2113 PP_CLRAGED(pp); 2114 pp = pp->p_next; 2115 npgs--; 2116 } 2117 } 2118 return (err); 2119 } 2120 2121 /* 2122 * Get a single large page off of the freelists, and set it up for use. 2123 * Number of bytes requested must be a supported page size. 2124 * 2125 * Note that this call may fail even if there is sufficient 2126 * memory available or PG_WAIT is set, so the caller must 2127 * be willing to fallback on page_create_va(), block and retry, 2128 * or fail the requester. 
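 *
 * Hypothetical caller sketch (not from this file): a client needing a
 * single large page mapped by one TTE might try
 *
 *	pp = page_create_va_large(vp, off, pgsz, PG_EXCL | PG_WAIT,
 *	    seg, vaddr, NULL);
 *	if (pp == NULL)
 *		fall back to page_create_va() or retry later;
 *
 * treating NULL strictly as "no contiguous chunk available right now".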
2129 */ 2130 page_t * 2131 page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2132 struct seg *seg, caddr_t vaddr, void *arg) 2133 { 2134 pgcnt_t npages, pcftotal; 2135 page_t *pp; 2136 page_t *rootpp; 2137 lgrp_t *lgrp; 2138 uint_t enough; 2139 uint_t pcf_index; 2140 uint_t i; 2141 struct pcf *p; 2142 struct pcf *q; 2143 lgrp_id_t *lgrpid = (lgrp_id_t *)arg; 2144 2145 ASSERT(vp != NULL); 2146 2147 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2148 PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0); 2149 /* but no others */ 2150 2151 ASSERT((flags & PG_EXCL) == PG_EXCL); 2152 2153 npages = btop(bytes); 2154 2155 if (!kcage_on || panicstr) { 2156 /* 2157 * Cage is OFF, or we are single threaded in 2158 * panic, so make everything a RELOC request. 2159 */ 2160 flags &= ~PG_NORELOC; 2161 } 2162 2163 /* 2164 * Make sure there's adequate physical memory available. 2165 * Note: PG_WAIT is ignored here. 2166 */ 2167 if (freemem <= throttlefree + npages) { 2168 VM_STAT_ADD(page_create_large_cnt[1]); 2169 return (NULL); 2170 } 2171 2172 /* 2173 * If cage is on, dampen draw from cage when available 2174 * cage space is low. 2175 */ 2176 if ((flags & (PG_NORELOC | PG_WAIT)) == (PG_NORELOC | PG_WAIT) && 2177 kcage_freemem < kcage_throttlefree + npages) { 2178 2179 /* 2180 * The cage is on, the caller wants PG_NORELOC 2181 * pages and available cage memory is very low. 2182 * Call kcage_create_throttle() to attempt to 2183 * control demand on the cage. 2184 */ 2185 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) { 2186 VM_STAT_ADD(page_create_large_cnt[2]); 2187 return (NULL); 2188 } 2189 } 2190 2191 enough = 0; 2192 pcf_index = PCF_INDEX(); 2193 p = &pcf[pcf_index]; 2194 q = &pcf[PCF_FANOUT]; 2195 for (pcftotal = 0, i = 0; i < PCF_FANOUT; i++) { 2196 if (p->pcf_count > npages) { 2197 /* 2198 * a good one to try. 2199 */ 2200 mutex_enter(&p->pcf_lock); 2201 if (p->pcf_count > npages) { 2202 p->pcf_count -= (uint_t)npages; 2203 /* 2204 * freemem is not protected by any lock. 2205 * Thus, we cannot have any assertion 2206 * containing freemem here. 2207 */ 2208 freemem -= npages; 2209 enough = 1; 2210 mutex_exit(&p->pcf_lock); 2211 break; 2212 } 2213 mutex_exit(&p->pcf_lock); 2214 } 2215 pcftotal += p->pcf_count; 2216 p++; 2217 if (p >= q) { 2218 p = pcf; 2219 } 2220 } 2221 2222 if (!enough) { 2223 /* If there isn't enough memory available, give up. */ 2224 if (pcftotal < npages) { 2225 VM_STAT_ADD(page_create_large_cnt[3]); 2226 return (NULL); 2227 } 2228 2229 /* try to collect pages from several pcf bins */ 2230 for (p = pcf, pcftotal = 0, i = 0; i < PCF_FANOUT; i++) { 2231 mutex_enter(&p->pcf_lock); 2232 pcftotal += p->pcf_count; 2233 if (pcftotal >= npages) { 2234 /* 2235 * Wow! There are enough pages laying around 2236 * to satisfy the request. Do the accounting, 2237 * drop the locks we acquired, and go back. 2238 * 2239 * freemem is not protected by any lock. So, 2240 * we cannot have any assertion containing 2241 * freemem. 
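 *
 * The loop below then walks back through the buckets whose locks
 * we already hold, draining each pcf_count until the whole request
 * has been charged, and dropping each pcf_lock on the way out.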
2242 */ 2243 pgcnt_t tpages = npages; 2244 freemem -= npages; 2245 while (p >= pcf) { 2246 if (p->pcf_count <= tpages) { 2247 tpages -= p->pcf_count; 2248 p->pcf_count = 0; 2249 } else { 2250 p->pcf_count -= (uint_t)tpages; 2251 tpages = 0; 2252 } 2253 mutex_exit(&p->pcf_lock); 2254 p--; 2255 } 2256 ASSERT(tpages == 0); 2257 break; 2258 } 2259 p++; 2260 } 2261 if (i == PCF_FANOUT) { 2262 /* failed to collect pages - release the locks */ 2263 while (--p >= pcf) { 2264 mutex_exit(&p->pcf_lock); 2265 } 2266 VM_STAT_ADD(page_create_large_cnt[4]); 2267 return (NULL); 2268 } 2269 } 2270 2271 /* 2272 * This is where this function behaves fundamentally differently 2273 * than page_create_va(); since we're intending to map the page 2274 * with a single TTE, we have to get it as a physically contiguous 2275 * hardware pagesize chunk. If we can't, we fail. 2276 */ 2277 if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max && 2278 LGRP_EXISTS(lgrp_table[*lgrpid])) 2279 lgrp = lgrp_table[*lgrpid]; 2280 else 2281 lgrp = lgrp_mem_choose(seg, vaddr, bytes); 2282 2283 if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr, 2284 bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) { 2285 page_create_putback(npages); 2286 VM_STAT_ADD(page_create_large_cnt[5]); 2287 return (NULL); 2288 } 2289 2290 /* 2291 * if we got the page with the wrong mtype give it back this is a 2292 * workaround for CR 6249718. When CR 6249718 is fixed we never get 2293 * inside "if" and the workaround becomes just a nop 2294 */ 2295 if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) { 2296 page_list_add_pages(rootpp, 0); 2297 page_create_putback(npages); 2298 VM_STAT_ADD(page_create_large_cnt[6]); 2299 return (NULL); 2300 } 2301 2302 /* 2303 * If satisfying this request has left us with too little 2304 * memory, start the wheels turning to get some back. The 2305 * first clause of the test prevents waking up the pageout 2306 * daemon in situations where it would decide that there's 2307 * nothing to do. 
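 * (nscan and desscan are the scanner's progress and target for the
 * current pass; once nscan has caught up with desscan another wakeup
 * would accomplish nothing.)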
2308 */ 2309 if (nscan < desscan && freemem < minfree) { 2310 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2311 "pageout_cv_signal:freemem %ld", freemem); 2312 cv_signal(&proc_pageout->p_cv); 2313 } 2314 2315 pp = rootpp; 2316 while (npages--) { 2317 ASSERT(PAGE_EXCL(pp)); 2318 ASSERT(pp->p_vnode == NULL); 2319 ASSERT(!hat_page_is_mapped(pp)); 2320 PP_CLRFREE(pp); 2321 PP_CLRAGED(pp); 2322 if (!page_hashin(pp, vp, off, NULL)) 2323 panic("page_create_large: hashin failed: page %p", 2324 (void *)pp); 2325 page_io_lock(pp); 2326 off += PAGESIZE; 2327 pp = pp->p_next; 2328 } 2329 2330 VM_STAT_ADD(page_create_large_cnt[0]); 2331 return (rootpp); 2332 } 2333 2334 page_t * 2335 page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2336 struct seg *seg, caddr_t vaddr) 2337 { 2338 page_t *plist = NULL; 2339 pgcnt_t npages; 2340 pgcnt_t found_on_free = 0; 2341 pgcnt_t pages_req; 2342 page_t *npp = NULL; 2343 uint_t enough; 2344 uint_t i; 2345 uint_t pcf_index; 2346 struct pcf *p; 2347 struct pcf *q; 2348 lgrp_t *lgrp; 2349 2350 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START, 2351 "page_create_start:vp %p off %llx bytes %lu flags %x", 2352 vp, off, bytes, flags); 2353 2354 ASSERT(bytes != 0 && vp != NULL); 2355 2356 if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) { 2357 panic("page_create: invalid flags"); 2358 /*NOTREACHED*/ 2359 } 2360 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2361 PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0); 2362 /* but no others */ 2363 2364 pages_req = npages = btopr(bytes); 2365 /* 2366 * Try to see whether request is too large to *ever* be 2367 * satisfied, in order to prevent deadlock. We arbitrarily 2368 * decide to limit maximum size requests to max_page_get. 2369 */ 2370 if (npages >= max_page_get) { 2371 if ((flags & PG_WAIT) == 0) { 2372 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG, 2373 "page_create_toobig:vp %p off %llx npages " 2374 "%lu max_page_get %lu", 2375 vp, off, npages, max_page_get); 2376 return (NULL); 2377 } else { 2378 cmn_err(CE_WARN, 2379 "Request for too much kernel memory " 2380 "(%lu bytes), will hang forever", bytes); 2381 for (;;) 2382 delay(1000000000); 2383 } 2384 } 2385 2386 if (!kcage_on || panicstr) { 2387 /* 2388 * Cage is OFF, or we are single threaded in 2389 * panic, so make everything a RELOC request. 2390 */ 2391 flags &= ~PG_NORELOC; 2392 } 2393 2394 if (freemem <= throttlefree + npages) 2395 if (!page_create_throttle(npages, flags)) 2396 return (NULL); 2397 2398 /* 2399 * If cage is on, dampen draw from cage when available 2400 * cage space is low. 2401 */ 2402 if ((flags & PG_NORELOC) && 2403 kcage_freemem < kcage_throttlefree + npages) { 2404 2405 /* 2406 * The cage is on, the caller wants PG_NORELOC 2407 * pages and available cage memory is very low. 2408 * Call kcage_create_throttle() to attempt to 2409 * control demand on the cage. 2410 */ 2411 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) 2412 return (NULL); 2413 } 2414 2415 VM_STAT_ADD(page_create_cnt[0]); 2416 2417 enough = 0; 2418 pcf_index = PCF_INDEX(); 2419 2420 p = &pcf[pcf_index]; 2421 q = &pcf[PCF_FANOUT]; 2422 for (i = 0; i < PCF_FANOUT; i++) { 2423 if (p->pcf_count > npages) { 2424 /* 2425 * a good one to try. 2426 */ 2427 mutex_enter(&p->pcf_lock); 2428 if (p->pcf_count > npages) { 2429 p->pcf_count -= (uint_t)npages; 2430 /* 2431 * freemem is not protected by any lock. 2432 * Thus, we cannot have any assertion 2433 * containing freemem here. 
2434 */ 2435 freemem -= npages; 2436 enough = 1; 2437 mutex_exit(&p->pcf_lock); 2438 break; 2439 } 2440 mutex_exit(&p->pcf_lock); 2441 } 2442 p++; 2443 if (p >= q) { 2444 p = pcf; 2445 } 2446 } 2447 2448 if (!enough) { 2449 /* 2450 * Have to look harder. If npages is greater than 2451 * one, then we might have to coalesce the counters. 2452 * 2453 * Go wait. We come back having accounted 2454 * for the memory. 2455 */ 2456 VM_STAT_ADD(page_create_cnt[1]); 2457 if (!page_create_wait(npages, flags)) { 2458 VM_STAT_ADD(page_create_cnt[2]); 2459 return (NULL); 2460 } 2461 } 2462 2463 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS, 2464 "page_create_success:vp %p off %llx", vp, off); 2465 2466 /* 2467 * If satisfying this request has left us with too little 2468 * memory, start the wheels turning to get some back. The 2469 * first clause of the test prevents waking up the pageout 2470 * daemon in situations where it would decide that there's 2471 * nothing to do. 2472 */ 2473 if (nscan < desscan && freemem < minfree) { 2474 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2475 "pageout_cv_signal:freemem %ld", freemem); 2476 cv_signal(&proc_pageout->p_cv); 2477 } 2478 2479 /* 2480 * Loop around collecting the requested number of pages. 2481 * Most of the time, we have to `create' a new page. With 2482 * this in mind, pull the page off the free list before 2483 * getting the hash lock. This will minimize the hash 2484 * lock hold time, nesting, and the like. If it turns 2485 * out we don't need the page, we put it back at the end. 2486 */ 2487 while (npages--) { 2488 page_t *pp; 2489 kmutex_t *phm = NULL; 2490 ulong_t index; 2491 2492 index = PAGE_HASH_FUNC(vp, off); 2493 top: 2494 ASSERT(phm == NULL); 2495 ASSERT(index == PAGE_HASH_FUNC(vp, off)); 2496 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 2497 2498 if (npp == NULL) { 2499 /* 2500 * Try to get a page from the freelist (ie, 2501 * a page with no [vp, off] tag). If that 2502 * fails, use the cachelist. 2503 * 2504 * During the first attempt at both the free 2505 * and cache lists we try for the correct color. 2506 */ 2507 /* 2508 * XXXX-how do we deal with virtual indexed 2509 * caches and and colors? 2510 */ 2511 VM_STAT_ADD(page_create_cnt[4]); 2512 /* 2513 * Get lgroup to allocate next page of shared memory 2514 * from and use it to specify where to allocate 2515 * the physical memory 2516 */ 2517 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 2518 npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 2519 flags | PG_MATCH_COLOR, lgrp); 2520 if (npp == NULL) { 2521 npp = page_get_cachelist(vp, off, seg, 2522 vaddr, flags | PG_MATCH_COLOR, lgrp); 2523 if (npp == NULL) { 2524 npp = page_create_get_something(vp, 2525 off, seg, vaddr, 2526 flags & ~PG_MATCH_COLOR); 2527 } 2528 2529 if (PP_ISAGED(npp) == 0) { 2530 /* 2531 * Since this page came from the 2532 * cachelist, we must destroy the 2533 * old vnode association. 2534 */ 2535 page_hashout(npp, NULL); 2536 } 2537 } 2538 } 2539 2540 /* 2541 * We own this page! 2542 */ 2543 ASSERT(PAGE_EXCL(npp)); 2544 ASSERT(npp->p_vnode == NULL); 2545 ASSERT(!hat_page_is_mapped(npp)); 2546 PP_CLRFREE(npp); 2547 PP_CLRAGED(npp); 2548 2549 /* 2550 * Here we have a page in our hot little mits and are 2551 * just waiting to stuff it on the appropriate lists. 2552 * Get the mutex and check to see if it really does 2553 * not exist. 
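 *
 * If no page with this [vp, off] identity is found under the hash
 * mutex, the freshly allocated page (npp) is hashed in and takes
 * that identity.  If one is found, a PG_EXCL caller undoes everything
 * built so far and the call fails, while a PG_WAIT caller locks the
 * existing page and uses it instead.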
2554 */ 2555 phm = PAGE_HASH_MUTEX(index); 2556 mutex_enter(phm); 2557 PAGE_HASH_SEARCH(index, pp, vp, off); 2558 if (pp == NULL) { 2559 VM_STAT_ADD(page_create_new); 2560 pp = npp; 2561 npp = NULL; 2562 if (!page_hashin(pp, vp, off, phm)) { 2563 /* 2564 * Since we hold the page hash mutex and 2565 * just searched for this page, page_hashin 2566 * had better not fail. If it does, that 2567 * means somethread did not follow the 2568 * page hash mutex rules. Panic now and 2569 * get it over with. As usual, go down 2570 * holding all the locks. 2571 */ 2572 ASSERT(MUTEX_HELD(phm)); 2573 panic("page_create: " 2574 "hashin failed %p %p %llx %p", 2575 (void *)pp, (void *)vp, off, (void *)phm); 2576 /*NOTREACHED*/ 2577 } 2578 ASSERT(MUTEX_HELD(phm)); 2579 mutex_exit(phm); 2580 phm = NULL; 2581 2582 /* 2583 * Hat layer locking need not be done to set 2584 * the following bits since the page is not hashed 2585 * and was on the free list (i.e., had no mappings). 2586 * 2587 * Set the reference bit to protect 2588 * against immediate pageout 2589 * 2590 * XXXmh modify freelist code to set reference 2591 * bit so we don't have to do it here. 2592 */ 2593 page_set_props(pp, P_REF); 2594 found_on_free++; 2595 } else { 2596 VM_STAT_ADD(page_create_exists); 2597 if (flags & PG_EXCL) { 2598 /* 2599 * Found an existing page, and the caller 2600 * wanted all new pages. Undo all of the work 2601 * we have done. 2602 */ 2603 mutex_exit(phm); 2604 phm = NULL; 2605 while (plist != NULL) { 2606 pp = plist; 2607 page_sub(&plist, pp); 2608 page_io_unlock(pp); 2609 /* large pages should not end up here */ 2610 ASSERT(pp->p_szc == 0); 2611 /*LINTED: constant in conditional ctx*/ 2612 VN_DISPOSE(pp, B_INVAL, 0, kcred); 2613 } 2614 VM_STAT_ADD(page_create_found_one); 2615 goto fail; 2616 } 2617 ASSERT(flags & PG_WAIT); 2618 if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) { 2619 /* 2620 * Start all over again if we blocked trying 2621 * to lock the page. 2622 */ 2623 mutex_exit(phm); 2624 VM_STAT_ADD(page_create_page_lock_failed); 2625 phm = NULL; 2626 goto top; 2627 } 2628 mutex_exit(phm); 2629 phm = NULL; 2630 2631 if (PP_ISFREE(pp)) { 2632 ASSERT(PP_ISAGED(pp) == 0); 2633 VM_STAT_ADD(pagecnt.pc_get_cache); 2634 page_list_sub(pp, PG_CACHE_LIST); 2635 PP_CLRFREE(pp); 2636 found_on_free++; 2637 } 2638 } 2639 2640 /* 2641 * Got a page! It is locked. Acquire the i/o 2642 * lock since we are going to use the p_next and 2643 * p_prev fields to link the requested pages together. 2644 */ 2645 page_io_lock(pp); 2646 page_add(&plist, pp); 2647 plist = plist->p_next; 2648 off += PAGESIZE; 2649 vaddr += PAGESIZE; 2650 } 2651 2652 ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1); 2653 fail: 2654 if (npp != NULL) { 2655 /* 2656 * Did not need this page after all. 2657 * Put it back on the free list. 
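 * It is marked free and aged again and appended to the free list;
 * the pcf/freemem credit it represents is returned just below as
 * part of the overshoot accounting.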
2658 */ 2659 VM_STAT_ADD(page_create_putbacks); 2660 PP_SETFREE(npp); 2661 PP_SETAGED(npp); 2662 npp->p_offset = (u_offset_t)-1; 2663 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL); 2664 page_unlock(npp); 2665 2666 } 2667 2668 ASSERT(pages_req >= found_on_free); 2669 2670 { 2671 uint_t overshoot = (uint_t)(pages_req - found_on_free); 2672 2673 if (overshoot) { 2674 VM_STAT_ADD(page_create_overshoot); 2675 p = &pcf[pcf_index]; 2676 mutex_enter(&p->pcf_lock); 2677 if (p->pcf_block) { 2678 p->pcf_reserve += overshoot; 2679 } else { 2680 p->pcf_count += overshoot; 2681 if (p->pcf_wait) { 2682 mutex_enter(&new_freemem_lock); 2683 if (freemem_wait) { 2684 cv_signal(&freemem_cv); 2685 p->pcf_wait--; 2686 } else { 2687 p->pcf_wait = 0; 2688 } 2689 mutex_exit(&new_freemem_lock); 2690 } 2691 } 2692 mutex_exit(&p->pcf_lock); 2693 /* freemem is approximate, so this test OK */ 2694 if (!p->pcf_block) 2695 freemem += overshoot; 2696 } 2697 } 2698 2699 return (plist); 2700 } 2701 2702 /* 2703 * One or more constituent pages of this large page has been marked 2704 * toxic. Simply demote the large page to PAGESIZE pages and let 2705 * page_free() handle it. This routine should only be called by 2706 * large page free routines (page_free_pages() and page_destroy_pages(). 2707 * All pages are locked SE_EXCL and have already been marked free. 2708 */ 2709 static void 2710 page_free_toxic_pages(page_t *rootpp) 2711 { 2712 page_t *tpp; 2713 pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc); 2714 uint_t szc = rootpp->p_szc; 2715 2716 for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) { 2717 ASSERT(tpp->p_szc == szc); 2718 ASSERT((PAGE_EXCL(tpp) && 2719 !page_iolock_assert(tpp)) || panicstr); 2720 tpp->p_szc = 0; 2721 } 2722 2723 while (rootpp != NULL) { 2724 tpp = rootpp; 2725 page_sub(&rootpp, tpp); 2726 ASSERT(PP_ISFREE(tpp)); 2727 PP_CLRFREE(tpp); 2728 page_free(tpp, 1); 2729 } 2730 } 2731 2732 /* 2733 * Put page on the "free" list. 2734 * The free list is really two lists maintained by 2735 * the PSM of whatever machine we happen to be on. 2736 */ 2737 void 2738 page_free(page_t *pp, int dontneed) 2739 { 2740 struct pcf *p; 2741 uint_t pcf_index; 2742 2743 ASSERT((PAGE_EXCL(pp) && 2744 !page_iolock_assert(pp)) || panicstr); 2745 2746 if (PP_ISFREE(pp)) { 2747 panic("page_free: page %p is free", (void *)pp); 2748 } 2749 2750 if (pp->p_szc != 0) { 2751 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 2752 PP_ISKAS(pp)) { 2753 panic("page_free: anon or kernel " 2754 "or no vnode large page %p", (void *)pp); 2755 } 2756 page_demote_vp_pages(pp); 2757 ASSERT(pp->p_szc == 0); 2758 } 2759 2760 /* 2761 * The page_struct_lock need not be acquired to examine these 2762 * fields since the page has an "exclusive" lock. 2763 */ 2764 if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 2765 pp->p_slckcnt != 0) { 2766 panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d " 2767 "slckcnt = %d", pp, page_pptonum(pp), pp->p_lckcnt, 2768 pp->p_cowcnt, pp->p_slckcnt); 2769 /*NOTREACHED*/ 2770 } 2771 2772 ASSERT(!hat_page_getshare(pp)); 2773 2774 PP_SETFREE(pp); 2775 ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) || 2776 !hat_ismod(pp)); 2777 page_clr_all_props(pp); 2778 ASSERT(!hat_page_getshare(pp)); 2779 2780 /* 2781 * Now we add the page to the head of the free list. 2782 * But if this page is associated with a paged vnode 2783 * then we adjust the head forward so that the page is 2784 * effectively at the end of the list. 
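 *
 * Concretely: a page with no identity goes on the free list proper
 * (p_free and p_age set), while a page that still has a [vp, off]
 * identity goes on the cache list -- at the tail normally, or at the
 * head when the caller passed dontneed (and nopageage is not set),
 * making it a candidate for reuse sooner.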
2785 */ 2786 if (pp->p_vnode == NULL) { 2787 /* 2788 * Page has no identity, put it on the free list. 2789 */ 2790 PP_SETAGED(pp); 2791 pp->p_offset = (u_offset_t)-1; 2792 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 2793 VM_STAT_ADD(pagecnt.pc_free_free); 2794 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2795 "page_free_free:pp %p", pp); 2796 } else { 2797 PP_CLRAGED(pp); 2798 2799 if (!dontneed || nopageage) { 2800 /* move it to the tail of the list */ 2801 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL); 2802 2803 VM_STAT_ADD(pagecnt.pc_free_cache); 2804 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL, 2805 "page_free_cache_tail:pp %p", pp); 2806 } else { 2807 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD); 2808 2809 VM_STAT_ADD(pagecnt.pc_free_dontneed); 2810 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD, 2811 "page_free_cache_head:pp %p", pp); 2812 } 2813 } 2814 page_unlock(pp); 2815 2816 /* 2817 * Now do the `freemem' accounting. 2818 */ 2819 pcf_index = PCF_INDEX(); 2820 p = &pcf[pcf_index]; 2821 2822 mutex_enter(&p->pcf_lock); 2823 if (p->pcf_block) { 2824 p->pcf_reserve += 1; 2825 } else { 2826 p->pcf_count += 1; 2827 if (p->pcf_wait) { 2828 mutex_enter(&new_freemem_lock); 2829 /* 2830 * Check to see if some other thread 2831 * is actually waiting. Another bucket 2832 * may have woken it up by now. If there 2833 * are no waiters, then set our pcf_wait 2834 * count to zero to avoid coming in here 2835 * next time. Also, since only one page 2836 * was put on the free list, just wake 2837 * up one waiter. 2838 */ 2839 if (freemem_wait) { 2840 cv_signal(&freemem_cv); 2841 p->pcf_wait--; 2842 } else { 2843 p->pcf_wait = 0; 2844 } 2845 mutex_exit(&new_freemem_lock); 2846 } 2847 } 2848 mutex_exit(&p->pcf_lock); 2849 2850 /* freemem is approximate, so this test OK */ 2851 if (!p->pcf_block) 2852 freemem += 1; 2853 } 2854 2855 /* 2856 * Put page on the "free" list during initial startup. 2857 * This happens during initial single threaded execution. 2858 */ 2859 void 2860 page_free_at_startup(page_t *pp) 2861 { 2862 struct pcf *p; 2863 uint_t pcf_index; 2864 2865 page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT); 2866 VM_STAT_ADD(pagecnt.pc_free_free); 2867 2868 /* 2869 * Now do the `freemem' accounting.
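 * Startup is still single threaded, so the pcf bucket and freemem
 * can be updated without taking pcf_lock.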
2870 */ 2871 pcf_index = PCF_INDEX(); 2872 p = &pcf[pcf_index]; 2873 2874 ASSERT(p->pcf_block == 0); 2875 ASSERT(p->pcf_wait == 0); 2876 p->pcf_count += 1; 2877 2878 /* freemem is approximate, so this is OK */ 2879 freemem += 1; 2880 } 2881 2882 void 2883 page_free_pages(page_t *pp) 2884 { 2885 page_t *tpp, *rootpp = NULL; 2886 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 2887 pgcnt_t i; 2888 uint_t szc = pp->p_szc; 2889 2890 VM_STAT_ADD(pagecnt.pc_free_pages); 2891 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2892 "page_free_free:pp %p", pp); 2893 2894 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 2895 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 2896 panic("page_free_pages: not root page %p", (void *)pp); 2897 /*NOTREACHED*/ 2898 } 2899 2900 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 2901 ASSERT((PAGE_EXCL(tpp) && 2902 !page_iolock_assert(tpp)) || panicstr); 2903 if (PP_ISFREE(tpp)) { 2904 panic("page_free_pages: page %p is free", (void *)tpp); 2905 /*NOTREACHED*/ 2906 } 2907 if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 || 2908 tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) { 2909 panic("page_free_pages %p", (void *)tpp); 2910 /*NOTREACHED*/ 2911 } 2912 2913 ASSERT(!hat_page_getshare(tpp)); 2914 ASSERT(tpp->p_vnode == NULL); 2915 ASSERT(tpp->p_szc == szc); 2916 2917 PP_SETFREE(tpp); 2918 page_clr_all_props(tpp); 2919 PP_SETAGED(tpp); 2920 tpp->p_offset = (u_offset_t)-1; 2921 ASSERT(tpp->p_next == tpp); 2922 ASSERT(tpp->p_prev == tpp); 2923 page_list_concat(&rootpp, &tpp); 2924 } 2925 ASSERT(rootpp == pp); 2926 2927 page_list_add_pages(rootpp, 0); 2928 page_create_putback(pgcnt); 2929 } 2930 2931 int free_pages = 1; 2932 2933 /* 2934 * This routine attempts to return pages to the cachelist via page_release(). 2935 * It does not *have* to be successful in all cases, since the pageout scanner 2936 * will catch any pages it misses. It does need to be fast and not introduce 2937 * too much overhead. 2938 * 2939 * If a page isn't found on the unlocked sweep of the page_hash bucket, we 2940 * don't lock and retry. This is ok, since the page scanner will eventually 2941 * find any page we miss in free_vp_pages(). 2942 */ 2943 void 2944 free_vp_pages(vnode_t *vp, u_offset_t off, size_t len) 2945 { 2946 page_t *pp; 2947 u_offset_t eoff; 2948 extern int swap_in_range(vnode_t *, u_offset_t, size_t); 2949 2950 eoff = off + len; 2951 2952 if (free_pages == 0) 2953 return; 2954 if (swap_in_range(vp, off, len)) 2955 return; 2956 2957 for (; off < eoff; off += PAGESIZE) { 2958 2959 /* 2960 * find the page using a fast, but inexact search. It'll be OK 2961 * if a few pages slip through the cracks here. 2962 */ 2963 pp = page_exists(vp, off); 2964 2965 /* 2966 * If we didn't find the page (it may not exist), the page 2967 * is free, looks still in use (shared), or we can't lock it, 2968 * just give up. 2969 */ 2970 if (pp == NULL || 2971 PP_ISFREE(pp) || 2972 page_share_cnt(pp) > 0 || 2973 !page_trylock(pp, SE_EXCL)) 2974 continue; 2975 2976 /* 2977 * Once we have locked pp, verify that it's still the 2978 * correct page and not already free 2979 */ 2980 ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL)); 2981 if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) { 2982 page_unlock(pp); 2983 continue; 2984 } 2985 2986 /* 2987 * try to release the page... 2988 */ 2989 (void) page_release(pp, 1); 2990 } 2991 } 2992 2993 /* 2994 * Reclaim the given page from the free list. 
2995 * If pp is part of a large pages, only the given constituent page is reclaimed 2996 * and the large page it belonged to will be demoted. This can only happen 2997 * if the page is not on the cachelist. 2998 * 2999 * Returns 1 on success or 0 on failure. 3000 * 3001 * The page is unlocked if it can't be reclaimed (when freemem == 0). 3002 * If `lock' is non-null, it will be dropped and re-acquired if 3003 * the routine must wait while freemem is 0. 3004 * 3005 * As it turns out, boot_getpages() does this. It picks a page, 3006 * based on where OBP mapped in some address, gets its pfn, searches 3007 * the memsegs, locks the page, then pulls it off the free list! 3008 */ 3009 int 3010 page_reclaim(page_t *pp, kmutex_t *lock) 3011 { 3012 struct pcf *p; 3013 uint_t pcf_index; 3014 struct cpu *cpup; 3015 int enough; 3016 uint_t i; 3017 3018 ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1); 3019 ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp)); 3020 3021 /* 3022 * If `freemem' is 0, we cannot reclaim this page from the 3023 * freelist, so release every lock we might hold: the page, 3024 * and the `lock' before blocking. 3025 * 3026 * The only way `freemem' can become 0 while there are pages 3027 * marked free (have their p->p_free bit set) is when the 3028 * system is low on memory and doing a page_create(). In 3029 * order to guarantee that once page_create() starts acquiring 3030 * pages it will be able to get all that it needs since `freemem' 3031 * was decreased by the requested amount. So, we need to release 3032 * this page, and let page_create() have it. 3033 * 3034 * Since `freemem' being zero is not supposed to happen, just 3035 * use the usual hash stuff as a starting point. If that bucket 3036 * is empty, then assume the worst, and start at the beginning 3037 * of the pcf array. If we always start at the beginning 3038 * when acquiring more than one pcf lock, there won't be any 3039 * deadlock problems. 3040 */ 3041 3042 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */ 3043 3044 if (freemem <= throttlefree && !page_create_throttle(1l, 0)) { 3045 pcf_acquire_all(); 3046 goto page_reclaim_nomem; 3047 } 3048 3049 enough = 0; 3050 pcf_index = PCF_INDEX(); 3051 p = &pcf[pcf_index]; 3052 mutex_enter(&p->pcf_lock); 3053 if (p->pcf_count >= 1) { 3054 enough = 1; 3055 p->pcf_count--; 3056 } 3057 mutex_exit(&p->pcf_lock); 3058 3059 if (!enough) { 3060 VM_STAT_ADD(page_reclaim_zero); 3061 /* 3062 * Check again. Its possible that some other thread 3063 * could have been right behind us, and added one 3064 * to a list somewhere. Acquire each of the pcf locks 3065 * until we find a page. 3066 */ 3067 p = pcf; 3068 for (i = 0; i < PCF_FANOUT; i++) { 3069 mutex_enter(&p->pcf_lock); 3070 if (p->pcf_count >= 1) { 3071 p->pcf_count -= 1; 3072 enough = 1; 3073 break; 3074 } 3075 p++; 3076 } 3077 3078 if (!enough) { 3079 page_reclaim_nomem: 3080 /* 3081 * We really can't have page `pp'. 3082 * Time for the no-memory dance with 3083 * page_free(). This is just like 3084 * page_create_wait(). Plus the added 3085 * attraction of releasing whatever mutex 3086 * we held when we were called with in `lock'. 3087 * Page_unlock() will wakeup any thread 3088 * waiting around for this page. 3089 */ 3090 if (lock) { 3091 VM_STAT_ADD(page_reclaim_zero_locked); 3092 mutex_exit(lock); 3093 } 3094 page_unlock(pp); 3095 3096 /* 3097 * get this before we drop all the pcf locks. 
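 * ("this" being new_freemem_lock).  Taking it while the pcf locks
 * are still held closes the window in which page_free() could see
 * pcf_wait set but freemem_wait still zero, and so skip the wakeup
 * we are about to sleep for.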
3098 */ 3099 mutex_enter(&new_freemem_lock); 3100 3101 p = pcf; 3102 for (i = 0; i < PCF_FANOUT; i++) { 3103 p->pcf_wait++; 3104 mutex_exit(&p->pcf_lock); 3105 p++; 3106 } 3107 3108 freemem_wait++; 3109 cv_wait(&freemem_cv, &new_freemem_lock); 3110 freemem_wait--; 3111 3112 mutex_exit(&new_freemem_lock); 3113 3114 if (lock) { 3115 mutex_enter(lock); 3116 } 3117 return (0); 3118 } 3119 3120 /* 3121 * The pcf accounting has been done, 3122 * though none of the pcf_wait flags have been set, 3123 * drop the locks and continue on. 3124 */ 3125 while (p >= pcf) { 3126 mutex_exit(&p->pcf_lock); 3127 p--; 3128 } 3129 } 3130 3131 /* 3132 * freemem is not protected by any lock. Thus, we cannot 3133 * have any assertion containing freemem here. 3134 */ 3135 freemem -= 1; 3136 3137 VM_STAT_ADD(pagecnt.pc_reclaim); 3138 3139 /* 3140 * page_list_sub will handle the case where pp is a large page. 3141 * It's possible that the page was promoted while on the freelist 3142 */ 3143 if (PP_ISAGED(pp)) { 3144 page_list_sub(pp, PG_FREE_LIST); 3145 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE, 3146 "page_reclaim_free:pp %p", pp); 3147 } else { 3148 page_list_sub(pp, PG_CACHE_LIST); 3149 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE, 3150 "page_reclaim_cache:pp %p", pp); 3151 } 3152 3153 /* 3154 * clear the p_free & p_age bits since this page is no longer 3155 * on the free list. Notice that there was a brief time where 3156 * a page is marked as free, but is not on the list. 3157 * 3158 * Set the reference bit to protect against immediate pageout. 3159 */ 3160 PP_CLRFREE(pp); 3161 PP_CLRAGED(pp); 3162 page_set_props(pp, P_REF); 3163 3164 CPU_STATS_ENTER_K(); 3165 cpup = CPU; /* get cpup now that CPU cannot change */ 3166 CPU_STATS_ADDQ(cpup, vm, pgrec, 1); 3167 CPU_STATS_ADDQ(cpup, vm, pgfrec, 1); 3168 CPU_STATS_EXIT_K(); 3169 ASSERT(pp->p_szc == 0); 3170 3171 return (1); 3172 } 3173 3174 /* 3175 * Destroy identity of the page and put it back on 3176 * the page free list. Assumes that the caller has 3177 * acquired the "exclusive" lock on the page. 3178 */ 3179 void 3180 page_destroy(page_t *pp, int dontfree) 3181 { 3182 ASSERT((PAGE_EXCL(pp) && 3183 !page_iolock_assert(pp)) || panicstr); 3184 ASSERT(pp->p_slckcnt == 0 || panicstr); 3185 3186 if (pp->p_szc != 0) { 3187 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 3188 PP_ISKAS(pp)) { 3189 panic("page_destroy: anon or kernel or no vnode " 3190 "large page %p", (void *)pp); 3191 } 3192 page_demote_vp_pages(pp); 3193 ASSERT(pp->p_szc == 0); 3194 } 3195 3196 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp); 3197 3198 /* 3199 * Unload translations, if any, then hash out the 3200 * page to erase its identity. 3201 */ 3202 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3203 page_hashout(pp, NULL); 3204 3205 if (!dontfree) { 3206 /* 3207 * Acquire the "freemem_lock" for availrmem. 3208 * The page_struct_lock need not be acquired for lckcnt 3209 * and cowcnt since the page has an "exclusive" lock. 3210 */ 3211 if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) { 3212 mutex_enter(&freemem_lock); 3213 if (pp->p_lckcnt != 0) { 3214 availrmem++; 3215 pp->p_lckcnt = 0; 3216 } 3217 if (pp->p_cowcnt != 0) { 3218 availrmem += pp->p_cowcnt; 3219 pp->p_cowcnt = 0; 3220 } 3221 mutex_exit(&freemem_lock); 3222 } 3223 /* 3224 * Put the page on the "free" list. 
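 * page_free() below does the pcf/freemem accounting and wakes any
 * waiters, now that the page's identity has been erased.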
3225 */ 3226 page_free(pp, 0); 3227 } 3228 } 3229 3230 void 3231 page_destroy_pages(page_t *pp) 3232 { 3233 3234 page_t *tpp, *rootpp = NULL; 3235 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 3236 pgcnt_t i, pglcks = 0; 3237 uint_t szc = pp->p_szc; 3238 3239 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 3240 3241 VM_STAT_ADD(pagecnt.pc_destroy_pages); 3242 3243 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp); 3244 3245 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 3246 panic("page_destroy_pages: not root page %p", (void *)pp); 3247 /*NOTREACHED*/ 3248 } 3249 3250 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 3251 ASSERT((PAGE_EXCL(tpp) && 3252 !page_iolock_assert(tpp)) || panicstr); 3253 ASSERT(tpp->p_slckcnt == 0 || panicstr); 3254 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 3255 page_hashout(tpp, NULL); 3256 ASSERT(tpp->p_offset == (u_offset_t)-1); 3257 if (tpp->p_lckcnt != 0) { 3258 pglcks++; 3259 tpp->p_lckcnt = 0; 3260 } else if (tpp->p_cowcnt != 0) { 3261 pglcks += tpp->p_cowcnt; 3262 tpp->p_cowcnt = 0; 3263 } 3264 ASSERT(!hat_page_getshare(tpp)); 3265 ASSERT(tpp->p_vnode == NULL); 3266 ASSERT(tpp->p_szc == szc); 3267 3268 PP_SETFREE(tpp); 3269 page_clr_all_props(tpp); 3270 PP_SETAGED(tpp); 3271 ASSERT(tpp->p_next == tpp); 3272 ASSERT(tpp->p_prev == tpp); 3273 page_list_concat(&rootpp, &tpp); 3274 } 3275 3276 ASSERT(rootpp == pp); 3277 if (pglcks != 0) { 3278 mutex_enter(&freemem_lock); 3279 availrmem += pglcks; 3280 mutex_exit(&freemem_lock); 3281 } 3282 3283 page_list_add_pages(rootpp, 0); 3284 page_create_putback(pgcnt); 3285 } 3286 3287 /* 3288 * Similar to page_destroy(), but destroys pages which are 3289 * locked and known to be on the page free list. Since 3290 * the page is known to be free and locked, no one can access 3291 * it. 3292 * 3293 * Also, the number of free pages does not change. 3294 */ 3295 void 3296 page_destroy_free(page_t *pp) 3297 { 3298 ASSERT(PAGE_EXCL(pp)); 3299 ASSERT(PP_ISFREE(pp)); 3300 ASSERT(pp->p_vnode); 3301 ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0); 3302 ASSERT(!hat_page_is_mapped(pp)); 3303 ASSERT(PP_ISAGED(pp) == 0); 3304 ASSERT(pp->p_szc == 0); 3305 3306 VM_STAT_ADD(pagecnt.pc_destroy_free); 3307 page_list_sub(pp, PG_CACHE_LIST); 3308 3309 page_hashout(pp, NULL); 3310 ASSERT(pp->p_vnode == NULL); 3311 ASSERT(pp->p_offset == (u_offset_t)-1); 3312 ASSERT(pp->p_hash == NULL); 3313 3314 PP_SETAGED(pp); 3315 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 3316 page_unlock(pp); 3317 3318 mutex_enter(&new_freemem_lock); 3319 if (freemem_wait) { 3320 cv_signal(&freemem_cv); 3321 } 3322 mutex_exit(&new_freemem_lock); 3323 } 3324 3325 /* 3326 * Rename the page "opp" to have an identity specified 3327 * by [vp, off]. If a page already exists with this name 3328 * it is locked and destroyed. Note that the page's 3329 * translations are not unloaded during the rename. 3330 * 3331 * This routine is used by the anon layer to "steal" the 3332 * original page and is not unlike destroying a page and 3333 * creating a new page using the same page frame. 3334 * 3335 * XXX -- Could deadlock if caller 1 tries to rename A to B while 3336 * caller 2 tries to rename B to A. 
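 * (Each caller holds the exclusive lock on its own page while it
 * waits to lock the page that currently owns the target identity,
 * so the two renames can end up waiting on each other forever.)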
3337 */ 3338 void 3339 page_rename(page_t *opp, vnode_t *vp, u_offset_t off) 3340 { 3341 page_t *pp; 3342 int olckcnt = 0; 3343 int ocowcnt = 0; 3344 kmutex_t *phm; 3345 ulong_t index; 3346 3347 ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp)); 3348 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3349 ASSERT(PP_ISFREE(opp) == 0); 3350 3351 VM_STAT_ADD(page_rename_count); 3352 3353 TRACE_3(TR_FAC_VM, TR_PAGE_RENAME, 3354 "page rename:pp %p vp %p off %llx", opp, vp, off); 3355 3356 /* 3357 * CacheFS may call page_rename for a large NFS page 3358 * when both CacheFS and NFS mount points are used 3359 * by applications. Demote this large page before 3360 * renaming it, to ensure that there are no "partial" 3361 * large pages left lying around. 3362 */ 3363 if (opp->p_szc != 0) { 3364 vnode_t *ovp = opp->p_vnode; 3365 ASSERT(ovp != NULL); 3366 ASSERT(!IS_SWAPFSVP(ovp)); 3367 ASSERT(!VN_ISKAS(ovp)); 3368 page_demote_vp_pages(opp); 3369 ASSERT(opp->p_szc == 0); 3370 } 3371 3372 page_hashout(opp, NULL); 3373 PP_CLRAGED(opp); 3374 3375 /* 3376 * Acquire the appropriate page hash lock, since 3377 * we're going to rename the page. 3378 */ 3379 index = PAGE_HASH_FUNC(vp, off); 3380 phm = PAGE_HASH_MUTEX(index); 3381 mutex_enter(phm); 3382 top: 3383 /* 3384 * Look for an existing page with this name and destroy it if found. 3385 * By holding the page hash lock all the way to the page_hashin() 3386 * call, we are assured that no page can be created with this 3387 * identity. In the case when the phm lock is dropped to undo any 3388 * hat layer mappings, the existing page is held with an "exclusive" 3389 * lock, again preventing another page from being created with 3390 * this identity. 3391 */ 3392 PAGE_HASH_SEARCH(index, pp, vp, off); 3393 if (pp != NULL) { 3394 VM_STAT_ADD(page_rename_exists); 3395 3396 /* 3397 * As it turns out, this is one of only two places where 3398 * page_lock() needs to hold the passed in lock in the 3399 * successful case. In all of the others, the lock could 3400 * be dropped as soon as the attempt is made to lock 3401 * the page. It is tempting to add yet another arguement, 3402 * PL_KEEP or PL_DROP, to let page_lock know what to do. 3403 */ 3404 if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) { 3405 /* 3406 * Went to sleep because the page could not 3407 * be locked. We were woken up when the page 3408 * was unlocked, or when the page was destroyed. 3409 * In either case, `phm' was dropped while we 3410 * slept. Hence we should not just roar through 3411 * this loop. 3412 */ 3413 goto top; 3414 } 3415 3416 /* 3417 * If an existing page is a large page, then demote 3418 * it to ensure that no "partial" large pages are 3419 * "created" after page_rename. An existing page 3420 * can be a CacheFS page, and can't belong to swapfs. 3421 */ 3422 if (hat_page_is_mapped(pp)) { 3423 /* 3424 * Unload translations. Since we hold the 3425 * exclusive lock on this page, the page 3426 * can not be changed while we drop phm. 3427 * This is also not a lock protocol violation, 3428 * but rather the proper way to do things. 
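 * The exclusive lock on the existing page is what keeps its identity
 * stable while phm is dropped; phm is reacquired before the page is
 * finally hashed out below.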
3429 */ 3430 mutex_exit(phm); 3431 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3432 if (pp->p_szc != 0) { 3433 ASSERT(!IS_SWAPFSVP(vp)); 3434 ASSERT(!VN_ISKAS(vp)); 3435 page_demote_vp_pages(pp); 3436 ASSERT(pp->p_szc == 0); 3437 } 3438 mutex_enter(phm); 3439 } else if (pp->p_szc != 0) { 3440 ASSERT(!IS_SWAPFSVP(vp)); 3441 ASSERT(!VN_ISKAS(vp)); 3442 mutex_exit(phm); 3443 page_demote_vp_pages(pp); 3444 ASSERT(pp->p_szc == 0); 3445 mutex_enter(phm); 3446 } 3447 page_hashout(pp, phm); 3448 } 3449 /* 3450 * Hash in the page with the new identity. 3451 */ 3452 if (!page_hashin(opp, vp, off, phm)) { 3453 /* 3454 * We were holding phm while we searched for [vp, off] 3455 * and only dropped phm if we found and locked a page. 3456 * If we can't create this page now, then some thing 3457 * is really broken. 3458 */ 3459 panic("page_rename: Can't hash in page: %p", (void *)pp); 3460 /*NOTREACHED*/ 3461 } 3462 3463 ASSERT(MUTEX_HELD(phm)); 3464 mutex_exit(phm); 3465 3466 /* 3467 * Now that we have dropped phm, lets get around to finishing up 3468 * with pp. 3469 */ 3470 if (pp != NULL) { 3471 ASSERT(!hat_page_is_mapped(pp)); 3472 /* for now large pages should not end up here */ 3473 ASSERT(pp->p_szc == 0); 3474 /* 3475 * Save the locks for transfer to the new page and then 3476 * clear them so page_free doesn't think they're important. 3477 * The page_struct_lock need not be acquired for lckcnt and 3478 * cowcnt since the page has an "exclusive" lock. 3479 */ 3480 olckcnt = pp->p_lckcnt; 3481 ocowcnt = pp->p_cowcnt; 3482 pp->p_lckcnt = pp->p_cowcnt = 0; 3483 3484 /* 3485 * Put the page on the "free" list after we drop 3486 * the lock. The less work under the lock the better. 3487 */ 3488 /*LINTED: constant in conditional context*/ 3489 VN_DISPOSE(pp, B_FREE, 0, kcred); 3490 } 3491 3492 /* 3493 * Transfer the lock count from the old page (if any). 3494 * The page_struct_lock need not be acquired for lckcnt and 3495 * cowcnt since the page has an "exclusive" lock. 3496 */ 3497 opp->p_lckcnt += olckcnt; 3498 opp->p_cowcnt += ocowcnt; 3499 } 3500 3501 /* 3502 * low level routine to add page `pp' to the hash and vp chains for [vp, offset] 3503 * 3504 * Pages are normally inserted at the start of a vnode's v_pages list. 3505 * If the vnode is VMODSORT and the page is modified, it goes at the end. 3506 * This can happen when a modified page is relocated for DR. 3507 * 3508 * Returns 1 on success and 0 on failure. 3509 */ 3510 static int 3511 page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset) 3512 { 3513 page_t **listp; 3514 page_t *tp; 3515 ulong_t index; 3516 3517 ASSERT(PAGE_EXCL(pp)); 3518 ASSERT(vp != NULL); 3519 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3520 3521 /* 3522 * Be sure to set these up before the page is inserted on the hash 3523 * list. As soon as the page is placed on the list some other 3524 * thread might get confused and wonder how this page could 3525 * possibly hash to this list. 3526 */ 3527 pp->p_vnode = vp; 3528 pp->p_offset = offset; 3529 3530 /* 3531 * record if this page is on a swap vnode 3532 */ 3533 if ((vp->v_flag & VISSWAP) != 0) 3534 PP_SETSWAP(pp); 3535 3536 index = PAGE_HASH_FUNC(vp, offset); 3537 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index))); 3538 listp = &page_hash[index]; 3539 3540 /* 3541 * If this page is already hashed in, fail this attempt to add it. 
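 * The walk below is done while the hash chain's mutex is held (see
 * the ASSERT above), so a concurrent hashin of the same [vp, offset]
 * cannot slip past this check.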
3542 */ 3543 for (tp = *listp; tp != NULL; tp = tp->p_hash) { 3544 if (tp->p_vnode == vp && tp->p_offset == offset) { 3545 pp->p_vnode = NULL; 3546 pp->p_offset = (u_offset_t)(-1); 3547 return (0); 3548 } 3549 } 3550 pp->p_hash = *listp; 3551 *listp = pp; 3552 3553 /* 3554 * Add the page to the vnode's list of pages 3555 */ 3556 if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp)) 3557 listp = &vp->v_pages->p_vpprev->p_vpnext; 3558 else 3559 listp = &vp->v_pages; 3560 3561 page_vpadd(listp, pp); 3562 3563 return (1); 3564 } 3565 3566 /* 3567 * Add page `pp' to both the hash and vp chains for [vp, offset]. 3568 * 3569 * Returns 1 on success and 0 on failure. 3570 * If hold is passed in, it is not dropped. 3571 */ 3572 int 3573 page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold) 3574 { 3575 kmutex_t *phm = NULL; 3576 kmutex_t *vphm; 3577 int rc; 3578 3579 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3580 3581 TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN, 3582 "page_hashin:pp %p vp %p offset %llx", 3583 pp, vp, offset); 3584 3585 VM_STAT_ADD(hashin_count); 3586 3587 if (hold != NULL) 3588 phm = hold; 3589 else { 3590 VM_STAT_ADD(hashin_not_held); 3591 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset)); 3592 mutex_enter(phm); 3593 } 3594 3595 vphm = page_vnode_mutex(vp); 3596 mutex_enter(vphm); 3597 rc = page_do_hashin(pp, vp, offset); 3598 mutex_exit(vphm); 3599 if (hold == NULL) 3600 mutex_exit(phm); 3601 if (rc == 0) 3602 VM_STAT_ADD(hashin_already); 3603 return (rc); 3604 } 3605 3606 /* 3607 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3608 * All mutexes must be held 3609 */ 3610 static void 3611 page_do_hashout(page_t *pp) 3612 { 3613 page_t **hpp; 3614 page_t *hp; 3615 vnode_t *vp = pp->p_vnode; 3616 3617 ASSERT(vp != NULL); 3618 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3619 3620 /* 3621 * First, take pp off of its hash chain. 3622 */ 3623 hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)]; 3624 3625 for (;;) { 3626 hp = *hpp; 3627 if (hp == pp) 3628 break; 3629 if (hp == NULL) { 3630 panic("page_do_hashout"); 3631 /*NOTREACHED*/ 3632 } 3633 hpp = &hp->p_hash; 3634 } 3635 *hpp = pp->p_hash; 3636 3637 /* 3638 * Now remove it from its associated vnode. 3639 */ 3640 if (vp->v_pages) 3641 page_vpsub(&vp->v_pages, pp); 3642 3643 pp->p_hash = NULL; 3644 page_clr_all_props(pp); 3645 PP_CLRSWAP(pp); 3646 pp->p_vnode = NULL; 3647 pp->p_offset = (u_offset_t)-1; 3648 } 3649 3650 /* 3651 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3652 * 3653 * When `phm' is non-NULL it contains the address of the mutex protecting the 3654 * hash list pp is on. It is not dropped. 3655 */ 3656 void 3657 page_hashout(page_t *pp, kmutex_t *phm) 3658 { 3659 vnode_t *vp; 3660 ulong_t index; 3661 kmutex_t *nphm; 3662 kmutex_t *vphm; 3663 kmutex_t *sep; 3664 3665 ASSERT(phm != NULL ? 
MUTEX_HELD(phm) : 1); 3666 ASSERT(pp->p_vnode != NULL); 3667 ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr); 3668 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode))); 3669 3670 vp = pp->p_vnode; 3671 3672 TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT, 3673 "page_hashout:pp %p vp %p", pp, vp); 3674 3675 /* Kernel probe */ 3676 TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */, 3677 tnf_opaque, vnode, vp, 3678 tnf_offset, offset, pp->p_offset); 3679 3680 /* 3681 * 3682 */ 3683 VM_STAT_ADD(hashout_count); 3684 index = PAGE_HASH_FUNC(vp, pp->p_offset); 3685 if (phm == NULL) { 3686 VM_STAT_ADD(hashout_not_held); 3687 nphm = PAGE_HASH_MUTEX(index); 3688 mutex_enter(nphm); 3689 } 3690 ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1); 3691 3692 3693 /* 3694 * grab page vnode mutex and remove it... 3695 */ 3696 vphm = page_vnode_mutex(vp); 3697 mutex_enter(vphm); 3698 3699 page_do_hashout(pp); 3700 3701 mutex_exit(vphm); 3702 if (phm == NULL) 3703 mutex_exit(nphm); 3704 3705 /* 3706 * Wake up processes waiting for this page. The page's 3707 * identity has been changed, and is probably not the 3708 * desired page any longer. 3709 */ 3710 sep = page_se_mutex(pp); 3711 mutex_enter(sep); 3712 pp->p_selock &= ~SE_EWANTED; 3713 if (CV_HAS_WAITERS(&pp->p_cv)) 3714 cv_broadcast(&pp->p_cv); 3715 mutex_exit(sep); 3716 } 3717 3718 /* 3719 * Add the page to the front of a linked list of pages 3720 * using the p_next & p_prev pointers for the list. 3721 * The caller is responsible for protecting the list pointers. 3722 */ 3723 void 3724 page_add(page_t **ppp, page_t *pp) 3725 { 3726 ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3727 3728 page_add_common(ppp, pp); 3729 } 3730 3731 3732 3733 /* 3734 * Common code for page_add() and mach_page_add() 3735 */ 3736 void 3737 page_add_common(page_t **ppp, page_t *pp) 3738 { 3739 if (*ppp == NULL) { 3740 pp->p_next = pp->p_prev = pp; 3741 } else { 3742 pp->p_next = *ppp; 3743 pp->p_prev = (*ppp)->p_prev; 3744 (*ppp)->p_prev = pp; 3745 pp->p_prev->p_next = pp; 3746 } 3747 *ppp = pp; 3748 } 3749 3750 3751 /* 3752 * Remove this page from a linked list of pages 3753 * using the p_next & p_prev pointers for the list. 3754 * 3755 * The caller is responsible for protecting the list pointers. 3756 */ 3757 void 3758 page_sub(page_t **ppp, page_t *pp) 3759 { 3760 ASSERT((PP_ISFREE(pp)) ? 1 : 3761 (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3762 3763 if (*ppp == NULL || pp == NULL) { 3764 panic("page_sub: bad arg(s): pp %p, *ppp %p", 3765 (void *)pp, (void *)(*ppp)); 3766 /*NOTREACHED*/ 3767 } 3768 3769 page_sub_common(ppp, pp); 3770 } 3771 3772 3773 /* 3774 * Common code for page_sub() and mach_page_sub() 3775 */ 3776 void 3777 page_sub_common(page_t **ppp, page_t *pp) 3778 { 3779 if (*ppp == pp) 3780 *ppp = pp->p_next; /* go to next page */ 3781 3782 if (*ppp == pp) 3783 *ppp = NULL; /* page list is gone */ 3784 else { 3785 pp->p_prev->p_next = pp->p_next; 3786 pp->p_next->p_prev = pp->p_prev; 3787 } 3788 pp->p_prev = pp->p_next = pp; /* make pp a list of one */ 3789 } 3790 3791 3792 /* 3793 * Break page list cppp into two lists with npages in the first list. 3794 * The tail is returned in nppp. 
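 *
 * For example, with a six page circular list on *oppp,
 *
 *	page_list_break(oppp, nppp, 2);
 *
 * leaves the first two pages on *oppp and the remaining four, still
 * circularly linked, on *nppp.  Passing npages == 0 moves the whole
 * list to *nppp.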
3795 */ 3796 void 3797 page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages) 3798 { 3799 page_t *s1pp = *oppp; 3800 page_t *s2pp; 3801 page_t *e1pp, *e2pp; 3802 long n = 0; 3803 3804 if (s1pp == NULL) { 3805 *nppp = NULL; 3806 return; 3807 } 3808 if (npages == 0) { 3809 *nppp = s1pp; 3810 *oppp = NULL; 3811 return; 3812 } 3813 for (n = 0, s2pp = *oppp; n < npages; n++) { 3814 s2pp = s2pp->p_next; 3815 } 3816 /* Fix head and tail of new lists */ 3817 e1pp = s2pp->p_prev; 3818 e2pp = s1pp->p_prev; 3819 s1pp->p_prev = e1pp; 3820 e1pp->p_next = s1pp; 3821 s2pp->p_prev = e2pp; 3822 e2pp->p_next = s2pp; 3823 3824 /* second list empty */ 3825 if (s2pp == s1pp) { 3826 *oppp = s1pp; 3827 *nppp = NULL; 3828 } else { 3829 *oppp = s1pp; 3830 *nppp = s2pp; 3831 } 3832 } 3833 3834 /* 3835 * Concatenate page list nppp onto the end of list ppp. 3836 */ 3837 void 3838 page_list_concat(page_t **ppp, page_t **nppp) 3839 { 3840 page_t *s1pp, *s2pp, *e1pp, *e2pp; 3841 3842 if (*nppp == NULL) { 3843 return; 3844 } 3845 if (*ppp == NULL) { 3846 *ppp = *nppp; 3847 return; 3848 } 3849 s1pp = *ppp; 3850 e1pp = s1pp->p_prev; 3851 s2pp = *nppp; 3852 e2pp = s2pp->p_prev; 3853 s1pp->p_prev = e2pp; 3854 e2pp->p_next = s1pp; 3855 e1pp->p_next = s2pp; 3856 s2pp->p_prev = e1pp; 3857 } 3858 3859 /* 3860 * return the next page in the page list 3861 */ 3862 page_t * 3863 page_list_next(page_t *pp) 3864 { 3865 return (pp->p_next); 3866 } 3867 3868 3869 /* 3870 * Add the page to the front of the linked list of pages 3871 * using p_vpnext/p_vpprev pointers for the list. 3872 * 3873 * The caller is responsible for protecting the lists. 3874 */ 3875 void 3876 page_vpadd(page_t **ppp, page_t *pp) 3877 { 3878 if (*ppp == NULL) { 3879 pp->p_vpnext = pp->p_vpprev = pp; 3880 } else { 3881 pp->p_vpnext = *ppp; 3882 pp->p_vpprev = (*ppp)->p_vpprev; 3883 (*ppp)->p_vpprev = pp; 3884 pp->p_vpprev->p_vpnext = pp; 3885 } 3886 *ppp = pp; 3887 } 3888 3889 /* 3890 * Remove this page from the linked list of pages 3891 * using p_vpnext/p_vpprev pointers for the list. 3892 * 3893 * The caller is responsible for protecting the lists. 3894 */ 3895 void 3896 page_vpsub(page_t **ppp, page_t *pp) 3897 { 3898 if (*ppp == NULL || pp == NULL) { 3899 panic("page_vpsub: bad arg(s): pp %p, *ppp %p", 3900 (void *)pp, (void *)(*ppp)); 3901 /*NOTREACHED*/ 3902 } 3903 3904 if (*ppp == pp) 3905 *ppp = pp->p_vpnext; /* go to next page */ 3906 3907 if (*ppp == pp) 3908 *ppp = NULL; /* page list is gone */ 3909 else { 3910 pp->p_vpprev->p_vpnext = pp->p_vpnext; 3911 pp->p_vpnext->p_vpprev = pp->p_vpprev; 3912 } 3913 pp->p_vpprev = pp->p_vpnext = pp; /* make pp a list of one */ 3914 } 3915 3916 /* 3917 * Lock a physical page into memory "long term". Used to support "lock 3918 * in memory" functions. Accepts the page to be locked, and a cow variable 3919 * to indicate whether a the lock will travel to the new page during 3920 * a potential copy-on-write. 3921 */ 3922 int 3923 page_pp_lock( 3924 page_t *pp, /* page to be locked */ 3925 int cow, /* cow lock */ 3926 int kernel) /* must succeed -- ignore checking */ 3927 { 3928 int r = 0; /* result -- assume failure */ 3929 3930 ASSERT(PAGE_LOCKED(pp)); 3931 3932 page_struct_lock(pp); 3933 /* 3934 * Acquire the "freemem_lock" for availrmem. 
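 * A cow lock charges availrmem and pages_locked here as well, and
 * both p_cowcnt and p_lckcnt are capped at PAGE_LOCK_MAXIMUM; the
 * kernel case skips the availrmem check because the caller has
 * already done that accounting.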
3935 */ 3936 if (cow) { 3937 mutex_enter(&freemem_lock); 3938 if ((availrmem > pages_pp_maximum) && 3939 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 3940 availrmem--; 3941 pages_locked++; 3942 mutex_exit(&freemem_lock); 3943 r = 1; 3944 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 3945 cmn_err(CE_WARN, 3946 "COW lock limit reached on pfn 0x%lx", 3947 page_pptonum(pp)); 3948 } 3949 } else 3950 mutex_exit(&freemem_lock); 3951 } else { 3952 if (pp->p_lckcnt) { 3953 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 3954 r = 1; 3955 if (++pp->p_lckcnt == 3956 (ushort_t)PAGE_LOCK_MAXIMUM) { 3957 cmn_err(CE_WARN, "Page lock limit " 3958 "reached on pfn 0x%lx", 3959 page_pptonum(pp)); 3960 } 3961 } 3962 } else { 3963 if (kernel) { 3964 /* availrmem accounting done by caller */ 3965 ++pp->p_lckcnt; 3966 r = 1; 3967 } else { 3968 mutex_enter(&freemem_lock); 3969 if (availrmem > pages_pp_maximum) { 3970 availrmem--; 3971 pages_locked++; 3972 ++pp->p_lckcnt; 3973 r = 1; 3974 } 3975 mutex_exit(&freemem_lock); 3976 } 3977 } 3978 } 3979 page_struct_unlock(pp); 3980 return (r); 3981 } 3982 3983 /* 3984 * Decommit a lock on a physical page frame. Account for cow locks if 3985 * appropriate. 3986 */ 3987 void 3988 page_pp_unlock( 3989 page_t *pp, /* page to be unlocked */ 3990 int cow, /* expect cow lock */ 3991 int kernel) /* this was a kernel lock */ 3992 { 3993 ASSERT(PAGE_LOCKED(pp)); 3994 3995 page_struct_lock(pp); 3996 /* 3997 * Acquire the "freemem_lock" for availrmem. 3998 * If cowcnt or lcknt is already 0 do nothing; i.e., we 3999 * could be called to unlock even if nothing is locked. This could 4000 * happen if locked file pages were truncated (removing the lock) 4001 * and the file was grown again and new pages faulted in; the new 4002 * pages are unlocked but the segment still thinks they're locked. 4003 */ 4004 if (cow) { 4005 if (pp->p_cowcnt) { 4006 mutex_enter(&freemem_lock); 4007 pp->p_cowcnt--; 4008 availrmem++; 4009 pages_locked--; 4010 mutex_exit(&freemem_lock); 4011 } 4012 } else { 4013 if (pp->p_lckcnt && --pp->p_lckcnt == 0) { 4014 if (!kernel) { 4015 mutex_enter(&freemem_lock); 4016 availrmem++; 4017 pages_locked--; 4018 mutex_exit(&freemem_lock); 4019 } 4020 } 4021 } 4022 page_struct_unlock(pp); 4023 } 4024 4025 /* 4026 * This routine reserves availrmem for npages; 4027 * flags: KM_NOSLEEP or KM_SLEEP 4028 * returns 1 on success or 0 on failure 4029 */ 4030 int 4031 page_resv(pgcnt_t npages, uint_t flags) 4032 { 4033 mutex_enter(&freemem_lock); 4034 while (availrmem < tune.t_minarmem + npages) { 4035 if (flags & KM_NOSLEEP) { 4036 mutex_exit(&freemem_lock); 4037 return (0); 4038 } 4039 mutex_exit(&freemem_lock); 4040 page_needfree(npages); 4041 kmem_reap(); 4042 delay(hz >> 2); 4043 page_needfree(-(spgcnt_t)npages); 4044 mutex_enter(&freemem_lock); 4045 } 4046 availrmem -= npages; 4047 mutex_exit(&freemem_lock); 4048 return (1); 4049 } 4050 4051 /* 4052 * This routine unreserves availrmem for npages; 4053 */ 4054 void 4055 page_unresv(pgcnt_t npages) 4056 { 4057 mutex_enter(&freemem_lock); 4058 availrmem += npages; 4059 mutex_exit(&freemem_lock); 4060 } 4061 4062 /* 4063 * See Statement at the beginning of segvn_lockop() regarding 4064 * the way we handle cowcnts and lckcnts. 4065 * 4066 * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage 4067 * that breaks COW has PROT_WRITE. 4068 * 4069 * Note that, we may also break COW in case we are softlocking 4070 * on read access during physio; 4071 * in this softlock case, the vpage may not have PROT_WRITE. 
4072 * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp' 4073 * if the vpage doesn't have PROT_WRITE. 4074 * 4075 * This routine is never called if we are stealing a page 4076 * in anon_private. 4077 * 4078 * The caller subtracted from availrmem for read only mapping. 4079 * if lckcnt is 1 increment availrmem. 4080 */ 4081 void 4082 page_pp_useclaim( 4083 page_t *opp, /* original page frame losing lock */ 4084 page_t *npp, /* new page frame gaining lock */ 4085 uint_t write_perm) /* set if vpage has PROT_WRITE */ 4086 { 4087 int payback = 0; 4088 4089 ASSERT(PAGE_LOCKED(opp)); 4090 ASSERT(PAGE_LOCKED(npp)); 4091 4092 page_struct_lock(opp); 4093 4094 ASSERT(npp->p_cowcnt == 0); 4095 ASSERT(npp->p_lckcnt == 0); 4096 4097 /* Don't use claim if nothing is locked (see page_pp_unlock above) */ 4098 if ((write_perm && opp->p_cowcnt != 0) || 4099 (!write_perm && opp->p_lckcnt != 0)) { 4100 4101 if (write_perm) { 4102 npp->p_cowcnt++; 4103 ASSERT(opp->p_cowcnt != 0); 4104 opp->p_cowcnt--; 4105 } else { 4106 4107 ASSERT(opp->p_lckcnt != 0); 4108 4109 /* 4110 * We didn't need availrmem decremented if p_lckcnt on 4111 * original page is 1. Here, we are unlocking 4112 * read-only copy belonging to original page and 4113 * are locking a copy belonging to new page. 4114 */ 4115 if (opp->p_lckcnt == 1) 4116 payback = 1; 4117 4118 npp->p_lckcnt++; 4119 opp->p_lckcnt--; 4120 } 4121 } 4122 if (payback) { 4123 mutex_enter(&freemem_lock); 4124 availrmem++; 4125 pages_useclaim--; 4126 mutex_exit(&freemem_lock); 4127 } 4128 page_struct_unlock(opp); 4129 } 4130 4131 /* 4132 * Simple claim adjust functions -- used to support changes in 4133 * claims due to changes in access permissions. Used by segvn_setprot(). 4134 */ 4135 int 4136 page_addclaim(page_t *pp) 4137 { 4138 int r = 0; /* result */ 4139 4140 ASSERT(PAGE_LOCKED(pp)); 4141 4142 page_struct_lock(pp); 4143 ASSERT(pp->p_lckcnt != 0); 4144 4145 if (pp->p_lckcnt == 1) { 4146 if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4147 --pp->p_lckcnt; 4148 r = 1; 4149 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4150 cmn_err(CE_WARN, 4151 "COW lock limit reached on pfn 0x%lx", 4152 page_pptonum(pp)); 4153 } 4154 } 4155 } else { 4156 mutex_enter(&freemem_lock); 4157 if ((availrmem > pages_pp_maximum) && 4158 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 4159 --availrmem; 4160 ++pages_claimed; 4161 mutex_exit(&freemem_lock); 4162 --pp->p_lckcnt; 4163 r = 1; 4164 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4165 cmn_err(CE_WARN, 4166 "COW lock limit reached on pfn 0x%lx", 4167 page_pptonum(pp)); 4168 } 4169 } else 4170 mutex_exit(&freemem_lock); 4171 } 4172 page_struct_unlock(pp); 4173 return (r); 4174 } 4175 4176 int 4177 page_subclaim(page_t *pp) 4178 { 4179 int r = 0; 4180 4181 ASSERT(PAGE_LOCKED(pp)); 4182 4183 page_struct_lock(pp); 4184 ASSERT(pp->p_cowcnt != 0); 4185 4186 if (pp->p_lckcnt) { 4187 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4188 r = 1; 4189 /* 4190 * for availrmem 4191 */ 4192 mutex_enter(&freemem_lock); 4193 availrmem++; 4194 pages_claimed--; 4195 mutex_exit(&freemem_lock); 4196 4197 pp->p_cowcnt--; 4198 4199 if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4200 cmn_err(CE_WARN, 4201 "Page lock limit reached on pfn 0x%lx", 4202 page_pptonum(pp)); 4203 } 4204 } 4205 } else { 4206 r = 1; 4207 pp->p_cowcnt--; 4208 pp->p_lckcnt++; 4209 } 4210 page_struct_unlock(pp); 4211 return (r); 4212 } 4213 4214 int 4215 page_addclaim_pages(page_t **ppa) 4216 { 4217 4218 pgcnt_t lckpgs = 0, pg_idx; 4219 4220 
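	/*
	 * Two passes under page_llock: first verify that every page in
	 * the array can take another cow claim (and count how many of
	 * them need the availrmem adjustment), then apply the
	 * lckcnt -> cowcnt transfers only once the whole batch is known
	 * to succeed.
	 */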
VM_STAT_ADD(pagecnt.pc_addclaim_pages); 4221 4222 mutex_enter(&page_llock); 4223 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4224 4225 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4226 ASSERT(ppa[pg_idx]->p_lckcnt != 0); 4227 if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4228 mutex_exit(&page_llock); 4229 return (0); 4230 } 4231 if (ppa[pg_idx]->p_lckcnt > 1) 4232 lckpgs++; 4233 } 4234 4235 if (lckpgs != 0) { 4236 mutex_enter(&freemem_lock); 4237 if (availrmem >= pages_pp_maximum + lckpgs) { 4238 availrmem -= lckpgs; 4239 pages_claimed += lckpgs; 4240 } else { 4241 mutex_exit(&freemem_lock); 4242 mutex_exit(&page_llock); 4243 return (0); 4244 } 4245 mutex_exit(&freemem_lock); 4246 } 4247 4248 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4249 ppa[pg_idx]->p_lckcnt--; 4250 ppa[pg_idx]->p_cowcnt++; 4251 } 4252 mutex_exit(&page_llock); 4253 return (1); 4254 } 4255 4256 int 4257 page_subclaim_pages(page_t **ppa) 4258 { 4259 pgcnt_t ulckpgs = 0, pg_idx; 4260 4261 VM_STAT_ADD(pagecnt.pc_subclaim_pages); 4262 4263 mutex_enter(&page_llock); 4264 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4265 4266 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4267 ASSERT(ppa[pg_idx]->p_cowcnt != 0); 4268 if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4269 mutex_exit(&page_llock); 4270 return (0); 4271 } 4272 if (ppa[pg_idx]->p_lckcnt != 0) 4273 ulckpgs++; 4274 } 4275 4276 if (ulckpgs != 0) { 4277 mutex_enter(&freemem_lock); 4278 availrmem += ulckpgs; 4279 pages_claimed -= ulckpgs; 4280 mutex_exit(&freemem_lock); 4281 } 4282 4283 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4284 ppa[pg_idx]->p_cowcnt--; 4285 ppa[pg_idx]->p_lckcnt++; 4286 4287 } 4288 mutex_exit(&page_llock); 4289 return (1); 4290 } 4291 4292 page_t * 4293 page_numtopp(pfn_t pfnum, se_t se) 4294 { 4295 page_t *pp; 4296 4297 retry: 4298 pp = page_numtopp_nolock(pfnum); 4299 if (pp == NULL) { 4300 return ((page_t *)NULL); 4301 } 4302 4303 /* 4304 * Acquire the appropriate lock on the page. 4305 */ 4306 while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) { 4307 if (page_pptonum(pp) != pfnum) 4308 goto retry; 4309 continue; 4310 } 4311 4312 if (page_pptonum(pp) != pfnum) { 4313 page_unlock(pp); 4314 goto retry; 4315 } 4316 4317 return (pp); 4318 } 4319 4320 page_t * 4321 page_numtopp_noreclaim(pfn_t pfnum, se_t se) 4322 { 4323 page_t *pp; 4324 4325 retry: 4326 pp = page_numtopp_nolock(pfnum); 4327 if (pp == NULL) { 4328 return ((page_t *)NULL); 4329 } 4330 4331 /* 4332 * Acquire the appropriate lock on the page. 4333 */ 4334 while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) { 4335 if (page_pptonum(pp) != pfnum) 4336 goto retry; 4337 continue; 4338 } 4339 4340 if (page_pptonum(pp) != pfnum) { 4341 page_unlock(pp); 4342 goto retry; 4343 } 4344 4345 return (pp); 4346 } 4347 4348 /* 4349 * This routine is like page_numtopp, but will only return page structs 4350 * for pages which are ok for loading into hardware using the page struct. 4351 */ 4352 page_t * 4353 page_numtopp_nowait(pfn_t pfnum, se_t se) 4354 { 4355 page_t *pp; 4356 4357 retry: 4358 pp = page_numtopp_nolock(pfnum); 4359 if (pp == NULL) { 4360 return ((page_t *)NULL); 4361 } 4362 4363 /* 4364 * Try to acquire the appropriate lock on the page. 
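 * Unlike page_numtopp(), this uses page_trylock() and simply returns
 * NULL if the page is free or cannot be locked without blocking.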
4365 */ 4366 if (PP_ISFREE(pp)) 4367 pp = NULL; 4368 else { 4369 if (!page_trylock(pp, se)) 4370 pp = NULL; 4371 else { 4372 if (page_pptonum(pp) != pfnum) { 4373 page_unlock(pp); 4374 goto retry; 4375 } 4376 if (PP_ISFREE(pp)) { 4377 page_unlock(pp); 4378 pp = NULL; 4379 } 4380 } 4381 } 4382 return (pp); 4383 } 4384 4385 /* 4386 * Returns a count of dirty pages that are in the process 4387 * of being written out. If 'cleanit' is set, try to push the page. 4388 */ 4389 pgcnt_t 4390 page_busy(int cleanit) 4391 { 4392 page_t *page0 = page_first(); 4393 page_t *pp = page0; 4394 pgcnt_t nppbusy = 0; 4395 u_offset_t off; 4396 4397 do { 4398 vnode_t *vp = pp->p_vnode; 4399 4400 /* 4401 * A page is a candidate for syncing if it is: 4402 * 4403 * (a) On neither the freelist nor the cachelist 4404 * (b) Hashed onto a vnode 4405 * (c) Not a kernel page 4406 * (d) Dirty 4407 * (e) Not part of a swapfile 4408 * (f) a page which belongs to a real vnode; eg has a non-null 4409 * v_vfsp pointer. 4410 * (g) Backed by a filesystem which doesn't have a 4411 * stubbed-out sync operation 4412 */ 4413 if (!PP_ISFREE(pp) && vp != NULL && !VN_ISKAS(vp) && 4414 hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL && 4415 vfs_can_sync(vp->v_vfsp)) { 4416 nppbusy++; 4417 vfs_syncprogress(); 4418 4419 if (!cleanit) 4420 continue; 4421 if (!page_trylock(pp, SE_EXCL)) 4422 continue; 4423 4424 if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) || 4425 pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 4426 !(hat_pagesync(pp, 4427 HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) { 4428 page_unlock(pp); 4429 continue; 4430 } 4431 off = pp->p_offset; 4432 VN_HOLD(vp); 4433 page_unlock(pp); 4434 (void) VOP_PUTPAGE(vp, off, PAGESIZE, 4435 B_ASYNC | B_FREE, kcred, NULL); 4436 VN_RELE(vp); 4437 } 4438 } while ((pp = page_next(pp)) != page0); 4439 4440 return (nppbusy); 4441 } 4442 4443 void page_invalidate_pages(void); 4444 4445 /* 4446 * callback handler to vm sub-system 4447 * 4448 * callers make sure no recursive entries to this func. 4449 */ 4450 /*ARGSUSED*/ 4451 boolean_t 4452 callb_vm_cpr(void *arg, int code) 4453 { 4454 if (code == CB_CODE_CPR_CHKPT) 4455 page_invalidate_pages(); 4456 return (B_TRUE); 4457 } 4458 4459 /* 4460 * Invalidate all pages of the system. 4461 * It shouldn't be called until all user page activities are all stopped. 4462 */ 4463 void 4464 page_invalidate_pages() 4465 { 4466 page_t *pp; 4467 page_t *page0; 4468 pgcnt_t nbusypages; 4469 int retry = 0; 4470 const int MAXRETRIES = 4; 4471 #if defined(__sparc) 4472 extern struct vnode prom_ppages; 4473 #endif /* __sparc */ 4474 4475 top: 4476 /* 4477 * Flush dirty pages and destroy the clean ones. 4478 */ 4479 nbusypages = 0; 4480 4481 pp = page0 = page_first(); 4482 do { 4483 struct vnode *vp; 4484 u_offset_t offset; 4485 int mod; 4486 4487 /* 4488 * skip the page if it has no vnode or the page associated 4489 * with the kernel vnode or prom allocated kernel mem. 4490 */ 4491 #if defined(__sparc) 4492 if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp) || 4493 vp == &prom_ppages) 4494 #else /* x86 doesn't have prom or prom_ppage */ 4495 if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp)) 4496 #endif /* __sparc */ 4497 continue; 4498 4499 /* 4500 * skip the page which is already free invalidated. 4501 */ 4502 if (PP_ISFREE(pp) && PP_ISAGED(pp)) 4503 continue; 4504 4505 /* 4506 * skip pages that are already locked or can't be "exclusively" 4507 * locked or are already free. 
After we lock the page, check 4508 * the free and age bits again to be sure it's not destroied 4509 * yet. 4510 * To achieve max. parallelization, we use page_trylock instead 4511 * of page_lock so that we don't get block on individual pages 4512 * while we have thousands of other pages to process. 4513 */ 4514 if (!page_trylock(pp, SE_EXCL)) { 4515 nbusypages++; 4516 continue; 4517 } else if (PP_ISFREE(pp)) { 4518 if (!PP_ISAGED(pp)) { 4519 page_destroy_free(pp); 4520 } else { 4521 page_unlock(pp); 4522 } 4523 continue; 4524 } 4525 /* 4526 * Is this page involved in some I/O? shared? 4527 * 4528 * The page_struct_lock need not be acquired to 4529 * examine these fields since the page has an 4530 * "exclusive" lock. 4531 */ 4532 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 4533 page_unlock(pp); 4534 continue; 4535 } 4536 4537 if (vp->v_type == VCHR) { 4538 panic("vp->v_type == VCHR"); 4539 /*NOTREACHED*/ 4540 } 4541 4542 if (!page_try_demote_pages(pp)) { 4543 page_unlock(pp); 4544 continue; 4545 } 4546 4547 /* 4548 * Check the modified bit. Leave the bits alone in hardware 4549 * (they will be modified if we do the putpage). 4550 */ 4551 mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) 4552 & P_MOD); 4553 if (mod) { 4554 offset = pp->p_offset; 4555 /* 4556 * Hold the vnode before releasing the page lock 4557 * to prevent it from being freed and re-used by 4558 * some other thread. 4559 */ 4560 VN_HOLD(vp); 4561 page_unlock(pp); 4562 /* 4563 * No error return is checked here. Callers such as 4564 * cpr deals with the dirty pages at the dump time 4565 * if this putpage fails. 4566 */ 4567 (void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL, 4568 kcred, NULL); 4569 VN_RELE(vp); 4570 } else { 4571 page_destroy(pp, 0); 4572 } 4573 } while ((pp = page_next(pp)) != page0); 4574 if (nbusypages && retry++ < MAXRETRIES) { 4575 delay(1); 4576 goto top; 4577 } 4578 } 4579 4580 /* 4581 * Replace the page "old" with the page "new" on the page hash and vnode lists 4582 * 4583 * the replacement must be done in place, ie the equivalent sequence: 4584 * 4585 * vp = old->p_vnode; 4586 * off = old->p_offset; 4587 * page_do_hashout(old) 4588 * page_do_hashin(new, vp, off) 4589 * 4590 * doesn't work, since 4591 * 1) if old is the only page on the vnode, the v_pages list has a window 4592 * where it looks empty. This will break file system assumptions. 4593 * and 4594 * 2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list. 
4595 */ 4596 static void 4597 page_do_relocate_hash(page_t *new, page_t *old) 4598 { 4599 page_t **hash_list; 4600 vnode_t *vp = old->p_vnode; 4601 kmutex_t *sep; 4602 4603 ASSERT(PAGE_EXCL(old)); 4604 ASSERT(PAGE_EXCL(new)); 4605 ASSERT(vp != NULL); 4606 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 4607 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset)))); 4608 4609 /* 4610 * First find old page on the page hash list 4611 */ 4612 hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)]; 4613 4614 for (;;) { 4615 if (*hash_list == old) 4616 break; 4617 if (*hash_list == NULL) { 4618 panic("page_do_hashout"); 4619 /*NOTREACHED*/ 4620 } 4621 hash_list = &(*hash_list)->p_hash; 4622 } 4623 4624 /* 4625 * update new and replace old with new on the page hash list 4626 */ 4627 new->p_vnode = old->p_vnode; 4628 new->p_offset = old->p_offset; 4629 new->p_hash = old->p_hash; 4630 *hash_list = new; 4631 4632 if ((new->p_vnode->v_flag & VISSWAP) != 0) 4633 PP_SETSWAP(new); 4634 4635 /* 4636 * replace old with new on the vnode's page list 4637 */ 4638 if (old->p_vpnext == old) { 4639 new->p_vpnext = new; 4640 new->p_vpprev = new; 4641 } else { 4642 new->p_vpnext = old->p_vpnext; 4643 new->p_vpprev = old->p_vpprev; 4644 new->p_vpnext->p_vpprev = new; 4645 new->p_vpprev->p_vpnext = new; 4646 } 4647 if (vp->v_pages == old) 4648 vp->v_pages = new; 4649 4650 /* 4651 * clear out the old page 4652 */ 4653 old->p_hash = NULL; 4654 old->p_vpnext = NULL; 4655 old->p_vpprev = NULL; 4656 old->p_vnode = NULL; 4657 PP_CLRSWAP(old); 4658 old->p_offset = (u_offset_t)-1; 4659 page_clr_all_props(old); 4660 4661 /* 4662 * Wake up processes waiting for this page. The page's 4663 * identity has been changed, and is probably not the 4664 * desired page any longer. 4665 */ 4666 sep = page_se_mutex(old); 4667 mutex_enter(sep); 4668 old->p_selock &= ~SE_EWANTED; 4669 if (CV_HAS_WAITERS(&old->p_cv)) 4670 cv_broadcast(&old->p_cv); 4671 mutex_exit(sep); 4672 } 4673 4674 /* 4675 * This function moves the identity of page "pp_old" to page "pp_new". 4676 * Both pages must be locked on entry. "pp_new" is free, has no identity, 4677 * and need not be hashed out from anywhere. 4678 */ 4679 void 4680 page_relocate_hash(page_t *pp_new, page_t *pp_old) 4681 { 4682 vnode_t *vp = pp_old->p_vnode; 4683 u_offset_t off = pp_old->p_offset; 4684 kmutex_t *phm, *vphm; 4685 4686 /* 4687 * Rehash two pages 4688 */ 4689 ASSERT(PAGE_EXCL(pp_old)); 4690 ASSERT(PAGE_EXCL(pp_new)); 4691 ASSERT(vp != NULL); 4692 ASSERT(pp_new->p_vnode == NULL); 4693 4694 /* 4695 * hashout then hashin while holding the mutexes 4696 */ 4697 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off)); 4698 mutex_enter(phm); 4699 vphm = page_vnode_mutex(vp); 4700 mutex_enter(vphm); 4701 4702 page_do_relocate_hash(pp_new, pp_old); 4703 4704 mutex_exit(vphm); 4705 mutex_exit(phm); 4706 4707 /* 4708 * The page_struct_lock need not be acquired for lckcnt and 4709 * cowcnt since the page has an "exclusive" lock. 4710 */ 4711 ASSERT(pp_new->p_lckcnt == 0); 4712 ASSERT(pp_new->p_cowcnt == 0); 4713 pp_new->p_lckcnt = pp_old->p_lckcnt; 4714 pp_new->p_cowcnt = pp_old->p_cowcnt; 4715 pp_old->p_lckcnt = pp_old->p_cowcnt = 0; 4716 4717 /* The following comment preserved from page_flip(). */ 4718 /* XXX - Do we need to protect fsdata? */ 4719 pp_new->p_fsdata = pp_old->p_fsdata; 4720 } 4721 4722 /* 4723 * Helper routine used to lock all remaining members of a 4724 * large page. The caller is responsible for passing in a locked 4725 * pp. 
If pp is a large page, then it succeeds in locking all the 4726 * remaining constituent pages or it returns with only the 4727 * original page locked. 4728 * 4729 * Returns 1 on success, 0 on failure. 4730 * 4731 * If success is returned this routine guarantees p_szc for all constituent 4732 * pages of a large page pp belongs to can't change. To achieve this we 4733 * recheck szc of pp after locking all constituent pages and retry if szc 4734 * changed (it could only decrease). Since hat_page_demote() needs an EXCL 4735 * lock on one of constituent pages it can't be running after all constituent 4736 * pages are locked. hat_page_demote() with a lock on a constituent page 4737 * outside of this large page (i.e. pp belonged to a larger large page) is 4738 * already done with all constituent pages of pp since the root's p_szc is 4739 * changed last. Therefore no need to synchronize with hat_page_demote() that 4740 * locked a constituent page outside of pp's current large page. 4741 */ 4742 #ifdef DEBUG 4743 uint32_t gpg_trylock_mtbf = 0; 4744 #endif 4745 4746 int 4747 group_page_trylock(page_t *pp, se_t se) 4748 { 4749 page_t *tpp; 4750 pgcnt_t npgs, i, j; 4751 uint_t pszc = pp->p_szc; 4752 4753 #ifdef DEBUG 4754 if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) { 4755 return (0); 4756 } 4757 #endif 4758 4759 if (pp != PP_GROUPLEADER(pp, pszc)) { 4760 return (0); 4761 } 4762 4763 retry: 4764 ASSERT(PAGE_LOCKED_SE(pp, se)); 4765 ASSERT(!PP_ISFREE(pp)); 4766 if (pszc == 0) { 4767 return (1); 4768 } 4769 npgs = page_get_pagecnt(pszc); 4770 tpp = pp + 1; 4771 for (i = 1; i < npgs; i++, tpp++) { 4772 if (!page_trylock(tpp, se)) { 4773 tpp = pp + 1; 4774 for (j = 1; j < i; j++, tpp++) { 4775 page_unlock(tpp); 4776 } 4777 return (0); 4778 } 4779 } 4780 if (pp->p_szc != pszc) { 4781 ASSERT(pp->p_szc < pszc); 4782 ASSERT(pp->p_vnode != NULL && !PP_ISKAS(pp) && 4783 !IS_SWAPFSVP(pp->p_vnode)); 4784 tpp = pp + 1; 4785 for (i = 1; i < npgs; i++, tpp++) { 4786 page_unlock(tpp); 4787 } 4788 pszc = pp->p_szc; 4789 goto retry; 4790 } 4791 return (1); 4792 } 4793 4794 void 4795 group_page_unlock(page_t *pp) 4796 { 4797 page_t *tpp; 4798 pgcnt_t npgs, i; 4799 4800 ASSERT(PAGE_LOCKED(pp)); 4801 ASSERT(!PP_ISFREE(pp)); 4802 ASSERT(pp == PP_PAGEROOT(pp)); 4803 npgs = page_get_pagecnt(pp->p_szc); 4804 for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) { 4805 page_unlock(tpp); 4806 } 4807 } 4808 4809 /* 4810 * returns 4811 * 0 : on success and *nrelocp is number of relocated PAGESIZE pages 4812 * ERANGE : this is not a base page 4813 * EBUSY : failure to get locks on the page/pages 4814 * ENOMEM : failure to obtain replacement pages 4815 * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel 4816 * EIO : An error occurred while trying to copy the page data 4817 * 4818 * Return with all constituent members of target and replacement 4819 * SE_EXCL locked. It is the callers responsibility to drop the 4820 * locks. 
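 *
 * Editor's sketch (added for illustration; not from the original source, and
 * the retry policy shown is only an assumption): a caller holding the target
 * SE_EXCL, and letting this routine allocate its own replacement pages,
 * might dispatch on the result like this:
 *
 *	spgcnt_t nreloc;
 *	page_t *repl = NULL;
 *	int ret;
 *
 *	ret = do_page_relocate(&targ, &repl, 1, &nreloc, NULL);
 *	if (ret == 0) {
 *		...consume 'repl'; 'targ' now heads the list of old
 *		...constituent pages, everything still SE_EXCL locked, so
 *		...the caller unlocks and frees as appropriate...
 *	} else if (ret == EAGAIN || ret == EBUSY || ret == ENOMEM) {
 *		...transient failure: back off and retry later...
 *	} else {
 *		...ERANGE or EIO: give up on relocating this page...
 *	}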
4821 */ 4822 int 4823 do_page_relocate( 4824 page_t **target, 4825 page_t **replacement, 4826 int grouplock, 4827 spgcnt_t *nrelocp, 4828 lgrp_t *lgrp) 4829 { 4830 page_t *first_repl; 4831 page_t *repl; 4832 page_t *targ; 4833 page_t *pl = NULL; 4834 uint_t ppattr; 4835 pfn_t pfn, repl_pfn; 4836 uint_t szc; 4837 spgcnt_t npgs, i; 4838 int repl_contig = 0; 4839 uint_t flags = 0; 4840 spgcnt_t dofree = 0; 4841 4842 *nrelocp = 0; 4843 4844 #if defined(__sparc) 4845 /* 4846 * We need to wait till OBP has completed 4847 * its boot-time handoff of its resources to the kernel 4848 * before we allow page relocation 4849 */ 4850 if (page_relocate_ready == 0) { 4851 return (EAGAIN); 4852 } 4853 #endif 4854 4855 /* 4856 * If this is not a base page, 4857 * just return with 0x0 pages relocated. 4858 */ 4859 targ = *target; 4860 ASSERT(PAGE_EXCL(targ)); 4861 ASSERT(!PP_ISFREE(targ)); 4862 szc = targ->p_szc; 4863 ASSERT(szc < mmu_page_sizes); 4864 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4865 pfn = targ->p_pagenum; 4866 if (pfn != PFN_BASE(pfn, szc)) { 4867 VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]); 4868 return (ERANGE); 4869 } 4870 4871 if ((repl = *replacement) != NULL && repl->p_szc >= szc) { 4872 repl_pfn = repl->p_pagenum; 4873 if (repl_pfn != PFN_BASE(repl_pfn, szc)) { 4874 VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]); 4875 return (ERANGE); 4876 } 4877 repl_contig = 1; 4878 } 4879 4880 /* 4881 * We must lock all members of this large page or we cannot 4882 * relocate any part of it. 4883 */ 4884 if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) { 4885 VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]); 4886 return (EBUSY); 4887 } 4888 4889 /* 4890 * reread szc it could have been decreased before 4891 * group_page_trylock() was done. 4892 */ 4893 szc = targ->p_szc; 4894 ASSERT(szc < mmu_page_sizes); 4895 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4896 ASSERT(pfn == PFN_BASE(pfn, szc)); 4897 4898 npgs = page_get_pagecnt(targ->p_szc); 4899 4900 if (repl == NULL) { 4901 dofree = npgs; /* Size of target page in MMU pages */ 4902 if (!page_create_wait(dofree, 0)) { 4903 if (grouplock != 0) { 4904 group_page_unlock(targ); 4905 } 4906 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4907 return (ENOMEM); 4908 } 4909 4910 /* 4911 * seg kmem pages require that the target and replacement 4912 * page be the same pagesize. 4913 */ 4914 flags = (VN_ISKAS(targ->p_vnode)) ? 
PGR_SAMESZC : 0; 4915 repl = page_get_replacement_page(targ, lgrp, flags); 4916 if (repl == NULL) { 4917 if (grouplock != 0) { 4918 group_page_unlock(targ); 4919 } 4920 page_create_putback(dofree); 4921 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4922 return (ENOMEM); 4923 } 4924 } 4925 #ifdef DEBUG 4926 else { 4927 ASSERT(PAGE_LOCKED(repl)); 4928 } 4929 #endif /* DEBUG */ 4930 4931 #if defined(__sparc) 4932 /* 4933 * Let hat_page_relocate() complete the relocation if it's kernel page 4934 */ 4935 if (VN_ISKAS(targ->p_vnode)) { 4936 *replacement = repl; 4937 if (hat_page_relocate(target, replacement, nrelocp) != 0) { 4938 if (grouplock != 0) { 4939 group_page_unlock(targ); 4940 } 4941 if (dofree) { 4942 *replacement = NULL; 4943 page_free_replacement_page(repl); 4944 page_create_putback(dofree); 4945 } 4946 VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]); 4947 return (EAGAIN); 4948 } 4949 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 4950 return (0); 4951 } 4952 #else 4953 #if defined(lint) 4954 dofree = dofree; 4955 #endif 4956 #endif 4957 4958 first_repl = repl; 4959 4960 for (i = 0; i < npgs; i++) { 4961 ASSERT(PAGE_EXCL(targ)); 4962 ASSERT(targ->p_slckcnt == 0); 4963 ASSERT(repl->p_slckcnt == 0); 4964 4965 (void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD); 4966 4967 ASSERT(hat_page_getshare(targ) == 0); 4968 ASSERT(!PP_ISFREE(targ)); 4969 ASSERT(targ->p_pagenum == (pfn + i)); 4970 ASSERT(repl_contig == 0 || 4971 repl->p_pagenum == (repl_pfn + i)); 4972 4973 /* 4974 * Copy the page contents and attributes then 4975 * relocate the page in the page hash. 4976 */ 4977 if (ppcopy(targ, repl) == 0) { 4978 targ = *target; 4979 repl = first_repl; 4980 VM_STAT_ADD(vmm_vmstats.ppr_copyfail); 4981 if (grouplock != 0) { 4982 group_page_unlock(targ); 4983 } 4984 if (dofree) { 4985 *replacement = NULL; 4986 page_free_replacement_page(repl); 4987 page_create_putback(dofree); 4988 } 4989 return (EIO); 4990 } 4991 4992 targ++; 4993 if (repl_contig != 0) { 4994 repl++; 4995 } else { 4996 repl = repl->p_next; 4997 } 4998 } 4999 5000 repl = first_repl; 5001 targ = *target; 5002 5003 for (i = 0; i < npgs; i++) { 5004 ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO)); 5005 page_clr_all_props(repl); 5006 page_set_props(repl, ppattr); 5007 page_relocate_hash(repl, targ); 5008 5009 ASSERT(hat_page_getshare(targ) == 0); 5010 ASSERT(hat_page_getshare(repl) == 0); 5011 /* 5012 * Now clear the props on targ, after the 5013 * page_relocate_hash(), they no longer 5014 * have any meaning. 5015 */ 5016 page_clr_all_props(targ); 5017 ASSERT(targ->p_next == targ); 5018 ASSERT(targ->p_prev == targ); 5019 page_list_concat(&pl, &targ); 5020 5021 targ++; 5022 if (repl_contig != 0) { 5023 repl++; 5024 } else { 5025 repl = repl->p_next; 5026 } 5027 } 5028 /* assert that we have come full circle with repl */ 5029 ASSERT(repl_contig == 1 || first_repl == repl); 5030 5031 *target = pl; 5032 if (*replacement == NULL) { 5033 ASSERT(first_repl == repl); 5034 *replacement = repl; 5035 } 5036 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 5037 *nrelocp = npgs; 5038 return (0); 5039 } 5040 /* 5041 * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated. 
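 *
 * Editor's sketch (illustrative, not from the original source): the usage
 * mirrors page_migrate() later in this file, where the caller supplies a
 * locked replacement of the same size and asks for the old pages to be
 * freed on success:
 *
 *	if (page_relocate(&pp, &newpp, 0, 1, &npgs, to_lgrp) == 0) {
 *		...on success the old pages were freed (freetarget != 0)
 *		...and 'npgs' holds the number of PAGESIZE pages relocated...
 *	}
 *
 * 'to_lgrp' and 'npgs' are placeholder names used only in this sketch.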
5042 */ 5043 int 5044 page_relocate( 5045 page_t **target, 5046 page_t **replacement, 5047 int grouplock, 5048 int freetarget, 5049 spgcnt_t *nrelocp, 5050 lgrp_t *lgrp) 5051 { 5052 spgcnt_t ret; 5053 5054 /* do_page_relocate returns 0 on success or errno value */ 5055 ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp); 5056 5057 if (ret != 0 || freetarget == 0) { 5058 return (ret); 5059 } 5060 if (*nrelocp == 1) { 5061 ASSERT(*target != NULL); 5062 page_free(*target, 1); 5063 } else { 5064 page_t *tpp = *target; 5065 uint_t szc = tpp->p_szc; 5066 pgcnt_t npgs = page_get_pagecnt(szc); 5067 ASSERT(npgs > 1); 5068 ASSERT(szc != 0); 5069 do { 5070 ASSERT(PAGE_EXCL(tpp)); 5071 ASSERT(!hat_page_is_mapped(tpp)); 5072 ASSERT(tpp->p_szc == szc); 5073 PP_SETFREE(tpp); 5074 PP_SETAGED(tpp); 5075 npgs--; 5076 } while ((tpp = tpp->p_next) != *target); 5077 ASSERT(npgs == 0); 5078 page_list_add_pages(*target, 0); 5079 npgs = page_get_pagecnt(szc); 5080 page_create_putback(npgs); 5081 } 5082 return (ret); 5083 } 5084 5085 /* 5086 * it is up to the caller to deal with pcf accounting. 5087 */ 5088 void 5089 page_free_replacement_page(page_t *pplist) 5090 { 5091 page_t *pp; 5092 5093 while (pplist != NULL) { 5094 /* 5095 * pp_targ is a linked list. 5096 */ 5097 pp = pplist; 5098 if (pp->p_szc == 0) { 5099 page_sub(&pplist, pp); 5100 page_clr_all_props(pp); 5101 PP_SETFREE(pp); 5102 PP_SETAGED(pp); 5103 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 5104 page_unlock(pp); 5105 VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]); 5106 } else { 5107 spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc); 5108 page_t *tpp; 5109 page_list_break(&pp, &pplist, curnpgs); 5110 tpp = pp; 5111 do { 5112 ASSERT(PAGE_EXCL(tpp)); 5113 ASSERT(!hat_page_is_mapped(tpp)); 5114 page_clr_all_props(pp); 5115 PP_SETFREE(tpp); 5116 PP_SETAGED(tpp); 5117 } while ((tpp = tpp->p_next) != pp); 5118 page_list_add_pages(pp, 0); 5119 VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]); 5120 } 5121 } 5122 } 5123 5124 /* 5125 * Relocate target to non-relocatable replacement page. 5126 */ 5127 int 5128 page_relocate_cage(page_t **target, page_t **replacement) 5129 { 5130 page_t *tpp, *rpp; 5131 spgcnt_t pgcnt, npgs; 5132 int result; 5133 5134 tpp = *target; 5135 5136 ASSERT(PAGE_EXCL(tpp)); 5137 ASSERT(tpp->p_szc == 0); 5138 5139 pgcnt = btop(page_get_pagesize(tpp->p_szc)); 5140 5141 do { 5142 (void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC); 5143 rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC); 5144 if (rpp == NULL) { 5145 page_create_putback(pgcnt); 5146 kcage_cageout_wakeup(); 5147 } 5148 } while (rpp == NULL); 5149 5150 ASSERT(PP_ISNORELOC(rpp)); 5151 5152 result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL); 5153 5154 if (result == 0) { 5155 *replacement = rpp; 5156 if (pgcnt != npgs) 5157 panic("page_relocate_cage: partial relocation"); 5158 } 5159 5160 return (result); 5161 } 5162 5163 /* 5164 * Release the page lock on a page, place on cachelist 5165 * tail if no longer mapped. Caller can let us know if 5166 * the page is known to be clean. 
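 *
 * Editor's sketch (illustrative only; how a caller reacts to each status is
 * an assumption, not taken from existing callers):
 *
 *	switch (page_release(pp, 1)) {
 *	case PGREL_CLEAN:
 *		...page was placed on the cachelist; nothing more to do...
 *		break;
 *	case PGREL_MOD:
 *		...page was dirty and has only been unlocked; the caller
 *		...may choose to push it with VOP_PUTPAGE() later...
 *		break;
 *	case PGREL_NOTREL:
 *		...page is still mapped, still claimed, or otherwise could
 *		...not be released; it has simply been unlocked...
 *		break;
 *	}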
5167 */ 5168 int 5169 page_release(page_t *pp, int checkmod) 5170 { 5171 int status; 5172 5173 ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) && 5174 (pp->p_vnode != NULL)); 5175 5176 if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) && 5177 ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) && 5178 pp->p_lckcnt == 0 && pp->p_cowcnt == 0 && 5179 !hat_page_is_mapped(pp)) { 5180 5181 /* 5182 * If page is modified, unlock it 5183 * 5184 * (p_nrm & P_MOD) bit has the latest stuff because: 5185 * (1) We found that this page doesn't have any mappings 5186 * _after_ holding SE_EXCL and 5187 * (2) We didn't drop SE_EXCL lock after the check in (1) 5188 */ 5189 if (checkmod && hat_ismod(pp)) { 5190 page_unlock(pp); 5191 status = PGREL_MOD; 5192 } else { 5193 /*LINTED: constant in conditional context*/ 5194 VN_DISPOSE(pp, B_FREE, 0, kcred); 5195 status = PGREL_CLEAN; 5196 } 5197 } else { 5198 page_unlock(pp); 5199 status = PGREL_NOTREL; 5200 } 5201 return (status); 5202 } 5203 5204 /* 5205 * Given a constituent page, try to demote the large page on the freelist. 5206 * 5207 * Returns nonzero if the page could be demoted successfully. Returns with 5208 * the constituent page still locked. 5209 */ 5210 int 5211 page_try_demote_free_pages(page_t *pp) 5212 { 5213 page_t *rootpp = pp; 5214 pfn_t pfn = page_pptonum(pp); 5215 spgcnt_t npgs; 5216 uint_t szc = pp->p_szc; 5217 5218 ASSERT(PP_ISFREE(pp)); 5219 ASSERT(PAGE_EXCL(pp)); 5220 5221 /* 5222 * Adjust rootpp and lock it, if `pp' is not the base 5223 * constituent page. 5224 */ 5225 npgs = page_get_pagecnt(pp->p_szc); 5226 if (npgs == 1) { 5227 return (0); 5228 } 5229 5230 if (!IS_P2ALIGNED(pfn, npgs)) { 5231 pfn = P2ALIGN(pfn, npgs); 5232 rootpp = page_numtopp_nolock(pfn); 5233 } 5234 5235 if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) { 5236 return (0); 5237 } 5238 5239 if (rootpp->p_szc != szc) { 5240 if (pp != rootpp) 5241 page_unlock(rootpp); 5242 return (0); 5243 } 5244 5245 page_demote_free_pages(rootpp); 5246 5247 if (pp != rootpp) 5248 page_unlock(rootpp); 5249 5250 ASSERT(PP_ISFREE(pp)); 5251 ASSERT(PAGE_EXCL(pp)); 5252 return (1); 5253 } 5254 5255 /* 5256 * Given a constituent page, try to demote the large page. 5257 * 5258 * Returns nonzero if the page could be demoted successfully. Returns with 5259 * the constituent page still locked. 5260 */ 5261 int 5262 page_try_demote_pages(page_t *pp) 5263 { 5264 page_t *tpp, *rootpp = pp; 5265 pfn_t pfn = page_pptonum(pp); 5266 spgcnt_t i, npgs; 5267 uint_t szc = pp->p_szc; 5268 vnode_t *vp = pp->p_vnode; 5269 5270 ASSERT(PAGE_EXCL(pp)); 5271 5272 VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]); 5273 5274 if (pp->p_szc == 0) { 5275 VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]); 5276 return (1); 5277 } 5278 5279 if (vp != NULL && !IS_SWAPFSVP(vp) && !VN_ISKAS(vp)) { 5280 VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]); 5281 page_demote_vp_pages(pp); 5282 ASSERT(pp->p_szc == 0); 5283 return (1); 5284 } 5285 5286 /* 5287 * Adjust rootpp if passed in is not the base 5288 * constituent page. 5289 */ 5290 npgs = page_get_pagecnt(pp->p_szc); 5291 ASSERT(npgs > 1); 5292 if (!IS_P2ALIGNED(pfn, npgs)) { 5293 pfn = P2ALIGN(pfn, npgs); 5294 rootpp = page_numtopp_nolock(pfn); 5295 VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]); 5296 ASSERT(rootpp->p_vnode != NULL); 5297 ASSERT(rootpp->p_szc == szc); 5298 } 5299 5300 /* 5301 * We can't demote kernel pages since we can't hat_unload() 5302 * the mappings. 
5303 */ 5304 if (VN_ISKAS(rootpp->p_vnode)) 5305 return (0); 5306 5307 /* 5308 * Attempt to lock all constituent pages except the page passed 5309 * in since it's already locked. 5310 */ 5311 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5312 ASSERT(!PP_ISFREE(tpp)); 5313 ASSERT(tpp->p_vnode != NULL); 5314 5315 if (tpp != pp && !page_trylock(tpp, SE_EXCL)) 5316 break; 5317 ASSERT(tpp->p_szc == rootpp->p_szc); 5318 ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i); 5319 } 5320 5321 /* 5322 * If we failed to lock them all then unlock what we have 5323 * locked so far and bail. 5324 */ 5325 if (i < npgs) { 5326 tpp = rootpp; 5327 while (i-- > 0) { 5328 if (tpp != pp) 5329 page_unlock(tpp); 5330 tpp++; 5331 } 5332 VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]); 5333 return (0); 5334 } 5335 5336 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5337 ASSERT(PAGE_EXCL(tpp)); 5338 ASSERT(tpp->p_slckcnt == 0); 5339 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 5340 tpp->p_szc = 0; 5341 } 5342 5343 /* 5344 * Unlock all pages except the page passed in. 5345 */ 5346 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5347 ASSERT(!hat_page_is_mapped(tpp)); 5348 if (tpp != pp) 5349 page_unlock(tpp); 5350 } 5351 5352 VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]); 5353 return (1); 5354 } 5355 5356 /* 5357 * Called by page_free() and page_destroy() to demote the page size code 5358 * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non zero 5359 * p_szc on free list, neither can we just clear p_szc of a single page_t 5360 * within a large page since it will break other code that relies on p_szc 5361 * being the same for all page_t's of a large page). Anonymous pages should 5362 * never end up here because anon_map_getpages() cannot deal with p_szc 5363 * changes after a single constituent page is locked. While anonymous or 5364 * kernel large pages are demoted or freed the entire large page at a time 5365 * with all constituent pages locked EXCL for the file system pages we 5366 * have to be able to demote a large page (i.e. decrease all constituent pages 5367 * p_szc) with only just an EXCL lock on one of constituent pages. The reason 5368 * we can easily deal with anonymous page demotion the entire large page at a 5369 * time is that those operation originate at address space level and concern 5370 * the entire large page region with actual demotion only done when pages are 5371 * not shared with any other processes (therefore we can always get EXCL lock 5372 * on all anonymous constituent pages after clearing segment page 5373 * cache). However file system pages can be truncated or invalidated at a 5374 * PAGESIZE level from the file system side and end up in page_free() or 5375 * page_destroy() (we also allow only part of the large page to be SOFTLOCKed 5376 * and therefore pageout should be able to demote a large page by EXCL locking 5377 * any constituent page that is not under SOFTLOCK). In those cases we cannot 5378 * rely on being able to lock EXCL all constituent pages. 5379 * 5380 * To prevent szc changes on file system pages one has to lock all constituent 5381 * pages at least SHARED (or call page_szc_lock()). The only subsystem that 5382 * doesn't rely on locking all constituent pages (or using page_szc_lock()) to 5383 * prevent szc changes is hat layer that uses its own page level mlist 5384 * locks. hat assumes that szc doesn't change after mlist lock for a page is 5385 * taken. 
Therefore we need to change szc under hat level locks if we only 5386 * have an EXCL lock on a single constituent page and hat still references any 5387 * of the constituent pages. (Note we can't "ignore" the hat layer by simply 5388 * calling hat_pageunload() on all constituent pages without having EXCL locks 5389 * on all of them.) We use the hat_page_demote() call to safely demote szc of 5390 * all constituent pages under hat locks when we only have an EXCL lock on one 5391 * of the constituent pages. 5392 * 5393 * This routine calls page_szc_lock() before calling hat_page_demote() to 5394 * allow segvn, in one special case, not to lock all constituent pages SHARED 5395 * before calling hat_memload_array(), which relies on p_szc not changing even 5396 * before the hat level mlist lock is taken. In that case segvn uses 5397 * page_szc_lock() to prevent hat_page_demote() from changing p_szc values. 5398 * 5399 * Anonymous or kernel page demotion still has to lock all pages exclusively 5400 * and do hat_pageunload() on all constituent pages before demoting the page; 5401 * therefore there's no need for anonymous or kernel page demotion to use the 5402 * hat_page_demote() mechanism. 5403 * 5404 * hat_page_demote() removes all large mappings that map pp and then decreases 5405 * p_szc starting from the last constituent page of the large page. Working 5406 * from the tail of a large page in decreasing pfn order allows one looking at 5407 * the root page to know that hat_page_demote() is done for the root's szc 5408 * area; e.g. if a root page has szc 1, one knows it only has to lock all 5409 * constituent pages within the szc 1 area to prevent szc changes, because a 5410 * hat_page_demote() that started on this page when it had szc > 1 is done for 5411 * this szc 1 area. 5412 * 5413 * We are guaranteed that all constituent pages of pp's large page belong to 5414 * the same vnode, with consecutive offsets increasing in the direction of 5415 * the pfn, i.e. the identity of constituent pages can't change until their 5416 * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove 5417 * large mappings to pp even though we don't lock any constituent page except pp (i.e. we won't unload e.g. a kernel locked page).
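 *
 * Editor's summary (added for clarity; a restatement of the rules above, not
 * new information from the original authors): for file system pages,
 *
 *	to rely on p_szc staying constant, either lock every constituent
 *	page at least SHARED or hold the mutex returned by page_szc_lock();
 *
 *	to lower p_szc while holding only one EXCL constituent lock, go
 *	through hat_page_demote() (as page_demote_vp_pages() below does) so
 *	the change happens under the hat-level per-page mlist locks.
 *
 * Anonymous and kernel large pages never need this mechanism: they are
 * demoted or freed with every constituent page locked EXCL and already
 * unloaded from the hat.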
5418 */ 5419 static void 5420 page_demote_vp_pages(page_t *pp) 5421 { 5422 kmutex_t *mtx; 5423 5424 ASSERT(PAGE_EXCL(pp)); 5425 ASSERT(!PP_ISFREE(pp)); 5426 ASSERT(pp->p_vnode != NULL); 5427 ASSERT(!IS_SWAPFSVP(pp->p_vnode)); 5428 ASSERT(!PP_ISKAS(pp)); 5429 5430 VM_STAT_ADD(pagecnt.pc_demote_pages[0]); 5431 5432 mtx = page_szc_lock(pp); 5433 if (mtx != NULL) { 5434 hat_page_demote(pp); 5435 mutex_exit(mtx); 5436 } 5437 ASSERT(pp->p_szc == 0); 5438 } 5439 5440 /* 5441 * Mark any existing pages for migration in the given range 5442 */ 5443 void 5444 page_mark_migrate(struct seg *seg, caddr_t addr, size_t len, 5445 struct anon_map *amp, ulong_t anon_index, vnode_t *vp, 5446 u_offset_t vnoff, int rflag) 5447 { 5448 struct anon *ap; 5449 vnode_t *curvp; 5450 lgrp_t *from; 5451 pgcnt_t i; 5452 pgcnt_t nlocked; 5453 u_offset_t off; 5454 pfn_t pfn; 5455 size_t pgsz; 5456 size_t segpgsz; 5457 pgcnt_t pages; 5458 uint_t pszc; 5459 page_t **ppa; 5460 pgcnt_t ppa_nentries; 5461 page_t *pp; 5462 caddr_t va; 5463 ulong_t an_idx; 5464 anon_sync_obj_t cookie; 5465 5466 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5467 5468 /* 5469 * Don't do anything if don't need to do lgroup optimizations 5470 * on this system 5471 */ 5472 if (!lgrp_optimizations()) 5473 return; 5474 5475 /* 5476 * Align address and length to (potentially large) page boundary 5477 */ 5478 segpgsz = page_get_pagesize(seg->s_szc); 5479 addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz); 5480 if (rflag) 5481 len = P2ROUNDUP(len, segpgsz); 5482 5483 /* 5484 * Allocate page array to accommodate largest page size 5485 */ 5486 pgsz = page_get_pagesize(page_num_pagesizes() - 1); 5487 ppa_nentries = btop(pgsz); 5488 ppa = kmem_zalloc(ppa_nentries * sizeof (page_t *), KM_SLEEP); 5489 5490 /* 5491 * Do one (large) page at a time 5492 */ 5493 va = addr; 5494 while (va < addr + len) { 5495 /* 5496 * Lookup (root) page for vnode and offset corresponding to 5497 * this virtual address 5498 * Try anonmap first since there may be copy-on-write 5499 * pages, but initialize vnode pointer and offset using 5500 * vnode arguments just in case there isn't an amp. 
5501 */ 5502 curvp = vp; 5503 off = vnoff + va - seg->s_base; 5504 if (amp) { 5505 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5506 an_idx = anon_index + seg_page(seg, va); 5507 anon_array_enter(amp, an_idx, &cookie); 5508 ap = anon_get_ptr(amp->ahp, an_idx); 5509 if (ap) 5510 swap_xlate(ap, &curvp, &off); 5511 anon_array_exit(&cookie); 5512 ANON_LOCK_EXIT(&->a_rwlock); 5513 } 5514 5515 pp = NULL; 5516 if (curvp) 5517 pp = page_lookup(curvp, off, SE_SHARED); 5518 5519 /* 5520 * If there isn't a page at this virtual address, 5521 * skip to next page 5522 */ 5523 if (pp == NULL) { 5524 va += PAGESIZE; 5525 continue; 5526 } 5527 5528 /* 5529 * Figure out which lgroup this page is in for kstats 5530 */ 5531 pfn = page_pptonum(pp); 5532 from = lgrp_pfn_to_lgrp(pfn); 5533 5534 /* 5535 * Get page size, and round up and skip to next page boundary 5536 * if unaligned address 5537 */ 5538 pszc = pp->p_szc; 5539 pgsz = page_get_pagesize(pszc); 5540 pages = btop(pgsz); 5541 if (!IS_P2ALIGNED(va, pgsz) || 5542 !IS_P2ALIGNED(pfn, pages) || 5543 pgsz > segpgsz) { 5544 pgsz = MIN(pgsz, segpgsz); 5545 page_unlock(pp); 5546 i = btop(P2END((uintptr_t)va, pgsz) - 5547 (uintptr_t)va); 5548 va = (caddr_t)P2END((uintptr_t)va, pgsz); 5549 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, i); 5550 continue; 5551 } 5552 5553 /* 5554 * Upgrade to exclusive lock on page 5555 */ 5556 if (!page_tryupgrade(pp)) { 5557 page_unlock(pp); 5558 va += pgsz; 5559 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 5560 btop(pgsz)); 5561 continue; 5562 } 5563 5564 /* 5565 * Remember pages locked exclusively and how many 5566 */ 5567 ppa[0] = pp; 5568 nlocked = 1; 5569 5570 /* 5571 * Lock constituent pages if this is large page 5572 */ 5573 if (pages > 1) { 5574 /* 5575 * Lock all constituents except root page, since it 5576 * should be locked already. 5577 */ 5578 for (i = 1; i < pages; i++) { 5579 pp++; 5580 if (!page_trylock(pp, SE_EXCL)) { 5581 break; 5582 } 5583 if (PP_ISFREE(pp) || 5584 pp->p_szc != pszc) { 5585 /* 5586 * hat_page_demote() raced in with us. 5587 */ 5588 ASSERT(!IS_SWAPFSVP(curvp)); 5589 page_unlock(pp); 5590 break; 5591 } 5592 ppa[nlocked] = pp; 5593 nlocked++; 5594 } 5595 } 5596 5597 /* 5598 * If all constituent pages couldn't be locked, 5599 * unlock pages locked so far and skip to next page. 5600 */ 5601 if (nlocked != pages) { 5602 for (i = 0; i < nlocked; i++) 5603 page_unlock(ppa[i]); 5604 va += pgsz; 5605 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 5606 btop(pgsz)); 5607 continue; 5608 } 5609 5610 /* 5611 * hat_page_demote() can no longer happen 5612 * since last cons page had the right p_szc after 5613 * all cons pages were locked. all cons pages 5614 * should now have the same p_szc. 
5615 */ 5616 5617 /* 5618 * All constituent pages locked successfully, so mark 5619 * large page for migration and unload the mappings of 5620 * constituent pages, so a fault will occur on any part of the 5621 * large page 5622 */ 5623 PP_SETMIGRATE(ppa[0]); 5624 for (i = 0; i < nlocked; i++) { 5625 pp = ppa[i]; 5626 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 5627 ASSERT(hat_page_getshare(pp) == 0); 5628 page_unlock(pp); 5629 } 5630 lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked); 5631 5632 va += pgsz; 5633 } 5634 kmem_free(ppa, ppa_nentries * sizeof (page_t *)); 5635 } 5636 5637 /* 5638 * Migrate any pages that have been marked for migration in the given range 5639 */ 5640 void 5641 page_migrate( 5642 struct seg *seg, 5643 caddr_t addr, 5644 page_t **ppa, 5645 pgcnt_t npages) 5646 { 5647 lgrp_t *from; 5648 lgrp_t *to; 5649 page_t *newpp; 5650 page_t *pp; 5651 pfn_t pfn; 5652 size_t pgsz; 5653 spgcnt_t page_cnt; 5654 spgcnt_t i; 5655 uint_t pszc; 5656 5657 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5658 5659 while (npages > 0) { 5660 pp = *ppa; 5661 pszc = pp->p_szc; 5662 pgsz = page_get_pagesize(pszc); 5663 page_cnt = btop(pgsz); 5664 5665 /* 5666 * Check to see whether this page is marked for migration 5667 * 5668 * Assume that root page of large page is marked for 5669 * migration and none of the other constituent pages 5670 * are marked. This really simplifies clearing the 5671 * migrate bit by not having to clear it from each 5672 * constituent page. 5673 * 5674 * note we don't want to relocate an entire large page if 5675 * someone is only using one subpage. 5676 */ 5677 if (npages < page_cnt) 5678 break; 5679 5680 /* 5681 * Is it marked for migration? 5682 */ 5683 if (!PP_ISMIGRATE(pp)) 5684 goto next; 5685 5686 /* 5687 * Determine lgroups that page is being migrated between 5688 */ 5689 pfn = page_pptonum(pp); 5690 if (!IS_P2ALIGNED(pfn, page_cnt)) { 5691 break; 5692 } 5693 from = lgrp_pfn_to_lgrp(pfn); 5694 to = lgrp_mem_choose(seg, addr, pgsz); 5695 5696 /* 5697 * Need to get exclusive lock's to migrate 5698 */ 5699 for (i = 0; i < page_cnt; i++) { 5700 ASSERT(PAGE_LOCKED(ppa[i])); 5701 if (page_pptonum(ppa[i]) != pfn + i || 5702 ppa[i]->p_szc != pszc) { 5703 break; 5704 } 5705 if (!page_tryupgrade(ppa[i])) { 5706 lgrp_stat_add(from->lgrp_id, 5707 LGRP_PM_FAIL_LOCK_PGS, 5708 page_cnt); 5709 break; 5710 } 5711 5712 /* 5713 * Check to see whether we are trying to migrate 5714 * page to lgroup where it is allocated already. 5715 * If so, clear the migrate bit and skip to next 5716 * page. 5717 */ 5718 if (i == 0 && to == from) { 5719 PP_CLRMIGRATE(ppa[0]); 5720 page_downgrade(ppa[0]); 5721 goto next; 5722 } 5723 } 5724 5725 /* 5726 * If all constituent pages couldn't be locked, 5727 * unlock pages locked so far and skip to next page. 
5728 */ 5729 if (i != page_cnt) { 5730 while (--i != -1) { 5731 page_downgrade(ppa[i]); 5732 } 5733 goto next; 5734 } 5735 5736 (void) page_create_wait(page_cnt, PG_WAIT); 5737 newpp = page_get_replacement_page(pp, to, PGR_SAMESZC); 5738 if (newpp == NULL) { 5739 page_create_putback(page_cnt); 5740 for (i = 0; i < page_cnt; i++) { 5741 page_downgrade(ppa[i]); 5742 } 5743 lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS, 5744 page_cnt); 5745 goto next; 5746 } 5747 ASSERT(newpp->p_szc == pszc); 5748 /* 5749 * Clear migrate bit and relocate page 5750 */ 5751 PP_CLRMIGRATE(pp); 5752 if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) { 5753 panic("page_migrate: page_relocate failed"); 5754 } 5755 ASSERT(page_cnt * PAGESIZE == pgsz); 5756 5757 /* 5758 * Keep stats for number of pages migrated from and to 5759 * each lgroup 5760 */ 5761 lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt); 5762 lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt); 5763 /* 5764 * update the page_t array we were passed in and 5765 * unlink constituent pages of a large page. 5766 */ 5767 for (i = 0; i < page_cnt; ++i, ++pp) { 5768 ASSERT(PAGE_EXCL(newpp)); 5769 ASSERT(newpp->p_szc == pszc); 5770 ppa[i] = newpp; 5771 pp = newpp; 5772 page_sub(&newpp, pp); 5773 page_downgrade(pp); 5774 } 5775 ASSERT(newpp == NULL); 5776 next: 5777 addr += pgsz; 5778 ppa += page_cnt; 5779 npages -= page_cnt; 5780 } 5781 } 5782 5783 ulong_t mem_waiters = 0; 5784 ulong_t max_count = 20; 5785 #define MAX_DELAY 0x1ff 5786 5787 /* 5788 * Check if enough memory is available to proceed. 5789 * Depending on system configuration and how much memory is 5790 * reserved for swap we need to check against two variables. 5791 * e.g. on systems with little physical swap availrmem can be 5792 * more reliable indicator of how much memory is available. 5793 * On systems with large phys swap freemem can be better indicator. 5794 * If freemem drops below threshold level don't return an error 5795 * immediately but wake up pageout to free memory and block. 5796 * This is done number of times. If pageout is not able to free 5797 * memory within certain time return an error. 5798 * The same applies for availrmem but kmem_reap is used to 5799 * free memory. 
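 *
 * Editor's sketch (illustrative; the surrounding caller code and the choice
 * of error value are assumptions):
 *
 *	if (!page_mem_avail(npages))
 *		return (ENOMEM);
 *	...enough freemem/availrmem is believed to be available, so the
 *	...caller proceeds to allocate 'npages' pages...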
5800 */ 5801 int 5802 page_mem_avail(pgcnt_t npages) 5803 { 5804 ulong_t count; 5805 5806 #if defined(__i386) 5807 if (freemem > desfree + npages && 5808 availrmem > swapfs_reserve + npages && 5809 btop(vmem_size(heap_arena, VMEM_FREE)) > tune.t_minarmem + 5810 npages) 5811 return (1); 5812 #else 5813 if (freemem > desfree + npages && 5814 availrmem > swapfs_reserve + npages) 5815 return (1); 5816 #endif 5817 5818 count = max_count; 5819 atomic_add_long(&mem_waiters, 1); 5820 5821 while (freemem < desfree + npages && --count) { 5822 cv_signal(&proc_pageout->p_cv); 5823 if (delay_sig(hz + (mem_waiters & MAX_DELAY))) { 5824 atomic_add_long(&mem_waiters, -1); 5825 return (0); 5826 } 5827 } 5828 if (count == 0) { 5829 atomic_add_long(&mem_waiters, -1); 5830 return (0); 5831 } 5832 5833 count = max_count; 5834 while (availrmem < swapfs_reserve + npages && --count) { 5835 kmem_reap(); 5836 if (delay_sig(hz + (mem_waiters & MAX_DELAY))) { 5837 atomic_add_long(&mem_waiters, -1); 5838 return (0); 5839 } 5840 } 5841 atomic_add_long(&mem_waiters, -1); 5842 if (count == 0) 5843 return (0); 5844 5845 #if defined(__i386) 5846 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 5847 tune.t_minarmem + npages) 5848 return (0); 5849 #endif 5850 return (1); 5851 } 5852 5853 #define MAX_CNT 60 /* max num of iterations */ 5854 /* 5855 * Reclaim/reserve availrmem for npages. 5856 * If there is not enough memory start reaping seg, kmem caches. 5857 * Start pageout scanner (via page_needfree()). 5858 * Exit after ~ MAX_CNT s regardless of how much memory has been released. 5859 * Note: There is no guarantee that any availrmem will be freed as 5860 * this memory typically is locked (kernel heap) or reserved for swap. 5861 * Also due to memory fragmentation kmem allocator may not be able 5862 * to free any memory (single user allocated buffer will prevent 5863 * freeing slab or a page). 5864 */ 5865 int 5866 page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust) 5867 { 5868 int i = 0; 5869 int ret = 0; 5870 pgcnt_t deficit; 5871 pgcnt_t old_availrmem; 5872 5873 mutex_enter(&freemem_lock); 5874 old_availrmem = availrmem - 1; 5875 while ((availrmem < tune.t_minarmem + npages + epages) && 5876 (old_availrmem < availrmem) && (i++ < MAX_CNT)) { 5877 old_availrmem = availrmem; 5878 deficit = tune.t_minarmem + npages + epages - availrmem; 5879 mutex_exit(&freemem_lock); 5880 page_needfree(deficit); 5881 seg_preap(); 5882 kmem_reap(); 5883 delay(hz); 5884 page_needfree(-(spgcnt_t)deficit); 5885 mutex_enter(&freemem_lock); 5886 } 5887 5888 if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) { 5889 availrmem -= npages; 5890 ret = 1; 5891 } 5892 5893 mutex_exit(&freemem_lock); 5894 5895 return (ret); 5896 } 5897 5898 /* 5899 * Search the memory segments to locate the desired page. Within a 5900 * segment, pages increase linearly with one page structure per 5901 * physical page frame (size PAGESIZE). The search begins 5902 * with the segment that was accessed last, to take advantage of locality. 5903 * If the hint misses, we start from the beginning of the sorted memseg list 5904 */ 5905 5906 5907 /* 5908 * Some data structures for pfn to pp lookup. 
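 *
 * Editor's note (added for clarity): page_numtopp_nolock() below tries, in
 * order, the per-CPU "last winner" memseg, then the memseg_hash[] slot chosen
 * by MEMSEG_PFN_HASH(pfnum), and finally a linear walk of the memseg list.
 * Judging from build_pfn_hash() below, each hash slot covers a span of
 * mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT pfns and is primed with
 * the lowest-starting memseg overlapping that span; this description of the
 * slot coverage is the editor's inference, not a statement from the original
 * authors.  MEMSEG_PFN_HASH(), MEM_HASH_SHIFT and N_MEM_SLOTS are defined
 * elsewhere, in a header not shown here.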
5909 */ 5910 ulong_t mhash_per_slot; 5911 struct memseg *memseg_hash[N_MEM_SLOTS]; 5912 5913 page_t * 5914 page_numtopp_nolock(pfn_t pfnum) 5915 { 5916 struct memseg *seg; 5917 page_t *pp; 5918 vm_cpu_data_t *vc = CPU->cpu_vm_data; 5919 5920 ASSERT(vc != NULL); 5921 5922 MEMSEG_STAT_INCR(nsearch); 5923 5924 /* Try last winner first */ 5925 if (((seg = vc->vc_pnum_memseg) != NULL) && 5926 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5927 MEMSEG_STAT_INCR(nlastwon); 5928 pp = seg->pages + (pfnum - seg->pages_base); 5929 if (pp->p_pagenum == pfnum) 5930 return ((page_t *)pp); 5931 } 5932 5933 /* Else Try hash */ 5934 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 5935 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5936 MEMSEG_STAT_INCR(nhashwon); 5937 vc->vc_pnum_memseg = seg; 5938 pp = seg->pages + (pfnum - seg->pages_base); 5939 if (pp->p_pagenum == pfnum) 5940 return ((page_t *)pp); 5941 } 5942 5943 /* Else Brute force */ 5944 for (seg = memsegs; seg != NULL; seg = seg->next) { 5945 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5946 vc->vc_pnum_memseg = seg; 5947 pp = seg->pages + (pfnum - seg->pages_base); 5948 return ((page_t *)pp); 5949 } 5950 } 5951 vc->vc_pnum_memseg = NULL; 5952 MEMSEG_STAT_INCR(nnotfound); 5953 return ((page_t *)NULL); 5954 5955 } 5956 5957 struct memseg * 5958 page_numtomemseg_nolock(pfn_t pfnum) 5959 { 5960 struct memseg *seg; 5961 page_t *pp; 5962 5963 /* Try hash */ 5964 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 5965 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5966 pp = seg->pages + (pfnum - seg->pages_base); 5967 if (pp->p_pagenum == pfnum) 5968 return (seg); 5969 } 5970 5971 /* Else Brute force */ 5972 for (seg = memsegs; seg != NULL; seg = seg->next) { 5973 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5974 return (seg); 5975 } 5976 } 5977 return ((struct memseg *)NULL); 5978 } 5979 5980 /* 5981 * Given a page and a count return the page struct that is 5982 * n structs away from the current one in the global page 5983 * list. 5984 * 5985 * This function wraps to the first page upon 5986 * reaching the end of the memseg list. 5987 */ 5988 page_t * 5989 page_nextn(page_t *pp, ulong_t n) 5990 { 5991 struct memseg *seg; 5992 page_t *ppn; 5993 vm_cpu_data_t *vc = (vm_cpu_data_t *)CPU->cpu_vm_data; 5994 5995 ASSERT(vc != NULL); 5996 5997 if (((seg = vc->vc_pnext_memseg) == NULL) || 5998 (seg->pages_base == seg->pages_end) || 5999 !(pp >= seg->pages && pp < seg->epages)) { 6000 6001 for (seg = memsegs; seg; seg = seg->next) { 6002 if (pp >= seg->pages && pp < seg->epages) 6003 break; 6004 } 6005 6006 if (seg == NULL) { 6007 /* Memory delete got in, return something valid. */ 6008 /* TODO: fix me. */ 6009 seg = memsegs; 6010 pp = seg->pages; 6011 } 6012 } 6013 6014 /* check for wraparound - possible if n is large */ 6015 while ((ppn = (pp + n)) >= seg->epages || ppn < pp) { 6016 n -= seg->epages - pp; 6017 seg = seg->next; 6018 if (seg == NULL) 6019 seg = memsegs; 6020 pp = seg->pages; 6021 } 6022 vc->vc_pnext_memseg = seg; 6023 return (ppn); 6024 } 6025 6026 /* 6027 * Initialize for a loop using page_next_scan_large(). 6028 */ 6029 page_t * 6030 page_next_scan_init(void **cookie) 6031 { 6032 ASSERT(cookie != NULL); 6033 *cookie = (void *)memsegs; 6034 return ((page_t *)memsegs->pages); 6035 } 6036 6037 /* 6038 * Return the next page in a scan of page_t's, assuming we want 6039 * to skip over sub-pages within larger page sizes. 
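 *
 * Editor's sketch of the intended loop shape (an assumption; 'pages_to_scan'
 * and 'visited' are placeholder names, and real callers may differ):
 *
 *	void *cookie;
 *	ulong_t visited = 0;
 *	page_t *pp = page_next_scan_init(&cookie);
 *
 *	while (visited < pages_to_scan) {
 *		...examine pp, the base page of its (possibly large) page...
 *		pp = page_next_scan_large(pp, &visited, &cookie);
 *	}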
6040 * 6041 * The cookie is used to keep track of the current memseg. 6042 */ 6043 page_t * 6044 page_next_scan_large( 6045 page_t *pp, 6046 ulong_t *n, 6047 void **cookie) 6048 { 6049 struct memseg *seg = (struct memseg *)*cookie; 6050 page_t *new_pp; 6051 ulong_t cnt; 6052 pfn_t pfn; 6053 6054 6055 /* 6056 * get the count of page_t's to skip based on the page size 6057 */ 6058 ASSERT(pp != NULL); 6059 if (pp->p_szc == 0) { 6060 cnt = 1; 6061 } else { 6062 pfn = page_pptonum(pp); 6063 cnt = page_get_pagecnt(pp->p_szc); 6064 cnt -= pfn & (cnt - 1); 6065 } 6066 *n += cnt; 6067 new_pp = pp + cnt; 6068 6069 /* 6070 * Catch if we went past the end of the current memory segment. If so, 6071 * just move to the next segment with pages. 6072 */ 6073 if (new_pp >= seg->epages) { 6074 do { 6075 seg = seg->next; 6076 if (seg == NULL) 6077 seg = memsegs; 6078 } while (seg->pages == seg->epages); 6079 new_pp = seg->pages; 6080 *cookie = (void *)seg; 6081 } 6082 6083 return (new_pp); 6084 } 6085 6086 6087 /* 6088 * Returns next page in list. Note: this function wraps 6089 * to the first page in the list upon reaching the end 6090 * of the list. Callers should be aware of this fact. 6091 */ 6092 6093 /* We should change this be a #define */ 6094 6095 page_t * 6096 page_next(page_t *pp) 6097 { 6098 return (page_nextn(pp, 1)); 6099 } 6100 6101 page_t * 6102 page_first() 6103 { 6104 return ((page_t *)memsegs->pages); 6105 } 6106 6107 6108 /* 6109 * This routine is called at boot with the initial memory configuration 6110 * and when memory is added or removed. 6111 */ 6112 void 6113 build_pfn_hash() 6114 { 6115 pfn_t cur; 6116 pgcnt_t index; 6117 struct memseg *pseg; 6118 int i; 6119 6120 /* 6121 * Clear memseg_hash array. 6122 * Since memory add/delete is designed to operate concurrently 6123 * with normal operation, the hash rebuild must be able to run 6124 * concurrently with page_numtopp_nolock(). To support this 6125 * functionality, assignments to memseg_hash array members must 6126 * be done atomically. 6127 * 6128 * NOTE: bzero() does not currently guarantee this for kernel 6129 * threads, and cannot be used here. 6130 */ 6131 for (i = 0; i < N_MEM_SLOTS; i++) 6132 memseg_hash[i] = NULL; 6133 6134 hat_kpm_mseghash_clear(N_MEM_SLOTS); 6135 6136 /* 6137 * Physmax is the last valid pfn. 6138 */ 6139 mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT; 6140 for (pseg = memsegs; pseg != NULL; pseg = pseg->next) { 6141 index = MEMSEG_PFN_HASH(pseg->pages_base); 6142 cur = pseg->pages_base; 6143 do { 6144 if (index >= N_MEM_SLOTS) 6145 index = MEMSEG_PFN_HASH(cur); 6146 6147 if (memseg_hash[index] == NULL || 6148 memseg_hash[index]->pages_base > pseg->pages_base) { 6149 memseg_hash[index] = pseg; 6150 hat_kpm_mseghash_update(index, pseg); 6151 } 6152 cur += mhash_per_slot; 6153 index++; 6154 } while (cur < pseg->pages_end); 6155 } 6156 } 6157 6158 /* 6159 * Return the pagenum for the pp 6160 */ 6161 pfn_t 6162 page_pptonum(page_t *pp) 6163 { 6164 return (pp->p_pagenum); 6165 } 6166 6167 /* 6168 * interface to the referenced and modified etc bits 6169 * in the PSM part of the page struct 6170 * when no locking is desired. 6171 */ 6172 void 6173 page_set_props(page_t *pp, uint_t flags) 6174 { 6175 ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0); 6176 pp->p_nrm |= (uchar_t)flags; 6177 } 6178 6179 void 6180 page_clr_all_props(page_t *pp) 6181 { 6182 pp->p_nrm = 0; 6183 } 6184 6185 /* 6186 * Clear p_lckcnt and p_cowcnt, adjusting freemem if required. 
6187 */ 6188 int 6189 page_clear_lck_cow(page_t *pp, int adjust) 6190 { 6191 int f_amount; 6192 6193 ASSERT(PAGE_EXCL(pp)); 6194 6195 /* 6196 * The page_struct_lock need not be acquired here since 6197 * we require the caller hold the page exclusively locked. 6198 */ 6199 f_amount = 0; 6200 if (pp->p_lckcnt) { 6201 f_amount = 1; 6202 pp->p_lckcnt = 0; 6203 } 6204 if (pp->p_cowcnt) { 6205 f_amount += pp->p_cowcnt; 6206 pp->p_cowcnt = 0; 6207 } 6208 6209 if (adjust && f_amount) { 6210 mutex_enter(&freemem_lock); 6211 availrmem += f_amount; 6212 mutex_exit(&freemem_lock); 6213 } 6214 6215 return (f_amount); 6216 } 6217 6218 /* 6219 * The following functions is called from free_vp_pages() 6220 * for an inexact estimate of a newly free'd page... 6221 */ 6222 ulong_t 6223 page_share_cnt(page_t *pp) 6224 { 6225 return (hat_page_getshare(pp)); 6226 } 6227 6228 int 6229 page_isshared(page_t *pp) 6230 { 6231 return (hat_page_checkshare(pp, 1)); 6232 } 6233 6234 int 6235 page_isfree(page_t *pp) 6236 { 6237 return (PP_ISFREE(pp)); 6238 } 6239 6240 int 6241 page_isref(page_t *pp) 6242 { 6243 return (hat_page_getattr(pp, P_REF)); 6244 } 6245 6246 int 6247 page_ismod(page_t *pp) 6248 { 6249 return (hat_page_getattr(pp, P_MOD)); 6250 } 6251 6252 /* 6253 * The following code all currently relates to the page capture logic: 6254 * 6255 * This logic is used for cases where there is a desire to claim a certain 6256 * physical page in the system for the caller. As it may not be possible 6257 * to capture the page immediately, the p_toxic bits are used in the page 6258 * structure to indicate that someone wants to capture this page. When the 6259 * page gets unlocked, the toxic flag will be noted and an attempt to capture 6260 * the page will be made. If it is successful, the original callers callback 6261 * will be called with the page to do with it what they please. 6262 * 6263 * There is also an async thread which wakes up to attempt to capture 6264 * pages occasionally which have the capture bit set. All of the pages which 6265 * need to be captured asynchronously have been inserted into the 6266 * page_capture_hash and thus this thread walks that hash list. Items in the 6267 * hash have an expiration time so this thread handles that as well by removing 6268 * the item from the hash if it has expired. 6269 * 6270 * Some important things to note are: 6271 * - if the PR_CAPTURE bit is set on a page, then the page is in the 6272 * page_capture_hash. The page_capture_hash_head.pchh_mutex is needed 6273 * to set and clear this bit, and while the lock is held is the only time 6274 * you can add or remove an entry from the hash. 6275 * - the PR_CAPTURE bit can only be set and cleared while holding the 6276 * page_capture_hash_head.pchh_mutex 6277 * - the t_flag field of the thread struct is used with the T_CAPTURING 6278 * flag to prevent recursion while dealing with large pages. 6279 * - pages which need to be retired never expire on the page_capture_hash. 6280 */ 6281 6282 static void page_capture_thread(void); 6283 static kthread_t *pc_thread_id; 6284 kcondvar_t pc_cv; 6285 static kmutex_t pc_thread_mutex; 6286 static clock_t pc_thread_shortwait; 6287 static clock_t pc_thread_longwait; 6288 static int pc_thread_ism_retry; 6289 6290 struct page_capture_callback pc_cb[PC_NUM_CALLBACKS]; 6291 6292 /* Note that this is a circular linked list */ 6293 typedef struct page_capture_hash_bucket { 6294 page_t *pp; 6295 uint_t szc; 6296 uint_t flags; 6297 clock_t expires; /* lbolt at which this request expires. 
*/ 6298 void *datap; /* Cached data passed in for callback */ 6299 struct page_capture_hash_bucket *next; 6300 struct page_capture_hash_bucket *prev; 6301 } page_capture_hash_bucket_t; 6302 6303 /* 6304 * Each hash bucket will have it's own mutex and two lists which are: 6305 * active (0): represents requests which have not been processed by 6306 * the page_capture async thread yet. 6307 * walked (1): represents requests which have been processed by the 6308 * page_capture async thread within it's given walk of this bucket. 6309 * 6310 * These are all needed so that we can synchronize all async page_capture 6311 * events. When the async thread moves to a new bucket, it will append the 6312 * walked list to the active list and walk each item one at a time, moving it 6313 * from the active list to the walked list. Thus if there is an async request 6314 * outstanding for a given page, it will always be in one of the two lists. 6315 * New requests will always be added to the active list. 6316 * If we were not able to capture a page before the request expired, we'd free 6317 * up the request structure which would indicate to page_capture that there is 6318 * no longer a need for the given page, and clear the PR_CAPTURE flag if 6319 * possible. 6320 */ 6321 typedef struct page_capture_hash_head { 6322 kmutex_t pchh_mutex; 6323 uint_t num_pages; 6324 page_capture_hash_bucket_t lists[2]; /* sentinel nodes */ 6325 } page_capture_hash_head_t; 6326 6327 #ifdef DEBUG 6328 #define NUM_PAGE_CAPTURE_BUCKETS 4 6329 #else 6330 #define NUM_PAGE_CAPTURE_BUCKETS 64 6331 #endif 6332 6333 page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS]; 6334 6335 /* for now use a very simple hash based upon the size of a page struct */ 6336 #define PAGE_CAPTURE_HASH(pp) \ 6337 ((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1))) 6338 6339 extern pgcnt_t swapfs_minfree; 6340 6341 int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap); 6342 6343 /* 6344 * a callback function is required for page capture requests. 6345 */ 6346 void 6347 page_capture_register_callback(uint_t index, clock_t duration, 6348 int (*cb_func)(page_t *, void *, uint_t)) 6349 { 6350 ASSERT(pc_cb[index].cb_active == 0); 6351 ASSERT(cb_func != NULL); 6352 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 6353 pc_cb[index].duration = duration; 6354 pc_cb[index].cb_func = cb_func; 6355 pc_cb[index].cb_active = 1; 6356 rw_exit(&pc_cb[index].cb_rwlock); 6357 } 6358 6359 void 6360 page_capture_unregister_callback(uint_t index) 6361 { 6362 int i, j; 6363 struct page_capture_hash_bucket *bp1; 6364 struct page_capture_hash_bucket *bp2; 6365 struct page_capture_hash_bucket *head = NULL; 6366 uint_t flags = (1 << index); 6367 6368 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 6369 ASSERT(pc_cb[index].cb_active == 1); 6370 pc_cb[index].duration = 0; /* Paranoia */ 6371 pc_cb[index].cb_func = NULL; /* Paranoia */ 6372 pc_cb[index].cb_active = 0; 6373 rw_exit(&pc_cb[index].cb_rwlock); 6374 6375 /* 6376 * Just move all the entries to a private list which we can walk 6377 * through without the need to hold any locks. 6378 * No more requests can get added to the hash lists for this consumer 6379 * as the cb_active field for the callback has been cleared. 
6380 */ 6381 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 6382 mutex_enter(&page_capture_hash[i].pchh_mutex); 6383 for (j = 0; j < 2; j++) { 6384 bp1 = page_capture_hash[i].lists[j].next; 6385 /* walk through all but first (sentinel) element */ 6386 while (bp1 != &page_capture_hash[i].lists[j]) { 6387 bp2 = bp1; 6388 if (bp2->flags & flags) { 6389 bp1 = bp2->next; 6390 bp1->prev = bp2->prev; 6391 bp2->prev->next = bp1; 6392 bp2->next = head; 6393 head = bp2; 6394 /* 6395 * Clear the PR_CAPTURE bit as we 6396 * hold appropriate locks here. 6397 */ 6398 page_clrtoxic(head->pp, PR_CAPTURE); 6399 page_capture_hash[i].num_pages--; 6400 continue; 6401 } 6402 bp1 = bp1->next; 6403 } 6404 } 6405 mutex_exit(&page_capture_hash[i].pchh_mutex); 6406 } 6407 6408 while (head != NULL) { 6409 bp1 = head; 6410 head = head->next; 6411 kmem_free(bp1, sizeof (*bp1)); 6412 } 6413 } 6414 6415 6416 /* 6417 * Find pp in the active list and move it to the walked list if it 6418 * exists. 6419 * Note that most often pp should be at the front of the active list 6420 * as it is currently used and thus there is no other sort of optimization 6421 * being done here as this is a linked list data structure. 6422 * Returns 1 on successful move or 0 if page could not be found. 6423 */ 6424 static int 6425 page_capture_move_to_walked(page_t *pp) 6426 { 6427 page_capture_hash_bucket_t *bp; 6428 int index; 6429 6430 index = PAGE_CAPTURE_HASH(pp); 6431 6432 mutex_enter(&page_capture_hash[index].pchh_mutex); 6433 bp = page_capture_hash[index].lists[0].next; 6434 while (bp != &page_capture_hash[index].lists[0]) { 6435 if (bp->pp == pp) { 6436 /* Remove from old list */ 6437 bp->next->prev = bp->prev; 6438 bp->prev->next = bp->next; 6439 6440 /* Add to new list */ 6441 bp->next = page_capture_hash[index].lists[1].next; 6442 bp->prev = &page_capture_hash[index].lists[1]; 6443 page_capture_hash[index].lists[1].next = bp; 6444 bp->next->prev = bp; 6445 mutex_exit(&page_capture_hash[index].pchh_mutex); 6446 6447 return (1); 6448 } 6449 bp = bp->next; 6450 } 6451 mutex_exit(&page_capture_hash[index].pchh_mutex); 6452 return (0); 6453 } 6454 6455 /* 6456 * Add a new entry to the page capture hash. The only case where a new 6457 * entry is not added is when the page capture consumer is no longer registered. 6458 * In this case, we'll silently not add the page to the hash. We know that 6459 * page retire will always be registered for the case where we are currently 6460 * unretiring a page and thus there are no conflicts. 
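 *
 * The expiration time for a new entry comes from the duration the consumer
 * supplied when it registered its callback, roughly:
 *
 *	bp1->expires = (duration == -1) ? (clock_t)-1 : lbolt + duration;
 *
 * so a duration of -1 yields a request that never times out, which is what
 * page retire relies on.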
6461 */ 6462 static void 6463 page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap) 6464 { 6465 page_capture_hash_bucket_t *bp1; 6466 page_capture_hash_bucket_t *bp2; 6467 int index; 6468 int cb_index; 6469 int i; 6470 #ifdef DEBUG 6471 page_capture_hash_bucket_t *tp1; 6472 int l; 6473 #endif 6474 6475 ASSERT(!(flags & CAPTURE_ASYNC)); 6476 6477 bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP); 6478 6479 bp1->pp = pp; 6480 bp1->szc = szc; 6481 bp1->flags = flags; 6482 bp1->datap = datap; 6483 6484 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6485 if ((flags >> cb_index) & 1) { 6486 break; 6487 } 6488 } 6489 6490 ASSERT(cb_index != PC_NUM_CALLBACKS); 6491 6492 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6493 if (pc_cb[cb_index].cb_active) { 6494 if (pc_cb[cb_index].duration == -1) { 6495 bp1->expires = (clock_t)-1; 6496 } else { 6497 bp1->expires = lbolt + pc_cb[cb_index].duration; 6498 } 6499 } else { 6500 /* There's no callback registered so don't add to the hash */ 6501 rw_exit(&pc_cb[cb_index].cb_rwlock); 6502 kmem_free(bp1, sizeof (*bp1)); 6503 return; 6504 } 6505 6506 index = PAGE_CAPTURE_HASH(pp); 6507 6508 /* 6509 * Only allow capture flag to be modified under this mutex. 6510 * Prevents multiple entries for same page getting added. 6511 */ 6512 mutex_enter(&page_capture_hash[index].pchh_mutex); 6513 6514 /* 6515 * if not already on the hash, set capture bit and add to the hash 6516 */ 6517 if (!(pp->p_toxic & PR_CAPTURE)) { 6518 #ifdef DEBUG 6519 /* Check for duplicate entries */ 6520 for (l = 0; l < 2; l++) { 6521 tp1 = page_capture_hash[index].lists[l].next; 6522 while (tp1 != &page_capture_hash[index].lists[l]) { 6523 if (tp1->pp == pp) { 6524 panic("page pp 0x%p already on hash " 6525 "at 0x%p\n", pp, tp1); 6526 } 6527 tp1 = tp1->next; 6528 } 6529 } 6530 6531 #endif 6532 page_settoxic(pp, PR_CAPTURE); 6533 bp1->next = page_capture_hash[index].lists[0].next; 6534 bp1->prev = &page_capture_hash[index].lists[0]; 6535 bp1->next->prev = bp1; 6536 page_capture_hash[index].lists[0].next = bp1; 6537 page_capture_hash[index].num_pages++; 6538 if (flags & CAPTURE_RETIRE) { 6539 page_retire_incr_pend_count(); 6540 } 6541 mutex_exit(&page_capture_hash[index].pchh_mutex); 6542 rw_exit(&pc_cb[cb_index].cb_rwlock); 6543 cv_signal(&pc_cv); 6544 return; 6545 } 6546 6547 /* 6548 * A page retire request will replace any other request. 6549 * A second physmem request which is for a different process than 6550 * the currently registered one will be dropped as there is 6551 * no way to hold the private data for both calls. 6552 * In the future, once there are more callers, this will have to 6553 * be worked out better as there needs to be private storage for 6554 * at least each type of caller (maybe have datap be an array of 6555 * *void's so that we can index based upon callers index). 6556 */ 6557 6558 /* walk hash list to update expire time */ 6559 for (i = 0; i < 2; i++) { 6560 bp2 = page_capture_hash[index].lists[i].next; 6561 while (bp2 != &page_capture_hash[index].lists[i]) { 6562 if (bp2->pp == pp) { 6563 if (flags & CAPTURE_RETIRE) { 6564 if (!(bp2->flags & CAPTURE_RETIRE)) { 6565 page_retire_incr_pend_count(); 6566 bp2->flags = flags; 6567 bp2->expires = bp1->expires; 6568 bp2->datap = datap; 6569 } 6570 } else { 6571 ASSERT(flags & CAPTURE_PHYSMEM); 6572 if (!(bp2->flags & CAPTURE_RETIRE) && 6573 (datap == bp2->datap)) { 6574 bp2->expires = bp1->expires; 6575 } 6576 } 6577 mutex_exit(&page_capture_hash[index]. 
6578 pchh_mutex); 6579 rw_exit(&pc_cb[cb_index].cb_rwlock); 6580 kmem_free(bp1, sizeof (*bp1)); 6581 return; 6582 } 6583 bp2 = bp2->next; 6584 } 6585 } 6586 6587 /* 6588 * the PR_CAPTURE flag is protected by the page_capture_hash mutexes 6589 * and thus it either has to be set or not set and can't change 6590 * while holding the mutex above. 6591 */ 6592 panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n", pp); 6593 } 6594 6595 /* 6596 * We have a page in our hands, lets try and make it ours by turning 6597 * it into a clean page like it had just come off the freelists. 6598 * 6599 * Returns 0 on success, with the page still EXCL locked. 6600 * On failure, the page will be unlocked, and returns EAGAIN 6601 */ 6602 static int 6603 page_capture_clean_page(page_t *pp) 6604 { 6605 page_t *newpp; 6606 int skip_unlock = 0; 6607 spgcnt_t count; 6608 page_t *tpp; 6609 int ret = 0; 6610 int extra; 6611 6612 ASSERT(PAGE_EXCL(pp)); 6613 ASSERT(!PP_RETIRED(pp)); 6614 ASSERT(curthread->t_flag & T_CAPTURING); 6615 6616 if (PP_ISFREE(pp)) { 6617 if (!page_reclaim(pp, NULL)) { 6618 skip_unlock = 1; 6619 ret = EAGAIN; 6620 goto cleanup; 6621 } 6622 ASSERT(pp->p_szc == 0); 6623 if (pp->p_vnode != NULL) { 6624 /* 6625 * Since this page came from the 6626 * cachelist, we must destroy the 6627 * old vnode association. 6628 */ 6629 page_hashout(pp, NULL); 6630 } 6631 goto cleanup; 6632 } 6633 6634 /* 6635 * If we know page_relocate will fail, skip it 6636 * It could still fail due to a UE on another page but we 6637 * can't do anything about that. 6638 */ 6639 if (pp->p_toxic & PR_UE) { 6640 goto skip_relocate; 6641 } 6642 6643 /* 6644 * It's possible that pages can not have a vnode as fsflush comes 6645 * through and cleans up these pages. It's ugly but that's how it is. 6646 */ 6647 if (pp->p_vnode == NULL) { 6648 goto skip_relocate; 6649 } 6650 6651 /* 6652 * Page was not free, so lets try to relocate it. 6653 * page_relocate only works with root pages, so if this is not a root 6654 * page, we need to demote it to try and relocate it. 6655 * Unfortunately this is the best we can do right now. 6656 */ 6657 newpp = NULL; 6658 if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) { 6659 if (page_try_demote_pages(pp) == 0) { 6660 ret = EAGAIN; 6661 goto cleanup; 6662 } 6663 } 6664 ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL); 6665 if (ret == 0) { 6666 page_t *npp; 6667 /* unlock the new page(s) */ 6668 while (count-- > 0) { 6669 ASSERT(newpp != NULL); 6670 npp = newpp; 6671 page_sub(&newpp, npp); 6672 page_unlock(npp); 6673 } 6674 ASSERT(newpp == NULL); 6675 /* 6676 * Check to see if the page we have is too large. 6677 * If so, demote it freeing up the extra pages. 6678 */ 6679 if (pp->p_szc > 0) { 6680 /* For now demote extra pages to szc == 0 */ 6681 extra = page_get_pagecnt(pp->p_szc) - 1; 6682 while (extra > 0) { 6683 tpp = pp->p_next; 6684 page_sub(&pp, tpp); 6685 tpp->p_szc = 0; 6686 page_free(tpp, 1); 6687 extra--; 6688 } 6689 /* Make sure to set our page to szc 0 as well */ 6690 ASSERT(pp->p_next == pp && pp->p_prev == pp); 6691 pp->p_szc = 0; 6692 } 6693 goto cleanup; 6694 } else if (ret == EIO) { 6695 ret = EAGAIN; 6696 goto cleanup; 6697 } else { 6698 /* 6699 * Need to reset return type as we failed to relocate the page 6700 * but that does not mean that some of the next steps will not 6701 * work. 
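 * For instance, the demotion, hat_pageunload() and page_hashout() steps
 * below may still leave us with a clean, unmapped page even though the
 * relocation failed.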
6702 */ 6703 ret = 0; 6704 } 6705 6706 skip_relocate: 6707 6708 if (pp->p_szc > 0) { 6709 if (page_try_demote_pages(pp) == 0) { 6710 ret = EAGAIN; 6711 goto cleanup; 6712 } 6713 } 6714 6715 ASSERT(pp->p_szc == 0); 6716 6717 if (hat_ismod(pp)) { 6718 ret = EAGAIN; 6719 goto cleanup; 6720 } 6721 if (PP_ISKAS(pp)) { 6722 ret = EAGAIN; 6723 goto cleanup; 6724 } 6725 if (pp->p_lckcnt || pp->p_cowcnt) { 6726 ret = EAGAIN; 6727 goto cleanup; 6728 } 6729 6730 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 6731 ASSERT(!hat_page_is_mapped(pp)); 6732 6733 if (hat_ismod(pp)) { 6734 /* 6735 * This is a semi-odd case as the page is now modified but not 6736 * mapped as we just unloaded the mappings above. 6737 */ 6738 ret = EAGAIN; 6739 goto cleanup; 6740 } 6741 if (pp->p_vnode != NULL) { 6742 page_hashout(pp, NULL); 6743 } 6744 6745 /* 6746 * At this point, the page should be in a clean state and 6747 * we can do whatever we want with it. 6748 */ 6749 6750 cleanup: 6751 if (ret != 0) { 6752 if (!skip_unlock) { 6753 page_unlock(pp); 6754 } 6755 } else { 6756 ASSERT(pp->p_szc == 0); 6757 ASSERT(PAGE_EXCL(pp)); 6758 6759 pp->p_next = pp; 6760 pp->p_prev = pp; 6761 } 6762 return (ret); 6763 } 6764 6765 /* 6766 * Various callers of page_trycapture() can have different restrictions upon 6767 * what memory they have access to. 6768 * Returns 0 on success, with the following error codes on failure: 6769 * EPERM - The requested page is long term locked, and thus repeated 6770 * requests to capture this page will likely fail. 6771 * ENOMEM - There was not enough free memory in the system to safely 6772 * map the requested page. 6773 * ENOENT - The requested page was inside the kernel cage, and the 6774 * PHYSMEM_CAGE flag was not set. 6775 */ 6776 int 6777 page_capture_pre_checks(page_t *pp, uint_t flags) 6778 { 6779 #if defined(__sparc) 6780 extern struct vnode prom_ppages; 6781 #endif /* __sparc */ 6782 6783 ASSERT(pp != NULL); 6784 6785 /* only physmem currently has restrictions */ 6786 if (!(flags & CAPTURE_PHYSMEM)) { 6787 return (0); 6788 } 6789 6790 #if defined(__sparc) 6791 if (pp->p_vnode == &prom_ppages) { 6792 return (EPERM); 6793 } 6794 6795 if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE)) { 6796 return (ENOENT); 6797 } 6798 6799 if (PP_ISNORELOCKERNEL(pp)) { 6800 return (EPERM); 6801 } 6802 #else 6803 if (PP_ISKAS(pp)) { 6804 return (EPERM); 6805 } 6806 #endif /* __sparc */ 6807 6808 if (availrmem < swapfs_minfree) { 6809 /* 6810 * We won't try to capture this page as we are 6811 * running low on memory. 6812 */ 6813 return (ENOMEM); 6814 } 6815 return (0); 6816 } 6817 6818 /* 6819 * Once we have a page in our mits, go ahead and complete the capture 6820 * operation. 6821 * Returns 1 on failure where page is no longer needed 6822 * Returns 0 on success 6823 * Returns -1 if there was a transient failure. 6824 * Failure cases must release the SE_EXCL lock on pp (usually via page_free). 
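 *
 * Callers dispatch on these return values much as page_itrycapture() does
 * (sketch of that existing caller, not an additional interface):
 *
 *	ret = page_capture_take_action(pp, flags, datap);
 *	if (ret == 0)
 *		return (0);
 *	if (ret == -1 && !(flags & CAPTURE_ASYNC))
 *		page_capture_add_hash(pp, szc, flags, datap);
 *	return (EAGAIN);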
6825 */ 6826 int 6827 page_capture_take_action(page_t *pp, uint_t flags, void *datap) 6828 { 6829 int cb_index; 6830 int ret = 0; 6831 page_capture_hash_bucket_t *bp1; 6832 page_capture_hash_bucket_t *bp2; 6833 int index; 6834 int found = 0; 6835 int i; 6836 6837 ASSERT(PAGE_EXCL(pp)); 6838 ASSERT(curthread->t_flag & T_CAPTURING); 6839 6840 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6841 if ((flags >> cb_index) & 1) { 6842 break; 6843 } 6844 } 6845 ASSERT(cb_index < PC_NUM_CALLBACKS); 6846 6847 /* 6848 * Remove the entry from the page_capture hash, but don't free it yet 6849 * as we may need to put it back. 6850 * Since we own the page at this point in time, we should find it 6851 * in the hash if this is an ASYNC call. If we don't it's likely 6852 * that the page_capture_async() thread decided that this request 6853 * had expired, in which case we just continue on. 6854 */ 6855 if (flags & CAPTURE_ASYNC) { 6856 6857 index = PAGE_CAPTURE_HASH(pp); 6858 6859 mutex_enter(&page_capture_hash[index].pchh_mutex); 6860 for (i = 0; i < 2 && !found; i++) { 6861 bp1 = page_capture_hash[index].lists[i].next; 6862 while (bp1 != &page_capture_hash[index].lists[i]) { 6863 if (bp1->pp == pp) { 6864 bp1->next->prev = bp1->prev; 6865 bp1->prev->next = bp1->next; 6866 page_capture_hash[index].num_pages--; 6867 page_clrtoxic(pp, PR_CAPTURE); 6868 found = 1; 6869 break; 6870 } 6871 bp1 = bp1->next; 6872 } 6873 } 6874 mutex_exit(&page_capture_hash[index].pchh_mutex); 6875 } 6876 6877 /* Synchronize with the unregister func. */ 6878 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6879 if (!pc_cb[cb_index].cb_active) { 6880 page_free(pp, 1); 6881 rw_exit(&pc_cb[cb_index].cb_rwlock); 6882 if (found) { 6883 kmem_free(bp1, sizeof (*bp1)); 6884 } 6885 return (1); 6886 } 6887 6888 /* 6889 * We need to remove the entry from the page capture hash and turn off 6890 * the PR_CAPTURE bit before calling the callback. We'll need to cache 6891 * the entry here, and then based upon the return value, cleanup 6892 * appropriately or re-add it to the hash, making sure that someone else 6893 * hasn't already done so. 6894 * It should be rare for the callback to fail and thus it's ok for 6895 * the failure path to be a bit complicated as the success path is 6896 * cleaner and the locking rules are easier to follow. 6897 */ 6898 6899 ret = pc_cb[cb_index].cb_func(pp, datap, flags); 6900 6901 rw_exit(&pc_cb[cb_index].cb_rwlock); 6902 6903 /* 6904 * If this was an ASYNC request, we need to cleanup the hash if the 6905 * callback was successful or if the request was no longer valid. 6906 * For non-ASYNC requests, we return failure to map and the caller 6907 * will take care of adding the request to the hash. 6908 * Note also that the callback itself is responsible for the page 6909 * at this point in time in terms of locking ... The most common 6910 * case for the failure path should just be a page_free. 6911 */ 6912 if (ret >= 0) { 6913 if (found) { 6914 if (bp1->flags & CAPTURE_RETIRE) { 6915 page_retire_decr_pend_count(); 6916 } 6917 kmem_free(bp1, sizeof (*bp1)); 6918 } 6919 return (ret); 6920 } 6921 if (!found) { 6922 return (ret); 6923 } 6924 6925 ASSERT(flags & CAPTURE_ASYNC); 6926 6927 /* 6928 * Check for expiration time first as we can just free it up if it's 6929 * expired. 
6930 */ 6931 if (lbolt > bp1->expires && bp1->expires != -1) { 6932 kmem_free(bp1, sizeof (*bp1)); 6933 return (ret); 6934 } 6935 6936 /* 6937 * The callback failed and there used to be an entry in the hash for 6938 * this page, so we need to add it back to the hash. 6939 */ 6940 mutex_enter(&page_capture_hash[index].pchh_mutex); 6941 if (!(pp->p_toxic & PR_CAPTURE)) { 6942 /* just add bp1 back to head of walked list */ 6943 page_settoxic(pp, PR_CAPTURE); 6944 bp1->next = page_capture_hash[index].lists[1].next; 6945 bp1->prev = &page_capture_hash[index].lists[1]; 6946 bp1->next->prev = bp1; 6947 page_capture_hash[index].lists[1].next = bp1; 6948 page_capture_hash[index].num_pages++; 6949 mutex_exit(&page_capture_hash[index].pchh_mutex); 6950 return (ret); 6951 } 6952 6953 /* 6954 * Otherwise there was a new capture request added to list 6955 * Need to make sure that our original data is represented if 6956 * appropriate. 6957 */ 6958 for (i = 0; i < 2; i++) { 6959 bp2 = page_capture_hash[index].lists[i].next; 6960 while (bp2 != &page_capture_hash[index].lists[i]) { 6961 if (bp2->pp == pp) { 6962 if (bp1->flags & CAPTURE_RETIRE) { 6963 if (!(bp2->flags & CAPTURE_RETIRE)) { 6964 bp2->szc = bp1->szc; 6965 bp2->flags = bp1->flags; 6966 bp2->expires = bp1->expires; 6967 bp2->datap = bp1->datap; 6968 } 6969 } else { 6970 ASSERT(bp1->flags & CAPTURE_PHYSMEM); 6971 if (!(bp2->flags & CAPTURE_RETIRE)) { 6972 bp2->szc = bp1->szc; 6973 bp2->flags = bp1->flags; 6974 bp2->expires = bp1->expires; 6975 bp2->datap = bp1->datap; 6976 } 6977 } 6978 mutex_exit(&page_capture_hash[index]. 6979 pchh_mutex); 6980 kmem_free(bp1, sizeof (*bp1)); 6981 return (ret); 6982 } 6983 bp2 = bp2->next; 6984 } 6985 } 6986 panic("PR_CAPTURE set but not on hash for pp 0x%p\n", pp); 6987 /*NOTREACHED*/ 6988 } 6989 6990 /* 6991 * Try to capture the given page for the caller specified in the flags 6992 * parameter. The page will either be captured and handed over to the 6993 * appropriate callback, or will be queued up in the page capture hash 6994 * to be captured asynchronously. 6995 * If the current request is due to an async capture, the page must be 6996 * exclusively locked before calling this function. 6997 * Currently szc must be 0 but in the future this should be expandable to 6998 * other page sizes. 6999 * Returns 0 on success, with the following error codes on failure: 7000 * EPERM - The requested page is long term locked, and thus repeated 7001 * requests to capture this page will likely fail. 7002 * ENOMEM - There was not enough free memory in the system to safely 7003 * map the requested page. 7004 * ENOENT - The requested page was inside the kernel cage, and the 7005 * CAPTURE_GET_CAGE flag was not set. 7006 * EAGAIN - The requested page could not be capturead at this point in 7007 * time but future requests will likely work. 7008 * EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED flag 7009 * was not set. 7010 */ 7011 int 7012 page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 7013 { 7014 int ret; 7015 int cb_index; 7016 7017 if (flags & CAPTURE_ASYNC) { 7018 ASSERT(PAGE_EXCL(pp)); 7019 goto async; 7020 } 7021 7022 /* Make sure there's enough availrmem ... 
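 * The page_capture_pre_checks() call below rejects requests with EPERM,
 * ENOMEM or ENOENT as described in the block comment above this function.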
*/ 7023 ret = page_capture_pre_checks(pp, flags); 7024 if (ret != 0) { 7025 return (ret); 7026 } 7027 7028 if (!page_trylock(pp, SE_EXCL)) { 7029 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 7030 if ((flags >> cb_index) & 1) { 7031 break; 7032 } 7033 } 7034 ASSERT(cb_index < PC_NUM_CALLBACKS); 7035 ret = EAGAIN; 7036 /* Special case for retired pages */ 7037 if (PP_RETIRED(pp)) { 7038 if (flags & CAPTURE_GET_RETIRED) { 7039 if (!page_unretire_pp(pp, PR_UNR_TEMP)) { 7040 /* 7041 * Need to set capture bit and add to 7042 * hash so that the page will be 7043 * retired when freed. 7044 */ 7045 page_capture_add_hash(pp, szc, 7046 CAPTURE_RETIRE, NULL); 7047 ret = 0; 7048 goto own_page; 7049 } 7050 } else { 7051 return (EBUSY); 7052 } 7053 } 7054 page_capture_add_hash(pp, szc, flags, datap); 7055 return (ret); 7056 } 7057 7058 async: 7059 ASSERT(PAGE_EXCL(pp)); 7060 7061 /* Need to check for physmem async requests that availrmem is sane */ 7062 if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) == 7063 (CAPTURE_ASYNC | CAPTURE_PHYSMEM) && 7064 (availrmem < swapfs_minfree)) { 7065 page_unlock(pp); 7066 return (ENOMEM); 7067 } 7068 7069 ret = page_capture_clean_page(pp); 7070 7071 if (ret != 0) { 7072 /* We failed to get the page, so lets add it to the hash */ 7073 if (!(flags & CAPTURE_ASYNC)) { 7074 page_capture_add_hash(pp, szc, flags, datap); 7075 } 7076 return (ret); 7077 } 7078 7079 own_page: 7080 ASSERT(PAGE_EXCL(pp)); 7081 ASSERT(pp->p_szc == 0); 7082 7083 /* Call the callback */ 7084 ret = page_capture_take_action(pp, flags, datap); 7085 7086 if (ret == 0) { 7087 return (0); 7088 } 7089 7090 /* 7091 * Note that in the failure cases from page_capture_take_action, the 7092 * EXCL lock will have already been dropped. 7093 */ 7094 if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) { 7095 page_capture_add_hash(pp, szc, flags, datap); 7096 } 7097 return (EAGAIN); 7098 } 7099 7100 int 7101 page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 7102 { 7103 int ret; 7104 7105 curthread->t_flag |= T_CAPTURING; 7106 ret = page_itrycapture(pp, szc, flags, datap); 7107 curthread->t_flag &= ~T_CAPTURING; /* xor works as we know its set */ 7108 return (ret); 7109 } 7110 7111 /* 7112 * When unlocking a page which has the PR_CAPTURE bit set, this routine 7113 * gets called to try and capture the page. 7114 */ 7115 void 7116 page_unlock_capture(page_t *pp) 7117 { 7118 page_capture_hash_bucket_t *bp; 7119 int index; 7120 int i; 7121 uint_t szc; 7122 uint_t flags = 0; 7123 void *datap; 7124 kmutex_t *mp; 7125 extern vnode_t retired_pages; 7126 7127 /* 7128 * We need to protect against a possible deadlock here where we own 7129 * the vnode page hash mutex and want to acquire it again as there 7130 * are locations in the code, where we unlock a page while holding 7131 * the mutex which can lead to the page being captured and eventually 7132 * end up here. As we may be hashing out the old page and hashing into 7133 * the retire vnode, we need to make sure we don't own them. 7134 * Other callbacks who do hash operations also need to make sure that 7135 * before they hashin to a vnode that they do not currently own the 7136 * vphm mutex otherwise there will be a panic. 
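 *
 * A callback which hashes the captured page into its own vnode can guard
 * against the same deadlock, for example (illustrative only; target_vp is
 * whatever vnode the callback intends to page_hashin() the page to):
 *
 *	if (mutex_owned(page_vnode_mutex(target_vp)))
 *		return (-1);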
7137 */ 7138 if (mutex_owned(page_vnode_mutex(&retired_pages))) { 7139 page_unlock_nocapture(pp); 7140 return; 7141 } 7142 if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) { 7143 page_unlock_nocapture(pp); 7144 return; 7145 } 7146 7147 index = PAGE_CAPTURE_HASH(pp); 7148 7149 mp = &page_capture_hash[index].pchh_mutex; 7150 mutex_enter(mp); 7151 for (i = 0; i < 2; i++) { 7152 bp = page_capture_hash[index].lists[i].next; 7153 while (bp != &page_capture_hash[index].lists[i]) { 7154 if (bp->pp == pp) { 7155 szc = bp->szc; 7156 flags = bp->flags | CAPTURE_ASYNC; 7157 datap = bp->datap; 7158 mutex_exit(mp); 7159 (void) page_trycapture(pp, szc, flags, datap); 7160 return; 7161 } 7162 bp = bp->next; 7163 } 7164 } 7165 7166 /* Failed to find page in hash so clear flags and unlock it. */ 7167 page_clrtoxic(pp, PR_CAPTURE); 7168 page_unlock(pp); 7169 7170 mutex_exit(mp); 7171 } 7172 7173 void 7174 page_capture_init() 7175 { 7176 int i; 7177 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7178 page_capture_hash[i].lists[0].next = 7179 &page_capture_hash[i].lists[0]; 7180 page_capture_hash[i].lists[0].prev = 7181 &page_capture_hash[i].lists[0]; 7182 page_capture_hash[i].lists[1].next = 7183 &page_capture_hash[i].lists[1]; 7184 page_capture_hash[i].lists[1].prev = 7185 &page_capture_hash[i].lists[1]; 7186 } 7187 7188 pc_thread_shortwait = 23 * hz; 7189 pc_thread_longwait = 1201 * hz; 7190 pc_thread_ism_retry = 3; 7191 mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL); 7192 cv_init(&pc_cv, NULL, CV_DEFAULT, NULL); 7193 pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0, 7194 TS_RUN, minclsyspri); 7195 } 7196 7197 /* 7198 * It is necessary to scrub any failing pages prior to reboot in order to 7199 * prevent a latent error trap from occurring on the next boot. 7200 */ 7201 void 7202 page_retire_mdboot() 7203 { 7204 page_t *pp; 7205 int i, j; 7206 page_capture_hash_bucket_t *bp; 7207 7208 /* walk lists looking for pages to scrub */ 7209 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7210 if (page_capture_hash[i].num_pages == 0) 7211 continue; 7212 7213 mutex_enter(&page_capture_hash[i].pchh_mutex); 7214 7215 for (j = 0; j < 2; j++) { 7216 bp = page_capture_hash[i].lists[j].next; 7217 while (bp != &page_capture_hash[i].lists[j]) { 7218 pp = bp->pp; 7219 if (!PP_ISKAS(pp) && PP_TOXIC(pp)) { 7220 pp->p_selock = -1; /* pacify ASSERTs */ 7221 PP_CLRFREE(pp); 7222 pagescrub(pp, 0, PAGESIZE); 7223 pp->p_selock = 0; 7224 } 7225 bp = bp->next; 7226 } 7227 } 7228 mutex_exit(&page_capture_hash[i].pchh_mutex); 7229 } 7230 } 7231 7232 /* 7233 * Walk the page_capture_hash trying to capture pages and also cleanup old 7234 * entries which have expired. 
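 *
 * For each bucket the walked list (lists[1]) is first spliced onto the head
 * of the active list (lists[0]); the walk then drains lists[0] one entry at
 * a time, and page_capture_move_to_walked() moves anything that could not
 * be captured onto lists[1] for the next pass.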
7235 */
7236 void
7237 page_capture_async()
7238 {
7239 page_t *pp;
7240 int i;
7241 int ret;
7242 page_capture_hash_bucket_t *bp1, *bp2;
7243 uint_t szc;
7244 uint_t flags;
7245 void *datap;
7246
7247 /* If there are outstanding pages to be captured, get to work */
7248 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
7249 if (page_capture_hash[i].num_pages == 0)
7250 continue;
7251 /* Append list 1 to list 0 and then walk through list 0 */
7252 mutex_enter(&page_capture_hash[i].pchh_mutex);
7253 bp1 = &page_capture_hash[i].lists[1];
7254 bp2 = bp1->next;
7255 if (bp1 != bp2) {
7256 bp1->prev->next = page_capture_hash[i].lists[0].next;
7257 bp2->prev = &page_capture_hash[i].lists[0];
7258 page_capture_hash[i].lists[0].next->prev = bp1->prev;
7259 page_capture_hash[i].lists[0].next = bp2;
7260 bp1->next = bp1;
7261 bp1->prev = bp1;
7262 }
7263
7264 /* list[1] will be empty now */
7265
7266 bp1 = page_capture_hash[i].lists[0].next;
7267 while (bp1 != &page_capture_hash[i].lists[0]) {
7268 /* Check expiration time */
7269 if ((lbolt > bp1->expires && bp1->expires != -1) ||
7270 page_deleted(bp1->pp)) {
7271 page_capture_hash[i].lists[0].next = bp1->next;
7272 bp1->next->prev =
7273 &page_capture_hash[i].lists[0];
7274 page_capture_hash[i].num_pages--;
7275
7276 /*
7277 * We can safely remove the PR_CAPTURE bit
7278 * without holding the EXCL lock on the page
7279 * as the PR_CAPTURE bit requires that the
7280 * page_capture_hash[].pchh_mutex be held
7281 * to modify it.
7282 */
7283 page_clrtoxic(bp1->pp, PR_CAPTURE);
7284 mutex_exit(&page_capture_hash[i].pchh_mutex);
7285 kmem_free(bp1, sizeof (*bp1));
7286 mutex_enter(&page_capture_hash[i].pchh_mutex);
7287 bp1 = page_capture_hash[i].lists[0].next;
7288 continue;
7289 }
7290 pp = bp1->pp;
7291 szc = bp1->szc;
7292 flags = bp1->flags;
7293 datap = bp1->datap;
7294 mutex_exit(&page_capture_hash[i].pchh_mutex);
7295 if (page_trylock(pp, SE_EXCL)) {
7296 ret = page_trycapture(pp, szc,
7297 flags | CAPTURE_ASYNC, datap);
7298 } else {
7299 ret = 1; /* move to walked hash */
7300 }
7301
7302 if (ret != 0) {
7303 /* Move to walked hash */
7304 (void) page_capture_move_to_walked(pp);
7305 }
7306 mutex_enter(&page_capture_hash[i].pchh_mutex);
7307 bp1 = page_capture_hash[i].lists[0].next;
7308 }
7309
7310 mutex_exit(&page_capture_hash[i].pchh_mutex);
7311 }
7312 }
7313
7314 /*
7315 * This function is called by the page_capture_thread, and is needed
7316 * in order to initiate aio cleanup, so that pages used in aio
7317 * will be unlocked and subsequently retired by page_capture_thread.
7318 */
7319 static int
7320 do_aio_cleanup(void)
7321 {
7322 proc_t *procp;
7323 int (*aio_cleanup_dr_delete_memory)(proc_t *);
7324 int cleaned = 0;
7325
7326 if (modload("sys", "kaio") == -1) {
7327 cmn_err(CE_WARN, "do_aio_cleanup: cannot load kaio");
7328 return (0);
7329 }
7330 /*
7331 * We use the aio_cleanup_dr_delete_memory function to
7332 * initiate the actual cleanup; this function will wake
7333 * up the per-process aio_cleanup_thread.
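 * The symbol is resolved at run time with modgetsymvalue() after the
 * modload() above, which keeps this file free of a hard, link-time
 * dependency on the kaio module.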
7334 */
7335 aio_cleanup_dr_delete_memory = (int (*)(proc_t *))
7336 modgetsymvalue("aio_cleanup_dr_delete_memory", 0);
7337 if (aio_cleanup_dr_delete_memory == NULL) {
7338 cmn_err(CE_WARN,
7339 "aio_cleanup_dr_delete_memory not found in kaio");
7340 return (0);
7341 }
7342 mutex_enter(&pidlock);
7343 for (procp = practive; (procp != NULL); procp = procp->p_next) {
7344 mutex_enter(&procp->p_lock);
7345 if (procp->p_aio != NULL) {
7346 /* cleanup proc's outstanding kaio */
7347 cleaned += (*aio_cleanup_dr_delete_memory)(procp);
7348 }
7349 mutex_exit(&procp->p_lock);
7350 }
7351 mutex_exit(&pidlock);
7352 return (cleaned);
7353 }
7354
7355 /*
7356 * Helper function for page_capture_thread.
7357 */
7358 static void
7359 page_capture_handle_outstanding(void)
7360 {
7361 extern size_t spt_used;
7362 int ntry;
7363
7364 if (!page_retire_pend_count()) {
7365 /*
7366 * Do we really want to be this aggressive
7367 * for things other than page_retire?
7368 * Maybe have a counter for each callback
7369 * type to guide how aggressive we should
7370 * be here. Thus if there's at least one
7371 * page for page_retire we go ahead and reap
7372 * like this.
7373 */
7374 kmem_reap();
7375 seg_preap();
7376 page_capture_async();
7377 } else {
7378 /*
7379 * There are pages pending retirement, so
7380 * we reap prior to attempting to capture.
7381 */
7382 kmem_reap();
7383 /*
7384 * When ISM is in use, we need to disable and
7385 * purge the seg_pcache, and initiate aio
7386 * cleanup in order to release page locks and
7387 * subsequently retire pages in need of
7388 * retirement.
7389 */
7390 if (spt_used) {
7391 /* disable and purge seg_pcache */
7392 (void) seg_p_disable();
7393 for (ntry = 0; ntry < pc_thread_ism_retry; ntry++) {
7394 if (!page_retire_pend_count())
7395 break;
7396 if (do_aio_cleanup()) {
7397 /*
7398 * allow the apps' cleanup threads
7399 * to run
7400 */
7401 delay(pc_thread_shortwait);
7402 }
7403 page_capture_async();
7404 }
7405 /* reenable seg_pcache */
7406 seg_p_enable();
7407 } else {
7408 seg_preap();
7409 page_capture_async();
7410 }
7411 }
7412 }
7413
7414 /*
7415 * The page_capture_thread loops forever, looking to see if there are
7416 * pages still waiting to be captured.
7417 */
7418 static void
7419 page_capture_thread(void)
7420 {
7421 callb_cpr_t c;
7422 int outstanding;
7423 int i;
7424
7425 CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr, "page_capture");
7426
7427 mutex_enter(&pc_thread_mutex);
7428 for (;;) {
7429 outstanding = 0;
7430 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++)
7431 outstanding += page_capture_hash[i].num_pages;
7432 if (outstanding) {
7433 page_capture_handle_outstanding();
7434 CALLB_CPR_SAFE_BEGIN(&c);
7435 (void) cv_timedwait(&pc_cv, &pc_thread_mutex,
7436 lbolt + pc_thread_shortwait);
7437 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex);
7438 } else {
7439 CALLB_CPR_SAFE_BEGIN(&c);
7440 (void) cv_timedwait(&pc_cv, &pc_thread_mutex,
7441 lbolt + pc_thread_longwait);
7442 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex);
7443 }
7444 }
7445 /*NOTREACHED*/
7446 }
7447
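
/*
 * Illustrative sketch of how a consumer drives the page capture interface
 * above (comment only, not compiled; the callback index PC_EXAMPLE and the
 * callback body are hypothetical, the page_capture_* calls are the real
 * entry points):
 *
 *	static int
 *	example_capture_cb(page_t *pp, void *datap, uint_t flags)
 *	{
 *		...
 *		take ownership of the SE_EXCL locked page, or
 *		page_free(pp, 1) and return -1 for a transient failure
 *		...
 *		return (0);
 *	}
 *
 *	page_capture_register_callback(PC_EXAMPLE, 60 * hz, example_capture_cb);
 *	ret = page_trycapture(pp, 0, 1 << PC_EXAMPLE, NULL);
 *	...
 *	page_capture_unregister_callback(PC_EXAMPLE);
 *
 * A return of 0 from page_trycapture() means the callback was handed the
 * page; EAGAIN means the request was queued on the page_capture_hash and
 * the async thread will keep retrying it until it expires.
 */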