/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - physical page management.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vm.h>
#include <sys/vtrace.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/tnf_probe.h>
#include <sys/condvar_impl.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/kflt_mem.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/strlog.h>
#include <sys/mman.h>
#include <sys/ontrap.h>
#include <sys/lgrp.h>
#include <sys/vfs.h>

#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/pvn.h>
#include <vm/seg_kmem.h>
#include <vm/vm_dep.h>
#include <sys/vm_usage.h>
#include <fs/fs_subr.h>
#include <sys/ddi.h>
#include <sys/modctl.h>

static int nopageage = 0;

static pgcnt_t max_page_get;	/* max page_get request size in pages */
pgcnt_t total_pages = 0;	/* total number of pages (used by /proc) */

/*
 * freemem_lock protects all freemem variables: availrmem. It also
 * protects the globals which track availrmem changes for accurate
 * kernel footprint calculation. See below for an explanation of
 * these globals.
 */
kmutex_t	freemem_lock;
pgcnt_t		availrmem;
pgcnt_t		availrmem_initial;

/*
 * These globals track availrmem changes to get a more accurate
 * estimate of the kernel size. Historically pp_kernel is used for
 * kernel size and is based on availrmem. But availrmem is adjusted for
 * locked pages in the system, not just for kernel locked pages.
 * These new counters will track the pages locked through segvn and
 * by explicit user locking.
 *
 * pages_locked : How many pages are locked because of user specified
 * locking through mlock or plock.
 *
 * pages_useclaim, pages_claimed : These two variables track the
 * claim adjustments because of the protection changes on a segvn segment.
 *
 * All these globals are protected by the same lock which protects availrmem.
 */
pgcnt_t pages_locked = 0;
pgcnt_t pages_useclaim = 0;
pgcnt_t pages_claimed = 0;
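
/*
 * Illustrative sketch, not part of the original source: how a
 * hypothetical caller might account for user-locked pages under the
 * locking rule described above (freemem_lock covers availrmem and the
 * pages_locked/pages_useclaim/pages_claimed counters). The function
 * name is made up for illustration.
 */
#ifdef notdef
static void
example_account_user_lock(pgcnt_t npages)
{
	mutex_enter(&freemem_lock);
	availrmem -= npages;		/* locked pages can't be reclaimed */
	pages_locked += npages;		/* mlock()/plock() style locking */
	mutex_exit(&freemem_lock);
}
#endif /* notdef */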

/*
 * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
 */
static kmutex_t	new_freemem_lock;
static uint_t	freemem_wait;	/* someone waiting for freemem */
static kcondvar_t freemem_cv;

/*
 * The logical page free list is maintained as two lists, the 'free'
 * and the 'cache' lists.
 * The free list contains those pages that should be reused first.
 *
 * The implementation of the lists is machine dependent.
 * PAGE_GET_FREELISTS(), page_get_cachelist(),
 * page_list_sub(), and page_list_add()
 * form the interface to the machine dependent implementation.
 *
 * Pages with p_free set are on the cache list.
 * Pages with p_free and p_age set are on the free list.
 *
 * A page may be locked while on either list.
 */

/*
 * free list accounting stuff.
 *
 * Spread out the value for the number of pages on the
 * page free and page cache lists. If there is just one
 * value, then it must be under just one lock.
 * The lock contention and cache traffic are a real bother.
 *
 * When we acquire and then drop a single pcf lock
 * we can start in the middle of the array of pcf structures.
 * If we acquire more than one pcf lock at a time, we need to
 * start at the front to avoid deadlocking.
 *
 * pcf_count holds the number of pages in each pool.
 *
 * pcf_block is set when page_create_get_something() has asked the
 * PSM page freelist and page cachelist routines without specifying
 * a color and nothing came back. This is used to block anything
 * else from moving pages from one list to the other while the
 * lists are searched again. If a page is freed while pcf_block is
 * set, then pcf_reserve is incremented. pcgs_unblock() takes care
 * of clearing pcf_block, doing the wakeups, etc.
 */

#define	MAX_PCF_FANOUT		NCPU
static uint_t	pcf_fanout = 1;	/* Will get changed at boot time */
static uint_t	pcf_fanout_mask = 0;

struct pcf {
	kmutex_t	pcf_lock;	/* protects the structure */
	uint_t		pcf_count;	/* page count */
	uint_t		pcf_wait;	/* number of waiters */
	uint_t		pcf_block;	/* pcgs flag to page_free() */
	uint_t		pcf_reserve;	/* pages freed after pcf_block set */
	uint_t		pcf_fill[10];	/* to line up on the caches */
};

/*
 * PCF_INDEX hash needs to be dynamic (every so often the hash changes where
 * it will hash the cpu to). This is done to prevent a drain condition
 * from happening. This drain condition will occur when pcf_count decrement
 * occurs on cpu A and the increment of pcf_count always occurs on cpu B. An
 * example of this shows up with device interrupts. The dma buffer is allocated
 * by the cpu requesting the IO thus the pcf_count is decremented based on that.
 * When the memory is returned by the interrupt thread, the pcf_count will be
 * incremented based on the cpu servicing the interrupt.
 */
static struct pcf pcf[MAX_PCF_FANOUT];
#define	PCF_INDEX() ((int)(((long)CPU->cpu_seqid) + \
	(randtick() >> 24)) & (pcf_fanout_mask))
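
/*
 * Illustrative sketch, not part of the original source: the
 * single-bucket discipline described above. A taker of one page may
 * start at PCF_INDEX() and wrap, because only one pcf_lock is held at
 * a time; multi-bucket takers must instead start at pcf[0] to avoid
 * deadlock. The function name is hypothetical.
 */
#ifdef notdef
static int
example_take_one_page(void)
{
	struct pcf	*p = &pcf[PCF_INDEX()];
	uint_t		i;

	for (i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		if (p->pcf_count > 0) {
			p->pcf_count--;	/* claim one page from this pool */
			mutex_exit(&p->pcf_lock);
			return (1);
		}
		mutex_exit(&p->pcf_lock);
		if (++p >= &pcf[pcf_fanout])
			p = pcf;	/* wrap around the fanout array */
	}
	return (0);	/* no bucket alone could supply a page */
}
#endif /* notdef */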

static int pcf_decrement_bucket(pgcnt_t);
static int pcf_decrement_multiple(pgcnt_t *, pgcnt_t, int);

kmutex_t	pcgs_lock;		/* serializes page_create_get_ */
kmutex_t	pcgs_cagelock;		/* serializes NOSLEEP cage allocs */
kmutex_t	pcgs_wait_lock;		/* used for delay in pcgs */
static kcondvar_t	pcgs_cv;	/* cv for delay in pcgs */

#ifdef VM_STATS

/*
 * No locks, but so what, they are only statistics.
 */

static struct page_tcnt {
	int	pc_free_cache;		/* free's into cache list */
	int	pc_free_dontneed;	/* free's with dontneed */
	int	pc_free_pageout;	/* free's from pageout */
	int	pc_free_free;		/* free's into free list */
	int	pc_free_pages;		/* free's into large page free list */
	int	pc_destroy_pages;	/* large page destroy's */
	int	pc_get_cache;		/* get's from cache list */
	int	pc_get_free;		/* get's from free list */
	int	pc_reclaim;		/* reclaim's */
	int	pc_abortfree;		/* abort's of free pages */
	int	pc_find_hit;		/* find's that find page */
	int	pc_find_miss;		/* find's that don't find page */
	int	pc_destroy_free;	/* # of free pages destroyed */
#define	PC_HASH_CNT	(4*PAGE_HASHAVELEN)
	int	pc_find_hashlen[PC_HASH_CNT+1];
	int	pc_addclaim_pages;
	int	pc_subclaim_pages;
	int	pc_free_replacement_page[2];
	int	pc_try_demote_pages[6];
	int	pc_demote_pages[2];
} pagecnt;

uint_t	hashin_count;
uint_t	hashin_not_held;
uint_t	hashin_already;

uint_t	hashout_count;
uint_t	hashout_not_held;

uint_t	page_create_count;
uint_t	page_create_not_enough;
uint_t	page_create_not_enough_again;
uint_t	page_create_zero;
uint_t	page_create_hashout;
uint_t	page_create_page_lock_failed;
uint_t	page_create_trylock_failed;
uint_t	page_create_found_one;
uint_t	page_create_hashin_failed;
uint_t	page_create_dropped_phm;

uint_t	page_create_new;
uint_t	page_create_exists;
uint_t	page_create_putbacks;
uint_t	page_create_overshoot;

uint_t	page_reclaim_zero;
uint_t	page_reclaim_zero_locked;

uint_t	page_rename_exists;
uint_t	page_rename_count;

uint_t	page_lookup_cnt[20];
uint_t	page_lookup_nowait_cnt[10];
uint_t	page_find_cnt;
uint_t	page_exists_cnt;
uint_t	page_exists_forreal_cnt;
uint_t	page_lookup_dev_cnt;
uint_t	get_cachelist_cnt;
uint_t	page_create_cnt[10];
uint_t	alloc_pages[9];
uint_t	page_exphcontg[19];
uint_t	page_create_large_cnt[10];

/*
 * Collects statistics.
 */
#define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
	uint_t	mylen = 0; \
		\
	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash, mylen++) { \
		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
			break; \
	} \
	if ((pp) != NULL) \
		pagecnt.pc_find_hit++; \
	else \
		pagecnt.pc_find_miss++; \
	if (mylen > PC_HASH_CNT) \
		mylen = PC_HASH_CNT; \
	pagecnt.pc_find_hashlen[mylen]++; \
}

#else	/* VM_STATS */

/*
 * Don't collect statistics
 */
#define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
			break; \
	} \
}

#endif	/* VM_STATS */


#ifdef DEBUG
#define	MEMSEG_SEARCH_STATS
#endif

#ifdef MEMSEG_SEARCH_STATS
struct memseg_stats {
	uint_t nsearch;
	uint_t nlastwon;
	uint_t nhashwon;
	uint_t nnotfound;
} memseg_stats;

#define	MEMSEG_STAT_INCR(v) \
	atomic_add_32(&memseg_stats.v, 1)
#else
#define	MEMSEG_STAT_INCR(x)
#endif

struct memseg *memsegs;		/* list of memory segments */

/*
 * /etc/system tunable to control large page allocation heuristic.
 *
 * Setting to LPAP_LOCAL will heavily prefer the local lgroup over remote lgroup
 * for large page allocation requests. If a large page is not readily
 * available on the local freelists we will go through additional effort
 * to create a large page, potentially moving smaller pages around to coalesce
 * larger pages in the local lgroup.
 * Default value of LPAP_DEFAULT will go to remote freelists if large pages
 * are not readily available in the local lgroup.
 */
enum lpap {
	LPAP_DEFAULT,	/* default large page allocation policy */
	LPAP_LOCAL	/* local large page allocation policy */
};

enum lpap lpg_alloc_prefer = LPAP_DEFAULT;

static void page_init_mem_config(void);
static int page_do_hashin(page_t *, vnode_t *, u_offset_t);
static void page_do_hashout(page_t *);
static void page_capture_init();
int page_capture_take_action(page_t *, uint_t, void *);

static void page_demote_vp_pages(page_t *);


void
pcf_init(void)
{
	if (boot_ncpus != -1) {
		pcf_fanout = boot_ncpus;
	} else {
		pcf_fanout = max_ncpus;
	}
#ifdef sun4v
	/*
	 * Force at least 4 buckets if possible for sun4v.
	 */
	pcf_fanout = MAX(pcf_fanout, 4);
#endif /* sun4v */

	/*
	 * Round up to the nearest power of 2.
	 */
	pcf_fanout = MIN(pcf_fanout, MAX_PCF_FANOUT);
	if (!ISP2(pcf_fanout)) {
		pcf_fanout = 1 << highbit(pcf_fanout);

		if (pcf_fanout > MAX_PCF_FANOUT) {
			pcf_fanout = 1 << (highbit(MAX_PCF_FANOUT) - 1);
		}
	}
	pcf_fanout_mask = pcf_fanout - 1;
}
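
/*
 * Worked example, illustrative only (assumes MAX_PCF_FANOUT, i.e.
 * NCPU, is at least 8): with boot_ncpus == 6, pcf_fanout is clamped
 * to MIN(6, MAX_PCF_FANOUT) == 6 and then rounded up to the power of
 * two 1 << highbit(6) == 8, so pcf_fanout_mask becomes 7 and
 * PCF_INDEX() can mask instead of dividing.
 */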

/*
 * vm subsystem related initialization
 */
void
vm_init(void)
{
	boolean_t callb_vm_cpr(void *, int);

	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
	page_init_mem_config();
	page_retire_init();
	vm_usage_init();
	page_capture_init();
}

/*
 * This function is called at startup and when memory is added or deleted.
 */
void
init_pages_pp_maximum()
{
	static pgcnt_t p_min;
	static pgcnt_t pages_pp_maximum_startup;
	static pgcnt_t avrmem_delta;
	static int init_done;
	static int user_set;	/* true if set in /etc/system */

	if (init_done == 0) {

		/* If the user specified a value, save it */
		if (pages_pp_maximum != 0) {
			user_set = 1;
			pages_pp_maximum_startup = pages_pp_maximum;
		}

		/*
		 * The first-time setting of pages_pp_maximum is based
		 * on the value of availrmem just after the start-up
		 * allocations. To preserve this relationship at run
		 * time, use a delta from availrmem_initial.
		 */
		ASSERT(availrmem_initial >= availrmem);
		avrmem_delta = availrmem_initial - availrmem;

		/* The allowable floor of pages_pp_maximum */
		p_min = tune.t_minarmem + 100;

		/* Make sure we don't come through here again. */
		init_done = 1;
	}
	/*
	 * Determine pages_pp_maximum, the number of currently available
	 * pages (availrmem) that can't be `locked'. If not set by
	 * the user, we set it to 4% of the currently available memory
	 * plus 4MB.
	 * But we also insist that it be greater than tune.t_minarmem;
	 * otherwise a process could lock down a lot of memory, get swapped
	 * out, and never have enough to get swapped back in.
	 */
	if (user_set)
		pages_pp_maximum = pages_pp_maximum_startup;
	else
		pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25)
		    + btop(4 * 1024 * 1024);

	if (pages_pp_maximum <= p_min) {
		pages_pp_maximum = p_min;
	}
}
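
/*
 * Worked example, illustrative only (assumes 4K pages): with 8 GB
 * available after the start-up allocations (availrmem_initial -
 * avrmem_delta == 2097152 pages), the default becomes
 * 2097152 / 25 + btop(4MB) == 83886 + 1024 == 84910 pages, i.e.
 * roughly 331 MB of the available memory may never be locked.
 */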

void
set_max_page_get(pgcnt_t target_total_pages)
{
	max_page_get = target_total_pages / 2;
}

static pgcnt_t pending_delete;

/*ARGSUSED*/
static void
page_mem_config_post_add(
	void *arg,
	pgcnt_t delta_pages)
{
	set_max_page_get(total_pages - pending_delete);
	init_pages_pp_maximum();
}

/*ARGSUSED*/
static int
page_mem_config_pre_del(
	void *arg,
	pgcnt_t delta_pages)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	return (0);
}

/*ARGSUSED*/
static void
page_mem_config_post_del(
	void *arg,
	pgcnt_t delta_pages,
	int cancelled)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	if (!cancelled)
		init_pages_pp_maximum();
}

static kphysm_setup_vector_t page_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	page_mem_config_post_add,
	page_mem_config_pre_del,
	page_mem_config_post_del,
};

static void
page_init_mem_config(void)
{
#ifdef DEBUG
	ASSERT(kphysm_setup_func_register(&page_mem_config_vec,
	    (void *)NULL) == 0);
#else /* !DEBUG */
	(void) kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL);
#endif /* !DEBUG */
}

/*
 * Evenly spread out the PCF counters for large free pages
 */
static void
page_free_large_ctr(pgcnt_t npages)
{
	static struct pcf	*p = pcf;
	pgcnt_t			lump;

	freemem += npages;

	lump = roundup(npages, pcf_fanout) / pcf_fanout;

	while (npages > 0) {

		ASSERT(!p->pcf_block);

		if (lump < npages) {
			p->pcf_count += (uint_t)lump;
			npages -= lump;
		} else {
			p->pcf_count += (uint_t)npages;
			npages = 0;
		}

		ASSERT(!p->pcf_wait);

		if (++p > &pcf[pcf_fanout - 1])
			p = pcf;
	}

	ASSERT(npages == 0);
}
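
/*
 * Worked example, illustrative only: freeing a 2 MB large page of 4K
 * constituents (npages == 512) with pcf_fanout == 8 gives
 * lump == roundup(512, 8) / 8 == 64, so each of the 8 pcf buckets
 * receives 64 pages.
 */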

/*
 * Add a physical chunk of memory to the system free lists during startup.
 * Platform specific startup() allocates the memory for the page structs.
 *
 * num	- number of page structures
 * base - page number (pfn) to be associated with the first page.
 *
 * Since we are doing this during startup (ie. single threaded), we will
 * use shortcut routines to avoid any locking overhead while putting all
 * these pages on the freelists.
 *
 * NOTE: Any changes performed to page_free(), must also be performed to
 *	 add_physmem() since this is how we initialize all page_t's at
 *	 boot time.
 */
void
add_physmem(
	page_t	*pp,
	pgcnt_t	num,
	pfn_t	pnum)
{
	page_t	*root = NULL;
	uint_t	szc = page_num_pagesizes() - 1;
	pgcnt_t	large = page_get_pagecnt(szc);
	pgcnt_t	cnt = 0;

	TRACE_2(TR_FAC_VM, TR_PAGE_INIT,
	    "add_physmem:pp %p num %lu", pp, num);

	/*
	 * Arbitrarily limit the max page_get request
	 * to 1/2 of the page structs we have.
	 */
	total_pages += num;
	set_max_page_get(total_pages);

	PLCNT_MODIFY_MAX(pnum, (long)num);

	/*
	 * The physical space for the pages array
	 * representing ram pages has already been
	 * allocated. Here we initialize each lock
	 * in the page structure, and put each on
	 * the free list
	 */
	for (; num; pp++, pnum++, num--) {

		/*
		 * this needs to fill in the page number
		 * and do any other arch specific initialization
		 */
		add_physmem_cb(pp, pnum);

		pp->p_lckcnt = 0;
		pp->p_cowcnt = 0;
		pp->p_slckcnt = 0;

		/*
		 * Initialize the page lock as unlocked, since nobody
		 * can see or access this page yet.
		 */
		pp->p_selock = 0;

		/*
		 * Initialize IO lock
		 */
		page_iolock_init(pp);

		/*
		 * initialize other fields in the page_t
		 */
		PP_SETFREE(pp);
		page_clr_all_props(pp);
		PP_SETAGED(pp);
		pp->p_offset = (u_offset_t)-1;
		pp->p_next = pp;
		pp->p_prev = pp;

		/*
		 * Simple case: System doesn't support large pages.
		 */
		if (szc == 0) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Handle unaligned pages, we collect them up onto
		 * the root page until we have a full large page.
		 */
		if (!IS_P2ALIGNED(pnum, large)) {

			/*
			 * If not in a large page,
			 * just free as small page.
			 */
			if (root == NULL) {
				pp->p_szc = 0;
				page_free_at_startup(pp);
				continue;
			}

			/*
			 * Link a constituent page into the large page.
			 */
			pp->p_szc = szc;
			page_list_concat(&root, &pp);

			/*
			 * When large page is fully formed, free it.
			 */
			if (++cnt == large) {
				page_free_large_ctr(cnt);
				page_list_add_pages(root, PG_LIST_ISINIT);
				root = NULL;
				cnt = 0;
			}
			continue;
		}

		/*
		 * At this point we have a page number which
		 * is aligned. We assert that we aren't already
		 * in a different large page.
		 */
		ASSERT(IS_P2ALIGNED(pnum, large));
		ASSERT(root == NULL && cnt == 0);

		/*
		 * If insufficient number of pages left to form
		 * a large page, just free the small page.
		 */
		if (num < large) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Otherwise start a new large page.
		 */
		pp->p_szc = szc;
		cnt++;
		root = pp;
	}
	ASSERT(root == NULL && cnt == 0);
}

/*
 * Find a page representing the specified [vp, offset].
 * If we find the page but it is in transit coming in,
 * it will have an "exclusive" lock and we wait for
 * the i/o to complete. A page found on the free list
 * is always reclaimed and then locked. On success, the page
 * is locked, its data is valid and it isn't on the free
 * list, while a NULL is returned if the page doesn't exist.
 */
page_t *
page_lookup(vnode_t *vp, u_offset_t off, se_t se)
{
	return (page_lookup_create(vp, off, se, NULL, NULL, 0));
}

/*
 * Find a page representing the specified [vp, offset].
 * We either return the one we found or, if passed in,
 * create one with identity of [vp, offset] of the
 * pre-allocated page. If we find an existing page but it is
 * in transit coming in, it will have an "exclusive" lock
 * and we wait for the i/o to complete. A page found on
 * the free list is always reclaimed and then locked.
 * On success, the page is locked, its data is valid and
 * it isn't on the free list, while a NULL is returned
 * if the page doesn't exist and newpp is NULL;
 */
page_t *
page_lookup_create(
	vnode_t *vp,
	u_offset_t off,
	se_t se,
	page_t *newpp,
	spgcnt_t *nrelocp,
	int flags)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		hash_locked;
	uint_t		es;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_lookup_cnt[0]);
	ASSERT(newpp ? PAGE_EXCL(newpp) : 1);

	/*
	 * Acquire the appropriate page hash lock since
	 * we have to search the hash list. Pages that
	 * hash to this list can't change identity while
	 * this lock is held.
	 */
	hash_locked = 0;
	index = PAGE_HASH_FUNC(vp, off);
	phm = NULL;
top:
	PAGE_HASH_SEARCH(index, pp, vp, off);
	if (pp != NULL) {
		VM_STAT_ADD(page_lookup_cnt[1]);
		es = (newpp != NULL) ? 1 : 0;
		es |= flags;
		if (!hash_locked) {
			VM_STAT_ADD(page_lookup_cnt[2]);
			if (!page_try_reclaim_lock(pp, se, es)) {
				/*
				 * On a miss, acquire the phm. Then
				 * next time, page_lock() will be called,
				 * causing a wait if the page is busy.
				 * just looping with page_trylock() would
				 * get pretty boring.
				 */
				VM_STAT_ADD(page_lookup_cnt[3]);
				phm = PAGE_HASH_MUTEX(index);
				mutex_enter(phm);
				hash_locked = 1;
				goto top;
			}
		} else {
			VM_STAT_ADD(page_lookup_cnt[4]);
			if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) {
				VM_STAT_ADD(page_lookup_cnt[5]);
				goto top;
			}
		}

		/*
		 * Since `pp' is locked it can not change identity now.
		 * Reconfirm we locked the correct page.
		 *
		 * Both the p_vnode and p_offset *must* be cast volatile
		 * to force a reload of their values: The PAGE_HASH_SEARCH
		 * macro will have stuffed p_vnode and p_offset into
		 * registers before calling page_trylock(); another thread,
		 * actually holding the hash lock, could have changed the
		 * page's identity in memory, but our registers would not
		 * be changed, fooling the reconfirmation. If the hash
		 * lock was held during the search, the casting would
		 * not be needed.
		 */
		VM_STAT_ADD(page_lookup_cnt[6]);
		if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
		    ((volatile u_offset_t)(pp->p_offset) != off)) {
			VM_STAT_ADD(page_lookup_cnt[7]);
			if (hash_locked) {
				panic("page_lookup_create: lost page %p",
				    (void *)pp);
				/*NOTREACHED*/
			}
			page_unlock(pp);
			phm = PAGE_HASH_MUTEX(index);
			mutex_enter(phm);
			hash_locked = 1;
			goto top;
		}

		/*
		 * If page_trylock() was called, then pp may still be on
		 * the cachelist (can't be on the free list, it would not
		 * have been found in the search). If it is on the
		 * cachelist it must be pulled now. To pull the page from
		 * the cachelist, it must be exclusively locked.
		 *
		 * The other big difference between page_trylock() and
		 * page_lock(), is that page_lock() will pull the
		 * page from whatever free list (the cache list in this
		 * case) the page is on. If page_trylock() was used
		 * above, then we have to do the reclaim ourselves.
		 */
		if ((!hash_locked) && (PP_ISFREE(pp))) {
			ASSERT(PP_ISAGED(pp) == 0);
			VM_STAT_ADD(page_lookup_cnt[8]);

			/*
			 * page_reclaim will ensure that we
			 * have this page exclusively
			 */

			if (!page_reclaim(pp, NULL)) {
				/*
				 * Page_reclaim dropped whatever lock
				 * we held.
				 */
				VM_STAT_ADD(page_lookup_cnt[9]);
				phm = PAGE_HASH_MUTEX(index);
				mutex_enter(phm);
				hash_locked = 1;
				goto top;
			} else if (se == SE_SHARED && newpp == NULL) {
				VM_STAT_ADD(page_lookup_cnt[10]);
				page_downgrade(pp);
			}
		}

		if (hash_locked) {
			mutex_exit(phm);
		}

		if (newpp != NULL && pp->p_szc < newpp->p_szc &&
		    PAGE_EXCL(pp) && nrelocp != NULL) {
			ASSERT(nrelocp != NULL);
			(void) page_relocate(&pp, &newpp, 1, 1, nrelocp,
			    NULL);
			if (*nrelocp > 0) {
				VM_STAT_COND_ADD(*nrelocp == 1,
				    page_lookup_cnt[11]);
				VM_STAT_COND_ADD(*nrelocp > 1,
				    page_lookup_cnt[12]);
				pp = newpp;
				se = SE_EXCL;
			} else {
				if (se == SE_SHARED) {
					page_downgrade(pp);
				}
				VM_STAT_ADD(page_lookup_cnt[13]);
			}
		} else if (newpp != NULL && nrelocp != NULL) {
			if (PAGE_EXCL(pp) && se == SE_SHARED) {
				page_downgrade(pp);
			}
			VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc,
			    page_lookup_cnt[14]);
			VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc,
			    page_lookup_cnt[15]);
			VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc,
			    page_lookup_cnt[16]);
		} else if (newpp != NULL && PAGE_EXCL(pp)) {
			se = SE_EXCL;
		}
	} else if (!hash_locked) {
		VM_STAT_ADD(page_lookup_cnt[17]);
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		hash_locked = 1;
		goto top;
	} else if (newpp != NULL) {
		/*
		 * If we have a preallocated page then
		 * insert it now and basically behave like
		 * page_create.
		 */
		VM_STAT_ADD(page_lookup_cnt[18]);
		/*
		 * Since we hold the page hash mutex and
		 * just searched for this page, page_hashin
		 * had better not fail. If it does, that
		 * means some thread did not follow the
		 * page hash mutex rules. Panic now and
		 * get it over with. As usual, go down
		 * holding all the locks.
		 */
		ASSERT(MUTEX_HELD(phm));
		if (!page_hashin(newpp, vp, off, phm)) {
			ASSERT(MUTEX_HELD(phm));
			panic("page_lookup_create: hashin failed %p %p %llx %p",
			    (void *)newpp, (void *)vp, off, (void *)phm);
			/*NOTREACHED*/
		}
		ASSERT(MUTEX_HELD(phm));
		mutex_exit(phm);
		phm = NULL;
		page_set_props(newpp, P_REF);
		page_io_lock(newpp);
		pp = newpp;
		se = SE_EXCL;
	} else {
		VM_STAT_ADD(page_lookup_cnt[19]);
		mutex_exit(phm);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1);

	return (pp);
}

/*
 * Search the hash list for the page representing the
 * specified [vp, offset] and return it locked. Skip
 * free pages and pages that cannot be locked as requested.
 * Used while attempting to kluster pages.
 */
page_t *
page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		locked;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_lookup_nowait_cnt[0]);

	index = PAGE_HASH_FUNC(vp, off);
	PAGE_HASH_SEARCH(index, pp, vp, off);
	locked = 0;
	if (pp == NULL) {
top:
		VM_STAT_ADD(page_lookup_nowait_cnt[1]);
		locked = 1;
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		PAGE_HASH_SEARCH(index, pp, vp, off);
	}

	if (pp == NULL || PP_ISFREE(pp)) {
		VM_STAT_ADD(page_lookup_nowait_cnt[2]);
		pp = NULL;
	} else {
		if (!page_trylock(pp, se)) {
			VM_STAT_ADD(page_lookup_nowait_cnt[3]);
			pp = NULL;
		} else {
			VM_STAT_ADD(page_lookup_nowait_cnt[4]);
			/*
			 * See the comment in page_lookup()
			 */
			if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
			    ((u_offset_t)(pp->p_offset) != off)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[5]);
				if (locked) {
					panic("page_lookup_nowait %p",
					    (void *)pp);
					/*NOTREACHED*/
				}
				page_unlock(pp);
				goto top;
			}
			if (PP_ISFREE(pp)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[6]);
				page_unlock(pp);
				pp = NULL;
			}
		}
	}
	if (locked) {
		VM_STAT_ADD(page_lookup_nowait_cnt[7]);
		mutex_exit(phm);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	return (pp);
}

/*
 * Search the hash list for a page with the specified [vp, off]
 * that is known to exist and is already locked. This routine
 * is typically used by segment SOFTUNLOCK routines.
 */
page_t *
page_find(vnode_t *vp, u_offset_t off)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_find_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	PAGE_HASH_SEARCH(index, pp, vp, off);
	mutex_exit(phm);

	ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
	return (pp);
}

/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system. Obviously this should
 * only be considered as a hint since nothing prevents the
 * page from disappearing or appearing immediately after
 * the return from this routine. Consequently, we don't
 * even bother to lock the list.
 */
page_t *
page_exists(vnode_t *vp, u_offset_t off)
{
	page_t	*pp;
	ulong_t	index;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_exists_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	PAGE_HASH_SEARCH(index, pp, vp, off);

	return (pp);
}

/*
 * Determine if physically contiguous pages exist for the [vp, off] -
 * [vp, off + page_size(szc)) range. If they exist and ppa is not NULL,
 * fill the ppa array with these pages locked SHARED. If necessary,
 * reclaim pages from the freelist. Return 1 if contiguous pages exist
 * and 0 otherwise.
 *
 * If we fail to lock the pages, still return 1 if they exist and are
 * contiguous. But in this case the return value is just a hint and the
 * ppa array won't be filled. The caller should initialize ppa[0] as
 * NULL to distinguish the two cases.
 *
 * Returns 0 if the pages don't exist or are not physically contiguous.
 *
 * This routine doesn't work for anonymous (swapfs) pages.
 */
int
page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[])
{
	pgcnt_t pages;
	pfn_t pfn;
	page_t *rootpp;
	pgcnt_t i;
	pgcnt_t j;
	u_offset_t save_off = off;
	ulong_t index;
	kmutex_t *phm;
	page_t *pp;
	uint_t pszc;
	int loopcnt = 0;

	ASSERT(szc != 0);
	ASSERT(vp != NULL);
	ASSERT(!IS_SWAPFSVP(vp));
	ASSERT(!VN_ISKAS(vp));

again:
	if (++loopcnt > 3) {
		VM_STAT_ADD(page_exphcontg[0]);
		return (0);
	}

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	PAGE_HASH_SEARCH(index, pp, vp, off);
	mutex_exit(phm);

	VM_STAT_ADD(page_exphcontg[1]);

	if (pp == NULL) {
		VM_STAT_ADD(page_exphcontg[2]);
		return (0);
	}

	pages = page_get_pagecnt(szc);
	rootpp = pp;
	pfn = rootpp->p_pagenum;

	if ((pszc = pp->p_szc) >= szc && ppa != NULL) {
		VM_STAT_ADD(page_exphcontg[3]);
		if (!page_trylock(pp, SE_SHARED)) {
			VM_STAT_ADD(page_exphcontg[4]);
			return (1);
		}
		/*
		 * Also check whether p_pagenum was modified by DR.
		 */
		if (pp->p_szc != pszc || pp->p_vnode != vp ||
		    pp->p_offset != off || pp->p_pagenum != pfn) {
			VM_STAT_ADD(page_exphcontg[5]);
			page_unlock(pp);
			off = save_off;
			goto again;
		}
		/*
		 * Since szc was non-zero and the vnode and offset still
		 * matched after we locked the page, it can't become free
		 * on us.
		 */
		ASSERT(!PP_ISFREE(pp));
		if (!IS_P2ALIGNED(pfn, pages)) {
			page_unlock(pp);
			return (0);
		}
		ppa[0] = pp;
		pp++;
		off += PAGESIZE;
		pfn++;
		for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
			if (!page_trylock(pp, SE_SHARED)) {
				VM_STAT_ADD(page_exphcontg[6]);
				pp--;
				while (i-- > 0) {
					page_unlock(pp);
					pp--;
				}
				ppa[0] = NULL;
				return (1);
			}
			if (pp->p_szc != pszc) {
				VM_STAT_ADD(page_exphcontg[7]);
				page_unlock(pp);
				pp--;
				while (i-- > 0) {
					page_unlock(pp);
					pp--;
				}
				ppa[0] = NULL;
				off = save_off;
				goto again;
			}
			/*
			 * szc is the same as for the previous already
			 * locked pages with the right identity. Since this
			 * page had the correct szc after we locked it, it
			 * can't get freed or destroyed and therefore must
			 * have the expected identity.
			 */
			ASSERT(!PP_ISFREE(pp));
			if (pp->p_vnode != vp ||
			    pp->p_offset != off) {
				panic("page_exists_physcontig: "
				    "large page identity doesn't match");
			}
			ppa[i] = pp;
			ASSERT(pp->p_pagenum == pfn);
		}
		VM_STAT_ADD(page_exphcontg[8]);
		ppa[pages] = NULL;
		return (1);
	} else if (pszc >= szc) {
		VM_STAT_ADD(page_exphcontg[9]);
		if (!IS_P2ALIGNED(pfn, pages)) {
			return (0);
		}
		return (1);
	}

	if (!IS_P2ALIGNED(pfn, pages)) {
		VM_STAT_ADD(page_exphcontg[10]);
		return (0);
	}

	if (page_numtomemseg_nolock(pfn) !=
	    page_numtomemseg_nolock(pfn + pages - 1)) {
		VM_STAT_ADD(page_exphcontg[11]);
		return (0);
	}

	/*
	 * We loop across the pages up to 4 times to promote the page size.
	 * We're extra cautious to promote the page size atomically with
	 * respect to everybody else. But we can probably optimize this
	 * into 1 loop if it becomes an issue.
	 */

	for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
		if (!page_trylock(pp, SE_EXCL)) {
			VM_STAT_ADD(page_exphcontg[12]);
			break;
		}
		/*
		 * Check whether p_pagenum was modified by DR.
		 */
		if (pp->p_pagenum != pfn) {
			page_unlock(pp);
			break;
		}
		if (pp->p_vnode != vp ||
		    pp->p_offset != off) {
			VM_STAT_ADD(page_exphcontg[13]);
			page_unlock(pp);
			break;
		}
		if (pp->p_szc >= szc) {
			ASSERT(i == 0);
			page_unlock(pp);
			off = save_off;
			goto again;
		}
	}

	if (i != pages) {
		VM_STAT_ADD(page_exphcontg[14]);
		--pp;
		while (i-- > 0) {
			page_unlock(pp);
			--pp;
		}
		return (0);
	}

	pp = rootpp;
	for (i = 0; i < pages; i++, pp++) {
		if (PP_ISFREE(pp)) {
			VM_STAT_ADD(page_exphcontg[15]);
			ASSERT(!PP_ISAGED(pp));
			ASSERT(pp->p_szc == 0);
			if (!page_reclaim(pp, NULL)) {
				break;
			}
		} else {
			ASSERT(pp->p_szc < szc);
			VM_STAT_ADD(page_exphcontg[16]);
			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
		}
	}
	if (i < pages) {
		VM_STAT_ADD(page_exphcontg[17]);
		/*
		 * page_reclaim failed because we were out of memory.
		 * Drop the rest of the locks and return; this page
		 * must already have been reallocated anyway.
		 */
		pp = rootpp;
		for (j = 0; j < pages; j++, pp++) {
			if (j != i) {
				page_unlock(pp);
			}
		}
		return (0);
	}

	off = save_off;
	pp = rootpp;
	for (i = 0; i < pages; i++, pp++, off += PAGESIZE) {
		ASSERT(PAGE_EXCL(pp));
		ASSERT(!PP_ISFREE(pp));
		ASSERT(!hat_page_is_mapped(pp));
		ASSERT(pp->p_vnode == vp);
		ASSERT(pp->p_offset == off);
		pp->p_szc = szc;
	}
	pp = rootpp;
	for (i = 0; i < pages; i++, pp++) {
		if (ppa == NULL) {
			page_unlock(pp);
		} else {
			ppa[i] = pp;
			page_downgrade(ppa[i]);
		}
	}
	if (ppa != NULL) {
		ppa[pages] = NULL;
	}
	VM_STAT_ADD(page_exphcontg[18]);
	ASSERT(vp->v_pages != NULL);
	return (1);
}
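
/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * caller distinguishing the three outcomes documented above by
 * pre-setting ppa[0] to NULL, per the function's contract. The
 * function name and return convention are made up for illustration.
 */
#ifdef notdef
static int
example_probe_contig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[])
{
	ppa[0] = NULL;			/* sentinel per the contract above */
	if (!page_exists_physcontig(vp, off, szc, ppa))
		return (0);		/* nonexistent or not contiguous */
	if (ppa[0] == NULL)
		return (1);		/* pages exist; lock failed: hint */
	return (2);	/* ppa[] filled, each page locked SE_SHARED */
}
#endif /* notdef */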

/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system and if so return its
 * size code. Obviously this should only be considered as
 * a hint since nothing prevents the page from disappearing
 * or appearing immediately after the return from this routine.
 */
int
page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	int		rc = 0;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	ASSERT(szc != NULL);
	VM_STAT_ADD(page_exists_forreal_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	PAGE_HASH_SEARCH(index, pp, vp, off);
	if (pp != NULL) {
		*szc = pp->p_szc;
		rc = 1;
	}
	mutex_exit(phm);
	return (rc);
}

/* wakeup threads waiting for pages in page_create_get_something() */
void
wakeup_pcgs(void)
{
	if (!CV_HAS_WAITERS(&pcgs_cv))
		return;
	cv_broadcast(&pcgs_cv);
}

/*
 * 'freemem' is used all over the kernel as an indication of how many
 * pages are free (either on the cache list or on the free page list)
 * in the system. In very few places is a really accurate 'freemem'
 * needed. To avoid contention on the lock protecting the single
 * freemem, it was spread out into NCPU buckets. Set_freemem sets
 * freemem to the total of all NCPU buckets. It is called from clock()
 * on each TICK.
 */
void
set_freemem()
{
	struct pcf	*p;
	ulong_t		t;
	uint_t		i;

	t = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		t += p->pcf_count;
		p++;
	}
	freemem = t;

	/*
	 * Don't worry about grabbing mutex. It's not that
	 * critical if we miss a tick or two. This is
	 * where we wakeup possible delayers in
	 * page_create_get_something().
	 */
	wakeup_pcgs();
}

ulong_t
get_freemem()
{
	struct pcf	*p;
	ulong_t		t;
	uint_t		i;

	t = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		t += p->pcf_count;
		p++;
	}
	/*
	 * We just calculated it, might as well set it.
	 */
	freemem = t;
	return (t);
}

/*
 * Acquire all of the page cache & free (pcf) locks.
 */
void
pcf_acquire_all()
{
	struct pcf	*p;
	uint_t		i;

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		p++;
	}
}

/*
 * Release all the pcf_locks.
 */
void
pcf_release_all()
{
	struct pcf	*p;
	uint_t		i;

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_exit(&p->pcf_lock);
		p++;
	}
}

/*
 * Inform the VM system that we need some pages freed up.
 * Calls must be symmetric, e.g.:
 *
 *	page_needfree(100);
 *	wait a bit;
 *	page_needfree(-100);
 */
void
page_needfree(spgcnt_t npages)
{
	mutex_enter(&new_freemem_lock);
	needfree += npages;
	mutex_exit(&new_freemem_lock);
}

/*
 * Throttle for page_create(): try to prevent freemem from dropping
 * below throttlefree. We can't provide a 100% guarantee because
 * KM_NOSLEEP allocations, page_reclaim(), and various other things
 * nibble away at the freelist. However, we can block all PG_WAIT
 * allocations until memory becomes available. The motivation is
 * that several things can fall apart when there's no free memory:
 *
 * (1) If pageout() needs memory to push a page, the system deadlocks.
 *
 * (2) By (broken) specification, timeout(9F) can neither fail nor
 *     block, so it has no choice but to panic the system if it
 *     cannot allocate a callout structure.
 *
 * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block;
 *     it panics if it cannot allocate a callback structure.
 *
 * (4) Untold numbers of third-party drivers have not yet been hardened
 *     against KM_NOSLEEP and/or allocb() failures; they simply assume
 *     success and panic the system with a data fault on failure.
 *     (The long-term solution to this particular problem is to ship
 *     hostile fault-injecting DEBUG kernels with the DDK.)
 *
 * It is theoretically impossible to guarantee success of non-blocking
 * allocations, but in practice, this throttle is very hard to break.
 */
static int
page_create_throttle(pgcnt_t npages, int flags)
{
	ulong_t	fm;
	uint_t	i;
	pgcnt_t tf;	/* effective value of throttlefree */

	/*
	 * Normal priority allocations.
	 */
	if ((flags & (PG_WAIT | PG_NORMALPRI)) == PG_NORMALPRI) {
		ASSERT(!(flags & (PG_PANIC | PG_PUSHPAGE)));
		return (freemem >= npages + throttlefree);
	}

	/*
	 * Never deny pages when:
	 * - it's a thread that cannot block [NOMEMWAIT()]
	 * - the allocation cannot block and must not fail
	 * - the allocation cannot block and is pageout dispensated
	 */
	if (NOMEMWAIT() ||
	    ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) ||
	    ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE))
		return (1);

	/*
	 * If the allocation can't block, we look favorably upon it
	 * unless we're below pageout_reserve. In that case we fail
	 * the allocation because we want to make sure there are a few
	 * pages available for pageout.
	 */
	if ((flags & PG_WAIT) == 0)
		return (freemem >= npages + pageout_reserve);

	/* Calculate the effective throttlefree value */
	tf = throttlefree -
	    ((flags & PG_PUSHPAGE) ? pageout_reserve : 0);

	cv_signal(&proc_pageout->p_cv);

	for (;;) {
		fm = 0;
		pcf_acquire_all();
		mutex_enter(&new_freemem_lock);
		for (i = 0; i < pcf_fanout; i++) {
			fm += pcf[i].pcf_count;
			pcf[i].pcf_wait++;
			mutex_exit(&pcf[i].pcf_lock);
		}
		freemem = fm;
		if (freemem >= npages + tf) {
			mutex_exit(&new_freemem_lock);
			break;
		}
		needfree += npages;
		freemem_wait++;
		cv_wait(&freemem_cv, &new_freemem_lock);
		freemem_wait--;
		needfree -= npages;
		mutex_exit(&new_freemem_lock);
	}
	return (1);
}
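
/*
 * Illustrative summary of the throttle policy above, not part of the
 * original source:
 *
 *	PG_NORMALPRI, no PG_WAIT:	succeed iff
 *					freemem >= npages + throttlefree
 *	NOMEMWAIT(), or PG_PANIC or
 *	PG_PUSHPAGE without PG_WAIT:	always succeed
 *	other non-PG_WAIT requests:	succeed iff
 *					freemem >= npages + pageout_reserve
 *	PG_WAIT:			block until
 *					freemem >= npages + tf
 */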

/*
 * page_create_wait() is called to either coalesce pages from the
 * different pcf buckets or to wait because there simply are not
 * enough pages to satisfy the caller's request.
 *
 * Sadly, this is called from platform/vm/vm_machdep.c
 */
int
page_create_wait(pgcnt_t npages, uint_t flags)
{
	pgcnt_t		total;
	uint_t		i;
	struct pcf	*p;

	/*
	 * Wait until there are enough free pages to satisfy our
	 * entire request.
	 * We set needfree += npages before prodding pageout, to make sure
	 * it does real work when npages > lotsfree > freemem.
	 */
	VM_STAT_ADD(page_create_not_enough);

	ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1);
checkagain:

	/* Throttle kernel memory allocations if necessary */
	KERNEL_THROTTLE(npages, flags);

	if (freemem < npages + throttlefree)
		if (!page_create_throttle(npages, flags))
			return (0);

	if (pcf_decrement_bucket(npages) ||
	    pcf_decrement_multiple(&total, npages, 0))
		return (1);

	/*
	 * All of the pcf locks are held, and there are not enough pages
	 * to satisfy the request (total < npages).
	 * Be sure to acquire the new_freemem_lock before dropping
	 * the pcf locks. This prevents dropping wakeups in page_free().
	 * The order is always pcf_lock then new_freemem_lock.
	 *
	 * Since we hold all the pcf locks, it is a good time to set freemem.
	 *
	 * If the caller does not want to wait, return now.
	 * Else turn the pageout daemon loose to find something
	 * and wait till it does.
	 */
	freemem = total;

	if ((flags & PG_WAIT) == 0) {
		pcf_release_all();

		TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM,
		    "page_create_nomem:npages %ld freemem %ld",
		    npages, freemem);
		return (0);
	}

	ASSERT(proc_pageout != NULL);
	cv_signal(&proc_pageout->p_cv);

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START,
	    "page_create_sleep_start: freemem %ld needfree %ld",
	    freemem, needfree);

	/*
	 * We are going to wait.
	 * We currently hold all of the pcf_locks,
	 * get the new_freemem_lock (it protects freemem_wait),
	 * before dropping the pcf_locks.
	 */
	mutex_enter(&new_freemem_lock);

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		p->pcf_wait++;
		mutex_exit(&p->pcf_lock);
		p++;
	}

	needfree += npages;
	freemem_wait++;

	cv_wait(&freemem_cv, &new_freemem_lock);

	freemem_wait--;
	needfree -= npages;

	mutex_exit(&new_freemem_lock);

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END,
	    "page_create_sleep_end: freemem %ld needfree %ld",
	    freemem, needfree);

	VM_STAT_ADD(page_create_not_enough_again);
	goto checkagain;
}
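
/*
 * Illustrative sketch, not part of the original source: the
 * accounting reservation made by page_create_wait() must be balanced
 * by page_create_putback() for any pages the caller fails to take
 * from the freelists, as page_alloc_pages() below does on its failure
 * paths. example_take_from_freelists() is hypothetical.
 */
#ifdef notdef
static int
example_reserve_and_take(pgcnt_t npages)
{
	pgcnt_t got;

	if (!page_create_wait(npages, 0))
		return (ENOMEM);	/* reservation failed, no waiting */
	got = example_take_from_freelists(npages);	/* hypothetical */
	if (got < npages)
		page_create_putback((spgcnt_t)(npages - got));
	return (0);
}
#endif /* notdef */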

/*
 * A routine to do the opposite of page_create_wait().
 */
void
page_create_putback(spgcnt_t npages)
{
	struct pcf	*p;
	pgcnt_t		lump;
	uint_t		*which;

	/*
	 * When a contiguous lump is broken up, we have to
	 * deal with lots of pages (min 64) so lets spread
	 * the wealth around.
	 */
	lump = roundup(npages, pcf_fanout) / pcf_fanout;
	freemem += npages;

	for (p = pcf; (npages > 0) && (p < &pcf[pcf_fanout]); p++) {
		which = &p->pcf_count;

		mutex_enter(&p->pcf_lock);

		if (p->pcf_block) {
			which = &p->pcf_reserve;
		}

		if (lump < npages) {
			*which += (uint_t)lump;
			npages -= lump;
		} else {
			*which += (uint_t)npages;
			npages = 0;
		}

		if (p->pcf_wait) {
			mutex_enter(&new_freemem_lock);
			/*
			 * Check to see if some other thread
			 * is actually waiting. Another bucket
			 * may have woken it up by now. If there
			 * are no waiters, then set our pcf_wait
			 * count to zero to avoid coming in here
			 * next time.
			 */
			if (freemem_wait) {
				if (npages > 1) {
					cv_broadcast(&freemem_cv);
				} else {
					cv_signal(&freemem_cv);
				}
				p->pcf_wait--;
			} else {
				p->pcf_wait = 0;
			}
			mutex_exit(&new_freemem_lock);
		}
		mutex_exit(&p->pcf_lock);
	}
	ASSERT(npages == 0);
}

/*
 * A helper routine for page_create_get_something.
 * The indenting got too deep down there.
 * Unblock the pcf counters. Any pages freed after
 * pcf_block got set are moved to pcf_count and
 * wakeups (cv_broadcast() or cv_signal()) are done as needed.
 */
static void
pcgs_unblock(void)
{
	int		i;
	struct pcf	*p;

	/* Update freemem while we're here. */
	freemem = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		ASSERT(p->pcf_count == 0);
		p->pcf_count = p->pcf_reserve;
		p->pcf_block = 0;
		freemem += p->pcf_count;
		if (p->pcf_wait) {
			mutex_enter(&new_freemem_lock);
			if (freemem_wait) {
				if (p->pcf_reserve > 1) {
					cv_broadcast(&freemem_cv);
					p->pcf_wait = 0;
				} else {
					cv_signal(&freemem_cv);
					p->pcf_wait--;
				}
			} else {
				p->pcf_wait = 0;
			}
			mutex_exit(&new_freemem_lock);
		}
		p->pcf_reserve = 0;
		mutex_exit(&p->pcf_lock);
		p++;
	}
}

/*
 * Called from page_create_va() when both the cache and free lists
 * have been checked once.
 *
 * Either returns a page or panics since the accounting was done
 * way before we got here.
 *
 * We don't come here often, so leave the accounting on permanently.
 */

#define	MAX_PCGS	100

#ifdef	DEBUG
#define	PCGS_TRIES	100
#else	/* DEBUG */
#define	PCGS_TRIES	10
#endif	/* DEBUG */

#ifdef	VM_STATS
uint_t	pcgs_counts[PCGS_TRIES];
uint_t	pcgs_too_many;
uint_t	pcgs_entered;
uint_t	pcgs_entered_noreloc;
uint_t	pcgs_locked;
uint_t	pcgs_cagelocked;
#endif	/* VM_STATS */

static page_t *
page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg,
    caddr_t vaddr, uint_t flags)
{
	uint_t		count;
	page_t		*pp;
	uint_t		locked, i;
	struct pcf	*p;
	lgrp_t		*lgrp;
	int		cagelocked = 0;

	VM_STAT_ADD(pcgs_entered);

	/*
	 * Tap any reserve freelists: if we fail now, we'll die
	 * since the page(s) we're looking for have already been
	 * accounted for.
	 */
	flags |= PG_PANIC;

	if ((flags & (PG_NORELOC|PG_KFLT)) != 0) {
		VM_STAT_ADD(pcgs_entered_noreloc);
		/*
		 * Requests for free pages from critical threads
		 * such as pageout still won't throttle here, but
		 * we must try again, to give the cageout thread
		 * another chance to catch up. Since we already
		 * accounted for the pages, we had better get them
		 * this time.
		 *
		 * N.B. All non-critical threads acquire the pcgs_cagelock
		 * to serialize access to the freelists. This implements a
		 * turnstile-type synchronization to avoid starvation of
		 * critical requests for PG_NORELOC memory by non-critical
		 * threads: all non-critical threads must acquire a 'ticket'
		 * before passing through, which entails making sure
		 * kcage_freemem won't fall below minfree prior to grabbing
		 * pages from the freelists.
		 */
		/* LINTED */
		if (KERNEL_THROTTLE_NONCRIT(1, flags)) {
			mutex_enter(&pcgs_cagelock);
			cagelocked = 1;
			VM_STAT_ADD(pcgs_cagelocked);
		}
	}

	/*
	 * Time to get serious.
	 * We failed to get a `correctly colored' page from both the
	 * free and cache lists.
	 * We escalate in stage.
	 *
	 * First try both lists without worrying about color.
	 *
	 * Then, grab all page accounting locks (ie. pcf[]) and
	 * steal any pages that they have and set the pcf_block flag to
	 * stop deletions from the lists. This will help because
	 * a page can get added to the free list while we are looking
	 * at the cache list, then another page could be added to the cache
	 * list allowing the page on the free list to be removed as we
	 * move from looking at the cache list to the free list. This
	 * could happen over and over. We would never find the page
	 * we have accounted for.
	 *
	 * Noreloc pages are a subset of the global (relocatable) page pool.
	 * They are not tracked separately in the pcf bins, so it is
	 * impossible to know when doing pcf accounting if the available
	 * page(s) are noreloc pages or not. When looking for a noreloc page
	 * it is quite easy to end up here even if the global (relocatable)
	 * page pool has plenty of free pages but the noreloc pool is empty.
	 *
	 * When the noreloc pool is empty (or low), additional noreloc pages
	 * are created by converting pages from the global page pool. This
	 * process will stall during pcf accounting if the pcf bins are
	 * already locked. Such is the case when a noreloc allocation is
	 * looping here in page_create_get_something waiting for more noreloc
	 * pages to appear.
	 *
	 * Short of adding a new field to the pcf bins to accurately track
	 * the number of free noreloc pages, we instead do not grab the
	 * pcgs_lock, do not set the pcf blocks and do not timeout when
	 * allocating a noreloc page. This allows noreloc allocations to
	 * loop without blocking global page pool allocations.
	 *
	 * NOTE: the behaviour of page_create_get_something has not changed
	 * for the case of global page pool allocations.
	 */

	flags &= ~PG_MATCH_COLOR;
	locked = 0;
#if defined(__i386) || defined(__amd64)
	flags = page_create_update_flags_x86(flags);
#endif

	lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);

	for (count = 0; kcage_on || kflt_on || count < MAX_PCGS; count++) {
		PAGE_GET_FREELISTS(pp, vp, off, seg, vaddr, PAGESIZE,
		    flags, lgrp);
		if (pp == NULL) {
			pp = page_get_cachelist(vp, off, seg, vaddr,
			    flags, lgrp);
		}
		if (pp == NULL) {
			/*
			 * Serialize. Don't fight with other pcgs().
			 */
			if (!locked && KERNEL_NOT_THROTTLED(flags)) {
				mutex_enter(&pcgs_lock);
				VM_STAT_ADD(pcgs_locked);
				locked = 1;
				p = pcf;
				for (i = 0; i < pcf_fanout; i++) {
					mutex_enter(&p->pcf_lock);
					ASSERT(p->pcf_block == 0);
					p->pcf_block = 1;
					p->pcf_reserve = p->pcf_count;
					p->pcf_count = 0;
					mutex_exit(&p->pcf_lock);
					p++;
				}
				freemem = 0;
			}

			if (count) {
				/*
				 * Since page_free() puts pages on
				 * a list then accounts for it, we
				 * just have to wait for page_free()
				 * to unlock any page it was working
				 * with. The page_lock()-page_reclaim()
				 * path falls in the same boat.
				 *
				 * We don't need to check on the
				 * PG_WAIT flag, we have already
				 * accounted for the page we are
				 * looking for in page_create_va().
				 *
				 * We just wait a moment to let any
				 * locked pages on the lists free up,
				 * then continue around and try again.
				 *
				 * Will be awakened by set_freemem().
				 */
				mutex_enter(&pcgs_wait_lock);
				cv_wait(&pcgs_cv, &pcgs_wait_lock);
				mutex_exit(&pcgs_wait_lock);
			}
		} else {
#ifdef VM_STATS
			if (count >= PCGS_TRIES) {
				VM_STAT_ADD(pcgs_too_many);
			} else {
				VM_STAT_ADD(pcgs_counts[count]);
			}
#endif
			if (locked) {
				pcgs_unblock();
				mutex_exit(&pcgs_lock);
			}
			if (cagelocked)
				mutex_exit(&pcgs_cagelock);
			return (pp);
		}
	}
	/*
	 * we go down holding the pcf locks.
	 */
	panic("no %spage found %d",
	    ((flags & PG_NORELOC) ? "non-reloc " : ""), count);
	/*NOTREACHED*/
}

/*
 * Create enough pages for "bytes" worth of data starting at
 * "off" in "vp".
 *
 *	Where flag must be one of:
 *
 *		PG_EXCL:	Exclusive create (fail if any page already
 *				exists in the page cache) which does not
 *				wait for memory to become available.
 *
 *		PG_WAIT:	Non-exclusive create which can wait for
 *				memory to become available.
 *
 *		PG_PHYSCONTIG:	Allocate physically contiguous pages.
 *				(Not Supported)
 *
 * A doubly linked list of pages is returned to the caller. Each page
 * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock)
 * lock.
 *
 * Unable to change the parameters to page_create() in a minor release,
 * we renamed page_create() to page_create_va(), changed all known calls
 * from page_create() to page_create_va(), and created this wrapper.
 *
 * Upon a major release, we should break compatibility by deleting this
 * wrapper, and replacing all the strings "page_create_va", with "page_create".
 *
 * NOTE: There is a copy of this interface as page_create_io() in
 *	 i86/vm/vm_machdep.c. Any bugs fixed here should be applied
 *	 there.
 */
page_t *
page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags)
{
	caddr_t random_vaddr;
	struct seg kseg;

#ifdef DEBUG
	cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p",
	    (void *)caller());
#endif

	random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^
	    (uintptr_t)(off >> PAGESHIFT));
	kseg.s_as = &kas;

	return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr));
}

#ifdef DEBUG
uint32_t pg_alloc_pgs_mtbf = 0;
#endif

/*
 * Used for large page support. It will attempt to allocate
 * a large page(s) off the freelist.
 *
 * Returns non-zero on failure.
 */
int
page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr,
    page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz, int pgflags)
{
	pgcnt_t		npgs, curnpgs, totpgs;
	size_t		pgsz;
	page_t		*pplist = NULL, *pp;
	int		err = 0;
	lgrp_t		*lgrp;

	ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1));
	ASSERT(pgflags == 0 || pgflags == PG_LOCAL);

	/*
	 * Check if system heavily prefers local large pages over remote
	 * on systems with multiple lgroups.
2018 */ 2019 if (lpg_alloc_prefer == LPAP_LOCAL && nlgrps > 1) { 2020 pgflags = PG_LOCAL; 2021 } 2022 2023 VM_STAT_ADD(alloc_pages[0]); 2024 2025 #ifdef DEBUG 2026 if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) { 2027 return (ENOMEM); 2028 } 2029 #endif 2030 2031 /* 2032 * One must be NULL but not both. 2033 * And one must be non NULL but not both. 2034 */ 2035 ASSERT(basepp != NULL || ppa != NULL); 2036 ASSERT(basepp == NULL || ppa == NULL); 2037 2038 #if defined(__i386) || defined(__amd64) 2039 while (page_chk_freelist(szc) == 0) { 2040 VM_STAT_ADD(alloc_pages[8]); 2041 if (anypgsz == 0 || --szc == 0) 2042 return (ENOMEM); 2043 } 2044 #endif 2045 2046 pgsz = page_get_pagesize(szc); 2047 totpgs = curnpgs = npgs = pgsz >> PAGESHIFT; 2048 2049 ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0); 2050 2051 (void) page_create_wait(npgs, PG_WAIT); 2052 2053 while (npgs && szc) { 2054 lgrp = lgrp_mem_choose(seg, addr, pgsz); 2055 if (pgflags == PG_LOCAL) { 2056 PAGE_GET_FREELISTS(pp, vp, 0, seg, addr, pgsz, 2057 pgflags, lgrp); 2058 if (pp == NULL) { 2059 /* LINTED */ 2060 PAGE_GET_FREELISTS(pp, vp, 0, seg, addr, pgsz, 2061 0, lgrp); 2062 } 2063 } else { 2064 /* LINTED */ 2065 PAGE_GET_FREELISTS(pp, vp, 0, seg, addr, pgsz, 2066 0, lgrp); 2067 } 2068 if (pp != NULL) { 2069 VM_STAT_ADD(alloc_pages[1]); 2070 page_list_concat(&pplist, &pp); 2071 ASSERT(npgs >= curnpgs); 2072 npgs -= curnpgs; 2073 } else if (anypgsz) { 2074 VM_STAT_ADD(alloc_pages[2]); 2075 szc--; 2076 pgsz = page_get_pagesize(szc); 2077 curnpgs = pgsz >> PAGESHIFT; 2078 } else { 2079 VM_STAT_ADD(alloc_pages[3]); 2080 ASSERT(npgs == totpgs); 2081 page_create_putback(npgs); 2082 return (ENOMEM); 2083 } 2084 } 2085 if (szc == 0) { 2086 VM_STAT_ADD(alloc_pages[4]); 2087 ASSERT(npgs != 0); 2088 page_create_putback(npgs); 2089 err = ENOMEM; 2090 } else if (basepp != NULL) { 2091 ASSERT(npgs == 0); 2092 ASSERT(ppa == NULL); 2093 *basepp = pplist; 2094 } 2095 2096 npgs = totpgs - npgs; 2097 pp = pplist; 2098 2099 /* 2100 * Clear the free and age bits. Also if we were passed in a ppa then 2101 * fill it in with all the constituent pages from the large page. But 2102 * if we failed to allocate all the pages just free what we got. 2103 */ 2104 while (npgs != 0) { 2105 ASSERT(PP_ISFREE(pp)); 2106 ASSERT(PP_ISAGED(pp)); 2107 if (ppa != NULL || err != 0) { 2108 if (err == 0) { 2109 VM_STAT_ADD(alloc_pages[5]); 2110 PP_CLRFREE(pp); 2111 PP_CLRAGED(pp); 2112 page_sub(&pplist, pp); 2113 *ppa++ = pp; 2114 npgs--; 2115 } else { 2116 VM_STAT_ADD(alloc_pages[6]); 2117 ASSERT(pp->p_szc != 0); 2118 curnpgs = page_get_pagecnt(pp->p_szc); 2119 page_list_break(&pp, &pplist, curnpgs); 2120 page_list_add_pages(pp, 0); 2121 page_create_putback(curnpgs); 2122 ASSERT(npgs >= curnpgs); 2123 npgs -= curnpgs; 2124 } 2125 pp = pplist; 2126 } else { 2127 VM_STAT_ADD(alloc_pages[7]); 2128 PP_CLRFREE(pp); 2129 PP_CLRAGED(pp); 2130 pp = pp->p_next; 2131 npgs--; 2132 } 2133 } 2134 return (err); 2135 } 2136 2137 /* 2138 * Get a single large page off of the freelists, and set it up for use. 2139 * Number of bytes requested must be a supported page size. 2140 * 2141 * Note that this call may fail even if there is sufficient 2142 * memory available or PG_WAIT is set, so the caller must 2143 * be willing to fallback on page_create_va(), block and retry, 2144 * or fail the requester. 
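 *
 * As an illustrative sketch only (not taken from a real caller), the
 * fallback pattern might look like this, assuming `pgsz' is a supported
 * page size and the other arguments are set up as for page_create_va():
 *
 *	pp = page_create_va_large(vp, off, pgsz, PG_EXCL, seg, vaddr, NULL);
 *	if (pp == NULL)
 *		pp = page_create_va(vp, off, pgsz, PG_EXCL | PG_WAIT,
 *		    seg, vaddr);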
2145 */ 2146 page_t * 2147 page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2148 struct seg *seg, caddr_t vaddr, void *arg) 2149 { 2150 pgcnt_t npages; 2151 page_t *pp; 2152 page_t *rootpp; 2153 lgrp_t *lgrp; 2154 lgrp_id_t *lgrpid = (lgrp_id_t *)arg; 2155 2156 ASSERT(vp != NULL); 2157 2158 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2159 PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0); 2160 /* but no others */ 2161 2162 ASSERT((flags & PG_EXCL) == PG_EXCL); 2163 2164 npages = btop(bytes); 2165 2166 if (kflt_on && ((flags & PG_NORELOC) || VN_ISKAS(vp)) && 2167 !panicstr) { 2168 /* 2169 * If the kernel freelist is active, and this is a 2170 * kernel page or one that is non-relocatable because it 2171 * is locked then set the PG_KFLT flag so that this page 2172 * will be allocated from the kernel freelist and therefore 2173 * will not fragment memory 2174 */ 2175 flags |= PG_KFLT; 2176 } 2177 2178 if (!kcage_on || panicstr) { 2179 /* 2180 * If the cage is off, we turn off the PG_NORELOC flag 2181 * however if the kernel freelist is active we will use 2182 * this to prevent memory fragmentation instead. 2183 * In panic do not use the cage or the kernel freelist. 2184 */ 2185 flags &= ~PG_NORELOC; 2186 } 2187 2188 /* 2189 * Make sure there's adequate physical memory available. 2190 * Note: PG_WAIT is ignored here. 2191 */ 2192 if (freemem <= throttlefree + npages) { 2193 VM_STAT_ADD(page_create_large_cnt[1]); 2194 return (NULL); 2195 } 2196 2197 /* 2198 * If cage or kernel freelist is on, dampen draw from cage when 2199 * available cage space is low. 2200 */ 2201 /* LINTED */ 2202 if (KERNEL_THROTTLE_PGCREATE(npages, flags, PG_WAIT)) { 2203 VM_STAT_ADD(page_create_large_cnt[2]); 2204 return (NULL); 2205 } 2206 2207 if (!pcf_decrement_bucket(npages) && 2208 !pcf_decrement_multiple(NULL, npages, 1)) { 2209 VM_STAT_ADD(page_create_large_cnt[4]); 2210 return (NULL); 2211 } 2212 2213 /* 2214 * This is where this function behaves fundamentally differently 2215 * than page_create_va(); since we're intending to map the page 2216 * with a single TTE, we have to get it as a physically contiguous 2217 * hardware pagesize chunk. If we can't, we fail. 2218 */ 2219 if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max && 2220 LGRP_EXISTS(lgrp_table[*lgrpid])) 2221 lgrp = lgrp_table[*lgrpid]; 2222 else 2223 lgrp = lgrp_mem_choose(seg, vaddr, bytes); 2224 2225 PAGE_GET_FREELISTS(rootpp, &kvp, off, seg, vaddr, 2226 bytes, flags & ~PG_MATCH_COLOR, lgrp); 2227 if (rootpp == NULL) { 2228 page_create_putback(npages); 2229 VM_STAT_ADD(page_create_large_cnt[5]); 2230 return (NULL); 2231 } 2232 2233 /* 2234 * if we got the page with the wrong mtype give it back this is a 2235 * workaround for CR 6249718. When CR 6249718 is fixed we never get 2236 * inside "if" and the workaround becomes just a nop 2237 */ 2238 if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) { 2239 page_list_add_pages(rootpp, 0); 2240 page_create_putback(npages); 2241 VM_STAT_ADD(page_create_large_cnt[6]); 2242 return (NULL); 2243 } 2244 2245 /* 2246 * If satisfying this request has left us with too little 2247 * memory, start the wheels turning to get some back. The 2248 * first clause of the test prevents waking up the pageout 2249 * daemon in situations where it would decide that there's 2250 * nothing to do. 
2251 */ 2252 if (nscan < desscan && freemem < minfree) { 2253 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2254 "pageout_cv_signal:freemem %ld", freemem); 2255 cv_signal(&proc_pageout->p_cv); 2256 } 2257 2258 pp = rootpp; 2259 while (npages--) { 2260 ASSERT(PAGE_EXCL(pp)); 2261 ASSERT(pp->p_vnode == NULL); 2262 ASSERT(!hat_page_is_mapped(pp)); 2263 PP_CLRFREE(pp); 2264 PP_CLRAGED(pp); 2265 if (!page_hashin(pp, vp, off, NULL)) 2266 panic("page_create_large: hashin failed: page %p", 2267 (void *)pp); 2268 page_io_lock(pp); 2269 off += PAGESIZE; 2270 pp = pp->p_next; 2271 } 2272 2273 VM_STAT_ADD(page_create_large_cnt[0]); 2274 return (rootpp); 2275 } 2276 2277 page_t * 2278 page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2279 struct seg *seg, caddr_t vaddr) 2280 { 2281 page_t *plist = NULL; 2282 pgcnt_t npages; 2283 pgcnt_t found_on_free = 0; 2284 pgcnt_t pages_req; 2285 page_t *npp = NULL; 2286 struct pcf *p; 2287 lgrp_t *lgrp; 2288 2289 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START, 2290 "page_create_start:vp %p off %llx bytes %lu flags %x", 2291 vp, off, bytes, flags); 2292 2293 ASSERT(bytes != 0 && vp != NULL); 2294 2295 if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) { 2296 panic("page_create: invalid flags"); 2297 /*NOTREACHED*/ 2298 } 2299 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2300 PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0); 2301 /* but no others */ 2302 2303 pages_req = npages = btopr(bytes); 2304 /* 2305 * Try to see whether request is too large to *ever* be 2306 * satisfied, in order to prevent deadlock. We arbitrarily 2307 * decide to limit maximum size requests to max_page_get. 2308 */ 2309 if (npages >= max_page_get) { 2310 if ((flags & PG_WAIT) == 0) { 2311 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG, 2312 "page_create_toobig:vp %p off %llx npages " 2313 "%lu max_page_get %lu", 2314 vp, off, npages, max_page_get); 2315 return (NULL); 2316 } else { 2317 cmn_err(CE_WARN, 2318 "Request for too much kernel memory " 2319 "(%lu bytes), will hang forever", bytes); 2320 for (;;) 2321 delay(1000000000); 2322 } 2323 } 2324 2325 if (kflt_on && ((flags & PG_NORELOC) || VN_ISKAS(vp)) && 2326 !panicstr) { 2327 /* 2328 * If the kernel freelist is active, and this is a 2329 * kernel page or one that is non-relocatable because it 2330 * is locked then set the PG_KFLT flag so that this page 2331 * will be allocated from the kernel freelist and therefore 2332 * will not fragment memory 2333 */ 2334 flags |= PG_KFLT; 2335 } 2336 2337 if (!kcage_on || panicstr) { 2338 /* 2339 * If the cage is off, we turn off the PG_NORELOC flag 2340 * however if the kernel freelist is active we will use 2341 * this to prevent memory fragmentation instead. 2342 * In panic do not use the cage or the kernel freelist. 2343 */ 2344 flags &= ~PG_NORELOC; 2345 } 2346 2347 if ((freemem <= throttlefree + npages) && 2348 (!page_create_throttle(npages, flags))) { 2349 return (NULL); 2350 } 2351 2352 /* 2353 * If cage or kernel freelist is on, dampen draw from cage when 2354 * available cage space is low. 2355 */ 2356 2357 /* LINTED */ 2358 if (KERNEL_THROTTLE_PGCREATE(npages, flags, 0)) { 2359 return (NULL); 2360 } 2361 2362 VM_STAT_ADD(page_create_cnt[0]); 2363 2364 if (!pcf_decrement_bucket(npages)) { 2365 /* 2366 * Have to look harder. If npages is greater than 2367 * one, then we might have to coalesce the counters. 2368 * 2369 * Go wait. We come back having accounted 2370 * for the memory. 
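 *
 * In outline (a sketch of the accounting contract, not the literal
 * code below): a failed fast-path decrement falls back to
 *
 *	if (!page_create_wait(npages, flags))
 *		return (NULL);
 *
 * after which the pages are considered accounted for, and any pages
 * we end up not using must be returned via page_create_putback().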
2371 */ 2372 VM_STAT_ADD(page_create_cnt[1]); 2373 if (!page_create_wait(npages, flags)) { 2374 VM_STAT_ADD(page_create_cnt[2]); 2375 return (NULL); 2376 } 2377 } 2378 2379 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS, 2380 "page_create_success:vp %p off %llx", vp, off); 2381 2382 /* 2383 * If satisfying this request has left us with too little 2384 * memory, start the wheels turning to get some back. The 2385 * first clause of the test prevents waking up the pageout 2386 * daemon in situations where it would decide that there's 2387 * nothing to do. 2388 */ 2389 if (nscan < desscan && freemem < minfree) { 2390 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2391 "pageout_cv_signal:freemem %ld", freemem); 2392 cv_signal(&proc_pageout->p_cv); 2393 } 2394 2395 /* 2396 * Loop around collecting the requested number of pages. 2397 * Most of the time, we have to `create' a new page. With 2398 * this in mind, pull the page off the free list before 2399 * getting the hash lock. This will minimize the hash 2400 * lock hold time, nesting, and the like. If it turns 2401 * out we don't need the page, we put it back at the end. 2402 */ 2403 while (npages--) { 2404 page_t *pp; 2405 kmutex_t *phm = NULL; 2406 ulong_t index; 2407 2408 index = PAGE_HASH_FUNC(vp, off); 2409 top: 2410 ASSERT(phm == NULL); 2411 ASSERT(index == PAGE_HASH_FUNC(vp, off)); 2412 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 2413 2414 if (npp == NULL) { 2415 /* 2416 * Try to get a page from the freelist (i.e., 2417 * a page with no [vp, off] tag). If that 2418 * fails, use the cachelist. 2419 * 2420 * During the first attempt at both the free 2421 * and cache lists we try for the correct color. 2422 */ 2423 /* 2424 * XXXX-how do we deal with virtually indexed 2425 * caches and colors? 2426 */ 2427 VM_STAT_ADD(page_create_cnt[4]); 2428 /* 2429 * Get lgroup to allocate next page of shared memory 2430 * from and use it to specify where to allocate 2431 * the physical memory. 2432 */ 2433 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 2434 PAGE_GET_FREELISTS(npp, vp, off, seg, vaddr, PAGESIZE, 2435 flags | PG_MATCH_COLOR, lgrp); 2436 if (npp == NULL) { 2437 npp = page_get_cachelist(vp, off, seg, 2438 vaddr, flags | PG_MATCH_COLOR, lgrp); 2439 if (npp == NULL) { 2440 npp = page_create_get_something(vp, 2441 off, seg, vaddr, 2442 flags & ~PG_MATCH_COLOR); 2443 } 2444 2445 if (PP_ISAGED(npp) == 0) { 2446 /* 2447 * Since this page came from the 2448 * cachelist, we must destroy the 2449 * old vnode association. 2450 */ 2451 page_hashout(npp, NULL); 2452 } 2453 } 2454 } 2455 2456 /* 2457 * We own this page! 2458 */ 2459 ASSERT(PAGE_EXCL(npp)); 2460 ASSERT(npp->p_vnode == NULL); 2461 ASSERT(!hat_page_is_mapped(npp)); 2462 PP_CLRFREE(npp); 2463 PP_CLRAGED(npp); 2464 2465 /* 2466 * Here we have a page in our hot little mitts and are 2467 * just waiting to stuff it on the appropriate lists. 2468 * Get the mutex and check to see if it really does 2469 * not exist. 2470 */ 2471 phm = PAGE_HASH_MUTEX(index); 2472 mutex_enter(phm); 2473 PAGE_HASH_SEARCH(index, pp, vp, off); 2474 if (pp == NULL) { 2475 VM_STAT_ADD(page_create_new); 2476 pp = npp; 2477 npp = NULL; 2478 if (!page_hashin(pp, vp, off, phm)) { 2479 /* 2480 * Since we hold the page hash mutex and 2481 * just searched for this page, page_hashin 2482 * had better not fail. If it does, that 2483 * means some thread did not follow the 2484 * page hash mutex rules. Panic now and 2485 * get it over with. As usual, go down 2486 * holding all the locks.
2487 */ 2488 ASSERT(MUTEX_HELD(phm)); 2489 panic("page_create: " 2490 "hashin failed %p %p %llx %p", 2491 (void *)pp, (void *)vp, off, (void *)phm); 2492 /*NOTREACHED*/ 2493 } 2494 ASSERT(MUTEX_HELD(phm)); 2495 mutex_exit(phm); 2496 phm = NULL; 2497 2498 /* 2499 * Hat layer locking need not be done to set 2500 * the following bits since the page is not hashed 2501 * and was on the free list (i.e., had no mappings). 2502 * 2503 * Set the reference bit to protect 2504 * against immediate pageout 2505 * 2506 * XXXmh modify freelist code to set reference 2507 * bit so we don't have to do it here. 2508 */ 2509 page_set_props(pp, P_REF); 2510 found_on_free++; 2511 } else { 2512 VM_STAT_ADD(page_create_exists); 2513 if (flags & PG_EXCL) { 2514 /* 2515 * Found an existing page, and the caller 2516 * wanted all new pages. Undo all of the work 2517 * we have done. 2518 */ 2519 mutex_exit(phm); 2520 phm = NULL; 2521 while (plist != NULL) { 2522 pp = plist; 2523 page_sub(&plist, pp); 2524 page_io_unlock(pp); 2525 /* large pages should not end up here */ 2526 ASSERT(pp->p_szc == 0); 2527 /*LINTED: constant in conditional ctx*/ 2528 VN_DISPOSE(pp, B_INVAL, 0, kcred); 2529 } 2530 VM_STAT_ADD(page_create_found_one); 2531 goto fail; 2532 } 2533 ASSERT(flags & PG_WAIT); 2534 if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) { 2535 /* 2536 * Start all over again if we blocked trying 2537 * to lock the page. 2538 */ 2539 mutex_exit(phm); 2540 VM_STAT_ADD(page_create_page_lock_failed); 2541 phm = NULL; 2542 goto top; 2543 } 2544 mutex_exit(phm); 2545 phm = NULL; 2546 2547 if (PP_ISFREE(pp)) { 2548 ASSERT(PP_ISAGED(pp) == 0); 2549 VM_STAT_ADD(pagecnt.pc_get_cache); 2550 page_list_sub(pp, PG_CACHE_LIST); 2551 PP_CLRFREE(pp); 2552 found_on_free++; 2553 } 2554 } 2555 2556 /* 2557 * Got a page! It is locked. Acquire the i/o 2558 * lock since we are going to use the p_next and 2559 * p_prev fields to link the requested pages together. 2560 */ 2561 page_io_lock(pp); 2562 page_add(&plist, pp); 2563 plist = plist->p_next; 2564 off += PAGESIZE; 2565 vaddr += PAGESIZE; 2566 } 2567 2568 ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1); 2569 fail: 2570 if (npp != NULL) { 2571 /* 2572 * Did not need this page after all. 2573 * Put it back on the free list. 2574 */ 2575 VM_STAT_ADD(page_create_putbacks); 2576 PP_SETFREE(npp); 2577 PP_SETAGED(npp); 2578 npp->p_offset = (u_offset_t)-1; 2579 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL); 2580 page_unlock(npp); 2581 } 2582 2583 ASSERT(pages_req >= found_on_free); 2584 2585 { 2586 uint_t overshoot = (uint_t)(pages_req - found_on_free); 2587 2588 if (overshoot) { 2589 VM_STAT_ADD(page_create_overshoot); 2590 p = &pcf[PCF_INDEX()]; 2591 mutex_enter(&p->pcf_lock); 2592 if (p->pcf_block) { 2593 p->pcf_reserve += overshoot; 2594 } else { 2595 p->pcf_count += overshoot; 2596 if (p->pcf_wait) { 2597 mutex_enter(&new_freemem_lock); 2598 if (freemem_wait) { 2599 cv_signal(&freemem_cv); 2600 p->pcf_wait--; 2601 } else { 2602 p->pcf_wait = 0; 2603 } 2604 mutex_exit(&new_freemem_lock); 2605 } 2606 } 2607 mutex_exit(&p->pcf_lock); 2608 /* freemem is approximate, so this test OK */ 2609 if (!p->pcf_block) 2610 freemem += overshoot; 2611 } 2612 } 2613 2614 return (plist); 2615 } 2616 2617 /* 2618 * One or more constituent pages of this large page has been marked 2619 * toxic. Simply demote the large page to PAGESIZE pages and let 2620 * page_free() handle it. This routine should only be called by 2621 * large page free routines (page_free_pages() and page_destroy_pages(). 
2622 * All pages are locked SE_EXCL and have already been marked free. 2623 */ 2624 static void 2625 page_free_toxic_pages(page_t *rootpp) 2626 { 2627 page_t *tpp; 2628 pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc); 2629 #ifdef DEBUG 2630 uint_t szc = rootpp->p_szc; 2631 #endif 2632 2633 for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) { 2634 ASSERT(tpp->p_szc == szc); 2635 ASSERT((PAGE_EXCL(tpp) && 2636 !page_iolock_assert(tpp)) || panicstr); 2637 tpp->p_szc = 0; 2638 } 2639 2640 while (rootpp != NULL) { 2641 tpp = rootpp; 2642 page_sub(&rootpp, tpp); 2643 ASSERT(PP_ISFREE(tpp)); 2644 PP_CLRFREE(tpp); 2645 page_free(tpp, 1); 2646 } 2647 } 2648 2649 /* 2650 * Put page on the "free" list. 2651 * The free list is really two lists maintained by 2652 * the PSM of whatever machine we happen to be on. 2653 */ 2654 void 2655 page_free(page_t *pp, int dontneed) 2656 { 2657 struct pcf *p; 2658 uint_t pcf_index; 2659 2660 ASSERT((PAGE_EXCL(pp) && 2661 !page_iolock_assert(pp)) || panicstr); 2662 2663 if (PP_ISFREE(pp)) { 2664 panic("page_free: page %p is free", (void *)pp); 2665 } 2666 2667 if (pp->p_szc != 0) { 2668 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 2669 PP_ISKAS(pp)) { 2670 panic("page_free: anon or kernel " 2671 "or no vnode large page %p", (void *)pp); 2672 } 2673 page_demote_vp_pages(pp); 2674 ASSERT(pp->p_szc == 0); 2675 } 2676 2677 /* 2678 * The page_struct_lock need not be acquired to examine these 2679 * fields since the page has an "exclusive" lock. 2680 */ 2681 if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 2682 pp->p_slckcnt != 0) { 2683 panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d " 2684 "slckcnt = %d", (void *)pp, page_pptonum(pp), pp->p_lckcnt, 2685 pp->p_cowcnt, pp->p_slckcnt); 2686 /*NOTREACHED*/ 2687 } 2688 2689 ASSERT(!hat_page_getshare(pp)); 2690 2691 PP_SETFREE(pp); 2692 ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) || 2693 !hat_ismod(pp)); 2694 page_clr_all_props(pp); 2695 ASSERT(!hat_page_getshare(pp)); 2696 2697 /* 2698 * Now we add the page to the head of the free list. 2699 * But if this page is associated with a paged vnode 2700 * then we adjust the head forward so that the page is 2701 * effectively at the end of the list. 2702 */ 2703 if (pp->p_vnode == NULL) { 2704 /* 2705 * Page has no identity, put it on the free list. 2706 */ 2707 PP_SETAGED(pp); 2708 pp->p_offset = (u_offset_t)-1; 2709 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 2710 VM_STAT_ADD(pagecnt.pc_free_free); 2711 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2712 "page_free_free:pp %p", pp); 2713 } else { 2714 PP_CLRAGED(pp); 2715 2716 if (!dontneed || nopageage) { 2717 /* move it to the tail of the list */ 2718 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL); 2719 2720 VM_STAT_ADD(pagecnt.pc_free_cache); 2721 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL, 2722 "page_free_cache_tail:pp %p", pp); 2723 } else { 2724 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD); 2725 2726 VM_STAT_ADD(pagecnt.pc_free_dontneed); 2727 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD, 2728 "page_free_cache_head:pp %p", pp); 2729 } 2730 } 2731 page_unlock(pp); 2732 2733 /* 2734 * Now do the `freemem' accounting. 2735 */ 2736 pcf_index = PCF_INDEX(); 2737 p = &pcf[pcf_index]; 2738 2739 mutex_enter(&p->pcf_lock); 2740 if (p->pcf_block) { 2741 p->pcf_reserve += 1; 2742 } else { 2743 p->pcf_count += 1; 2744 if (p->pcf_wait) { 2745 mutex_enter(&new_freemem_lock); 2746 /* 2747 * Check to see if some other thread 2748 * is actually waiting. 
Another bucket 2749 * may have woken it up by now. If there 2750 * are no waiters, then set our pcf_wait 2751 * count to zero to avoid coming in here 2752 * next time. Also, since only one page 2753 * was put on the free list, just wake 2754 * up one waiter. 2755 */ 2756 if (freemem_wait) { 2757 cv_signal(&freemem_cv); 2758 p->pcf_wait--; 2759 } else { 2760 p->pcf_wait = 0; 2761 } 2762 mutex_exit(&new_freemem_lock); 2763 } 2764 } 2765 mutex_exit(&p->pcf_lock); 2766 2767 /* freemem is approximate, so this test OK */ 2768 if (!p->pcf_block) 2769 freemem += 1; 2770 } 2771 2772 /* 2773 * Put page on the "free" list during initial startup. 2774 * This happens during initial single-threaded execution. 2775 */ 2776 void 2777 page_free_at_startup(page_t *pp) 2778 { 2779 struct pcf *p; 2780 uint_t pcf_index; 2781 2782 page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT); 2783 VM_STAT_ADD(pagecnt.pc_free_free); 2784 2785 /* 2786 * Now do the `freemem' accounting. 2787 */ 2788 pcf_index = PCF_INDEX(); 2789 p = &pcf[pcf_index]; 2790 2791 ASSERT(p->pcf_block == 0); 2792 ASSERT(p->pcf_wait == 0); 2793 p->pcf_count += 1; 2794 2795 /* freemem is approximate, so this is OK */ 2796 freemem += 1; 2797 } 2798 2799 void 2800 page_free_pages(page_t *pp) 2801 { 2802 page_t *tpp, *rootpp = NULL; 2803 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 2804 pgcnt_t i; 2805 #ifdef DEBUG 2806 uint_t szc = pp->p_szc; 2807 #endif 2808 2809 VM_STAT_ADD(pagecnt.pc_free_pages); 2810 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2811 "page_free_free:pp %p", pp); 2812 2813 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 2814 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 2815 panic("page_free_pages: not root page %p", (void *)pp); 2816 /*NOTREACHED*/ 2817 } 2818 2819 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 2820 ASSERT((PAGE_EXCL(tpp) && 2821 !page_iolock_assert(tpp)) || panicstr); 2822 if (PP_ISFREE(tpp)) { 2823 panic("page_free_pages: page %p is free", (void *)tpp); 2824 /*NOTREACHED*/ 2825 } 2826 if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 || 2827 tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) { 2828 panic("page_free_pages %p", (void *)tpp); 2829 /*NOTREACHED*/ 2830 } 2831 2832 ASSERT(!hat_page_getshare(tpp)); 2833 ASSERT(tpp->p_vnode == NULL); 2834 ASSERT(tpp->p_szc == szc); 2835 2836 PP_SETFREE(tpp); 2837 page_clr_all_props(tpp); 2838 PP_SETAGED(tpp); 2839 tpp->p_offset = (u_offset_t)-1; 2840 ASSERT(tpp->p_next == tpp); 2841 ASSERT(tpp->p_prev == tpp); 2842 page_list_concat(&rootpp, &tpp); 2843 } 2844 ASSERT(rootpp == pp); 2845 2846 page_list_add_pages(rootpp, 0); 2847 page_create_putback(pgcnt); 2848 } 2849 2850 int free_pages = 1; 2851 2852 /* 2853 * This routine attempts to return pages to the cachelist via page_release(). 2854 * It does not *have* to be successful in all cases, since the pageout scanner 2855 * will catch any pages it misses. It does need to be fast and not introduce 2856 * too much overhead. 2857 * 2858 * If a page isn't found on the unlocked sweep of the page_hash bucket, we 2859 * don't lock and retry. This is OK, since the page scanner will eventually 2860 * find any page we miss in free_vp_pages().
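 *
 * The range [off, off + len) is swept a PAGESIZE at a time, and ranges
 * backed by swap are skipped entirely. A hypothetical caller that has
 * finished with a region of a vnode might simply do:
 *
 *	free_vp_pages(vp, off, len);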
2861 */ 2862 void 2863 free_vp_pages(vnode_t *vp, u_offset_t off, size_t len) 2864 { 2865 page_t *pp; 2866 u_offset_t eoff; 2867 extern int swap_in_range(vnode_t *, u_offset_t, size_t); 2868 2869 eoff = off + len; 2870 2871 if (free_pages == 0) 2872 return; 2873 if (swap_in_range(vp, off, len)) 2874 return; 2875 2876 for (; off < eoff; off += PAGESIZE) { 2877 2878 /* 2879 * find the page using a fast, but inexact search. It'll be OK 2880 * if a few pages slip through the cracks here. 2881 */ 2882 pp = page_exists(vp, off); 2883 2884 /* 2885 * If we didn't find the page (it may not exist), the page 2886 * is free, looks still in use (shared), or we can't lock it, 2887 * just give up. 2888 */ 2889 if (pp == NULL || 2890 PP_ISFREE(pp) || 2891 page_share_cnt(pp) > 0 || 2892 !page_trylock(pp, SE_EXCL)) 2893 continue; 2894 2895 /* 2896 * Once we have locked pp, verify that it's still the 2897 * correct page and not already free 2898 */ 2899 ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL)); 2900 if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) { 2901 page_unlock(pp); 2902 continue; 2903 } 2904 2905 /* 2906 * try to release the page... 2907 */ 2908 (void) page_release(pp, 1); 2909 } 2910 } 2911 2912 /* 2913 * Reclaim the given page from the free list. 2914 * If pp is part of a large pages, only the given constituent page is reclaimed 2915 * and the large page it belonged to will be demoted. This can only happen 2916 * if the page is not on the cachelist. 2917 * 2918 * Returns 1 on success or 0 on failure. 2919 * 2920 * The page is unlocked if it can't be reclaimed (when freemem == 0). 2921 * If `lock' is non-null, it will be dropped and re-acquired if 2922 * the routine must wait while freemem is 0. 2923 * 2924 * As it turns out, boot_getpages() does this. It picks a page, 2925 * based on where OBP mapped in some address, gets its pfn, searches 2926 * the memsegs, locks the page, then pulls it off the free list! 2927 */ 2928 int 2929 page_reclaim(page_t *pp, kmutex_t *lock) 2930 { 2931 struct pcf *p; 2932 struct cpu *cpup; 2933 int enough; 2934 uint_t i; 2935 2936 ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1); 2937 ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp)); 2938 2939 /* 2940 * If `freemem' is 0, we cannot reclaim this page from the 2941 * freelist, so release every lock we might hold: the page, 2942 * and the `lock' before blocking. 2943 * 2944 * The only way `freemem' can become 0 while there are pages 2945 * marked free (have their p->p_free bit set) is when the 2946 * system is low on memory and doing a page_create(). In 2947 * order to guarantee that once page_create() starts acquiring 2948 * pages it will be able to get all that it needs since `freemem' 2949 * was decreased by the requested amount. So, we need to release 2950 * this page, and let page_create() have it. 2951 * 2952 * Since `freemem' being zero is not supposed to happen, just 2953 * use the usual hash stuff as a starting point. If that bucket 2954 * is empty, then assume the worst, and start at the beginning 2955 * of the pcf array. If we always start at the beginning 2956 * when acquiring more than one pcf lock, there won't be any 2957 * deadlock problems. 2958 */ 2959 2960 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */ 2961 2962 if (freemem <= throttlefree && !page_create_throttle(1l, 0)) { 2963 pcf_acquire_all(); 2964 goto page_reclaim_nomem; 2965 } 2966 2967 enough = pcf_decrement_bucket(1); 2968 2969 if (!enough) { 2970 VM_STAT_ADD(page_reclaim_zero); 2971 /* 2972 * Check again. 
It's possible that some other thread 2973 * could have been right behind us, and added one 2974 * to a list somewhere. Acquire each of the pcf locks 2975 * until we find a page. 2976 */ 2977 p = pcf; 2978 for (i = 0; i < pcf_fanout; i++) { 2979 mutex_enter(&p->pcf_lock); 2980 if (p->pcf_count >= 1) { 2981 p->pcf_count -= 1; 2982 /* 2983 * freemem is not protected by any lock. Thus, 2984 * we cannot have any assertion containing 2985 * freemem here. 2986 */ 2987 freemem -= 1; 2988 enough = 1; 2989 break; 2990 } 2991 p++; 2992 } 2993 2994 if (!enough) { 2995 page_reclaim_nomem: 2996 /* 2997 * We really can't have page `pp'. 2998 * Time for the no-memory dance with 2999 * page_free(). This is just like 3000 * page_create_wait(). Plus the added 3001 * attraction of releasing whatever mutex 3002 * we held when we were called, passed in `lock'. 3003 * Page_unlock() will wake up any thread 3004 * waiting around for this page. 3005 */ 3006 if (lock) { 3007 VM_STAT_ADD(page_reclaim_zero_locked); 3008 mutex_exit(lock); 3009 } 3010 page_unlock(pp); 3011 3012 /* 3013 * Get this before we drop all the pcf locks. 3014 */ 3015 mutex_enter(&new_freemem_lock); 3016 3017 p = pcf; 3018 for (i = 0; i < pcf_fanout; i++) { 3019 p->pcf_wait++; 3020 mutex_exit(&p->pcf_lock); 3021 p++; 3022 } 3023 3024 freemem_wait++; 3025 cv_wait(&freemem_cv, &new_freemem_lock); 3026 freemem_wait--; 3027 3028 mutex_exit(&new_freemem_lock); 3029 3030 if (lock) { 3031 mutex_enter(lock); 3032 } 3033 return (0); 3034 } 3035 3036 /* 3037 * The pcf accounting has been done; since none of the 3038 * pcf_wait flags have been set, just 3039 * drop the locks and continue on. 3040 */ 3041 while (p >= pcf) { 3042 mutex_exit(&p->pcf_lock); 3043 p--; 3044 } 3045 } 3046 3047 3048 VM_STAT_ADD(pagecnt.pc_reclaim); 3049 3050 /* 3051 * page_list_sub will handle the case where pp is a large page. 3052 * It's possible that the page was promoted while on the freelist. 3053 */ 3054 if (PP_ISAGED(pp)) { 3055 page_list_sub(pp, PG_FREE_LIST); 3056 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE, 3057 "page_reclaim_free:pp %p", pp); 3058 } else { 3059 page_list_sub(pp, PG_CACHE_LIST); 3060 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE, 3061 "page_reclaim_cache:pp %p", pp); 3062 } 3063 3064 /* 3065 * Clear the p_free & p_age bits since this page is no longer 3066 * on the free list. Notice that there is a brief time when 3067 * a page is marked as free, but is not on the list. 3068 * 3069 * Set the reference bit to protect against immediate pageout. 3070 */ 3071 PP_CLRFREE(pp); 3072 PP_CLRAGED(pp); 3073 page_set_props(pp, P_REF); 3074 3075 CPU_STATS_ENTER_K(); 3076 cpup = CPU; /* get cpup now that CPU cannot change */ 3077 CPU_STATS_ADDQ(cpup, vm, pgrec, 1); 3078 CPU_STATS_ADDQ(cpup, vm, pgfrec, 1); 3079 CPU_STATS_EXIT_K(); 3080 ASSERT(pp->p_szc == 0); 3081 3082 return (1); 3083 } 3084 3085 /* 3086 * Destroy identity of the page and put it back on 3087 * the page free list. Assumes that the caller has 3088 * acquired the "exclusive" lock on the page.
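 *
 * For illustration only (page_lookup() is assumed to be the usual way
 * a caller obtains the SE_EXCL lock):
 *
 *	if ((pp = page_lookup(vp, off, SE_EXCL)) != NULL)
 *		page_destroy(pp, 0);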
3089 */ 3090 void 3091 page_destroy(page_t *pp, int dontfree) 3092 { 3093 ASSERT((PAGE_EXCL(pp) && 3094 !page_iolock_assert(pp)) || panicstr); 3095 ASSERT(pp->p_slckcnt == 0 || panicstr); 3096 3097 if (pp->p_szc != 0) { 3098 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 3099 PP_ISKAS(pp)) { 3100 panic("page_destroy: anon or kernel or no vnode " 3101 "large page %p", (void *)pp); 3102 } 3103 page_demote_vp_pages(pp); 3104 ASSERT(pp->p_szc == 0); 3105 } 3106 3107 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp); 3108 3109 /* 3110 * Unload translations, if any, then hash out the 3111 * page to erase its identity. 3112 */ 3113 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3114 page_hashout(pp, NULL); 3115 3116 if (!dontfree) { 3117 /* 3118 * Acquire the "freemem_lock" for availrmem. 3119 * The page_struct_lock need not be acquired for lckcnt 3120 * and cowcnt since the page has an "exclusive" lock. 3121 * We are doing a modified version of page_pp_unlock here. 3122 */ 3123 if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) { 3124 mutex_enter(&freemem_lock); 3125 if (pp->p_lckcnt != 0) { 3126 availrmem++; 3127 pages_locked--; 3128 pp->p_lckcnt = 0; 3129 } 3130 if (pp->p_cowcnt != 0) { 3131 availrmem += pp->p_cowcnt; 3132 pages_locked -= pp->p_cowcnt; 3133 pp->p_cowcnt = 0; 3134 } 3135 mutex_exit(&freemem_lock); 3136 } 3137 /* 3138 * Put the page on the "free" list. 3139 */ 3140 page_free(pp, 0); 3141 } 3142 } 3143 3144 void 3145 page_destroy_pages(page_t *pp) 3146 { 3147 3148 page_t *tpp, *rootpp = NULL; 3149 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 3150 pgcnt_t i, pglcks = 0; 3151 #ifdef DEBUG 3152 uint_t szc = pp->p_szc; 3153 #endif 3154 3155 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 3156 3157 VM_STAT_ADD(pagecnt.pc_destroy_pages); 3158 3159 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp); 3160 3161 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 3162 panic("page_destroy_pages: not root page %p", (void *)pp); 3163 /*NOTREACHED*/ 3164 } 3165 3166 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 3167 ASSERT((PAGE_EXCL(tpp) && 3168 !page_iolock_assert(tpp)) || panicstr); 3169 ASSERT(tpp->p_slckcnt == 0 || panicstr); 3170 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 3171 page_hashout(tpp, NULL); 3172 ASSERT(tpp->p_offset == (u_offset_t)-1); 3173 if (tpp->p_lckcnt != 0) { 3174 pglcks++; 3175 tpp->p_lckcnt = 0; 3176 } else if (tpp->p_cowcnt != 0) { 3177 pglcks += tpp->p_cowcnt; 3178 tpp->p_cowcnt = 0; 3179 } 3180 ASSERT(!hat_page_getshare(tpp)); 3181 ASSERT(tpp->p_vnode == NULL); 3182 ASSERT(tpp->p_szc == szc); 3183 3184 PP_SETFREE(tpp); 3185 page_clr_all_props(tpp); 3186 PP_SETAGED(tpp); 3187 ASSERT(tpp->p_next == tpp); 3188 ASSERT(tpp->p_prev == tpp); 3189 page_list_concat(&rootpp, &tpp); 3190 } 3191 3192 ASSERT(rootpp == pp); 3193 if (pglcks != 0) { 3194 mutex_enter(&freemem_lock); 3195 availrmem += pglcks; 3196 mutex_exit(&freemem_lock); 3197 } 3198 3199 page_list_add_pages(rootpp, 0); 3200 page_create_putback(pgcnt); 3201 } 3202 3203 /* 3204 * Similar to page_destroy(), but destroys pages which are 3205 * locked and known to be on the page free list. Since 3206 * the page is known to be free and locked, no one can access 3207 * it. 3208 * 3209 * Also, the number of free pages does not change. 
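 *
 * The caller's contract, expressed as a sketch:
 *
 *	ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp) && !PP_ISAGED(pp));
 *	page_destroy_free(pp);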
3210 */ 3211 void 3212 page_destroy_free(page_t *pp) 3213 { 3214 ASSERT(PAGE_EXCL(pp)); 3215 ASSERT(PP_ISFREE(pp)); 3216 ASSERT(pp->p_vnode); 3217 ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0); 3218 ASSERT(!hat_page_is_mapped(pp)); 3219 ASSERT(PP_ISAGED(pp) == 0); 3220 ASSERT(pp->p_szc == 0); 3221 3222 VM_STAT_ADD(pagecnt.pc_destroy_free); 3223 page_list_sub(pp, PG_CACHE_LIST); 3224 3225 page_hashout(pp, NULL); 3226 ASSERT(pp->p_vnode == NULL); 3227 ASSERT(pp->p_offset == (u_offset_t)-1); 3228 ASSERT(pp->p_hash == NULL); 3229 3230 PP_SETAGED(pp); 3231 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 3232 page_unlock(pp); 3233 3234 mutex_enter(&new_freemem_lock); 3235 if (freemem_wait) { 3236 cv_signal(&freemem_cv); 3237 } 3238 mutex_exit(&new_freemem_lock); 3239 } 3240 3241 /* 3242 * Rename the page "opp" to have an identity specified 3243 * by [vp, off]. If a page already exists with this name, 3244 * it is locked and destroyed. Note that the page's 3245 * translations are not unloaded during the rename. 3246 * 3247 * This routine is used by the anon layer to "steal" the 3248 * original page and is not unlike destroying a page and 3249 * creating a new page using the same page frame. 3250 * 3251 * XXX -- Could deadlock if caller 1 tries to rename A to B while 3252 * caller 2 tries to rename B to A. 3253 */ 3254 void 3255 page_rename(page_t *opp, vnode_t *vp, u_offset_t off) 3256 { 3257 page_t *pp; 3258 int olckcnt = 0; 3259 int ocowcnt = 0; 3260 kmutex_t *phm; 3261 ulong_t index; 3262 3263 ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp)); 3264 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3265 ASSERT(PP_ISFREE(opp) == 0); 3266 3267 VM_STAT_ADD(page_rename_count); 3268 3269 TRACE_3(TR_FAC_VM, TR_PAGE_RENAME, 3270 "page rename:pp %p vp %p off %llx", opp, vp, off); 3271 3272 /* 3273 * CacheFS may call page_rename for a large NFS page 3274 * when both CacheFS and NFS mount points are used 3275 * by applications. Demote this large page before 3276 * renaming it, to ensure that there are no "partial" 3277 * large pages left lying around. 3278 */ 3279 if (opp->p_szc != 0) { 3280 #ifdef DEBUG 3281 vnode_t *ovp = opp->p_vnode; 3282 #endif 3283 ASSERT(ovp != NULL); 3284 ASSERT(!IS_SWAPFSVP(ovp)); 3285 ASSERT(!VN_ISKAS(ovp)); 3286 page_demote_vp_pages(opp); 3287 ASSERT(opp->p_szc == 0); 3288 } 3289 3290 page_hashout(opp, NULL); 3291 PP_CLRAGED(opp); 3292 3293 /* 3294 * Acquire the appropriate page hash lock, since 3295 * we're going to rename the page. 3296 */ 3297 index = PAGE_HASH_FUNC(vp, off); 3298 phm = PAGE_HASH_MUTEX(index); 3299 mutex_enter(phm); 3300 top: 3301 /* 3302 * Look for an existing page with this name and destroy it if found. 3303 * By holding the page hash lock all the way to the page_hashin() 3304 * call, we are assured that no page can be created with this 3305 * identity. In the case when the phm lock is dropped to undo any 3306 * hat layer mappings, the existing page is held with an "exclusive" 3307 * lock, again preventing another page from being created with 3308 * this identity. 3309 */ 3310 PAGE_HASH_SEARCH(index, pp, vp, off); 3311 if (pp != NULL) { 3312 VM_STAT_ADD(page_rename_exists); 3313 3314 /* 3315 * As it turns out, this is one of only two places where 3316 * page_lock() needs to hold the passed-in lock in the 3317 * successful case. In all of the others, the lock could 3318 * be dropped as soon as the attempt is made to lock 3319 * the page. It is tempting to add yet another argument, 3320 * PL_KEEP or PL_DROP, to let page_lock know what to do.
3321 */ 3322 if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) { 3323 /* 3324 * Went to sleep because the page could not 3325 * be locked. We were woken up when the page 3326 * was unlocked, or when the page was destroyed. 3327 * In either case, `phm' was dropped while we 3328 * slept. Hence we should not just roar through 3329 * this loop. 3330 */ 3331 goto top; 3332 } 3333 3334 /* 3335 * If an existing page is a large page, then demote 3336 * it to ensure that no "partial" large pages are 3337 * "created" after page_rename. An existing page 3338 * can be a CacheFS page, and can't belong to swapfs. 3339 */ 3340 if (hat_page_is_mapped(pp)) { 3341 /* 3342 * Unload translations. Since we hold the 3343 * exclusive lock on this page, the page 3344 * cannot be changed while we drop phm. 3345 * This is also not a lock protocol violation, 3346 * but rather the proper way to do things. 3347 */ 3348 mutex_exit(phm); 3349 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3350 if (pp->p_szc != 0) { 3351 ASSERT(!IS_SWAPFSVP(vp)); 3352 ASSERT(!VN_ISKAS(vp)); 3353 page_demote_vp_pages(pp); 3354 ASSERT(pp->p_szc == 0); 3355 } 3356 mutex_enter(phm); 3357 } else if (pp->p_szc != 0) { 3358 ASSERT(!IS_SWAPFSVP(vp)); 3359 ASSERT(!VN_ISKAS(vp)); 3360 mutex_exit(phm); 3361 page_demote_vp_pages(pp); 3362 ASSERT(pp->p_szc == 0); 3363 mutex_enter(phm); 3364 } 3365 page_hashout(pp, phm); 3366 } 3367 /* 3368 * Hash in the page with the new identity. 3369 */ 3370 if (!page_hashin(opp, vp, off, phm)) { 3371 /* 3372 * We were holding phm while we searched for [vp, off] 3373 * and only dropped phm if we found and locked a page. 3374 * If we can't create this page now, then something 3375 * is really broken. 3376 */ 3377 panic("page_rename: Can't hash in page: %p", (void *)pp); 3378 /*NOTREACHED*/ 3379 } 3380 3381 ASSERT(MUTEX_HELD(phm)); 3382 mutex_exit(phm); 3383 3384 /* 3385 * Now that we have dropped phm, let's get around to finishing up 3386 * with pp. 3387 */ 3388 if (pp != NULL) { 3389 ASSERT(!hat_page_is_mapped(pp)); 3390 /* for now large pages should not end up here */ 3391 ASSERT(pp->p_szc == 0); 3392 /* 3393 * Save the locks for transfer to the new page and then 3394 * clear them so page_free doesn't think they're important. 3395 * The page_struct_lock need not be acquired for lckcnt and 3396 * cowcnt since the page has an "exclusive" lock. 3397 */ 3398 olckcnt = pp->p_lckcnt; 3399 ocowcnt = pp->p_cowcnt; 3400 pp->p_lckcnt = pp->p_cowcnt = 0; 3401 3402 /* 3403 * Put the page on the "free" list after we drop 3404 * the lock. The less work under the lock the better. 3405 */ 3406 /*LINTED: constant in conditional context*/ 3407 VN_DISPOSE(pp, B_FREE, 0, kcred); 3408 } 3409 3410 /* 3411 * Transfer the lock count from the old page (if any). 3412 * The page_struct_lock need not be acquired for lckcnt and 3413 * cowcnt since the page has an "exclusive" lock. 3414 */ 3415 opp->p_lckcnt += olckcnt; 3416 opp->p_cowcnt += ocowcnt; 3417 } 3418 3419 /* 3420 * Low-level routine to add page `pp' to the hash and vp chains for [vp, offset]. 3421 * 3422 * Pages are normally inserted at the start of a vnode's v_pages list. 3423 * If the vnode is VMODSORT and the page is modified, it goes at the end. 3424 * This can happen when a modified page is relocated for DR. 3425 * 3426 * Returns 1 on success and 0 on failure.
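 *
 * This is the internal half of page_hashin() below; a caller of the
 * exported interface that already holds the hash mutex might look like
 * this (sketch only):
 *
 *	phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off));
 *	mutex_enter(phm);
 *	if (!page_hashin(pp, vp, off, phm))
 *		panic("...");	(or back off, if a duplicate is legal)
 *	mutex_exit(phm);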
3427 */ 3428 static int 3429 page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset) 3430 { 3431 page_t **listp; 3432 page_t *tp; 3433 ulong_t index; 3434 3435 ASSERT(PAGE_EXCL(pp)); 3436 ASSERT(vp != NULL); 3437 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3438 3439 /* 3440 * Be sure to set these up before the page is inserted on the hash 3441 * list. As soon as the page is placed on the list some other 3442 * thread might get confused and wonder how this page could 3443 * possibly hash to this list. 3444 */ 3445 pp->p_vnode = vp; 3446 pp->p_offset = offset; 3447 3448 /* 3449 * record if this page is on a swap vnode 3450 */ 3451 if ((vp->v_flag & VISSWAP) != 0) 3452 PP_SETSWAP(pp); 3453 3454 index = PAGE_HASH_FUNC(vp, offset); 3455 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index))); 3456 listp = &page_hash[index]; 3457 3458 /* 3459 * If this page is already hashed in, fail this attempt to add it. 3460 */ 3461 for (tp = *listp; tp != NULL; tp = tp->p_hash) { 3462 if (tp->p_vnode == vp && tp->p_offset == offset) { 3463 pp->p_vnode = NULL; 3464 pp->p_offset = (u_offset_t)(-1); 3465 return (0); 3466 } 3467 } 3468 pp->p_hash = *listp; 3469 *listp = pp; 3470 3471 /* 3472 * Add the page to the vnode's list of pages 3473 */ 3474 if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp)) 3475 listp = &vp->v_pages->p_vpprev->p_vpnext; 3476 else 3477 listp = &vp->v_pages; 3478 3479 page_vpadd(listp, pp); 3480 3481 return (1); 3482 } 3483 3484 /* 3485 * Add page `pp' to both the hash and vp chains for [vp, offset]. 3486 * 3487 * Returns 1 on success and 0 on failure. 3488 * If hold is passed in, it is not dropped. 3489 */ 3490 int 3491 page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold) 3492 { 3493 kmutex_t *phm = NULL; 3494 kmutex_t *vphm; 3495 int rc; 3496 3497 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3498 ASSERT(pp->p_fsdata == 0 || panicstr); 3499 3500 TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN, 3501 "page_hashin:pp %p vp %p offset %llx", 3502 pp, vp, offset); 3503 3504 VM_STAT_ADD(hashin_count); 3505 3506 if (hold != NULL) 3507 phm = hold; 3508 else { 3509 VM_STAT_ADD(hashin_not_held); 3510 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset)); 3511 mutex_enter(phm); 3512 } 3513 3514 vphm = page_vnode_mutex(vp); 3515 mutex_enter(vphm); 3516 rc = page_do_hashin(pp, vp, offset); 3517 mutex_exit(vphm); 3518 if (hold == NULL) 3519 mutex_exit(phm); 3520 #ifdef VM_STATS 3521 if (rc == 0) { 3522 VM_STAT_ADD(hashin_already); 3523 } 3524 #endif 3525 return (rc); 3526 } 3527 3528 /* 3529 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3530 * All mutexes must be held 3531 */ 3532 static void 3533 page_do_hashout(page_t *pp) 3534 { 3535 page_t **hpp; 3536 page_t *hp; 3537 vnode_t *vp = pp->p_vnode; 3538 3539 ASSERT(vp != NULL); 3540 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3541 3542 /* 3543 * First, take pp off of its hash chain. 3544 */ 3545 hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)]; 3546 3547 for (;;) { 3548 hp = *hpp; 3549 if (hp == pp) 3550 break; 3551 if (hp == NULL) { 3552 panic("page_do_hashout"); 3553 /*NOTREACHED*/ 3554 } 3555 hpp = &hp->p_hash; 3556 } 3557 *hpp = pp->p_hash; 3558 3559 /* 3560 * Now remove it from its associated vnode. 
3561 */ 3562 if (vp->v_pages) 3563 page_vpsub(&vp->v_pages, pp); 3564 3565 pp->p_hash = NULL; 3566 page_clr_all_props(pp); 3567 PP_CLRSWAP(pp); 3568 pp->p_vnode = NULL; 3569 pp->p_offset = (u_offset_t)-1; 3570 pp->p_fsdata = 0; 3571 } 3572 3573 /* 3574 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3575 * 3576 * When `phm' is non-NULL it contains the address of the mutex protecting the 3577 * hash list pp is on. It is not dropped. 3578 */ 3579 void 3580 page_hashout(page_t *pp, kmutex_t *phm) 3581 { 3582 vnode_t *vp; 3583 ulong_t index; 3584 kmutex_t *nphm; 3585 kmutex_t *vphm; 3586 kmutex_t *sep; 3587 3588 ASSERT(phm != NULL ? MUTEX_HELD(phm) : 1); 3589 ASSERT(pp->p_vnode != NULL); 3590 ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr); 3591 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode))); 3592 3593 vp = pp->p_vnode; 3594 3595 TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT, 3596 "page_hashout:pp %p vp %p", pp, vp); 3597 3598 /* Kernel probe */ 3599 TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */, 3600 tnf_opaque, vnode, vp, 3601 tnf_offset, offset, pp->p_offset); 3602 3603 /* 3604 * 3605 */ 3606 VM_STAT_ADD(hashout_count); 3607 index = PAGE_HASH_FUNC(vp, pp->p_offset); 3608 if (phm == NULL) { 3609 VM_STAT_ADD(hashout_not_held); 3610 nphm = PAGE_HASH_MUTEX(index); 3611 mutex_enter(nphm); 3612 } 3613 ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1); 3614 3615 3616 /* 3617 * grab page vnode mutex and remove it... 3618 */ 3619 vphm = page_vnode_mutex(vp); 3620 mutex_enter(vphm); 3621 3622 page_do_hashout(pp); 3623 3624 mutex_exit(vphm); 3625 if (phm == NULL) 3626 mutex_exit(nphm); 3627 3628 /* 3629 * Wake up processes waiting for this page. The page's 3630 * identity has been changed, and is probably not the 3631 * desired page any longer. 3632 */ 3633 sep = page_se_mutex(pp); 3634 mutex_enter(sep); 3635 pp->p_selock &= ~SE_EWANTED; 3636 if (CV_HAS_WAITERS(&pp->p_cv)) 3637 cv_broadcast(&pp->p_cv); 3638 mutex_exit(sep); 3639 } 3640 3641 /* 3642 * Add the page to the front of a linked list of pages 3643 * using the p_next & p_prev pointers for the list. 3644 * The caller is responsible for protecting the list pointers. 3645 */ 3646 void 3647 page_add(page_t **ppp, page_t *pp) 3648 { 3649 ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3650 3651 page_add_common(ppp, pp); 3652 } 3653 3654 3655 3656 /* 3657 * Common code for page_add() and mach_page_add() 3658 */ 3659 void 3660 page_add_common(page_t **ppp, page_t *pp) 3661 { 3662 if (*ppp == NULL) { 3663 pp->p_next = pp->p_prev = pp; 3664 } else { 3665 pp->p_next = *ppp; 3666 pp->p_prev = (*ppp)->p_prev; 3667 (*ppp)->p_prev = pp; 3668 pp->p_prev->p_next = pp; 3669 } 3670 *ppp = pp; 3671 } 3672 3673 3674 /* 3675 * Remove this page from a linked list of pages 3676 * using the p_next & p_prev pointers for the list. 3677 * 3678 * The caller is responsible for protecting the list pointers. 3679 */ 3680 void 3681 page_sub(page_t **ppp, page_t *pp) 3682 { 3683 ASSERT((PP_ISFREE(pp)) ? 
1 : 3684 (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp)); 3685 3686 if (*ppp == NULL || pp == NULL) { 3687 panic("page_sub: bad arg(s): pp %p, *ppp %p", 3688 (void *)pp, (void *)(*ppp)); 3689 /*NOTREACHED*/ 3690 } 3691 3692 page_sub_common(ppp, pp); 3693 } 3694 3695 3696 /* 3697 * Common code for page_sub() and mach_page_sub() 3698 */ 3699 void 3700 page_sub_common(page_t **ppp, page_t *pp) 3701 { 3702 if (*ppp == pp) 3703 *ppp = pp->p_next; /* go to next page */ 3704 3705 if (*ppp == pp) 3706 *ppp = NULL; /* page list is gone */ 3707 else { 3708 pp->p_prev->p_next = pp->p_next; 3709 pp->p_next->p_prev = pp->p_prev; 3710 } 3711 pp->p_prev = pp->p_next = pp; /* make pp a list of one */ 3712 } 3713 3714 3715 /* 3716 * Break page list oppp into two lists with npages in the first list. 3717 * The tail is returned in nppp. 3718 */ 3719 void 3720 page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages) 3721 { 3722 page_t *s1pp = *oppp; 3723 page_t *s2pp; 3724 page_t *e1pp, *e2pp; 3725 long n = 0; 3726 3727 if (s1pp == NULL) { 3728 *nppp = NULL; 3729 return; 3730 } 3731 if (npages == 0) { 3732 *nppp = s1pp; 3733 *oppp = NULL; 3734 return; 3735 } 3736 for (n = 0, s2pp = *oppp; n < npages; n++) { 3737 s2pp = s2pp->p_next; 3738 } 3739 /* Fix head and tail of new lists */ 3740 e1pp = s2pp->p_prev; 3741 e2pp = s1pp->p_prev; 3742 s1pp->p_prev = e1pp; 3743 e1pp->p_next = s1pp; 3744 s2pp->p_prev = e2pp; 3745 e2pp->p_next = s2pp; 3746 3747 /* second list empty */ 3748 if (s2pp == s1pp) { 3749 *oppp = s1pp; 3750 *nppp = NULL; 3751 } else { 3752 *oppp = s1pp; 3753 *nppp = s2pp; 3754 } 3755 } 3756 3757 /* 3758 * Concatenate page list nppp onto the end of list ppp. 3759 */ 3760 void 3761 page_list_concat(page_t **ppp, page_t **nppp) 3762 { 3763 page_t *s1pp, *s2pp, *e1pp, *e2pp; 3764 3765 if (*nppp == NULL) { 3766 return; 3767 } 3768 if (*ppp == NULL) { 3769 *ppp = *nppp; 3770 return; 3771 } 3772 s1pp = *ppp; 3773 e1pp = s1pp->p_prev; 3774 s2pp = *nppp; 3775 e2pp = s2pp->p_prev; 3776 s1pp->p_prev = e2pp; 3777 e2pp->p_next = s1pp; 3778 e1pp->p_next = s2pp; 3779 s2pp->p_prev = e1pp; 3780 } 3781 3782 /* 3783 * Return the next page in the page list. 3784 */ 3785 page_t * 3786 page_list_next(page_t *pp) 3787 { 3788 return (pp->p_next); 3789 } 3790 3791 3792 /* 3793 * Add the page to the front of the linked list of pages 3794 * using p_vpnext/p_vpprev pointers for the list. 3795 * 3796 * The caller is responsible for protecting the lists. 3797 */ 3798 void 3799 page_vpadd(page_t **ppp, page_t *pp) 3800 { 3801 if (*ppp == NULL) { 3802 pp->p_vpnext = pp->p_vpprev = pp; 3803 } else { 3804 pp->p_vpnext = *ppp; 3805 pp->p_vpprev = (*ppp)->p_vpprev; 3806 (*ppp)->p_vpprev = pp; 3807 pp->p_vpprev->p_vpnext = pp; 3808 } 3809 *ppp = pp; 3810 } 3811 3812 /* 3813 * Remove this page from the linked list of pages 3814 * using p_vpnext/p_vpprev pointers for the list. 3815 * 3816 * The caller is responsible for protecting the lists.
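 *
 * "Protecting the lists" here means holding the vnode's page mutex,
 * e.g. (sketch):
 *
 *	vphm = page_vnode_mutex(vp);
 *	mutex_enter(vphm);
 *	page_vpsub(&vp->v_pages, pp);
 *	mutex_exit(vphm);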
3817 */ 3818 void 3819 page_vpsub(page_t **ppp, page_t *pp) 3820 { 3821 if (*ppp == NULL || pp == NULL) { 3822 panic("page_vpsub: bad arg(s): pp %p, *ppp %p", 3823 (void *)pp, (void *)(*ppp)); 3824 /*NOTREACHED*/ 3825 } 3826 3827 if (*ppp == pp) 3828 *ppp = pp->p_vpnext; /* go to next page */ 3829 3830 if (*ppp == pp) 3831 *ppp = NULL; /* page list is gone */ 3832 else { 3833 pp->p_vpprev->p_vpnext = pp->p_vpnext; 3834 pp->p_vpnext->p_vpprev = pp->p_vpprev; 3835 } 3836 pp->p_vpprev = pp->p_vpnext = pp; /* make pp a list of one */ 3837 } 3838 3839 /* 3840 * Lock a physical page into memory "long term". Used to support "lock 3841 * in memory" functions. Accepts the page to be locked, and a cow variable 3842 * to indicate whether the lock will travel to the new page during 3843 * a potential copy-on-write. 3844 */ 3845 int 3846 page_pp_lock( 3847 page_t *pp, /* page to be locked */ 3848 int cow, /* cow lock */ 3849 int kernel) /* must succeed -- ignore checking */ 3850 { 3851 int r = 0; /* result -- assume failure */ 3852 3853 ASSERT(PAGE_LOCKED(pp)); 3854 3855 page_struct_lock(pp); 3856 /* 3857 * Acquire the "freemem_lock" for availrmem. 3858 */ 3859 if (cow) { 3860 mutex_enter(&freemem_lock); 3861 if ((availrmem > pages_pp_maximum) && 3862 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 3863 availrmem--; 3864 pages_locked++; 3865 mutex_exit(&freemem_lock); 3866 r = 1; 3867 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 3868 cmn_err(CE_WARN, 3869 "COW lock limit reached on pfn 0x%lx", 3870 page_pptonum(pp)); 3871 } 3872 } else 3873 mutex_exit(&freemem_lock); 3874 } else { 3875 if (pp->p_lckcnt) { 3876 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 3877 r = 1; 3878 if (++pp->p_lckcnt == 3879 (ushort_t)PAGE_LOCK_MAXIMUM) { 3880 cmn_err(CE_WARN, "Page lock limit " 3881 "reached on pfn 0x%lx", 3882 page_pptonum(pp)); 3883 } 3884 } 3885 } else { 3886 if (kernel) { 3887 /* availrmem accounting done by caller */ 3888 ++pp->p_lckcnt; 3889 r = 1; 3890 } else { 3891 mutex_enter(&freemem_lock); 3892 if (availrmem > pages_pp_maximum) { 3893 availrmem--; 3894 pages_locked++; 3895 ++pp->p_lckcnt; 3896 r = 1; 3897 } 3898 mutex_exit(&freemem_lock); 3899 } 3900 } 3901 } 3902 page_struct_unlock(pp); 3903 return (r); 3904 } 3905 3906 /* 3907 * Decommit a lock on a physical page frame. Account for cow locks if 3908 * appropriate. 3909 */ 3910 void 3911 page_pp_unlock( 3912 page_t *pp, /* page to be unlocked */ 3913 int cow, /* expect cow lock */ 3914 int kernel) /* this was a kernel lock */ 3915 { 3916 ASSERT(PAGE_LOCKED(pp)); 3917 3918 page_struct_lock(pp); 3919 /* 3920 * Acquire the "freemem_lock" for availrmem. 3921 * If cowcnt or lckcnt is already 0, do nothing; i.e., we 3922 * could be called to unlock even if nothing is locked. This could 3923 * happen if locked file pages were truncated (removing the lock) 3924 * and the file was grown again and new pages faulted in; the new 3925 * pages are unlocked but the segment still thinks they're locked.
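 *
 * The cow and kernel arguments must mirror those of the earlier
 * page_pp_lock() call; a balanced pair looks like this (sketch):
 *
 *	if (page_pp_lock(pp, cow, kernel)) {
 *		...
 *		page_pp_unlock(pp, cow, kernel);
 *	}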
3926 */ 3927 if (cow) { 3928 if (pp->p_cowcnt) { 3929 mutex_enter(&freemem_lock); 3930 pp->p_cowcnt--; 3931 availrmem++; 3932 pages_locked--; 3933 mutex_exit(&freemem_lock); 3934 } 3935 } else { 3936 if (pp->p_lckcnt && --pp->p_lckcnt == 0) { 3937 if (!kernel) { 3938 mutex_enter(&freemem_lock); 3939 availrmem++; 3940 pages_locked--; 3941 mutex_exit(&freemem_lock); 3942 } 3943 } 3944 } 3945 page_struct_unlock(pp); 3946 } 3947 3948 /* 3949 * This routine reserves availrmem for npages; 3950 * flags: KM_NOSLEEP or KM_SLEEP 3951 * returns 1 on success or 0 on failure 3952 */ 3953 int 3954 page_resv(pgcnt_t npages, uint_t flags) 3955 { 3956 mutex_enter(&freemem_lock); 3957 while (availrmem < tune.t_minarmem + npages) { 3958 if (flags & KM_NOSLEEP) { 3959 mutex_exit(&freemem_lock); 3960 return (0); 3961 } 3962 mutex_exit(&freemem_lock); 3963 page_needfree(npages); 3964 kmem_reap(); 3965 delay(hz >> 2); 3966 page_needfree(-(spgcnt_t)npages); 3967 mutex_enter(&freemem_lock); 3968 } 3969 availrmem -= npages; 3970 mutex_exit(&freemem_lock); 3971 return (1); 3972 } 3973 3974 /* 3975 * This routine unreserves availrmem for npages; 3976 */ 3977 void 3978 page_unresv(pgcnt_t npages) 3979 { 3980 mutex_enter(&freemem_lock); 3981 availrmem += npages; 3982 mutex_exit(&freemem_lock); 3983 } 3984 3985 /* 3986 * See Statement at the beginning of segvn_lockop() regarding 3987 * the way we handle cowcnts and lckcnts. 3988 * 3989 * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage 3990 * that breaks COW has PROT_WRITE. 3991 * 3992 * Note that, we may also break COW in case we are softlocking 3993 * on read access during physio; 3994 * in this softlock case, the vpage may not have PROT_WRITE. 3995 * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp' 3996 * if the vpage doesn't have PROT_WRITE. 3997 * 3998 * This routine is never called if we are stealing a page 3999 * in anon_private. 4000 * 4001 * The caller subtracted from availrmem for read only mapping. 4002 * if lckcnt is 1 increment availrmem. 4003 */ 4004 void 4005 page_pp_useclaim( 4006 page_t *opp, /* original page frame losing lock */ 4007 page_t *npp, /* new page frame gaining lock */ 4008 uint_t write_perm) /* set if vpage has PROT_WRITE */ 4009 { 4010 int payback = 0; 4011 int nidx, oidx; 4012 4013 ASSERT(PAGE_LOCKED(opp)); 4014 ASSERT(PAGE_LOCKED(npp)); 4015 4016 /* 4017 * Since we have two pages we probably have two locks. We need to take 4018 * them in a defined order to avoid deadlocks. It's also possible they 4019 * both hash to the same lock in which case this is a non-issue. 4020 */ 4021 nidx = PAGE_LLOCK_HASH(PP_PAGEROOT(npp)); 4022 oidx = PAGE_LLOCK_HASH(PP_PAGEROOT(opp)); 4023 if (nidx < oidx) { 4024 page_struct_lock(npp); 4025 page_struct_lock(opp); 4026 } else if (oidx < nidx) { 4027 page_struct_lock(opp); 4028 page_struct_lock(npp); 4029 } else { /* The pages hash to the same lock */ 4030 page_struct_lock(npp); 4031 } 4032 4033 ASSERT(npp->p_cowcnt == 0); 4034 ASSERT(npp->p_lckcnt == 0); 4035 4036 /* Don't use claim if nothing is locked (see page_pp_unlock above) */ 4037 if ((write_perm && opp->p_cowcnt != 0) || 4038 (!write_perm && opp->p_lckcnt != 0)) { 4039 4040 if (write_perm) { 4041 npp->p_cowcnt++; 4042 ASSERT(opp->p_cowcnt != 0); 4043 opp->p_cowcnt--; 4044 } else { 4045 4046 ASSERT(opp->p_lckcnt != 0); 4047 4048 /* 4049 * We didn't need availrmem decremented if p_lckcnt on 4050 * original page is 1. 
Here, we are unlocking 4051 * read-only copy belonging to original page and 4052 * are locking a copy belonging to new page. 4053 */ 4054 if (opp->p_lckcnt == 1) 4055 payback = 1; 4056 4057 npp->p_lckcnt++; 4058 opp->p_lckcnt--; 4059 } 4060 } 4061 if (payback) { 4062 mutex_enter(&freemem_lock); 4063 availrmem++; 4064 pages_useclaim--; 4065 mutex_exit(&freemem_lock); 4066 } 4067 4068 if (nidx < oidx) { 4069 page_struct_unlock(opp); 4070 page_struct_unlock(npp); 4071 } else if (oidx < nidx) { 4072 page_struct_unlock(npp); 4073 page_struct_unlock(opp); 4074 } else { /* The pages hash to the same lock */ 4075 page_struct_unlock(npp); 4076 } 4077 } 4078 4079 /* 4080 * Simple claim adjust functions -- used to support changes in 4081 * claims due to changes in access permissions. Used by segvn_setprot(). 4082 */ 4083 int 4084 page_addclaim(page_t *pp) 4085 { 4086 int r = 0; /* result */ 4087 4088 ASSERT(PAGE_LOCKED(pp)); 4089 4090 page_struct_lock(pp); 4091 ASSERT(pp->p_lckcnt != 0); 4092 4093 if (pp->p_lckcnt == 1) { 4094 if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4095 --pp->p_lckcnt; 4096 r = 1; 4097 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4098 cmn_err(CE_WARN, 4099 "COW lock limit reached on pfn 0x%lx", 4100 page_pptonum(pp)); 4101 } 4102 } 4103 } else { 4104 mutex_enter(&freemem_lock); 4105 if ((availrmem > pages_pp_maximum) && 4106 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 4107 --availrmem; 4108 ++pages_claimed; 4109 mutex_exit(&freemem_lock); 4110 --pp->p_lckcnt; 4111 r = 1; 4112 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4113 cmn_err(CE_WARN, 4114 "COW lock limit reached on pfn 0x%lx", 4115 page_pptonum(pp)); 4116 } 4117 } else 4118 mutex_exit(&freemem_lock); 4119 } 4120 page_struct_unlock(pp); 4121 return (r); 4122 } 4123 4124 int 4125 page_subclaim(page_t *pp) 4126 { 4127 int r = 0; 4128 4129 ASSERT(PAGE_LOCKED(pp)); 4130 4131 page_struct_lock(pp); 4132 ASSERT(pp->p_cowcnt != 0); 4133 4134 if (pp->p_lckcnt) { 4135 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4136 r = 1; 4137 /* 4138 * for availrmem 4139 */ 4140 mutex_enter(&freemem_lock); 4141 availrmem++; 4142 pages_claimed--; 4143 mutex_exit(&freemem_lock); 4144 4145 pp->p_cowcnt--; 4146 4147 if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4148 cmn_err(CE_WARN, 4149 "Page lock limit reached on pfn 0x%lx", 4150 page_pptonum(pp)); 4151 } 4152 } 4153 } else { 4154 r = 1; 4155 pp->p_cowcnt--; 4156 pp->p_lckcnt++; 4157 } 4158 page_struct_unlock(pp); 4159 return (r); 4160 } 4161 4162 /* 4163 * Variant of page_addclaim(), where ppa[] contains the pages of a single large 4164 * page. 4165 */ 4166 int 4167 page_addclaim_pages(page_t **ppa) 4168 { 4169 pgcnt_t lckpgs = 0, pg_idx; 4170 4171 VM_STAT_ADD(pagecnt.pc_addclaim_pages); 4172 4173 /* 4174 * Only need to take the page struct lock on the large page root. 
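 *
 * ppa[] is assumed (as the loops below rely on) to be a NULL-terminated
 * array of the constituent pages of one large page, e.g. as filled in
 * by page_alloc_pages() when called with a non-NULL ppa argument.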
4175 */ 4176 page_struct_lock(ppa[0]); 4177 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4178 4179 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4180 ASSERT(ppa[pg_idx]->p_lckcnt != 0); 4181 if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4182 page_struct_unlock(ppa[0]); 4183 return (0); 4184 } 4185 if (ppa[pg_idx]->p_lckcnt > 1) 4186 lckpgs++; 4187 } 4188 4189 if (lckpgs != 0) { 4190 mutex_enter(&freemem_lock); 4191 if (availrmem >= pages_pp_maximum + lckpgs) { 4192 availrmem -= lckpgs; 4193 pages_claimed += lckpgs; 4194 } else { 4195 mutex_exit(&freemem_lock); 4196 page_struct_unlock(ppa[0]); 4197 return (0); 4198 } 4199 mutex_exit(&freemem_lock); 4200 } 4201 4202 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4203 ppa[pg_idx]->p_lckcnt--; 4204 ppa[pg_idx]->p_cowcnt++; 4205 } 4206 page_struct_unlock(ppa[0]); 4207 return (1); 4208 } 4209 4210 /* 4211 * Variant of page_subclaim(), where ppa[] contains the pages of a single large 4212 * page. 4213 */ 4214 int 4215 page_subclaim_pages(page_t **ppa) 4216 { 4217 pgcnt_t ulckpgs = 0, pg_idx; 4218 4219 VM_STAT_ADD(pagecnt.pc_subclaim_pages); 4220 4221 /* 4222 * Only need to take the page struct lock on the large page root. 4223 */ 4224 page_struct_lock(ppa[0]); 4225 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4226 4227 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4228 ASSERT(ppa[pg_idx]->p_cowcnt != 0); 4229 if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4230 page_struct_unlock(ppa[0]); 4231 return (0); 4232 } 4233 if (ppa[pg_idx]->p_lckcnt != 0) 4234 ulckpgs++; 4235 } 4236 4237 if (ulckpgs != 0) { 4238 mutex_enter(&freemem_lock); 4239 availrmem += ulckpgs; 4240 pages_claimed -= ulckpgs; 4241 mutex_exit(&freemem_lock); 4242 } 4243 4244 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4245 ppa[pg_idx]->p_cowcnt--; 4246 ppa[pg_idx]->p_lckcnt++; 4247 4248 } 4249 page_struct_unlock(ppa[0]); 4250 return (1); 4251 } 4252 4253 page_t * 4254 page_numtopp(pfn_t pfnum, se_t se) 4255 { 4256 page_t *pp; 4257 4258 retry: 4259 pp = page_numtopp_nolock(pfnum); 4260 if (pp == NULL) { 4261 return ((page_t *)NULL); 4262 } 4263 4264 /* 4265 * Acquire the appropriate lock on the page. 4266 */ 4267 while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) { 4268 if (page_pptonum(pp) != pfnum) 4269 goto retry; 4270 continue; 4271 } 4272 4273 if (page_pptonum(pp) != pfnum) { 4274 page_unlock(pp); 4275 goto retry; 4276 } 4277 4278 return (pp); 4279 } 4280 4281 page_t * 4282 page_numtopp_noreclaim(pfn_t pfnum, se_t se) 4283 { 4284 page_t *pp; 4285 4286 retry: 4287 pp = page_numtopp_nolock(pfnum); 4288 if (pp == NULL) { 4289 return ((page_t *)NULL); 4290 } 4291 4292 /* 4293 * Acquire the appropriate lock on the page. 4294 */ 4295 while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) { 4296 if (page_pptonum(pp) != pfnum) 4297 goto retry; 4298 continue; 4299 } 4300 4301 if (page_pptonum(pp) != pfnum) { 4302 page_unlock(pp); 4303 goto retry; 4304 } 4305 4306 return (pp); 4307 } 4308 4309 /* 4310 * This routine is like page_numtopp, but will only return page structs 4311 * for pages which are ok for loading into hardware using the page struct. 4312 */ 4313 page_t * 4314 page_numtopp_nowait(pfn_t pfnum, se_t se) 4315 { 4316 page_t *pp; 4317 4318 retry: 4319 pp = page_numtopp_nolock(pfnum); 4320 if (pp == NULL) { 4321 return ((page_t *)NULL); 4322 } 4323 4324 /* 4325 * Try to acquire the appropriate lock on the page. 
4326 */ 4327 if (PP_ISFREE(pp)) 4328 pp = NULL; 4329 else { 4330 if (!page_trylock(pp, se)) 4331 pp = NULL; 4332 else { 4333 if (page_pptonum(pp) != pfnum) { 4334 page_unlock(pp); 4335 goto retry; 4336 } 4337 if (PP_ISFREE(pp)) { 4338 page_unlock(pp); 4339 pp = NULL; 4340 } 4341 } 4342 } 4343 return (pp); 4344 } 4345 4346 #define SYNC_PROGRESS_NPAGES 1000 4347 4348 /* 4349 * Returns a count of dirty pages that are in the process 4350 * of being written out. If 'cleanit' is set, try to push the page. 4351 */ 4352 pgcnt_t 4353 page_busy(int cleanit) 4354 { 4355 page_t *page0 = page_first(); 4356 page_t *pp = page0; 4357 pgcnt_t nppbusy = 0; 4358 int counter = 0; 4359 u_offset_t off; 4360 4361 do { 4362 vnode_t *vp = pp->p_vnode; 4363 4364 /* 4365 * Reset the sync timeout. The page list is very long 4366 * on large memory systems. 4367 */ 4368 if (++counter > SYNC_PROGRESS_NPAGES) { 4369 counter = 0; 4370 vfs_syncprogress(); 4371 } 4372 4373 /* 4374 * A page is a candidate for syncing if it is: 4375 * 4376 * (a) On neither the freelist nor the cachelist 4377 * (b) Hashed onto a vnode 4378 * (c) Not a kernel page 4379 * (d) Dirty 4380 * (e) Not part of a swapfile 4381 * (f) a page which belongs to a real vnode; eg has a non-null 4382 * v_vfsp pointer. 4383 * (g) Backed by a filesystem which doesn't have a 4384 * stubbed-out sync operation 4385 */ 4386 if (!PP_ISFREE(pp) && vp != NULL && !VN_ISKAS(vp) && 4387 hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL && 4388 vfs_can_sync(vp->v_vfsp)) { 4389 nppbusy++; 4390 4391 if (!cleanit) 4392 continue; 4393 if (!page_trylock(pp, SE_EXCL)) 4394 continue; 4395 4396 if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) || 4397 pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 4398 !(hat_pagesync(pp, 4399 HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) { 4400 page_unlock(pp); 4401 continue; 4402 } 4403 off = pp->p_offset; 4404 VN_HOLD(vp); 4405 page_unlock(pp); 4406 (void) VOP_PUTPAGE(vp, off, PAGESIZE, 4407 B_ASYNC | B_FREE, kcred, NULL); 4408 VN_RELE(vp); 4409 } 4410 } while ((pp = page_next(pp)) != page0); 4411 4412 vfs_syncprogress(); 4413 return (nppbusy); 4414 } 4415 4416 void page_invalidate_pages(void); 4417 4418 /* 4419 * callback handler to vm sub-system 4420 * 4421 * callers make sure no recursive entries to this func. 4422 */ 4423 /*ARGSUSED*/ 4424 boolean_t 4425 callb_vm_cpr(void *arg, int code) 4426 { 4427 if (code == CB_CODE_CPR_CHKPT) 4428 page_invalidate_pages(); 4429 return (B_TRUE); 4430 } 4431 4432 /* 4433 * Invalidate all pages of the system. 4434 * It shouldn't be called until all user page activities are all stopped. 4435 */ 4436 void 4437 page_invalidate_pages() 4438 { 4439 page_t *pp; 4440 page_t *page0; 4441 pgcnt_t nbusypages; 4442 int retry = 0; 4443 const int MAXRETRIES = 4; 4444 top: 4445 /* 4446 * Flush dirty pages and destroy the clean ones. 4447 */ 4448 nbusypages = 0; 4449 4450 pp = page0 = page_first(); 4451 do { 4452 struct vnode *vp; 4453 u_offset_t offset; 4454 int mod; 4455 4456 /* 4457 * skip the page if it has no vnode or the page associated 4458 * with the kernel vnode or prom allocated kernel mem. 4459 */ 4460 if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp)) 4461 continue; 4462 4463 /* 4464 * skip the page which is already free invalidated. 4465 */ 4466 if (PP_ISFREE(pp) && PP_ISAGED(pp)) 4467 continue; 4468 4469 /* 4470 * skip pages that are already locked or can't be "exclusively" 4471 * locked or are already free. 
After we lock the page, check 4472 * the free and age bits again to be sure it's not destroyed 4473 * yet. 4474 * To achieve max. parallelization, we use page_trylock instead 4475 * of page_lock so that we don't get block on individual pages 4476 * while we have thousands of other pages to process. 4477 */ 4478 if (!page_trylock(pp, SE_EXCL)) { 4479 nbusypages++; 4480 continue; 4481 } else if (PP_ISFREE(pp)) { 4482 if (!PP_ISAGED(pp)) { 4483 page_destroy_free(pp); 4484 } else { 4485 page_unlock(pp); 4486 } 4487 continue; 4488 } 4489 /* 4490 * Is this page involved in some I/O? shared? 4491 * 4492 * The page_struct_lock need not be acquired to 4493 * examine these fields since the page has an 4494 * "exclusive" lock. 4495 */ 4496 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 4497 page_unlock(pp); 4498 continue; 4499 } 4500 4501 if (vp->v_type == VCHR) { 4502 panic("vp->v_type == VCHR"); 4503 /*NOTREACHED*/ 4504 } 4505 4506 if (!page_try_demote_pages(pp)) { 4507 page_unlock(pp); 4508 continue; 4509 } 4510 4511 /* 4512 * Check the modified bit. Leave the bits alone in hardware 4513 * (they will be modified if we do the putpage). 4514 */ 4515 mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) 4516 & P_MOD); 4517 if (mod) { 4518 offset = pp->p_offset; 4519 /* 4520 * Hold the vnode before releasing the page lock 4521 * to prevent it from being freed and re-used by 4522 * some other thread. 4523 */ 4524 VN_HOLD(vp); 4525 page_unlock(pp); 4526 /* 4527 * No error return is checked here. Callers such as 4528 * cpr deals with the dirty pages at the dump time 4529 * if this putpage fails. 4530 */ 4531 (void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL, 4532 kcred, NULL); 4533 VN_RELE(vp); 4534 } else { 4535 /*LINTED: constant in conditional context*/ 4536 VN_DISPOSE(pp, B_INVAL, 0, kcred); 4537 } 4538 } while ((pp = page_next(pp)) != page0); 4539 if (nbusypages && retry++ < MAXRETRIES) { 4540 delay(1); 4541 goto top; 4542 } 4543 } 4544 4545 /* 4546 * Replace the page "old" with the page "new" on the page hash and vnode lists 4547 * 4548 * the replacement must be done in place, ie the equivalent sequence: 4549 * 4550 * vp = old->p_vnode; 4551 * off = old->p_offset; 4552 * page_do_hashout(old) 4553 * page_do_hashin(new, vp, off) 4554 * 4555 * doesn't work, since 4556 * 1) if old is the only page on the vnode, the v_pages list has a window 4557 * where it looks empty. This will break file system assumptions. 4558 * and 4559 * 2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list. 
4560 */ 4561 static void 4562 page_do_relocate_hash(page_t *new, page_t *old) 4563 { 4564 page_t **hash_list; 4565 vnode_t *vp = old->p_vnode; 4566 kmutex_t *sep; 4567 4568 ASSERT(PAGE_EXCL(old)); 4569 ASSERT(PAGE_EXCL(new)); 4570 ASSERT(vp != NULL); 4571 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 4572 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset)))); 4573 4574 /* 4575 * First find old page on the page hash list 4576 */ 4577 hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)]; 4578 4579 for (;;) { 4580 if (*hash_list == old) 4581 break; 4582 if (*hash_list == NULL) { 4583 panic("page_do_hashout"); 4584 /*NOTREACHED*/ 4585 } 4586 hash_list = &(*hash_list)->p_hash; 4587 } 4588 4589 /* 4590 * update new and replace old with new on the page hash list 4591 */ 4592 new->p_vnode = old->p_vnode; 4593 new->p_offset = old->p_offset; 4594 new->p_hash = old->p_hash; 4595 *hash_list = new; 4596 4597 if ((new->p_vnode->v_flag & VISSWAP) != 0) 4598 PP_SETSWAP(new); 4599 4600 /* 4601 * replace old with new on the vnode's page list 4602 */ 4603 if (old->p_vpnext == old) { 4604 new->p_vpnext = new; 4605 new->p_vpprev = new; 4606 } else { 4607 new->p_vpnext = old->p_vpnext; 4608 new->p_vpprev = old->p_vpprev; 4609 new->p_vpnext->p_vpprev = new; 4610 new->p_vpprev->p_vpnext = new; 4611 } 4612 if (vp->v_pages == old) 4613 vp->v_pages = new; 4614 4615 /* 4616 * clear out the old page 4617 */ 4618 old->p_hash = NULL; 4619 old->p_vpnext = NULL; 4620 old->p_vpprev = NULL; 4621 old->p_vnode = NULL; 4622 PP_CLRSWAP(old); 4623 old->p_offset = (u_offset_t)-1; 4624 page_clr_all_props(old); 4625 4626 /* 4627 * Wake up processes waiting for this page. The page's 4628 * identity has been changed, and is probably not the 4629 * desired page any longer. 4630 */ 4631 sep = page_se_mutex(old); 4632 mutex_enter(sep); 4633 old->p_selock &= ~SE_EWANTED; 4634 if (CV_HAS_WAITERS(&old->p_cv)) 4635 cv_broadcast(&old->p_cv); 4636 mutex_exit(sep); 4637 } 4638 4639 /* 4640 * This function moves the identity of page "pp_old" to page "pp_new". 4641 * Both pages must be locked on entry. "pp_new" is free, has no identity, 4642 * and need not be hashed out from anywhere. 4643 */ 4644 void 4645 page_relocate_hash(page_t *pp_new, page_t *pp_old) 4646 { 4647 vnode_t *vp = pp_old->p_vnode; 4648 u_offset_t off = pp_old->p_offset; 4649 kmutex_t *phm, *vphm; 4650 4651 /* 4652 * Rehash two pages 4653 */ 4654 ASSERT(PAGE_EXCL(pp_old)); 4655 ASSERT(PAGE_EXCL(pp_new)); 4656 ASSERT(vp != NULL); 4657 ASSERT(pp_new->p_vnode == NULL); 4658 4659 /* 4660 * hashout then hashin while holding the mutexes 4661 */ 4662 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off)); 4663 mutex_enter(phm); 4664 vphm = page_vnode_mutex(vp); 4665 mutex_enter(vphm); 4666 4667 page_do_relocate_hash(pp_new, pp_old); 4668 4669 /* The following comment preserved from page_flip(). */ 4670 pp_new->p_fsdata = pp_old->p_fsdata; 4671 pp_old->p_fsdata = 0; 4672 mutex_exit(vphm); 4673 mutex_exit(phm); 4674 4675 /* 4676 * The page_struct_lock need not be acquired for lckcnt and 4677 * cowcnt since the page has an "exclusive" lock. 4678 */ 4679 ASSERT(pp_new->p_lckcnt == 0); 4680 ASSERT(pp_new->p_cowcnt == 0); 4681 pp_new->p_lckcnt = pp_old->p_lckcnt; 4682 pp_new->p_cowcnt = pp_old->p_cowcnt; 4683 pp_old->p_lckcnt = pp_old->p_cowcnt = 0; 4684 4685 } 4686 4687 /* 4688 * Helper routine used to lock all remaining members of a 4689 * large page. The caller is responsible for passing in a locked 4690 * pp. 
If pp is a large page, then it succeeds in locking all the 4691 * remaining constituent pages or it returns with only the 4692 * original page locked. 4693 * 4694 * Returns 1 on success, 0 on failure. 4695 * 4696 * If success is returned this routine guarantees p_szc for all constituent 4697 * pages of a large page pp belongs to can't change. To achieve this we 4698 * recheck szc of pp after locking all constituent pages and retry if szc 4699 * changed (it could only decrease). Since hat_page_demote() needs an EXCL 4700 * lock on one of constituent pages it can't be running after all constituent 4701 * pages are locked. hat_page_demote() with a lock on a constituent page 4702 * outside of this large page (i.e. pp belonged to a larger large page) is 4703 * already done with all constituent pages of pp since the root's p_szc is 4704 * changed last. Therefore no need to synchronize with hat_page_demote() that 4705 * locked a constituent page outside of pp's current large page. 4706 */ 4707 #ifdef DEBUG 4708 uint32_t gpg_trylock_mtbf = 0; 4709 #endif 4710 4711 int 4712 group_page_trylock(page_t *pp, se_t se) 4713 { 4714 page_t *tpp; 4715 pgcnt_t npgs, i, j; 4716 uint_t pszc = pp->p_szc; 4717 4718 #ifdef DEBUG 4719 if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) { 4720 return (0); 4721 } 4722 #endif 4723 4724 if (pp != PP_GROUPLEADER(pp, pszc)) { 4725 return (0); 4726 } 4727 4728 retry: 4729 ASSERT(PAGE_LOCKED_SE(pp, se)); 4730 ASSERT(!PP_ISFREE(pp)); 4731 if (pszc == 0) { 4732 return (1); 4733 } 4734 npgs = page_get_pagecnt(pszc); 4735 tpp = pp + 1; 4736 for (i = 1; i < npgs; i++, tpp++) { 4737 if (!page_trylock(tpp, se)) { 4738 tpp = pp + 1; 4739 for (j = 1; j < i; j++, tpp++) { 4740 page_unlock(tpp); 4741 } 4742 return (0); 4743 } 4744 } 4745 if (pp->p_szc != pszc) { 4746 ASSERT(pp->p_szc < pszc); 4747 ASSERT(pp->p_vnode != NULL && !PP_ISKAS(pp) && 4748 !IS_SWAPFSVP(pp->p_vnode)); 4749 tpp = pp + 1; 4750 for (i = 1; i < npgs; i++, tpp++) { 4751 page_unlock(tpp); 4752 } 4753 pszc = pp->p_szc; 4754 goto retry; 4755 } 4756 return (1); 4757 } 4758 4759 void 4760 group_page_unlock(page_t *pp) 4761 { 4762 page_t *tpp; 4763 pgcnt_t npgs, i; 4764 4765 ASSERT(PAGE_LOCKED(pp)); 4766 ASSERT(!PP_ISFREE(pp)); 4767 ASSERT(pp == PP_PAGEROOT(pp)); 4768 npgs = page_get_pagecnt(pp->p_szc); 4769 for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) { 4770 page_unlock(tpp); 4771 } 4772 } 4773 4774 /* 4775 * returns 4776 * 0 : on success and *nrelocp is number of relocated PAGESIZE pages 4777 * ERANGE : this is not a base page 4778 * EBUSY : failure to get locks on the page/pages 4779 * ENOMEM : failure to obtain replacement pages 4780 * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel 4781 * EIO : An error occurred while trying to copy the page data 4782 * 4783 * Return with all constituent members of target and replacement 4784 * SE_EXCL locked. It is the callers responsibility to drop the 4785 * locks. 
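 *
 * Illustrative sketch (not from the original source): a caller typically
 * dispatches on these return values, e.g.
 *
 *	page_t *repl = NULL;
 *	spgcnt_t nreloc;
 *
 *	switch (do_page_relocate(&targ, &repl, 1, &nreloc, NULL)) {
 *	case 0:
 *		break;			(nreloc pages now SE_EXCL locked)
 *	case EBUSY:
 *	case ENOMEM:
 *		goto retry_later;	(hypothetical backoff path)
 *	default:
 *		goto fail;		(hypothetical error path)
 *	}
 *
 * Passing repl == NULL asks the routine to allocate the replacement pages
 * itself, and a NULL lgrp leaves placement to the allocator, as
 * page_relocate_cage() below does.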
4786 */ 4787 int 4788 do_page_relocate( 4789 page_t **target, 4790 page_t **replacement, 4791 int grouplock, 4792 spgcnt_t *nrelocp, 4793 lgrp_t *lgrp) 4794 { 4795 page_t *first_repl; 4796 page_t *repl; 4797 page_t *targ; 4798 page_t *pl = NULL; 4799 uint_t ppattr; 4800 pfn_t pfn, repl_pfn; 4801 uint_t szc; 4802 spgcnt_t npgs, i; 4803 int repl_contig = 0; 4804 uint_t flags = 0; 4805 spgcnt_t dofree = 0; 4806 4807 *nrelocp = 0; 4808 4809 #if defined(__sparc) 4810 /* 4811 * We need to wait till OBP has completed 4812 * its boot-time handoff of its resources to the kernel 4813 * before we allow page relocation 4814 */ 4815 if (page_relocate_ready == 0) { 4816 return (EAGAIN); 4817 } 4818 #endif 4819 4820 /* 4821 * If this is not a base page, 4822 * just return with 0x0 pages relocated. 4823 */ 4824 targ = *target; 4825 ASSERT(PAGE_EXCL(targ)); 4826 ASSERT(!PP_ISFREE(targ)); 4827 szc = targ->p_szc; 4828 ASSERT(szc < mmu_page_sizes); 4829 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4830 pfn = targ->p_pagenum; 4831 if (pfn != PFN_BASE(pfn, szc)) { 4832 VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]); 4833 return (ERANGE); 4834 } 4835 4836 if ((repl = *replacement) != NULL && repl->p_szc >= szc) { 4837 repl_pfn = repl->p_pagenum; 4838 if (repl_pfn != PFN_BASE(repl_pfn, szc)) { 4839 VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]); 4840 return (ERANGE); 4841 } 4842 repl_contig = 1; 4843 } 4844 4845 /* 4846 * We must lock all members of this large page or we cannot 4847 * relocate any part of it. 4848 */ 4849 if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) { 4850 VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]); 4851 return (EBUSY); 4852 } 4853 4854 /* 4855 * reread szc it could have been decreased before 4856 * group_page_trylock() was done. 4857 */ 4858 szc = targ->p_szc; 4859 ASSERT(szc < mmu_page_sizes); 4860 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4861 ASSERT(pfn == PFN_BASE(pfn, szc)); 4862 4863 npgs = page_get_pagecnt(targ->p_szc); 4864 4865 if (repl == NULL) { 4866 dofree = npgs; /* Size of target page in MMU pages */ 4867 if (!page_create_wait(dofree, 0)) { 4868 if (grouplock != 0) { 4869 group_page_unlock(targ); 4870 } 4871 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4872 return (ENOMEM); 4873 } 4874 4875 /* 4876 * seg kmem pages require that the target and replacement 4877 * page be the same pagesize. 4878 */ 4879 flags = (VN_ISKAS(targ->p_vnode)) ? 
PGR_SAMESZC : 0; 4880 repl = page_get_replacement_page(targ, lgrp, flags); 4881 if (repl == NULL) { 4882 if (grouplock != 0) { 4883 group_page_unlock(targ); 4884 } 4885 page_create_putback(dofree); 4886 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4887 return (ENOMEM); 4888 } 4889 } 4890 #ifdef DEBUG 4891 else { 4892 ASSERT(PAGE_LOCKED(repl)); 4893 } 4894 #endif /* DEBUG */ 4895 4896 #if defined(__sparc) 4897 /* 4898 * Let hat_page_relocate() complete the relocation if it's kernel page 4899 */ 4900 if (VN_ISKAS(targ->p_vnode)) { 4901 *replacement = repl; 4902 if (hat_page_relocate(target, replacement, nrelocp) != 0) { 4903 if (grouplock != 0) { 4904 group_page_unlock(targ); 4905 } 4906 if (dofree) { 4907 *replacement = NULL; 4908 page_free_replacement_page(repl); 4909 page_create_putback(dofree); 4910 } 4911 VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]); 4912 return (EAGAIN); 4913 } 4914 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 4915 return (0); 4916 } 4917 #else 4918 #if defined(lint) 4919 dofree = dofree; 4920 #endif 4921 #endif 4922 4923 first_repl = repl; 4924 4925 for (i = 0; i < npgs; i++) { 4926 ASSERT(PAGE_EXCL(targ)); 4927 ASSERT(targ->p_slckcnt == 0); 4928 ASSERT(repl->p_slckcnt == 0); 4929 4930 (void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD); 4931 4932 ASSERT(hat_page_getshare(targ) == 0); 4933 ASSERT(!PP_ISFREE(targ)); 4934 ASSERT(targ->p_pagenum == (pfn + i)); 4935 ASSERT(repl_contig == 0 || 4936 repl->p_pagenum == (repl_pfn + i)); 4937 4938 /* 4939 * Copy the page contents and attributes then 4940 * relocate the page in the page hash. 4941 */ 4942 if (ppcopy(targ, repl) == 0) { 4943 targ = *target; 4944 repl = first_repl; 4945 VM_STAT_ADD(vmm_vmstats.ppr_copyfail); 4946 if (grouplock != 0) { 4947 group_page_unlock(targ); 4948 } 4949 if (dofree) { 4950 *replacement = NULL; 4951 page_free_replacement_page(repl); 4952 page_create_putback(dofree); 4953 } 4954 return (EIO); 4955 } 4956 4957 targ++; 4958 if (repl_contig != 0) { 4959 repl++; 4960 } else { 4961 repl = repl->p_next; 4962 } 4963 } 4964 4965 repl = first_repl; 4966 targ = *target; 4967 4968 for (i = 0; i < npgs; i++) { 4969 ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO)); 4970 page_clr_all_props(repl); 4971 page_set_props(repl, ppattr); 4972 page_relocate_hash(repl, targ); 4973 4974 ASSERT(hat_page_getshare(targ) == 0); 4975 ASSERT(hat_page_getshare(repl) == 0); 4976 /* 4977 * Now clear the props on targ, after the 4978 * page_relocate_hash(), they no longer 4979 * have any meaning. 4980 */ 4981 page_clr_all_props(targ); 4982 ASSERT(targ->p_next == targ); 4983 ASSERT(targ->p_prev == targ); 4984 page_list_concat(&pl, &targ); 4985 4986 targ++; 4987 if (repl_contig != 0) { 4988 repl++; 4989 } else { 4990 repl = repl->p_next; 4991 } 4992 } 4993 /* assert that we have come full circle with repl */ 4994 ASSERT(repl_contig == 1 || first_repl == repl); 4995 4996 *target = pl; 4997 if (*replacement == NULL) { 4998 ASSERT(first_repl == repl); 4999 *replacement = repl; 5000 } 5001 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 5002 *nrelocp = npgs; 5003 return (0); 5004 } 5005 /* 5006 * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated. 
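 *
 * Illustrative sketch (not from the original source): with freetarget set,
 * the caller keeps only the replacement pages, e.g.
 *
 *	page_t *repl = NULL;
 *	spgcnt_t nreloc;
 *
 *	if (page_relocate(&targ, &repl, 1, 1, &nreloc, NULL) == 0) {
 *		... targ has been freed; repl now holds nreloc EXCL
 *		... locked PAGESIZE pages to unlock when done
 *	}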
5007 */ 5008 int 5009 page_relocate( 5010 page_t **target, 5011 page_t **replacement, 5012 int grouplock, 5013 int freetarget, 5014 spgcnt_t *nrelocp, 5015 lgrp_t *lgrp) 5016 { 5017 spgcnt_t ret; 5018 5019 /* do_page_relocate returns 0 on success or errno value */ 5020 ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp); 5021 5022 if (ret != 0 || freetarget == 0) { 5023 return (ret); 5024 } 5025 if (*nrelocp == 1) { 5026 ASSERT(*target != NULL); 5027 page_free(*target, 1); 5028 } else { 5029 page_t *tpp = *target; 5030 uint_t szc = tpp->p_szc; 5031 pgcnt_t npgs = page_get_pagecnt(szc); 5032 ASSERT(npgs > 1); 5033 ASSERT(szc != 0); 5034 do { 5035 ASSERT(PAGE_EXCL(tpp)); 5036 ASSERT(!hat_page_is_mapped(tpp)); 5037 ASSERT(tpp->p_szc == szc); 5038 PP_SETFREE(tpp); 5039 PP_SETAGED(tpp); 5040 npgs--; 5041 } while ((tpp = tpp->p_next) != *target); 5042 ASSERT(npgs == 0); 5043 page_list_add_pages(*target, 0); 5044 npgs = page_get_pagecnt(szc); 5045 page_create_putback(npgs); 5046 } 5047 return (ret); 5048 } 5049 5050 /* 5051 * it is up to the caller to deal with pcf accounting. 5052 */ 5053 void 5054 page_free_replacement_page(page_t *pplist) 5055 { 5056 page_t *pp; 5057 5058 while (pplist != NULL) { 5059 /* 5060 * pp_targ is a linked list. 5061 */ 5062 pp = pplist; 5063 if (pp->p_szc == 0) { 5064 page_sub(&pplist, pp); 5065 page_clr_all_props(pp); 5066 PP_SETFREE(pp); 5067 PP_SETAGED(pp); 5068 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 5069 page_unlock(pp); 5070 VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]); 5071 } else { 5072 spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc); 5073 page_t *tpp; 5074 page_list_break(&pp, &pplist, curnpgs); 5075 tpp = pp; 5076 do { 5077 ASSERT(PAGE_EXCL(tpp)); 5078 ASSERT(!hat_page_is_mapped(tpp)); 5079 page_clr_all_props(tpp); 5080 PP_SETFREE(tpp); 5081 PP_SETAGED(tpp); 5082 } while ((tpp = tpp->p_next) != pp); 5083 page_list_add_pages(pp, 0); 5084 VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]); 5085 } 5086 } 5087 } 5088 5089 /* 5090 * Relocate target to non-relocatable replacement page. 5091 */ 5092 int 5093 page_relocate_cage(page_t **target, page_t **replacement) 5094 { 5095 page_t *tpp, *rpp; 5096 spgcnt_t pgcnt, npgs; 5097 int result; 5098 5099 tpp = *target; 5100 5101 ASSERT(PAGE_EXCL(tpp)); 5102 ASSERT(tpp->p_szc == 0); 5103 5104 pgcnt = btop(page_get_pagesize(tpp->p_szc)); 5105 5106 do { 5107 (void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC); 5108 rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC); 5109 if (rpp == NULL) { 5110 page_create_putback(pgcnt); 5111 kcage_cageout_wakeup(); 5112 } 5113 } while (rpp == NULL); 5114 5115 ASSERT(PP_ISNORELOC(rpp)); 5116 5117 result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL); 5118 5119 if (result == 0) { 5120 *replacement = rpp; 5121 if (pgcnt != npgs) 5122 panic("page_relocate_cage: partial relocation"); 5123 } 5124 5125 return (result); 5126 } 5127 5128 /* 5129 * Release the page lock on a page, place on cachelist 5130 * tail if no longer mapped. Caller can let us know if 5131 * the page is known to be clean. 
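 *
 * Illustrative sketch (not from the original source): callers distinguish
 * the three outcomes, e.g.
 *
 *	switch (page_release(pp, 1)) {
 *	case PGREL_CLEAN:
 *		break;		(page was freed to the cachelist)
 *	case PGREL_MOD:
 *		break;		(page is dirty; only the lock was dropped)
 *	case PGREL_NOTREL:
 *		break;		(page is mapped, locked, or otherwise busy)
 *	}
 *
 * Passing checkmod == 0 skips the hat_ismod() test, for pages the caller
 * already knows to be clean.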
5132 */ 5133 int 5134 page_release(page_t *pp, int checkmod) 5135 { 5136 int status; 5137 5138 ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) && 5139 (pp->p_vnode != NULL)); 5140 5141 if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) && 5142 ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) && 5143 pp->p_lckcnt == 0 && pp->p_cowcnt == 0 && 5144 !hat_page_is_mapped(pp)) { 5145 5146 /* 5147 * If page is modified, unlock it 5148 * 5149 * (p_nrm & P_MOD) bit has the latest stuff because: 5150 * (1) We found that this page doesn't have any mappings 5151 * _after_ holding SE_EXCL and 5152 * (2) We didn't drop SE_EXCL lock after the check in (1) 5153 */ 5154 if (checkmod && hat_ismod(pp)) { 5155 page_unlock(pp); 5156 status = PGREL_MOD; 5157 } else { 5158 /*LINTED: constant in conditional context*/ 5159 VN_DISPOSE(pp, B_FREE, 0, kcred); 5160 status = PGREL_CLEAN; 5161 } 5162 } else { 5163 page_unlock(pp); 5164 status = PGREL_NOTREL; 5165 } 5166 return (status); 5167 } 5168 5169 /* 5170 * Given a constituent page, try to demote the large page on the freelist. 5171 * 5172 * Returns nonzero if the page could be demoted successfully. Returns with 5173 * the constituent page still locked. 5174 */ 5175 int 5176 page_try_demote_free_pages(page_t *pp) 5177 { 5178 page_t *rootpp = pp; 5179 pfn_t pfn = page_pptonum(pp); 5180 spgcnt_t npgs; 5181 uint_t szc = pp->p_szc; 5182 5183 ASSERT(PP_ISFREE(pp)); 5184 ASSERT(PAGE_EXCL(pp)); 5185 5186 /* 5187 * Adjust rootpp and lock it, if `pp' is not the base 5188 * constituent page. 5189 */ 5190 npgs = page_get_pagecnt(pp->p_szc); 5191 if (npgs == 1) { 5192 return (0); 5193 } 5194 5195 if (!IS_P2ALIGNED(pfn, npgs)) { 5196 pfn = P2ALIGN(pfn, npgs); 5197 rootpp = page_numtopp_nolock(pfn); 5198 } 5199 5200 if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) { 5201 return (0); 5202 } 5203 5204 if (rootpp->p_szc != szc) { 5205 if (pp != rootpp) 5206 page_unlock(rootpp); 5207 return (0); 5208 } 5209 5210 page_demote_free_pages(rootpp); 5211 5212 if (pp != rootpp) 5213 page_unlock(rootpp); 5214 5215 ASSERT(PP_ISFREE(pp)); 5216 ASSERT(PAGE_EXCL(pp)); 5217 return (1); 5218 } 5219 5220 /* 5221 * Given a constituent page, try to demote the large page. 5222 * 5223 * Returns nonzero if the page could be demoted successfully. Returns with 5224 * the constituent page still locked. 5225 */ 5226 int 5227 page_try_demote_pages(page_t *pp) 5228 { 5229 page_t *tpp, *rootpp = pp; 5230 pfn_t pfn = page_pptonum(pp); 5231 spgcnt_t i, npgs; 5232 vnode_t *vp = pp->p_vnode; 5233 #ifdef DEBUG 5234 uint_t szc = pp->p_szc; 5235 #endif 5236 5237 ASSERT(PAGE_EXCL(pp)); 5238 5239 VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]); 5240 5241 if (pp->p_szc == 0) { 5242 VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]); 5243 return (1); 5244 } 5245 5246 if (vp != NULL && !IS_SWAPFSVP(vp) && !VN_ISKAS(vp)) { 5247 VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]); 5248 page_demote_vp_pages(pp); 5249 ASSERT(pp->p_szc == 0); 5250 return (1); 5251 } 5252 5253 /* 5254 * Adjust rootpp if passed in is not the base 5255 * constituent page. 5256 */ 5257 npgs = page_get_pagecnt(pp->p_szc); 5258 ASSERT(npgs > 1); 5259 if (!IS_P2ALIGNED(pfn, npgs)) { 5260 pfn = P2ALIGN(pfn, npgs); 5261 rootpp = page_numtopp_nolock(pfn); 5262 VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]); 5263 ASSERT(rootpp->p_vnode != NULL); 5264 ASSERT(rootpp->p_szc == szc); 5265 } 5266 5267 /* 5268 * We can't demote kernel pages since we can't hat_unload() 5269 * the mappings. 
 */
	if (VN_ISKAS(rootpp->p_vnode))
		return (0);

	/*
	 * Attempt to lock all constituent pages except the page passed
	 * in, since it's already locked.
	 */
	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
		ASSERT(!PP_ISFREE(tpp));
		ASSERT(tpp->p_vnode != NULL);

		if (tpp != pp && !page_trylock(tpp, SE_EXCL))
			break;
		ASSERT(tpp->p_szc == rootpp->p_szc);
		ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i);
	}

	/*
	 * If we failed to lock them all, then unlock what we have
	 * locked so far and bail.
	 */
	if (i < npgs) {
		tpp = rootpp;
		while (i-- > 0) {
			if (tpp != pp)
				page_unlock(tpp);
			tpp++;
		}
		VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]);
		return (0);
	}

	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
		ASSERT(PAGE_EXCL(tpp));
		ASSERT(tpp->p_slckcnt == 0);
		(void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
		tpp->p_szc = 0;
	}

	/*
	 * Unlock all pages except the page passed in.
	 */
	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
		ASSERT(!hat_page_is_mapped(tpp));
		if (tpp != pp)
			page_unlock(tpp);
	}

	VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]);
	return (1);
}

/*
 * Called by page_free() and page_destroy() to demote the page size code
 * (p_szc) to 0. We can't just put a single PAGESIZE page with a non-zero
 * p_szc on the free list, nor can we clear the p_szc of a single page_t
 * within a large page, since either would break other code that relies on
 * p_szc being the same for all page_t's of a large page. Anonymous pages
 * should never end up here, because anon_map_getpages() cannot deal with
 * p_szc changes after a single constituent page is locked. Anonymous and
 * kernel large pages are demoted or freed an entire large page at a time,
 * with all constituent pages locked EXCL; file system pages, however, must
 * be demotable (i.e. have the p_szc of all constituent pages decreased)
 * while holding an EXCL lock on just one constituent page. The reason
 * anonymous page demotion can work on the entire large page at a time is
 * that those operations originate at the address space level and concern
 * the entire large page region, with actual demotion done only when the
 * pages are not shared with any other process (therefore we can always get
 * an EXCL lock on all anonymous constituent pages after clearing the
 * segment page cache). File system pages, by contrast, can be truncated or
 * invalidated at PAGESIZE granularity from the file system side and end up
 * in page_free() or page_destroy(). We also allow only part of a large
 * page to be SOFTLOCKed, so pageout must be able to demote a large page by
 * EXCL locking any constituent page that is not under SOFTLOCK. In those
 * cases we cannot rely on being able to lock all constituent pages EXCL.
 *
 * To prevent szc changes on file system pages one has to lock all
 * constituent pages at least SHARED (or call page_szc_lock()). The only
 * subsystem that doesn't rely on locking all constituent pages (or on
 * page_szc_lock()) to prevent szc changes is the hat layer, which uses its
 * own per-page mlist locks. The hat assumes that szc doesn't change after
 * the mlist lock for a page is taken.
 * Therefore we need to change szc under hat-level locks if we only have an
 * EXCL lock on a single constituent page and the hat still references any
 * of the constituent pages. (Note that we can't "ignore" the hat layer by
 * simply calling hat_pageunload() on all constituent pages, since doing so
 * itself requires EXCL locks on all of them.) We use the hat_page_demote()
 * call to safely demote the szc of all constituent pages under hat locks
 * when we only have an EXCL lock on one of the constituent pages.
 *
 * This routine calls page_szc_lock() before calling hat_page_demote() to
 * allow segvn, in one special case, not to lock all constituent pages
 * SHARED before calling hat_memload_array(), which relies on p_szc not
 * changing even before the hat-level mlist lock is taken. In that case
 * segvn uses page_szc_lock() to prevent hat_page_demote() from changing
 * p_szc values.
 *
 * Anonymous and kernel page demotion still has to lock all pages
 * exclusively and do hat_pageunload() on all constituent pages before
 * demoting the page; therefore there's no need for anonymous or kernel
 * page demotion to use the hat_page_demote() mechanism.
 *
 * hat_page_demote() removes all large mappings that map pp and then
 * decreases p_szc, starting from the last constituent page of the large
 * page. Working from the tail of the large page in pfn-decreasing order
 * lets anyone looking at the root page know that hat_page_demote() is done
 * for the root's szc area. E.g. if a root page has szc 1, one only has to
 * lock all constituent pages within the szc 1 area to prevent szc changes,
 * because any hat_page_demote() that started on this page when it had
 * szc > 1 is already done for this szc 1 area.
 *
 * We are guaranteed that all constituent pages of pp's large page belong
 * to the same vnode, with consecutive offsets increasing in the direction
 * of the pfn, i.e. the identity of the constituent pages can't change
 * until their p_szc is decreased. Therefore it's safe for hat_page_demote()
 * to remove large mappings to pp even though we don't lock any constituent
 * page except pp (i.e. we won't unload e.g. a kernel locked page).
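 *
 * Illustrative sketch (not from the original source): a caller that needs
 * the p_szc of pp's large page to stay stable, but cannot lock every
 * constituent page, could bracket its work the same way this routine does:
 *
 *	kmutex_t *mtx = page_szc_lock(pp);
 *	if (mtx != NULL) {
 *		... p_szc of pp's large page cannot change here ...
 *		mutex_exit(mtx);
 *	}
 *
 * The NULL check mirrors page_demote_vp_pages() below; when no lock is
 * returned there is nothing to drop.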
5385 */ 5386 static void 5387 page_demote_vp_pages(page_t *pp) 5388 { 5389 kmutex_t *mtx; 5390 5391 ASSERT(PAGE_EXCL(pp)); 5392 ASSERT(!PP_ISFREE(pp)); 5393 ASSERT(pp->p_vnode != NULL); 5394 ASSERT(!IS_SWAPFSVP(pp->p_vnode)); 5395 ASSERT(!PP_ISKAS(pp)); 5396 5397 VM_STAT_ADD(pagecnt.pc_demote_pages[0]); 5398 5399 mtx = page_szc_lock(pp); 5400 if (mtx != NULL) { 5401 hat_page_demote(pp); 5402 mutex_exit(mtx); 5403 } 5404 ASSERT(pp->p_szc == 0); 5405 } 5406 5407 /* 5408 * Mark any existing pages for migration in the given range 5409 */ 5410 void 5411 page_mark_migrate(struct seg *seg, caddr_t addr, size_t len, 5412 struct anon_map *amp, ulong_t anon_index, vnode_t *vp, 5413 u_offset_t vnoff, int rflag) 5414 { 5415 struct anon *ap; 5416 vnode_t *curvp; 5417 lgrp_t *from; 5418 pgcnt_t nlocked; 5419 u_offset_t off; 5420 pfn_t pfn; 5421 size_t pgsz; 5422 size_t segpgsz; 5423 pgcnt_t pages; 5424 uint_t pszc; 5425 page_t *pp0, *pp; 5426 caddr_t va; 5427 ulong_t an_idx; 5428 anon_sync_obj_t cookie; 5429 5430 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5431 5432 /* 5433 * Don't do anything if don't need to do lgroup optimizations 5434 * on this system 5435 */ 5436 if (!lgrp_optimizations()) 5437 return; 5438 5439 /* 5440 * Align address and length to (potentially large) page boundary 5441 */ 5442 segpgsz = page_get_pagesize(seg->s_szc); 5443 addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz); 5444 if (rflag) 5445 len = P2ROUNDUP(len, segpgsz); 5446 5447 /* 5448 * Do one (large) page at a time 5449 */ 5450 va = addr; 5451 while (va < addr + len) { 5452 /* 5453 * Lookup (root) page for vnode and offset corresponding to 5454 * this virtual address 5455 * Try anonmap first since there may be copy-on-write 5456 * pages, but initialize vnode pointer and offset using 5457 * vnode arguments just in case there isn't an amp. 
5458 */ 5459 curvp = vp; 5460 off = vnoff + va - seg->s_base; 5461 if (amp) { 5462 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5463 an_idx = anon_index + seg_page(seg, va); 5464 anon_array_enter(amp, an_idx, &cookie); 5465 ap = anon_get_ptr(amp->ahp, an_idx); 5466 if (ap) 5467 swap_xlate(ap, &curvp, &off); 5468 anon_array_exit(&cookie); 5469 ANON_LOCK_EXIT(&->a_rwlock); 5470 } 5471 5472 pp = NULL; 5473 if (curvp) 5474 pp = page_lookup(curvp, off, SE_SHARED); 5475 5476 /* 5477 * If there isn't a page at this virtual address, 5478 * skip to next page 5479 */ 5480 if (pp == NULL) { 5481 va += PAGESIZE; 5482 continue; 5483 } 5484 5485 /* 5486 * Figure out which lgroup this page is in for kstats 5487 */ 5488 pfn = page_pptonum(pp); 5489 from = lgrp_pfn_to_lgrp(pfn); 5490 5491 /* 5492 * Get page size, and round up and skip to next page boundary 5493 * if unaligned address 5494 */ 5495 pszc = pp->p_szc; 5496 pgsz = page_get_pagesize(pszc); 5497 pages = btop(pgsz); 5498 if (!IS_P2ALIGNED(va, pgsz) || 5499 !IS_P2ALIGNED(pfn, pages) || 5500 pgsz > segpgsz) { 5501 pgsz = MIN(pgsz, segpgsz); 5502 page_unlock(pp); 5503 pages = btop(P2END((uintptr_t)va, pgsz) - 5504 (uintptr_t)va); 5505 va = (caddr_t)P2END((uintptr_t)va, pgsz); 5506 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, pages); 5507 continue; 5508 } 5509 5510 /* 5511 * Upgrade to exclusive lock on page 5512 */ 5513 if (!page_tryupgrade(pp)) { 5514 page_unlock(pp); 5515 va += pgsz; 5516 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 5517 btop(pgsz)); 5518 continue; 5519 } 5520 5521 pp0 = pp++; 5522 nlocked = 1; 5523 5524 /* 5525 * Lock constituent pages if this is large page 5526 */ 5527 if (pages > 1) { 5528 /* 5529 * Lock all constituents except root page, since it 5530 * should be locked already. 5531 */ 5532 for (; nlocked < pages; nlocked++) { 5533 if (!page_trylock(pp, SE_EXCL)) { 5534 break; 5535 } 5536 if (PP_ISFREE(pp) || 5537 pp->p_szc != pszc) { 5538 /* 5539 * hat_page_demote() raced in with us. 5540 */ 5541 ASSERT(!IS_SWAPFSVP(curvp)); 5542 page_unlock(pp); 5543 break; 5544 } 5545 pp++; 5546 } 5547 } 5548 5549 /* 5550 * If all constituent pages couldn't be locked, 5551 * unlock pages locked so far and skip to next page. 5552 */ 5553 if (nlocked < pages) { 5554 while (pp0 < pp) { 5555 page_unlock(pp0++); 5556 } 5557 va += pgsz; 5558 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 5559 btop(pgsz)); 5560 continue; 5561 } 5562 5563 /* 5564 * hat_page_demote() can no longer happen 5565 * since last cons page had the right p_szc after 5566 * all cons pages were locked. all cons pages 5567 * should now have the same p_szc. 
5568 */ 5569 5570 /* 5571 * All constituent pages locked successfully, so mark 5572 * large page for migration and unload the mappings of 5573 * constituent pages, so a fault will occur on any part of the 5574 * large page 5575 */ 5576 PP_SETMIGRATE(pp0); 5577 while (pp0 < pp) { 5578 (void) hat_pageunload(pp0, HAT_FORCE_PGUNLOAD); 5579 ASSERT(hat_page_getshare(pp0) == 0); 5580 page_unlock(pp0++); 5581 } 5582 lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked); 5583 5584 va += pgsz; 5585 } 5586 } 5587 5588 /* 5589 * Migrate any pages that have been marked for migration in the given range 5590 */ 5591 void 5592 page_migrate( 5593 struct seg *seg, 5594 caddr_t addr, 5595 page_t **ppa, 5596 pgcnt_t npages) 5597 { 5598 lgrp_t *from; 5599 lgrp_t *to; 5600 page_t *newpp; 5601 page_t *pp; 5602 pfn_t pfn; 5603 size_t pgsz; 5604 spgcnt_t page_cnt; 5605 spgcnt_t i; 5606 uint_t pszc; 5607 5608 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5609 5610 while (npages > 0) { 5611 pp = *ppa; 5612 pszc = pp->p_szc; 5613 pgsz = page_get_pagesize(pszc); 5614 page_cnt = btop(pgsz); 5615 5616 /* 5617 * Check to see whether this page is marked for migration 5618 * 5619 * Assume that root page of large page is marked for 5620 * migration and none of the other constituent pages 5621 * are marked. This really simplifies clearing the 5622 * migrate bit by not having to clear it from each 5623 * constituent page. 5624 * 5625 * note we don't want to relocate an entire large page if 5626 * someone is only using one subpage. 5627 */ 5628 if (npages < page_cnt) 5629 break; 5630 5631 /* 5632 * Is it marked for migration? 5633 */ 5634 if (!PP_ISMIGRATE(pp)) 5635 goto next; 5636 5637 /* 5638 * Determine lgroups that page is being migrated between 5639 */ 5640 pfn = page_pptonum(pp); 5641 if (!IS_P2ALIGNED(pfn, page_cnt)) { 5642 break; 5643 } 5644 from = lgrp_pfn_to_lgrp(pfn); 5645 to = lgrp_mem_choose(seg, addr, pgsz); 5646 5647 /* 5648 * Need to get exclusive lock's to migrate 5649 */ 5650 for (i = 0; i < page_cnt; i++) { 5651 ASSERT(PAGE_LOCKED(ppa[i])); 5652 if (page_pptonum(ppa[i]) != pfn + i || 5653 ppa[i]->p_szc != pszc) { 5654 break; 5655 } 5656 if (!page_tryupgrade(ppa[i])) { 5657 lgrp_stat_add(from->lgrp_id, 5658 LGRP_PM_FAIL_LOCK_PGS, 5659 page_cnt); 5660 break; 5661 } 5662 5663 /* 5664 * Check to see whether we are trying to migrate 5665 * page to lgroup where it is allocated already. 5666 * If so, clear the migrate bit and skip to next 5667 * page. 5668 */ 5669 if (i == 0 && to == from) { 5670 PP_CLRMIGRATE(ppa[0]); 5671 page_downgrade(ppa[0]); 5672 goto next; 5673 } 5674 } 5675 5676 /* 5677 * If all constituent pages couldn't be locked, 5678 * unlock pages locked so far and skip to next page. 
 */
		if (i != page_cnt) {
			while (--i != -1) {
				page_downgrade(ppa[i]);
			}
			goto next;
		}

		(void) page_create_wait(page_cnt, PG_WAIT);
		newpp = page_get_replacement_page(pp, to, PGR_SAMESZC);
		if (newpp == NULL) {
			page_create_putback(page_cnt);
			for (i = 0; i < page_cnt; i++) {
				page_downgrade(ppa[i]);
			}
			lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS,
			    page_cnt);
			goto next;
		}
		ASSERT(newpp->p_szc == pszc);
		/*
		 * Clear migrate bit and relocate page
		 */
		PP_CLRMIGRATE(pp);
		if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) {
			panic("page_migrate: page_relocate failed");
		}
		ASSERT(page_cnt * PAGESIZE == pgsz);

		/*
		 * Keep stats on the number of pages migrated from and to
		 * each lgroup
		 */
		lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt);
		lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt);
		/*
		 * Update the page_t array we were passed in and
		 * unlink the constituent pages of the large page.
		 */
		for (i = 0; i < page_cnt; ++i, ++pp) {
			ASSERT(PAGE_EXCL(newpp));
			ASSERT(newpp->p_szc == pszc);
			ppa[i] = newpp;
			pp = newpp;
			page_sub(&newpp, pp);
			page_downgrade(pp);
		}
		ASSERT(newpp == NULL);
next:
		addr += pgsz;
		ppa += page_cnt;
		npages -= page_cnt;
	}
}

ulong_t mem_waiters = 0;
ulong_t max_count = 20;
#define	MAX_DELAY	0x1ff

/*
 * Check whether enough memory is available to proceed.
 * Depending on the system configuration and on how much memory is
 * reserved for swap, we need to check against two variables:
 * e.g. on systems with little physical swap, availrmem can be the
 * more reliable indicator of how much memory is available, while on
 * systems with large physical swap, freemem can be the better
 * indicator.
 * If freemem drops below the threshold level, don't return an error
 * immediately, but wake up pageout to free memory and block.
 * This is done a number of times; if pageout is not able to free
 * memory within a certain time, return an error.
 * The same applies for availrmem, except that kmem_reap() is used
 * to free memory.
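 *
 * Illustrative sketch (not from the original source): a caller about to
 * consume npages might gate on this check, e.g.
 *
 *	if (!page_mem_avail(npages))
 *		return (EAGAIN);	(hypothetical error path)
 *
 * A zero return means the bounded waits expired before pageout or
 * kmem_reap() could free enough memory, or that delay_sig() was cut short
 * by a signal.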
5751 */ 5752 int 5753 page_mem_avail(pgcnt_t npages) 5754 { 5755 ulong_t count; 5756 5757 #if defined(__i386) 5758 if (freemem > desfree + npages && 5759 availrmem > swapfs_reserve + npages && 5760 btop(vmem_size(heap_arena, VMEM_FREE)) > tune.t_minarmem + 5761 npages) 5762 return (1); 5763 #else 5764 if (freemem > desfree + npages && 5765 availrmem > swapfs_reserve + npages) 5766 return (1); 5767 #endif 5768 5769 count = max_count; 5770 atomic_add_long(&mem_waiters, 1); 5771 5772 while (freemem < desfree + npages && --count) { 5773 cv_signal(&proc_pageout->p_cv); 5774 if (delay_sig(hz + (mem_waiters & MAX_DELAY))) { 5775 atomic_add_long(&mem_waiters, -1); 5776 return (0); 5777 } 5778 } 5779 if (count == 0) { 5780 atomic_add_long(&mem_waiters, -1); 5781 return (0); 5782 } 5783 5784 count = max_count; 5785 while (availrmem < swapfs_reserve + npages && --count) { 5786 kmem_reap(); 5787 if (delay_sig(hz + (mem_waiters & MAX_DELAY))) { 5788 atomic_add_long(&mem_waiters, -1); 5789 return (0); 5790 } 5791 } 5792 atomic_add_long(&mem_waiters, -1); 5793 if (count == 0) 5794 return (0); 5795 5796 #if defined(__i386) 5797 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 5798 tune.t_minarmem + npages) 5799 return (0); 5800 #endif 5801 return (1); 5802 } 5803 5804 #define MAX_CNT 60 /* max num of iterations */ 5805 /* 5806 * Reclaim/reserve availrmem for npages. 5807 * If there is not enough memory start reaping seg, kmem caches. 5808 * Start pageout scanner (via page_needfree()). 5809 * Exit after ~ MAX_CNT s regardless of how much memory has been released. 5810 * Note: There is no guarantee that any availrmem will be freed as 5811 * this memory typically is locked (kernel heap) or reserved for swap. 5812 * Also due to memory fragmentation kmem allocator may not be able 5813 * to free any memory (single user allocated buffer will prevent 5814 * freeing slab or a page). 5815 */ 5816 int 5817 page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust) 5818 { 5819 int i = 0; 5820 int ret = 0; 5821 pgcnt_t deficit; 5822 pgcnt_t old_availrmem; 5823 5824 mutex_enter(&freemem_lock); 5825 old_availrmem = availrmem - 1; 5826 while ((availrmem < tune.t_minarmem + npages + epages) && 5827 (old_availrmem < availrmem) && (i++ < MAX_CNT)) { 5828 old_availrmem = availrmem; 5829 deficit = tune.t_minarmem + npages + epages - availrmem; 5830 mutex_exit(&freemem_lock); 5831 page_needfree(deficit); 5832 kmem_reap(); 5833 delay(hz); 5834 page_needfree(-(spgcnt_t)deficit); 5835 mutex_enter(&freemem_lock); 5836 } 5837 5838 if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) { 5839 availrmem -= npages; 5840 ret = 1; 5841 } 5842 5843 mutex_exit(&freemem_lock); 5844 5845 return (ret); 5846 } 5847 5848 /* 5849 * Search the memory segments to locate the desired page. Within a 5850 * segment, pages increase linearly with one page structure per 5851 * physical page frame (size PAGESIZE). The search begins 5852 * with the segment that was accessed last, to take advantage of locality. 5853 * If the hint misses, we start from the beginning of the sorted memseg list 5854 */ 5855 5856 5857 /* 5858 * Some data structures for pfn to pp lookup. 
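 *
 * Illustrative note (not from the original source): conceptually,
 * memseg_hash[] divides the pfn range [0, physmax] into equal strides of
 * mhash_per_slot pfns, so a lookup is roughly
 *
 *	seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)];
 *
 * where the slot was seeded (by build_pfn_hash() below) with the
 * lowest-based memseg overlapping that stride; on a miss the code falls
 * back to walking the full memsegs list.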
 */
ulong_t mhash_per_slot;
struct memseg *memseg_hash[N_MEM_SLOTS];

page_t *
page_numtopp_nolock(pfn_t pfnum)
{
	struct memseg *seg;
	page_t *pp;
	vm_cpu_data_t *vc;

	/*
	 * We need to disable kernel preemption while referencing the
	 * cpu_vm_data field in order to prevent us from being switched to
	 * another cpu and trying to reference it after it has been freed.
	 * This will keep us on cpu and prevent it from being removed while
	 * we are still on it.
	 *
	 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
	 * which is being reused by DR, which will flush those references
	 * before modifying the reused memseg. See memseg_cpu_vm_flush().
	 */
	kpreempt_disable();
	vc = CPU->cpu_vm_data;
	ASSERT(vc != NULL);

	MEMSEG_STAT_INCR(nsearch);

	/* Try last winner first */
	if (((seg = vc->vc_pnum_memseg) != NULL) &&
	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
		MEMSEG_STAT_INCR(nlastwon);
		pp = seg->pages + (pfnum - seg->pages_base);
		if (pp->p_pagenum == pfnum) {
			kpreempt_enable();
			return ((page_t *)pp);
		}
	}

	/* Else try hash */
	if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
		MEMSEG_STAT_INCR(nhashwon);
		vc->vc_pnum_memseg = seg;
		pp = seg->pages + (pfnum - seg->pages_base);
		if (pp->p_pagenum == pfnum) {
			kpreempt_enable();
			return ((page_t *)pp);
		}
	}

	/* Else brute force */
	for (seg = memsegs; seg != NULL; seg = seg->next) {
		if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
			vc->vc_pnum_memseg = seg;
			pp = seg->pages + (pfnum - seg->pages_base);
			if (pp->p_pagenum == pfnum) {
				kpreempt_enable();
				return ((page_t *)pp);
			}
		}
	}
	vc->vc_pnum_memseg = NULL;
	kpreempt_enable();
	MEMSEG_STAT_INCR(nnotfound);
	return ((page_t *)NULL);
}

struct memseg *
page_numtomemseg_nolock(pfn_t pfnum)
{
	struct memseg *seg;
	page_t *pp;

	/*
	 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
	 * which is being reused by DR, which will flush those references
	 * before modifying the reused memseg. See memseg_cpu_vm_flush().
	 */
	kpreempt_disable();
	/* Try hash */
	if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
		pp = seg->pages + (pfnum - seg->pages_base);
		if (pp->p_pagenum == pfnum) {
			kpreempt_enable();
			return (seg);
		}
	}

	/* Else brute force */
	for (seg = memsegs; seg != NULL; seg = seg->next) {
		if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
			pp = seg->pages + (pfnum - seg->pages_base);
			if (pp->p_pagenum == pfnum) {
				kpreempt_enable();
				return (seg);
			}
		}
	}
	kpreempt_enable();
	return ((struct memseg *)NULL);
}

/*
 * Given a page and a count, return the page struct that is
 * n structs away from the current one in the global page
 * list.
 *
 * This function wraps to the first page upon
 * reaching the end of the memseg list.
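 *
 * Illustrative sketch (not from the original source): the canonical walk
 * over every page in the system relies on this wrap-around to detect
 * completion, e.g.
 *
 *	page_t *page0 = page_first();
 *	page_t *pp = page0;
 *
 *	do {
 *		... examine pp ...
 *	} while ((pp = page_next(pp)) != page0);
 *
 * page_busy() and page_invalidate_pages() above use exactly this pattern.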
5971 */ 5972 page_t * 5973 page_nextn(page_t *pp, ulong_t n) 5974 { 5975 struct memseg *seg; 5976 page_t *ppn; 5977 vm_cpu_data_t *vc; 5978 5979 /* 5980 * We need to disable kernel preemption while referencing the 5981 * cpu_vm_data field in order to prevent us from being switched to 5982 * another cpu and trying to reference it after it has been freed. 5983 * This will keep us on cpu and prevent it from being removed while 5984 * we are still on it. 5985 * 5986 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg 5987 * which is being resued by DR who will flush those references 5988 * before modifying the reused memseg. See memseg_cpu_vm_flush(). 5989 */ 5990 kpreempt_disable(); 5991 vc = (vm_cpu_data_t *)CPU->cpu_vm_data; 5992 5993 ASSERT(vc != NULL); 5994 5995 if (((seg = vc->vc_pnext_memseg) == NULL) || 5996 (seg->pages_base == seg->pages_end) || 5997 !(pp >= seg->pages && pp < seg->epages)) { 5998 5999 for (seg = memsegs; seg; seg = seg->next) { 6000 if (pp >= seg->pages && pp < seg->epages) 6001 break; 6002 } 6003 6004 if (seg == NULL) { 6005 /* Memory delete got in, return something valid. */ 6006 /* TODO: fix me. */ 6007 seg = memsegs; 6008 pp = seg->pages; 6009 } 6010 } 6011 6012 /* check for wraparound - possible if n is large */ 6013 while ((ppn = (pp + n)) >= seg->epages || ppn < pp) { 6014 n -= seg->epages - pp; 6015 seg = seg->next; 6016 if (seg == NULL) 6017 seg = memsegs; 6018 pp = seg->pages; 6019 } 6020 vc->vc_pnext_memseg = seg; 6021 kpreempt_enable(); 6022 return (ppn); 6023 } 6024 6025 /* 6026 * Initialize for a loop using page_next_scan_large(). 6027 */ 6028 page_t * 6029 page_next_scan_init(void **cookie) 6030 { 6031 ASSERT(cookie != NULL); 6032 *cookie = (void *)memsegs; 6033 return ((page_t *)memsegs->pages); 6034 } 6035 6036 /* 6037 * Return the next page in a scan of page_t's, assuming we want 6038 * to skip over sub-pages within larger page sizes. 6039 * 6040 * The cookie is used to keep track of the current memseg. 6041 */ 6042 page_t * 6043 page_next_scan_large( 6044 page_t *pp, 6045 ulong_t *n, 6046 void **cookie) 6047 { 6048 struct memseg *seg = (struct memseg *)*cookie; 6049 page_t *new_pp; 6050 ulong_t cnt; 6051 pfn_t pfn; 6052 6053 6054 /* 6055 * get the count of page_t's to skip based on the page size 6056 */ 6057 ASSERT(pp != NULL); 6058 if (pp->p_szc == 0) { 6059 cnt = 1; 6060 } else { 6061 pfn = page_pptonum(pp); 6062 cnt = page_get_pagecnt(pp->p_szc); 6063 cnt -= pfn & (cnt - 1); 6064 } 6065 *n += cnt; 6066 new_pp = pp + cnt; 6067 6068 /* 6069 * Catch if we went past the end of the current memory segment. If so, 6070 * just move to the next segment with pages. 6071 */ 6072 if (new_pp >= seg->epages || seg->pages_base == seg->pages_end) { 6073 do { 6074 seg = seg->next; 6075 if (seg == NULL) 6076 seg = memsegs; 6077 } while (seg->pages_base == seg->pages_end); 6078 new_pp = seg->pages; 6079 *cookie = (void *)seg; 6080 } 6081 6082 return (new_pp); 6083 } 6084 6085 6086 /* 6087 * Returns next page in list. Note: this function wraps 6088 * to the first page in the list upon reaching the end 6089 * of the list. Callers should be aware of this fact. 6090 */ 6091 6092 /* We should change this be a #define */ 6093 6094 page_t * 6095 page_next(page_t *pp) 6096 { 6097 return (page_nextn(pp, 1)); 6098 } 6099 6100 page_t * 6101 page_first() 6102 { 6103 return ((page_t *)memsegs->pages); 6104 } 6105 6106 6107 /* 6108 * This routine is called at boot with the initial memory configuration 6109 * and when memory is added or removed. 
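 *
 * Illustrative note (not from the original source): the stride computed
 * below,
 *
 *	mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT;
 *
 * means slot i of memseg_hash[] covers pfns [i * mhash_per_slot,
 * (i + 1) * mhash_per_slot). Each slot a memseg overlaps is seeded with
 * the lowest-based such memseg, and the index recomputation inside the
 * loop guards against running past N_MEM_SLOTS.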
6110 */ 6111 void 6112 build_pfn_hash() 6113 { 6114 pfn_t cur; 6115 pgcnt_t index; 6116 struct memseg *pseg; 6117 int i; 6118 6119 /* 6120 * Clear memseg_hash array. 6121 * Since memory add/delete is designed to operate concurrently 6122 * with normal operation, the hash rebuild must be able to run 6123 * concurrently with page_numtopp_nolock(). To support this 6124 * functionality, assignments to memseg_hash array members must 6125 * be done atomically. 6126 * 6127 * NOTE: bzero() does not currently guarantee this for kernel 6128 * threads, and cannot be used here. 6129 */ 6130 for (i = 0; i < N_MEM_SLOTS; i++) 6131 memseg_hash[i] = NULL; 6132 6133 hat_kpm_mseghash_clear(N_MEM_SLOTS); 6134 6135 /* 6136 * Physmax is the last valid pfn. 6137 */ 6138 mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT; 6139 for (pseg = memsegs; pseg != NULL; pseg = pseg->next) { 6140 index = MEMSEG_PFN_HASH(pseg->pages_base); 6141 cur = pseg->pages_base; 6142 do { 6143 if (index >= N_MEM_SLOTS) 6144 index = MEMSEG_PFN_HASH(cur); 6145 6146 if (memseg_hash[index] == NULL || 6147 memseg_hash[index]->pages_base > pseg->pages_base) { 6148 memseg_hash[index] = pseg; 6149 hat_kpm_mseghash_update(index, pseg); 6150 } 6151 cur += mhash_per_slot; 6152 index++; 6153 } while (cur < pseg->pages_end); 6154 } 6155 } 6156 6157 /* 6158 * Return the pagenum for the pp 6159 */ 6160 pfn_t 6161 page_pptonum(page_t *pp) 6162 { 6163 return (pp->p_pagenum); 6164 } 6165 6166 /* 6167 * interface to the referenced and modified etc bits 6168 * in the PSM part of the page struct 6169 * when no locking is desired. 6170 */ 6171 void 6172 page_set_props(page_t *pp, uint_t flags) 6173 { 6174 ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0); 6175 pp->p_nrm |= (uchar_t)flags; 6176 } 6177 6178 void 6179 page_clr_all_props(page_t *pp) 6180 { 6181 pp->p_nrm = 0; 6182 } 6183 6184 /* 6185 * Clear p_lckcnt and p_cowcnt, adjusting freemem if required. 6186 */ 6187 int 6188 page_clear_lck_cow(page_t *pp, int adjust) 6189 { 6190 int f_amount; 6191 6192 ASSERT(PAGE_EXCL(pp)); 6193 6194 /* 6195 * The page_struct_lock need not be acquired here since 6196 * we require the caller hold the page exclusively locked. 6197 */ 6198 f_amount = 0; 6199 if (pp->p_lckcnt) { 6200 f_amount = 1; 6201 pp->p_lckcnt = 0; 6202 } 6203 if (pp->p_cowcnt) { 6204 f_amount += pp->p_cowcnt; 6205 pp->p_cowcnt = 0; 6206 } 6207 6208 if (adjust && f_amount) { 6209 mutex_enter(&freemem_lock); 6210 availrmem += f_amount; 6211 mutex_exit(&freemem_lock); 6212 } 6213 6214 return (f_amount); 6215 } 6216 6217 /* 6218 * The following functions is called from free_vp_pages() 6219 * for an inexact estimate of a newly free'd page... 6220 */ 6221 ulong_t 6222 page_share_cnt(page_t *pp) 6223 { 6224 return (hat_page_getshare(pp)); 6225 } 6226 6227 int 6228 page_isshared(page_t *pp) 6229 { 6230 return (hat_page_checkshare(pp, 1)); 6231 } 6232 6233 int 6234 page_isfree(page_t *pp) 6235 { 6236 return (PP_ISFREE(pp)); 6237 } 6238 6239 int 6240 page_isref(page_t *pp) 6241 { 6242 return (hat_page_getattr(pp, P_REF)); 6243 } 6244 6245 int 6246 page_ismod(page_t *pp) 6247 { 6248 return (hat_page_getattr(pp, P_MOD)); 6249 } 6250 6251 /* 6252 * The following code all currently relates to the page capture logic: 6253 * 6254 * This logic is used for cases where there is a desire to claim a certain 6255 * physical page in the system for the caller. 
 * As it may not be possible to capture the page immediately, the p_toxic
 * bits are used in the page structure to indicate that someone wants to
 * capture this page. When the page gets unlocked, the toxic flag will be
 * noted and an attempt to capture the page will be made. If it is
 * successful, the original caller's callback will be called with the page,
 * to do with it as it pleases.
 *
 * There is also an async thread which wakes up occasionally to attempt to
 * capture pages which have the capture bit set. All of the pages which
 * need to be captured asynchronously have been inserted into the
 * page_capture_hash, so this thread walks that hash list. Items in the
 * hash have an expiration time, and the thread handles expiry as well,
 * removing an item from the hash once it has expired.
 *
 * Some important things to note are:
 * - if the PR_CAPTURE bit is set on a page, then the page is in the
 *   page_capture_hash. The page_capture_hash_head.pchh_mutex is needed
 *   to set and clear this bit, and only while that lock is held can an
 *   entry be added to or removed from the hash.
 * - the PR_CAPTURE bit can only be set and cleared while holding the
 *   page_capture_hash_head.pchh_mutex.
 * - the t_flag field of the thread struct is used with the T_CAPTURING
 *   flag to prevent recursion while dealing with large pages.
 * - pages which need to be retired never expire on the page_capture_hash.
 */

static void page_capture_thread(void);
static kthread_t *pc_thread_id;
kcondvar_t pc_cv;
static kmutex_t pc_thread_mutex;
static clock_t pc_thread_shortwait;
static clock_t pc_thread_longwait;
static int pc_thread_retry;

struct page_capture_callback pc_cb[PC_NUM_CALLBACKS];

/* Note that this is a circular linked list */
typedef struct page_capture_hash_bucket {
	page_t *pp;
	uchar_t szc;
	uchar_t pri;
	uint_t flags;
	clock_t expires;	/* lbolt at which this request expires. */
	void *datap;		/* Cached data passed in for callback */
	struct page_capture_hash_bucket *next;
	struct page_capture_hash_bucket *prev;
} page_capture_hash_bucket_t;

#define	PC_PRI_HI	0	/* capture now */
#define	PC_PRI_LO	1	/* capture later */
#define	PC_NUM_PRI	2

#define	PAGE_CAPTURE_PRIO(pp)	(PP_ISRAF(pp) ? PC_PRI_LO : PC_PRI_HI)


/*
 * Each hash bucket has its own mutex and two lists, which are:
 * active (0): represents requests which have not been processed by
 * the page_capture async thread yet.
 * walked (1): represents requests which have been processed by the
 * page_capture async thread within its given walk of this bucket.
 *
 * These are all needed so that we can synchronize all async page_capture
 * events. When the async thread moves to a new bucket, it will append the
 * walked list to the active list and walk each item one at a time, moving
 * it from the active list to the walked list. Thus if there is an async
 * request outstanding for a given page, it will always be in one of the
 * two lists. New requests will always be added to the active list.
 * If we are not able to capture a page before the request expires, we free
 * up the request structure, which indicates to page_capture that there is
 * no longer a need for the given page, and we clear the PR_CAPTURE flag if
 * possible.
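 *
 * Illustrative sketch (not from the original source): a consumer such as
 * page retire registers its callback once and then asks for pages to be
 * captured, e.g.
 *
 *	static int
 *	my_capture_cb(page_t *pp, void *datap, uint_t flags)
 *	{
 *		... consume the captured page ...
 *		return (0);
 *	}
 *
 *	page_capture_register_callback(MY_CB_INDEX, -1, my_capture_cb);
 *	ret = page_trycapture(pp, 0, 1 << MY_CB_INDEX, my_datap);
 *
 * MY_CB_INDEX and my_datap are placeholders: the index names one of the
 * PC_NUM_CALLBACKS slots, the flags word carries (1 << index), and a
 * duration of -1 is treated below as a request that never expires.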
6327 */ 6328 typedef struct page_capture_hash_head { 6329 kmutex_t pchh_mutex; 6330 uint_t num_pages[PC_NUM_PRI]; 6331 page_capture_hash_bucket_t lists[2]; /* sentinel nodes */ 6332 } page_capture_hash_head_t; 6333 6334 #ifdef DEBUG 6335 #define NUM_PAGE_CAPTURE_BUCKETS 4 6336 #else 6337 #define NUM_PAGE_CAPTURE_BUCKETS 64 6338 #endif 6339 6340 page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS]; 6341 6342 /* for now use a very simple hash based upon the size of a page struct */ 6343 #define PAGE_CAPTURE_HASH(pp) \ 6344 ((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1))) 6345 6346 extern pgcnt_t swapfs_minfree; 6347 6348 int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap); 6349 6350 /* 6351 * A callback function is required for page capture requests. 6352 */ 6353 void 6354 page_capture_register_callback(uint_t index, clock_t duration, 6355 int (*cb_func)(page_t *, void *, uint_t)) 6356 { 6357 ASSERT(pc_cb[index].cb_active == 0); 6358 ASSERT(cb_func != NULL); 6359 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 6360 pc_cb[index].duration = duration; 6361 pc_cb[index].cb_func = cb_func; 6362 pc_cb[index].cb_active = 1; 6363 rw_exit(&pc_cb[index].cb_rwlock); 6364 } 6365 6366 void 6367 page_capture_unregister_callback(uint_t index) 6368 { 6369 int i, j; 6370 struct page_capture_hash_bucket *bp1; 6371 struct page_capture_hash_bucket *bp2; 6372 struct page_capture_hash_bucket *head = NULL; 6373 uint_t flags = (1 << index); 6374 6375 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 6376 ASSERT(pc_cb[index].cb_active == 1); 6377 pc_cb[index].duration = 0; /* Paranoia */ 6378 pc_cb[index].cb_func = NULL; /* Paranoia */ 6379 pc_cb[index].cb_active = 0; 6380 rw_exit(&pc_cb[index].cb_rwlock); 6381 6382 /* 6383 * Just move all the entries to a private list which we can walk 6384 * through without the need to hold any locks. 6385 * No more requests can get added to the hash lists for this consumer 6386 * as the cb_active field for the callback has been cleared. 6387 */ 6388 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 6389 mutex_enter(&page_capture_hash[i].pchh_mutex); 6390 for (j = 0; j < 2; j++) { 6391 bp1 = page_capture_hash[i].lists[j].next; 6392 /* walk through all but first (sentinel) element */ 6393 while (bp1 != &page_capture_hash[i].lists[j]) { 6394 bp2 = bp1; 6395 if (bp2->flags & flags) { 6396 bp1 = bp2->next; 6397 bp1->prev = bp2->prev; 6398 bp2->prev->next = bp1; 6399 bp2->next = head; 6400 head = bp2; 6401 /* 6402 * Clear the PR_CAPTURE bit as we 6403 * hold appropriate locks here. 6404 */ 6405 page_clrtoxic(head->pp, PR_CAPTURE); 6406 page_capture_hash[i]. 6407 num_pages[bp2->pri]--; 6408 continue; 6409 } 6410 bp1 = bp1->next; 6411 } 6412 } 6413 mutex_exit(&page_capture_hash[i].pchh_mutex); 6414 } 6415 6416 while (head != NULL) { 6417 bp1 = head; 6418 head = head->next; 6419 kmem_free(bp1, sizeof (*bp1)); 6420 } 6421 } 6422 6423 6424 /* 6425 * Find pp in the active list and move it to the walked list if it 6426 * exists. 6427 * Note that pp will most often be at the front of the active list, as it 6428 * is the page currently in use; beyond that, no optimization is attempted 6429 * on this linked list data structure. 6430 * Returns 1 on successful move or 0 if the page could not be found.
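 *
 * The move itself is the standard doubly-linked sentinel-list idiom; as
 * a sketch (mirroring the statements in the function body below, with
 * "walked" standing for &page_capture_hash[index].lists[1]):
 *
 *	bp->next->prev = bp->prev;	unlink bp from the active list
 *	bp->prev->next = bp->next;
 *	bp->next = walked->next;	link bp in at the head of
 *	bp->prev = walked;		the walked list
 *	walked->next = bp;
 *	bp->next->prev = bp;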
6431 */ 6432 static int 6433 page_capture_move_to_walked(page_t *pp) 6434 { 6435 page_capture_hash_bucket_t *bp; 6436 int index; 6437 6438 index = PAGE_CAPTURE_HASH(pp); 6439 6440 mutex_enter(&page_capture_hash[index].pchh_mutex); 6441 bp = page_capture_hash[index].lists[0].next; 6442 while (bp != &page_capture_hash[index].lists[0]) { 6443 if (bp->pp == pp) { 6444 /* Remove from old list */ 6445 bp->next->prev = bp->prev; 6446 bp->prev->next = bp->next; 6447 6448 /* Add to new list */ 6449 bp->next = page_capture_hash[index].lists[1].next; 6450 bp->prev = &page_capture_hash[index].lists[1]; 6451 page_capture_hash[index].lists[1].next = bp; 6452 bp->next->prev = bp; 6453 6454 /* 6455 * There is a small probability that a page on the 6456 * free list is retired while being allocated, 6457 * before P_RAF is set on it. Such a page may 6458 * end up marked as a high priority request 6459 * instead of a low priority request. 6460 * If a P_RAF page is not marked as a low priority 6461 * request, change it to a low priority request. 6462 */ 6463 page_capture_hash[index].num_pages[bp->pri]--; 6464 bp->pri = PAGE_CAPTURE_PRIO(pp); 6465 page_capture_hash[index].num_pages[bp->pri]++; 6466 mutex_exit(&page_capture_hash[index].pchh_mutex); 6467 return (1); 6468 } 6469 bp = bp->next; 6470 } 6471 mutex_exit(&page_capture_hash[index].pchh_mutex); 6472 return (0); 6473 } 6474 6475 /* 6476 * Add a new entry to the page capture hash. The only case where a new 6477 * entry is not added is when the page capture consumer is no longer registered. 6478 * In this case, we'll silently not add the page to the hash. We know that 6479 * page retire will always be registered for the case where we are currently 6480 * unretiring a page and thus there are no conflicts. 6481 */ 6482 static void 6483 page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap) 6484 { 6485 page_capture_hash_bucket_t *bp1; 6486 page_capture_hash_bucket_t *bp2; 6487 int index; 6488 int cb_index; 6489 int i; 6490 uchar_t pri; 6491 #ifdef DEBUG 6492 page_capture_hash_bucket_t *tp1; 6493 int l; 6494 #endif 6495 6496 ASSERT(!(flags & CAPTURE_ASYNC)); 6497 6498 bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP); 6499 6500 bp1->pp = pp; 6501 bp1->szc = szc; 6502 bp1->flags = flags; 6503 bp1->datap = datap; 6504 6505 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6506 if ((flags >> cb_index) & 1) { 6507 break; 6508 } 6509 } 6510 6511 ASSERT(cb_index != PC_NUM_CALLBACKS); 6512 6513 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6514 if (pc_cb[cb_index].cb_active) { 6515 if (pc_cb[cb_index].duration == -1) { 6516 bp1->expires = (clock_t)-1; 6517 } else { 6518 bp1->expires = ddi_get_lbolt() + 6519 pc_cb[cb_index].duration; 6520 } 6521 } else { 6522 /* There's no callback registered so don't add to the hash */ 6523 rw_exit(&pc_cb[cb_index].cb_rwlock); 6524 kmem_free(bp1, sizeof (*bp1)); 6525 return; 6526 } 6527 6528 index = PAGE_CAPTURE_HASH(pp); 6529 6530 /* 6531 * Only allow the capture flag to be modified under this mutex. 6532 * This prevents multiple entries for the same page from being added.
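 *
 * (For reference, a hedged note: the callback indices and capture flags
 * are assumed to be related by flags == (1 << index), e.g.
 * CAPTURE_RETIRE == (1 << PC_RETIRE) in page.h, which is what the
 * (flags >> cb_index) & 1 loops in this file rely on.)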
6533 */ 6534 mutex_enter(&page_capture_hash[index].pchh_mutex); 6535 6536 /* 6537 * if not already on the hash, set capture bit and add to the hash 6538 */ 6539 if (!(pp->p_toxic & PR_CAPTURE)) { 6540 #ifdef DEBUG 6541 /* Check for duplicate entries */ 6542 for (l = 0; l < 2; l++) { 6543 tp1 = page_capture_hash[index].lists[l].next; 6544 while (tp1 != &page_capture_hash[index].lists[l]) { 6545 if (tp1->pp == pp) { 6546 panic("page pp 0x%p already on hash " 6547 "at 0x%p\n", 6548 (void *)pp, (void *)tp1); 6549 } 6550 tp1 = tp1->next; 6551 } 6552 } 6553 6554 #endif 6555 page_settoxic(pp, PR_CAPTURE); 6556 pri = PAGE_CAPTURE_PRIO(pp); 6557 bp1->pri = pri; 6558 bp1->next = page_capture_hash[index].lists[0].next; 6559 bp1->prev = &page_capture_hash[index].lists[0]; 6560 bp1->next->prev = bp1; 6561 page_capture_hash[index].lists[0].next = bp1; 6562 page_capture_hash[index].num_pages[pri]++; 6563 if (flags & CAPTURE_RETIRE) { 6564 page_retire_incr_pend_count(datap); 6565 } 6566 mutex_exit(&page_capture_hash[index].pchh_mutex); 6567 rw_exit(&pc_cb[cb_index].cb_rwlock); 6568 cv_signal(&pc_cv); 6569 return; 6570 } 6571 6572 /* 6573 * A page retire request will replace any other request. 6574 * A second physmem request which is for a different process than 6575 * the currently registered one will be dropped as there is 6576 * no way to hold the private data for both calls. 6577 * In the future, once there are more callers, this will have to 6578 * be worked out better as there needs to be private storage for 6579 * at least each type of caller (maybe have datap be an array of 6580 * void *'s so that we can index based upon the caller's index). 6581 */ 6582 6583 /* walk hash list to update expire time */ 6584 for (i = 0; i < 2; i++) { 6585 bp2 = page_capture_hash[index].lists[i].next; 6586 while (bp2 != &page_capture_hash[index].lists[i]) { 6587 if (bp2->pp == pp) { 6588 if (flags & CAPTURE_RETIRE) { 6589 if (!(bp2->flags & CAPTURE_RETIRE)) { 6590 page_retire_incr_pend_count( 6591 datap); 6592 bp2->flags = flags; 6593 bp2->expires = bp1->expires; 6594 bp2->datap = datap; 6595 } 6596 } else { 6597 ASSERT(flags & CAPTURE_PHYSMEM); 6598 if (!(bp2->flags & CAPTURE_RETIRE) && 6599 (datap == bp2->datap)) { 6600 bp2->expires = bp1->expires; 6601 } 6602 } 6603 mutex_exit(&page_capture_hash[index]. 6604 pchh_mutex); 6605 rw_exit(&pc_cb[cb_index].cb_rwlock); 6606 kmem_free(bp1, sizeof (*bp1)); 6607 return; 6608 } 6609 bp2 = bp2->next; 6610 } 6611 } 6612 6613 /* 6614 * The PR_CAPTURE flag is protected by the page_capture_hash mutexes, 6615 * and thus it either has to be set or not set and can't change 6616 * while holding the mutex above. 6617 */ 6618 panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n", 6619 (void *)pp); 6620 } 6621 6622 /* 6623 * We have a page in our hands, so let's try to make it ours by turning 6624 * it into a clean page, as if it had just come off the freelists. 6625 * 6626 * Returns 0 on success, with the page still EXCL locked.
6627 * On failure, the page will be unlocked and EAGAIN is returned. 6628 */ 6629 static int 6630 page_capture_clean_page(page_t *pp) 6631 { 6632 page_t *newpp; 6633 int skip_unlock = 0; 6634 spgcnt_t count; 6635 page_t *tpp; 6636 int ret = 0; 6637 int extra; 6638 6639 ASSERT(PAGE_EXCL(pp)); 6640 ASSERT(!PP_RETIRED(pp)); 6641 ASSERT(curthread->t_flag & T_CAPTURING); 6642 6643 if (PP_ISFREE(pp)) { 6644 if (!page_reclaim(pp, NULL)) { 6645 skip_unlock = 1; 6646 ret = EAGAIN; 6647 goto cleanup; 6648 } 6649 ASSERT(pp->p_szc == 0); 6650 if (pp->p_vnode != NULL) { 6651 /* 6652 * Since this page came from the 6653 * cachelist, we must destroy the 6654 * old vnode association. 6655 */ 6656 page_hashout(pp, NULL); 6657 } 6658 goto cleanup; 6659 } 6660 6661 /* 6662 * If we know page_relocate will fail, skip it. 6663 * It could still fail due to a UE on another page, but we 6664 * can't do anything about that. 6665 */ 6666 if (pp->p_toxic & PR_UE) { 6667 goto skip_relocate; 6668 } 6669 6670 /* 6671 * It's possible for a page to not have a vnode, as fsflush comes 6672 * through and cleans up these pages. It's ugly but that's how it is. 6673 */ 6674 if (pp->p_vnode == NULL) { 6675 goto skip_relocate; 6676 } 6677 6678 /* 6679 * The page was not free, so let's try to relocate it. 6680 * page_relocate only works with root pages, so if this is not a root 6681 * page, we need to demote it to try to relocate it. 6682 * Unfortunately this is the best we can do right now. 6683 */ 6684 newpp = NULL; 6685 if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) { 6686 if (page_try_demote_pages(pp) == 0) { 6687 ret = EAGAIN; 6688 goto cleanup; 6689 } 6690 } 6691 ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL); 6692 if (ret == 0) { 6693 page_t *npp; 6694 /* unlock the new page(s) */ 6695 while (count-- > 0) { 6696 ASSERT(newpp != NULL); 6697 npp = newpp; 6698 page_sub(&newpp, npp); 6699 page_unlock(npp); 6700 } 6701 ASSERT(newpp == NULL); 6702 /* 6703 * Check to see if the page we have is too large. 6704 * If so, demote it, freeing up the extra pages. 6705 */ 6706 if (pp->p_szc > 0) { 6707 /* For now demote extra pages to szc == 0 */ 6708 extra = page_get_pagecnt(pp->p_szc) - 1; 6709 while (extra > 0) { 6710 tpp = pp->p_next; 6711 page_sub(&pp, tpp); 6712 tpp->p_szc = 0; 6713 page_free(tpp, 1); 6714 extra--; 6715 } 6716 /* Make sure to set our page to szc 0 as well */ 6717 ASSERT(pp->p_next == pp && pp->p_prev == pp); 6718 pp->p_szc = 0; 6719 } 6720 goto cleanup; 6721 } else if (ret == EIO) { 6722 ret = EAGAIN; 6723 goto cleanup; 6724 } else { 6725 /* 6726 * Need to reset the return value, as we failed to relocate 6727 * the page, but that does not mean that some of the next 6728 * steps will not work. 6729 */ 6730 ret = 0; 6731 } 6732 6733 skip_relocate: 6734 6735 if (pp->p_szc > 0) { 6736 if (page_try_demote_pages(pp) == 0) { 6737 ret = EAGAIN; 6738 goto cleanup; 6739 } 6740 } 6741 6742 ASSERT(pp->p_szc == 0); 6743 6744 if (hat_ismod(pp)) { 6745 ret = EAGAIN; 6746 goto cleanup; 6747 } 6748 if (PP_ISKAS(pp)) { 6749 ret = EAGAIN; 6750 goto cleanup; 6751 } 6752 if (pp->p_lckcnt || pp->p_cowcnt) { 6753 ret = EAGAIN; 6754 goto cleanup; 6755 } 6756 6757 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 6758 ASSERT(!hat_page_is_mapped(pp)); 6759 6760 if (hat_ismod(pp)) { 6761 /* 6762 * This is a semi-odd case: the page is now modified even though 6763 * it is not mapped, as we just unloaded the mappings above.
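 * (A clarifying note, not in the original: hat_pageunload() folds the
 * hardware modify bits from the old mappings into the page as they are
 * torn down, so the page can legitimately turn out dirty here even
 * though nothing maps it any more; we simply give up on it rather than
 * trying to clean it.)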
6764 */ 6765 ret = EAGAIN; 6766 goto cleanup; 6767 } 6768 if (pp->p_vnode != NULL) { 6769 page_hashout(pp, NULL); 6770 } 6771 6772 /* 6773 * At this point, the page should be in a clean state and 6774 * we can do whatever we want with it. 6775 */ 6776 6777 cleanup: 6778 if (ret != 0) { 6779 if (!skip_unlock) { 6780 page_unlock(pp); 6781 } 6782 } else { 6783 ASSERT(pp->p_szc == 0); 6784 ASSERT(PAGE_EXCL(pp)); 6785 6786 pp->p_next = pp; 6787 pp->p_prev = pp; 6788 } 6789 return (ret); 6790 } 6791 6792 /* 6793 * Various callers of page_trycapture() can have different restrictions upon 6794 * what memory they have access to. 6795 * Returns 0 on success, with the following error codes on failure: 6796 * EPERM - The requested page is long term locked, and thus repeated 6797 * requests to capture this page will likely fail. 6798 * ENOMEM - There was not enough free memory in the system to safely 6799 * map the requested page. 6800 * ENOENT - The requested page was inside the kernel cage, and the 6801 * CAPTURE_GET_CAGE flag was not set. 6802 */ 6803 int 6804 page_capture_pre_checks(page_t *pp, uint_t flags) 6805 { 6806 ASSERT(pp != NULL); 6807 6808 #if defined(__sparc) 6809 if (pp->p_vnode == &promvp) { 6810 return (EPERM); 6811 } 6812 6813 if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE) && 6814 (flags & CAPTURE_PHYSMEM)) { 6815 return (ENOENT); 6816 } 6817 6818 if (PP_ISNORELOCKERNEL(pp)) { 6819 return (EPERM); 6820 } 6821 #else 6822 if (PP_ISKAS(pp)) { 6823 return (EPERM); 6824 } 6825 #endif /* __sparc */ 6826 6827 /* only physmem currently has the restrictions checked below */ 6828 if (!(flags & CAPTURE_PHYSMEM)) { 6829 return (0); 6830 } 6831 6832 if (availrmem < swapfs_minfree) { 6833 /* 6834 * We won't try to capture this page as we are 6835 * running low on memory. 6836 */ 6837 return (ENOMEM); 6838 } 6839 return (0); 6840 } 6841 6842 /* 6843 * Once we have a page in our mitts, go ahead and complete the capture 6844 * operation. 6845 * Returns 1 on failure, where the page is no longer needed. 6846 * Returns 0 on success. 6847 * Returns -1 if there was a transient failure. 6848 * Failure cases must release the SE_EXCL lock on pp (usually via page_free). 6849 */ 6850 int 6851 page_capture_take_action(page_t *pp, uint_t flags, void *datap) 6852 { 6853 int cb_index; 6854 int ret = 0; 6855 page_capture_hash_bucket_t *bp1; 6856 page_capture_hash_bucket_t *bp2; 6857 int index; 6858 int found = 0; 6859 int i; 6860 6861 ASSERT(PAGE_EXCL(pp)); 6862 ASSERT(curthread->t_flag & T_CAPTURING); 6863 6864 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6865 if ((flags >> cb_index) & 1) { 6866 break; 6867 } 6868 } 6869 ASSERT(cb_index < PC_NUM_CALLBACKS); 6870 6871 /* 6872 * Remove the entry from the page_capture hash, but don't free it yet 6873 * as we may need to put it back. 6874 * Since we own the page at this point in time, we should find it 6875 * in the hash if this is an ASYNC call. If we don't, it's likely 6876 * that the page_capture_async() thread decided that this request 6877 * had expired, in which case we just continue on. 6878 */ 6879 if (flags & CAPTURE_ASYNC) { 6880 6881 index = PAGE_CAPTURE_HASH(pp); 6882 6883 mutex_enter(&page_capture_hash[index].pchh_mutex); 6884 for (i = 0; i < 2 && !found; i++) { 6885 bp1 = page_capture_hash[index].lists[i].next; 6886 while (bp1 != &page_capture_hash[index].lists[i]) { 6887 if (bp1->pp == pp) { 6888 bp1->next->prev = bp1->prev; 6889 bp1->prev->next = bp1->next; 6890 page_capture_hash[index].
num_pages[bp1->pri]--; 6892 page_clrtoxic(pp, PR_CAPTURE); 6893 found = 1; 6894 break; 6895 } 6896 bp1 = bp1->next; 6897 } 6898 } 6899 mutex_exit(&page_capture_hash[index].pchh_mutex); 6900 } 6901 6902 /* Synchronize with the unregister func. */ 6903 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6904 if (!pc_cb[cb_index].cb_active) { 6905 page_free(pp, 1); 6906 rw_exit(&pc_cb[cb_index].cb_rwlock); 6907 if (found) { 6908 kmem_free(bp1, sizeof (*bp1)); 6909 } 6910 return (1); 6911 } 6912 6913 /* 6914 * We need to remove the entry from the page capture hash and turn off 6915 * the PR_CAPTURE bit before calling the callback. We'll need to cache 6916 * the entry here, and then based upon the return value, cleanup 6917 * appropriately or re-add it to the hash, making sure that someone else 6918 * hasn't already done so. 6919 * It should be rare for the callback to fail and thus it's ok for 6920 * the failure path to be a bit complicated as the success path is 6921 * cleaner and the locking rules are easier to follow. 6922 */ 6923 6924 ret = pc_cb[cb_index].cb_func(pp, datap, flags); 6925 6926 rw_exit(&pc_cb[cb_index].cb_rwlock); 6927 6928 /* 6929 * If this was an ASYNC request, we need to clean up the hash if the 6930 * callback was successful or if the request was no longer valid. 6931 * For non-ASYNC requests, we return failure to map and the caller 6932 * will take care of adding the request to the hash. 6933 * Note also that the callback itself is responsible for the page 6934 * at this point in time in terms of locking ... The most common 6935 * case for the failure path should just be a page_free. 6936 */ 6937 if (ret >= 0) { 6938 if (found) { 6939 if (bp1->flags & CAPTURE_RETIRE) { 6940 page_retire_decr_pend_count(datap); 6941 } 6942 kmem_free(bp1, sizeof (*bp1)); 6943 } 6944 return (ret); 6945 } 6946 if (!found) { 6947 return (ret); 6948 } 6949 6950 ASSERT(flags & CAPTURE_ASYNC); 6951 6952 /* 6953 * Check the expiration time first, as we can just free the entry up 6954 * if it has expired. 6955 */ 6956 if (ddi_get_lbolt() > bp1->expires && bp1->expires != -1) { 6957 kmem_free(bp1, sizeof (*bp1)); 6958 return (ret); 6959 } 6960 6961 /* 6962 * The callback failed and there used to be an entry in the hash for 6963 * this page, so we need to add it back to the hash. 6964 */ 6965 mutex_enter(&page_capture_hash[index].pchh_mutex); 6966 if (!(pp->p_toxic & PR_CAPTURE)) { 6967 /* just add bp1 back to head of walked list */ 6968 page_settoxic(pp, PR_CAPTURE); 6969 bp1->next = page_capture_hash[index].lists[1].next; 6970 bp1->prev = &page_capture_hash[index].lists[1]; 6971 bp1->next->prev = bp1; 6972 bp1->pri = PAGE_CAPTURE_PRIO(pp); 6973 page_capture_hash[index].lists[1].next = bp1; 6974 page_capture_hash[index].num_pages[bp1->pri]++; 6975 mutex_exit(&page_capture_hash[index].pchh_mutex); 6976 return (ret); 6977 } 6978 6979 /* 6980 * Otherwise a new capture request was added to the list, so we need 6981 * to make sure that our original data is represented if 6982 * appropriate.
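 * (Clarifying note on the merge policy below: an existing retire entry
 * is never overwritten by our cached data, while any other entry is
 * refreshed from it; this mirrors the "retire replaces any other
 * request" rule described above page_capture_add_hash().)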
6983 */ 6984 for (i = 0; i < 2; i++) { 6985 bp2 = page_capture_hash[index].lists[i].next; 6986 while (bp2 != &page_capture_hash[index].lists[i]) { 6987 if (bp2->pp == pp) { 6988 if (bp1->flags & CAPTURE_RETIRE) { 6989 if (!(bp2->flags & CAPTURE_RETIRE)) { 6990 bp2->szc = bp1->szc; 6991 bp2->flags = bp1->flags; 6992 bp2->expires = bp1->expires; 6993 bp2->datap = bp1->datap; 6994 } 6995 } else { 6996 ASSERT(bp1->flags & CAPTURE_PHYSMEM); 6997 if (!(bp2->flags & CAPTURE_RETIRE)) { 6998 bp2->szc = bp1->szc; 6999 bp2->flags = bp1->flags; 7000 bp2->expires = bp1->expires; 7001 bp2->datap = bp1->datap; 7002 } 7003 } 7004 page_capture_hash[index].num_pages[bp2->pri]--; 7005 bp2->pri = PAGE_CAPTURE_PRIO(pp); 7006 page_capture_hash[index].num_pages[bp2->pri]++; 7007 mutex_exit(&page_capture_hash[index]. 7008 pchh_mutex); 7009 kmem_free(bp1, sizeof (*bp1)); 7010 return (ret); 7011 } 7012 bp2 = bp2->next; 7013 } 7014 } 7015 panic("PR_CAPTURE set but not on hash for pp 0x%p\n", (void *)pp); 7016 /*NOTREACHED*/ 7017 } 7018 7019 /* 7020 * Try to capture the given page for the caller specified in the flags 7021 * parameter. The page will either be captured and handed over to the 7022 * appropriate callback, or will be queued up in the page capture hash 7023 * to be captured asynchronously. 7024 * If the current request is due to an async capture, the page must be 7025 * exclusively locked before calling this function. 7026 * Currently szc must be 0 but in the future this should be expandable to 7027 * other page sizes. 7028 * Returns 0 on success, with the following error codes on failure: 7029 * EPERM - The requested page is long term locked, and thus repeated 7030 * requests to capture this page will likely fail. 7031 * ENOMEM - There was not enough free memory in the system to safely 7032 * map the requested page. 7033 * ENOENT - The requested page was inside the kernel cage, and the 7034 * CAPTURE_GET_CAGE flag was not set. 7035 * EAGAIN - The requested page could not be captured at this point in 7036 * time but future requests will likely work. 7037 * EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED flag 7038 * was not set. 7039 */ 7040 int 7041 page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 7042 { 7043 int ret; 7044 int cb_index; 7045 7046 if (flags & CAPTURE_ASYNC) { 7047 ASSERT(PAGE_EXCL(pp)); 7048 goto async; 7049 } 7050 7051 /* Make sure there's enough availrmem ... */ 7052 ret = page_capture_pre_checks(pp, flags); 7053 if (ret != 0) { 7054 return (ret); 7055 } 7056 7057 if (!page_trylock(pp, SE_EXCL)) { 7058 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 7059 if ((flags >> cb_index) & 1) { 7060 break; 7061 } 7062 } 7063 ASSERT(cb_index < PC_NUM_CALLBACKS); 7064 ret = EAGAIN; 7065 /* Special case for retired pages */ 7066 if (PP_RETIRED(pp)) { 7067 if (flags & CAPTURE_GET_RETIRED) { 7068 if (!page_unretire_pp(pp, PR_UNR_TEMP)) { 7069 /* 7070 * Need to set capture bit and add to 7071 * hash so that the page will be 7072 * retired when freed.
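 * (Clarifying note: page_unretire_pp() is assumed to return 0 on
 * success, hence the negated test above; on success we own the page
 * and can jump straight to taking action on it.)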
7073 */ 7074 page_capture_add_hash(pp, szc, 7075 CAPTURE_RETIRE, NULL); 7076 ret = 0; 7077 goto own_page; 7078 } 7079 } else { 7080 return (EBUSY); 7081 } 7082 } 7083 page_capture_add_hash(pp, szc, flags, datap); 7084 return (ret); 7085 } 7086 7087 async: 7088 ASSERT(PAGE_EXCL(pp)); 7089 7090 /* For physmem async requests, check that availrmem is sane */ 7091 if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) == 7092 (CAPTURE_ASYNC | CAPTURE_PHYSMEM) && 7093 (availrmem < swapfs_minfree)) { 7094 page_unlock(pp); 7095 return (ENOMEM); 7096 } 7097 7098 ret = page_capture_clean_page(pp); 7099 7100 if (ret != 0) { 7101 /* We failed to get the page, so let's add it to the hash */ 7102 if (!(flags & CAPTURE_ASYNC)) { 7103 page_capture_add_hash(pp, szc, flags, datap); 7104 } 7105 return (ret); 7106 } 7107 7108 own_page: 7109 ASSERT(PAGE_EXCL(pp)); 7110 ASSERT(pp->p_szc == 0); 7111 7112 /* Call the callback */ 7113 ret = page_capture_take_action(pp, flags, datap); 7114 7115 if (ret == 0) { 7116 return (0); 7117 } 7118 7119 /* 7120 * Note that in the failure cases from page_capture_take_action, the 7121 * EXCL lock will have already been dropped. 7122 */ 7123 if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) { 7124 page_capture_add_hash(pp, szc, flags, datap); 7125 } 7126 return (EAGAIN); 7127 } 7128 7129 int 7130 page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 7131 { 7132 int ret; 7133 7134 curthread->t_flag |= T_CAPTURING; 7135 ret = page_itrycapture(pp, szc, flags, datap); 7136 curthread->t_flag &= ~T_CAPTURING; /* clear the flag; we know it was set */ 7137 return (ret); 7138 } 7139 7140 /* 7141 * When unlocking a page which has the PR_CAPTURE bit set, this routine 7142 * gets called to try and capture the page. 7143 */ 7144 void 7145 page_unlock_capture(page_t *pp) 7146 { 7147 page_capture_hash_bucket_t *bp; 7148 int index; 7149 int i; 7150 uint_t szc; 7151 uint_t flags = 0; 7152 void *datap; 7153 kmutex_t *mp; 7154 extern vnode_t retired_pages; 7155 7156 /* 7157 * We need to protect against a possible deadlock here where we own 7158 * the vnode page hash mutex and want to acquire it again, as there 7159 * are locations in the code where we unlock a page while holding 7160 * the mutex, which can lead to the page being captured and eventually 7161 * ending up here. As we may be hashing out the old page and hashing into 7162 * the retire vnode, we need to make sure we don't own them. 7163 * Other callbacks that do hash operations also need to make sure that 7164 * before they hash in to a vnode they do not currently own the 7165 * vphm mutex, otherwise there will be a panic. 7166 */ 7167 if (mutex_owned(page_vnode_mutex(&retired_pages))) { 7168 page_unlock_nocapture(pp); 7169 return; 7170 } 7171 if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) { 7172 page_unlock_nocapture(pp); 7173 return; 7174 } 7175 7176 index = PAGE_CAPTURE_HASH(pp); 7177 7178 mp = &page_capture_hash[index].pchh_mutex; 7179 mutex_enter(mp); 7180 for (i = 0; i < 2; i++) { 7181 bp = page_capture_hash[index].lists[i].next; 7182 while (bp != &page_capture_hash[index].lists[i]) { 7183 if (bp->pp == pp) { 7184 szc = bp->szc; 7185 flags = bp->flags | CAPTURE_ASYNC; 7186 datap = bp->datap; 7187 mutex_exit(mp); 7188 (void) page_trycapture(pp, szc, flags, datap); 7189 return; 7190 } 7191 bp = bp->next; 7192 } 7193 } 7194 7195 /* Failed to find the page in the hash, so clear the flag and unlock it.
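 * (We still hold pchh_mutex here, which is what makes clearing
 * PR_CAPTURE safe below; the bit may only change under that mutex, as
 * noted at the top of this subsystem.)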
*/ 7196 page_clrtoxic(pp, PR_CAPTURE); 7197 page_unlock(pp); 7198 7199 mutex_exit(mp); 7200 } 7201 7202 void 7203 page_capture_init() 7204 { 7205 int i; 7206 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7207 page_capture_hash[i].lists[0].next = 7208 &page_capture_hash[i].lists[0]; 7209 page_capture_hash[i].lists[0].prev = 7210 &page_capture_hash[i].lists[0]; 7211 page_capture_hash[i].lists[1].next = 7212 &page_capture_hash[i].lists[1]; 7213 page_capture_hash[i].lists[1].prev = 7214 &page_capture_hash[i].lists[1]; 7215 } 7216 7217 pc_thread_shortwait = 23 * hz; 7218 pc_thread_longwait = 1201 * hz; 7219 pc_thread_retry = 3; 7220 mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL); 7221 cv_init(&pc_cv, NULL, CV_DEFAULT, NULL); 7222 pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0, 7223 TS_RUN, minclsyspri); 7224 } 7225 7226 /* 7227 * It is necessary to scrub any failing pages prior to reboot in order to 7228 * prevent a latent error trap from occurring on the next boot. 7229 */ 7230 void 7231 page_retire_mdboot() 7232 { 7233 page_t *pp; 7234 int i, j; 7235 page_capture_hash_bucket_t *bp; 7236 uchar_t pri; 7237 7238 /* walk lists looking for pages to scrub */ 7239 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7240 for (pri = 0; pri < PC_NUM_PRI; pri++) { 7241 if (page_capture_hash[i].num_pages[pri] != 0) { 7242 break; 7243 } 7244 } 7245 if (pri == PC_NUM_PRI) 7246 continue; 7247 7248 mutex_enter(&page_capture_hash[i].pchh_mutex); 7249 7250 for (j = 0; j < 2; j++) { 7251 bp = page_capture_hash[i].lists[j].next; 7252 while (bp != &page_capture_hash[i].lists[j]) { 7253 pp = bp->pp; 7254 if (PP_TOXIC(pp)) { 7255 if (page_trylock(pp, SE_EXCL)) { 7256 PP_CLRFREE(pp); 7257 pagescrub(pp, 0, PAGESIZE); 7258 page_unlock(pp); 7259 } 7260 } 7261 bp = bp->next; 7262 } 7263 } 7264 mutex_exit(&page_capture_hash[i].pchh_mutex); 7265 } 7266 } 7267 7268 /* 7269 * Walk the page_capture_hash, trying to capture pages and also cleaning 7270 * up old entries which have expired.
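 *
 * As a sketch (paraphrasing the loop below, not adding behavior), each
 * bucket is handled roughly as:
 *
 *	mutex_enter(&bucket->pchh_mutex);
 *	splice the walked list (lists[1]) onto the head of the
 *	    active list (lists[0]);
 *	for each entry on lists[0]:
 *		free expired or deleted entries, clearing PR_CAPTURE;
 *		otherwise drop the mutex, try to capture the page, and
 *		    move the entry to lists[1] if the capture fails;
 *	mutex_exit(&bucket->pchh_mutex);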
7271 */ 7272 void 7273 page_capture_async() 7274 { 7275 page_t *pp; 7276 int i; 7277 int ret; 7278 page_capture_hash_bucket_t *bp1, *bp2; 7279 uint_t szc; 7280 uint_t flags; 7281 void *datap; 7282 uchar_t pri; 7283 7284 /* If there are outstanding pages to be captured, get to work */ 7285 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7286 for (pri = 0; pri < PC_NUM_PRI; pri++) { 7287 if (page_capture_hash[i].num_pages[pri] != 0) 7288 break; 7289 } 7290 if (pri == PC_NUM_PRI) 7291 continue; 7292 7293 /* Append list 1 to list 0 and then walk through list 0 */ 7294 mutex_enter(&page_capture_hash[i].pchh_mutex); 7295 bp1 = &page_capture_hash[i].lists[1]; 7296 bp2 = bp1->next; 7297 if (bp1 != bp2) { 7298 bp1->prev->next = page_capture_hash[i].lists[0].next; 7299 bp2->prev = &page_capture_hash[i].lists[0]; 7300 page_capture_hash[i].lists[0].next->prev = bp1->prev; 7301 page_capture_hash[i].lists[0].next = bp2; 7302 bp1->next = bp1; 7303 bp1->prev = bp1; 7304 } 7305 7306 /* lists[1] will be empty now */ 7307 7308 bp1 = page_capture_hash[i].lists[0].next; 7309 while (bp1 != &page_capture_hash[i].lists[0]) { 7310 /* Check expiration time */ 7311 if ((ddi_get_lbolt() > bp1->expires && 7312 bp1->expires != -1) || 7313 page_deleted(bp1->pp)) { 7314 page_capture_hash[i].lists[0].next = bp1->next; 7315 bp1->next->prev = 7316 &page_capture_hash[i].lists[0]; 7317 page_capture_hash[i].num_pages[bp1->pri]--; 7318 7319 /* 7320 * We can safely remove the PR_CAPTURE bit 7321 * without holding the EXCL lock on the page 7322 * as the PR_CAPTURE bit requires that the 7323 * page_capture_hash[].pchh_mutex be held 7324 * to modify it. 7325 */ 7326 page_clrtoxic(bp1->pp, PR_CAPTURE); 7327 mutex_exit(&page_capture_hash[i].pchh_mutex); 7328 kmem_free(bp1, sizeof (*bp1)); 7329 mutex_enter(&page_capture_hash[i].pchh_mutex); 7330 bp1 = page_capture_hash[i].lists[0].next; 7331 continue; 7332 } 7333 pp = bp1->pp; 7334 szc = bp1->szc; 7335 flags = bp1->flags; 7336 datap = bp1->datap; 7337 mutex_exit(&page_capture_hash[i].pchh_mutex); 7338 if (page_trylock(pp, SE_EXCL)) { 7339 ret = page_trycapture(pp, szc, 7340 flags | CAPTURE_ASYNC, datap); 7341 } else { 7342 ret = 1; /* move to walked hash */ 7343 } 7344 7345 if (ret != 0) { 7346 /* Move to walked hash */ 7347 (void) page_capture_move_to_walked(pp); 7348 } 7349 mutex_enter(&page_capture_hash[i].pchh_mutex); 7350 bp1 = page_capture_hash[i].lists[0].next; 7351 } 7352 7353 mutex_exit(&page_capture_hash[i].pchh_mutex); 7354 } 7355 } 7356 7357 /* 7358 * This function is called by the page_capture_thread, and is needed 7359 * in order to initiate aio cleanup, so that pages used in aio 7360 * will be unlocked and subsequently retired by page_capture_thread. 7361 */ 7362 static int 7363 do_aio_cleanup(void) 7364 { 7365 proc_t *procp; 7366 int (*aio_cleanup_dr_delete_memory)(proc_t *); 7367 int cleaned = 0; 7368 7369 if (modload("sys", "kaio") == -1) { 7370 cmn_err(CE_WARN, "do_aio_cleanup: cannot load kaio"); 7371 return (0); 7372 } 7373 /* 7374 * We use the aio_cleanup_dr_delete_memory function to 7375 * initiate the actual clean up; this function will wake 7376 * up the per-process aio_cleanup_thread.
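 * (Clarifying note: modgetsymvalue() yields 0 when the symbol cannot
 * be found, hence the NULL check on the function pointer below.)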
7377 */ 7378 aio_cleanup_dr_delete_memory = (int (*)(proc_t *)) 7379 modgetsymvalue("aio_cleanup_dr_delete_memory", 0); 7380 if (aio_cleanup_dr_delete_memory == NULL) { 7381 cmn_err(CE_WARN, 7382 "aio_cleanup_dr_delete_memory not found in kaio"); 7383 return (0); 7384 } 7385 mutex_enter(&pidlock); 7386 for (procp = practive; (procp != NULL); procp = procp->p_next) { 7387 mutex_enter(&procp->p_lock); 7388 if (procp->p_aio != NULL) { 7389 /* cleanup proc's outstanding kaio */ 7390 cleaned += (*aio_cleanup_dr_delete_memory)(procp); 7391 } 7392 mutex_exit(&procp->p_lock); 7393 } 7394 mutex_exit(&pidlock); 7395 return (cleaned); 7396 } 7397 7398 /* 7399 * helper function for page_capture_thread 7400 */ 7401 static void 7402 page_capture_handle_outstanding(void) 7403 { 7404 int ntry; 7405 7406 /* Reap pages before attempting to capture pages */ 7407 kmem_reap(); 7408 7409 if ((page_retire_pend_count() > page_retire_pend_kas_count()) && 7410 hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 7411 /* 7412 * Note: Purging only for platforms that support 7413 * ISM hat_pageunload() - mainly SPARC. On x86/x64 7414 * platforms ISM pages are SE_SHARED locked until destroyed. 7415 */ 7416 7417 /* disable and purge seg_pcache */ 7418 (void) seg_p_disable(); 7419 for (ntry = 0; ntry < pc_thread_retry; ntry++) { 7420 if (!page_retire_pend_count()) 7421 break; 7422 if (do_aio_cleanup()) { 7423 /* 7424 * allow the apps' cleanup threads 7425 * to run 7426 */ 7427 delay(pc_thread_shortwait); 7428 } 7429 page_capture_async(); 7430 } 7431 /* reenable seg_pcache */ 7432 seg_p_enable(); 7433 7434 /* completed what can be done; break out */ 7435 return; 7436 } 7437 7438 /* 7439 * For kernel pages and/or unsupported HAT_DYNAMIC_ISM_UNMAP, reap 7440 * and then attempt to capture. 7441 */ 7442 seg_preap(); 7443 page_capture_async(); 7444 } 7445 7446 /* 7447 * The page_capture_thread loops forever, looking to see if there are 7448 * pages still waiting to be captured. 7449 */ 7450 static void 7451 page_capture_thread(void) 7452 { 7453 callb_cpr_t c; 7454 int i; 7455 int high_pri_pages; 7456 int low_pri_pages; 7457 clock_t timeout; 7458 7459 CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr, "page_capture"); 7460 7461 mutex_enter(&pc_thread_mutex); 7462 for (;;) { 7463 high_pri_pages = 0; 7464 low_pri_pages = 0; 7465 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7466 high_pri_pages += 7467 page_capture_hash[i].num_pages[PC_PRI_HI]; 7468 low_pri_pages += 7469 page_capture_hash[i].num_pages[PC_PRI_LO]; 7470 } 7471 7472 timeout = pc_thread_longwait; 7473 if (high_pri_pages != 0) { 7474 timeout = pc_thread_shortwait; 7475 page_capture_handle_outstanding(); 7476 } else if (low_pri_pages != 0) { 7477 page_capture_async(); 7478 } 7479 CALLB_CPR_SAFE_BEGIN(&c); 7480 (void) cv_reltimedwait(&pc_cv, &pc_thread_mutex, 7481 timeout, TR_CLOCK_TICK); 7482 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex); 7483 } 7484 /*NOTREACHED*/ 7485 } 7486 /* 7487 * Attempt to locate a bucket that has enough pages to satisfy the request. 7488 * The initial check is done without the lock to avoid unneeded contention. 7489 * The function returns 1 if enough pages were found, else 0 if it could not 7490 * find enough pages in a bucket. 7491 */ 7492 static int 7493 pcf_decrement_bucket(pgcnt_t npages) 7494 { 7495 struct pcf *p; 7496 struct pcf *q; 7497 int i; 7498 7499 p = &pcf[PCF_INDEX()]; 7500 q = &pcf[pcf_fanout]; 7501 for (i = 0; i < pcf_fanout; i++) { 7502 if (p->pcf_count > npages) { 7503 /* 7504 * a good one to try.
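 * (The unlocked pcf_count test above is only a hint; the count is
 * re-checked under pcf_lock below before it is decremented.)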
7505 */ 7506 mutex_enter(&p->pcf_lock); 7507 if (p->pcf_count > npages) { 7508 p->pcf_count -= (uint_t)npages; 7509 /* 7510 * freemem is not protected by any lock. 7511 * Thus, we cannot have any assertion 7512 * containing freemem here. 7513 */ 7514 freemem -= npages; 7515 mutex_exit(&p->pcf_lock); 7516 return (1); 7517 } 7518 mutex_exit(&p->pcf_lock); 7519 } 7520 p++; 7521 if (p >= q) { 7522 p = pcf; 7523 } 7524 } 7525 return (0); 7526 } 7527 7528 /* 7529 * Arguments: 7530 * pcftotal_ret: If the value is not NULL and we have walked all the 7531 * buckets but did not find enough pages then it will 7532 * be set to the total number of pages in all the pcf 7533 * buckets. 7534 * npages: Is the number of pages we have been requested to 7535 * find. 7536 * unlock: If set to 0 we will leave the buckets locked if the 7537 * requested number of pages is not found. 7538 * 7539 * Go and try to satisfy the page request from any number of buckets. 7540 * This can be a very expensive operation as we have to lock the buckets 7541 * we are checking (and keep them locked), starting at bucket 0. 7542 * 7543 * The function returns 1 if enough pages were found, else 0 if it could not 7544 * find enough pages in the buckets. 7545 * 7546 */ 7547 static int 7548 pcf_decrement_multiple(pgcnt_t *pcftotal_ret, pgcnt_t npages, int unlock) 7549 { 7550 struct pcf *p; 7551 pgcnt_t pcftotal; 7552 int i; 7553 7554 p = pcf; 7555 /* try to collect pages from several pcf bins */ 7556 for (pcftotal = 0, i = 0; i < pcf_fanout; i++) { 7557 mutex_enter(&p->pcf_lock); 7558 pcftotal += p->pcf_count; 7559 if (pcftotal >= npages) { 7560 /* 7561 * Wow! There are enough pages lying around 7562 * to satisfy the request. Do the accounting, 7563 * drop the locks we acquired, and go back. 7564 * 7565 * freemem is not protected by any lock. So, 7566 * we cannot have any assertion containing 7567 * freemem. 7568 */ 7569 freemem -= npages; 7570 while (p >= pcf) { 7571 if (p->pcf_count <= npages) { 7572 npages -= p->pcf_count; 7573 p->pcf_count = 0; 7574 } else { 7575 p->pcf_count -= (uint_t)npages; 7576 npages = 0; 7577 } 7578 mutex_exit(&p->pcf_lock); 7579 p--; 7580 } 7581 ASSERT(npages == 0); 7582 return (1); 7583 } 7584 p++; 7585 } 7586 if (unlock) { 7587 /* failed to collect pages - release the locks */ 7588 while (--p >= pcf) { 7589 mutex_exit(&p->pcf_lock); 7590 } 7591 } 7592 if (pcftotal_ret != NULL) 7593 *pcftotal_ret = pcftotal; 7594 return (0); 7595 } 7596