/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 * Copyright (c) 2015, 2016 by Delphix. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2021 Oxide Computer Company
 * Copyright 2024 MNX Cloud, Inc.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - physical page management.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vm.h>
#include <sys/vtrace.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/condvar_impl.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/strlog.h>
#include <sys/mman.h>
#include <sys/ontrap.h>
#include <sys/lgrp.h>
#include <sys/vfs.h>

#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/pvn.h>
#include <vm/seg_kmem.h>
#include <vm/vm_dep.h>
#include <sys/vm_usage.h>
#include <fs/fs_subr.h>
#include <sys/ddi.h>
#include <sys/modctl.h>

static pgcnt_t max_page_get;	/* max page_get request size in pages */
pgcnt_t total_pages = 0;	/* total number of pages (used by /proc) */
volatile uint64_t n_throttle = 0;

/*
 * freemem_lock protects all freemem variables: availrmem. It also
 * protects the globals that track availrmem changes for an accurate
 * kernel footprint calculation. See below for an explanation of these
 * globals.
 */
kmutex_t freemem_lock;
pgcnt_t availrmem;
pgcnt_t availrmem_initial;

/*
 * These globals track availrmem changes to get a more accurate
 * estimate of the kernel size. Historically pp_kernel is used for
 * kernel size and is based on availrmem. But availrmem is adjusted
 * for all locked pages in the system, not just for kernel locked pages.
 * These new counters will track the pages locked through segvn and
 * by explicit user locking.
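 * (For illustration: mlock(2)'ing a 1 MB region on a system with 4 KB
 * pages raises pages_locked by 256 pages, and availrmem is adjusted by
 * the same amount; the 4 KB page size is an assumption here.)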
 *
 * pages_locked : How many pages are locked because of user specified
 * locking through mlock or plock.
 *
 * pages_useclaim, pages_claimed : These two variables track the
 * claim adjustments because of the protection changes on a segvn segment.
 *
 * All these globals are protected by the same lock which protects availrmem.
 */
pgcnt_t pages_locked = 0;
pgcnt_t pages_useclaim = 0;
pgcnt_t pages_claimed = 0;


/*
 * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
 */
static kmutex_t new_freemem_lock;
static uint_t freemem_wait;	/* someone waiting for freemem */
static kcondvar_t freemem_cv;

/*
 * The logical page free list is maintained as two lists, the 'free'
 * and the 'cache' lists.
 * The free list contains those pages that should be reused first.
 *
 * The implementation of the lists is machine dependent.
 * page_get_freelist(), page_get_cachelist(),
 * page_list_sub(), and page_list_add()
 * form the interface to the machine dependent implementation.
 *
 * Pages with p_free set are on the cache list.
 * Pages with p_free and p_age set are on the free list.
 *
 * A page may be locked while on either list.
 */

/*
 * Free list accounting.
 *
 * Spread the count of pages on the page free and page cache lists
 * across several buckets. If there were just one value, it would have
 * to live under just one lock, and the lock contention and cache
 * traffic would be a real bother.
 *
 * When we acquire and then drop a single pcf lock
 * we can start in the middle of the array of pcf structures.
 * If we acquire more than one pcf lock at a time, we need to
 * start at the front to avoid deadlocking.
 *
 * pcf_count holds the number of pages in each pool.
 *
 * pcf_block is set when page_create_get_something() has asked the
 * PSM page freelist and page cachelist routines without specifying
 * a color and nothing came back. This is used to block anything
 * else from moving pages from one list to the other while the
 * lists are searched again. If a page is freed while pcf_block is
 * set, then pcf_reserve is incremented. pcgs_unblock() takes care
 * of clearing pcf_block, doing the wakeups, etc.
 */

#define	MAX_PCF_FANOUT NCPU
static uint_t pcf_fanout = 1;	/* Will get changed at boot time */
static uint_t pcf_fanout_mask = 0;

struct pcf {
	kmutex_t	pcf_lock;	/* protects the structure */
	uint_t		pcf_count;	/* page count */
	uint_t		pcf_wait;	/* number of waiters */
	uint_t		pcf_block;	/* pcgs flag to page_free() */
	uint_t		pcf_reserve;	/* pages freed after pcf_block set */
	uint_t		pcf_fill[10];	/* to line up on the caches */
};

/*
 * PCF_INDEX hash needs to be dynamic (every so often the hash changes where
 * it will hash the cpu to). This is done to prevent a drain condition
 * from happening. This drain condition will occur when pcf_count decrement
 * occurs on cpu A and the increment of pcf_count always occurs on cpu B. An
 * example of this shows up with device interrupts. The dma buffer is allocated
 * by the cpu requesting the IO thus the pcf_count is decremented based on that.
 * When the memory is returned by the interrupt thread, the pcf_count will be
 * incremented based on the cpu servicing the interrupt.
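 * Mixing the (shifted) randtick() value into PCF_INDEX below re-seeds the
 * cpu-to-bucket mapping over time, so the decrements and increments
 * eventually spread across all buckets instead of persistently draining
 * one of them.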
 */
static struct pcf pcf[MAX_PCF_FANOUT];
#define	PCF_INDEX() ((int)(((long)CPU->cpu_seqid) + \
	(randtick() >> 24)) & (pcf_fanout_mask))

static int pcf_decrement_bucket(pgcnt_t);
static int pcf_decrement_multiple(pgcnt_t *, pgcnt_t, int);

kmutex_t	pcgs_lock;		/* serializes page_create_get_ */
kmutex_t	pcgs_cagelock;		/* serializes NOSLEEP cage allocs */
kmutex_t	pcgs_wait_lock;		/* used for delay in pcgs */
static kcondvar_t	pcgs_cv;	/* cv for delay in pcgs */

#ifdef VM_STATS

/*
 * No locks, but so what, they are only statistics.
 */

static struct page_tcnt {
	int	pc_free_cache;		/* free's into cache list */
	int	pc_free_dontneed;	/* free's with dontneed */
	int	pc_free_pageout;	/* free's from pageout */
	int	pc_free_free;		/* free's into free list */
	int	pc_free_pages;		/* free's into large page free list */
	int	pc_destroy_pages;	/* large page destroy's */
	int	pc_get_cache;		/* get's from cache list */
	int	pc_get_free;		/* get's from free list */
	int	pc_reclaim;		/* reclaim's */
	int	pc_abortfree;		/* abort's of free pages */
	int	pc_find_hit;		/* find's that find page */
	int	pc_find_miss;		/* find's that don't find page */
	int	pc_destroy_free;	/* # of free pages destroyed */
#define	PC_HASH_CNT	(4*PAGE_HASHAVELEN)
	int	pc_find_hashlen[PC_HASH_CNT+1];
	int	pc_addclaim_pages;
	int	pc_subclaim_pages;
	int	pc_free_replacement_page[2];
	int	pc_try_demote_pages[6];
	int	pc_demote_pages[2];
} pagecnt;

uint_t	hashin_count;
uint_t	hashin_not_held;
uint_t	hashin_already;

uint_t	hashout_count;
uint_t	hashout_not_held;

uint_t	page_create_count;
uint_t	page_create_not_enough;
uint_t	page_create_not_enough_again;
uint_t	page_create_zero;
uint_t	page_create_hashout;
uint_t	page_create_page_lock_failed;
uint_t	page_create_trylock_failed;
uint_t	page_create_found_one;
uint_t	page_create_hashin_failed;
uint_t	page_create_dropped_phm;

uint_t	page_create_new;
uint_t	page_create_exists;
uint_t	page_create_putbacks;
uint_t	page_create_overshoot;

uint_t	page_reclaim_zero;
uint_t	page_reclaim_zero_locked;

uint_t	page_rename_exists;
uint_t	page_rename_count;

uint_t	page_lookup_cnt[20];
uint_t	page_lookup_nowait_cnt[10];
uint_t	page_find_cnt;
uint_t	page_exists_cnt;
uint_t	page_exists_forreal_cnt;
uint_t	page_lookup_dev_cnt;
uint_t	get_cachelist_cnt;
uint_t	page_create_cnt[10];
uint_t	alloc_pages[9];
uint_t	page_exphcontg[19];
uint_t	page_create_large_cnt[10];

#endif

static inline page_t *
page_hash_search(ulong_t index, vnode_t *vnode, u_offset_t off)
{
	uint_t mylen = 0;
	page_t *page;

	for (page = page_hash[index]; page; page = page->p_hash, mylen++)
		if (page->p_vnode == vnode && page->p_offset == off)
			break;

#ifdef	VM_STATS
	if (page != NULL)
		pagecnt.pc_find_hit++;
	else
		pagecnt.pc_find_miss++;

	pagecnt.pc_find_hashlen[MIN(mylen, PC_HASH_CNT)]++;
#endif

	return (page);
}


#ifdef DEBUG
#define	MEMSEG_SEARCH_STATS
#endif

#ifdef MEMSEG_SEARCH_STATS
struct memseg_stats {
	uint_t nsearch;
	uint_t nlastwon;
	uint_t nhashwon;
	uint_t nnotfound;
} memseg_stats;

#define	MEMSEG_STAT_INCR(v) \
	atomic_inc_32(&memseg_stats.v)
#else
#define	MEMSEG_STAT_INCR(x)
#endif

struct memseg *memsegs;		/* list of memory segments */

/*
 * /etc/system tunable to control the large page allocation heuristic.
 *
 * Setting to LPAP_LOCAL will heavily prefer the local lgroup over remote lgroup
 * for large page allocation requests. If a large page is not readily
 * available on the local freelists we will go through additional effort
 * to create a large page, potentially moving smaller pages around to coalesce
 * larger pages in the local lgroup.
 * The default value of LPAP_DEFAULT will go to remote freelists if large pages
 * are not readily available in the local lgroup.
 */
enum lpap {
	LPAP_DEFAULT,	/* default large page allocation policy */
	LPAP_LOCAL	/* local large page allocation policy */
};

enum lpap lpg_alloc_prefer = LPAP_DEFAULT;

static void page_init_mem_config(void);
static int page_do_hashin(page_t *, vnode_t *, u_offset_t);
static void page_do_hashout(page_t *);
static void page_capture_init();
int page_capture_take_action(page_t *, uint_t, void *);

static void page_demote_vp_pages(page_t *);


void
pcf_init(void)
{
	if (boot_ncpus != -1) {
		pcf_fanout = boot_ncpus;
	} else {
		pcf_fanout = max_ncpus;
	}
#ifdef sun4v
	/*
	 * Force at least 4 buckets if possible for sun4v.
	 */
	pcf_fanout = MAX(pcf_fanout, 4);
#endif /* sun4v */

	/*
	 * Round up to the nearest power of 2.
	 */
	pcf_fanout = MIN(pcf_fanout, MAX_PCF_FANOUT);
	if (!ISP2(pcf_fanout)) {
		pcf_fanout = 1 << highbit(pcf_fanout);

		if (pcf_fanout > MAX_PCF_FANOUT) {
			pcf_fanout = 1 << (highbit(MAX_PCF_FANOUT) - 1);
		}
	}
	pcf_fanout_mask = pcf_fanout - 1;
}

/*
 * vm subsystem related initialization
 */
void
vm_init(void)
{
	boolean_t callb_vm_cpr(void *, int);

	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
	page_init_mem_config();
	page_retire_init();
	vm_usage_init();
	page_capture_init();
}

/*
 * This function is called at startup and when memory is added or deleted.
 */
void
init_pages_pp_maximum()
{
	static pgcnt_t p_min;
	static pgcnt_t pages_pp_maximum_startup;
	static pgcnt_t avrmem_delta;
	static int init_done;
	static int user_set;	/* true if set in /etc/system */

	if (init_done == 0) {

		/* If the user specified a value, save it */
		if (pages_pp_maximum != 0) {
			user_set = 1;
			pages_pp_maximum_startup = pages_pp_maximum;
		}

		/*
		 * The first-time setting of pages_pp_maximum is based
		 * on the value of availrmem just after the start-up
		 * allocations. To preserve this relationship at run
		 * time, use a delta from availrmem_initial.
		 */
		ASSERT(availrmem_initial >= availrmem);
		avrmem_delta = availrmem_initial - availrmem;

		/* The allowable floor of pages_pp_maximum */
		p_min = tune.t_minarmem + 100;

		/* Make sure we don't come through here again. */
		init_done = 1;
	}
	/*
	 * Determine pages_pp_maximum, the number of currently available
	 * pages (availrmem) that can't be `locked'. If not set by
	 * the user, we set it to 4% of the currently available memory
	 * plus 4MB.
	 * But we also insist that it be greater than tune.t_minarmem;
	 * otherwise a process could lock down a lot of memory, get swapped
	 * out, and never have enough to get swapped back in.
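	 *
	 * Worked example (assuming 4 KB pages): with 2,000,000 available
	 * pages, roughly 8 GB, the default is 2,000,000 / 25 + btop(4MB) =
	 * 80,000 + 1,024 = 81,024 lockable pages, about 316 MB.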
	 */
	if (user_set)
		pages_pp_maximum = pages_pp_maximum_startup;
	else
		pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25)
		    + btop(4 * 1024 * 1024);

	if (pages_pp_maximum <= p_min) {
		pages_pp_maximum = p_min;
	}
}

/*
 * In the past, we limited the maximum pages that could be gotten to essentially
 * 1/2 of the total pages on the system. However, this is too conservative for
 * some cases, such as hosting a large virtual machine that needs to use a
 * significant portion of the system's memory. In practice,
 * allowing more than 1/2 of the total pages is fine, but becomes problematic
 * as we approach or exceed 75% of the pages on the system. Thus, we limit the
 * maximum to 23/32 of the total pages, which is ~72%.
 */
void
set_max_page_get(pgcnt_t target_total_pages)
{
	max_page_get = (target_total_pages >> 5) * 23;
	ASSERT3U(max_page_get, >, 0);
}

pgcnt_t
get_max_page_get()
{
	return (max_page_get);
}

static pgcnt_t pending_delete;

/*ARGSUSED*/
static void
page_mem_config_post_add(
	void *arg,
	pgcnt_t delta_pages)
{
	set_max_page_get(total_pages - pending_delete);
	init_pages_pp_maximum();
}

/*ARGSUSED*/
static int
page_mem_config_pre_del(
	void *arg,
	pgcnt_t delta_pages)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	return (0);
}

/*ARGSUSED*/
static void
page_mem_config_post_del(
	void *arg,
	pgcnt_t delta_pages,
	int cancelled)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	if (!cancelled)
		init_pages_pp_maximum();
}

static kphysm_setup_vector_t page_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	page_mem_config_post_add,
	page_mem_config_pre_del,
	page_mem_config_post_del,
};

static void
page_init_mem_config(void)
{
	int ret;

	ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL);
	ASSERT(ret == 0);
}

/*
 * Evenly spread out the PCF counters for large free pages
 */
static void
page_free_large_ctr(pgcnt_t npages)
{
	static struct pcf *p = pcf;
	pgcnt_t lump;

	freemem += npages;

	lump = roundup(npages, pcf_fanout) / pcf_fanout;

	while (npages > 0) {

		ASSERT(!p->pcf_block);

		if (lump < npages) {
			p->pcf_count += (uint_t)lump;
			npages -= lump;
		} else {
			p->pcf_count += (uint_t)npages;
			npages = 0;
		}

		ASSERT(!p->pcf_wait);

		if (++p > &pcf[pcf_fanout - 1])
			p = pcf;
	}

	ASSERT(npages == 0);
}

/*
 * Add a physical chunk of memory to the system free lists during startup.
 * Platform specific startup() allocates the memory for the page structs.
 *
 * num	- number of page structures
 * pnum	- page number (pfn) to be associated with the first page.
 *
 * Since we are doing this during startup (i.e., single threaded), we will
 * use shortcut routines to avoid any locking overhead while putting all
 * these pages on the freelists.
 *
 * NOTE: Any changes performed to page_free(), must also be performed to
 *	 add_physmem() since this is how we initialize all page_t's at
 *	 boot time.
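 *
 * As it walks the page_t array, add_physmem() also coalesces properly
 * aligned runs of constituent pages into full large pages before freeing
 * them; see the large-page handling in the loop below.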
 */
void
add_physmem(
	page_t *pp,
	pgcnt_t num,
	pfn_t pnum)
{
	page_t *root = NULL;
	uint_t szc = page_num_pagesizes() - 1;
	pgcnt_t large = page_get_pagecnt(szc);
	pgcnt_t cnt = 0;

	TRACE_2(TR_FAC_VM, TR_PAGE_INIT,
	    "add_physmem:pp %p num %lu", pp, num);

	/*
	 * Update the maximum page_get request size now that we have
	 * more page structs; see set_max_page_get().
	 */
	total_pages += num;
	set_max_page_get(total_pages);

	PLCNT_MODIFY_MAX(pnum, (long)num);

	/*
	 * The physical space for the pages array
	 * representing ram pages has already been
	 * allocated. Here we initialize each lock
	 * in the page structure, and put each on
	 * the free list.
	 */
	for (; num; pp++, pnum++, num--) {

		/*
		 * This needs to fill in the page number
		 * and do any other arch specific initialization.
		 */
		add_physmem_cb(pp, pnum);

		pp->p_lckcnt = 0;
		pp->p_cowcnt = 0;
		pp->p_slckcnt = 0;

		/*
		 * Initialize the page lock as unlocked, since nobody
		 * can see or access this page yet.
		 */
		pp->p_selock = 0;

		/*
		 * Initialize IO lock
		 */
		page_iolock_init(pp);

		/*
		 * initialize other fields in the page_t
		 */
		PP_SETFREE(pp);
		page_clr_all_props(pp);
		PP_SETAGED(pp);
		pp->p_offset = (u_offset_t)-1;
		pp->p_next = pp;
		pp->p_prev = pp;

		/*
		 * Simple case: System doesn't support large pages.
		 */
		if (szc == 0) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Handle unaligned pages, we collect them up onto
		 * the root page until we have a full large page.
		 */
		if (!IS_P2ALIGNED(pnum, large)) {

			/*
			 * If not in a large page,
			 * just free as small page.
			 */
			if (root == NULL) {
				pp->p_szc = 0;
				page_free_at_startup(pp);
				continue;
			}

			/*
			 * Link a constituent page into the large page.
			 */
			pp->p_szc = szc;
			page_list_concat(&root, &pp);

			/*
			 * When large page is fully formed, free it.
			 */
			if (++cnt == large) {
				page_free_large_ctr(cnt);
				page_list_add_pages(root, PG_LIST_ISINIT);
				root = NULL;
				cnt = 0;
			}
			continue;
		}

		/*
		 * At this point we have a page number which
		 * is aligned. We assert that we aren't already
		 * in a different large page.
		 */
		ASSERT(IS_P2ALIGNED(pnum, large));
		ASSERT(root == NULL && cnt == 0);

		/*
		 * If insufficient number of pages left to form
		 * a large page, just free the small page.
		 */
		if (num < large) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Otherwise start a new large page.
		 */
		pp->p_szc = szc;
		cnt++;
		root = pp;
	}
	ASSERT(root == NULL && cnt == 0);
}

/*
 * Find a page representing the specified [vp, offset].
 * If we find the page but it is in transit coming in,
 * it will have an "exclusive" lock and we wait for
 * the i/o to complete. A page found on the free list
 * is always reclaimed and then locked. On success, the page
 * is locked, its data is valid and it isn't on the free
 * list, while a NULL is returned if the page doesn't exist.
 */
page_t *
page_lookup(vnode_t *vp, u_offset_t off, se_t se)
{
	return (page_lookup_create(vp, off, se, NULL, NULL, 0));
}

/*
 * Find a page representing the specified [vp, offset].
 * We either return the one we found or, if passed in,
 * create one with identity of [vp, offset] of the
 * pre-allocated page. If we find an existing page but it is
 * in transit coming in, it will have an "exclusive" lock
 * and we wait for the i/o to complete. A page found on
 * the free list is always reclaimed and then locked.
 * On success, the page is locked, its data is valid and
 * it isn't on the free list, while a NULL is returned
 * if the page doesn't exist and newpp is NULL.
 */
page_t *
page_lookup_create(
	vnode_t *vp,
	u_offset_t off,
	se_t se,
	page_t *newpp,
	spgcnt_t *nrelocp,
	int flags)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		hash_locked;
	uint_t		es;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_lookup_cnt[0]);
	ASSERT(newpp ? PAGE_EXCL(newpp) : 1);

	/*
	 * Acquire the appropriate page hash lock since
	 * we have to search the hash list. Pages that
	 * hash to this list can't change identity while
	 * this lock is held.
	 */
	hash_locked = 0;
	index = PAGE_HASH_FUNC(vp, off);
	phm = NULL;
top:
	pp = page_hash_search(index, vp, off);
	if (pp != NULL) {
		VM_STAT_ADD(page_lookup_cnt[1]);
		es = (newpp != NULL) ? 1 : 0;
		es |= flags;
		if (!hash_locked) {
			VM_STAT_ADD(page_lookup_cnt[2]);
			if (!page_try_reclaim_lock(pp, se, es)) {
				/*
				 * On a miss, acquire the phm. Then
				 * next time, page_lock() will be called,
				 * causing a wait if the page is busy.
				 * Just looping with page_trylock() would
				 * get pretty boring.
				 */
				VM_STAT_ADD(page_lookup_cnt[3]);
				phm = PAGE_HASH_MUTEX(index);
				mutex_enter(phm);
				hash_locked = 1;
				goto top;
			}
		} else {
			VM_STAT_ADD(page_lookup_cnt[4]);
			if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) {
				VM_STAT_ADD(page_lookup_cnt[5]);
				goto top;
			}
		}

		/*
		 * Since `pp' is locked it can not change identity now.
		 * Reconfirm we locked the correct page.
		 *
		 * Both the p_vnode and p_offset *must* be cast volatile
		 * to force a reload of their values: The page_hash_search
		 * function will have stuffed p_vnode and p_offset into
		 * registers before calling page_trylock(); another thread,
		 * actually holding the hash lock, could have changed the
		 * page's identity in memory, but our registers would not
		 * be changed, fooling the reconfirmation. If the hash
		 * lock was held during the search, the casting would
		 * not be needed.
		 */
		VM_STAT_ADD(page_lookup_cnt[6]);
		if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
		    ((volatile u_offset_t)(pp->p_offset) != off)) {
			VM_STAT_ADD(page_lookup_cnt[7]);
			if (hash_locked) {
				panic("page_lookup_create: lost page %p",
				    (void *)pp);
				/*NOTREACHED*/
			}
			page_unlock(pp);
			phm = PAGE_HASH_MUTEX(index);
			mutex_enter(phm);
			hash_locked = 1;
			goto top;
		}

		/*
		 * If page_trylock() was called, then pp may still be on
		 * the cachelist (can't be on the free list, it would not
		 * have been found in the search). If it is on the
		 * cachelist it must be pulled now. To pull the page from
		 * the cachelist, it must be exclusively locked.
		 *
		 * The other big difference between page_trylock() and
		 * page_lock(), is that page_lock() will pull the
		 * page from whatever free list (the cache list in this
		 * case) the page is on. If page_trylock() was used
		 * above, then we have to do the reclaim ourselves.
		 */
		if ((!hash_locked) && (PP_ISFREE(pp))) {
			ASSERT(PP_ISAGED(pp) == 0);
			VM_STAT_ADD(page_lookup_cnt[8]);

			/*
			 * page_reclaim will ensure that we
			 * have this page exclusively.
			 */

			if (!page_reclaim(pp, NULL)) {
				/*
				 * page_reclaim dropped whatever lock
				 * we held.
				 */
				VM_STAT_ADD(page_lookup_cnt[9]);
				phm = PAGE_HASH_MUTEX(index);
				mutex_enter(phm);
				hash_locked = 1;
				goto top;
			} else if (se == SE_SHARED && newpp == NULL) {
				VM_STAT_ADD(page_lookup_cnt[10]);
				page_downgrade(pp);
			}
		}

		if (hash_locked) {
			mutex_exit(phm);
		}

		if (newpp != NULL && pp->p_szc < newpp->p_szc &&
		    PAGE_EXCL(pp) && nrelocp != NULL) {
			ASSERT(nrelocp != NULL);
			(void) page_relocate(&pp, &newpp, 1, 1, nrelocp,
			    NULL);
			if (*nrelocp > 0) {
				VM_STAT_COND_ADD(*nrelocp == 1,
				    page_lookup_cnt[11]);
				VM_STAT_COND_ADD(*nrelocp > 1,
				    page_lookup_cnt[12]);
				pp = newpp;
				se = SE_EXCL;
			} else {
				if (se == SE_SHARED) {
					page_downgrade(pp);
				}
				VM_STAT_ADD(page_lookup_cnt[13]);
			}
		} else if (newpp != NULL && nrelocp != NULL) {
			if (PAGE_EXCL(pp) && se == SE_SHARED) {
				page_downgrade(pp);
			}
			VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc,
			    page_lookup_cnt[14]);
			VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc,
			    page_lookup_cnt[15]);
			VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc,
			    page_lookup_cnt[16]);
		} else if (newpp != NULL && PAGE_EXCL(pp)) {
			se = SE_EXCL;
		}
	} else if (!hash_locked) {
		VM_STAT_ADD(page_lookup_cnt[17]);
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		hash_locked = 1;
		goto top;
	} else if (newpp != NULL) {
		/*
		 * If we have a preallocated page then
		 * insert it now and basically behave like
		 * page_create.
		 */
		VM_STAT_ADD(page_lookup_cnt[18]);
		/*
		 * Since we hold the page hash mutex and
		 * just searched for this page, page_hashin
		 * had better not fail. If it does, that
		 * means some thread did not follow the
		 * page hash mutex rules. Panic now and
		 * get it over with. As usual, go down
		 * holding all the locks.
		 */
		ASSERT(MUTEX_HELD(phm));
		if (!page_hashin(newpp, vp, off, phm)) {
			ASSERT(MUTEX_HELD(phm));
			panic("page_lookup_create: hashin failed %p %p %llx %p",
			    (void *)newpp, (void *)vp, off, (void *)phm);
			/*NOTREACHED*/
		}
		ASSERT(MUTEX_HELD(phm));
		mutex_exit(phm);
		phm = NULL;
		page_set_props(newpp, P_REF);
		page_io_lock(newpp);
		pp = newpp;
		se = SE_EXCL;
	} else {
		VM_STAT_ADD(page_lookup_cnt[19]);
		mutex_exit(phm);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1);

	return (pp);
}

/*
 * Search the hash list for the page representing the
 * specified [vp, offset] and return it locked. Skip
 * free pages and pages that cannot be locked as requested.
 * Used while attempting to kluster pages.
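 * Because this variant never blocks, a NULL return simply tells the
 * klustering caller to leave that page out of the cluster.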
 */
page_t *
page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		locked;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_lookup_nowait_cnt[0]);

	index = PAGE_HASH_FUNC(vp, off);
	pp = page_hash_search(index, vp, off);
	locked = 0;
	if (pp == NULL) {
top:
		VM_STAT_ADD(page_lookup_nowait_cnt[1]);
		locked = 1;
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		pp = page_hash_search(index, vp, off);
	}

	if (pp == NULL || PP_ISFREE(pp)) {
		VM_STAT_ADD(page_lookup_nowait_cnt[2]);
		pp = NULL;
	} else {
		if (!page_trylock(pp, se)) {
			VM_STAT_ADD(page_lookup_nowait_cnt[3]);
			pp = NULL;
		} else {
			VM_STAT_ADD(page_lookup_nowait_cnt[4]);
			/*
			 * See the comment in page_lookup()
			 */
			if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
			    ((u_offset_t)(pp->p_offset) != off)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[5]);
				if (locked) {
					panic("page_lookup_nowait %p",
					    (void *)pp);
					/*NOTREACHED*/
				}
				page_unlock(pp);
				goto top;
			}
			if (PP_ISFREE(pp)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[6]);
				page_unlock(pp);
				pp = NULL;
			}
		}
	}
	if (locked) {
		VM_STAT_ADD(page_lookup_nowait_cnt[7]);
		mutex_exit(phm);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	return (pp);
}

/*
 * Search the hash list for a page with the specified [vp, off]
 * that is known to exist and is already locked. This routine
 * is typically used by segment SOFTUNLOCK routines.
 */
page_t *
page_find(vnode_t *vp, u_offset_t off)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_find_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	pp = page_hash_search(index, vp, off);
	mutex_exit(phm);

	ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
	return (pp);
}

/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system. Obviously this should
 * only be considered as a hint since nothing prevents the
 * page from disappearing or appearing immediately after
 * the return from this routine. Subsequently, we don't
 * even bother to lock the list.
 */
page_t *
page_exists(vnode_t *vp, u_offset_t off)
{
	ulong_t index;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_exists_cnt);

	index = PAGE_HASH_FUNC(vp, off);

	return (page_hash_search(index, vp, off));
}

/*
 * Determine if physically contiguous pages exist for the [vp, off] -
 * [vp, off + page_size(szc)) range. If they exist and ppa is not NULL,
 * fill the ppa array with these pages locked SHARED, reclaiming pages
 * from the freelist if necessary. Return 1 if contiguous pages exist
 * and 0 otherwise.
 *
 * If we fail to lock the pages, we still return 1 when they exist and
 * are contiguous, but in that case the return value is just a hint and
 * the ppa array won't be filled. The caller should initialize ppa[0]
 * to NULL to distinguish the two outcomes.
 *
 * Returns 0 if the pages don't exist or aren't physically contiguous.
 *
 * This routine doesn't work for anonymous (swapfs) pages.
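 *
 * A hypothetical caller, sketched for illustration only (NPAGES stands
 * for page_get_pagecnt(szc)):
 *
 *	page_t *ppa[NPAGES + 1];
 *	ppa[0] = NULL;
 *	if (page_exists_physcontig(vp, off, szc, ppa)) {
 *		if (ppa[0] != NULL) {
 *			... constituent pages are locked SE_SHARED ...
 *		} else {
 *			... pages exist but were not locked; hint only ...
 *		}
 *	}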
 */
int
page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[])
{
	pgcnt_t pages;
	pfn_t pfn;
	page_t *rootpp;
	pgcnt_t i;
	pgcnt_t j;
	u_offset_t save_off = off;
	ulong_t index;
	kmutex_t *phm;
	page_t *pp;
	uint_t pszc;
	int loopcnt = 0;

	ASSERT(szc != 0);
	ASSERT(vp != NULL);
	ASSERT(!IS_SWAPFSVP(vp));
	ASSERT(!VN_ISKAS(vp));

again:
	if (++loopcnt > 3) {
		VM_STAT_ADD(page_exphcontg[0]);
		return (0);
	}

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	pp = page_hash_search(index, vp, off);
	mutex_exit(phm);

	VM_STAT_ADD(page_exphcontg[1]);

	if (pp == NULL) {
		VM_STAT_ADD(page_exphcontg[2]);
		return (0);
	}

	pages = page_get_pagecnt(szc);
	rootpp = pp;
	pfn = rootpp->p_pagenum;

	if ((pszc = pp->p_szc) >= szc && ppa != NULL) {
		VM_STAT_ADD(page_exphcontg[3]);
		if (!page_trylock(pp, SE_SHARED)) {
			VM_STAT_ADD(page_exphcontg[4]);
			return (1);
		}
		/*
		 * Also check whether p_pagenum was modified by DR.
		 */
		if (pp->p_szc != pszc || pp->p_vnode != vp ||
		    pp->p_offset != off || pp->p_pagenum != pfn) {
			VM_STAT_ADD(page_exphcontg[5]);
			page_unlock(pp);
			off = save_off;
			goto again;
		}
		/*
		 * Since szc was non-zero and the vnode and offset still
		 * matched after we locked the page, it can't become free
		 * on us.
		 */
		ASSERT(!PP_ISFREE(pp));
		if (!IS_P2ALIGNED(pfn, pages)) {
			page_unlock(pp);
			return (0);
		}
		ppa[0] = pp;
		pp++;
		off += PAGESIZE;
		pfn++;
		for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
			if (!page_trylock(pp, SE_SHARED)) {
				VM_STAT_ADD(page_exphcontg[6]);
				pp--;
				while (i-- > 0) {
					page_unlock(pp);
					pp--;
				}
				ppa[0] = NULL;
				return (1);
			}
			if (pp->p_szc != pszc) {
				VM_STAT_ADD(page_exphcontg[7]);
				page_unlock(pp);
				pp--;
				while (i-- > 0) {
					page_unlock(pp);
					pp--;
				}
				ppa[0] = NULL;
				off = save_off;
				goto again;
			}
			/*
			 * szc is the same as for the previous already
			 * locked pages with the right identity. Since this
			 * page had the correct szc after we locked it, it
			 * can't get freed or destroyed and therefore must
			 * have the expected identity.
			 */
			ASSERT(!PP_ISFREE(pp));
			if (pp->p_vnode != vp ||
			    pp->p_offset != off) {
				panic("page_exists_physcontig: "
				    "large page identity doesn't match");
			}
			ppa[i] = pp;
			ASSERT(pp->p_pagenum == pfn);
		}
		VM_STAT_ADD(page_exphcontg[8]);
		ppa[pages] = NULL;
		return (1);
	} else if (pszc >= szc) {
		VM_STAT_ADD(page_exphcontg[9]);
		if (!IS_P2ALIGNED(pfn, pages)) {
			return (0);
		}
		return (1);
	}

	if (!IS_P2ALIGNED(pfn, pages)) {
		VM_STAT_ADD(page_exphcontg[10]);
		return (0);
	}

	if (page_numtomemseg_nolock(pfn) !=
	    page_numtomemseg_nolock(pfn + pages - 1)) {
		VM_STAT_ADD(page_exphcontg[11]);
		return (0);
	}

	/*
	 * We loop up to 4 times across the pages to promote page size.
	 * We're extra cautious to promote page size atomically with respect
	 * to everybody else. But we can probably optimize into 1 loop if
	 * this becomes an issue.
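	 *
	 * The passes below are: (1) lock every constituent page SE_EXCL and
	 * verify its identity, (2) reclaim free constituents and unload any
	 * remaining mappings, (3) set p_szc on every constituent, and
	 * (4) downgrade into ppa[] or unlock.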
	 */

	for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
		if (!page_trylock(pp, SE_EXCL)) {
			VM_STAT_ADD(page_exphcontg[12]);
			break;
		}
		/*
		 * Check whether p_pagenum was modified by DR.
		 */
		if (pp->p_pagenum != pfn) {
			page_unlock(pp);
			break;
		}
		if (pp->p_vnode != vp ||
		    pp->p_offset != off) {
			VM_STAT_ADD(page_exphcontg[13]);
			page_unlock(pp);
			break;
		}
		if (pp->p_szc >= szc) {
			ASSERT(i == 0);
			page_unlock(pp);
			off = save_off;
			goto again;
		}
	}

	if (i != pages) {
		VM_STAT_ADD(page_exphcontg[14]);
		--pp;
		while (i-- > 0) {
			page_unlock(pp);
			--pp;
		}
		return (0);
	}

	pp = rootpp;
	for (i = 0; i < pages; i++, pp++) {
		if (PP_ISFREE(pp)) {
			VM_STAT_ADD(page_exphcontg[15]);
			ASSERT(!PP_ISAGED(pp));
			ASSERT(pp->p_szc == 0);
			if (!page_reclaim(pp, NULL)) {
				break;
			}
		} else {
			ASSERT(pp->p_szc < szc);
			VM_STAT_ADD(page_exphcontg[16]);
			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
		}
	}
	if (i < pages) {
		VM_STAT_ADD(page_exphcontg[17]);
		/*
		 * page_reclaim failed because we were out of memory.
		 * Drop the rest of the locks and return because this page
		 * must be already reallocated anyway.
		 */
		pp = rootpp;
		for (j = 0; j < pages; j++, pp++) {
			if (j != i) {
				page_unlock(pp);
			}
		}
		return (0);
	}

	off = save_off;
	pp = rootpp;
	for (i = 0; i < pages; i++, pp++, off += PAGESIZE) {
		ASSERT(PAGE_EXCL(pp));
		ASSERT(!PP_ISFREE(pp));
		ASSERT(!hat_page_is_mapped(pp));
		ASSERT(pp->p_vnode == vp);
		ASSERT(pp->p_offset == off);
		pp->p_szc = szc;
	}
	pp = rootpp;
	for (i = 0; i < pages; i++, pp++) {
		if (ppa == NULL) {
			page_unlock(pp);
		} else {
			ppa[i] = pp;
			page_downgrade(ppa[i]);
		}
	}
	if (ppa != NULL) {
		ppa[pages] = NULL;
	}
	VM_STAT_ADD(page_exphcontg[18]);
	ASSERT(vp->v_pages != NULL);
	return (1);
}

/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system and if so return its
 * size code. Obviously this should only be considered as
 * a hint since nothing prevents the page from disappearing
 * or appearing immediately after the return from this routine.
 */
int
page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	int		rc = 0;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	ASSERT(szc != NULL);
	VM_STAT_ADD(page_exists_forreal_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	pp = page_hash_search(index, vp, off);
	if (pp != NULL) {
		*szc = pp->p_szc;
		rc = 1;
	}
	mutex_exit(phm);
	return (rc);
}

/* wakeup threads waiting for pages in page_create_get_something() */
void
wakeup_pcgs(void)
{
	if (!CV_HAS_WAITERS(&pcgs_cv))
		return;
	cv_broadcast(&pcgs_cv);
}

/*
 * 'freemem' is used all over the kernel as an indication of how many
 * pages are free (either on the cache list or on the free page list)
 * in the system. In very few places is a really accurate 'freemem'
 * needed.
 * To avoid contention on the lock protecting a single freemem, the
 * count was spread out into NCPU buckets. set_freemem() sets freemem
 * to the total of all NCPU buckets. It is called from clock() on each
 * TICK.
 */
void
set_freemem(void)
{
	struct pcf	*p;
	ulong_t		t;
	uint_t		i;

	t = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		t += p->pcf_count;
		p++;
	}
	freemem = t;

	/*
	 * Don't worry about grabbing mutex. It's not that
	 * critical if we miss a tick or two. This is
	 * where we wakeup possible delayers in
	 * page_create_get_something().
	 */
	wakeup_pcgs();
}

ulong_t
get_freemem()
{
	struct pcf	*p;
	ulong_t		t;
	uint_t		i;

	t = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		t += p->pcf_count;
		p++;
	}
	/*
	 * We just calculated it, might as well set it.
	 */
	freemem = t;
	return (t);
}

/*
 * Acquire all of the page cache & free (pcf) locks.
 */
void
pcf_acquire_all()
{
	struct pcf	*p;
	uint_t		i;

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		p++;
	}
}

/*
 * Release all the pcf_locks.
 */
void
pcf_release_all()
{
	struct pcf	*p;
	uint_t		i;

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_exit(&p->pcf_lock);
		p++;
	}
}

/*
 * Inform the VM system that we need some pages freed up.
 * Calls must be symmetric, e.g.:
 *
 *	page_needfree(100);
 *	wait a bit;
 *	page_needfree(-100);
 */
void
page_needfree(spgcnt_t npages)
{
	mutex_enter(&new_freemem_lock);
	needfree += npages;
	mutex_exit(&new_freemem_lock);
}

/*
 * Throttle for page_create(): try to prevent freemem from dropping
 * below throttlefree. We can't provide a 100% guarantee because
 * KM_NOSLEEP allocations, page_reclaim(), and various other things
 * nibble away at the freelist. However, we can block all PG_WAIT
 * allocations until memory becomes available. The motivation is
 * that several things can fall apart when there's no free memory:
 *
 * (1) If pageout() needs memory to push a page, the system deadlocks.
 *
 * (2) By (broken) specification, timeout(9F) can neither fail nor
 *     block, so it has no choice but to panic the system if it
 *     cannot allocate a callout structure.
 *
 * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block;
 *     it panics if it cannot allocate a callback structure.
 *
 * (4) Untold numbers of third-party drivers have not yet been hardened
 *     against KM_NOSLEEP and/or allocb() failures; they simply assume
 *     success and panic the system with a data fault on failure.
 *     (The long-term solution to this particular problem is to ship
 *     hostile fault-injecting DEBUG kernels with the DDK.)
 *
 * It is theoretically impossible to guarantee success of non-blocking
 * allocations, but in practice, this throttle is very hard to break.
 */
static int
page_create_throttle(pgcnt_t npages, int flags)
{
	ulong_t	fm;
	uint_t	i;
	pgcnt_t tf;	/* effective value of throttlefree */

	atomic_inc_64(&n_throttle);

	/*
	 * Normal priority allocations.
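	 * PG_NORMALPRI without PG_WAIT succeeds only while freemem stays
	 * above throttlefree plus the request; it never sleeps.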
	 */
	if ((flags & (PG_WAIT | PG_NORMALPRI)) == PG_NORMALPRI) {
		ASSERT(!(flags & (PG_PANIC | PG_PUSHPAGE)));
		return (freemem >= npages + throttlefree);
	}

	/*
	 * Never deny pages when:
	 * - it's a thread that cannot block [NOMEMWAIT()]
	 * - the allocation cannot block and must not fail
	 * - the allocation cannot block and has pageout dispensation
	 */
	if (NOMEMWAIT() ||
	    ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) ||
	    ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE))
		return (1);

	/*
	 * If the allocation can't block, we look favorably upon it
	 * unless we're below pageout_reserve. In that case we fail
	 * the allocation because we want to make sure there are a few
	 * pages available for pageout.
	 */
	if ((flags & PG_WAIT) == 0)
		return (freemem >= npages + pageout_reserve);

	/* Calculate the effective throttlefree value */
	tf = throttlefree -
	    ((flags & PG_PUSHPAGE) ? pageout_reserve : 0);

	WAKE_PAGEOUT_SCANNER(page__create__throttle);

	for (;;) {
		fm = 0;
		pcf_acquire_all();
		mutex_enter(&new_freemem_lock);
		for (i = 0; i < pcf_fanout; i++) {
			fm += pcf[i].pcf_count;
			pcf[i].pcf_wait++;
			mutex_exit(&pcf[i].pcf_lock);
		}
		freemem = fm;
		if (freemem >= npages + tf) {
			mutex_exit(&new_freemem_lock);
			break;
		}
		needfree += npages;
		freemem_wait++;
		cv_wait(&freemem_cv, &new_freemem_lock);
		freemem_wait--;
		needfree -= npages;
		mutex_exit(&new_freemem_lock);
	}
	return (1);
}

/*
 * page_create_wait() is called to either coalesce pages from the
 * different pcf buckets or to wait because there simply are not
 * enough pages to satisfy the caller's request.
 *
 * Sadly, this is called from platform/vm/vm_machdep.c
 */
int
page_create_wait(pgcnt_t npages, uint_t flags)
{
	pgcnt_t		total;
	uint_t		i;
	struct pcf	*p;

	/*
	 * Wait until there are enough free pages to satisfy our
	 * entire request.
	 * We set needfree += npages before prodding pageout, to make sure
	 * it does real work when npages > lotsfree > freemem.
	 */
	VM_STAT_ADD(page_create_not_enough);

	ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1);
checkagain:
	if ((flags & PG_NORELOC) &&
	    kcage_freemem < kcage_throttlefree + npages)
		(void) kcage_create_throttle(npages, flags);

	if (freemem < npages + throttlefree)
		if (!page_create_throttle(npages, flags))
			return (0);

	if (pcf_decrement_bucket(npages) ||
	    pcf_decrement_multiple(&total, npages, 0))
		return (1);

	/*
	 * All of the pcf locks are held, there are not enough pages
	 * to satisfy the request (npages < total).
	 * Be sure to acquire the new_freemem_lock before dropping
	 * the pcf locks. This prevents dropping wakeups in page_free().
	 * The order is always pcf_lock then new_freemem_lock.
	 *
	 * Since we hold all the pcf locks, it is a good time to set freemem.
	 *
	 * If the caller does not want to wait, return now.
	 * Else turn the pageout daemon loose to find something
	 * and wait till it does.
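	 * (The wait below is ended by a freemem_cv wakeup from page_free()
	 * or page_create_putback() once pages come back.)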
	 */
	freemem = total;

	if ((flags & PG_WAIT) == 0) {
		pcf_release_all();

		TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM,
		    "page_create_nomem:npages %ld freemem %ld",
		    npages, freemem);
		return (0);
	}

	ASSERT(proc_pageout != NULL);
	WAKE_PAGEOUT_SCANNER(page__create__wait);

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START,
	    "page_create_sleep_start: freemem %ld needfree %ld",
	    freemem, needfree);

	/*
	 * We are going to wait.
	 * We currently hold all of the pcf_locks,
	 * get the new_freemem_lock (it protects freemem_wait),
	 * before dropping the pcf_locks.
	 */
	mutex_enter(&new_freemem_lock);

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		p->pcf_wait++;
		mutex_exit(&p->pcf_lock);
		p++;
	}

	needfree += npages;
	freemem_wait++;

	cv_wait(&freemem_cv, &new_freemem_lock);

	freemem_wait--;
	needfree -= npages;

	mutex_exit(&new_freemem_lock);

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END,
	    "page_create_sleep_end: freemem %ld needfree %ld",
	    freemem, needfree);

	VM_STAT_ADD(page_create_not_enough_again);
	goto checkagain;
}
/*
 * A routine to do the opposite of page_create_wait().
 */
void
page_create_putback(spgcnt_t npages)
{
	struct pcf	*p;
	pgcnt_t		lump;
	uint_t		*which;

	/*
	 * When a contiguous lump is broken up, we have to
	 * deal with lots of pages (min 64) so lets spread
	 * the wealth around.
	 */
	lump = roundup(npages, pcf_fanout) / pcf_fanout;
	freemem += npages;

	for (p = pcf; (npages > 0) && (p < &pcf[pcf_fanout]); p++) {
		which = &p->pcf_count;

		mutex_enter(&p->pcf_lock);

		if (p->pcf_block) {
			which = &p->pcf_reserve;
		}

		if (lump < npages) {
			*which += (uint_t)lump;
			npages -= lump;
		} else {
			*which += (uint_t)npages;
			npages = 0;
		}

		if (p->pcf_wait) {
			mutex_enter(&new_freemem_lock);
			/*
			 * Check to see if some other thread
			 * is actually waiting. Another bucket
			 * may have woken it up by now. If there
			 * are no waiters, then set our pcf_wait
			 * count to zero to avoid coming in here
			 * next time.
			 */
			if (freemem_wait) {
				if (npages > 1) {
					cv_broadcast(&freemem_cv);
				} else {
					cv_signal(&freemem_cv);
				}
				p->pcf_wait--;
			} else {
				p->pcf_wait = 0;
			}
			mutex_exit(&new_freemem_lock);
		}
		mutex_exit(&p->pcf_lock);
	}
	ASSERT(npages == 0);
}

/*
 * A helper routine for page_create_get_something.
 * The indenting got too deep down there.
 * Unblock the pcf counters. Any pages freed after
 * pcf_block got set are moved to pcf_count and
 * wakeups (cv_broadcast() or cv_signal()) are done as needed.
 */
static void
pcgs_unblock(void)
{
	int		i;
	struct pcf	*p;

	/* Update freemem while we're here. */
	freemem = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		ASSERT(p->pcf_count == 0);
		p->pcf_count = p->pcf_reserve;
		p->pcf_block = 0;
		freemem += p->pcf_count;
		if (p->pcf_wait) {
			mutex_enter(&new_freemem_lock);
			if (freemem_wait) {
				if (p->pcf_reserve > 1) {
					cv_broadcast(&freemem_cv);
					p->pcf_wait = 0;
				} else {
					cv_signal(&freemem_cv);
					p->pcf_wait--;
				}
			} else {
				p->pcf_wait = 0;
			}
			mutex_exit(&new_freemem_lock);
		}
		p->pcf_reserve = 0;
		mutex_exit(&p->pcf_lock);
		p++;
	}
}

/*
 * Called from page_create_va() when both the cache and free lists
 * have been checked once.
 *
 * Either returns a page or panics since the accounting was done
 * way before we got here.
 *
 * We don't come here often, so leave the accounting on permanently.
 */

#define	MAX_PCGS	100

#ifdef	DEBUG
#define	PCGS_TRIES	100
#else	/* DEBUG */
#define	PCGS_TRIES	10
#endif	/* DEBUG */

#ifdef	VM_STATS
uint_t	pcgs_counts[PCGS_TRIES];
uint_t	pcgs_too_many;
uint_t	pcgs_entered;
uint_t	pcgs_entered_noreloc;
uint_t	pcgs_locked;
uint_t	pcgs_cagelocked;
#endif	/* VM_STATS */

static page_t *
page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg,
    caddr_t vaddr, uint_t flags)
{
	uint_t		count;
	page_t		*pp;
	uint_t		locked, i;
	struct pcf	*p;
	lgrp_t		*lgrp;
	int		cagelocked = 0;

	VM_STAT_ADD(pcgs_entered);

	/*
	 * Tap any reserve freelists: if we fail now, we'll die
	 * since the page(s) we're looking for have already been
	 * accounted for.
	 */
	flags |= PG_PANIC;

	if ((flags & PG_NORELOC) != 0) {
		VM_STAT_ADD(pcgs_entered_noreloc);
		/*
		 * Requests for free pages from critical threads
		 * such as pageout still won't throttle here, but
		 * we must try again, to give the cageout thread
		 * another chance to catch up. Since we already
		 * accounted for the pages, we had better get them
		 * this time.
		 *
		 * N.B. All non-critical threads acquire the pcgs_cagelock
		 * to serialize access to the freelists. This implements a
		 * turnstile-type synchronization to avoid starvation of
		 * critical requests for PG_NORELOC memory by non-critical
		 * threads: all non-critical threads must acquire a 'ticket'
		 * before passing through, which entails making sure
		 * kcage_freemem won't fall below minfree prior to grabbing
		 * pages from the freelists.
		 */
		if (kcage_create_throttle(1, flags) == KCT_NONCRIT) {
			mutex_enter(&pcgs_cagelock);
			cagelocked = 1;
			VM_STAT_ADD(pcgs_cagelocked);
		}
	}

	/*
	 * Time to get serious.
	 * We failed to get a `correctly colored' page from both the
	 * free and cache lists.
	 * We escalate in stages.
	 *
	 * First try both lists without worrying about color.
	 *
	 * Then, grab all page accounting locks (ie. pcf[]) and
	 * steal any pages that they have and set the pcf_block flag to
	 * stop deletions from the lists. This will help because
	 * a page can get added to the free list while we are looking
	 * at the cache list, then another page could be added to the cache
	 * list allowing the page on the free list to be removed as we
	 * move from looking at the cache list to the free list.
	 * This could happen over and over. We would never find the page
	 * we have accounted for.
	 *
	 * Noreloc pages are a subset of the global (relocatable) page pool.
	 * They are not tracked separately in the pcf bins, so it is
	 * impossible to know when doing pcf accounting if the available
	 * page(s) are noreloc pages or not. When looking for a noreloc page
	 * it is quite easy to end up here even if the global (relocatable)
	 * page pool has plenty of free pages but the noreloc pool is empty.
	 *
	 * When the noreloc pool is empty (or low), additional noreloc pages
	 * are created by converting pages from the global page pool. This
	 * process will stall during pcf accounting if the pcf bins are
	 * already locked. Such is the case when a noreloc allocation is
	 * looping here in page_create_get_something waiting for more noreloc
	 * pages to appear.
	 *
	 * Short of adding a new field to the pcf bins to accurately track
	 * the number of free noreloc pages, we instead do not grab the
	 * pcgs_lock, do not set the pcf blocks and do not timeout when
	 * allocating a noreloc page. This allows noreloc allocations to
	 * loop without blocking global page pool allocations.
	 *
	 * NOTE: the behaviour of page_create_get_something has not changed
	 * for the case of global page pool allocations.
	 */

	flags &= ~PG_MATCH_COLOR;
	locked = 0;
#if defined(__x86)
	flags = page_create_update_flags_x86(flags);
#endif

	lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);

	for (count = 0; kcage_on || count < MAX_PCGS; count++) {
		pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
		    flags, lgrp);
		if (pp == NULL) {
			pp = page_get_cachelist(vp, off, seg, vaddr,
			    flags, lgrp);
		}
		if (pp == NULL) {
			/*
			 * Serialize. Don't fight with other pcgs().
			 */
			if (!locked && (!kcage_on || !(flags & PG_NORELOC))) {
				mutex_enter(&pcgs_lock);
				VM_STAT_ADD(pcgs_locked);
				locked = 1;
				p = pcf;
				for (i = 0; i < pcf_fanout; i++) {
					mutex_enter(&p->pcf_lock);
					ASSERT(p->pcf_block == 0);
					p->pcf_block = 1;
					p->pcf_reserve = p->pcf_count;
					p->pcf_count = 0;
					mutex_exit(&p->pcf_lock);
					p++;
				}
				freemem = 0;
			}

			if (count) {
				/*
				 * Since page_free() puts pages on
				 * a list then accounts for it, we
				 * just have to wait for page_free()
				 * to unlock any page it was working
				 * with. The page_lock()-page_reclaim()
				 * path falls in the same boat.
				 *
				 * We don't need to check on the
				 * PG_WAIT flag, we have already
				 * accounted for the page we are
				 * looking for in page_create_va().
				 *
				 * We just wait a moment to let any
				 * locked pages on the lists free up,
				 * then continue around and try again.
				 *
				 * Will be awakened by set_freemem().
				 */
				mutex_enter(&pcgs_wait_lock);
				cv_wait(&pcgs_cv, &pcgs_wait_lock);
				mutex_exit(&pcgs_wait_lock);
			}
		} else {
#ifdef VM_STATS
			if (count >= PCGS_TRIES) {
				VM_STAT_ADD(pcgs_too_many);
			} else {
				VM_STAT_ADD(pcgs_counts[count]);
			}
#endif
			if (locked) {
				pcgs_unblock();
				mutex_exit(&pcgs_lock);
			}
			if (cagelocked)
				mutex_exit(&pcgs_cagelock);
			return (pp);
		}
	}
	/*
	 * We go down holding the pcf locks.
	 */
	panic("no %spage found %d",
	    ((flags & PG_NORELOC) ?
"non-reloc " : ""), count); 1943 /*NOTREACHED*/ 1944 } 1945 1946 /* 1947 * Create enough pages for "bytes" worth of data starting at 1948 * "off" in "vp". 1949 * 1950 * Where flag must be one of: 1951 * 1952 * PG_EXCL: Exclusive create (fail if any page already 1953 * exists in the page cache) which does not 1954 * wait for memory to become available. 1955 * 1956 * PG_WAIT: Non-exclusive create which can wait for 1957 * memory to become available. 1958 * 1959 * PG_PHYSCONTIG: Allocate physically contiguous pages. 1960 * (Not Supported) 1961 * 1962 * A doubly linked list of pages is returned to the caller. Each page 1963 * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock) 1964 * lock. 1965 * 1966 * Unable to change the parameters to page_create() in a minor release, 1967 * we renamed page_create() to page_create_va(), changed all known calls 1968 * from page_create() to page_create_va(), and created this wrapper. 1969 * 1970 * Upon a major release, we should break compatibility by deleting this 1971 * wrapper, and replacing all the strings "page_create_va", with "page_create". 1972 * 1973 * NOTE: There is a copy of this interface as page_create_io() in 1974 * i86/vm/vm_machdep.c. Any bugs fixed here should be applied 1975 * there. 1976 */ 1977 page_t * 1978 page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags) 1979 { 1980 caddr_t random_vaddr; 1981 struct seg kseg; 1982 1983 #ifdef DEBUG 1984 cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p", 1985 (void *)caller()); 1986 #endif 1987 1988 random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^ 1989 (uintptr_t)(off >> PAGESHIFT)); 1990 kseg.s_as = &kas; 1991 1992 return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr)); 1993 } 1994 1995 #ifdef DEBUG 1996 uint32_t pg_alloc_pgs_mtbf = 0; 1997 #endif 1998 1999 /* 2000 * Used for large page support. It will attempt to allocate 2001 * a large page(s) off the freelist. 2002 * 2003 * Returns non zero on failure. 2004 */ 2005 int 2006 page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr, 2007 page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz, int pgflags) 2008 { 2009 pgcnt_t npgs, curnpgs, totpgs; 2010 size_t pgsz; 2011 page_t *pplist = NULL, *pp; 2012 int err = 0; 2013 lgrp_t *lgrp; 2014 2015 ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1)); 2016 ASSERT(pgflags == 0 || pgflags == PG_LOCAL); 2017 2018 /* 2019 * Check if system heavily prefers local large pages over remote 2020 * on systems with multiple lgroups. 2021 */ 2022 if (lpg_alloc_prefer == LPAP_LOCAL && nlgrps > 1) { 2023 pgflags = PG_LOCAL; 2024 } 2025 2026 VM_STAT_ADD(alloc_pages[0]); 2027 2028 #ifdef DEBUG 2029 if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) { 2030 return (ENOMEM); 2031 } 2032 #endif 2033 2034 /* 2035 * One must be NULL but not both. 2036 * And one must be non NULL but not both. 
2037 */ 2038 ASSERT(basepp != NULL || ppa != NULL); 2039 ASSERT(basepp == NULL || ppa == NULL); 2040 2041 #if defined(__x86) 2042 while (page_chk_freelist(szc) == 0) { 2043 VM_STAT_ADD(alloc_pages[8]); 2044 if (anypgsz == 0 || --szc == 0) 2045 return (ENOMEM); 2046 } 2047 #endif 2048 2049 pgsz = page_get_pagesize(szc); 2050 totpgs = curnpgs = npgs = pgsz >> PAGESHIFT; 2051 2052 ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0); 2053 2054 (void) page_create_wait(npgs, PG_WAIT); 2055 2056 while (npgs && szc) { 2057 lgrp = lgrp_mem_choose(seg, addr, pgsz); 2058 if (pgflags == PG_LOCAL) { 2059 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2060 pgflags, lgrp); 2061 if (pp == NULL) { 2062 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2063 0, lgrp); 2064 } 2065 } else { 2066 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2067 0, lgrp); 2068 } 2069 if (pp != NULL) { 2070 VM_STAT_ADD(alloc_pages[1]); 2071 page_list_concat(&pplist, &pp); 2072 ASSERT(npgs >= curnpgs); 2073 npgs -= curnpgs; 2074 } else if (anypgsz) { 2075 VM_STAT_ADD(alloc_pages[2]); 2076 szc--; 2077 pgsz = page_get_pagesize(szc); 2078 curnpgs = pgsz >> PAGESHIFT; 2079 } else { 2080 VM_STAT_ADD(alloc_pages[3]); 2081 ASSERT(npgs == totpgs); 2082 page_create_putback(npgs); 2083 return (ENOMEM); 2084 } 2085 } 2086 if (szc == 0) { 2087 VM_STAT_ADD(alloc_pages[4]); 2088 ASSERT(npgs != 0); 2089 page_create_putback(npgs); 2090 err = ENOMEM; 2091 } else if (basepp != NULL) { 2092 ASSERT(npgs == 0); 2093 ASSERT(ppa == NULL); 2094 *basepp = pplist; 2095 } 2096 2097 npgs = totpgs - npgs; 2098 pp = pplist; 2099 2100 /* 2101 * Clear the free and age bits. Also, if we were passed in a ppa, then 2102 * fill it in with all the constituent pages from the large page. But 2103 * if we failed to allocate all the pages, just free what we got. 2104 */ 2105 while (npgs != 0) { 2106 ASSERT(PP_ISFREE(pp)); 2107 ASSERT(PP_ISAGED(pp)); 2108 if (ppa != NULL || err != 0) { 2109 if (err == 0) { 2110 VM_STAT_ADD(alloc_pages[5]); 2111 PP_CLRFREE(pp); 2112 PP_CLRAGED(pp); 2113 page_sub(&pplist, pp); 2114 *ppa++ = pp; 2115 npgs--; 2116 } else { 2117 VM_STAT_ADD(alloc_pages[6]); 2118 ASSERT(pp->p_szc != 0); 2119 curnpgs = page_get_pagecnt(pp->p_szc); 2120 page_list_break(&pp, &pplist, curnpgs); 2121 page_list_add_pages(pp, 0); 2122 page_create_putback(curnpgs); 2123 ASSERT(npgs >= curnpgs); 2124 npgs -= curnpgs; 2125 } 2126 pp = pplist; 2127 } else { 2128 VM_STAT_ADD(alloc_pages[7]); 2129 PP_CLRFREE(pp); 2130 PP_CLRAGED(pp); 2131 pp = pp->p_next; 2132 npgs--; 2133 } 2134 } 2135 return (err); 2136 } 2137 2138 /* 2139 * Get a single large page off the freelists, and set it up for use. 2140 * Number of bytes requested must be a supported page size. 2141 * 2142 * Note that this call may fail even if there is sufficient 2143 * memory available or PG_WAIT is set, so the caller must 2144 * be willing to fall back on page_create_va(), block and retry, 2145 * or fail the requester.
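 *
 * A minimal fallback sketch for a hypothetical caller (illustrative
 * only; PG_EXCL is required by the ASSERT below, everything else here
 * is an assumption):
 *
 *	pp = page_create_va_large(vp, off, bytes, PG_EXCL, seg, va, NULL);
 *	if (pp == NULL)
 *		pp = page_create_va(vp, off, bytes, PG_EXCL | PG_WAIT,
 *		    seg, va);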
2146 */ 2147 page_t * 2148 page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2149 struct seg *seg, caddr_t vaddr, void *arg) 2150 { 2151 pgcnt_t npages; 2152 page_t *pp; 2153 page_t *rootpp; 2154 lgrp_t *lgrp; 2155 lgrp_id_t *lgrpid = (lgrp_id_t *)arg; 2156 2157 ASSERT(vp != NULL); 2158 2159 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2160 PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0); 2161 /* but no others */ 2162 2163 ASSERT((flags & PG_EXCL) == PG_EXCL); 2164 2165 npages = btop(bytes); 2166 2167 if (!kcage_on || panicstr) { 2168 /* 2169 * Cage is OFF, or we are single threaded in 2170 * panic, so make everything a RELOC request. 2171 */ 2172 flags &= ~PG_NORELOC; 2173 } 2174 2175 /* 2176 * Make sure there's adequate physical memory available. 2177 * Note: PG_WAIT is ignored here. 2178 */ 2179 if (freemem <= throttlefree + npages) { 2180 VM_STAT_ADD(page_create_large_cnt[1]); 2181 return (NULL); 2182 } 2183 2184 /* 2185 * If cage is on, dampen draw from cage when available 2186 * cage space is low. 2187 */ 2188 if ((flags & (PG_NORELOC | PG_WAIT)) == (PG_NORELOC | PG_WAIT) && 2189 kcage_freemem < kcage_throttlefree + npages) { 2190 2191 /* 2192 * The cage is on, the caller wants PG_NORELOC 2193 * pages and available cage memory is very low. 2194 * Call kcage_create_throttle() to attempt to 2195 * control demand on the cage. 2196 */ 2197 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) { 2198 VM_STAT_ADD(page_create_large_cnt[2]); 2199 return (NULL); 2200 } 2201 } 2202 2203 if (!pcf_decrement_bucket(npages) && 2204 !pcf_decrement_multiple(NULL, npages, 1)) { 2205 VM_STAT_ADD(page_create_large_cnt[4]); 2206 return (NULL); 2207 } 2208 2209 /* 2210 * This is where this function behaves fundamentally differently 2211 * than page_create_va(); since we're intending to map the page 2212 * with a single TTE, we have to get it as a physically contiguous 2213 * hardware pagesize chunk. If we can't, we fail. 2214 */ 2215 if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max && 2216 LGRP_EXISTS(lgrp_table[*lgrpid])) 2217 lgrp = lgrp_table[*lgrpid]; 2218 else 2219 lgrp = lgrp_mem_choose(seg, vaddr, bytes); 2220 2221 if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr, 2222 bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) { 2223 page_create_putback(npages); 2224 VM_STAT_ADD(page_create_large_cnt[5]); 2225 return (NULL); 2226 } 2227 2228 /* 2229 * If we got the page with the wrong mtype, give it back; this is a 2230 * workaround for CR 6249718. When CR 6249718 is fixed we will never 2231 * get inside this "if" and the workaround becomes just a nop. 2232 */ 2233 if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) { 2234 page_list_add_pages(rootpp, 0); 2235 page_create_putback(npages); 2236 VM_STAT_ADD(page_create_large_cnt[6]); 2237 return (NULL); 2238 } 2239 2240 /* 2241 * If satisfying this request has left us with too little 2242 * memory, start the wheels turning to get some back. The 2243 * first clause of the test prevents waking up the pageout 2244 * daemon in situations where it would decide that there's 2245 * nothing to do.
2246 */ 2247 if (nscan < desscan && freemem < minfree) { 2248 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2249 "pageout_cv_signal:freemem %ld", freemem); 2250 WAKE_PAGEOUT_SCANNER(va__large); 2251 } 2252 2253 pp = rootpp; 2254 while (npages--) { 2255 ASSERT(PAGE_EXCL(pp)); 2256 ASSERT(pp->p_vnode == NULL); 2257 ASSERT(!hat_page_is_mapped(pp)); 2258 PP_CLRFREE(pp); 2259 PP_CLRAGED(pp); 2260 if (!page_hashin(pp, vp, off, NULL)) 2261 panic("page_create_large: hashin failed: page %p", 2262 (void *)pp); 2263 page_io_lock(pp); 2264 off += PAGESIZE; 2265 pp = pp->p_next; 2266 } 2267 2268 VM_STAT_ADD(page_create_large_cnt[0]); 2269 return (rootpp); 2270 } 2271 2272 page_t * 2273 page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2274 struct seg *seg, caddr_t vaddr) 2275 { 2276 page_t *plist = NULL; 2277 pgcnt_t npages; 2278 pgcnt_t found_on_free = 0; 2279 pgcnt_t pages_req; 2280 page_t *npp = NULL; 2281 struct pcf *p; 2282 lgrp_t *lgrp; 2283 2284 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START, 2285 "page_create_start:vp %p off %llx bytes %lu flags %x", 2286 vp, off, bytes, flags); 2287 2288 ASSERT(bytes != 0 && vp != NULL); 2289 2290 if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) { 2291 panic("page_create: invalid flags"); 2292 /*NOTREACHED*/ 2293 } 2294 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2295 PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0); 2296 /* but no others */ 2297 2298 pages_req = npages = btopr(bytes); 2299 /* 2300 * Try to see whether request is too large to *ever* be 2301 * satisfied, in order to prevent deadlock. We arbitrarily 2302 * decide to limit maximum size requests to max_page_get. 2303 */ 2304 if (npages >= max_page_get) { 2305 if ((flags & PG_WAIT) == 0) { 2306 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG, 2307 "page_create_toobig:vp %p off %llx npages " 2308 "%lu max_page_get %lu", 2309 vp, off, npages, max_page_get); 2310 return (NULL); 2311 } else { 2312 cmn_err(CE_WARN, 2313 "Request for too much kernel memory " 2314 "(%lu bytes), will hang forever", bytes); 2315 for (;;) 2316 delay(1000000000); 2317 } 2318 } 2319 2320 if (!kcage_on || panicstr) { 2321 /* 2322 * Cage is OFF, or we are single threaded in 2323 * panic, so make everything a RELOC request. 2324 */ 2325 flags &= ~PG_NORELOC; 2326 } 2327 2328 if (freemem <= throttlefree + npages) 2329 if (!page_create_throttle(npages, flags)) 2330 return (NULL); 2331 2332 /* 2333 * If cage is on, dampen draw from cage when available 2334 * cage space is low. 2335 */ 2336 if ((flags & PG_NORELOC) && 2337 kcage_freemem < kcage_throttlefree + npages) { 2338 2339 /* 2340 * The cage is on, the caller wants PG_NORELOC 2341 * pages and available cage memory is very low. 2342 * Call kcage_create_throttle() to attempt to 2343 * control demand on the cage. 2344 */ 2345 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) 2346 return (NULL); 2347 } 2348 2349 VM_STAT_ADD(page_create_cnt[0]); 2350 2351 if (!pcf_decrement_bucket(npages)) { 2352 /* 2353 * Have to look harder. If npages is greater than 2354 * one, then we might have to coalesce the counters. 2355 * 2356 * Go wait. We come back having accounted 2357 * for the memory. 
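 *
 * For reference, the uncontended fast path that pcf_decrement_bucket()
 * attempts looks roughly like this (an illustrative sketch only, not
 * the actual implementation):
 *
 *	p = &pcf[PCF_INDEX()];
 *	mutex_enter(&p->pcf_lock);
 *	if (!p->pcf_block && p->pcf_count >= npages) {
 *		p->pcf_count -= npages;	-- memory is now accounted for
 *		freemem -= npages;
 *	}
 *	mutex_exit(&p->pcf_lock);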
2358 */ 2359 VM_STAT_ADD(page_create_cnt[1]); 2360 if (!page_create_wait(npages, flags)) { 2361 VM_STAT_ADD(page_create_cnt[2]); 2362 return (NULL); 2363 } 2364 } 2365 2366 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS, 2367 "page_create_success:vp %p off %llx", vp, off); 2368 2369 /* 2370 * If satisfying this request has left us with too little 2371 * memory, start the wheels turning to get some back. The 2372 * first clause of the test prevents waking up the pageout 2373 * daemon in situations where it would decide that there's 2374 * nothing to do. 2375 */ 2376 if (nscan < desscan && freemem < minfree) { 2377 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2378 "pageout_cv_signal:freemem %ld", freemem); 2379 WAKE_PAGEOUT_SCANNER(va); 2380 } 2381 2382 /* 2383 * Loop around collecting the requested number of pages. 2384 * Most of the time, we have to `create' a new page. With 2385 * this in mind, pull the page off the free list before 2386 * getting the hash lock. This will minimize the hash 2387 * lock hold time, nesting, and the like. If it turns 2388 * out we don't need the page, we put it back at the end. 2389 */ 2390 while (npages--) { 2391 page_t *pp; 2392 kmutex_t *phm = NULL; 2393 ulong_t index; 2394 2395 index = PAGE_HASH_FUNC(vp, off); 2396 top: 2397 ASSERT(phm == NULL); 2398 ASSERT(index == PAGE_HASH_FUNC(vp, off)); 2399 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 2400 2401 if (npp == NULL) { 2402 /* 2403 * Try to get a page from the freelist (i.e., 2404 * a page with no [vp, off] tag). If that 2405 * fails, use the cachelist. 2406 * 2407 * During the first attempt at both the free 2408 * and cache lists we try for the correct color. 2409 */ 2410 /* 2411 * XXXX - how do we deal with virtually indexed 2412 * caches and colors? 2413 */ 2414 VM_STAT_ADD(page_create_cnt[4]); 2415 /* 2416 * Get the lgroup to allocate the next page of shared 2417 * memory from and use it to specify where to allocate 2418 * the physical memory. 2419 */ 2420 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE); 2421 npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE, 2422 flags | PG_MATCH_COLOR, lgrp); 2423 if (npp == NULL) { 2424 npp = page_get_cachelist(vp, off, seg, 2425 vaddr, flags | PG_MATCH_COLOR, lgrp); 2426 if (npp == NULL) { 2427 npp = page_create_get_something(vp, 2428 off, seg, vaddr, 2429 flags & ~PG_MATCH_COLOR); 2430 } 2431 2432 if (PP_ISAGED(npp) == 0) { 2433 /* 2434 * Since this page came from the 2435 * cachelist, we must destroy the 2436 * old vnode association. 2437 */ 2438 page_hashout(npp, NULL); 2439 } 2440 } 2441 } 2442 2443 /* 2444 * We own this page! 2445 */ 2446 ASSERT(PAGE_EXCL(npp)); 2447 ASSERT(npp->p_vnode == NULL); 2448 ASSERT(!hat_page_is_mapped(npp)); 2449 PP_CLRFREE(npp); 2450 PP_CLRAGED(npp); 2451 2452 /* 2453 * Here we have a page in our hot little mitts and are 2454 * just waiting to stuff it on the appropriate lists. 2455 * Get the mutex and check to see if it really does 2456 * not exist. 2457 */ 2458 phm = PAGE_HASH_MUTEX(index); 2459 mutex_enter(phm); 2460 pp = page_hash_search(index, vp, off); 2461 if (pp == NULL) { 2462 VM_STAT_ADD(page_create_new); 2463 pp = npp; 2464 npp = NULL; 2465 if (!page_hashin(pp, vp, off, phm)) { 2466 /* 2467 * Since we hold the page hash mutex and 2468 * just searched for this page, page_hashin 2469 * had better not fail. If it does, that 2470 * means some thread did not follow the 2471 * page hash mutex rules. Panic now and 2472 * get it over with. As usual, go down 2473 * holding all the locks.
2474 */ 2475 ASSERT(MUTEX_HELD(phm)); 2476 panic("page_create: " 2477 "hashin failed %p %p %llx %p", 2478 (void *)pp, (void *)vp, off, (void *)phm); 2479 /*NOTREACHED*/ 2480 } 2481 ASSERT(MUTEX_HELD(phm)); 2482 mutex_exit(phm); 2483 phm = NULL; 2484 2485 /* 2486 * Hat layer locking need not be done to set 2487 * the following bits since the page is not hashed 2488 * and was on the free list (i.e., had no mappings). 2489 * 2490 * Set the reference bit to protect 2491 * against immediate pageout. 2492 * 2493 * XXXmh modify freelist code to set reference 2494 * bit so we don't have to do it here. 2495 */ 2496 page_set_props(pp, P_REF); 2497 found_on_free++; 2498 } else { 2499 VM_STAT_ADD(page_create_exists); 2500 if (flags & PG_EXCL) { 2501 /* 2502 * Found an existing page, and the caller 2503 * wanted all new pages. Undo all of the work 2504 * we have done. 2505 */ 2506 mutex_exit(phm); 2507 phm = NULL; 2508 while (plist != NULL) { 2509 pp = plist; 2510 page_sub(&plist, pp); 2511 page_io_unlock(pp); 2512 /* large pages should not end up here */ 2513 ASSERT(pp->p_szc == 0); 2514 /*LINTED: constant in conditional ctx*/ 2515 VN_DISPOSE(pp, B_INVAL, 0, kcred); 2516 } 2517 VM_STAT_ADD(page_create_found_one); 2518 goto fail; 2519 } 2520 ASSERT(flags & PG_WAIT); 2521 if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) { 2522 /* 2523 * Start all over again if we blocked trying 2524 * to lock the page. 2525 */ 2526 mutex_exit(phm); 2527 VM_STAT_ADD(page_create_page_lock_failed); 2528 phm = NULL; 2529 goto top; 2530 } 2531 mutex_exit(phm); 2532 phm = NULL; 2533 2534 if (PP_ISFREE(pp)) { 2535 ASSERT(PP_ISAGED(pp) == 0); 2536 VM_STAT_ADD(pagecnt.pc_get_cache); 2537 page_list_sub(pp, PG_CACHE_LIST); 2538 PP_CLRFREE(pp); 2539 found_on_free++; 2540 } 2541 } 2542 2543 /* 2544 * Got a page! It is locked. Acquire the i/o 2545 * lock since we are going to use the p_next and 2546 * p_prev fields to link the requested pages together. 2547 */ 2548 page_io_lock(pp); 2549 page_add(&plist, pp); 2550 plist = plist->p_next; 2551 off += PAGESIZE; 2552 vaddr += PAGESIZE; 2553 } 2554 2555 ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1); 2556 fail: 2557 if (npp != NULL) { 2558 /* 2559 * Did not need this page after all. 2560 * Put it back on the free list. 2561 */ 2562 VM_STAT_ADD(page_create_putbacks); 2563 PP_SETFREE(npp); 2564 PP_SETAGED(npp); 2565 npp->p_offset = (u_offset_t)-1; 2566 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL); 2567 page_unlock(npp); 2568 2569 } 2570 2571 ASSERT(pages_req >= found_on_free); 2572 2573 { 2574 uint_t overshoot = (uint_t)(pages_req - found_on_free); 2575 2576 if (overshoot) { 2577 VM_STAT_ADD(page_create_overshoot); 2578 p = &pcf[PCF_INDEX()]; 2579 mutex_enter(&p->pcf_lock); 2580 if (p->pcf_block) { 2581 p->pcf_reserve += overshoot; 2582 } else { 2583 p->pcf_count += overshoot; 2584 if (p->pcf_wait) { 2585 mutex_enter(&new_freemem_lock); 2586 if (freemem_wait) { 2587 cv_signal(&freemem_cv); 2588 p->pcf_wait--; 2589 } else { 2590 p->pcf_wait = 0; 2591 } 2592 mutex_exit(&new_freemem_lock); 2593 } 2594 } 2595 mutex_exit(&p->pcf_lock); 2596 /* freemem is approximate, so this test is OK */ 2597 if (!p->pcf_block) 2598 freemem += overshoot; 2599 } 2600 } 2601 2602 return (plist); 2603 } 2604 2605 /* 2606 * One or more constituent pages of this large page have been marked 2607 * toxic. Simply demote the large page to PAGESIZE pages and let 2608 * page_free() handle it. This routine should only be called by 2609 * large page free routines (page_free_pages() and page_destroy_pages()).
2610 * All pages are locked SE_EXCL and have already been marked free. 2611 */ 2612 static void 2613 page_free_toxic_pages(page_t *rootpp) 2614 { 2615 page_t *tpp; 2616 pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc); 2617 uint_t szc = rootpp->p_szc; 2618 2619 for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) { 2620 ASSERT(tpp->p_szc == szc); 2621 ASSERT((PAGE_EXCL(tpp) && 2622 !page_iolock_assert(tpp)) || panicstr); 2623 tpp->p_szc = 0; 2624 } 2625 2626 while (rootpp != NULL) { 2627 tpp = rootpp; 2628 page_sub(&rootpp, tpp); 2629 ASSERT(PP_ISFREE(tpp)); 2630 PP_CLRFREE(tpp); 2631 page_free(tpp, 1); 2632 } 2633 } 2634 2635 /* 2636 * Put page on the "free" list. 2637 * The free list is really two lists maintained by 2638 * the PSM of whatever machine we happen to be on. 2639 */ 2640 void 2641 page_free(page_t *pp, int dontneed) 2642 { 2643 struct pcf *p; 2644 uint_t pcf_index; 2645 2646 ASSERT((PAGE_EXCL(pp) && 2647 !page_iolock_assert(pp)) || panicstr); 2648 2649 if (PP_ISFREE(pp)) { 2650 panic("page_free: page %p is free", (void *)pp); 2651 } 2652 2653 if (pp->p_szc != 0) { 2654 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 2655 PP_ISKAS(pp)) { 2656 panic("page_free: anon or kernel " 2657 "or no vnode large page %p", (void *)pp); 2658 } 2659 page_demote_vp_pages(pp); 2660 ASSERT(pp->p_szc == 0); 2661 } 2662 2663 /* 2664 * The page_struct_lock need not be acquired to examine these 2665 * fields since the page has an "exclusive" lock. 2666 */ 2667 if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 2668 pp->p_slckcnt != 0) { 2669 panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d " 2670 "slckcnt = %d", (void *)pp, page_pptonum(pp), pp->p_lckcnt, 2671 pp->p_cowcnt, pp->p_slckcnt); 2672 /*NOTREACHED*/ 2673 } 2674 2675 ASSERT(!hat_page_getshare(pp)); 2676 2677 PP_SETFREE(pp); 2678 ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) || 2679 !hat_ismod(pp)); 2680 page_clr_all_props(pp); 2681 ASSERT(!hat_page_getshare(pp)); 2682 2683 /* 2684 * Now we add the page to the head of the free list. 2685 * But if this page is associated with a paged vnode 2686 * then we adjust the head forward so that the page is 2687 * effectively at the end of the list. 2688 */ 2689 if (pp->p_vnode == NULL) { 2690 /* 2691 * Page has no identity, put it on the free list. 2692 */ 2693 PP_SETAGED(pp); 2694 pp->p_offset = (u_offset_t)-1; 2695 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 2696 VM_STAT_ADD(pagecnt.pc_free_free); 2697 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2698 "page_free_free:pp %p", pp); 2699 } else { 2700 PP_CLRAGED(pp); 2701 2702 if (!dontneed) { 2703 /* move it to the tail of the list */ 2704 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL); 2705 2706 VM_STAT_ADD(pagecnt.pc_free_cache); 2707 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL, 2708 "page_free_cache_tail:pp %p", pp); 2709 } else { 2710 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD); 2711 2712 VM_STAT_ADD(pagecnt.pc_free_dontneed); 2713 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD, 2714 "page_free_cache_head:pp %p", pp); 2715 } 2716 } 2717 page_unlock(pp); 2718 2719 /* 2720 * Now do the `freemem' accounting. 2721 */ 2722 pcf_index = PCF_INDEX(); 2723 p = &pcf[pcf_index]; 2724 2725 mutex_enter(&p->pcf_lock); 2726 if (p->pcf_block) { 2727 p->pcf_reserve += 1; 2728 } else { 2729 p->pcf_count += 1; 2730 if (p->pcf_wait) { 2731 mutex_enter(&new_freemem_lock); 2732 /* 2733 * Check to see if some other thread 2734 * is actually waiting. Another bucket 2735 * may have woken it up by now. 
If there 2736 * are no waiters, then set our pcf_wait 2737 * count to zero to avoid coming in here 2738 * next time. Also, since only one page 2739 * was put on the free list, just wake 2740 * up one waiter. 2741 */ 2742 if (freemem_wait) { 2743 cv_signal(&freemem_cv); 2744 p->pcf_wait--; 2745 } else { 2746 p->pcf_wait = 0; 2747 } 2748 mutex_exit(&new_freemem_lock); 2749 } 2750 } 2751 mutex_exit(&p->pcf_lock); 2752 2753 /* freemem is approximate, so this test is OK */ 2754 if (!p->pcf_block) 2755 freemem += 1; 2756 } 2757 2758 /* 2759 * Put page on the "free" list during initial startup. 2760 * This happens during initial single-threaded execution. 2761 */ 2762 void 2763 page_free_at_startup(page_t *pp) 2764 { 2765 struct pcf *p; 2766 uint_t pcf_index; 2767 2768 page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT); 2769 VM_STAT_ADD(pagecnt.pc_free_free); 2770 2771 /* 2772 * Now do the `freemem' accounting. 2773 */ 2774 pcf_index = PCF_INDEX(); 2775 p = &pcf[pcf_index]; 2776 2777 ASSERT(p->pcf_block == 0); 2778 ASSERT(p->pcf_wait == 0); 2779 p->pcf_count += 1; 2780 2781 /* freemem is approximate, so this is OK */ 2782 freemem += 1; 2783 } 2784 2785 void 2786 page_free_pages(page_t *pp) 2787 { 2788 page_t *tpp, *rootpp = NULL; 2789 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 2790 pgcnt_t i; 2791 uint_t szc = pp->p_szc; 2792 2793 VM_STAT_ADD(pagecnt.pc_free_pages); 2794 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2795 "page_free_free:pp %p", pp); 2796 2797 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 2798 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 2799 panic("page_free_pages: not root page %p", (void *)pp); 2800 /*NOTREACHED*/ 2801 } 2802 2803 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 2804 ASSERT((PAGE_EXCL(tpp) && 2805 !page_iolock_assert(tpp)) || panicstr); 2806 if (PP_ISFREE(tpp)) { 2807 panic("page_free_pages: page %p is free", (void *)tpp); 2808 /*NOTREACHED*/ 2809 } 2810 if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 || 2811 tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) { 2812 panic("page_free_pages %p", (void *)tpp); 2813 /*NOTREACHED*/ 2814 } 2815 2816 ASSERT(!hat_page_getshare(tpp)); 2817 ASSERT(tpp->p_vnode == NULL); 2818 ASSERT(tpp->p_szc == szc); 2819 2820 PP_SETFREE(tpp); 2821 page_clr_all_props(tpp); 2822 PP_SETAGED(tpp); 2823 tpp->p_offset = (u_offset_t)-1; 2824 ASSERT(tpp->p_next == tpp); 2825 ASSERT(tpp->p_prev == tpp); 2826 page_list_concat(&rootpp, &tpp); 2827 } 2828 ASSERT(rootpp == pp); 2829 2830 page_list_add_pages(rootpp, 0); 2831 page_create_putback(pgcnt); 2832 } 2833 2834 int free_pages = 1; 2835 2836 /* 2837 * This routine attempts to return pages to the cachelist via page_release(). 2838 * It does not *have* to be successful in all cases, since the pageout scanner 2839 * will catch any pages it misses. It does need to be fast and not introduce 2840 * too much overhead. 2841 * 2842 * If a page isn't found on the unlocked sweep of the page_hash bucket, we 2843 * don't lock and retry. This is OK, since the page scanner will eventually 2844 * find any page we miss in free_vp_pages(). 2845 */ 2846 void 2847 free_vp_pages(vnode_t *vp, u_offset_t off, size_t len) 2848 { 2849 page_t *pp; 2850 u_offset_t eoff; 2851 extern int swap_in_range(vnode_t *, u_offset_t, size_t); 2852 2853 eoff = off + len; 2854 2855 if (free_pages == 0) 2856 return; 2857 if (swap_in_range(vp, off, len)) 2858 return; 2859 2860 for (; off < eoff; off += PAGESIZE) { 2861 2862 /* 2863 * Find the page using a fast, but inexact, search.
It'll be OK 2864 * if a few pages slip through the cracks here. 2865 */ 2866 pp = page_exists(vp, off); 2867 2868 /* 2869 * If we didn't find the page (it may not exist), if the page 2870 * is free or still looks in use (shared), or if we can't lock 2871 * it, just give up. 2872 */ 2873 if (pp == NULL || 2874 PP_ISFREE(pp) || 2875 page_share_cnt(pp) > 0 || 2876 !page_trylock(pp, SE_EXCL)) 2877 continue; 2878 2879 /* 2880 * Once we have locked pp, verify that it's still the 2881 * correct page and not already free. 2882 */ 2883 ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL)); 2884 if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) { 2885 page_unlock(pp); 2886 continue; 2887 } 2888 2889 /* 2890 * Try to release the page... 2891 */ 2892 (void) page_release(pp, 1); 2893 } 2894 } 2895 2896 /* 2897 * Reclaim the given page from the free list. 2898 * If pp is part of a large page, only the given constituent page is reclaimed 2899 * and the large page it belonged to will be demoted. This can only happen 2900 * if the page is not on the cachelist. 2901 * 2902 * Returns 1 on success or 0 on failure. 2903 * 2904 * The page is unlocked if it can't be reclaimed (when freemem == 0). 2905 * If `lock' is non-null, it will be dropped and re-acquired if 2906 * the routine must wait while freemem is 0. 2907 * 2908 * As it turns out, boot_getpages() does this. It picks a page, 2909 * based on where OBP mapped in some address, gets its pfn, searches 2910 * the memsegs, locks the page, then pulls it off the free list! 2911 */ 2912 int 2913 page_reclaim(page_t *pp, kmutex_t *lock) 2914 { 2915 struct pcf *p; 2916 struct cpu *cpup; 2917 int enough; 2918 uint_t i; 2919 2920 ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1); 2921 ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp)); 2922 2923 /* 2924 * If `freemem' is 0, we cannot reclaim this page from the 2925 * freelist, so release every lock we might hold (the page, 2926 * and the `lock') before blocking. 2927 * 2928 * The only way `freemem' can become 0 while there are pages 2929 * marked free (have their p->p_free bit set) is when the 2930 * system is low on memory and doing a page_create(). In 2931 * order to guarantee that once page_create() starts acquiring 2932 * pages it will be able to get all that it needs (since `freemem' 2933 * was decreased by the requested amount), we need to release 2934 * this page and let page_create() have it. 2935 * 2936 * Since `freemem' being zero is not supposed to happen, just 2937 * use the usual hash stuff as a starting point. If that bucket 2938 * is empty, then assume the worst, and start at the beginning 2939 * of the pcf array. If we always start at the beginning 2940 * when acquiring more than one pcf lock, there won't be any 2941 * deadlock problems. 2942 */ 2943 2944 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */ 2945 2946 if (freemem <= throttlefree && !page_create_throttle(1l, 0)) { 2947 pcf_acquire_all(); 2948 goto page_reclaim_nomem; 2949 } 2950 2951 enough = pcf_decrement_bucket(1); 2952 2953 if (!enough) { 2954 VM_STAT_ADD(page_reclaim_zero); 2955 /* 2956 * Check again. It's possible that some other thread 2957 * could have been right behind us, and added one 2958 * to a list somewhere. Acquire each of the pcf locks 2959 * until we find a page. 2960 */ 2961 p = pcf; 2962 for (i = 0; i < pcf_fanout; i++) { 2963 mutex_enter(&p->pcf_lock); 2964 if (p->pcf_count >= 1) { 2965 p->pcf_count -= 1; 2966 /* 2967 * freemem is not protected by any lock. Thus, 2968 * we cannot have any assertion containing 2969 * freemem here.
2970 */ 2971 freemem -= 1; 2972 enough = 1; 2973 break; 2974 } 2975 p++; 2976 } 2977 2978 if (!enough) { 2979 page_reclaim_nomem: 2980 /* 2981 * We really can't have page `pp'. 2982 * Time for the no-memory dance with 2983 * page_free(). This is just like 2984 * page_create_wait(). Plus the added 2985 * attraction of releasing whatever mutex 2986 * we were passed in `lock'. 2987 * Page_unlock() will wake up any thread 2988 * waiting around for this page. 2989 */ 2990 if (lock) { 2991 VM_STAT_ADD(page_reclaim_zero_locked); 2992 mutex_exit(lock); 2993 } 2994 page_unlock(pp); 2995 2996 /* 2997 * Get this before we drop all the pcf locks. 2998 */ 2999 mutex_enter(&new_freemem_lock); 3000 3001 p = pcf; 3002 for (i = 0; i < pcf_fanout; i++) { 3003 p->pcf_wait++; 3004 mutex_exit(&p->pcf_lock); 3005 p++; 3006 } 3007 3008 freemem_wait++; 3009 cv_wait(&freemem_cv, &new_freemem_lock); 3010 freemem_wait--; 3011 3012 mutex_exit(&new_freemem_lock); 3013 3014 if (lock) { 3015 mutex_enter(lock); 3016 } 3017 return (0); 3018 } 3019 3020 /* 3021 * The pcf accounting has been done and none of the 3022 * pcf_wait flags have been set; drop the locks 3023 * and continue on. 3024 */ 3025 while (p >= pcf) { 3026 mutex_exit(&p->pcf_lock); 3027 p--; 3028 } 3029 } 3030 3031 3032 VM_STAT_ADD(pagecnt.pc_reclaim); 3033 3034 /* 3035 * page_list_sub will handle the case where pp is a large page. 3036 * It's possible that the page was promoted while on the freelist. 3037 */ 3038 if (PP_ISAGED(pp)) { 3039 page_list_sub(pp, PG_FREE_LIST); 3040 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE, 3041 "page_reclaim_free:pp %p", pp); 3042 } else { 3043 page_list_sub(pp, PG_CACHE_LIST); 3044 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE, 3045 "page_reclaim_cache:pp %p", pp); 3046 } 3047 3048 /* 3049 * Clear the p_free & p_age bits since this page is no longer 3050 * on the free list. Notice that there was a brief time when 3051 * the page was marked free but was not on any list. 3052 * 3053 * Set the reference bit to protect against immediate pageout. 3054 */ 3055 PP_CLRFREE(pp); 3056 PP_CLRAGED(pp); 3057 page_set_props(pp, P_REF); 3058 3059 CPU_STATS_ENTER_K(); 3060 cpup = CPU; /* get cpup now that CPU cannot change */ 3061 CPU_STATS_ADDQ(cpup, vm, pgrec, 1); 3062 CPU_STATS_ADDQ(cpup, vm, pgfrec, 1); 3063 CPU_STATS_EXIT_K(); 3064 ASSERT(pp->p_szc == 0); 3065 3066 return (1); 3067 } 3068 3069 /* 3070 * Destroy identity of the page and put it back on 3071 * the page free list. Assumes that the caller has 3072 * acquired the "exclusive" lock on the page. 3073 */ 3074 void 3075 page_destroy(page_t *pp, int dontfree) 3076 { 3077 ASSERT((PAGE_EXCL(pp) && 3078 !page_iolock_assert(pp)) || panicstr); 3079 ASSERT(pp->p_slckcnt == 0 || panicstr); 3080 3081 if (pp->p_szc != 0) { 3082 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 3083 PP_ISKAS(pp)) { 3084 panic("page_destroy: anon or kernel or no vnode " 3085 "large page %p", (void *)pp); 3086 } 3087 page_demote_vp_pages(pp); 3088 ASSERT(pp->p_szc == 0); 3089 } 3090 3091 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp); 3092 3093 /* 3094 * Unload translations, if any, then hash out the 3095 * page to erase its identity. 3096 */ 3097 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3098 page_hashout(pp, NULL); 3099 3100 if (!dontfree) { 3101 /* 3102 * Acquire the "freemem_lock" for availrmem. 3103 * The page_struct_lock need not be acquired for lckcnt 3104 * and cowcnt since the page has an "exclusive" lock.
3105 * We are doing a modified version of page_pp_unlock here. 3106 */ 3107 if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) { 3108 mutex_enter(&freemem_lock); 3109 if (pp->p_lckcnt != 0) { 3110 availrmem++; 3111 pages_locked--; 3112 pp->p_lckcnt = 0; 3113 } 3114 if (pp->p_cowcnt != 0) { 3115 availrmem += pp->p_cowcnt; 3116 pages_locked -= pp->p_cowcnt; 3117 pp->p_cowcnt = 0; 3118 } 3119 mutex_exit(&freemem_lock); 3120 } 3121 /* 3122 * Put the page on the "free" list. 3123 */ 3124 page_free(pp, 0); 3125 } 3126 } 3127 3128 void 3129 page_destroy_pages(page_t *pp) 3130 { 3131 3132 page_t *tpp, *rootpp = NULL; 3133 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 3134 pgcnt_t i, pglcks = 0; 3135 uint_t szc = pp->p_szc; 3136 3137 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 3138 3139 VM_STAT_ADD(pagecnt.pc_destroy_pages); 3140 3141 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp); 3142 3143 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 3144 panic("page_destroy_pages: not root page %p", (void *)pp); 3145 /*NOTREACHED*/ 3146 } 3147 3148 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 3149 ASSERT((PAGE_EXCL(tpp) && 3150 !page_iolock_assert(tpp)) || panicstr); 3151 ASSERT(tpp->p_slckcnt == 0 || panicstr); 3152 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 3153 page_hashout(tpp, NULL); 3154 ASSERT(tpp->p_offset == (u_offset_t)-1); 3155 if (tpp->p_lckcnt != 0) { 3156 pglcks++; 3157 tpp->p_lckcnt = 0; 3158 } else if (tpp->p_cowcnt != 0) { 3159 pglcks += tpp->p_cowcnt; 3160 tpp->p_cowcnt = 0; 3161 } 3162 ASSERT(!hat_page_getshare(tpp)); 3163 ASSERT(tpp->p_vnode == NULL); 3164 ASSERT(tpp->p_szc == szc); 3165 3166 PP_SETFREE(tpp); 3167 page_clr_all_props(tpp); 3168 PP_SETAGED(tpp); 3169 ASSERT(tpp->p_next == tpp); 3170 ASSERT(tpp->p_prev == tpp); 3171 page_list_concat(&rootpp, &tpp); 3172 } 3173 3174 ASSERT(rootpp == pp); 3175 if (pglcks != 0) { 3176 mutex_enter(&freemem_lock); 3177 availrmem += pglcks; 3178 mutex_exit(&freemem_lock); 3179 } 3180 3181 page_list_add_pages(rootpp, 0); 3182 page_create_putback(pgcnt); 3183 } 3184 3185 /* 3186 * Similar to page_destroy(), but destroys pages which are 3187 * locked and known to be on the page free list. Since 3188 * the page is known to be free and locked, no one can access 3189 * it. 3190 * 3191 * Also, the number of free pages does not change. 3192 */ 3193 void 3194 page_destroy_free(page_t *pp) 3195 { 3196 ASSERT(PAGE_EXCL(pp)); 3197 ASSERT(PP_ISFREE(pp)); 3198 ASSERT(pp->p_vnode); 3199 ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0); 3200 ASSERT(!hat_page_is_mapped(pp)); 3201 ASSERT(PP_ISAGED(pp) == 0); 3202 ASSERT(pp->p_szc == 0); 3203 3204 VM_STAT_ADD(pagecnt.pc_destroy_free); 3205 page_list_sub(pp, PG_CACHE_LIST); 3206 3207 page_hashout(pp, NULL); 3208 ASSERT(pp->p_vnode == NULL); 3209 ASSERT(pp->p_offset == (u_offset_t)-1); 3210 ASSERT(pp->p_hash == NULL); 3211 3212 PP_SETAGED(pp); 3213 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 3214 page_unlock(pp); 3215 3216 mutex_enter(&new_freemem_lock); 3217 if (freemem_wait) { 3218 cv_signal(&freemem_cv); 3219 } 3220 mutex_exit(&new_freemem_lock); 3221 } 3222 3223 /* 3224 * Rename the page "opp" to have an identity specified 3225 * by [vp, off]. If a page already exists with this name 3226 * it is locked and destroyed. Note that the page's 3227 * translations are not unloaded during the rename. 
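 *
 * An illustrative caller sketch (hypothetical; real call sites in the
 * anon layer differ):
 *
 *	ASSERT(PAGE_EXCL(opp));		-- exclusive lock, no i/o lock
 *	page_rename(opp, newvp, newoff);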
3228 * 3229 * This routine is used by the anon layer to "steal" the 3230 * original page and is not unlike destroying a page and 3231 * creating a new page using the same page frame. 3232 * 3233 * XXX -- Could deadlock if caller 1 tries to rename A to B while 3234 * caller 2 tries to rename B to A. 3235 */ 3236 void 3237 page_rename(page_t *opp, vnode_t *vp, u_offset_t off) 3238 { 3239 page_t *pp; 3240 int olckcnt = 0; 3241 int ocowcnt = 0; 3242 kmutex_t *phm; 3243 ulong_t index; 3244 3245 ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp)); 3246 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3247 ASSERT(PP_ISFREE(opp) == 0); 3248 3249 VM_STAT_ADD(page_rename_count); 3250 3251 TRACE_3(TR_FAC_VM, TR_PAGE_RENAME, 3252 "page rename:pp %p vp %p off %llx", opp, vp, off); 3253 3254 /* 3255 * CacheFS may call page_rename for a large NFS page 3256 * when both CacheFS and NFS mount points are used 3257 * by applications. Demote this large page before 3258 * renaming it, to ensure that there are no "partial" 3259 * large pages left lying around. 3260 */ 3261 if (opp->p_szc != 0) { 3262 vnode_t *ovp = opp->p_vnode; 3263 ASSERT(ovp != NULL); 3264 ASSERT(!IS_SWAPFSVP(ovp)); 3265 ASSERT(!VN_ISKAS(ovp)); 3266 page_demote_vp_pages(opp); 3267 ASSERT(opp->p_szc == 0); 3268 } 3269 3270 page_hashout(opp, NULL); 3271 PP_CLRAGED(opp); 3272 3273 /* 3274 * Acquire the appropriate page hash lock, since 3275 * we're going to rename the page. 3276 */ 3277 index = PAGE_HASH_FUNC(vp, off); 3278 phm = PAGE_HASH_MUTEX(index); 3279 mutex_enter(phm); 3280 top: 3281 /* 3282 * Look for an existing page with this name and destroy it if found. 3283 * By holding the page hash lock all the way to the page_hashin() 3284 * call, we are assured that no page can be created with this 3285 * identity. In the case when the phm lock is dropped to undo any 3286 * hat layer mappings, the existing page is held with an "exclusive" 3287 * lock, again preventing another page from being created with 3288 * this identity. 3289 */ 3290 pp = page_hash_search(index, vp, off); 3291 if (pp != NULL) { 3292 VM_STAT_ADD(page_rename_exists); 3293 3294 /* 3295 * As it turns out, this is one of only two places where 3296 * page_lock() needs to hold the passed-in lock in the 3297 * successful case. In all of the others, the lock could 3298 * be dropped as soon as the attempt is made to lock 3299 * the page. It is tempting to add yet another argument, 3300 * PL_KEEP or PL_DROP, to let page_lock know what to do. 3301 */ 3302 if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) { 3303 /* 3304 * Went to sleep because the page could not 3305 * be locked. We were woken up when the page 3306 * was unlocked, or when the page was destroyed. 3307 * In either case, `phm' was dropped while we 3308 * slept. Hence we should not just roar through 3309 * this loop. 3310 */ 3311 goto top; 3312 } 3313 3314 /* 3315 * If an existing page is a large page, then demote 3316 * it to ensure that no "partial" large pages are 3317 * "created" after page_rename. An existing page 3318 * can be a CacheFS page, and can't belong to swapfs. 3319 */ 3320 if (hat_page_is_mapped(pp)) { 3321 /* 3322 * Unload translations. Since we hold the 3323 * exclusive lock on this page, the page 3324 * cannot be changed while we drop phm. 3325 * This is also not a lock protocol violation, 3326 * but rather the proper way to do things.
3327 */ 3328 mutex_exit(phm); 3329 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3330 if (pp->p_szc != 0) { 3331 ASSERT(!IS_SWAPFSVP(vp)); 3332 ASSERT(!VN_ISKAS(vp)); 3333 page_demote_vp_pages(pp); 3334 ASSERT(pp->p_szc == 0); 3335 } 3336 mutex_enter(phm); 3337 } else if (pp->p_szc != 0) { 3338 ASSERT(!IS_SWAPFSVP(vp)); 3339 ASSERT(!VN_ISKAS(vp)); 3340 mutex_exit(phm); 3341 page_demote_vp_pages(pp); 3342 ASSERT(pp->p_szc == 0); 3343 mutex_enter(phm); 3344 } 3345 page_hashout(pp, phm); 3346 } 3347 /* 3348 * Hash in the page with the new identity. 3349 */ 3350 if (!page_hashin(opp, vp, off, phm)) { 3351 /* 3352 * We were holding phm while we searched for [vp, off] 3353 * and only dropped phm if we found and locked a page. 3354 * If we can't create this page now, then something 3355 * is really broken. 3356 */ 3357 panic("page_rename: Can't hash in page: %p", (void *)pp); 3358 /*NOTREACHED*/ 3359 } 3360 3361 ASSERT(MUTEX_HELD(phm)); 3362 mutex_exit(phm); 3363 3364 /* 3365 * Now that we have dropped phm, let's get around to finishing up 3366 * with pp. 3367 */ 3368 if (pp != NULL) { 3369 ASSERT(!hat_page_is_mapped(pp)); 3370 /* for now large pages should not end up here */ 3371 ASSERT(pp->p_szc == 0); 3372 /* 3373 * Save the locks for transfer to the new page and then 3374 * clear them so page_free doesn't think they're important. 3375 * The page_struct_lock need not be acquired for lckcnt and 3376 * cowcnt since the page has an "exclusive" lock. 3377 */ 3378 olckcnt = pp->p_lckcnt; 3379 ocowcnt = pp->p_cowcnt; 3380 pp->p_lckcnt = pp->p_cowcnt = 0; 3381 3382 /* 3383 * Put the page on the "free" list after we drop 3384 * the lock. The less work under the lock the better. 3385 */ 3386 /*LINTED: constant in conditional context*/ 3387 VN_DISPOSE(pp, B_FREE, 0, kcred); 3388 } 3389 3390 /* 3391 * Transfer the lock count from the old page (if any). 3392 * The page_struct_lock need not be acquired for lckcnt and 3393 * cowcnt since the page has an "exclusive" lock. 3394 */ 3395 opp->p_lckcnt += olckcnt; 3396 opp->p_cowcnt += ocowcnt; 3397 } 3398 3399 /* 3400 * Low-level routine to add page `pp' to the hash and vp chains for [vp, offset]. 3401 * 3402 * Pages are normally inserted at the start of a vnode's v_pages list. 3403 * If the vnode is VMODSORT and the page is modified, it goes at the end. 3404 * This can happen when a modified page is relocated for DR. 3405 * 3406 * Returns 1 on success and 0 on failure. 3407 */ 3408 static int 3409 page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset) 3410 { 3411 page_t **listp; 3412 page_t *tp; 3413 ulong_t index; 3414 3415 ASSERT(PAGE_EXCL(pp)); 3416 ASSERT(vp != NULL); 3417 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3418 3419 /* 3420 * Be sure to set these up before the page is inserted on the hash 3421 * list. As soon as the page is placed on the list some other 3422 * thread might get confused and wonder how this page could 3423 * possibly hash to this list. 3424 */ 3425 pp->p_vnode = vp; 3426 pp->p_offset = offset; 3427 3428 /* 3429 * record if this page is on a swap vnode 3430 */ 3431 if ((vp->v_flag & VISSWAP) != 0) 3432 PP_SETSWAP(pp); 3433 3434 index = PAGE_HASH_FUNC(vp, offset); 3435 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index))); 3436 listp = &page_hash[index]; 3437 3438 /* 3439 * If this page is already hashed in, fail this attempt to add it.
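 *
 * Callers of the page_hashin() wrapper below must be prepared for a
 * zero return from this check; e.g. (a sketch modeled on
 * page_create_va() above):
 *
 *	if (!page_hashin(pp, vp, off, phm))
 *		panic(...);	-- only justified when phm was held
 *				-- across the preceding lookup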
3440 */ 3441 for (tp = *listp; tp != NULL; tp = tp->p_hash) { 3442 if (tp->p_vnode == vp && tp->p_offset == offset) { 3443 pp->p_vnode = NULL; 3444 pp->p_offset = (u_offset_t)(-1); 3445 return (0); 3446 } 3447 } 3448 pp->p_hash = *listp; 3449 *listp = pp; 3450 3451 /* 3452 * Add the page to the vnode's list of pages 3453 */ 3454 if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp)) 3455 listp = &vp->v_pages->p_vpprev->p_vpnext; 3456 else 3457 listp = &vp->v_pages; 3458 3459 page_vpadd(listp, pp); 3460 3461 return (1); 3462 } 3463 3464 /* 3465 * Add page `pp' to both the hash and vp chains for [vp, offset]. 3466 * 3467 * Returns 1 on success and 0 on failure. 3468 * If hold is passed in, it is not dropped. 3469 */ 3470 int 3471 page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold) 3472 { 3473 kmutex_t *phm = NULL; 3474 kmutex_t *vphm; 3475 int rc; 3476 3477 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3478 ASSERT(pp->p_fsdata == 0 || panicstr); 3479 3480 TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN, 3481 "page_hashin:pp %p vp %p offset %llx", 3482 pp, vp, offset); 3483 3484 VM_STAT_ADD(hashin_count); 3485 3486 if (hold != NULL) 3487 phm = hold; 3488 else { 3489 VM_STAT_ADD(hashin_not_held); 3490 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset)); 3491 mutex_enter(phm); 3492 } 3493 3494 vphm = page_vnode_mutex(vp); 3495 mutex_enter(vphm); 3496 rc = page_do_hashin(pp, vp, offset); 3497 mutex_exit(vphm); 3498 if (hold == NULL) 3499 mutex_exit(phm); 3500 if (rc == 0) 3501 VM_STAT_ADD(hashin_already); 3502 return (rc); 3503 } 3504 3505 /* 3506 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3507 * All mutexes must be held. 3508 */ 3509 static void 3510 page_do_hashout(page_t *pp) 3511 { 3512 page_t **hpp; 3513 page_t *hp; 3514 vnode_t *vp = pp->p_vnode; 3515 3516 ASSERT(vp != NULL); 3517 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3518 3519 /* 3520 * First, take pp off of its hash chain. 3521 */ 3522 hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)]; 3523 3524 for (;;) { 3525 hp = *hpp; 3526 if (hp == pp) 3527 break; 3528 if (hp == NULL) { 3529 panic("page_do_hashout"); 3530 /*NOTREACHED*/ 3531 } 3532 hpp = &hp->p_hash; 3533 } 3534 *hpp = pp->p_hash; 3535 3536 /* 3537 * Now remove it from its associated vnode. 3538 */ 3539 if (vp->v_pages) 3540 page_vpsub(&vp->v_pages, pp); 3541 3542 pp->p_hash = NULL; 3543 page_clr_all_props(pp); 3544 PP_CLRSWAP(pp); 3545 pp->p_vnode = NULL; 3546 pp->p_offset = (u_offset_t)-1; 3547 pp->p_fsdata = 0; 3548 } 3549 3550 /* 3551 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3552 * 3553 * When `phm' is non-NULL it contains the address of the mutex protecting the 3554 * hash list pp is on. It is not dropped. 3555 */ 3556 void 3557 page_hashout(page_t *pp, kmutex_t *phm) 3558 { 3559 vnode_t *vp; 3560 ulong_t index; 3561 kmutex_t *nphm; 3562 kmutex_t *vphm; 3563 kmutex_t *sep; 3564 3565 ASSERT(phm != NULL ? MUTEX_HELD(phm) : 1); 3566 ASSERT(pp->p_vnode != NULL); 3567 ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr); 3568 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode))); 3569 3570 vp = pp->p_vnode; 3571 3572 TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT, 3573 "page_hashout:pp %p vp %p", pp, vp); 3574 3578 VM_STAT_ADD(hashout_count); 3579 index = PAGE_HASH_FUNC(vp, pp->p_offset); 3580 if (phm == NULL) { 3581 VM_STAT_ADD(hashout_not_held); 3582 nphm = PAGE_HASH_MUTEX(index); 3583 mutex_enter(nphm); 3584 } 3585 ASSERT(phm ?
phm == PAGE_HASH_MUTEX(index) : 1); 3586 3587 3588 /* 3589 * Grab the page vnode mutex and remove the page... 3590 */ 3591 vphm = page_vnode_mutex(vp); 3592 mutex_enter(vphm); 3593 3594 page_do_hashout(pp); 3595 3596 mutex_exit(vphm); 3597 if (phm == NULL) 3598 mutex_exit(nphm); 3599 3600 /* 3601 * Wake up processes waiting for this page. The page's 3602 * identity has been changed, and is probably not the 3603 * desired page any longer. 3604 */ 3605 sep = page_se_mutex(pp); 3606 mutex_enter(sep); 3607 pp->p_selock &= ~SE_EWANTED; 3608 if (CV_HAS_WAITERS(&pp->p_cv)) 3609 cv_broadcast(&pp->p_cv); 3610 mutex_exit(sep); 3611 } 3612 3613 /* 3614 * Add the page to the front of a linked list of pages 3615 * using the p_next & p_prev pointers for the list. 3616 * The caller is responsible for protecting the list pointers. 3617 */ 3618 void 3619 page_add(page_t **ppp, page_t *pp) 3620 { 3621 ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3622 3623 page_add_common(ppp, pp); 3624 } 3625 3626 3627 3628 /* 3629 * Common code for page_add() and mach_page_add() 3630 */ 3631 void 3632 page_add_common(page_t **ppp, page_t *pp) 3633 { 3634 if (*ppp == NULL) { 3635 pp->p_next = pp->p_prev = pp; 3636 } else { 3637 pp->p_next = *ppp; 3638 pp->p_prev = (*ppp)->p_prev; 3639 (*ppp)->p_prev = pp; 3640 pp->p_prev->p_next = pp; 3641 } 3642 *ppp = pp; 3643 } 3644 3645 3646 /* 3647 * Remove this page from a linked list of pages 3648 * using the p_next & p_prev pointers for the list. 3649 * 3650 * The caller is responsible for protecting the list pointers. 3651 */ 3652 void 3653 page_sub(page_t **ppp, page_t *pp) 3654 { 3655 ASSERT(pp != NULL && (PP_ISFREE(pp)) ? 1 : 3656 (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3657 3658 if (*ppp == NULL || pp == NULL) { 3659 panic("page_sub: bad arg(s): pp %p, *ppp %p", 3660 (void *)pp, (void *)(*ppp)); 3661 /*NOTREACHED*/ 3662 } 3663 3664 page_sub_common(ppp, pp); 3665 } 3666 3667 3668 /* 3669 * Common code for page_sub() and mach_page_sub() 3670 */ 3671 void 3672 page_sub_common(page_t **ppp, page_t *pp) 3673 { 3674 if (*ppp == pp) 3675 *ppp = pp->p_next; /* go to next page */ 3676 3677 if (*ppp == pp) 3678 *ppp = NULL; /* page list is gone */ 3679 else { 3680 pp->p_prev->p_next = pp->p_next; 3681 pp->p_next->p_prev = pp->p_prev; 3682 } 3683 pp->p_prev = pp->p_next = pp; /* make pp a list of one */ 3684 } 3685 3686 3687 /* 3688 * Break page list oppp into two lists with npages in the first list. 3689 * The tail is returned in nppp. 3690 */ 3691 void 3692 page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages) 3693 { 3694 page_t *s1pp = *oppp; 3695 page_t *s2pp; 3696 page_t *e1pp, *e2pp; 3697 long n = 0; 3698 3699 if (s1pp == NULL) { 3700 *nppp = NULL; 3701 return; 3702 } 3703 if (npages == 0) { 3704 *nppp = s1pp; 3705 *oppp = NULL; 3706 return; 3707 } 3708 for (n = 0, s2pp = *oppp; n < npages; n++) { 3709 s2pp = s2pp->p_next; 3710 } 3711 /* Fix head and tail of new lists */ 3712 e1pp = s2pp->p_prev; 3713 e2pp = s1pp->p_prev; 3714 s1pp->p_prev = e1pp; 3715 e1pp->p_next = s1pp; 3716 s2pp->p_prev = e2pp; 3717 e2pp->p_next = s2pp; 3718 3719 /* second list empty */ 3720 if (s2pp == s1pp) { 3721 *oppp = s1pp; 3722 *nppp = NULL; 3723 } else { 3724 *oppp = s1pp; 3725 *nppp = s2pp; 3726 } 3727 } 3728 3729 /* 3730 * Concatenate page list nppp onto the end of list ppp.
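 *
 * For example (an illustrative sketch; page_free_pages() above does
 * exactly this), a large page's constituents can be gathered one at
 * a time:
 *
 *	page_t *rootpp = NULL;
 *	page_list_concat(&rootpp, &tpp);	-- tpp: a list of one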
3731 */ 3732 void 3733 page_list_concat(page_t **ppp, page_t **nppp) 3734 { 3735 page_t *s1pp, *s2pp, *e1pp, *e2pp; 3736 3737 if (*nppp == NULL) { 3738 return; 3739 } 3740 if (*ppp == NULL) { 3741 *ppp = *nppp; 3742 return; 3743 } 3744 s1pp = *ppp; 3745 e1pp = s1pp->p_prev; 3746 s2pp = *nppp; 3747 e2pp = s2pp->p_prev; 3748 s1pp->p_prev = e2pp; 3749 e2pp->p_next = s1pp; 3750 e1pp->p_next = s2pp; 3751 s2pp->p_prev = e1pp; 3752 } 3753 3754 /* 3755 * Return the next page in the page list. 3756 */ 3757 page_t * 3758 page_list_next(page_t *pp) 3759 { 3760 return (pp->p_next); 3761 } 3762 3763 3764 /* 3765 * Add the page to the front of the linked list of pages 3766 * using p_vpnext/p_vpprev pointers for the list. 3767 * 3768 * The caller is responsible for protecting the lists. 3769 */ 3770 void 3771 page_vpadd(page_t **ppp, page_t *pp) 3772 { 3773 if (*ppp == NULL) { 3774 pp->p_vpnext = pp->p_vpprev = pp; 3775 } else { 3776 pp->p_vpnext = *ppp; 3777 pp->p_vpprev = (*ppp)->p_vpprev; 3778 (*ppp)->p_vpprev = pp; 3779 pp->p_vpprev->p_vpnext = pp; 3780 } 3781 *ppp = pp; 3782 } 3783 3784 /* 3785 * Remove this page from the linked list of pages 3786 * using p_vpnext/p_vpprev pointers for the list. 3787 * 3788 * The caller is responsible for protecting the lists. 3789 */ 3790 void 3791 page_vpsub(page_t **ppp, page_t *pp) 3792 { 3793 if (*ppp == NULL || pp == NULL) { 3794 panic("page_vpsub: bad arg(s): pp %p, *ppp %p", 3795 (void *)pp, (void *)(*ppp)); 3796 /*NOTREACHED*/ 3797 } 3798 3799 if (*ppp == pp) 3800 *ppp = pp->p_vpnext; /* go to next page */ 3801 3802 if (*ppp == pp) 3803 *ppp = NULL; /* page list is gone */ 3804 else { 3805 pp->p_vpprev->p_vpnext = pp->p_vpnext; 3806 pp->p_vpnext->p_vpprev = pp->p_vpprev; 3807 } 3808 pp->p_vpprev = pp->p_vpnext = pp; /* make pp a list of one */ 3809 } 3810 3811 /* 3812 * Lock a physical page into memory "long term". Used to support "lock 3813 * in memory" functions. Accepts the page to be locked, and a cow variable 3814 * to indicate whether the lock will travel to the new page during 3815 * a potential copy-on-write. 3816 */ 3817 int 3818 page_pp_lock( 3819 page_t *pp, /* page to be locked */ 3820 int cow, /* cow lock */ 3821 int kernel) /* must succeed -- ignore checking */ 3822 { 3823 int r = 0; /* result -- assume failure */ 3824 3825 ASSERT(PAGE_LOCKED(pp)); 3826 3827 page_struct_lock(pp); 3828 /* 3829 * Acquire the "freemem_lock" for availrmem.
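 *
 * A worked example of the accounting below (illustrative only,
 * assuming availrmem stays above pages_pp_maximum): two successive
 * page_pp_lock(pp, 0, 0) calls charge availrmem exactly once:
 *
 *	first call:	availrmem--, pages_locked++, p_lckcnt == 1
 *	second call:	p_lckcnt == 2, no availrmem change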
3830 */ 3831 if (cow) { 3832 mutex_enter(&freemem_lock); 3833 if ((availrmem > pages_pp_maximum) && 3834 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 3835 availrmem--; 3836 pages_locked++; 3837 mutex_exit(&freemem_lock); 3838 r = 1; 3839 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 3840 cmn_err(CE_WARN, 3841 "COW lock limit reached on pfn 0x%lx", 3842 page_pptonum(pp)); 3843 } 3844 } else 3845 mutex_exit(&freemem_lock); 3846 } else { 3847 if (pp->p_lckcnt) { 3848 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 3849 r = 1; 3850 if (++pp->p_lckcnt == 3851 (ushort_t)PAGE_LOCK_MAXIMUM) { 3852 cmn_err(CE_WARN, "Page lock limit " 3853 "reached on pfn 0x%lx", 3854 page_pptonum(pp)); 3855 } 3856 } 3857 } else { 3858 if (kernel) { 3859 /* availrmem accounting done by caller */ 3860 ++pp->p_lckcnt; 3861 r = 1; 3862 } else { 3863 mutex_enter(&freemem_lock); 3864 if (availrmem > pages_pp_maximum) { 3865 availrmem--; 3866 pages_locked++; 3867 ++pp->p_lckcnt; 3868 r = 1; 3869 } 3870 mutex_exit(&freemem_lock); 3871 } 3872 } 3873 } 3874 page_struct_unlock(pp); 3875 return (r); 3876 } 3877 3878 /* 3879 * Decommit a lock on a physical page frame. Account for cow locks if 3880 * appropriate. 3881 */ 3882 void 3883 page_pp_unlock( 3884 page_t *pp, /* page to be unlocked */ 3885 int cow, /* expect cow lock */ 3886 int kernel) /* this was a kernel lock */ 3887 { 3888 ASSERT(PAGE_LOCKED(pp)); 3889 3890 page_struct_lock(pp); 3891 /* 3892 * Acquire the "freemem_lock" for availrmem. 3893 * If cowcnt or lckcnt is already 0, do nothing; i.e., we 3894 * could be called to unlock even if nothing is locked. This could 3895 * happen if locked file pages were truncated (removing the lock) 3896 * and the file was grown again and new pages faulted in; the new 3897 * pages are unlocked but the segment still thinks they're locked. 3898 */ 3899 if (cow) { 3900 if (pp->p_cowcnt) { 3901 mutex_enter(&freemem_lock); 3902 pp->p_cowcnt--; 3903 availrmem++; 3904 pages_locked--; 3905 mutex_exit(&freemem_lock); 3906 } 3907 } else { 3908 if (pp->p_lckcnt && --pp->p_lckcnt == 0) { 3909 if (!kernel) { 3910 mutex_enter(&freemem_lock); 3911 availrmem++; 3912 pages_locked--; 3913 mutex_exit(&freemem_lock); 3914 } 3915 } 3916 } 3917 page_struct_unlock(pp); 3918 } 3919 3920 /* 3921 * This routine reserves availrmem for npages. 3922 * It returns 1 on success or 0 on failure. 3923 * 3924 * flags: KM_NOSLEEP or KM_SLEEP 3925 * cb_wait: called to induce delay when KM_SLEEP reservation requires kmem 3926 * reaping to potentially succeed. If the callback returns 0, the 3927 * reservation attempts will cease to repeat and page_xresv() may 3928 * report a failure. If cb_wait is NULL, the traditional delay(hz/2) 3929 * behavior will be used while waiting for a reap. 3930 */ 3931 int 3932 page_xresv(pgcnt_t npages, uint_t flags, int (*cb_wait)(void)) 3933 { 3934 mutex_enter(&freemem_lock); 3935 if (availrmem >= tune.t_minarmem + npages) { 3936 availrmem -= npages; 3937 mutex_exit(&freemem_lock); 3938 return (1); 3939 } else if ((flags & KM_NOSLEEP) != 0) { 3940 mutex_exit(&freemem_lock); 3941 return (0); 3942 } 3943 mutex_exit(&freemem_lock); 3944 3945 /* 3946 * We signal memory pressure to the system by elevating 'needfree'. 3947 * Processes such as kmem reaping, pageout, and ZFS ARC shrinking can 3948 * then respond to said pressure by freeing pages.
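 *
 * A hypothetical cb_wait callback might look like this (sketch only;
 * the name and back-off policy are assumptions, not code in this
 * file):
 *
 *	static int
 *	my_resv_wait(void)
 *	{
 *		delay(hz >> 2);		-- pause while reaping runs
 *		return (1);		-- returning 0 aborts the retries
 *	}
 *
 *	ok = page_xresv(npages, KM_SLEEP, my_resv_wait);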
3949 */ 3950 page_needfree(npages); 3951 int nobail = 1; 3952 do { 3953 kmem_reap(); 3954 if (cb_wait == NULL) { 3955 delay(hz >> 2); 3956 } else { 3957 nobail = cb_wait(); 3958 } 3959 3960 mutex_enter(&freemem_lock); 3961 if (availrmem >= tune.t_minarmem + npages) { 3962 availrmem -= npages; 3963 mutex_exit(&freemem_lock); 3964 page_needfree(-(spgcnt_t)npages); 3965 return (1); 3966 } 3967 mutex_exit(&freemem_lock); 3968 } while (nobail != 0); 3969 page_needfree(-(spgcnt_t)npages); 3970 3971 return (0); 3972 } 3973 3974 /* 3975 * This routine reserves availrmem for npages; 3976 * flags: KM_NOSLEEP or KM_SLEEP 3977 * returns 1 on success or 0 on failure 3978 */ 3979 int 3980 page_resv(pgcnt_t npages, uint_t flags) 3981 { 3982 return (page_xresv(npages, flags, NULL)); 3983 } 3984 3985 /* 3986 * This routine unreserves availrmem for npages; 3987 */ 3988 void 3989 page_unresv(pgcnt_t npages) 3990 { 3991 mutex_enter(&freemem_lock); 3992 availrmem += npages; 3993 mutex_exit(&freemem_lock); 3994 } 3995 3996 /* 3997 * See the statement at the beginning of segvn_lockop() regarding 3998 * the way we handle cowcnts and lckcnts. 3999 * 4000 * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage 4001 * that breaks COW has PROT_WRITE. 4002 * 4003 * Note that we may also break COW in case we are softlocking 4004 * on read access during physio; 4005 * in this softlock case, the vpage may not have PROT_WRITE. 4006 * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp' 4007 * if the vpage doesn't have PROT_WRITE. 4008 * 4009 * This routine is never called if we are stealing a page 4010 * in anon_private. 4011 * 4012 * The caller subtracted from availrmem for a read-only mapping; 4013 * if lckcnt is 1, increment availrmem. 4014 */ 4015 void 4016 page_pp_useclaim( 4017 page_t *opp, /* original page frame losing lock */ 4018 page_t *npp, /* new page frame gaining lock */ 4019 uint_t write_perm) /* set if vpage has PROT_WRITE */ 4020 { 4021 int payback = 0; 4022 int nidx, oidx; 4023 4024 ASSERT(PAGE_LOCKED(opp)); 4025 ASSERT(PAGE_LOCKED(npp)); 4026 4027 /* 4028 * Since we have two pages we probably have two locks. We need to take 4029 * them in a defined order to avoid deadlocks. It's also possible they 4030 * both hash to the same lock in which case this is a non-issue. 4031 */ 4032 nidx = PAGE_LLOCK_HASH(PP_PAGEROOT(npp)); 4033 oidx = PAGE_LLOCK_HASH(PP_PAGEROOT(opp)); 4034 if (nidx < oidx) { 4035 page_struct_lock(npp); 4036 page_struct_lock(opp); 4037 } else if (oidx < nidx) { 4038 page_struct_lock(opp); 4039 page_struct_lock(npp); 4040 } else { /* The pages hash to the same lock */ 4041 page_struct_lock(npp); 4042 } 4043 4044 ASSERT(npp->p_cowcnt == 0); 4045 ASSERT(npp->p_lckcnt == 0); 4046 4047 /* Don't use claim if nothing is locked (see page_pp_unlock above) */ 4048 if ((write_perm && opp->p_cowcnt != 0) || 4049 (!write_perm && opp->p_lckcnt != 0)) { 4050 4051 if (write_perm) { 4052 npp->p_cowcnt++; 4053 ASSERT(opp->p_cowcnt != 0); 4054 opp->p_cowcnt--; 4055 } else { 4056 4057 ASSERT(opp->p_lckcnt != 0); 4058 4059 /* 4060 * We don't need availrmem decremented if p_lckcnt on 4061 * the original page is 1. Here, we are unlocking the 4062 * read-only copy belonging to the original page and 4063 * are locking a copy belonging to the new page.
4064 */ 4065 if (opp->p_lckcnt == 1) 4066 payback = 1; 4067 4068 npp->p_lckcnt++; 4069 opp->p_lckcnt--; 4070 } 4071 } 4072 if (payback) { 4073 mutex_enter(&freemem_lock); 4074 availrmem++; 4075 pages_useclaim--; 4076 mutex_exit(&freemem_lock); 4077 } 4078 4079 if (nidx < oidx) { 4080 page_struct_unlock(opp); 4081 page_struct_unlock(npp); 4082 } else if (oidx < nidx) { 4083 page_struct_unlock(npp); 4084 page_struct_unlock(opp); 4085 } else { /* The pages hash to the same lock */ 4086 page_struct_unlock(npp); 4087 } 4088 } 4089 4090 /* 4091 * Simple claim adjust functions -- used to support changes in 4092 * claims due to changes in access permissions. Used by segvn_setprot(). 4093 */ 4094 int 4095 page_addclaim(page_t *pp) 4096 { 4097 int r = 0; /* result */ 4098 4099 ASSERT(PAGE_LOCKED(pp)); 4100 4101 page_struct_lock(pp); 4102 ASSERT(pp->p_lckcnt != 0); 4103 4104 if (pp->p_lckcnt == 1) { 4105 if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4106 --pp->p_lckcnt; 4107 r = 1; 4108 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4109 cmn_err(CE_WARN, 4110 "COW lock limit reached on pfn 0x%lx", 4111 page_pptonum(pp)); 4112 } 4113 } 4114 } else { 4115 mutex_enter(&freemem_lock); 4116 if ((availrmem > pages_pp_maximum) && 4117 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 4118 --availrmem; 4119 ++pages_claimed; 4120 mutex_exit(&freemem_lock); 4121 --pp->p_lckcnt; 4122 r = 1; 4123 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4124 cmn_err(CE_WARN, 4125 "COW lock limit reached on pfn 0x%lx", 4126 page_pptonum(pp)); 4127 } 4128 } else 4129 mutex_exit(&freemem_lock); 4130 } 4131 page_struct_unlock(pp); 4132 return (r); 4133 } 4134 4135 int 4136 page_subclaim(page_t *pp) 4137 { 4138 int r = 0; 4139 4140 ASSERT(PAGE_LOCKED(pp)); 4141 4142 page_struct_lock(pp); 4143 ASSERT(pp->p_cowcnt != 0); 4144 4145 if (pp->p_lckcnt) { 4146 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4147 r = 1; 4148 /* 4149 * for availrmem 4150 */ 4151 mutex_enter(&freemem_lock); 4152 availrmem++; 4153 pages_claimed--; 4154 mutex_exit(&freemem_lock); 4155 4156 pp->p_cowcnt--; 4157 4158 if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4159 cmn_err(CE_WARN, 4160 "Page lock limit reached on pfn 0x%lx", 4161 page_pptonum(pp)); 4162 } 4163 } 4164 } else { 4165 r = 1; 4166 pp->p_cowcnt--; 4167 pp->p_lckcnt++; 4168 } 4169 page_struct_unlock(pp); 4170 return (r); 4171 } 4172 4173 /* 4174 * Variant of page_addclaim(), where ppa[] contains the pages of a single large 4175 * page. 4176 */ 4177 int 4178 page_addclaim_pages(page_t **ppa) 4179 { 4180 pgcnt_t lckpgs = 0, pg_idx; 4181 4182 VM_STAT_ADD(pagecnt.pc_addclaim_pages); 4183 4184 /* 4185 * Only need to take the page struct lock on the large page root. 
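 *
 * Callers pass a NULL-terminated array of the constituent pages of a
 * single large page; e.g. (hypothetical sketch):
 *
 *	page_t *ppa[NPGS + 1];	-- NPGS constituents, then NULL
 *	if (!page_addclaim_pages(ppa))
 *		-- claims are unchanged; handle the failure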
4186 */ 4187 page_struct_lock(ppa[0]); 4188 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4189 4190 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4191 ASSERT(ppa[pg_idx]->p_lckcnt != 0); 4192 if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4193 page_struct_unlock(ppa[0]); 4194 return (0); 4195 } 4196 if (ppa[pg_idx]->p_lckcnt > 1) 4197 lckpgs++; 4198 } 4199 4200 if (lckpgs != 0) { 4201 mutex_enter(&freemem_lock); 4202 if (availrmem >= pages_pp_maximum + lckpgs) { 4203 availrmem -= lckpgs; 4204 pages_claimed += lckpgs; 4205 } else { 4206 mutex_exit(&freemem_lock); 4207 page_struct_unlock(ppa[0]); 4208 return (0); 4209 } 4210 mutex_exit(&freemem_lock); 4211 } 4212 4213 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4214 ppa[pg_idx]->p_lckcnt--; 4215 ppa[pg_idx]->p_cowcnt++; 4216 } 4217 page_struct_unlock(ppa[0]); 4218 return (1); 4219 } 4220 4221 /* 4222 * Variant of page_subclaim(), where ppa[] contains the pages of a single large 4223 * page. 4224 */ 4225 int 4226 page_subclaim_pages(page_t **ppa) 4227 { 4228 pgcnt_t ulckpgs = 0, pg_idx; 4229 4230 VM_STAT_ADD(pagecnt.pc_subclaim_pages); 4231 4232 /* 4233 * Only need to take the page struct lock on the large page root. 4234 */ 4235 page_struct_lock(ppa[0]); 4236 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4237 4238 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4239 ASSERT(ppa[pg_idx]->p_cowcnt != 0); 4240 if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4241 page_struct_unlock(ppa[0]); 4242 return (0); 4243 } 4244 if (ppa[pg_idx]->p_lckcnt != 0) 4245 ulckpgs++; 4246 } 4247 4248 if (ulckpgs != 0) { 4249 mutex_enter(&freemem_lock); 4250 availrmem += ulckpgs; 4251 pages_claimed -= ulckpgs; 4252 mutex_exit(&freemem_lock); 4253 } 4254 4255 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4256 ppa[pg_idx]->p_cowcnt--; 4257 ppa[pg_idx]->p_lckcnt++; 4258 4259 } 4260 page_struct_unlock(ppa[0]); 4261 return (1); 4262 } 4263 4264 page_t * 4265 page_numtopp(pfn_t pfnum, se_t se) 4266 { 4267 page_t *pp; 4268 4269 retry: 4270 pp = page_numtopp_nolock(pfnum); 4271 if (pp == NULL) { 4272 return ((page_t *)NULL); 4273 } 4274 4275 /* 4276 * Acquire the appropriate lock on the page. 4277 */ 4278 while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) { 4279 if (page_pptonum(pp) != pfnum) 4280 goto retry; 4281 continue; 4282 } 4283 4284 if (page_pptonum(pp) != pfnum) { 4285 page_unlock(pp); 4286 goto retry; 4287 } 4288 4289 return (pp); 4290 } 4291 4292 page_t * 4293 page_numtopp_noreclaim(pfn_t pfnum, se_t se) 4294 { 4295 page_t *pp; 4296 4297 retry: 4298 pp = page_numtopp_nolock(pfnum); 4299 if (pp == NULL) { 4300 return ((page_t *)NULL); 4301 } 4302 4303 /* 4304 * Acquire the appropriate lock on the page. 4305 */ 4306 while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) { 4307 if (page_pptonum(pp) != pfnum) 4308 goto retry; 4309 continue; 4310 } 4311 4312 if (page_pptonum(pp) != pfnum) { 4313 page_unlock(pp); 4314 goto retry; 4315 } 4316 4317 return (pp); 4318 } 4319 4320 /* 4321 * This routine is like page_numtopp, but will only return page structs 4322 * for pages which are ok for loading into hardware using the page struct. 4323 */ 4324 page_t * 4325 page_numtopp_nowait(pfn_t pfnum, se_t se) 4326 { 4327 page_t *pp; 4328 4329 retry: 4330 pp = page_numtopp_nolock(pfnum); 4331 if (pp == NULL) { 4332 return ((page_t *)NULL); 4333 } 4334 4335 /* 4336 * Try to acquire the appropriate lock on the page. 
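	 * Unlike page_numtopp() above, we never block waiting for the lock:
	 * a free page or a failed trylock simply yields NULL, and the pfn is
	 * rechecked after a successful trylock because the page may have
	 * changed identity while we raced for the lock.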
4337  */
4338 	if (PP_ISFREE(pp))
4339 		pp = NULL;
4340 	else {
4341 		if (!page_trylock(pp, se))
4342 			pp = NULL;
4343 		else {
4344 			if (page_pptonum(pp) != pfnum) {
4345 				page_unlock(pp);
4346 				goto retry;
4347 			}
4348 			if (PP_ISFREE(pp)) {
4349 				page_unlock(pp);
4350 				pp = NULL;
4351 			}
4352 		}
4353 	}
4354 	return (pp);
4355 }
4356
4357 /*
4358  * Returns a count of dirty pages that are in the process
4359  * of being written out.  If 'cleanit' is set, try to push the page.
4360  */
4361 pgcnt_t
4362 page_busy(int cleanit)
4363 {
4364 	page_t *page0 = page_first();
4365 	page_t *pp = page0;
4366 	pgcnt_t nppbusy = 0;
4367 	u_offset_t off;
4368
4369 	do {
4370 		vnode_t *vp = pp->p_vnode;
4371 		/*
4372 		 * A page is a candidate for syncing if it is:
4373 		 *
4374 		 * (a)	On neither the freelist nor the cachelist
4375 		 * (b)	Hashed onto a vnode
4376 		 * (c)	Not a kernel page
4377 		 * (d)	Dirty
4378 		 * (e)	Not part of a swapfile
4379 		 * (f)	A page which belongs to a real vnode, i.e. one with a
4380 		 *	non-null v_vfsp pointer
4381 		 * (g)	Backed by a filesystem which doesn't have a
4382 		 *	stubbed-out sync operation
4383 		 */
4384 		if (!PP_ISFREE(pp) && vp != NULL && !VN_ISKAS(vp) &&
4385 		    hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL &&
4386 		    vfs_can_sync(vp->v_vfsp)) {
4387 			nppbusy++;
4388
4389 			if (!cleanit)
4390 				continue;
4391 			if (!page_trylock(pp, SE_EXCL))
4392 				continue;
4393
4394 			if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) ||
4395 			    pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
4396 			    !(hat_pagesync(pp,
4397 			    HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) {
4398 				page_unlock(pp);
4399 				continue;
4400 			}
4401 			off = pp->p_offset;
4402 			VN_HOLD(vp);
4403 			page_unlock(pp);
4404 			(void) VOP_PUTPAGE(vp, off, PAGESIZE,
4405 			    B_ASYNC | B_FREE, kcred, NULL);
4406 			VN_RELE(vp);
4407 		}
4408 	} while ((pp = page_next(pp)) != page0);
4409
4410 	return (nppbusy);
4411 }
4412
4413 void page_invalidate_pages(void);
4414
4415 /*
4416  * Callback handler to the VM sub-system.
4417  *
4418  * Callers must ensure there are no recursive entries to this function.
4419  */
4420 /*ARGSUSED*/
4421 boolean_t
4422 callb_vm_cpr(void *arg, int code)
4423 {
4424 	if (code == CB_CODE_CPR_CHKPT)
4425 		page_invalidate_pages();
4426 	return (B_TRUE);
4427 }
4428
4429 /*
4430  * Invalidate all pages of the system.
4431  * It shouldn't be called until all user page activity has stopped.
4432  */
4433 void
4434 page_invalidate_pages()
4435 {
4436 	page_t *pp;
4437 	page_t *page0;
4438 	pgcnt_t nbusypages;
4439 	int retry = 0;
4440 	const int MAXRETRIES = 4;
4441 top:
4442 	/*
4443 	 * Flush dirty pages and destroy the clean ones.
4444 	 */
4445 	nbusypages = 0;
4446
4447 	pp = page0 = page_first();
4448 	do {
4449 		struct vnode *vp;
4450 		u_offset_t offset;
4451 		int mod;
4452
4453 		/*
4454 		 * Skip the page if it has no vnode or if it is associated
4455 		 * with the kernel vnode or with prom-allocated kernel memory.
4456 		 */
4457 		if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp))
4458 			continue;
4459
4460 		/*
4461 		 * Skip the page which has already been freed and invalidated.
4462 		 */
4463 		if (PP_ISFREE(pp) && PP_ISAGED(pp))
4464 			continue;
4465
4466 		/*
4467 		 * Skip pages that are already locked or can't be "exclusively"
4468 		 * locked or are already free.  After we lock the page, check
4469 		 * the free and age bits again to be sure it's not destroyed
4470 		 * yet.
4471 		 * To achieve maximum parallelization, we use page_trylock
4472 		 * instead of page_lock so that we don't get blocked on
4473 		 * individual pages while we have thousands of other pages to process.
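		 * Pages we fail to lock here are only tallied in nbusypages;
		 * the check at the bottom of this function retries the whole
		 * walk, up to MAXRETRIES times, to catch up with them.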
4474  */
4475 		if (!page_trylock(pp, SE_EXCL)) {
4476 			nbusypages++;
4477 			continue;
4478 		} else if (PP_ISFREE(pp)) {
4479 			if (!PP_ISAGED(pp)) {
4480 				page_destroy_free(pp);
4481 			} else {
4482 				page_unlock(pp);
4483 			}
4484 			continue;
4485 		}
4486 		/*
4487 		 * Is this page involved in some I/O? shared?
4488 		 *
4489 		 * The page_struct_lock need not be acquired to
4490 		 * examine these fields since the page has an
4491 		 * "exclusive" lock.
4492 		 */
4493 		if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
4494 			page_unlock(pp);
4495 			continue;
4496 		}
4497
4498 		if (vp->v_type == VCHR) {
4499 			panic("vp->v_type == VCHR");
4500 			/*NOTREACHED*/
4501 		}
4502
4503 		if (!page_try_demote_pages(pp)) {
4504 			page_unlock(pp);
4505 			continue;
4506 		}
4507
4508 		/*
4509 		 * Check the modified bit. Leave the bits alone in hardware
4510 		 * (they will be modified if we do the putpage).
4511 		 */
4512 		mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD)
4513 		    & P_MOD);
4514 		if (mod) {
4515 			offset = pp->p_offset;
4516 			/*
4517 			 * Hold the vnode before releasing the page lock
4518 			 * to prevent it from being freed and re-used by
4519 			 * some other thread.
4520 			 */
4521 			VN_HOLD(vp);
4522 			page_unlock(pp);
4523 			/*
4524 			 * No error return is checked here. Callers such as
4525 			 * cpr deal with the dirty pages at dump time
4526 			 * if this putpage fails.
4527 			 */
4528 			(void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL,
4529 			    kcred, NULL);
4530 			VN_RELE(vp);
4531 		} else {
4532 			/*LINTED: constant in conditional context*/
4533 			VN_DISPOSE(pp, B_INVAL, 0, kcred);
4534 		}
4535 	} while ((pp = page_next(pp)) != page0);
4536 	if (nbusypages && retry++ < MAXRETRIES) {
4537 		delay(1);
4538 		goto top;
4539 	}
4540 }
4541
4542 /*
4543  * Replace the page "old" with the page "new" on the page hash and vnode lists.
4544  *
4545  * The replacement must be done in place, i.e. the equivalent sequence:
4546  *
4547  *	vp = old->p_vnode;
4548  *	off = old->p_offset;
4549  *	page_do_hashout(old)
4550  *	page_do_hashin(new, vp, off)
4551  *
4552  * doesn't work, since
4553  *  1) if old is the only page on the vnode, the v_pages list has a window
4554  *     where it looks empty. This will break file system assumptions.
4555  * and
4556  *  2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list.
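 *
 * Hence page_do_relocate_hash() below splices 'new' directly into the
 * hash chain and the vnode's v_pages list in place of 'old' before
 * clearing 'old', with both the hash mutex and the vnode page mutex
 * held (asserted on entry).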
4557 */ 4558 static void 4559 page_do_relocate_hash(page_t *new, page_t *old) 4560 { 4561 page_t **hash_list; 4562 vnode_t *vp = old->p_vnode; 4563 kmutex_t *sep; 4564 4565 ASSERT(PAGE_EXCL(old)); 4566 ASSERT(PAGE_EXCL(new)); 4567 ASSERT(vp != NULL); 4568 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 4569 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset)))); 4570 4571 /* 4572 * First find old page on the page hash list 4573 */ 4574 hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)]; 4575 4576 for (;;) { 4577 if (*hash_list == old) 4578 break; 4579 if (*hash_list == NULL) { 4580 panic("page_do_hashout"); 4581 /*NOTREACHED*/ 4582 } 4583 hash_list = &(*hash_list)->p_hash; 4584 } 4585 4586 /* 4587 * update new and replace old with new on the page hash list 4588 */ 4589 new->p_vnode = old->p_vnode; 4590 new->p_offset = old->p_offset; 4591 new->p_hash = old->p_hash; 4592 *hash_list = new; 4593 4594 if ((new->p_vnode->v_flag & VISSWAP) != 0) 4595 PP_SETSWAP(new); 4596 4597 /* 4598 * replace old with new on the vnode's page list 4599 */ 4600 if (old->p_vpnext == old) { 4601 new->p_vpnext = new; 4602 new->p_vpprev = new; 4603 } else { 4604 new->p_vpnext = old->p_vpnext; 4605 new->p_vpprev = old->p_vpprev; 4606 new->p_vpnext->p_vpprev = new; 4607 new->p_vpprev->p_vpnext = new; 4608 } 4609 if (vp->v_pages == old) 4610 vp->v_pages = new; 4611 4612 /* 4613 * clear out the old page 4614 */ 4615 old->p_hash = NULL; 4616 old->p_vpnext = NULL; 4617 old->p_vpprev = NULL; 4618 old->p_vnode = NULL; 4619 PP_CLRSWAP(old); 4620 old->p_offset = (u_offset_t)-1; 4621 page_clr_all_props(old); 4622 4623 /* 4624 * Wake up processes waiting for this page. The page's 4625 * identity has been changed, and is probably not the 4626 * desired page any longer. 4627 */ 4628 sep = page_se_mutex(old); 4629 mutex_enter(sep); 4630 old->p_selock &= ~SE_EWANTED; 4631 if (CV_HAS_WAITERS(&old->p_cv)) 4632 cv_broadcast(&old->p_cv); 4633 mutex_exit(sep); 4634 } 4635 4636 /* 4637 * This function moves the identity of page "pp_old" to page "pp_new". 4638 * Both pages must be locked on entry. "pp_new" is free, has no identity, 4639 * and need not be hashed out from anywhere. 4640 */ 4641 void 4642 page_relocate_hash(page_t *pp_new, page_t *pp_old) 4643 { 4644 vnode_t *vp = pp_old->p_vnode; 4645 u_offset_t off = pp_old->p_offset; 4646 kmutex_t *phm, *vphm; 4647 4648 /* 4649 * Rehash two pages 4650 */ 4651 ASSERT(PAGE_EXCL(pp_old)); 4652 ASSERT(PAGE_EXCL(pp_new)); 4653 ASSERT(vp != NULL); 4654 ASSERT(pp_new->p_vnode == NULL); 4655 4656 /* 4657 * hashout then hashin while holding the mutexes 4658 */ 4659 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off)); 4660 mutex_enter(phm); 4661 vphm = page_vnode_mutex(vp); 4662 mutex_enter(vphm); 4663 4664 page_do_relocate_hash(pp_new, pp_old); 4665 4666 /* The following comment preserved from page_flip(). */ 4667 pp_new->p_fsdata = pp_old->p_fsdata; 4668 pp_old->p_fsdata = 0; 4669 mutex_exit(vphm); 4670 mutex_exit(phm); 4671 4672 /* 4673 * The page_struct_lock need not be acquired for lckcnt and 4674 * cowcnt since the page has an "exclusive" lock. 4675 */ 4676 ASSERT(pp_new->p_lckcnt == 0); 4677 ASSERT(pp_new->p_cowcnt == 0); 4678 pp_new->p_lckcnt = pp_old->p_lckcnt; 4679 pp_new->p_cowcnt = pp_old->p_cowcnt; 4680 pp_old->p_lckcnt = pp_old->p_cowcnt = 0; 4681 4682 } 4683 4684 /* 4685 * Helper routine used to lock all remaining members of a 4686 * large page. The caller is responsible for passing in a locked 4687 * pp. 
If pp is a large page, then it succeeds in locking all the 4688 * remaining constituent pages or it returns with only the 4689 * original page locked. 4690 * 4691 * Returns 1 on success, 0 on failure. 4692 * 4693 * If success is returned this routine guarantees p_szc for all constituent 4694 * pages of a large page pp belongs to can't change. To achieve this we 4695 * recheck szc of pp after locking all constituent pages and retry if szc 4696 * changed (it could only decrease). Since hat_page_demote() needs an EXCL 4697 * lock on one of constituent pages it can't be running after all constituent 4698 * pages are locked. hat_page_demote() with a lock on a constituent page 4699 * outside of this large page (i.e. pp belonged to a larger large page) is 4700 * already done with all constituent pages of pp since the root's p_szc is 4701 * changed last. Therefore no need to synchronize with hat_page_demote() that 4702 * locked a constituent page outside of pp's current large page. 4703 */ 4704 #ifdef DEBUG 4705 uint32_t gpg_trylock_mtbf = 0; 4706 #endif 4707 4708 int 4709 group_page_trylock(page_t *pp, se_t se) 4710 { 4711 page_t *tpp; 4712 pgcnt_t npgs, i, j; 4713 uint_t pszc = pp->p_szc; 4714 4715 #ifdef DEBUG 4716 if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) { 4717 return (0); 4718 } 4719 #endif 4720 4721 if (pp != PP_GROUPLEADER(pp, pszc)) { 4722 return (0); 4723 } 4724 4725 retry: 4726 ASSERT(PAGE_LOCKED_SE(pp, se)); 4727 ASSERT(!PP_ISFREE(pp)); 4728 if (pszc == 0) { 4729 return (1); 4730 } 4731 npgs = page_get_pagecnt(pszc); 4732 tpp = pp + 1; 4733 for (i = 1; i < npgs; i++, tpp++) { 4734 if (!page_trylock(tpp, se)) { 4735 tpp = pp + 1; 4736 for (j = 1; j < i; j++, tpp++) { 4737 page_unlock(tpp); 4738 } 4739 return (0); 4740 } 4741 } 4742 if (pp->p_szc != pszc) { 4743 ASSERT(pp->p_szc < pszc); 4744 ASSERT(pp->p_vnode != NULL && !PP_ISKAS(pp) && 4745 !IS_SWAPFSVP(pp->p_vnode)); 4746 tpp = pp + 1; 4747 for (i = 1; i < npgs; i++, tpp++) { 4748 page_unlock(tpp); 4749 } 4750 pszc = pp->p_szc; 4751 goto retry; 4752 } 4753 return (1); 4754 } 4755 4756 void 4757 group_page_unlock(page_t *pp) 4758 { 4759 page_t *tpp; 4760 pgcnt_t npgs, i; 4761 4762 ASSERT(PAGE_LOCKED(pp)); 4763 ASSERT(!PP_ISFREE(pp)); 4764 ASSERT(pp == PP_PAGEROOT(pp)); 4765 npgs = page_get_pagecnt(pp->p_szc); 4766 for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) { 4767 page_unlock(tpp); 4768 } 4769 } 4770 4771 /* 4772 * returns 4773 * 0 : on success and *nrelocp is number of relocated PAGESIZE pages 4774 * ERANGE : this is not a base page 4775 * EBUSY : failure to get locks on the page/pages 4776 * ENOMEM : failure to obtain replacement pages 4777 * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel 4778 * EIO : An error occurred while trying to copy the page data 4779 * 4780 * Return with all constituent members of target and replacement 4781 * SE_EXCL locked. It is the callers responsibility to drop the 4782 * locks. 
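 *
 * For a small in-file example of this contract see page_relocate_cage()
 * below: it allocates a PG_NORELOC replacement, calls page_relocate()
 * with freetarget set, and panics on a partial relocation.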
4783 */ 4784 int 4785 do_page_relocate( 4786 page_t **target, 4787 page_t **replacement, 4788 int grouplock, 4789 spgcnt_t *nrelocp, 4790 lgrp_t *lgrp) 4791 { 4792 page_t *first_repl; 4793 page_t *repl; 4794 page_t *targ; 4795 page_t *pl = NULL; 4796 uint_t ppattr; 4797 pfn_t pfn, repl_pfn = 0; 4798 uint_t szc; 4799 spgcnt_t npgs, i; 4800 int repl_contig = 0; 4801 uint_t flags = 0; 4802 spgcnt_t dofree = 0; 4803 4804 *nrelocp = 0; 4805 4806 #if defined(__sparc) 4807 /* 4808 * We need to wait till OBP has completed 4809 * its boot-time handoff of its resources to the kernel 4810 * before we allow page relocation 4811 */ 4812 if (page_relocate_ready == 0) { 4813 return (EAGAIN); 4814 } 4815 #endif 4816 4817 /* 4818 * If this is not a base page, 4819 * just return with 0x0 pages relocated. 4820 */ 4821 targ = *target; 4822 ASSERT(PAGE_EXCL(targ)); 4823 ASSERT(!PP_ISFREE(targ)); 4824 szc = targ->p_szc; 4825 ASSERT(szc < mmu_page_sizes); 4826 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4827 pfn = targ->p_pagenum; 4828 if (pfn != PFN_BASE(pfn, szc)) { 4829 VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]); 4830 return (ERANGE); 4831 } 4832 4833 if ((repl = *replacement) != NULL && repl->p_szc >= szc) { 4834 repl_pfn = repl->p_pagenum; 4835 if (repl_pfn != PFN_BASE(repl_pfn, szc)) { 4836 VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]); 4837 return (ERANGE); 4838 } 4839 repl_contig = 1; 4840 } 4841 4842 /* 4843 * We must lock all members of this large page or we cannot 4844 * relocate any part of it. 4845 */ 4846 if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) { 4847 VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]); 4848 return (EBUSY); 4849 } 4850 4851 /* 4852 * reread szc it could have been decreased before 4853 * group_page_trylock() was done. 4854 */ 4855 szc = targ->p_szc; 4856 ASSERT(szc < mmu_page_sizes); 4857 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4858 ASSERT(pfn == PFN_BASE(pfn, szc)); 4859 4860 npgs = page_get_pagecnt(targ->p_szc); 4861 4862 if (repl == NULL) { 4863 dofree = npgs; /* Size of target page in MMU pages */ 4864 if (!page_create_wait(dofree, 0)) { 4865 if (grouplock != 0) { 4866 group_page_unlock(targ); 4867 } 4868 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4869 return (ENOMEM); 4870 } 4871 4872 /* 4873 * seg kmem pages require that the target and replacement 4874 * page be the same pagesize. 4875 */ 4876 flags = (VN_ISKAS(targ->p_vnode)) ? 
PGR_SAMESZC : 0; 4877 repl = page_get_replacement_page(targ, lgrp, flags); 4878 if (repl == NULL) { 4879 if (grouplock != 0) { 4880 group_page_unlock(targ); 4881 } 4882 page_create_putback(dofree); 4883 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4884 return (ENOMEM); 4885 } 4886 } 4887 #ifdef DEBUG 4888 else { 4889 ASSERT(PAGE_LOCKED(repl)); 4890 } 4891 #endif /* DEBUG */ 4892 4893 #if defined(__sparc) 4894 /* 4895 * Let hat_page_relocate() complete the relocation if it's kernel page 4896 */ 4897 if (VN_ISKAS(targ->p_vnode)) { 4898 *replacement = repl; 4899 if (hat_page_relocate(target, replacement, nrelocp) != 0) { 4900 if (grouplock != 0) { 4901 group_page_unlock(targ); 4902 } 4903 if (dofree) { 4904 *replacement = NULL; 4905 page_free_replacement_page(repl); 4906 page_create_putback(dofree); 4907 } 4908 VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]); 4909 return (EAGAIN); 4910 } 4911 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 4912 return (0); 4913 } 4914 #endif 4915 4916 first_repl = repl; 4917 4918 for (i = 0; i < npgs; i++) { 4919 ASSERT(PAGE_EXCL(targ)); 4920 ASSERT(targ->p_slckcnt == 0); 4921 ASSERT(repl->p_slckcnt == 0); 4922 4923 (void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD); 4924 4925 ASSERT(hat_page_getshare(targ) == 0); 4926 ASSERT(!PP_ISFREE(targ)); 4927 ASSERT(targ->p_pagenum == (pfn + i)); 4928 ASSERT(repl_contig == 0 || 4929 repl->p_pagenum == (repl_pfn + i)); 4930 4931 /* 4932 * Copy the page contents and attributes then 4933 * relocate the page in the page hash. 4934 */ 4935 if (ppcopy(targ, repl) == 0) { 4936 targ = *target; 4937 repl = first_repl; 4938 VM_STAT_ADD(vmm_vmstats.ppr_copyfail); 4939 if (grouplock != 0) { 4940 group_page_unlock(targ); 4941 } 4942 if (dofree) { 4943 *replacement = NULL; 4944 page_free_replacement_page(repl); 4945 page_create_putback(dofree); 4946 } 4947 return (EIO); 4948 } 4949 4950 targ++; 4951 if (repl_contig != 0) { 4952 repl++; 4953 } else { 4954 repl = repl->p_next; 4955 } 4956 } 4957 4958 repl = first_repl; 4959 targ = *target; 4960 4961 for (i = 0; i < npgs; i++) { 4962 ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO)); 4963 page_clr_all_props(repl); 4964 page_set_props(repl, ppattr); 4965 page_relocate_hash(repl, targ); 4966 4967 ASSERT(hat_page_getshare(targ) == 0); 4968 ASSERT(hat_page_getshare(repl) == 0); 4969 /* 4970 * Now clear the props on targ, after the 4971 * page_relocate_hash(), they no longer 4972 * have any meaning. 4973 */ 4974 page_clr_all_props(targ); 4975 ASSERT(targ->p_next == targ); 4976 ASSERT(targ->p_prev == targ); 4977 page_list_concat(&pl, &targ); 4978 4979 targ++; 4980 if (repl_contig != 0) { 4981 repl++; 4982 } else { 4983 repl = repl->p_next; 4984 } 4985 } 4986 /* assert that we have come full circle with repl */ 4987 ASSERT(repl_contig == 1 || first_repl == repl); 4988 4989 *target = pl; 4990 if (*replacement == NULL) { 4991 ASSERT(first_repl == repl); 4992 *replacement = repl; 4993 } 4994 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 4995 *nrelocp = npgs; 4996 return (0); 4997 } 4998 /* 4999 * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated. 
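 * If freetarget is set the relocated target pages are freed as well: a
 * single page goes through page_free(), while the constituents of a large
 * page are marked free/aged and returned to the free list as a group.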
5000 */ 5001 int 5002 page_relocate( 5003 page_t **target, 5004 page_t **replacement, 5005 int grouplock, 5006 int freetarget, 5007 spgcnt_t *nrelocp, 5008 lgrp_t *lgrp) 5009 { 5010 spgcnt_t ret; 5011 5012 /* do_page_relocate returns 0 on success or errno value */ 5013 ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp); 5014 5015 if (ret != 0 || freetarget == 0) { 5016 return (ret); 5017 } 5018 if (*nrelocp == 1) { 5019 ASSERT(*target != NULL); 5020 page_free(*target, 1); 5021 } else { 5022 page_t *tpp = *target; 5023 uint_t szc = tpp->p_szc; 5024 pgcnt_t npgs = page_get_pagecnt(szc); 5025 ASSERT(npgs > 1); 5026 ASSERT(szc != 0); 5027 do { 5028 ASSERT(PAGE_EXCL(tpp)); 5029 ASSERT(!hat_page_is_mapped(tpp)); 5030 ASSERT(tpp->p_szc == szc); 5031 PP_SETFREE(tpp); 5032 PP_SETAGED(tpp); 5033 npgs--; 5034 } while ((tpp = tpp->p_next) != *target); 5035 ASSERT(npgs == 0); 5036 page_list_add_pages(*target, 0); 5037 npgs = page_get_pagecnt(szc); 5038 page_create_putback(npgs); 5039 } 5040 return (ret); 5041 } 5042 5043 /* 5044 * it is up to the caller to deal with pcf accounting. 5045 */ 5046 void 5047 page_free_replacement_page(page_t *pplist) 5048 { 5049 page_t *pp; 5050 5051 while (pplist != NULL) { 5052 /* 5053 * pp_targ is a linked list. 5054 */ 5055 pp = pplist; 5056 if (pp->p_szc == 0) { 5057 page_sub(&pplist, pp); 5058 page_clr_all_props(pp); 5059 PP_SETFREE(pp); 5060 PP_SETAGED(pp); 5061 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 5062 page_unlock(pp); 5063 VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]); 5064 } else { 5065 spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc); 5066 page_t *tpp; 5067 page_list_break(&pp, &pplist, curnpgs); 5068 tpp = pp; 5069 do { 5070 ASSERT(PAGE_EXCL(tpp)); 5071 ASSERT(!hat_page_is_mapped(tpp)); 5072 page_clr_all_props(tpp); 5073 PP_SETFREE(tpp); 5074 PP_SETAGED(tpp); 5075 } while ((tpp = tpp->p_next) != pp); 5076 page_list_add_pages(pp, 0); 5077 VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]); 5078 } 5079 } 5080 } 5081 5082 /* 5083 * Relocate target to non-relocatable replacement page. 5084 */ 5085 int 5086 page_relocate_cage(page_t **target, page_t **replacement) 5087 { 5088 page_t *tpp, *rpp; 5089 spgcnt_t pgcnt, npgs; 5090 int result; 5091 5092 tpp = *target; 5093 5094 ASSERT(PAGE_EXCL(tpp)); 5095 ASSERT(tpp->p_szc == 0); 5096 5097 pgcnt = btop(page_get_pagesize(tpp->p_szc)); 5098 5099 do { 5100 (void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC); 5101 rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC); 5102 if (rpp == NULL) { 5103 page_create_putback(pgcnt); 5104 kcage_cageout_wakeup(); 5105 } 5106 } while (rpp == NULL); 5107 5108 ASSERT(PP_ISNORELOC(rpp)); 5109 5110 result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL); 5111 5112 if (result == 0) { 5113 *replacement = rpp; 5114 if (pgcnt != npgs) 5115 panic("page_relocate_cage: partial relocation"); 5116 } 5117 5118 return (result); 5119 } 5120 5121 /* 5122 * Release the page lock on a page, place on cachelist 5123 * tail if no longer mapped. Caller can let us know if 5124 * the page is known to be clean. 
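 *
 * Returns PGREL_CLEAN if the page was disposed of and freed, PGREL_MOD
 * if it was dirty and therefore only unlocked, or PGREL_NOTREL if it
 * could not be released at all.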
5125 */ 5126 int 5127 page_release(page_t *pp, int checkmod) 5128 { 5129 int status; 5130 5131 ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) && 5132 (pp->p_vnode != NULL)); 5133 5134 if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) && 5135 ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) && 5136 pp->p_lckcnt == 0 && pp->p_cowcnt == 0 && 5137 !hat_page_is_mapped(pp)) { 5138 5139 /* 5140 * If page is modified, unlock it 5141 * 5142 * (p_nrm & P_MOD) bit has the latest stuff because: 5143 * (1) We found that this page doesn't have any mappings 5144 * _after_ holding SE_EXCL and 5145 * (2) We didn't drop SE_EXCL lock after the check in (1) 5146 */ 5147 if (checkmod && hat_ismod(pp)) { 5148 page_unlock(pp); 5149 status = PGREL_MOD; 5150 } else { 5151 /*LINTED: constant in conditional context*/ 5152 VN_DISPOSE(pp, B_FREE, 0, kcred); 5153 status = PGREL_CLEAN; 5154 } 5155 } else { 5156 page_unlock(pp); 5157 status = PGREL_NOTREL; 5158 } 5159 return (status); 5160 } 5161 5162 /* 5163 * Given a constituent page, try to demote the large page on the freelist. 5164 * 5165 * Returns nonzero if the page could be demoted successfully. Returns with 5166 * the constituent page still locked. 5167 */ 5168 int 5169 page_try_demote_free_pages(page_t *pp) 5170 { 5171 page_t *rootpp = pp; 5172 pfn_t pfn = page_pptonum(pp); 5173 spgcnt_t npgs; 5174 uint_t szc = pp->p_szc; 5175 5176 ASSERT(PP_ISFREE(pp)); 5177 ASSERT(PAGE_EXCL(pp)); 5178 5179 /* 5180 * Adjust rootpp and lock it, if `pp' is not the base 5181 * constituent page. 5182 */ 5183 npgs = page_get_pagecnt(pp->p_szc); 5184 if (npgs == 1) { 5185 return (0); 5186 } 5187 5188 if (!IS_P2ALIGNED(pfn, npgs)) { 5189 pfn = P2ALIGN(pfn, npgs); 5190 rootpp = page_numtopp_nolock(pfn); 5191 } 5192 5193 if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) { 5194 return (0); 5195 } 5196 5197 if (rootpp->p_szc != szc) { 5198 if (pp != rootpp) 5199 page_unlock(rootpp); 5200 return (0); 5201 } 5202 5203 page_demote_free_pages(rootpp); 5204 5205 if (pp != rootpp) 5206 page_unlock(rootpp); 5207 5208 ASSERT(PP_ISFREE(pp)); 5209 ASSERT(PAGE_EXCL(pp)); 5210 return (1); 5211 } 5212 5213 /* 5214 * Given a constituent page, try to demote the large page. 5215 * 5216 * Returns nonzero if the page could be demoted successfully. Returns with 5217 * the constituent page still locked. 5218 */ 5219 int 5220 page_try_demote_pages(page_t *pp) 5221 { 5222 page_t *tpp, *rootpp = pp; 5223 pfn_t pfn = page_pptonum(pp); 5224 spgcnt_t i, npgs; 5225 uint_t szc = pp->p_szc; 5226 vnode_t *vp = pp->p_vnode; 5227 5228 ASSERT(PAGE_EXCL(pp)); 5229 5230 VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]); 5231 5232 if (pp->p_szc == 0) { 5233 VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]); 5234 return (1); 5235 } 5236 5237 if (vp != NULL && !IS_SWAPFSVP(vp) && !VN_ISKAS(vp)) { 5238 VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]); 5239 page_demote_vp_pages(pp); 5240 ASSERT(pp->p_szc == 0); 5241 return (1); 5242 } 5243 5244 /* 5245 * Adjust rootpp if passed in is not the base 5246 * constituent page. 5247 */ 5248 npgs = page_get_pagecnt(pp->p_szc); 5249 ASSERT(npgs > 1); 5250 if (!IS_P2ALIGNED(pfn, npgs)) { 5251 pfn = P2ALIGN(pfn, npgs); 5252 rootpp = page_numtopp_nolock(pfn); 5253 VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]); 5254 ASSERT(rootpp->p_vnode != NULL); 5255 ASSERT(rootpp->p_szc == szc); 5256 } 5257 5258 /* 5259 * We can't demote kernel pages since we can't hat_unload() 5260 * the mappings. 
5261  */
5262 	if (VN_ISKAS(rootpp->p_vnode))
5263 		return (0);
5264
5265 	/*
5266 	 * Attempt to lock all constituent pages except the page passed
5267 	 * in since it's already locked.
5268 	 */
5269 	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
5270 		ASSERT(!PP_ISFREE(tpp));
5271 		ASSERT(tpp->p_vnode != NULL);
5272
5273 		if (tpp != pp && !page_trylock(tpp, SE_EXCL))
5274 			break;
5275 		ASSERT(tpp->p_szc == rootpp->p_szc);
5276 		ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i);
5277 	}
5278
5279 	/*
5280 	 * If we failed to lock them all then unlock what we have
5281 	 * locked so far and bail.
5282 	 */
5283 	if (i < npgs) {
5284 		tpp = rootpp;
5285 		while (i-- > 0) {
5286 			if (tpp != pp)
5287 				page_unlock(tpp);
5288 			tpp++;
5289 		}
5290 		VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]);
5291 		return (0);
5292 	}
5293
5294 	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
5295 		ASSERT(PAGE_EXCL(tpp));
5296 		ASSERT(tpp->p_slckcnt == 0);
5297 		(void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
5298 		tpp->p_szc = 0;
5299 	}
5300
5301 	/*
5302 	 * Unlock all pages except the page passed in.
5303 	 */
5304 	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
5305 		ASSERT(!hat_page_is_mapped(tpp));
5306 		if (tpp != pp)
5307 			page_unlock(tpp);
5308 	}
5309
5310 	VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]);
5311 	return (1);
5312 }
5313
5314 /*
5315  * Called by page_free() and page_destroy() to demote the page size code
5316  * (p_szc) to 0 (since we can't just put a single PAGESIZE page with
5317  * non-zero p_szc on the free list, nor can we just clear the p_szc of a
5318  * single page_t within a large page, since that would break other code
5319  * that relies on p_szc being the same for all page_t's of a large page).
5320  * Anonymous pages should never end up here because anon_map_getpages()
5321  * cannot deal with p_szc changes after a single constituent page is
5322  * locked. While anonymous and kernel large pages are demoted or freed an
5323  * entire large page at a time, with all constituent pages locked EXCL,
5324  * for file system pages we have to be able to demote a large page (i.e.
5325  * decrease the p_szc of all constituent pages) with just an EXCL lock on
5326  * one of the constituent pages. The reason we can easily deal with
5327  * anonymous page demotion an entire large page at a time is that those
5328  * operations originate at the address space level and concern the entire
5329  * large page region, with the actual demotion only done when pages are
5330  * not shared with any other processes (therefore we can always get an
5331  * EXCL lock on all anonymous constituent pages after clearing the segment
5332  * page cache). However, file system pages can be truncated or invalidated
5333  * at PAGESIZE granularity from the file system side and end up in
5334  * page_free() or page_destroy() (we also allow only part of a large page
5335  * to be SOFTLOCKed, so pageout should be able to demote a large page by
5336  * EXCL locking any constituent page that is not under SOFTLOCK). In those
5337  * cases we cannot rely on being able to lock EXCL all constituent pages.
5338  *
5339  * To prevent szc changes on file system pages one has to lock all
5340  * constituent pages at least SHARED (or call page_szc_lock()). The only
5341  * subsystem that doesn't rely on locking all constituent pages (or on
5342  * page_szc_lock()) to prevent szc changes is the hat layer, which uses its
5343  * own page level mlist locks. The hat assumes that szc doesn't change after the mlist lock for a page is taken.
5344  * Therefore we need to change szc under hat level locks if we only have
5345  * an EXCL lock on a single constituent page and the hat still references
5346  * any of the constituent pages. (Note we can't "ignore" the hat layer by
5347  * simply calling hat_pageunload() on all constituent pages without
5348  * holding EXCL locks on all of them.) We use hat_page_demote() to safely
5349  * demote the szc of all constituent pages under hat locks when we only
5350  * have an EXCL lock on one of the constituent pages.
5351  *
5352  * This routine calls page_szc_lock() before calling hat_page_demote() to
5353  * allow segvn, in one special case, not to lock all constituent pages
5354  * SHARED before calling hat_memload_array(), which relies on p_szc not
5355  * changing even before the hat level mlist lock is taken. In that case
5356  * segvn uses page_szc_lock() to prevent hat_page_demote() from changing
5357  * p_szc values.
5358  *
5359  * Anonymous and kernel page demotion still has to lock all pages
5360  * exclusively and hat_pageunload() all constituent pages before demoting
5361  * the page; therefore there is no need for anonymous or kernel page
5362  * demotion to use the hat_page_demote() mechanism.
5363  *
5364  * hat_page_demote() removes all large mappings that map pp and then
5365  * decreases p_szc starting from the last constituent page of the large page.
5366  * Working from the tail in decreasing pfn order lets anyone looking at the
5367  * root page know that hat_page_demote() is done for the root's szc area:
5368  * e.g. with a root page of szc 1, locking all constituent pages within the
5369  * szc 1 area suffices, since any hat_page_demote() begun at szc > 1 is done.
5370  *
5371  * We are guaranteed that all constituent pages of pp's large page belong
5372  * to the same vnode, at consecutive offsets increasing with the pfn, i.e.
5373  * the identity of the constituent pages can't change until their p_szc is
5374  * decreased. Therefore it's safe for hat_page_demote() to remove large
5375  * mappings to pp even though only pp is locked (e.g. we won't unload a kernel locked page).
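 *
 * In short: to rely on p_szc staying constant, lock all constituent pages
 * at least SHARED or take page_szc_lock(); to demote with only a single
 * EXCL lock, go through hat_page_demote(), as page_demote_vp_pages()
 * does below.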
5376  */
5377 static void
5378 page_demote_vp_pages(page_t *pp)
5379 {
5380 	kmutex_t *mtx;
5381
5382 	ASSERT(PAGE_EXCL(pp));
5383 	ASSERT(!PP_ISFREE(pp));
5384 	ASSERT(pp->p_vnode != NULL);
5385 	ASSERT(!IS_SWAPFSVP(pp->p_vnode));
5386 	ASSERT(!PP_ISKAS(pp));
5387
5388 	VM_STAT_ADD(pagecnt.pc_demote_pages[0]);
5389
5390 	mtx = page_szc_lock(pp);
5391 	if (mtx != NULL) {
5392 		hat_page_demote(pp);
5393 		mutex_exit(mtx);
5394 	}
5395 	ASSERT(pp->p_szc == 0);
5396 }
5397
5398 /*
5399  * Mark any existing pages for migration in the given range.
5400  */
5401 void
5402 page_mark_migrate(struct seg *seg, caddr_t addr, size_t len,
5403     struct anon_map *amp, ulong_t anon_index, vnode_t *vp,
5404     u_offset_t vnoff, int rflag)
5405 {
5406 	struct anon *ap;
5407 	vnode_t *curvp;
5408 	lgrp_t *from;
5409 	pgcnt_t nlocked;
5410 	u_offset_t off;
5411 	pfn_t pfn;
5412 	size_t pgsz;
5413 	size_t segpgsz;
5414 	pgcnt_t pages;
5415 	uint_t pszc;
5416 	page_t *pp0, *pp;
5417 	caddr_t va;
5418 	ulong_t an_idx;
5419 	anon_sync_obj_t cookie;
5420
5421 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
5422
5423 	/*
5424 	 * Don't do anything if we don't need to do lgroup optimizations
5425 	 * on this system.
5426 	 */
5427 	if (!lgrp_optimizations())
5428 		return;
5429
5430 	/*
5431 	 * Align address and length to the (potentially large) page boundary.
5432 	 */
5433 	segpgsz = page_get_pagesize(seg->s_szc);
5434 	addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz);
5435 	if (rflag)
5436 		len = P2ROUNDUP(len, segpgsz);
5437
5438 	/*
5439 	 * Do one (large) page at a time.
5440 	 */
5441 	va = addr;
5442 	while (va < addr + len) {
5443 		/*
5444 		 * Look up the (root) page for the vnode and offset
5445 		 * corresponding to this virtual address.
5446 		 * Try the anonmap first since there may be copy-on-write
5447 		 * pages, but initialize the vnode pointer and offset using the
5448 		 * vnode arguments just in case there isn't an amp.
5449  */
5450 		curvp = vp;
5451 		off = vnoff + va - seg->s_base;
5452 		if (amp) {
5453 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5454 			an_idx = anon_index + seg_page(seg, va);
5455 			anon_array_enter(amp, an_idx, &cookie);
5456 			ap = anon_get_ptr(amp->ahp, an_idx);
5457 			if (ap)
5458 				swap_xlate(ap, &curvp, &off);
5459 			anon_array_exit(&cookie);
5460 			ANON_LOCK_EXIT(&amp->a_rwlock);
5461 		}
5462
5463 		pp = NULL;
5464 		if (curvp)
5465 			pp = page_lookup(curvp, off, SE_SHARED);
5466
5467 		/*
5468 		 * If there isn't a page at this virtual address,
5469 		 * skip to the next page.
5470 		 */
5471 		if (pp == NULL) {
5472 			va += PAGESIZE;
5473 			continue;
5474 		}
5475
5476 		/*
5477 		 * Figure out which lgroup this page is in for kstats.
5478 		 */
5479 		pfn = page_pptonum(pp);
5480 		from = lgrp_pfn_to_lgrp(pfn);
5481
5482 		/*
5483 		 * Get the page size, and round up and skip to the next page
5484 		 * boundary if the address is unaligned.
5485 		 */
5486 		pszc = pp->p_szc;
5487 		pgsz = page_get_pagesize(pszc);
5488 		pages = btop(pgsz);
5489 		if (!IS_P2ALIGNED(va, pgsz) ||
5490 		    !IS_P2ALIGNED(pfn, pages) ||
5491 		    pgsz > segpgsz) {
5492 			pgsz = MIN(pgsz, segpgsz);
5493 			page_unlock(pp);
5494 			pages = btop(P2END((uintptr_t)va, pgsz) -
5495 			    (uintptr_t)va);
5496 			va = (caddr_t)P2END((uintptr_t)va, pgsz);
5497 			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, pages);
5498 			continue;
5499 		}
5500
5501 		/*
5502 		 * Upgrade to an exclusive lock on the page.
5503 		 */
5504 		if (!page_tryupgrade(pp)) {
5505 			page_unlock(pp);
5506 			va += pgsz;
5507 			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
5508 			    btop(pgsz));
5509 			continue;
5510 		}
5511
5512 		pp0 = pp++;
5513 		nlocked = 1;
5514
5515 		/*
5516 		 * Lock constituent pages if this is a large page.
5517 		 */
5518 		if (pages > 1) {
5519 			/*
5520 			 * Lock all constituents except the root page, since it
5521 			 * should be locked already.
5522 			 */
5523 			for (; nlocked < pages; nlocked++) {
5524 				if (!page_trylock(pp, SE_EXCL)) {
5525 					break;
5526 				}
5527 				if (PP_ISFREE(pp) ||
5528 				    pp->p_szc != pszc) {
5529 					/*
5530 					 * hat_page_demote() raced in with us.
5531 					 */
5532 					ASSERT(!IS_SWAPFSVP(curvp));
5533 					page_unlock(pp);
5534 					break;
5535 				}
5536 				pp++;
5537 			}
5538 		}
5539
5540 		/*
5541 		 * If all constituent pages couldn't be locked,
5542 		 * unlock the pages locked so far and skip to the next page.
5543 		 */
5544 		if (nlocked < pages) {
5545 			while (pp0 < pp) {
5546 				page_unlock(pp0++);
5547 			}
5548 			va += pgsz;
5549 			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
5550 			    btop(pgsz));
5551 			continue;
5552 		}
5553
5554 		/*
5555 		 * hat_page_demote() can no longer happen, since the last
5556 		 * constituent page had the right p_szc after all constituent
5557 		 * pages were locked. All constituent pages should now have
5558 		 * the same p_szc.
5559 */ 5560 5561 /* 5562 * All constituent pages locked successfully, so mark 5563 * large page for migration and unload the mappings of 5564 * constituent pages, so a fault will occur on any part of the 5565 * large page 5566 */ 5567 PP_SETMIGRATE(pp0); 5568 while (pp0 < pp) { 5569 (void) hat_pageunload(pp0, HAT_FORCE_PGUNLOAD); 5570 ASSERT(hat_page_getshare(pp0) == 0); 5571 page_unlock(pp0++); 5572 } 5573 lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked); 5574 5575 va += pgsz; 5576 } 5577 } 5578 5579 /* 5580 * Migrate any pages that have been marked for migration in the given range 5581 */ 5582 void 5583 page_migrate( 5584 struct seg *seg, 5585 caddr_t addr, 5586 page_t **ppa, 5587 pgcnt_t npages) 5588 { 5589 lgrp_t *from; 5590 lgrp_t *to; 5591 page_t *newpp; 5592 page_t *pp; 5593 pfn_t pfn; 5594 size_t pgsz; 5595 spgcnt_t page_cnt; 5596 spgcnt_t i; 5597 uint_t pszc; 5598 5599 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 5600 5601 while (npages > 0) { 5602 pp = *ppa; 5603 pszc = pp->p_szc; 5604 pgsz = page_get_pagesize(pszc); 5605 page_cnt = btop(pgsz); 5606 5607 /* 5608 * Check to see whether this page is marked for migration 5609 * 5610 * Assume that root page of large page is marked for 5611 * migration and none of the other constituent pages 5612 * are marked. This really simplifies clearing the 5613 * migrate bit by not having to clear it from each 5614 * constituent page. 5615 * 5616 * note we don't want to relocate an entire large page if 5617 * someone is only using one subpage. 5618 */ 5619 if (npages < page_cnt) 5620 break; 5621 5622 /* 5623 * Is it marked for migration? 5624 */ 5625 if (!PP_ISMIGRATE(pp)) 5626 goto next; 5627 5628 /* 5629 * Determine lgroups that page is being migrated between 5630 */ 5631 pfn = page_pptonum(pp); 5632 if (!IS_P2ALIGNED(pfn, page_cnt)) { 5633 break; 5634 } 5635 from = lgrp_pfn_to_lgrp(pfn); 5636 to = lgrp_mem_choose(seg, addr, pgsz); 5637 5638 /* 5639 * Need to get exclusive lock's to migrate 5640 */ 5641 for (i = 0; i < page_cnt; i++) { 5642 ASSERT(PAGE_LOCKED(ppa[i])); 5643 if (page_pptonum(ppa[i]) != pfn + i || 5644 ppa[i]->p_szc != pszc) { 5645 break; 5646 } 5647 if (!page_tryupgrade(ppa[i])) { 5648 lgrp_stat_add(from->lgrp_id, 5649 LGRP_PM_FAIL_LOCK_PGS, 5650 page_cnt); 5651 break; 5652 } 5653 5654 /* 5655 * Check to see whether we are trying to migrate 5656 * page to lgroup where it is allocated already. 5657 * If so, clear the migrate bit and skip to next 5658 * page. 5659 */ 5660 if (i == 0 && to == from) { 5661 PP_CLRMIGRATE(ppa[0]); 5662 page_downgrade(ppa[0]); 5663 goto next; 5664 } 5665 } 5666 5667 /* 5668 * If all constituent pages couldn't be locked, 5669 * unlock pages locked so far and skip to next page. 
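		 * (page_downgrade() rather than page_unlock() is used on this
		 * path: the pages in ppa[] arrived locked from the caller and
		 * were only upgraded here, so we return them to their prior
		 * shared state instead of dropping the locks entirely.)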
5670 */ 5671 if (i != page_cnt) { 5672 while (--i != -1) { 5673 page_downgrade(ppa[i]); 5674 } 5675 goto next; 5676 } 5677 5678 (void) page_create_wait(page_cnt, PG_WAIT); 5679 newpp = page_get_replacement_page(pp, to, PGR_SAMESZC); 5680 if (newpp == NULL) { 5681 page_create_putback(page_cnt); 5682 for (i = 0; i < page_cnt; i++) { 5683 page_downgrade(ppa[i]); 5684 } 5685 lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS, 5686 page_cnt); 5687 goto next; 5688 } 5689 ASSERT(newpp->p_szc == pszc); 5690 /* 5691 * Clear migrate bit and relocate page 5692 */ 5693 PP_CLRMIGRATE(pp); 5694 if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) { 5695 panic("page_migrate: page_relocate failed"); 5696 } 5697 ASSERT(page_cnt * PAGESIZE == pgsz); 5698 5699 /* 5700 * Keep stats for number of pages migrated from and to 5701 * each lgroup 5702 */ 5703 lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt); 5704 lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt); 5705 /* 5706 * update the page_t array we were passed in and 5707 * unlink constituent pages of a large page. 5708 */ 5709 for (i = 0; i < page_cnt; ++i, ++pp) { 5710 ASSERT(PAGE_EXCL(newpp)); 5711 ASSERT(newpp->p_szc == pszc); 5712 ppa[i] = newpp; 5713 pp = newpp; 5714 page_sub(&newpp, pp); 5715 page_downgrade(pp); 5716 } 5717 ASSERT(newpp == NULL); 5718 next: 5719 addr += pgsz; 5720 ppa += page_cnt; 5721 npages -= page_cnt; 5722 } 5723 } 5724 5725 uint_t page_reclaim_maxcnt = 60; /* max total iterations */ 5726 uint_t page_reclaim_nofree_maxcnt = 3; /* max iterations without progress */ 5727 /* 5728 * Reclaim/reserve availrmem for npages. 5729 * If there is not enough memory start reaping seg, kmem caches. 5730 * Start pageout scanner (via page_needfree()). 5731 * Exit after ~ MAX_CNT s regardless of how much memory has been released. 5732 * Note: There is no guarantee that any availrmem will be freed as 5733 * this memory typically is locked (kernel heap) or reserved for swap. 5734 * Also due to memory fragmentation kmem allocator may not be able 5735 * to free any memory (single user allocated buffer will prevent 5736 * freeing slab or a page). 5737 */ 5738 int 5739 page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust) 5740 { 5741 int i = 0; 5742 int i_nofree = 0; 5743 int ret = 0; 5744 pgcnt_t deficit; 5745 pgcnt_t old_availrmem = 0; 5746 5747 mutex_enter(&freemem_lock); 5748 while (availrmem < tune.t_minarmem + npages + epages && 5749 i++ < page_reclaim_maxcnt) { 5750 /* ensure we made some progress in the last few iterations */ 5751 if (old_availrmem < availrmem) { 5752 old_availrmem = availrmem; 5753 i_nofree = 0; 5754 } else if (i_nofree++ >= page_reclaim_nofree_maxcnt) { 5755 break; 5756 } 5757 5758 deficit = tune.t_minarmem + npages + epages - availrmem; 5759 mutex_exit(&freemem_lock); 5760 page_needfree(deficit); 5761 kmem_reap(); 5762 delay(hz); 5763 page_needfree(-(spgcnt_t)deficit); 5764 mutex_enter(&freemem_lock); 5765 } 5766 5767 if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) { 5768 availrmem -= npages; 5769 ret = 1; 5770 } 5771 5772 mutex_exit(&freemem_lock); 5773 5774 return (ret); 5775 } 5776 5777 /* 5778 * Search the memory segments to locate the desired page. Within a 5779 * segment, pages increase linearly with one page structure per 5780 * physical page frame (size PAGESIZE). The search begins 5781 * with the segment that was accessed last, to take advantage of locality. 
5782  * If the hint misses, we start from the beginning of the sorted memseg list.
5783  */
5784
5785
5786 /*
5787  * Some data structures for pfn to pp lookup.
5788  */
5789 ulong_t mhash_per_slot;
5790 struct memseg *memseg_hash[N_MEM_SLOTS];
5791
5792 page_t *
5793 page_numtopp_nolock(pfn_t pfnum)
5794 {
5795 	struct memseg *seg;
5796 	page_t *pp;
5797 	vm_cpu_data_t *vc;
5798
5799 	/*
5800 	 * We need to disable kernel preemption while referencing the
5801 	 * cpu_vm_data field in order to prevent us from being switched to
5802 	 * another cpu and trying to reference it after it has been freed.
5803 	 * This will keep us on the cpu and prevent it from being removed
5804 	 * while we are still on it.
5805 	 *
5806 	 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
5807 	 * which is being reused by DR, which will flush those references
5808 	 * before modifying the reused memseg. See memseg_cpu_vm_flush().
5809 	 */
5810 	kpreempt_disable();
5811 	vc = CPU->cpu_vm_data;
5812 	ASSERT(vc != NULL);
5813
5814 	MEMSEG_STAT_INCR(nsearch);
5815
5816 	/* Try last winner first */
5817 	if (((seg = vc->vc_pnum_memseg) != NULL) &&
5818 	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
5819 		MEMSEG_STAT_INCR(nlastwon);
5820 		pp = seg->pages + (pfnum - seg->pages_base);
5821 		if (pp->p_pagenum == pfnum) {
5822 			kpreempt_enable();
5823 			return ((page_t *)pp);
5824 		}
5825 	}
5826
5827 	/* Else try the hash */
5828 	if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
5829 	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
5830 		MEMSEG_STAT_INCR(nhashwon);
5831 		vc->vc_pnum_memseg = seg;
5832 		pp = seg->pages + (pfnum - seg->pages_base);
5833 		if (pp->p_pagenum == pfnum) {
5834 			kpreempt_enable();
5835 			return ((page_t *)pp);
5836 		}
5837 	}
5838
5839 	/* Else brute force */
5840 	for (seg = memsegs; seg != NULL; seg = seg->next) {
5841 		if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
5842 			vc->vc_pnum_memseg = seg;
5843 			pp = seg->pages + (pfnum - seg->pages_base);
5844 			if (pp->p_pagenum == pfnum) {
5845 				kpreempt_enable();
5846 				return ((page_t *)pp);
5847 			}
5848 		}
5849 	}
5850 	vc->vc_pnum_memseg = NULL;
5851 	kpreempt_enable();
5852 	MEMSEG_STAT_INCR(nnotfound);
5853 	return ((page_t *)NULL);
5854
5855 }
5856
5857 struct memseg *
5858 page_numtomemseg_nolock(pfn_t pfnum)
5859 {
5860 	struct memseg *seg;
5861 	page_t *pp;
5862
5863 	/*
5864 	 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
5865 	 * which is being reused by DR, which will flush those references
5866 	 * before modifying the reused memseg. See memseg_cpu_vm_flush().
5867 	 */
5868 	kpreempt_disable();
5869 	/* Try hash */
5870 	if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
5871 	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
5872 		pp = seg->pages + (pfnum - seg->pages_base);
5873 		if (pp->p_pagenum == pfnum) {
5874 			kpreempt_enable();
5875 			return (seg);
5876 		}
5877 	}
5878
5879 	/* Else brute force */
5880 	for (seg = memsegs; seg != NULL; seg = seg->next) {
5881 		if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
5882 			pp = seg->pages + (pfnum - seg->pages_base);
5883 			if (pp->p_pagenum == pfnum) {
5884 				kpreempt_enable();
5885 				return (seg);
5886 			}
5887 		}
5888 	}
5889 	kpreempt_enable();
5890 	return ((struct memseg *)NULL);
5891 }
5892
5893 /*
5894  * Given a page and a count, return the page struct that is
5895  * n structs away from the current one in the global page
5896  * list.
5897  *
5898  * This function wraps to the first page upon
5899  * reaching the end of the memseg list.
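 *
 * The canonical full scan, as page_busy() above uses via the
 * page_first()/page_next() wrappers below, looks like:
 *
 *	pp = page0 = page_first();
 *	do {
 *		...
 *	} while ((pp = page_next(pp)) != page0);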
5900  */
5901 page_t *
5902 page_nextn(page_t *pp, ulong_t n)
5903 {
5904 	struct memseg *seg;
5905 	page_t *ppn;
5906 	vm_cpu_data_t *vc;
5907
5908 	/*
5909 	 * We need to disable kernel preemption while referencing the
5910 	 * cpu_vm_data field in order to prevent us from being switched to
5911 	 * another cpu and trying to reference it after it has been freed.
5912 	 * This will keep us on the cpu and prevent it from being removed
5913 	 * while we are still on it.
5914 	 *
5915 	 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
5916 	 * which is being reused by DR, which will flush those references
5917 	 * before modifying the reused memseg. See memseg_cpu_vm_flush().
5918 	 */
5919 	kpreempt_disable();
5920 	vc = (vm_cpu_data_t *)CPU->cpu_vm_data;
5921
5922 	ASSERT(vc != NULL);
5923
5924 	if (((seg = vc->vc_pnext_memseg) == NULL) ||
5925 	    (seg->pages_base == seg->pages_end) ||
5926 	    !(pp >= seg->pages && pp < seg->epages)) {
5927
5928 		for (seg = memsegs; seg; seg = seg->next) {
5929 			if (pp >= seg->pages && pp < seg->epages)
5930 				break;
5931 		}
5932
5933 		if (seg == NULL) {
5934 			/* Memory delete got in, return something valid. */
5935 			/* TODO: fix me. */
5936 			seg = memsegs;
5937 			pp = seg->pages;
5938 		}
5939 	}
5940
5941 	/* Check for wraparound - possible if n is large */
5942 	while ((ppn = (pp + n)) >= seg->epages || ppn < pp) {
5943 		n -= seg->epages - pp;
5944 		seg = seg->next;
5945 		if (seg == NULL)
5946 			seg = memsegs;
5947 		pp = seg->pages;
5948 	}
5949 	vc->vc_pnext_memseg = seg;
5950 	kpreempt_enable();
5951 	return (ppn);
5952 }
5953
5954 /*
5955  * Initialize for a loop using page_next_scan_large().
5956  */
5957 page_t *
5958 page_next_scan_init(void **cookie)
5959 {
5960 	ASSERT(cookie != NULL);
5961 	*cookie = (void *)memsegs;
5962 	return ((page_t *)memsegs->pages);
5963 }
5964
5965 /*
5966  * Return the next page in a scan of page_t's, assuming we want
5967  * to skip over sub-pages within larger page sizes.
5968  *
5969  * The cookie is used to keep track of the current memseg.
5970  */
5971 page_t *
5972 page_next_scan_large(
5973     page_t *pp,
5974     ulong_t *n,
5975     void **cookie)
5976 {
5977 	struct memseg *seg = (struct memseg *)*cookie;
5978 	page_t *new_pp;
5979 	ulong_t cnt;
5980 	pfn_t pfn;
5981
5982
5983 	/*
5984 	 * Get the count of page_t's to skip based on the page size.
5985 	 */
5986 	ASSERT(pp != NULL);
5987 	if (pp->p_szc == 0) {
5988 		cnt = 1;
5989 	} else {
5990 		pfn = page_pptonum(pp);
5991 		cnt = page_get_pagecnt(pp->p_szc);
5992 		cnt -= pfn & (cnt - 1);
5993 	}
5994 	*n += cnt;
5995 	new_pp = pp + cnt;
5996
5997 	/*
5998 	 * Catch if we went past the end of the current memory segment. If so,
5999 	 * just move to the next segment with pages.
6000 	 */
6001 	if (new_pp >= seg->epages || seg->pages_base == seg->pages_end) {
6002 		do {
6003 			seg = seg->next;
6004 			if (seg == NULL)
6005 				seg = memsegs;
6006 		} while (seg->pages_base == seg->pages_end);
6007 		new_pp = seg->pages;
6008 		*cookie = (void *)seg;
6009 	}
6010
6011 	return (new_pp);
6012 }
6013
6014
6015 /*
6016  * Returns the next page in the list. Note: this function wraps
6017  * to the first page in the list upon reaching the end
6018  * of the list. Callers should be aware of this fact.
6019  */
6020
6021 /* We should change this to be a #define */
6022
6023 page_t *
6024 page_next(page_t *pp)
6025 {
6026 	return (page_nextn(pp, 1));
6027 }
6028
6029 page_t *
6030 page_first()
6031 {
6032 	return ((page_t *)memsegs->pages);
6033 }
6034
6035
6036 /*
6037  * This routine is called at boot with the initial memory configuration
6038  * and when memory is added or removed.
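 *
 * Each memseg_hash slot covers a fixed range of mhash_per_slot pfns;
 * MEMSEG_PFN_HASH() maps a pfn to its slot, and each slot is pointed at
 * the memseg with the lowest pages_base overlapping its range so that
 * page_numtopp_nolock() has a good starting point for its search.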
6039  */
6040 void
6041 build_pfn_hash()
6042 {
6043 	pfn_t cur;
6044 	pgcnt_t index;
6045 	struct memseg *pseg;
6046 	int i;
6047
6048 	/*
6049 	 * Clear memseg_hash array.
6050 	 * Since memory add/delete is designed to operate concurrently
6051 	 * with normal operation, the hash rebuild must be able to run
6052 	 * concurrently with page_numtopp_nolock(). To support this
6053 	 * functionality, assignments to memseg_hash array members must
6054 	 * be done atomically.
6055 	 *
6056 	 * NOTE: bzero() does not currently guarantee this for kernel
6057 	 * threads, and cannot be used here.
6058 	 */
6059 	for (i = 0; i < N_MEM_SLOTS; i++)
6060 		memseg_hash[i] = NULL;
6061
6062 	hat_kpm_mseghash_clear(N_MEM_SLOTS);
6063
6064 	/*
6065 	 * Physmax is the last valid pfn.
6066 	 */
6067 	mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT;
6068 	for (pseg = memsegs; pseg != NULL; pseg = pseg->next) {
6069 		index = MEMSEG_PFN_HASH(pseg->pages_base);
6070 		cur = pseg->pages_base;
6071 		do {
6072 			if (index >= N_MEM_SLOTS)
6073 				index = MEMSEG_PFN_HASH(cur);
6074
6075 			if (memseg_hash[index] == NULL ||
6076 			    memseg_hash[index]->pages_base > pseg->pages_base) {
6077 				memseg_hash[index] = pseg;
6078 				hat_kpm_mseghash_update(index, pseg);
6079 			}
6080 			cur += mhash_per_slot;
6081 			index++;
6082 		} while (cur < pseg->pages_end);
6083 	}
6084 }
6085
6086 /*
6087  * Return the pagenum for the pp.
6088  */
6089 pfn_t
6090 page_pptonum(page_t *pp)
6091 {
6092 	return (pp->p_pagenum);
6093 }
6094
6095 /*
6096  * Interface to the referenced, modified, etc. bits
6097  * in the PSM part of the page struct
6098  * when no locking is desired.
6099  */
6100 void
6101 page_set_props(page_t *pp, uint_t flags)
6102 {
6103 	ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0);
6104 	pp->p_nrm |= (uchar_t)flags;
6105 }
6106
6107 void
6108 page_clr_all_props(page_t *pp)
6109 {
6110 	pp->p_nrm = 0;
6111 }
6112
6113 /*
6114  * Clear p_lckcnt and p_cowcnt, adjusting availrmem if required.
6115  */
6116 int
6117 page_clear_lck_cow(page_t *pp, int adjust)
6118 {
6119 	int f_amount;
6120
6121 	ASSERT(PAGE_EXCL(pp));
6122
6123 	/*
6124 	 * The page_struct_lock need not be acquired here since
6125 	 * we require that the caller hold the page exclusively locked.
6126 	 */
6127 	f_amount = 0;
6128 	if (pp->p_lckcnt) {
6129 		f_amount = 1;
6130 		pp->p_lckcnt = 0;
6131 	}
6132 	if (pp->p_cowcnt) {
6133 		f_amount += pp->p_cowcnt;
6134 		pp->p_cowcnt = 0;
6135 	}
6136
6137 	if (adjust && f_amount) {
6138 		mutex_enter(&freemem_lock);
6139 		availrmem += f_amount;
6140 		mutex_exit(&freemem_lock);
6141 	}
6142
6143 	return (f_amount);
6144 }
6145
6146 /*
6147  * The following functions are called from free_vp_pages()
6148  * for an inexact estimate of a newly freed page...
6149  */
6150 ulong_t
6151 page_share_cnt(page_t *pp)
6152 {
6153 	return (hat_page_getshare(pp));
6154 }
6155
6156 int
6157 page_isshared(page_t *pp)
6158 {
6159 	return (hat_page_checkshare(pp, 1));
6160 }
6161
6162 int
6163 page_isfree(page_t *pp)
6164 {
6165 	return (PP_ISFREE(pp));
6166 }
6167
6168 int
6169 page_isref(page_t *pp)
6170 {
6171 	return (hat_page_getattr(pp, P_REF));
6172 }
6173
6174 int
6175 page_ismod(page_t *pp)
6176 {
6177 	return (hat_page_getattr(pp, P_MOD));
6178 }
6179
6180 /*
6181  * The following code all currently relates to the page capture logic:
6182  *
6183  * This logic is used for cases where there is a desire to claim a certain
6184  * physical page in the system for the caller.  As it may not be possible
6185  * to capture the page immediately, the p_toxic bits are used in the page
6186  * structure to indicate that someone wants to capture this page. When the
6187  * page gets unlocked, the toxic flag will be noted and an attempt to capture
6188  * the page will be made. If it is successful, the original caller's callback
6189  * will be called with the page, to do with it what they please.
6190  *
6191  * There is also an async thread which wakes up occasionally to attempt to
6192  * capture pages which have the capture bit set. All of the pages which need
6193  * to be captured asynchronously have been inserted into the
6194  * page_capture_hash and thus this thread walks that hash list. Items in the
6195  * hash have an expiration time, so this thread handles that as well by
6196  * removing the item from the hash if it has expired.
6197  *
6198  * Some important things to note are:
6199  * - if the PR_CAPTURE bit is set on a page, then the page is in the
6200  *   page_capture_hash. The page_capture_hash_head.pchh_mutex is needed
6201  *   to set and clear this bit, and while the lock is held is the only time
6202  *   you can add or remove an entry from the hash.
6203  * - the PR_CAPTURE bit can only be set and cleared while holding the
6204  *   page_capture_hash_head.pchh_mutex.
6205  * - the t_flag field of the thread struct is used with the T_CAPTURING
6206  *   flag to prevent recursion while dealing with large pages.
6207  * - pages which need to be retired never expire on the page_capture_hash.
6208  */
6209
6210 static void page_capture_thread(void);
6211 static kthread_t *pc_thread_id;
6212 kcondvar_t pc_cv;
6213 static kmutex_t pc_thread_mutex;
6214 static clock_t pc_thread_shortwait;
6215 static clock_t pc_thread_longwait;
6216 static int pc_thread_retry;
6217
6218 struct page_capture_callback pc_cb[PC_NUM_CALLBACKS];
6219
6220 /* Note that this is a circular linked list */
6221 typedef struct page_capture_hash_bucket {
6222 	page_t *pp;
6223 	uchar_t szc;
6224 	uchar_t pri;
6225 	uint_t flags;
6226 	clock_t expires;	/* lbolt at which this request expires. */
6227 	void *datap;		/* Cached data passed in for callback */
6228 	struct page_capture_hash_bucket *next;
6229 	struct page_capture_hash_bucket *prev;
6230 } page_capture_hash_bucket_t;
6231
6232 #define	PC_PRI_HI	0	/* capture now */
6233 #define	PC_PRI_LO	1	/* capture later */
6234 #define	PC_NUM_PRI	2
6235
6236 #define	PAGE_CAPTURE_PRIO(pp) (PP_ISRAF(pp) ? PC_PRI_LO : PC_PRI_HI)
6237
6238
6239 /*
6240  * Each hash bucket has its own mutex and two lists, which are:
6241  *   active (0): represents requests which have not been processed by
6242  *               the page_capture async thread yet.
6243  *   walked (1): represents requests which have been processed by the
6244  *               page_capture async thread within its given walk of this bucket.
6245  *
6246  * These are all needed so that we can synchronize all async page_capture
6247  * events. When the async thread moves to a new bucket, it will append the
6248  * walked list to the active list and walk each item one at a time, moving it
6249  * from the active list to the walked list. Thus if there is an async request
6250  * outstanding for a given page, it will always be in one of the two lists.
6251  * New requests will always be added to the active list.
6252  * If we are not able to capture a page before the request expires, we free
6253  * up the request structure, which indicates to page_capture that there is
6254  * no longer a need for the given page, and clear the PR_CAPTURE flag if
6255  * possible.
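 * (The expiration check happens during the async thread's walk of a
 * bucket: an expired entry is removed and freed there, except for page
 * retirement requests, which, as noted above, never expire.)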

/*
 * Each hash bucket will have its own mutex and two lists which are:
 * active (0):	represents requests which have not been processed by
 *		the page_capture async thread yet.
 * walked (1):	represents requests which have been processed by the
 *		page_capture async thread within its given walk of this
 *		bucket.
 *
 * These are all needed so that we can synchronize all async page_capture
 * events. When the async thread moves to a new bucket, it will append the
 * walked list to the active list and walk each item one at a time, moving it
 * from the active list to the walked list. Thus if there is an async request
 * outstanding for a given page, it will always be in one of the two lists.
 * New requests will always be added to the active list.
 * If we were not able to capture a page before the request expired, we'd
 * free up the request structure, which would indicate to page_capture that
 * there is no longer a need for the given page, and clear the PR_CAPTURE
 * flag if possible.
 */
typedef struct page_capture_hash_head {
	kmutex_t pchh_mutex;
	uint_t num_pages[PC_NUM_PRI];
	page_capture_hash_bucket_t lists[2]; /* sentinel nodes */
} page_capture_hash_head_t;

#ifdef DEBUG
#define	NUM_PAGE_CAPTURE_BUCKETS 4
#else
#define	NUM_PAGE_CAPTURE_BUCKETS 64
#endif

page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS];

/* for now use a very simple hash based upon the size of a page struct */
#define	PAGE_CAPTURE_HASH(pp)	\
	((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1)))

extern pgcnt_t swapfs_minfree;

int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap);

/*
 * A callback function is required for page capture requests.
 */
void
page_capture_register_callback(uint_t index, clock_t duration,
    int (*cb_func)(page_t *, void *, uint_t))
{
	ASSERT(pc_cb[index].cb_active == 0);
	ASSERT(cb_func != NULL);
	rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER);
	pc_cb[index].duration = duration;
	pc_cb[index].cb_func = cb_func;
	pc_cb[index].cb_active = 1;
	rw_exit(&pc_cb[index].cb_rwlock);
}

void
page_capture_unregister_callback(uint_t index)
{
	int i, j;
	struct page_capture_hash_bucket *bp1;
	struct page_capture_hash_bucket *bp2;
	struct page_capture_hash_bucket *head = NULL;
	uint_t flags = (1 << index);

	rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER);
	ASSERT(pc_cb[index].cb_active == 1);
	pc_cb[index].duration = 0;	/* Paranoia */
	pc_cb[index].cb_func = NULL;	/* Paranoia */
	pc_cb[index].cb_active = 0;
	rw_exit(&pc_cb[index].cb_rwlock);

	/*
	 * Just move all the entries to a private list which we can walk
	 * through without the need to hold any locks.
	 * No more requests can get added to the hash lists for this consumer
	 * as the cb_active field for the callback has been cleared.
	 */
	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		mutex_enter(&page_capture_hash[i].pchh_mutex);
		for (j = 0; j < 2; j++) {
			bp1 = page_capture_hash[i].lists[j].next;
			/* walk through all but first (sentinel) element */
			while (bp1 != &page_capture_hash[i].lists[j]) {
				bp2 = bp1;
				if (bp2->flags & flags) {
					bp1 = bp2->next;
					bp1->prev = bp2->prev;
					bp2->prev->next = bp1;
					bp2->next = head;
					head = bp2;
					/*
					 * Clear the PR_CAPTURE bit as we
					 * hold appropriate locks here.
					 */
					page_clrtoxic(head->pp, PR_CAPTURE);
					page_capture_hash[i].
					    num_pages[bp2->pri]--;
					continue;
				}
				bp1 = bp1->next;
			}
		}
		mutex_exit(&page_capture_hash[i].pchh_mutex);
	}

	while (head != NULL) {
		bp1 = head;
		head = head->next;
		kmem_free(bp1, sizeof (*bp1));
	}
}
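
/*
 * A sketch of how a hypothetical consumer would use the two functions above
 * (PC_MYCONSUMER and my_capture_cb are invented names; the slot index and
 * flag bit must line up as described in the sketch further above). The
 * duration is in clock ticks, and a duration of -1 is treated by
 * page_capture_add_hash() below as a request which never expires:
 *
 *	static int
 *	my_capture_cb(page_t *pp, void *datap, uint_t flags)
 *	{
 *		(consume pp; return 0 on success, or a negative value to
 *		have an async request retried later)
 *		return (0);
 *	}
 *
 *	page_capture_register_callback(PC_MYCONSUMER, 60 * hz, my_capture_cb);
 *	...
 *	page_capture_unregister_callback(PC_MYCONSUMER);
 */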

/*
 * Find pp in the active list and move it to the walked list if it
 * exists.
 * Note that most often pp should be at the front of the active list
 * as it is the page currently in use, so no other optimization is
 * attempted on this linked list.
 * Returns 1 on a successful move or 0 if the page could not be found.
 */
static int
page_capture_move_to_walked(page_t *pp)
{
	page_capture_hash_bucket_t *bp;
	int index;

	index = PAGE_CAPTURE_HASH(pp);

	mutex_enter(&page_capture_hash[index].pchh_mutex);
	bp = page_capture_hash[index].lists[0].next;
	while (bp != &page_capture_hash[index].lists[0]) {
		if (bp->pp == pp) {
			/* Remove from old list */
			bp->next->prev = bp->prev;
			bp->prev->next = bp->next;

			/* Add to new list */
			bp->next = page_capture_hash[index].lists[1].next;
			bp->prev = &page_capture_hash[index].lists[1];
			page_capture_hash[index].lists[1].next = bp;
			bp->next->prev = bp;

			/*
			 * There is a small window in which a page on the
			 * free list can be retired while being allocated,
			 * before P_RAF is set on it. Such a page may have
			 * been entered as a high priority request instead
			 * of a low priority one, so recompute the priority
			 * here.
			 */
			page_capture_hash[index].num_pages[bp->pri]--;
			bp->pri = PAGE_CAPTURE_PRIO(pp);
			page_capture_hash[index].num_pages[bp->pri]++;
			mutex_exit(&page_capture_hash[index].pchh_mutex);
			return (1);
		}
		bp = bp->next;
	}
	mutex_exit(&page_capture_hash[index].pchh_mutex);
	return (0);
}
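
/*
 * The bucket lists are circular and doubly linked, with lists[0] and
 * lists[1] acting as sentinel nodes, so the unlink and insert-at-head
 * operations used above (and throughout the rest of this file) never have
 * to special-case an empty list. In sketch form, for a sentinel l:
 *
 *	(unlink bp from whichever list it is on)
 *	bp->next->prev = bp->prev;
 *	bp->prev->next = bp->next;
 *
 *	(insert bp at the head of the list headed by l)
 *	bp->next = l->next;
 *	bp->prev = l;
 *	l->next->prev = bp;
 *	l->next = bp;
 */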

/*
 * Add a new entry to the page capture hash. The only case where a new
 * entry is not added is when the page capture consumer is no longer
 * registered. In this case we'll silently not add the page to the hash.
 * We know that page retire will always be registered for the case where
 * we are currently unretiring a page and thus there are no conflicts.
 */
static void
page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap)
{
	page_capture_hash_bucket_t *bp1;
	page_capture_hash_bucket_t *bp2;
	int index;
	int cb_index;
	int i;
	uchar_t pri;
#ifdef DEBUG
	page_capture_hash_bucket_t *tp1;
	int l;
#endif

	ASSERT(!(flags & CAPTURE_ASYNC));

	bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP);

	bp1->pp = pp;
	bp1->szc = szc;
	bp1->flags = flags;
	bp1->datap = datap;

	for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
		if ((flags >> cb_index) & 1) {
			break;
		}
	}

	ASSERT(cb_index != PC_NUM_CALLBACKS);

	rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER);
	if (pc_cb[cb_index].cb_active) {
		if (pc_cb[cb_index].duration == -1) {
			bp1->expires = (clock_t)-1;
		} else {
			bp1->expires = ddi_get_lbolt() +
			    pc_cb[cb_index].duration;
		}
	} else {
		/* There's no callback registered so don't add to the hash */
		rw_exit(&pc_cb[cb_index].cb_rwlock);
		kmem_free(bp1, sizeof (*bp1));
		return;
	}

	index = PAGE_CAPTURE_HASH(pp);

	/*
	 * Only allow the capture flag to be modified under this mutex.
	 * Prevents multiple entries for the same page from being added.
	 */
	mutex_enter(&page_capture_hash[index].pchh_mutex);

	/*
	 * if not already on the hash, set capture bit and add to the hash
	 */
	if (!(pp->p_toxic & PR_CAPTURE)) {
#ifdef DEBUG
		/* Check for duplicate entries */
		for (l = 0; l < 2; l++) {
			tp1 = page_capture_hash[index].lists[l].next;
			while (tp1 != &page_capture_hash[index].lists[l]) {
				if (tp1->pp == pp) {
					panic("page pp 0x%p already on hash "
					    "at 0x%p\n",
					    (void *)pp, (void *)tp1);
				}
				tp1 = tp1->next;
			}
		}

#endif
		page_settoxic(pp, PR_CAPTURE);
		pri = PAGE_CAPTURE_PRIO(pp);
		bp1->pri = pri;
		bp1->next = page_capture_hash[index].lists[0].next;
		bp1->prev = &page_capture_hash[index].lists[0];
		bp1->next->prev = bp1;
		page_capture_hash[index].lists[0].next = bp1;
		page_capture_hash[index].num_pages[pri]++;
		if (flags & CAPTURE_RETIRE) {
			page_retire_incr_pend_count(datap);
		}
		mutex_exit(&page_capture_hash[index].pchh_mutex);
		rw_exit(&pc_cb[cb_index].cb_rwlock);
		cv_signal(&pc_cv);
		return;
	}

	/*
	 * A page retire request will replace any other request.
	 * A second physmem request which is for a different process than
	 * the currently registered one will be dropped as there is
	 * no way to hold the private data for both calls.
	 * In the future, once there are more callers, this will have to
	 * be worked out better as there needs to be private storage for
	 * at least each type of caller (maybe have datap be an array of
	 * void pointers so that we can index based upon the caller's index).
	 */

	/* walk hash list to update expire time */
	for (i = 0; i < 2; i++) {
		bp2 = page_capture_hash[index].lists[i].next;
		while (bp2 != &page_capture_hash[index].lists[i]) {
			if (bp2->pp == pp) {
				if (flags & CAPTURE_RETIRE) {
					if (!(bp2->flags & CAPTURE_RETIRE)) {
						page_retire_incr_pend_count(
						    datap);
						bp2->flags = flags;
						bp2->expires = bp1->expires;
						bp2->datap = datap;
					}
				} else {
					ASSERT(flags & CAPTURE_PHYSMEM);
					if (!(bp2->flags & CAPTURE_RETIRE) &&
					    (datap == bp2->datap)) {
						bp2->expires = bp1->expires;
					}
				}
				mutex_exit(&page_capture_hash[index].
				    pchh_mutex);
				rw_exit(&pc_cb[cb_index].cb_rwlock);
				kmem_free(bp1, sizeof (*bp1));
				return;
			}
			bp2 = bp2->next;
		}
	}

	/*
	 * The PR_CAPTURE flag is protected by the page_capture_hash mutexes,
	 * so it is either set or not set and cannot change while we hold the
	 * mutex above. Reaching here means the flag was set but no matching
	 * hash entry was found, which should never happen.
	 */
	panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n",
	    (void *)pp);
}
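
/*
 * Expiration times are absolute lbolt values computed above as
 * ddi_get_lbolt() + duration, with -1 meaning "never expires" (retire
 * requests use this; see the block comment at the top of this section).
 * A request is stale exactly when the test below is true;
 * page_capture_take_action() and page_capture_async() both use this form:
 *
 *	if (ddi_get_lbolt() > bp->expires && bp->expires != -1) {
 *		(request expired; free it and drop the PR_CAPTURE bit)
 *	}
 */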

/*
 * We have a page in our hands, so let's try to make it ours by turning
 * it into a clean page, as if it had just come off the freelists.
 *
 * Returns 0 on success, with the page still EXCL locked.
 * On failure, the page will be unlocked, and returns EAGAIN.
 */
static int
page_capture_clean_page(page_t *pp)
{
	page_t *newpp;
	int skip_unlock = 0;
	spgcnt_t count;
	page_t *tpp;
	int ret = 0;
	int extra;

	ASSERT(PAGE_EXCL(pp));
	ASSERT(!PP_RETIRED(pp));
	ASSERT(curthread->t_flag & T_CAPTURING);

	if (PP_ISFREE(pp)) {
		if (!page_reclaim(pp, NULL)) {
			skip_unlock = 1;
			ret = EAGAIN;
			goto cleanup;
		}
		ASSERT(pp->p_szc == 0);
		if (pp->p_vnode != NULL) {
			/*
			 * Since this page came from the
			 * cachelist, we must destroy the
			 * old vnode association.
			 */
			page_hashout(pp, NULL);
		}
		goto cleanup;
	}

	/*
	 * If we know page_relocate will fail, skip it.
	 * It could still fail due to a UE on another page but we
	 * can't do anything about that.
	 */
	if (pp->p_toxic & PR_UE) {
		goto skip_relocate;
	}

	/*
	 * It's possible that pages cannot have a vnode as fsflush comes
	 * through and cleans up these pages. It's ugly but that's how it is.
	 */
	if (pp->p_vnode == NULL) {
		goto skip_relocate;
	}

	/*
	 * The page was not free, so let's try to relocate it.
	 * page_relocate only works with root pages, so if this is not a root
	 * page, we need to demote it to try and relocate it.
	 * Unfortunately this is the best we can do right now.
	 */
	newpp = NULL;
	if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) {
		if (page_try_demote_pages(pp) == 0) {
			ret = EAGAIN;
			goto cleanup;
		}
	}
	ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL);
	if (ret == 0) {
		page_t *npp;
		/* unlock the new page(s) */
		while (count-- > 0) {
			ASSERT(newpp != NULL);
			npp = newpp;
			page_sub(&newpp, npp);
			page_unlock(npp);
		}
		ASSERT(newpp == NULL);
		/*
		 * Check to see if the page we have is too large.
		 * If so, demote it freeing up the extra pages.
		 */
		if (pp->p_szc > 0) {
			/* For now demote extra pages to szc == 0 */
			extra = page_get_pagecnt(pp->p_szc) - 1;
			while (extra > 0) {
				tpp = pp->p_next;
				page_sub(&pp, tpp);
				tpp->p_szc = 0;
				page_free(tpp, 1);
				extra--;
			}
			/* Make sure to set our page to szc 0 as well */
			ASSERT(pp->p_next == pp && pp->p_prev == pp);
			pp->p_szc = 0;
		}
		goto cleanup;
	} else if (ret == EIO) {
		ret = EAGAIN;
		goto cleanup;
	} else {
		/*
		 * Need to reset the return value as we failed to relocate
		 * the page, but that does not mean that some of the next
		 * steps will not work.
		 */
		ret = 0;
	}

skip_relocate:

	if (pp->p_szc > 0) {
		if (page_try_demote_pages(pp) == 0) {
			ret = EAGAIN;
			goto cleanup;
		}
	}

	ASSERT(pp->p_szc == 0);

	if (hat_ismod(pp)) {
		ret = EAGAIN;
		goto cleanup;
	}
	if (PP_ISKAS(pp)) {
		ret = EAGAIN;
		goto cleanup;
	}
	if (pp->p_lckcnt || pp->p_cowcnt) {
		ret = EAGAIN;
		goto cleanup;
	}

	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
	ASSERT(!hat_page_is_mapped(pp));

	if (hat_ismod(pp)) {
		/*
		 * This is a semi-odd case as the page is now modified but not
		 * mapped as we just unloaded the mappings above.
		 */
		ret = EAGAIN;
		goto cleanup;
	}
	if (pp->p_vnode != NULL) {
		page_hashout(pp, NULL);
	}

	/*
	 * At this point, the page should be in a clean state and
	 * we can do whatever we want with it.
	 */

cleanup:
	if (ret != 0) {
		if (!skip_unlock) {
			page_unlock(pp);
		}
	} else {
		ASSERT(pp->p_szc == 0);
		ASSERT(PAGE_EXCL(pp));

		pp->p_next = pp;
		pp->p_prev = pp;
	}
	return (ret);
}
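
/*
 * Sketch of the contract this function gives its caller (see
 * page_itrycapture() below): on 0 the caller owns an EXCL-locked, szc 0
 * page; on failure the lock has already been dropped:
 *
 *	if (page_capture_clean_page(pp) == 0) {
 *		(pp is still EXCL locked with p_szc == 0)
 *	} else {
 *		(EAGAIN: pp has been unlocked for us; queue or retry)
 *	}
 */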

/*
 * Various callers of page_trycapture() can have different restrictions upon
 * what memory they have access to.
 * Returns 0 on success, with the following error codes on failure:
 *	EPERM - The requested page is long term locked, and thus repeated
 *		requests to capture this page will likely fail.
 *	ENOMEM - There was not enough free memory in the system to safely
 *		map the requested page.
 *	ENOENT - The requested page was inside the kernel cage, and the
 *		CAPTURE_GET_CAGE flag was not set.
 */
int
page_capture_pre_checks(page_t *pp, uint_t flags)
{
	ASSERT(pp != NULL);

#if defined(__sparc)
	if (pp->p_vnode == &promvp) {
		return (EPERM);
	}

	if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE) &&
	    (flags & CAPTURE_PHYSMEM)) {
		return (ENOENT);
	}

	if (PP_ISNORELOCKERNEL(pp)) {
		return (EPERM);
	}
#else
	if (PP_ISKAS(pp)) {
		return (EPERM);
	}
#endif	/* __sparc */

	/* only physmem currently has the restrictions checked below */
	if (!(flags & CAPTURE_PHYSMEM)) {
		return (0);
	}

	if (availrmem < swapfs_minfree) {
		/*
		 * We won't try to capture this page as we are
		 * running low on memory.
		 */
		return (ENOMEM);
	}
	return (0);
}
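
/*
 * For illustration, a caller can use the codes above to distinguish
 * permanent from transient refusals; EPERM will keep failing for this
 * page, while ENOMEM may clear up as memory is freed:
 *
 *	switch (page_capture_pre_checks(pp, flags)) {
 *	case 0:
 *		(go on to lock and capture the page)
 *		break;
 *	case EPERM:
 *		(give up on this page entirely)
 *		break;
 *	default:
 *		(ENOMEM/ENOENT: possibly retry later or with other flags)
 *		break;
 *	}
 */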

/*
 * Once we have a page in our mitts, go ahead and complete the capture
 * operation.
 * Returns 1 on failure where the page is no longer needed.
 * Returns 0 on success.
 * Returns -1 if there was a transient failure.
 * Failure cases must release the SE_EXCL lock on pp (usually via page_free).
 */
int
page_capture_take_action(page_t *pp, uint_t flags, void *datap)
{
	int cb_index;
	int ret = 0;
	page_capture_hash_bucket_t *bp1;
	page_capture_hash_bucket_t *bp2;
	int index;
	int found = 0;
	int i;

	ASSERT(PAGE_EXCL(pp));
	ASSERT(curthread->t_flag & T_CAPTURING);

	for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
		if ((flags >> cb_index) & 1) {
			break;
		}
	}
	ASSERT(cb_index < PC_NUM_CALLBACKS);

	/*
	 * Remove the entry from the page_capture hash, but don't free it yet
	 * as we may need to put it back.
	 * Since we own the page at this point in time, we should find it
	 * in the hash if this is an ASYNC call. If we don't, it's likely
	 * that the page_capture_async() thread decided that this request
	 * had expired, in which case we just continue on.
	 */
	if (flags & CAPTURE_ASYNC) {

		index = PAGE_CAPTURE_HASH(pp);

		mutex_enter(&page_capture_hash[index].pchh_mutex);
		for (i = 0; i < 2 && !found; i++) {
			bp1 = page_capture_hash[index].lists[i].next;
			while (bp1 != &page_capture_hash[index].lists[i]) {
				if (bp1->pp == pp) {
					bp1->next->prev = bp1->prev;
					bp1->prev->next = bp1->next;
					page_capture_hash[index].
					    num_pages[bp1->pri]--;
					page_clrtoxic(pp, PR_CAPTURE);
					found = 1;
					break;
				}
				bp1 = bp1->next;
			}
		}
		mutex_exit(&page_capture_hash[index].pchh_mutex);
	}

	/* Synchronize with the unregister func. */
	rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER);
	if (!pc_cb[cb_index].cb_active) {
		page_free(pp, 1);
		rw_exit(&pc_cb[cb_index].cb_rwlock);
		if (found) {
			kmem_free(bp1, sizeof (*bp1));
		}
		return (1);
	}

	/*
	 * We need to remove the entry from the page capture hash and turn off
	 * the PR_CAPTURE bit before calling the callback. We'll need to cache
	 * the entry here, and then based upon the return value, cleanup
	 * appropriately or re-add it to the hash, making sure that someone
	 * else hasn't already done so.
	 * It should be rare for the callback to fail and thus it's ok for
	 * the failure path to be a bit complicated as the success path is
	 * cleaner and the locking rules are easier to follow.
	 */

	ret = pc_cb[cb_index].cb_func(pp, datap, flags);

	rw_exit(&pc_cb[cb_index].cb_rwlock);

	/*
	 * If this was an ASYNC request, we need to cleanup the hash if the
	 * callback was successful or if the request was no longer valid.
	 * For non-ASYNC requests, we just return the failure and the caller
	 * will take care of adding the request to the hash.
	 * Note also that the callback itself is responsible for the page
	 * at this point in time in terms of locking ...  The most common
	 * case for the failure path should just be a page_free.
	 */
	if (ret >= 0) {
		if (found) {
			if (bp1->flags & CAPTURE_RETIRE) {
				page_retire_decr_pend_count(datap);
			}
			kmem_free(bp1, sizeof (*bp1));
		}
		return (ret);
	}
	if (!found) {
		return (ret);
	}

	ASSERT(flags & CAPTURE_ASYNC);

	/*
	 * Check the expiration time first, as we can just free the request
	 * up if it has expired.
	 */
	if (ddi_get_lbolt() > bp1->expires && bp1->expires != -1) {
		kmem_free(bp1, sizeof (*bp1));
		return (ret);
	}

	/*
	 * The callback failed and there used to be an entry in the hash for
	 * this page, so we need to add it back to the hash.
	 */
	mutex_enter(&page_capture_hash[index].pchh_mutex);
	if (!(pp->p_toxic & PR_CAPTURE)) {
		/* just add bp1 back to head of walked list */
		page_settoxic(pp, PR_CAPTURE);
		bp1->next = page_capture_hash[index].lists[1].next;
		bp1->prev = &page_capture_hash[index].lists[1];
		bp1->next->prev = bp1;
		bp1->pri = PAGE_CAPTURE_PRIO(pp);
		page_capture_hash[index].lists[1].next = bp1;
		page_capture_hash[index].num_pages[bp1->pri]++;
		mutex_exit(&page_capture_hash[index].pchh_mutex);
		return (ret);
	}

	/*
	 * Otherwise a new capture request was added to the list, and we
	 * need to make sure that our original data is represented if
	 * appropriate.
	 */
	for (i = 0; i < 2; i++) {
		bp2 = page_capture_hash[index].lists[i].next;
		while (bp2 != &page_capture_hash[index].lists[i]) {
			if (bp2->pp == pp) {
				if (bp1->flags & CAPTURE_RETIRE) {
					if (!(bp2->flags & CAPTURE_RETIRE)) {
						bp2->szc = bp1->szc;
						bp2->flags = bp1->flags;
						bp2->expires = bp1->expires;
						bp2->datap = bp1->datap;
					}
				} else {
					ASSERT(bp1->flags & CAPTURE_PHYSMEM);
					if (!(bp2->flags & CAPTURE_RETIRE)) {
						bp2->szc = bp1->szc;
						bp2->flags = bp1->flags;
						bp2->expires = bp1->expires;
						bp2->datap = bp1->datap;
					}
				}
				page_capture_hash[index].num_pages[bp2->pri]--;
				bp2->pri = PAGE_CAPTURE_PRIO(pp);
				page_capture_hash[index].num_pages[bp2->pri]++;
				mutex_exit(&page_capture_hash[index].
				    pchh_mutex);
				kmem_free(bp1, sizeof (*bp1));
				return (ret);
			}
			bp2 = bp2->next;
		}
	}
	panic("PR_CAPTURE set but not on hash for pp 0x%p\n", (void *)pp);
	/*NOTREACHED*/
}
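
/*
 * How page_itrycapture() below consumes the return values of the function
 * above, in sketch form (assuming an async request, where the hash entry is
 * re-queued by page_capture_take_action() itself on a transient failure):
 *
 *	ret = page_capture_take_action(pp, flags, datap);
 *	if (ret == 0)
 *		(captured; the callback now owns the page)
 *	else if (ret == 1)
 *		(the page is no longer wanted; nothing more to do)
 *	else	(ret == -1)
 *		(transient failure; the request stays on the hash)
 *
 * In every failure case the SE_EXCL lock has already been dropped.
 */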

/*
 * Try to capture the given page for the caller specified in the flags
 * parameter. The page will either be captured and handed over to the
 * appropriate callback, or will be queued up in the page capture hash
 * to be captured asynchronously.
 * If the current request is due to an async capture, the page must be
 * exclusively locked before calling this function.
 * Currently szc must be 0 but in the future this should be expandable to
 * other page sizes.
 * Returns 0 on success, with the following error codes on failure:
 *	EPERM - The requested page is long term locked, and thus repeated
 *		requests to capture this page will likely fail.
 *	ENOMEM - There was not enough free memory in the system to safely
 *		map the requested page.
 *	ENOENT - The requested page was inside the kernel cage, and the
 *		CAPTURE_GET_CAGE flag was not set.
 *	EAGAIN - The requested page could not be captured at this point in
 *		time but future requests will likely work.
 *	EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED
 *		flag was not set.
 */
int
page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
{
	int ret;
	int cb_index;

	if (flags & CAPTURE_ASYNC) {
		ASSERT(PAGE_EXCL(pp));
		goto async;
	}

	/* Make sure there's enough availrmem ... */
	ret = page_capture_pre_checks(pp, flags);
	if (ret != 0) {
		return (ret);
	}

	if (!page_trylock(pp, SE_EXCL)) {
		for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
			if ((flags >> cb_index) & 1) {
				break;
			}
		}
		ASSERT(cb_index < PC_NUM_CALLBACKS);
		ret = EAGAIN;
		/* Special case for retired pages */
		if (PP_RETIRED(pp)) {
			if (flags & CAPTURE_GET_RETIRED) {
				if (!page_unretire_pp(pp, PR_UNR_TEMP)) {
					/*
					 * Need to set capture bit and add to
					 * hash so that the page will be
					 * retired when freed.
					 */
					page_capture_add_hash(pp, szc,
					    CAPTURE_RETIRE, NULL);
					ret = 0;
					goto own_page;
				}
			} else {
				return (EBUSY);
			}
		}
		page_capture_add_hash(pp, szc, flags, datap);
		return (ret);
	}

async:
	ASSERT(PAGE_EXCL(pp));

	/* For physmem async requests, check that availrmem is sane */
	if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) ==
	    (CAPTURE_ASYNC | CAPTURE_PHYSMEM) &&
	    (availrmem < swapfs_minfree)) {
		page_unlock(pp);
		return (ENOMEM);
	}

	ret = page_capture_clean_page(pp);

	if (ret != 0) {
		/* We failed to get the page, so let's add it to the hash */
		if (!(flags & CAPTURE_ASYNC)) {
			page_capture_add_hash(pp, szc, flags, datap);
		}
		return (ret);
	}

own_page:
	ASSERT(PAGE_EXCL(pp));
	ASSERT(pp->p_szc == 0);

	/* Call the callback */
	ret = page_capture_take_action(pp, flags, datap);

	if (ret == 0) {
		return (0);
	}

	/*
	 * Note that in the failure cases from page_capture_take_action, the
	 * EXCL lock will have already been dropped.
	 */
	if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) {
		page_capture_add_hash(pp, szc, flags, datap);
	}
	return (EAGAIN);
}

int
page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
{
	int ret;

	curthread->t_flag |= T_CAPTURING;
	ret = page_itrycapture(pp, szc, flags, datap);
	curthread->t_flag &= ~T_CAPTURING; /* safe; we set it above */
	return (ret);
}
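
/*
 * A minimal sketch of a synchronous capture attempt (CAPTURE_RETIRE is a
 * real flag from this file; the datap contents are consumer-defined and
 * invented here):
 *
 *	ret = page_trycapture(pp, 0, CAPTURE_RETIRE, datap);
 *	if (ret == EAGAIN) {
 *		(the request was queued on the page_capture_hash and will
 *		be retried when pp is unlocked or by the async thread)
 *	}
 *
 * szc must currently be 0, per the block comment above page_itrycapture().
 */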

/*
 * When unlocking a page which has the PR_CAPTURE bit set, this routine
 * gets called to try and capture the page.
 */
void
page_unlock_capture(page_t *pp)
{
	page_capture_hash_bucket_t *bp;
	int index;
	int i;
	uint_t szc;
	uint_t flags = 0;
	void *datap;
	kmutex_t *mp;
	extern vnode_t retired_pages;

	/*
	 * We need to protect against a possible deadlock here where we own
	 * the vnode page hash mutex and want to acquire it again, as there
	 * are locations in the code where we unlock a page while holding
	 * the mutex which can lead to the page being captured and eventually
	 * end up here. As we may be hashing out the old page and hashing into
	 * the retire vnode, we need to make sure we don't own them.
	 * Other callbacks that do hash operations also need to make sure
	 * that they do not currently own the vphm mutex before they hash a
	 * page into a vnode; otherwise there will be a panic.
	 */
	if (mutex_owned(page_vnode_mutex(&retired_pages))) {
		page_unlock_nocapture(pp);
		return;
	}
	if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) {
		page_unlock_nocapture(pp);
		return;
	}

	index = PAGE_CAPTURE_HASH(pp);

	mp = &page_capture_hash[index].pchh_mutex;
	mutex_enter(mp);
	for (i = 0; i < 2; i++) {
		bp = page_capture_hash[index].lists[i].next;
		while (bp != &page_capture_hash[index].lists[i]) {
			if (bp->pp == pp) {
				szc = bp->szc;
				flags = bp->flags | CAPTURE_ASYNC;
				datap = bp->datap;
				mutex_exit(mp);
				(void) page_trycapture(pp, szc, flags, datap);
				return;
			}
			bp = bp->next;
		}
	}

	/* Failed to find the page in the hash, so clear flags and unlock it */
	page_clrtoxic(pp, PR_CAPTURE);
	page_unlock(pp);

	mutex_exit(mp);
}

void
page_capture_init(void)
{
	int i;
	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		page_capture_hash[i].lists[0].next =
		    &page_capture_hash[i].lists[0];
		page_capture_hash[i].lists[0].prev =
		    &page_capture_hash[i].lists[0];
		page_capture_hash[i].lists[1].next =
		    &page_capture_hash[i].lists[1];
		page_capture_hash[i].lists[1].prev =
		    &page_capture_hash[i].lists[1];
	}

	pc_thread_shortwait = 23 * hz;
	pc_thread_longwait = 1201 * hz;
	pc_thread_retry = 3;
	mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pc_cv, NULL, CV_DEFAULT, NULL);
	pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}

/*
 * It is necessary to scrub any failing pages prior to reboot in order to
 * prevent a latent error trap from occurring on the next boot.
 */
void
page_retire_mdboot(void)
{
	page_t *pp;
	int i, j;
	page_capture_hash_bucket_t *bp;
	uchar_t pri;

	/* walk lists looking for pages to scrub */
	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		for (pri = 0; pri < PC_NUM_PRI; pri++) {
			if (page_capture_hash[i].num_pages[pri] != 0) {
				break;
			}
		}
		if (pri == PC_NUM_PRI)
			continue;

		mutex_enter(&page_capture_hash[i].pchh_mutex);

		for (j = 0; j < 2; j++) {
			bp = page_capture_hash[i].lists[j].next;
			while (bp != &page_capture_hash[i].lists[j]) {
				pp = bp->pp;
				if (PP_TOXIC(pp)) {
					if (page_trylock(pp, SE_EXCL)) {
						PP_CLRFREE(pp);
						pagescrub(pp, 0, PAGESIZE);
						page_unlock(pp);
					}
				}
				bp = bp->next;
			}
		}
		mutex_exit(&page_capture_hash[i].pchh_mutex);
	}
}

/*
 * Walk the page_capture_hash trying to capture pages and also clean up old
 * entries which have expired.
 */
void
page_capture_async(void)
{
	page_t *pp;
	int i;
	int ret;
	page_capture_hash_bucket_t *bp1, *bp2;
	uint_t szc;
	uint_t flags;
	void *datap;
	uchar_t pri;

	/* If there are outstanding pages to be captured, get to work */
	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		for (pri = 0; pri < PC_NUM_PRI; pri++) {
			if (page_capture_hash[i].num_pages[pri] != 0)
				break;
		}
		if (pri == PC_NUM_PRI)
			continue;

		/* Append list 1 to list 0 and then walk through list 0 */
		mutex_enter(&page_capture_hash[i].pchh_mutex);
		bp1 = &page_capture_hash[i].lists[1];
		bp2 = bp1->next;
		if (bp1 != bp2) {
			/* splice the walked list onto the active list */
			bp1->prev->next = page_capture_hash[i].lists[0].next;
			bp2->prev = &page_capture_hash[i].lists[0];
			page_capture_hash[i].lists[0].next->prev = bp1->prev;
			page_capture_hash[i].lists[0].next = bp2;
			/* leave the walked list's sentinel empty */
			bp1->next = bp1;
			bp1->prev = bp1;
		}

		/* list[1] will be empty now */

		bp1 = page_capture_hash[i].lists[0].next;
		while (bp1 != &page_capture_hash[i].lists[0]) {
			/* Check expiration time */
			if ((ddi_get_lbolt() > bp1->expires &&
			    bp1->expires != -1) ||
			    page_deleted(bp1->pp)) {
				page_capture_hash[i].lists[0].next = bp1->next;
				bp1->next->prev =
				    &page_capture_hash[i].lists[0];
				page_capture_hash[i].num_pages[bp1->pri]--;

				/*
				 * We can safely remove the PR_CAPTURE bit
				 * without holding the EXCL lock on the page
				 * as the PR_CAPTURE bit requires that the
				 * page_capture_hash[].pchh_mutex be held
				 * to modify it.
				 */
				page_clrtoxic(bp1->pp, PR_CAPTURE);
				mutex_exit(&page_capture_hash[i].pchh_mutex);
				kmem_free(bp1, sizeof (*bp1));
				mutex_enter(&page_capture_hash[i].pchh_mutex);
				bp1 = page_capture_hash[i].lists[0].next;
				continue;
			}
			pp = bp1->pp;
			szc = bp1->szc;
			flags = bp1->flags;
			datap = bp1->datap;
			mutex_exit(&page_capture_hash[i].pchh_mutex);
			if (page_trylock(pp, SE_EXCL)) {
				ret = page_trycapture(pp, szc,
				    flags | CAPTURE_ASYNC, datap);
			} else {
				ret = 1;	/* move to walked hash */
			}

			if (ret != 0) {
				/* Move to walked hash */
				(void) page_capture_move_to_walked(pp);
			}
			mutex_enter(&page_capture_hash[i].pchh_mutex);
			bp1 = page_capture_hash[i].lists[0].next;
		}

		mutex_exit(&page_capture_hash[i].pchh_mutex);
	}
}

/*
 * This function is called by the page_capture_thread, and is needed
 * in order to initiate aio cleanup, so that pages used in aio
 * will be unlocked and subsequently retired by page_capture_thread.
 */
static int
do_aio_cleanup(void)
{
	proc_t *procp;
	int (*aio_cleanup_dr_delete_memory)(proc_t *);
	int cleaned = 0;

	if (modload("sys", "kaio") == -1) {
		cmn_err(CE_WARN, "do_aio_cleanup: cannot load kaio");
		return (0);
	}
	/*
	 * We use the aio_cleanup_dr_delete_memory function to
	 * initiate the actual clean up; this function will wake
	 * up the per-process aio_cleanup_thread.
	 */
	aio_cleanup_dr_delete_memory = (int (*)(proc_t *))
	    modgetsymvalue("aio_cleanup_dr_delete_memory", 0);
	if (aio_cleanup_dr_delete_memory == NULL) {
		cmn_err(CE_WARN,
		    "aio_cleanup_dr_delete_memory not found in kaio");
		return (0);
	}
	mutex_enter(&pidlock);
	for (procp = practive; (procp != NULL); procp = procp->p_next) {
		mutex_enter(&procp->p_lock);
		if (procp->p_aio != NULL) {
			/* cleanup proc's outstanding kaio */
			cleaned += (*aio_cleanup_dr_delete_memory)(procp);
		}
		mutex_exit(&procp->p_lock);
	}
	mutex_exit(&pidlock);
	return (cleaned);
}

/*
 * helper function for page_capture_thread
 */
static void
page_capture_handle_outstanding(void)
{
	int ntry;

	/* Reap pages before attempting to capture pages */
	kmem_reap();

	if ((page_retire_pend_count() > page_retire_pend_kas_count()) &&
	    hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
		/*
		 * Note: Purging only for platforms that support
		 * ISM hat_pageunload() - mainly SPARC. On x86/x64
		 * platforms ISM pages are SE_SHARED locked until destroyed.
		 */

		/* disable and purge seg_pcache */
		(void) seg_p_disable();
		for (ntry = 0; ntry < pc_thread_retry; ntry++) {
			if (!page_retire_pend_count())
				break;
			if (do_aio_cleanup()) {
				/*
				 * allow the apps' cleanup threads
				 * to run
				 */
				delay(pc_thread_shortwait);
			}
			page_capture_async();
		}
		/* completed what can be done; reenable seg_pcache */
		seg_p_enable();
		return;
	}

	/*
	 * For kernel pages and/or unsupported HAT_DYNAMIC_ISM_UNMAP, reap
	 * and then attempt to capture.
	 */
	seg_preap();
	page_capture_async();
}

/*
 * The page_capture_thread loops forever, looking to see if there are
 * pages still waiting to be captured.
 */
static void
page_capture_thread(void)
{
	callb_cpr_t c;
	int i;
	int high_pri_pages;
	int low_pri_pages;
	clock_t timeout;

	CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr, "page_capture");

	mutex_enter(&pc_thread_mutex);
	for (;;) {
		high_pri_pages = 0;
		low_pri_pages = 0;
		for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
			high_pri_pages +=
			    page_capture_hash[i].num_pages[PC_PRI_HI];
			low_pri_pages +=
			    page_capture_hash[i].num_pages[PC_PRI_LO];
		}

		timeout = pc_thread_longwait;
		if (high_pri_pages != 0) {
			timeout = pc_thread_shortwait;
			page_capture_handle_outstanding();
		} else if (low_pri_pages != 0) {
			page_capture_async();
		}
		CALLB_CPR_SAFE_BEGIN(&c);
		(void) cv_reltimedwait(&pc_cv, &pc_thread_mutex,
		    timeout, TR_CLOCK_TICK);
		CALLB_CPR_SAFE_END(&c, &pc_thread_mutex);
	}
	/*NOTREACHED*/
}
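
/*
 * The pcf[] array stripes the free-page count across pcf_fanout buckets so
 * that concurrent allocators rarely contend on a single lock; PCF_INDEX()
 * (defined in platform code, typically derived from the current CPU) picks
 * the starting bucket and the walk below wraps around from there. A sketch
 * of the idea with invented names, for illustration only:
 *
 *	typedef struct stripe {
 *		kmutex_t sl_lock;
 *		uint_t sl_count;	(this stripe's share of the total)
 *	} stripe_t;
 *
 *	stripe_t stripes[NSTRIPES];	(NSTRIPES is a power of two)
 *
 *	i = cpu_index & (NSTRIPES - 1);	(preferred starting stripe)
 *
 * The cost is that one bucket can come up short while the global total
 * would have sufficed, which is why pcf_decrement_multiple() below exists
 * as the slow path.
 */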

/*
 * Attempt to locate a bucket that has enough pages to satisfy the request.
 * The initial check is done without the lock to avoid unneeded contention.
 * The function returns 1 if enough pages were found, else 0 if it could not
 * find enough pages in a bucket.
 */
static int
pcf_decrement_bucket(pgcnt_t npages)
{
	struct pcf *p;
	struct pcf *q;
	int i;

	p = &pcf[PCF_INDEX()];
	q = &pcf[pcf_fanout];
	for (i = 0; i < pcf_fanout; i++) {
		if (p->pcf_count > npages) {
			/*
			 * a good one to try.
			 */
			mutex_enter(&p->pcf_lock);
			if (p->pcf_count > npages) {
				p->pcf_count -= (uint_t)npages;
				/*
				 * freemem is not protected by any lock.
				 * Thus, we cannot have any assertion
				 * containing freemem here.
				 */
				freemem -= npages;
				mutex_exit(&p->pcf_lock);
				return (1);
			}
			mutex_exit(&p->pcf_lock);
		}
		p++;
		if (p >= q) {
			p = pcf;
		}
	}
	return (0);
}

/*
 * Arguments:
 *	pcftotal_ret:	If the value is not NULL and we have walked all the
 *			buckets but did not find enough pages then it will
 *			be set to the total number of pages in all the pcf
 *			buckets.
 *	npages:		Is the number of pages we have been requested to
 *			find.
 *	unlock:		If set to 0 we will leave the buckets locked if the
 *			requested number of pages are not found.
 *
 * Go and try to satisfy the page request from any number of buckets.
 * This can be a very expensive operation as we have to lock the buckets
 * we are checking (and keep them locked), starting at bucket 0.
 *
 * The function returns 1 if enough pages were found, else 0 if it could not
 * find enough pages in the buckets.
 */
static int
pcf_decrement_multiple(pgcnt_t *pcftotal_ret, pgcnt_t npages, int unlock)
{
	struct pcf *p;
	pgcnt_t pcftotal;
	int i;

	p = pcf;
	/* try to collect pages from several pcf bins */
	for (pcftotal = 0, i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		pcftotal += p->pcf_count;
		if (pcftotal >= npages) {
			/*
			 * Wow!  There are enough pages lying around
			 * to satisfy the request.  Do the accounting,
			 * drop the locks we acquired, and go back.
			 *
			 * freemem is not protected by any lock. So,
			 * we cannot have any assertion containing
			 * freemem.
			 */
			freemem -= npages;
			while (p >= pcf) {
				if (p->pcf_count <= npages) {
					npages -= p->pcf_count;
					p->pcf_count = 0;
				} else {
					p->pcf_count -= (uint_t)npages;
					npages = 0;
				}
				mutex_exit(&p->pcf_lock);
				p--;
			}
			ASSERT(npages == 0);
			return (1);
		}
		p++;
	}
	if (unlock) {
		/* failed to collect pages - release the locks */
		while (--p >= pcf) {
			mutex_exit(&p->pcf_lock);
		}
	}
	if (pcftotal_ret != NULL)
		*pcftotal_ret = pcftotal;
	return (0);
}