/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 * Copyright (c) 2015, 2016 by Delphix. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2021 Oxide Computer Company
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - physical page management.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vm.h>
#include <sys/vtrace.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/tnf_probe.h>
#include <sys/condvar_impl.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/strlog.h>
#include <sys/mman.h>
#include <sys/ontrap.h>
#include <sys/lgrp.h>
#include <sys/vfs.h>

#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/pvn.h>
#include <vm/seg_kmem.h>
#include <vm/vm_dep.h>
#include <sys/vm_usage.h>
#include <fs/fs_subr.h>
#include <sys/ddi.h>
#include <sys/modctl.h>

static pgcnt_t max_page_get;	/* max page_get request size in pages */
pgcnt_t total_pages = 0;	/* total number of pages (used by /proc) */

/*
 * freemem_lock protects all freemem variables:
 * availrmem.  It also protects the globals which track availrmem
 * changes for accurate kernel footprint calculation.
 * See below for an explanation of these globals.
 */
kmutex_t freemem_lock;
pgcnt_t availrmem;
pgcnt_t availrmem_initial;

/*
 * These globals track availrmem changes to get a more accurate
 * estimate of the kernel size.  Historically pp_kernel is used for
 * kernel size and is based on availrmem.  But availrmem is adjusted for
 * locked pages in the system not just for kernel locked pages.
 * These new counters will track the pages locked through segvn and
 * by explicit user locking.
 *
 * pages_locked : How many pages are locked because of user specified
 * locking through mlock or plock.
 *
 * pages_useclaim, pages_claimed : These two variables track the
 * claim adjustments because of the protection changes on a segvn segment.
 *
 * All these globals are protected by the same lock which protects availrmem.
 */
pgcnt_t pages_locked = 0;
pgcnt_t pages_useclaim = 0;
pgcnt_t pages_claimed = 0;


/*
 * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
 */
static kmutex_t	new_freemem_lock;
static uint_t	freemem_wait;	/* someone waiting for freemem */
static kcondvar_t freemem_cv;

/*
 * The logical page free list is maintained as two lists, the 'free'
 * and the 'cache' lists.
 * The free list contains those pages that should be reused first.
 *
 * The implementation of the lists is machine dependent.
 * page_get_freelist(), page_get_cachelist(),
 * page_list_sub(), and page_list_add()
 * form the interface to the machine dependent implementation.
 *
 * Pages with p_free set are on the cache list.
 * Pages with p_free and p_age set are on the free list.
 *
 * A page may be locked while on either list.
 */

/*
 * free list accounting stuff.
 *
 *
 * Spread out the value for the number of pages on the
 * page free and page cache lists.  If there is just one
 * value, then it must be under just one lock.
 * The lock contention and cache traffic are a real bother.
 *
 * When we acquire and then drop a single pcf lock
 * we can start in the middle of the array of pcf structures.
 * If we acquire more than one pcf lock at a time, we need to
 * start at the front to avoid deadlocking.
 *
 * pcf_count holds the number of pages in each pool.
 *
 * pcf_block is set when page_create_get_something() has asked the
 * PSM page freelist and page cachelist routines without specifying
 * a color and nothing came back.  This is used to block anything
 * else from moving pages from one list to the other while the
 * lists are searched again.  If a page is freed while pcf_block is
 * set, then pcf_reserve is incremented.  pcgs_unblock() takes care
 * of clearing pcf_block, doing the wakeups, etc.
 */

#define	MAX_PCF_FANOUT NCPU
static uint_t pcf_fanout = 1;	/* Will get changed at boot time */
static uint_t pcf_fanout_mask = 0;

struct pcf {
	kmutex_t	pcf_lock;	/* protects the structure */
	uint_t		pcf_count;	/* page count */
	uint_t		pcf_wait;	/* number of waiters */
	uint_t		pcf_block;	/* pcgs flag to page_free() */
	uint_t		pcf_reserve;	/* pages freed after pcf_block set */
	uint_t		pcf_fill[10];	/* to line up on the caches */
};

/*
 * PCF_INDEX hash needs to be dynamic (every so often the hash changes where
 * it will hash the cpu to).  This is done to prevent a drain condition
 * from happening.  This drain condition will occur when pcf_count decrement
 * occurs on cpu A and the increment of pcf_count always occurs on cpu B.  An
 * example of this shows up with device interrupts.  The dma buffer is
 * allocated by the cpu requesting the IO thus the pcf_count is decremented
 * based on that.  When the memory is returned by the interrupt thread, the
 * pcf_count will be incremented based on the cpu servicing the interrupt.
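 *
 * The PCF_INDEX() macro below therefore mixes a slowly changing random
 * component (randtick() >> 24) into the CPU's sequence id, so the bucket
 * a given CPU maps to drifts over time instead of staying fixed.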
 */
static struct pcf pcf[MAX_PCF_FANOUT];
#define	PCF_INDEX() ((int)(((long)CPU->cpu_seqid) + \
	(randtick() >> 24)) & (pcf_fanout_mask))

static int pcf_decrement_bucket(pgcnt_t);
static int pcf_decrement_multiple(pgcnt_t *, pgcnt_t, int);

kmutex_t	pcgs_lock;		/* serializes page_create_get_ */
kmutex_t	pcgs_cagelock;		/* serializes NOSLEEP cage allocs */
kmutex_t	pcgs_wait_lock;		/* used for delay in pcgs */
static kcondvar_t	pcgs_cv;	/* cv for delay in pcgs */

#ifdef VM_STATS

/*
 * No locks, but so what, they are only statistics.
 */

static struct page_tcnt {
	int	pc_free_cache;		/* free's into cache list */
	int	pc_free_dontneed;	/* free's with dontneed */
	int	pc_free_pageout;	/* free's from pageout */
	int	pc_free_free;		/* free's into free list */
	int	pc_free_pages;		/* free's into large page free list */
	int	pc_destroy_pages;	/* large page destroy's */
	int	pc_get_cache;		/* get's from cache list */
	int	pc_get_free;		/* get's from free list */
	int	pc_reclaim;		/* reclaim's */
	int	pc_abortfree;		/* abort's of free pages */
	int	pc_find_hit;		/* find's that find page */
	int	pc_find_miss;		/* find's that don't find page */
	int	pc_destroy_free;	/* # of free pages destroyed */
#define	PC_HASH_CNT	(4*PAGE_HASHAVELEN)
	int	pc_find_hashlen[PC_HASH_CNT+1];
	int	pc_addclaim_pages;
	int	pc_subclaim_pages;
	int	pc_free_replacement_page[2];
	int	pc_try_demote_pages[6];
	int	pc_demote_pages[2];
} pagecnt;

uint_t	hashin_count;
uint_t	hashin_not_held;
uint_t	hashin_already;

uint_t	hashout_count;
uint_t	hashout_not_held;

uint_t	page_create_count;
uint_t	page_create_not_enough;
uint_t	page_create_not_enough_again;
uint_t	page_create_zero;
uint_t	page_create_hashout;
uint_t	page_create_page_lock_failed;
uint_t	page_create_trylock_failed;
uint_t	page_create_found_one;
uint_t	page_create_hashin_failed;
uint_t	page_create_dropped_phm;

uint_t	page_create_new;
uint_t	page_create_exists;
uint_t	page_create_putbacks;
uint_t	page_create_overshoot;

uint_t	page_reclaim_zero;
uint_t	page_reclaim_zero_locked;

uint_t	page_rename_exists;
uint_t	page_rename_count;

uint_t	page_lookup_cnt[20];
uint_t	page_lookup_nowait_cnt[10];
uint_t	page_find_cnt;
uint_t	page_exists_cnt;
uint_t	page_exists_forreal_cnt;
uint_t	page_lookup_dev_cnt;
uint_t	get_cachelist_cnt;
uint_t	page_create_cnt[10];
uint_t	alloc_pages[9];
uint_t	page_exphcontg[19];
uint_t	page_create_large_cnt[10];

#endif

static inline page_t *
page_hash_search(ulong_t index, vnode_t *vnode, u_offset_t off)
{
	uint_t mylen = 0;
	page_t *page;

	for (page = page_hash[index]; page; page = page->p_hash, mylen++)
		if (page->p_vnode == vnode && page->p_offset == off)
			break;

#ifdef	VM_STATS
	if (page != NULL)
		pagecnt.pc_find_hit++;
	else
		pagecnt.pc_find_miss++;

	pagecnt.pc_find_hashlen[MIN(mylen, PC_HASH_CNT)]++;
#endif

	return (page);
}


#ifdef DEBUG
#define	MEMSEG_SEARCH_STATS
#endif

#ifdef MEMSEG_SEARCH_STATS
struct memseg_stats {
	uint_t nsearch;
	uint_t nlastwon;
	uint_t nhashwon;
	uint_t nnotfound;
} memseg_stats;

#define	MEMSEG_STAT_INCR(v) \
	atomic_inc_32(&memseg_stats.v)
#else
#define	MEMSEG_STAT_INCR(x)
#endif

struct memseg *memsegs;		/* list of memory segments */

/*
 * /etc/system tunable to control large page allocation heuristic.
 *
 * Setting to LPAP_LOCAL will heavily prefer the local lgroup over remote
 * lgroup for large page allocation requests.  If a large page is not readily
 * available on the local freelists we will go through additional effort
 * to create a large page, potentially moving smaller pages around to coalesce
 * larger pages in the local lgroup.
 * Default value of LPAP_DEFAULT will go to remote freelists if large pages
 * are not readily available in the local lgroup.
 */
enum lpap {
	LPAP_DEFAULT,	/* default large page allocation policy */
	LPAP_LOCAL	/* local large page allocation policy */
};

enum lpap lpg_alloc_prefer = LPAP_DEFAULT;

static void page_init_mem_config(void);
static int page_do_hashin(page_t *, vnode_t *, u_offset_t);
static void page_do_hashout(page_t *);
static void page_capture_init();
int page_capture_take_action(page_t *, uint_t, void *);

static void page_demote_vp_pages(page_t *);


void
pcf_init(void)
{
	if (boot_ncpus != -1) {
		pcf_fanout = boot_ncpus;
	} else {
		pcf_fanout = max_ncpus;
	}
#ifdef sun4v
	/*
	 * Force at least 4 buckets if possible for sun4v.
	 */
	pcf_fanout = MAX(pcf_fanout, 4);
#endif /* sun4v */

	/*
	 * Round up to the nearest power of 2.
	 */
	pcf_fanout = MIN(pcf_fanout, MAX_PCF_FANOUT);
	if (!ISP2(pcf_fanout)) {
		pcf_fanout = 1 << highbit(pcf_fanout);

		if (pcf_fanout > MAX_PCF_FANOUT) {
			pcf_fanout = 1 << (highbit(MAX_PCF_FANOUT) - 1);
		}
	}
	pcf_fanout_mask = pcf_fanout - 1;
}

/*
 * vm subsystem related initialization
 */
void
vm_init(void)
{
	boolean_t callb_vm_cpr(void *, int);

	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
	page_init_mem_config();
	page_retire_init();
	vm_usage_init();
	page_capture_init();
}

/*
 * This function is called at startup and when memory is added or deleted.
 */
void
init_pages_pp_maximum()
{
	static pgcnt_t	p_min;
	static pgcnt_t	pages_pp_maximum_startup;
	static pgcnt_t	avrmem_delta;
	static int	init_done;
	static int	user_set;	/* true if set in /etc/system */

	if (init_done == 0) {

		/* If the user specified a value, save it */
		if (pages_pp_maximum != 0) {
			user_set = 1;
			pages_pp_maximum_startup = pages_pp_maximum;
		}

		/*
		 * The first-time setting of pages_pp_maximum is based
		 * on the value of availrmem just after the start-up
		 * allocations.  To preserve this relationship at run
		 * time, use a delta from availrmem_initial.
		 */
		ASSERT(availrmem_initial >= availrmem);
		avrmem_delta = availrmem_initial - availrmem;

		/* The allowable floor of pages_pp_maximum */
		p_min = tune.t_minarmem + 100;

		/* Make sure we don't come through here again. */
		init_done = 1;
	}
	/*
	 * Determine pages_pp_maximum, the number of currently available
	 * pages (availrmem) that can't be `locked'.  If not set by
	 * the user, we set it to 4% of the currently available memory
	 * plus 4MB.
	 * But we also insist that it be greater than tune.t_minarmem;
	 * otherwise a process could lock down a lot of memory, get swapped
	 * out, and never have enough to get swapped back in.
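	 *
	 * Illustrative arithmetic only (assuming 8K pages and roughly one
	 * million pages of available memory): 1048576 / 25 ~= 41943 pages
	 * from the 4% term, plus btop(4MB) = 512 pages, for about 42455
	 * pages in total.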
	 */
	if (user_set)
		pages_pp_maximum = pages_pp_maximum_startup;
	else
		pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25)
		    + btop(4 * 1024 * 1024);

	if (pages_pp_maximum <= p_min) {
		pages_pp_maximum = p_min;
	}
}

/*
 * In the past, we limited the maximum pages that could be gotten to
 * essentially 1/2 of the total pages on the system.  However, this is too
 * conservative for some cases, for example, if we want to host a large
 * virtual machine which needs to use a significant portion of the system's
 * memory.  In practice, allowing more than 1/2 of the total pages is fine,
 * but becomes problematic as we approach or exceed 75% of the pages on the
 * system.  Thus, we limit the maximum to 23/32 of the total pages, which is
 * ~72%.
 */
void
set_max_page_get(pgcnt_t target_total_pages)
{
	max_page_get = (target_total_pages >> 5) * 23;
	ASSERT3U(max_page_get, >, 0);
}

pgcnt_t
get_max_page_get()
{
	return (max_page_get);
}

static pgcnt_t pending_delete;

/*ARGSUSED*/
static void
page_mem_config_post_add(
	void *arg,
	pgcnt_t delta_pages)
{
	set_max_page_get(total_pages - pending_delete);
	init_pages_pp_maximum();
}

/*ARGSUSED*/
static int
page_mem_config_pre_del(
	void *arg,
	pgcnt_t delta_pages)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	return (0);
}

/*ARGSUSED*/
static void
page_mem_config_post_del(
	void *arg,
	pgcnt_t delta_pages,
	int cancelled)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	if (!cancelled)
		init_pages_pp_maximum();
}

static kphysm_setup_vector_t page_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	page_mem_config_post_add,
	page_mem_config_pre_del,
	page_mem_config_post_del,
};

static void
page_init_mem_config(void)
{
	int ret;

	ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL);
	ASSERT(ret == 0);
}

/*
 * Evenly spread out the PCF counters for large free pages
 */
static void
page_free_large_ctr(pgcnt_t npages)
{
	static struct pcf	*p = pcf;
	pgcnt_t			lump;

	freemem += npages;

	lump = roundup(npages, pcf_fanout) / pcf_fanout;

	while (npages > 0) {

		ASSERT(!p->pcf_block);

		if (lump < npages) {
			p->pcf_count += (uint_t)lump;
			npages -= lump;
		} else {
			p->pcf_count += (uint_t)npages;
			npages = 0;
		}

		ASSERT(!p->pcf_wait);

		if (++p > &pcf[pcf_fanout - 1])
			p = pcf;
	}

	ASSERT(npages == 0);
}

/*
 * Add a physical chunk of memory to the system free lists during startup.
 * Platform specific startup() allocates the memory for the page structs.
 *
 * num	- number of page structures
 * pnum	- page number (pfn) to be associated with the first page.
 *
 * Since we are doing this during startup (ie. single threaded), we will
 * use shortcut routines to avoid any locking overhead while putting all
 * these pages on the freelists.
 *
 * NOTE: Any changes performed to page_free(), must also be performed to
 *	 add_physmem() since this is how we initialize all page_t's at
 *	 boot time.
 */
void
add_physmem(
	page_t	*pp,
	pgcnt_t	num,
	pfn_t	pnum)
{
	page_t	*root = NULL;
	uint_t	szc = page_num_pagesizes() - 1;
	pgcnt_t	large = page_get_pagecnt(szc);
	pgcnt_t	cnt = 0;

	TRACE_2(TR_FAC_VM, TR_PAGE_INIT,
	    "add_physmem:pp %p num %lu", pp, num);

	/*
	 * Limit the max page_get request based on the number of
	 * page structs we have (see set_max_page_get()).
	 */
	total_pages += num;
	set_max_page_get(total_pages);

	PLCNT_MODIFY_MAX(pnum, (long)num);

	/*
	 * The physical space for the pages array
	 * representing ram pages has already been
	 * allocated.  Here we initialize each lock
	 * in the page structure, and put each on
	 * the free list
	 */
	for (; num; pp++, pnum++, num--) {

		/*
		 * this needs to fill in the page number
		 * and do any other arch specific initialization
		 */
		add_physmem_cb(pp, pnum);

		pp->p_lckcnt = 0;
		pp->p_cowcnt = 0;
		pp->p_slckcnt = 0;

		/*
		 * Initialize the page lock as unlocked, since nobody
		 * can see or access this page yet.
		 */
		pp->p_selock = 0;

		/*
		 * Initialize IO lock
		 */
		page_iolock_init(pp);

		/*
		 * initialize other fields in the page_t
		 */
		PP_SETFREE(pp);
		page_clr_all_props(pp);
		PP_SETAGED(pp);
		pp->p_offset = (u_offset_t)-1;
		pp->p_next = pp;
		pp->p_prev = pp;

		/*
		 * Simple case: System doesn't support large pages.
		 */
		if (szc == 0) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Handle unaligned pages, we collect them up onto
		 * the root page until we have a full large page.
		 */
		if (!IS_P2ALIGNED(pnum, large)) {

			/*
			 * If not in a large page,
			 * just free as small page.
			 */
			if (root == NULL) {
				pp->p_szc = 0;
				page_free_at_startup(pp);
				continue;
			}

			/*
			 * Link a constituent page into the large page.
			 */
			pp->p_szc = szc;
			page_list_concat(&root, &pp);

			/*
			 * When large page is fully formed, free it.
			 */
			if (++cnt == large) {
				page_free_large_ctr(cnt);
				page_list_add_pages(root, PG_LIST_ISINIT);
				root = NULL;
				cnt = 0;
			}
			continue;
		}

		/*
		 * At this point we have a page number which
		 * is aligned.  We assert that we aren't already
		 * in a different large page.
		 */
		ASSERT(IS_P2ALIGNED(pnum, large));
		ASSERT(root == NULL && cnt == 0);

		/*
		 * If insufficient number of pages left to form
		 * a large page, just free the small page.
		 */
		if (num < large) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Otherwise start a new large page.
		 */
		pp->p_szc = szc;
		cnt++;
		root = pp;
	}
	ASSERT(root == NULL && cnt == 0);
}

/*
 * Find a page representing the specified [vp, offset].
 * If we find the page but it is in transit coming in,
 * it will have an "exclusive" lock and we wait for
 * the i/o to complete.  A page found on the free list
 * is always reclaimed and then locked.  On success, the page
 * is locked, its data is valid and it isn't on the free
 * list, while a NULL is returned if the page doesn't exist.
 */
page_t *
page_lookup(vnode_t *vp, u_offset_t off, se_t se)
{
	return (page_lookup_create(vp, off, se, NULL, NULL, 0));
}

/*
 * Find a page representing the specified [vp, offset].
 * We either return the one we found or, if passed in,
 * create one with the identity of [vp, offset] of the
 * pre-allocated page.  If we find an existing page but it is
 * in transit coming in, it will have an "exclusive" lock
 * and we wait for the i/o to complete.  A page found on
 * the free list is always reclaimed and then locked.
 * On success, the page is locked, its data is valid and
 * it isn't on the free list, while a NULL is returned
 * if the page doesn't exist and newpp is NULL.
 */
page_t *
page_lookup_create(
	vnode_t *vp,
	u_offset_t off,
	se_t se,
	page_t *newpp,
	spgcnt_t *nrelocp,
	int flags)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		hash_locked;
	uint_t		es;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_lookup_cnt[0]);
	ASSERT(newpp ? PAGE_EXCL(newpp) : 1);

	/*
	 * Acquire the appropriate page hash lock since
	 * we have to search the hash list.  Pages that
	 * hash to this list can't change identity while
	 * this lock is held.
	 */
	hash_locked = 0;
	index = PAGE_HASH_FUNC(vp, off);
	phm = NULL;
top:
	pp = page_hash_search(index, vp, off);
	if (pp != NULL) {
		VM_STAT_ADD(page_lookup_cnt[1]);
		es = (newpp != NULL) ? 1 : 0;
		es |= flags;
		if (!hash_locked) {
			VM_STAT_ADD(page_lookup_cnt[2]);
			if (!page_try_reclaim_lock(pp, se, es)) {
				/*
				 * On a miss, acquire the phm.  Then
				 * next time, page_lock() will be called,
				 * causing a wait if the page is busy.
				 * Just looping with page_trylock() would
				 * get pretty boring.
				 */
				VM_STAT_ADD(page_lookup_cnt[3]);
				phm = PAGE_HASH_MUTEX(index);
				mutex_enter(phm);
				hash_locked = 1;
				goto top;
			}
		} else {
			VM_STAT_ADD(page_lookup_cnt[4]);
			if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) {
				VM_STAT_ADD(page_lookup_cnt[5]);
				goto top;
			}
		}

		/*
		 * Since `pp' is locked it can not change identity now.
		 * Reconfirm we locked the correct page.
		 *
		 * Both the p_vnode and p_offset *must* be cast volatile
		 * to force a reload of their values: The page_hash_search
		 * function will have stuffed p_vnode and p_offset into
		 * registers before calling page_trylock(); another thread,
		 * actually holding the hash lock, could have changed the
		 * page's identity in memory, but our registers would not
		 * be changed, fooling the reconfirmation.  If the hash
		 * lock was held during the search, the casting would
		 * not be needed.
		 */
		VM_STAT_ADD(page_lookup_cnt[6]);
		if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
		    ((volatile u_offset_t)(pp->p_offset) != off)) {
			VM_STAT_ADD(page_lookup_cnt[7]);
			if (hash_locked) {
				panic("page_lookup_create: lost page %p",
				    (void *)pp);
				/*NOTREACHED*/
			}
			page_unlock(pp);
			phm = PAGE_HASH_MUTEX(index);
			mutex_enter(phm);
			hash_locked = 1;
			goto top;
		}

		/*
		 * If page_trylock() was called, then pp may still be on
		 * the cachelist (can't be on the free list, it would not
		 * have been found in the search).  If it is on the
		 * cachelist it must be pulled now.  To pull the page from
		 * the cachelist, it must be exclusively locked.
		 *
		 * The other big difference between page_trylock() and
		 * page_lock(), is that page_lock() will pull the
		 * page from whatever free list (the cache list in this
		 * case) the page is on.
		 * If page_trylock() was used above, then we have to
		 * do the reclaim ourselves.
		 */
		if ((!hash_locked) && (PP_ISFREE(pp))) {
			ASSERT(PP_ISAGED(pp) == 0);
			VM_STAT_ADD(page_lookup_cnt[8]);

			/*
			 * page_reclaim will ensure that we
			 * have this page exclusively
			 */

			if (!page_reclaim(pp, NULL)) {
				/*
				 * Page_reclaim dropped whatever lock
				 * we held.
				 */
				VM_STAT_ADD(page_lookup_cnt[9]);
				phm = PAGE_HASH_MUTEX(index);
				mutex_enter(phm);
				hash_locked = 1;
				goto top;
			} else if (se == SE_SHARED && newpp == NULL) {
				VM_STAT_ADD(page_lookup_cnt[10]);
				page_downgrade(pp);
			}
		}

		if (hash_locked) {
			mutex_exit(phm);
		}

		if (newpp != NULL && pp->p_szc < newpp->p_szc &&
		    PAGE_EXCL(pp) && nrelocp != NULL) {
			ASSERT(nrelocp != NULL);
			(void) page_relocate(&pp, &newpp, 1, 1, nrelocp,
			    NULL);
			if (*nrelocp > 0) {
				VM_STAT_COND_ADD(*nrelocp == 1,
				    page_lookup_cnt[11]);
				VM_STAT_COND_ADD(*nrelocp > 1,
				    page_lookup_cnt[12]);
				pp = newpp;
				se = SE_EXCL;
			} else {
				if (se == SE_SHARED) {
					page_downgrade(pp);
				}
				VM_STAT_ADD(page_lookup_cnt[13]);
			}
		} else if (newpp != NULL && nrelocp != NULL) {
			if (PAGE_EXCL(pp) && se == SE_SHARED) {
				page_downgrade(pp);
			}
			VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc,
			    page_lookup_cnt[14]);
			VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc,
			    page_lookup_cnt[15]);
			VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc,
			    page_lookup_cnt[16]);
		} else if (newpp != NULL && PAGE_EXCL(pp)) {
			se = SE_EXCL;
		}
	} else if (!hash_locked) {
		VM_STAT_ADD(page_lookup_cnt[17]);
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		hash_locked = 1;
		goto top;
	} else if (newpp != NULL) {
		/*
		 * If we have a preallocated page then
		 * insert it now and basically behave like
		 * page_create.
		 */
		VM_STAT_ADD(page_lookup_cnt[18]);
		/*
		 * Since we hold the page hash mutex and
		 * just searched for this page, page_hashin
		 * had better not fail.  If it does, that
		 * means some thread did not follow the
		 * page hash mutex rules.  Panic now and
		 * get it over with.  As usual, go down
		 * holding all the locks.
		 */
		ASSERT(MUTEX_HELD(phm));
		if (!page_hashin(newpp, vp, off, phm)) {
			ASSERT(MUTEX_HELD(phm));
			panic("page_lookup_create: hashin failed %p %p %llx %p",
			    (void *)newpp, (void *)vp, off, (void *)phm);
			/*NOTREACHED*/
		}
		ASSERT(MUTEX_HELD(phm));
		mutex_exit(phm);
		phm = NULL;
		page_set_props(newpp, P_REF);
		page_io_lock(newpp);
		pp = newpp;
		se = SE_EXCL;
	} else {
		VM_STAT_ADD(page_lookup_cnt[19]);
		mutex_exit(phm);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1);

	return (pp);
}

/*
 * Search the hash list for the page representing the
 * specified [vp, offset] and return it locked.  Skip
 * free pages and pages that cannot be locked as requested.
 * Used while attempting to kluster pages.
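 *
 * Unlike page_lookup(), this routine never waits for a busy page: if the
 * page is free or cannot be locked with a trylock, NULL is returned.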
 */
page_t *
page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		locked;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_lookup_nowait_cnt[0]);

	index = PAGE_HASH_FUNC(vp, off);
	pp = page_hash_search(index, vp, off);
	locked = 0;
	if (pp == NULL) {
top:
		VM_STAT_ADD(page_lookup_nowait_cnt[1]);
		locked = 1;
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		pp = page_hash_search(index, vp, off);
	}

	if (pp == NULL || PP_ISFREE(pp)) {
		VM_STAT_ADD(page_lookup_nowait_cnt[2]);
		pp = NULL;
	} else {
		if (!page_trylock(pp, se)) {
			VM_STAT_ADD(page_lookup_nowait_cnt[3]);
			pp = NULL;
		} else {
			VM_STAT_ADD(page_lookup_nowait_cnt[4]);
			/*
			 * See the comment in page_lookup()
			 */
			if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
			    ((u_offset_t)(pp->p_offset) != off)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[5]);
				if (locked) {
					panic("page_lookup_nowait %p",
					    (void *)pp);
					/*NOTREACHED*/
				}
				page_unlock(pp);
				goto top;
			}
			if (PP_ISFREE(pp)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[6]);
				page_unlock(pp);
				pp = NULL;
			}
		}
	}
	if (locked) {
		VM_STAT_ADD(page_lookup_nowait_cnt[7]);
		mutex_exit(phm);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	return (pp);
}

/*
 * Search the hash list for a page with the specified [vp, off]
 * that is known to exist and is already locked.  This routine
 * is typically used by segment SOFTUNLOCK routines.
 */
page_t *
page_find(vnode_t *vp, u_offset_t off)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_find_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	pp = page_hash_search(index, vp, off);
	mutex_exit(phm);

	ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
	return (pp);
}

/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system.  Obviously this should
 * only be considered as a hint since nothing prevents the
 * page from disappearing or appearing immediately after
 * the return from this routine.  Subsequently, we don't
 * even bother to lock the list.
 */
page_t *
page_exists(vnode_t *vp, u_offset_t off)
{
	ulong_t		index;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_exists_cnt);

	index = PAGE_HASH_FUNC(vp, off);

	return (page_hash_search(index, vp, off));
}

/*
 * Determine if physically contiguous pages exist for the [vp, off] -
 * [vp, off + page_size(szc)) range.  If they exist and ppa is not NULL,
 * fill the ppa array with these pages locked SHARED.  If necessary,
 * reclaim pages from the freelist.  Return 1 if contiguous pages exist
 * and 0 otherwise.
 *
 * If we fail to lock the pages, we still return 1 if the pages exist and
 * are contiguous, but in this case the return value is just a hint and
 * the ppa array won't be filled.  The caller should initialize ppa[0] as
 * NULL to distinguish these two outcomes.
 *
 * Returns 0 if the pages don't exist or are not physically contiguous.
 *
 * This routine doesn't work for anonymous (swapfs) pages.
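 *
 * Note for callers: when the array is filled it is terminated with a NULL
 * entry, so ppa must have room for page_get_pagecnt(szc) + 1 pointers.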
 */
int
page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[])
{
	pgcnt_t pages;
	pfn_t pfn;
	page_t *rootpp;
	pgcnt_t i;
	pgcnt_t j;
	u_offset_t save_off = off;
	ulong_t index;
	kmutex_t *phm;
	page_t *pp;
	uint_t pszc;
	int loopcnt = 0;

	ASSERT(szc != 0);
	ASSERT(vp != NULL);
	ASSERT(!IS_SWAPFSVP(vp));
	ASSERT(!VN_ISKAS(vp));

again:
	if (++loopcnt > 3) {
		VM_STAT_ADD(page_exphcontg[0]);
		return (0);
	}

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	pp = page_hash_search(index, vp, off);
	mutex_exit(phm);

	VM_STAT_ADD(page_exphcontg[1]);

	if (pp == NULL) {
		VM_STAT_ADD(page_exphcontg[2]);
		return (0);
	}

	pages = page_get_pagecnt(szc);
	rootpp = pp;
	pfn = rootpp->p_pagenum;

	if ((pszc = pp->p_szc) >= szc && ppa != NULL) {
		VM_STAT_ADD(page_exphcontg[3]);
		if (!page_trylock(pp, SE_SHARED)) {
			VM_STAT_ADD(page_exphcontg[4]);
			return (1);
		}
		/*
		 * Also check whether p_pagenum was modified by DR.
		 */
		if (pp->p_szc != pszc || pp->p_vnode != vp ||
		    pp->p_offset != off || pp->p_pagenum != pfn) {
			VM_STAT_ADD(page_exphcontg[5]);
			page_unlock(pp);
			off = save_off;
			goto again;
		}
		/*
		 * Since szc was non-zero and the vnode and offset still
		 * matched after we locked the page, it can't become free
		 * on us.
		 */
		ASSERT(!PP_ISFREE(pp));
		if (!IS_P2ALIGNED(pfn, pages)) {
			page_unlock(pp);
			return (0);
		}
		ppa[0] = pp;
		pp++;
		off += PAGESIZE;
		pfn++;
		for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
			if (!page_trylock(pp, SE_SHARED)) {
				VM_STAT_ADD(page_exphcontg[6]);
				pp--;
				while (i-- > 0) {
					page_unlock(pp);
					pp--;
				}
				ppa[0] = NULL;
				return (1);
			}
			if (pp->p_szc != pszc) {
				VM_STAT_ADD(page_exphcontg[7]);
				page_unlock(pp);
				pp--;
				while (i-- > 0) {
					page_unlock(pp);
					pp--;
				}
				ppa[0] = NULL;
				off = save_off;
				goto again;
			}
			/*
			 * szc is the same as for the previous already locked
			 * pages with the right identity.  Since this page
			 * had the correct szc after we locked it, it can't
			 * get freed or destroyed and therefore must have the
			 * expected identity.
			 */
			ASSERT(!PP_ISFREE(pp));
			if (pp->p_vnode != vp ||
			    pp->p_offset != off) {
				panic("page_exists_physcontig: "
				    "large page identity doesn't match");
			}
			ppa[i] = pp;
			ASSERT(pp->p_pagenum == pfn);
		}
		VM_STAT_ADD(page_exphcontg[8]);
		ppa[pages] = NULL;
		return (1);
	} else if (pszc >= szc) {
		VM_STAT_ADD(page_exphcontg[9]);
		if (!IS_P2ALIGNED(pfn, pages)) {
			return (0);
		}
		return (1);
	}

	if (!IS_P2ALIGNED(pfn, pages)) {
		VM_STAT_ADD(page_exphcontg[10]);
		return (0);
	}

	if (page_numtomemseg_nolock(pfn) !=
	    page_numtomemseg_nolock(pfn + pages - 1)) {
		VM_STAT_ADD(page_exphcontg[11]);
		return (0);
	}

	/*
	 * We loop across the pages 4 times to promote the page size.
	 * We're extra cautious to promote the page size atomically with
	 * respect to everybody else, but we can probably optimize this
	 * into 1 loop if it becomes an issue.
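	 *
	 * (Outline of the passes below: lock every constituent page
	 * SE_EXCL and verify its identity, reclaim any that are on the
	 * free list and unload remaining mappings, set p_szc on each
	 * page, then downgrade or drop the locks.)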
	 */

	for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
		if (!page_trylock(pp, SE_EXCL)) {
			VM_STAT_ADD(page_exphcontg[12]);
			break;
		}
		/*
		 * Check whether p_pagenum was modified by DR.
		 */
		if (pp->p_pagenum != pfn) {
			page_unlock(pp);
			break;
		}
		if (pp->p_vnode != vp ||
		    pp->p_offset != off) {
			VM_STAT_ADD(page_exphcontg[13]);
			page_unlock(pp);
			break;
		}
		if (pp->p_szc >= szc) {
			ASSERT(i == 0);
			page_unlock(pp);
			off = save_off;
			goto again;
		}
	}

	if (i != pages) {
		VM_STAT_ADD(page_exphcontg[14]);
		--pp;
		while (i-- > 0) {
			page_unlock(pp);
			--pp;
		}
		return (0);
	}

	pp = rootpp;
	for (i = 0; i < pages; i++, pp++) {
		if (PP_ISFREE(pp)) {
			VM_STAT_ADD(page_exphcontg[15]);
			ASSERT(!PP_ISAGED(pp));
			ASSERT(pp->p_szc == 0);
			if (!page_reclaim(pp, NULL)) {
				break;
			}
		} else {
			ASSERT(pp->p_szc < szc);
			VM_STAT_ADD(page_exphcontg[16]);
			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
		}
	}
	if (i < pages) {
		VM_STAT_ADD(page_exphcontg[17]);
		/*
		 * page_reclaim failed because we were out of memory.
		 * Drop the rest of the locks and return because this page
		 * must be already reallocated anyway.
		 */
		pp = rootpp;
		for (j = 0; j < pages; j++, pp++) {
			if (j != i) {
				page_unlock(pp);
			}
		}
		return (0);
	}

	off = save_off;
	pp = rootpp;
	for (i = 0; i < pages; i++, pp++, off += PAGESIZE) {
		ASSERT(PAGE_EXCL(pp));
		ASSERT(!PP_ISFREE(pp));
		ASSERT(!hat_page_is_mapped(pp));
		ASSERT(pp->p_vnode == vp);
		ASSERT(pp->p_offset == off);
		pp->p_szc = szc;
	}
	pp = rootpp;
	for (i = 0; i < pages; i++, pp++) {
		if (ppa == NULL) {
			page_unlock(pp);
		} else {
			ppa[i] = pp;
			page_downgrade(ppa[i]);
		}
	}
	if (ppa != NULL) {
		ppa[pages] = NULL;
	}
	VM_STAT_ADD(page_exphcontg[18]);
	ASSERT(vp->v_pages != NULL);
	return (1);
}

/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system and if so return its
 * size code.  Obviously this should only be considered as
 * a hint since nothing prevents the page from disappearing
 * or appearing immediately after the return from this routine.
 */
int
page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	int		rc = 0;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	ASSERT(szc != NULL);
	VM_STAT_ADD(page_exists_forreal_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	pp = page_hash_search(index, vp, off);
	if (pp != NULL) {
		*szc = pp->p_szc;
		rc = 1;
	}
	mutex_exit(phm);
	return (rc);
}

/* wakeup threads waiting for pages in page_create_get_something() */
void
wakeup_pcgs(void)
{
	if (!CV_HAS_WAITERS(&pcgs_cv))
		return;
	cv_broadcast(&pcgs_cv);
}

/*
 * 'freemem' is used all over the kernel as an indication of how many
 * pages are free (either on the cache list or on the free page list)
 * in the system.  In very few places is a really accurate 'freemem'
 * needed.
 * To avoid contention on the lock protecting the single freemem, it was
 * spread out into NCPU buckets.  set_freemem() sets freemem to the total
 * of all NCPU buckets.  It is called from clock() on each TICK.
 */
void
set_freemem(void)
{
	struct pcf	*p;
	ulong_t		t;
	uint_t		i;

	t = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		t += p->pcf_count;
		p++;
	}
	freemem = t;

	/*
	 * Don't worry about grabbing mutex.  It's not that
	 * critical if we miss a tick or two.  This is
	 * where we wakeup possible delayers in
	 * page_create_get_something().
	 */
	wakeup_pcgs();
}

ulong_t
get_freemem()
{
	struct pcf	*p;
	ulong_t		t;
	uint_t		i;

	t = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		t += p->pcf_count;
		p++;
	}
	/*
	 * We just calculated it, might as well set it.
	 */
	freemem = t;
	return (t);
}

/*
 * Acquire all of the page cache & free (pcf) locks.
 */
void
pcf_acquire_all()
{
	struct pcf	*p;
	uint_t		i;

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		p++;
	}
}

/*
 * Release all the pcf_locks.
 */
void
pcf_release_all()
{
	struct pcf	*p;
	uint_t		i;

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_exit(&p->pcf_lock);
		p++;
	}
}

/*
 * Inform the VM system that we need some pages freed up.
 * Calls must be symmetric, e.g.:
 *
 *	page_needfree(100);
 *	wait a bit;
 *	page_needfree(-100);
 */
void
page_needfree(spgcnt_t npages)
{
	mutex_enter(&new_freemem_lock);
	needfree += npages;
	mutex_exit(&new_freemem_lock);
}

/*
 * Throttle for page_create(): try to prevent freemem from dropping
 * below throttlefree.  We can't provide a 100% guarantee because
 * KM_NOSLEEP allocations, page_reclaim(), and various other things
 * nibble away at the freelist.  However, we can block all PG_WAIT
 * allocations until memory becomes available.  The motivation is
 * that several things can fall apart when there's no free memory:
 *
 * (1) If pageout() needs memory to push a page, the system deadlocks.
 *
 * (2) By (broken) specification, timeout(9F) can neither fail nor
 *     block, so it has no choice but to panic the system if it
 *     cannot allocate a callout structure.
 *
 * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block;
 *     it panics if it cannot allocate a callback structure.
 *
 * (4) Untold numbers of third-party drivers have not yet been hardened
 *     against KM_NOSLEEP and/or allocb() failures; they simply assume
 *     success and panic the system with a data fault on failure.
 *     (The long-term solution to this particular problem is to ship
 *     hostile fault-injecting DEBUG kernels with the DDK.)
 *
 * It is theoretically impossible to guarantee success of non-blocking
 * allocations, but in practice, this throttle is very hard to break.
 */
static int
page_create_throttle(pgcnt_t npages, int flags)
{
	ulong_t	fm;
	uint_t	i;
	pgcnt_t tf;	/* effective value of throttlefree */

	/*
	 * Normal priority allocations.
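	 *
	 * A PG_NORMALPRI request without PG_WAIT is simply checked against
	 * throttlefree; it either succeeds immediately or fails without
	 * sleeping.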
	 */
	if ((flags & (PG_WAIT | PG_NORMALPRI)) == PG_NORMALPRI) {
		ASSERT(!(flags & (PG_PANIC | PG_PUSHPAGE)));
		return (freemem >= npages + throttlefree);
	}

	/*
	 * Never deny pages when:
	 * - it's a thread that cannot block [NOMEMWAIT()]
	 * - the allocation cannot block and must not fail
	 * - the allocation cannot block and has pageout's dispensation
	 *   (PG_PUSHPAGE)
	 */
	if (NOMEMWAIT() ||
	    ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) ||
	    ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE))
		return (1);

	/*
	 * If the allocation can't block, we look favorably upon it
	 * unless we're below pageout_reserve.  In that case we fail
	 * the allocation because we want to make sure there are a few
	 * pages available for pageout.
	 */
	if ((flags & PG_WAIT) == 0)
		return (freemem >= npages + pageout_reserve);

	/* Calculate the effective throttlefree value */
	tf = throttlefree -
	    ((flags & PG_PUSHPAGE) ? pageout_reserve : 0);

	cv_signal(&proc_pageout->p_cv);

	for (;;) {
		fm = 0;
		pcf_acquire_all();
		mutex_enter(&new_freemem_lock);
		for (i = 0; i < pcf_fanout; i++) {
			fm += pcf[i].pcf_count;
			pcf[i].pcf_wait++;
			mutex_exit(&pcf[i].pcf_lock);
		}
		freemem = fm;
		if (freemem >= npages + tf) {
			mutex_exit(&new_freemem_lock);
			break;
		}
		needfree += npages;
		freemem_wait++;
		cv_wait(&freemem_cv, &new_freemem_lock);
		freemem_wait--;
		needfree -= npages;
		mutex_exit(&new_freemem_lock);
	}
	return (1);
}

/*
 * page_create_wait() is called to either coalesce pages from the
 * different pcf buckets or to wait because there simply are not
 * enough pages to satisfy the caller's request.
 *
 * Sadly, this is called from platform/vm/vm_machdep.c
 */
int
page_create_wait(pgcnt_t npages, uint_t flags)
{
	pgcnt_t		total;
	uint_t		i;
	struct pcf	*p;

	/*
	 * Wait until there are enough free pages to satisfy our
	 * entire request.
	 * We set needfree += npages before prodding pageout, to make sure
	 * it does real work when npages > lotsfree > freemem.
	 */
	VM_STAT_ADD(page_create_not_enough);

	ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1);
checkagain:
	if ((flags & PG_NORELOC) &&
	    kcage_freemem < kcage_throttlefree + npages)
		(void) kcage_create_throttle(npages, flags);

	if (freemem < npages + throttlefree)
		if (!page_create_throttle(npages, flags))
			return (0);

	if (pcf_decrement_bucket(npages) ||
	    pcf_decrement_multiple(&total, npages, 0))
		return (1);

	/*
	 * All of the pcf locks are held, there are not enough pages
	 * to satisfy the request (total < npages).
	 * Be sure to acquire the new_freemem_lock before dropping
	 * the pcf locks.  This prevents dropping wakeups in page_free().
	 * The order is always pcf_lock then new_freemem_lock.
	 *
	 * Since we hold all the pcf locks, it is a good time to set freemem.
	 *
	 * If the caller does not want to wait, return now.
	 * Else turn the pageout daemon loose to find something
	 * and wait till it does.
	 *
	 */
	freemem = total;

	if ((flags & PG_WAIT) == 0) {
		pcf_release_all();

		TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM,
		    "page_create_nomem:npages %ld freemem %ld",
		    npages, freemem);
		return (0);
	}

	ASSERT(proc_pageout != NULL);
	cv_signal(&proc_pageout->p_cv);

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START,
	    "page_create_sleep_start: freemem %ld needfree %ld",
	    freemem, needfree);

	/*
	 * We are going to wait.
	 * We currently hold all of the pcf_locks,
	 * get the new_freemem_lock (it protects freemem_wait),
	 * before dropping the pcf_locks.
	 */
	mutex_enter(&new_freemem_lock);

	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		p->pcf_wait++;
		mutex_exit(&p->pcf_lock);
		p++;
	}

	needfree += npages;
	freemem_wait++;

	cv_wait(&freemem_cv, &new_freemem_lock);

	freemem_wait--;
	needfree -= npages;

	mutex_exit(&new_freemem_lock);

	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END,
	    "page_create_sleep_end: freemem %ld needfree %ld",
	    freemem, needfree);

	VM_STAT_ADD(page_create_not_enough_again);
	goto checkagain;
}

/*
 * A routine to do the opposite of page_create_wait().
 */
void
page_create_putback(spgcnt_t npages)
{
	struct pcf	*p;
	pgcnt_t		lump;
	uint_t		*which;

	/*
	 * When a contiguous lump is broken up, we have to
	 * deal with lots of pages (min 64) so let's spread
	 * the wealth around.
	 */
	lump = roundup(npages, pcf_fanout) / pcf_fanout;
	freemem += npages;

	for (p = pcf; (npages > 0) && (p < &pcf[pcf_fanout]); p++) {
		which = &p->pcf_count;

		mutex_enter(&p->pcf_lock);

		if (p->pcf_block) {
			which = &p->pcf_reserve;
		}

		if (lump < npages) {
			*which += (uint_t)lump;
			npages -= lump;
		} else {
			*which += (uint_t)npages;
			npages = 0;
		}

		if (p->pcf_wait) {
			mutex_enter(&new_freemem_lock);
			/*
			 * Check to see if some other thread
			 * is actually waiting.  Another bucket
			 * may have woken it up by now.  If there
			 * are no waiters, then set our pcf_wait
			 * count to zero to avoid coming in here
			 * next time.
			 */
			if (freemem_wait) {
				if (npages > 1) {
					cv_broadcast(&freemem_cv);
				} else {
					cv_signal(&freemem_cv);
				}
				p->pcf_wait--;
			} else {
				p->pcf_wait = 0;
			}
			mutex_exit(&new_freemem_lock);
		}
		mutex_exit(&p->pcf_lock);
	}
	ASSERT(npages == 0);
}

/*
 * A helper routine for page_create_get_something.
 * The indenting got too deep down there.
 * Unblock the pcf counters.  Any pages freed after
 * pcf_block got set are moved to pcf_count and
 * wakeups (cv_broadcast() or cv_signal()) are done as needed.
 */
static void
pcgs_unblock(void)
{
	int		i;
	struct pcf	*p;

	/*
	 * Update freemem while we're here.
	 */
	freemem = 0;
	p = pcf;
	for (i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		ASSERT(p->pcf_count == 0);
		p->pcf_count = p->pcf_reserve;
		p->pcf_block = 0;
		freemem += p->pcf_count;
		if (p->pcf_wait) {
			mutex_enter(&new_freemem_lock);
			if (freemem_wait) {
				if (p->pcf_reserve > 1) {
					cv_broadcast(&freemem_cv);
					p->pcf_wait = 0;
				} else {
					cv_signal(&freemem_cv);
					p->pcf_wait--;
				}
			} else {
				p->pcf_wait = 0;
			}
			mutex_exit(&new_freemem_lock);
		}
		p->pcf_reserve = 0;
		mutex_exit(&p->pcf_lock);
		p++;
	}
}

/*
 * Called from page_create_va() when both the cache and free lists
 * have been checked once.
 *
 * Either returns a page or panics since the accounting was done
 * way before we got here.
 *
 * We don't come here often, so leave the accounting on permanently.
 */

#define	MAX_PCGS	100

#ifdef	DEBUG
#define	PCGS_TRIES	100
#else	/* DEBUG */
#define	PCGS_TRIES	10
#endif	/* DEBUG */

#ifdef	VM_STATS
uint_t	pcgs_counts[PCGS_TRIES];
uint_t	pcgs_too_many;
uint_t	pcgs_entered;
uint_t	pcgs_entered_noreloc;
uint_t	pcgs_locked;
uint_t	pcgs_cagelocked;
#endif	/* VM_STATS */

static page_t *
page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg,
    caddr_t vaddr, uint_t flags)
{
	uint_t		count;
	page_t		*pp;
	uint_t		locked, i;
	struct pcf	*p;
	lgrp_t		*lgrp;
	int		cagelocked = 0;

	VM_STAT_ADD(pcgs_entered);

	/*
	 * Tap any reserve freelists: if we fail now, we'll die
	 * since the page(s) we're looking for have already been
	 * accounted for.
	 */
	flags |= PG_PANIC;

	if ((flags & PG_NORELOC) != 0) {
		VM_STAT_ADD(pcgs_entered_noreloc);
		/*
		 * Requests for free pages from critical threads
		 * such as pageout still won't throttle here, but
		 * we must try again, to give the cageout thread
		 * another chance to catch up.  Since we already
		 * accounted for the pages, we had better get them
		 * this time.
		 *
		 * N.B. All non-critical threads acquire the pcgs_cagelock
		 * to serialize access to the freelists.  This implements a
		 * turnstile-type synchronization to avoid starvation of
		 * critical requests for PG_NORELOC memory by non-critical
		 * threads: all non-critical threads must acquire a 'ticket'
		 * before passing through, which entails making sure
		 * kcage_freemem won't fall below minfree prior to grabbing
		 * pages from the freelists.
		 */
		if (kcage_create_throttle(1, flags) == KCT_NONCRIT) {
			mutex_enter(&pcgs_cagelock);
			cagelocked = 1;
			VM_STAT_ADD(pcgs_cagelocked);
		}
	}

	/*
	 * Time to get serious.
	 * We failed to get a `correctly colored' page from both the
	 * free and cache lists.
	 * We escalate in stages.
	 *
	 * First try both lists without worrying about color.
	 *
	 * Then, grab all page accounting locks (ie. pcf[]) and
	 * steal any pages that they have and set the pcf_block flag to
	 * stop deletions from the lists.  This will help because
	 * a page can get added to the free list while we are looking
	 * at the cache list, then another page could be added to the cache
	 * list allowing the page on the free list to be removed as we
	 * move from looking at the cache list to the free list.
	 * This could happen over and over.  We would never find the page
	 * we have accounted for.
	 *
	 * Noreloc pages are a subset of the global (relocatable) page pool.
	 * They are not tracked separately in the pcf bins, so it is
	 * impossible to know when doing pcf accounting if the available
	 * page(s) are noreloc pages or not.  When looking for a noreloc page
	 * it is quite easy to end up here even if the global (relocatable)
	 * page pool has plenty of free pages but the noreloc pool is empty.
	 *
	 * When the noreloc pool is empty (or low), additional noreloc pages
	 * are created by converting pages from the global page pool.  This
	 * process will stall during pcf accounting if the pcf bins are
	 * already locked.  Such is the case when a noreloc allocation is
	 * looping here in page_create_get_something waiting for more noreloc
	 * pages to appear.
	 *
	 * Short of adding a new field to the pcf bins to accurately track
	 * the number of free noreloc pages, we instead do not grab the
	 * pcgs_lock, do not set the pcf blocks and do not timeout when
	 * allocating a noreloc page.  This allows noreloc allocations to
	 * loop without blocking global page pool allocations.
	 *
	 * NOTE: the behaviour of page_create_get_something has not changed
	 * for the case of global page pool allocations.
	 */

	flags &= ~PG_MATCH_COLOR;
	locked = 0;
#if defined(__x86)
	flags = page_create_update_flags_x86(flags);
#endif

	lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);

	for (count = 0; kcage_on || count < MAX_PCGS; count++) {
		pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
		    flags, lgrp);
		if (pp == NULL) {
			pp = page_get_cachelist(vp, off, seg, vaddr,
			    flags, lgrp);
		}
		if (pp == NULL) {
			/*
			 * Serialize.  Don't fight with other pcgs().
			 */
			if (!locked && (!kcage_on || !(flags & PG_NORELOC))) {
				mutex_enter(&pcgs_lock);
				VM_STAT_ADD(pcgs_locked);
				locked = 1;
				p = pcf;
				for (i = 0; i < pcf_fanout; i++) {
					mutex_enter(&p->pcf_lock);
					ASSERT(p->pcf_block == 0);
					p->pcf_block = 1;
					p->pcf_reserve = p->pcf_count;
					p->pcf_count = 0;
					mutex_exit(&p->pcf_lock);
					p++;
				}
				freemem = 0;
			}

			if (count) {
				/*
				 * Since page_free() puts pages on
				 * a list then accounts for it, we
				 * just have to wait for page_free()
				 * to unlock any page it was working
				 * with.  The page_lock()-page_reclaim()
				 * path falls in the same boat.
				 *
				 * We don't need to check on the
				 * PG_WAIT flag, we have already
				 * accounted for the page we are
				 * looking for in page_create_va().
				 *
				 * We just wait a moment to let any
				 * locked pages on the lists free up,
				 * then continue around and try again.
				 *
				 * Will be awakened by set_freemem().
				 */
				mutex_enter(&pcgs_wait_lock);
				cv_wait(&pcgs_cv, &pcgs_wait_lock);
				mutex_exit(&pcgs_wait_lock);
			}
		} else {
#ifdef VM_STATS
			if (count >= PCGS_TRIES) {
				VM_STAT_ADD(pcgs_too_many);
			} else {
				VM_STAT_ADD(pcgs_counts[count]);
			}
#endif
			if (locked) {
				pcgs_unblock();
				mutex_exit(&pcgs_lock);
			}
			if (cagelocked)
				mutex_exit(&pcgs_cagelock);
			return (pp);
		}
	}
	/*
	 * We go down holding the pcf locks.
	 */
	panic("no %spage found %d",
	    ((flags & PG_NORELOC) ?
"non-reloc " : ""), count); 1940 /*NOTREACHED*/ 1941 } 1942 1943 /* 1944 * Create enough pages for "bytes" worth of data starting at 1945 * "off" in "vp". 1946 * 1947 * Where flag must be one of: 1948 * 1949 * PG_EXCL: Exclusive create (fail if any page already 1950 * exists in the page cache) which does not 1951 * wait for memory to become available. 1952 * 1953 * PG_WAIT: Non-exclusive create which can wait for 1954 * memory to become available. 1955 * 1956 * PG_PHYSCONTIG: Allocate physically contiguous pages. 1957 * (Not Supported) 1958 * 1959 * A doubly linked list of pages is returned to the caller. Each page 1960 * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock) 1961 * lock. 1962 * 1963 * Unable to change the parameters to page_create() in a minor release, 1964 * we renamed page_create() to page_create_va(), changed all known calls 1965 * from page_create() to page_create_va(), and created this wrapper. 1966 * 1967 * Upon a major release, we should break compatibility by deleting this 1968 * wrapper, and replacing all the strings "page_create_va", with "page_create". 1969 * 1970 * NOTE: There is a copy of this interface as page_create_io() in 1971 * i86/vm/vm_machdep.c. Any bugs fixed here should be applied 1972 * there. 1973 */ 1974 page_t * 1975 page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags) 1976 { 1977 caddr_t random_vaddr; 1978 struct seg kseg; 1979 1980 #ifdef DEBUG 1981 cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p", 1982 (void *)caller()); 1983 #endif 1984 1985 random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^ 1986 (uintptr_t)(off >> PAGESHIFT)); 1987 kseg.s_as = &kas; 1988 1989 return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr)); 1990 } 1991 1992 #ifdef DEBUG 1993 uint32_t pg_alloc_pgs_mtbf = 0; 1994 #endif 1995 1996 /* 1997 * Used for large page support. It will attempt to allocate 1998 * a large page(s) off the freelist. 1999 * 2000 * Returns non zero on failure. 2001 */ 2002 int 2003 page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr, 2004 page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz, int pgflags) 2005 { 2006 pgcnt_t npgs, curnpgs, totpgs; 2007 size_t pgsz; 2008 page_t *pplist = NULL, *pp; 2009 int err = 0; 2010 lgrp_t *lgrp; 2011 2012 ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1)); 2013 ASSERT(pgflags == 0 || pgflags == PG_LOCAL); 2014 2015 /* 2016 * Check if system heavily prefers local large pages over remote 2017 * on systems with multiple lgroups. 2018 */ 2019 if (lpg_alloc_prefer == LPAP_LOCAL && nlgrps > 1) { 2020 pgflags = PG_LOCAL; 2021 } 2022 2023 VM_STAT_ADD(alloc_pages[0]); 2024 2025 #ifdef DEBUG 2026 if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) { 2027 return (ENOMEM); 2028 } 2029 #endif 2030 2031 /* 2032 * One must be NULL but not both. 2033 * And one must be non NULL but not both. 
2034 */ 2035 ASSERT(basepp != NULL || ppa != NULL); 2036 ASSERT(basepp == NULL || ppa == NULL); 2037 2038 #if defined(__x86) 2039 while (page_chk_freelist(szc) == 0) { 2040 VM_STAT_ADD(alloc_pages[8]); 2041 if (anypgsz == 0 || --szc == 0) 2042 return (ENOMEM); 2043 } 2044 #endif 2045 2046 pgsz = page_get_pagesize(szc); 2047 totpgs = curnpgs = npgs = pgsz >> PAGESHIFT; 2048 2049 ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0); 2050 2051 (void) page_create_wait(npgs, PG_WAIT); 2052 2053 while (npgs && szc) { 2054 lgrp = lgrp_mem_choose(seg, addr, pgsz); 2055 if (pgflags == PG_LOCAL) { 2056 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2057 pgflags, lgrp); 2058 if (pp == NULL) { 2059 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2060 0, lgrp); 2061 } 2062 } else { 2063 pp = page_get_freelist(vp, 0, seg, addr, pgsz, 2064 0, lgrp); 2065 } 2066 if (pp != NULL) { 2067 VM_STAT_ADD(alloc_pages[1]); 2068 page_list_concat(&pplist, &pp); 2069 ASSERT(npgs >= curnpgs); 2070 npgs -= curnpgs; 2071 } else if (anypgsz) { 2072 VM_STAT_ADD(alloc_pages[2]); 2073 szc--; 2074 pgsz = page_get_pagesize(szc); 2075 curnpgs = pgsz >> PAGESHIFT; 2076 } else { 2077 VM_STAT_ADD(alloc_pages[3]); 2078 ASSERT(npgs == totpgs); 2079 page_create_putback(npgs); 2080 return (ENOMEM); 2081 } 2082 } 2083 if (szc == 0) { 2084 VM_STAT_ADD(alloc_pages[4]); 2085 ASSERT(npgs != 0); 2086 page_create_putback(npgs); 2087 err = ENOMEM; 2088 } else if (basepp != NULL) { 2089 ASSERT(npgs == 0); 2090 ASSERT(ppa == NULL); 2091 *basepp = pplist; 2092 } 2093 2094 npgs = totpgs - npgs; 2095 pp = pplist; 2096 2097 /* 2098 * Clear the free and age bits. Also if we were passed in a ppa then 2099 * fill it in with all the constituent pages from the large page. But 2100 * if we failed to allocate all the pages just free what we got. 2101 */ 2102 while (npgs != 0) { 2103 ASSERT(PP_ISFREE(pp)); 2104 ASSERT(PP_ISAGED(pp)); 2105 if (ppa != NULL || err != 0) { 2106 if (err == 0) { 2107 VM_STAT_ADD(alloc_pages[5]); 2108 PP_CLRFREE(pp); 2109 PP_CLRAGED(pp); 2110 page_sub(&pplist, pp); 2111 *ppa++ = pp; 2112 npgs--; 2113 } else { 2114 VM_STAT_ADD(alloc_pages[6]); 2115 ASSERT(pp->p_szc != 0); 2116 curnpgs = page_get_pagecnt(pp->p_szc); 2117 page_list_break(&pp, &pplist, curnpgs); 2118 page_list_add_pages(pp, 0); 2119 page_create_putback(curnpgs); 2120 ASSERT(npgs >= curnpgs); 2121 npgs -= curnpgs; 2122 } 2123 pp = pplist; 2124 } else { 2125 VM_STAT_ADD(alloc_pages[7]); 2126 PP_CLRFREE(pp); 2127 PP_CLRAGED(pp); 2128 pp = pp->p_next; 2129 npgs--; 2130 } 2131 } 2132 return (err); 2133 } 2134 2135 /* 2136 * Get a single large page off of the freelists, and set it up for use. 2137 * Number of bytes requested must be a supported page size. 2138 * 2139 * Note that this call may fail even if there is sufficient 2140 * memory available or PG_WAIT is set, so the caller must 2141 * be willing to fallback on page_create_va(), block and retry, 2142 * or fail the requester. 
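 *
 * As a rough sketch of the fallback pattern described above (pgsz and
 * vaddr here are illustrative local names, not taken from an actual
 * caller in this file), a user of this interface might do:
 *
 *	pp = page_create_va_large(vp, off, pgsz, PG_EXCL, seg, vaddr, NULL);
 *	if (pp == NULL)
 *		pp = page_create_va(vp, off, pgsz, PG_EXCL | PG_WAIT,
 *		    seg, vaddr);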
2143 */ 2144 page_t * 2145 page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2146 struct seg *seg, caddr_t vaddr, void *arg) 2147 { 2148 pgcnt_t npages; 2149 page_t *pp; 2150 page_t *rootpp; 2151 lgrp_t *lgrp; 2152 lgrp_id_t *lgrpid = (lgrp_id_t *)arg; 2153 2154 ASSERT(vp != NULL); 2155 2156 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2157 PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0); 2158 /* but no others */ 2159 2160 ASSERT((flags & PG_EXCL) == PG_EXCL); 2161 2162 npages = btop(bytes); 2163 2164 if (!kcage_on || panicstr) { 2165 /* 2166 * Cage is OFF, or we are single threaded in 2167 * panic, so make everything a RELOC request. 2168 */ 2169 flags &= ~PG_NORELOC; 2170 } 2171 2172 /* 2173 * Make sure there's adequate physical memory available. 2174 * Note: PG_WAIT is ignored here. 2175 */ 2176 if (freemem <= throttlefree + npages) { 2177 VM_STAT_ADD(page_create_large_cnt[1]); 2178 return (NULL); 2179 } 2180 2181 /* 2182 * If cage is on, dampen draw from cage when available 2183 * cage space is low. 2184 */ 2185 if ((flags & (PG_NORELOC | PG_WAIT)) == (PG_NORELOC | PG_WAIT) && 2186 kcage_freemem < kcage_throttlefree + npages) { 2187 2188 /* 2189 * The cage is on, the caller wants PG_NORELOC 2190 * pages and available cage memory is very low. 2191 * Call kcage_create_throttle() to attempt to 2192 * control demand on the cage. 2193 */ 2194 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) { 2195 VM_STAT_ADD(page_create_large_cnt[2]); 2196 return (NULL); 2197 } 2198 } 2199 2200 if (!pcf_decrement_bucket(npages) && 2201 !pcf_decrement_multiple(NULL, npages, 1)) { 2202 VM_STAT_ADD(page_create_large_cnt[4]); 2203 return (NULL); 2204 } 2205 2206 /* 2207 * This is where this function behaves fundamentally differently 2208 * than page_create_va(); since we're intending to map the page 2209 * with a single TTE, we have to get it as a physically contiguous 2210 * hardware pagesize chunk. If we can't, we fail. 2211 */ 2212 if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max && 2213 LGRP_EXISTS(lgrp_table[*lgrpid])) 2214 lgrp = lgrp_table[*lgrpid]; 2215 else 2216 lgrp = lgrp_mem_choose(seg, vaddr, bytes); 2217 2218 if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr, 2219 bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) { 2220 page_create_putback(npages); 2221 VM_STAT_ADD(page_create_large_cnt[5]); 2222 return (NULL); 2223 } 2224 2225 /* 2226 * if we got the page with the wrong mtype give it back this is a 2227 * workaround for CR 6249718. When CR 6249718 is fixed we never get 2228 * inside "if" and the workaround becomes just a nop 2229 */ 2230 if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) { 2231 page_list_add_pages(rootpp, 0); 2232 page_create_putback(npages); 2233 VM_STAT_ADD(page_create_large_cnt[6]); 2234 return (NULL); 2235 } 2236 2237 /* 2238 * If satisfying this request has left us with too little 2239 * memory, start the wheels turning to get some back. The 2240 * first clause of the test prevents waking up the pageout 2241 * daemon in situations where it would decide that there's 2242 * nothing to do. 
2243 */ 2244 if (nscan < desscan && freemem < minfree) { 2245 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2246 "pageout_cv_signal:freemem %ld", freemem); 2247 cv_signal(&proc_pageout->p_cv); 2248 } 2249 2250 pp = rootpp; 2251 while (npages--) { 2252 ASSERT(PAGE_EXCL(pp)); 2253 ASSERT(pp->p_vnode == NULL); 2254 ASSERT(!hat_page_is_mapped(pp)); 2255 PP_CLRFREE(pp); 2256 PP_CLRAGED(pp); 2257 if (!page_hashin(pp, vp, off, NULL)) 2258 panic("page_create_large: hashin failed: page %p", 2259 (void *)pp); 2260 page_io_lock(pp); 2261 off += PAGESIZE; 2262 pp = pp->p_next; 2263 } 2264 2265 VM_STAT_ADD(page_create_large_cnt[0]); 2266 return (rootpp); 2267 } 2268 2269 page_t * 2270 page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags, 2271 struct seg *seg, caddr_t vaddr) 2272 { 2273 page_t *plist = NULL; 2274 pgcnt_t npages; 2275 pgcnt_t found_on_free = 0; 2276 pgcnt_t pages_req; 2277 page_t *npp = NULL; 2278 struct pcf *p; 2279 lgrp_t *lgrp; 2280 2281 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START, 2282 "page_create_start:vp %p off %llx bytes %lu flags %x", 2283 vp, off, bytes, flags); 2284 2285 ASSERT(bytes != 0 && vp != NULL); 2286 2287 if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) { 2288 panic("page_create: invalid flags"); 2289 /*NOTREACHED*/ 2290 } 2291 ASSERT((flags & ~(PG_EXCL | PG_WAIT | 2292 PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0); 2293 /* but no others */ 2294 2295 pages_req = npages = btopr(bytes); 2296 /* 2297 * Try to see whether request is too large to *ever* be 2298 * satisfied, in order to prevent deadlock. We arbitrarily 2299 * decide to limit maximum size requests to max_page_get. 2300 */ 2301 if (npages >= max_page_get) { 2302 if ((flags & PG_WAIT) == 0) { 2303 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG, 2304 "page_create_toobig:vp %p off %llx npages " 2305 "%lu max_page_get %lu", 2306 vp, off, npages, max_page_get); 2307 return (NULL); 2308 } else { 2309 cmn_err(CE_WARN, 2310 "Request for too much kernel memory " 2311 "(%lu bytes), will hang forever", bytes); 2312 for (;;) 2313 delay(1000000000); 2314 } 2315 } 2316 2317 if (!kcage_on || panicstr) { 2318 /* 2319 * Cage is OFF, or we are single threaded in 2320 * panic, so make everything a RELOC request. 2321 */ 2322 flags &= ~PG_NORELOC; 2323 } 2324 2325 if (freemem <= throttlefree + npages) 2326 if (!page_create_throttle(npages, flags)) 2327 return (NULL); 2328 2329 /* 2330 * If cage is on, dampen draw from cage when available 2331 * cage space is low. 2332 */ 2333 if ((flags & PG_NORELOC) && 2334 kcage_freemem < kcage_throttlefree + npages) { 2335 2336 /* 2337 * The cage is on, the caller wants PG_NORELOC 2338 * pages and available cage memory is very low. 2339 * Call kcage_create_throttle() to attempt to 2340 * control demand on the cage. 2341 */ 2342 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) 2343 return (NULL); 2344 } 2345 2346 VM_STAT_ADD(page_create_cnt[0]); 2347 2348 if (!pcf_decrement_bucket(npages)) { 2349 /* 2350 * Have to look harder. If npages is greater than 2351 * one, then we might have to coalesce the counters. 2352 * 2353 * Go wait. We come back having accounted 2354 * for the memory. 
2355	 */
2356		VM_STAT_ADD(page_create_cnt[1]);
2357		if (!page_create_wait(npages, flags)) {
2358			VM_STAT_ADD(page_create_cnt[2]);
2359			return (NULL);
2360		}
2361	}
2362
2363	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS,
2364	    "page_create_success:vp %p off %llx", vp, off);
2365
2366	/*
2367	 * If satisfying this request has left us with too little
2368	 * memory, start the wheels turning to get some back. The
2369	 * first clause of the test prevents waking up the pageout
2370	 * daemon in situations where it would decide that there's
2371	 * nothing to do.
2372	 */
2373	if (nscan < desscan && freemem < minfree) {
2374		TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
2375		    "pageout_cv_signal:freemem %ld", freemem);
2376		cv_signal(&proc_pageout->p_cv);
2377	}
2378
2379	/*
2380	 * Loop around collecting the requested number of pages.
2381	 * Most of the time, we have to `create' a new page. With
2382	 * this in mind, pull the page off the free list before
2383	 * getting the hash lock. This will minimize the hash
2384	 * lock hold time, nesting, and the like. If it turns
2385	 * out we don't need the page, we put it back at the end.
2386	 */
2387	while (npages--) {
2388		page_t	*pp;
2389		kmutex_t	*phm = NULL;
2390		ulong_t	index;
2391
2392		index = PAGE_HASH_FUNC(vp, off);
2393	top:
2394		ASSERT(phm == NULL);
2395		ASSERT(index == PAGE_HASH_FUNC(vp, off));
2396		ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
2397
2398		if (npp == NULL) {
2399			/*
2400			 * Try to get a page from the freelist (ie,
2401			 * a page with no [vp, off] tag). If that
2402			 * fails, use the cachelist.
2403			 *
2404			 * During the first attempt at both the free
2405			 * and cache lists we try for the correct color.
2406			 */
2407			/*
2408			 * XXXX - how do we deal with virtually indexed
2409			 * caches and colors?
2410			 */
2411			VM_STAT_ADD(page_create_cnt[4]);
2412			/*
2413			 * Get lgroup to allocate next page of shared memory
2414			 * from and use it to specify where to allocate
2415			 * the physical memory
2416			 */
2417			lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);
2418			npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
2419			    flags | PG_MATCH_COLOR, lgrp);
2420			if (npp == NULL) {
2421				npp = page_get_cachelist(vp, off, seg,
2422				    vaddr, flags | PG_MATCH_COLOR, lgrp);
2423				if (npp == NULL) {
2424					npp = page_create_get_something(vp,
2425					    off, seg, vaddr,
2426					    flags & ~PG_MATCH_COLOR);
2427				}
2428
2429				if (PP_ISAGED(npp) == 0) {
2430					/*
2431					 * Since this page came from the
2432					 * cachelist, we must destroy the
2433					 * old vnode association.
2434					 */
2435					page_hashout(npp, NULL);
2436				}
2437			}
2438		}
2439
2440		/*
2441		 * We own this page!
2442		 */
2443		ASSERT(PAGE_EXCL(npp));
2444		ASSERT(npp->p_vnode == NULL);
2445		ASSERT(!hat_page_is_mapped(npp));
2446		PP_CLRFREE(npp);
2447		PP_CLRAGED(npp);
2448
2449		/*
2450		 * Here we have a page in our hot little mitts and are
2451		 * just waiting to stuff it on the appropriate lists.
2452		 * Get the mutex and check to see if it really does
2453		 * not exist.
2454		 */
2455		phm = PAGE_HASH_MUTEX(index);
2456		mutex_enter(phm);
2457		pp = page_hash_search(index, vp, off);
2458		if (pp == NULL) {
2459			VM_STAT_ADD(page_create_new);
2460			pp = npp;
2461			npp = NULL;
2462			if (!page_hashin(pp, vp, off, phm)) {
2463				/*
2464				 * Since we hold the page hash mutex and
2465				 * just searched for this page, page_hashin
2466				 * had better not fail. If it does, that
2467				 * means some thread did not follow the
2468				 * page hash mutex rules. Panic now and
2469				 * get it over with. As usual, go down
2470				 * holding all the locks.
2471 */ 2472 ASSERT(MUTEX_HELD(phm)); 2473 panic("page_create: " 2474 "hashin failed %p %p %llx %p", 2475 (void *)pp, (void *)vp, off, (void *)phm); 2476 /*NOTREACHED*/ 2477 } 2478 ASSERT(MUTEX_HELD(phm)); 2479 mutex_exit(phm); 2480 phm = NULL; 2481 2482 /* 2483 * Hat layer locking need not be done to set 2484 * the following bits since the page is not hashed 2485 * and was on the free list (i.e., had no mappings). 2486 * 2487 * Set the reference bit to protect 2488 * against immediate pageout 2489 * 2490 * XXXmh modify freelist code to set reference 2491 * bit so we don't have to do it here. 2492 */ 2493 page_set_props(pp, P_REF); 2494 found_on_free++; 2495 } else { 2496 VM_STAT_ADD(page_create_exists); 2497 if (flags & PG_EXCL) { 2498 /* 2499 * Found an existing page, and the caller 2500 * wanted all new pages. Undo all of the work 2501 * we have done. 2502 */ 2503 mutex_exit(phm); 2504 phm = NULL; 2505 while (plist != NULL) { 2506 pp = plist; 2507 page_sub(&plist, pp); 2508 page_io_unlock(pp); 2509 /* large pages should not end up here */ 2510 ASSERT(pp->p_szc == 0); 2511 /*LINTED: constant in conditional ctx*/ 2512 VN_DISPOSE(pp, B_INVAL, 0, kcred); 2513 } 2514 VM_STAT_ADD(page_create_found_one); 2515 goto fail; 2516 } 2517 ASSERT(flags & PG_WAIT); 2518 if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) { 2519 /* 2520 * Start all over again if we blocked trying 2521 * to lock the page. 2522 */ 2523 mutex_exit(phm); 2524 VM_STAT_ADD(page_create_page_lock_failed); 2525 phm = NULL; 2526 goto top; 2527 } 2528 mutex_exit(phm); 2529 phm = NULL; 2530 2531 if (PP_ISFREE(pp)) { 2532 ASSERT(PP_ISAGED(pp) == 0); 2533 VM_STAT_ADD(pagecnt.pc_get_cache); 2534 page_list_sub(pp, PG_CACHE_LIST); 2535 PP_CLRFREE(pp); 2536 found_on_free++; 2537 } 2538 } 2539 2540 /* 2541 * Got a page! It is locked. Acquire the i/o 2542 * lock since we are going to use the p_next and 2543 * p_prev fields to link the requested pages together. 2544 */ 2545 page_io_lock(pp); 2546 page_add(&plist, pp); 2547 plist = plist->p_next; 2548 off += PAGESIZE; 2549 vaddr += PAGESIZE; 2550 } 2551 2552 ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1); 2553 fail: 2554 if (npp != NULL) { 2555 /* 2556 * Did not need this page after all. 2557 * Put it back on the free list. 2558 */ 2559 VM_STAT_ADD(page_create_putbacks); 2560 PP_SETFREE(npp); 2561 PP_SETAGED(npp); 2562 npp->p_offset = (u_offset_t)-1; 2563 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL); 2564 page_unlock(npp); 2565 2566 } 2567 2568 ASSERT(pages_req >= found_on_free); 2569 2570 { 2571 uint_t overshoot = (uint_t)(pages_req - found_on_free); 2572 2573 if (overshoot) { 2574 VM_STAT_ADD(page_create_overshoot); 2575 p = &pcf[PCF_INDEX()]; 2576 mutex_enter(&p->pcf_lock); 2577 if (p->pcf_block) { 2578 p->pcf_reserve += overshoot; 2579 } else { 2580 p->pcf_count += overshoot; 2581 if (p->pcf_wait) { 2582 mutex_enter(&new_freemem_lock); 2583 if (freemem_wait) { 2584 cv_signal(&freemem_cv); 2585 p->pcf_wait--; 2586 } else { 2587 p->pcf_wait = 0; 2588 } 2589 mutex_exit(&new_freemem_lock); 2590 } 2591 } 2592 mutex_exit(&p->pcf_lock); 2593 /* freemem is approximate, so this test OK */ 2594 if (!p->pcf_block) 2595 freemem += overshoot; 2596 } 2597 } 2598 2599 return (plist); 2600 } 2601 2602 /* 2603 * One or more constituent pages of this large page has been marked 2604 * toxic. Simply demote the large page to PAGESIZE pages and let 2605 * page_free() handle it. This routine should only be called by 2606 * large page free routines (page_free_pages() and page_destroy_pages(). 
2607 * All pages are locked SE_EXCL and have already been marked free. 2608 */ 2609 static void 2610 page_free_toxic_pages(page_t *rootpp) 2611 { 2612 page_t *tpp; 2613 pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc); 2614 uint_t szc = rootpp->p_szc; 2615 2616 for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) { 2617 ASSERT(tpp->p_szc == szc); 2618 ASSERT((PAGE_EXCL(tpp) && 2619 !page_iolock_assert(tpp)) || panicstr); 2620 tpp->p_szc = 0; 2621 } 2622 2623 while (rootpp != NULL) { 2624 tpp = rootpp; 2625 page_sub(&rootpp, tpp); 2626 ASSERT(PP_ISFREE(tpp)); 2627 PP_CLRFREE(tpp); 2628 page_free(tpp, 1); 2629 } 2630 } 2631 2632 /* 2633 * Put page on the "free" list. 2634 * The free list is really two lists maintained by 2635 * the PSM of whatever machine we happen to be on. 2636 */ 2637 void 2638 page_free(page_t *pp, int dontneed) 2639 { 2640 struct pcf *p; 2641 uint_t pcf_index; 2642 2643 ASSERT((PAGE_EXCL(pp) && 2644 !page_iolock_assert(pp)) || panicstr); 2645 2646 if (PP_ISFREE(pp)) { 2647 panic("page_free: page %p is free", (void *)pp); 2648 } 2649 2650 if (pp->p_szc != 0) { 2651 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 2652 PP_ISKAS(pp)) { 2653 panic("page_free: anon or kernel " 2654 "or no vnode large page %p", (void *)pp); 2655 } 2656 page_demote_vp_pages(pp); 2657 ASSERT(pp->p_szc == 0); 2658 } 2659 2660 /* 2661 * The page_struct_lock need not be acquired to examine these 2662 * fields since the page has an "exclusive" lock. 2663 */ 2664 if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 2665 pp->p_slckcnt != 0) { 2666 panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d " 2667 "slckcnt = %d", (void *)pp, page_pptonum(pp), pp->p_lckcnt, 2668 pp->p_cowcnt, pp->p_slckcnt); 2669 /*NOTREACHED*/ 2670 } 2671 2672 ASSERT(!hat_page_getshare(pp)); 2673 2674 PP_SETFREE(pp); 2675 ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) || 2676 !hat_ismod(pp)); 2677 page_clr_all_props(pp); 2678 ASSERT(!hat_page_getshare(pp)); 2679 2680 /* 2681 * Now we add the page to the head of the free list. 2682 * But if this page is associated with a paged vnode 2683 * then we adjust the head forward so that the page is 2684 * effectively at the end of the list. 2685 */ 2686 if (pp->p_vnode == NULL) { 2687 /* 2688 * Page has no identity, put it on the free list. 2689 */ 2690 PP_SETAGED(pp); 2691 pp->p_offset = (u_offset_t)-1; 2692 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 2693 VM_STAT_ADD(pagecnt.pc_free_free); 2694 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2695 "page_free_free:pp %p", pp); 2696 } else { 2697 PP_CLRAGED(pp); 2698 2699 if (!dontneed) { 2700 /* move it to the tail of the list */ 2701 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL); 2702 2703 VM_STAT_ADD(pagecnt.pc_free_cache); 2704 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL, 2705 "page_free_cache_tail:pp %p", pp); 2706 } else { 2707 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD); 2708 2709 VM_STAT_ADD(pagecnt.pc_free_dontneed); 2710 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD, 2711 "page_free_cache_head:pp %p", pp); 2712 } 2713 } 2714 page_unlock(pp); 2715 2716 /* 2717 * Now do the `freemem' accounting. 2718 */ 2719 pcf_index = PCF_INDEX(); 2720 p = &pcf[pcf_index]; 2721 2722 mutex_enter(&p->pcf_lock); 2723 if (p->pcf_block) { 2724 p->pcf_reserve += 1; 2725 } else { 2726 p->pcf_count += 1; 2727 if (p->pcf_wait) { 2728 mutex_enter(&new_freemem_lock); 2729 /* 2730 * Check to see if some other thread 2731 * is actually waiting. Another bucket 2732 * may have woken it up by now. 
If there 2733 * are no waiters, then set our pcf_wait 2734 * count to zero to avoid coming in here 2735 * next time. Also, since only one page 2736 * was put on the free list, just wake 2737 * up one waiter. 2738 */ 2739 if (freemem_wait) { 2740 cv_signal(&freemem_cv); 2741 p->pcf_wait--; 2742 } else { 2743 p->pcf_wait = 0; 2744 } 2745 mutex_exit(&new_freemem_lock); 2746 } 2747 } 2748 mutex_exit(&p->pcf_lock); 2749 2750 /* freemem is approximate, so this test OK */ 2751 if (!p->pcf_block) 2752 freemem += 1; 2753 } 2754 2755 /* 2756 * Put page on the "free" list during intial startup. 2757 * This happens during initial single threaded execution. 2758 */ 2759 void 2760 page_free_at_startup(page_t *pp) 2761 { 2762 struct pcf *p; 2763 uint_t pcf_index; 2764 2765 page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT); 2766 VM_STAT_ADD(pagecnt.pc_free_free); 2767 2768 /* 2769 * Now do the `freemem' accounting. 2770 */ 2771 pcf_index = PCF_INDEX(); 2772 p = &pcf[pcf_index]; 2773 2774 ASSERT(p->pcf_block == 0); 2775 ASSERT(p->pcf_wait == 0); 2776 p->pcf_count += 1; 2777 2778 /* freemem is approximate, so this is OK */ 2779 freemem += 1; 2780 } 2781 2782 void 2783 page_free_pages(page_t *pp) 2784 { 2785 page_t *tpp, *rootpp = NULL; 2786 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 2787 pgcnt_t i; 2788 uint_t szc = pp->p_szc; 2789 2790 VM_STAT_ADD(pagecnt.pc_free_pages); 2791 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE, 2792 "page_free_free:pp %p", pp); 2793 2794 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 2795 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 2796 panic("page_free_pages: not root page %p", (void *)pp); 2797 /*NOTREACHED*/ 2798 } 2799 2800 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 2801 ASSERT((PAGE_EXCL(tpp) && 2802 !page_iolock_assert(tpp)) || panicstr); 2803 if (PP_ISFREE(tpp)) { 2804 panic("page_free_pages: page %p is free", (void *)tpp); 2805 /*NOTREACHED*/ 2806 } 2807 if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 || 2808 tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) { 2809 panic("page_free_pages %p", (void *)tpp); 2810 /*NOTREACHED*/ 2811 } 2812 2813 ASSERT(!hat_page_getshare(tpp)); 2814 ASSERT(tpp->p_vnode == NULL); 2815 ASSERT(tpp->p_szc == szc); 2816 2817 PP_SETFREE(tpp); 2818 page_clr_all_props(tpp); 2819 PP_SETAGED(tpp); 2820 tpp->p_offset = (u_offset_t)-1; 2821 ASSERT(tpp->p_next == tpp); 2822 ASSERT(tpp->p_prev == tpp); 2823 page_list_concat(&rootpp, &tpp); 2824 } 2825 ASSERT(rootpp == pp); 2826 2827 page_list_add_pages(rootpp, 0); 2828 page_create_putback(pgcnt); 2829 } 2830 2831 int free_pages = 1; 2832 2833 /* 2834 * This routine attempts to return pages to the cachelist via page_release(). 2835 * It does not *have* to be successful in all cases, since the pageout scanner 2836 * will catch any pages it misses. It does need to be fast and not introduce 2837 * too much overhead. 2838 * 2839 * If a page isn't found on the unlocked sweep of the page_hash bucket, we 2840 * don't lock and retry. This is ok, since the page scanner will eventually 2841 * find any page we miss in free_vp_pages(). 2842 */ 2843 void 2844 free_vp_pages(vnode_t *vp, u_offset_t off, size_t len) 2845 { 2846 page_t *pp; 2847 u_offset_t eoff; 2848 extern int swap_in_range(vnode_t *, u_offset_t, size_t); 2849 2850 eoff = off + len; 2851 2852 if (free_pages == 0) 2853 return; 2854 if (swap_in_range(vp, off, len)) 2855 return; 2856 2857 for (; off < eoff; off += PAGESIZE) { 2858 2859 /* 2860 * find the page using a fast, but inexact search. 
It'll be OK 2861 * if a few pages slip through the cracks here. 2862 */ 2863 pp = page_exists(vp, off); 2864 2865 /* 2866 * If we didn't find the page (it may not exist), the page 2867 * is free, looks still in use (shared), or we can't lock it, 2868 * just give up. 2869 */ 2870 if (pp == NULL || 2871 PP_ISFREE(pp) || 2872 page_share_cnt(pp) > 0 || 2873 !page_trylock(pp, SE_EXCL)) 2874 continue; 2875 2876 /* 2877 * Once we have locked pp, verify that it's still the 2878 * correct page and not already free 2879 */ 2880 ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL)); 2881 if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) { 2882 page_unlock(pp); 2883 continue; 2884 } 2885 2886 /* 2887 * try to release the page... 2888 */ 2889 (void) page_release(pp, 1); 2890 } 2891 } 2892 2893 /* 2894 * Reclaim the given page from the free list. 2895 * If pp is part of a large pages, only the given constituent page is reclaimed 2896 * and the large page it belonged to will be demoted. This can only happen 2897 * if the page is not on the cachelist. 2898 * 2899 * Returns 1 on success or 0 on failure. 2900 * 2901 * The page is unlocked if it can't be reclaimed (when freemem == 0). 2902 * If `lock' is non-null, it will be dropped and re-acquired if 2903 * the routine must wait while freemem is 0. 2904 * 2905 * As it turns out, boot_getpages() does this. It picks a page, 2906 * based on where OBP mapped in some address, gets its pfn, searches 2907 * the memsegs, locks the page, then pulls it off the free list! 2908 */ 2909 int 2910 page_reclaim(page_t *pp, kmutex_t *lock) 2911 { 2912 struct pcf *p; 2913 struct cpu *cpup; 2914 int enough; 2915 uint_t i; 2916 2917 ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1); 2918 ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp)); 2919 2920 /* 2921 * If `freemem' is 0, we cannot reclaim this page from the 2922 * freelist, so release every lock we might hold: the page, 2923 * and the `lock' before blocking. 2924 * 2925 * The only way `freemem' can become 0 while there are pages 2926 * marked free (have their p->p_free bit set) is when the 2927 * system is low on memory and doing a page_create(). In 2928 * order to guarantee that once page_create() starts acquiring 2929 * pages it will be able to get all that it needs since `freemem' 2930 * was decreased by the requested amount. So, we need to release 2931 * this page, and let page_create() have it. 2932 * 2933 * Since `freemem' being zero is not supposed to happen, just 2934 * use the usual hash stuff as a starting point. If that bucket 2935 * is empty, then assume the worst, and start at the beginning 2936 * of the pcf array. If we always start at the beginning 2937 * when acquiring more than one pcf lock, there won't be any 2938 * deadlock problems. 2939 */ 2940 2941 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */ 2942 2943 if (freemem <= throttlefree && !page_create_throttle(1l, 0)) { 2944 pcf_acquire_all(); 2945 goto page_reclaim_nomem; 2946 } 2947 2948 enough = pcf_decrement_bucket(1); 2949 2950 if (!enough) { 2951 VM_STAT_ADD(page_reclaim_zero); 2952 /* 2953 * Check again. Its possible that some other thread 2954 * could have been right behind us, and added one 2955 * to a list somewhere. Acquire each of the pcf locks 2956 * until we find a page. 2957 */ 2958 p = pcf; 2959 for (i = 0; i < pcf_fanout; i++) { 2960 mutex_enter(&p->pcf_lock); 2961 if (p->pcf_count >= 1) { 2962 p->pcf_count -= 1; 2963 /* 2964 * freemem is not protected by any lock. Thus, 2965 * we cannot have any assertion containing 2966 * freemem here. 
2967 */ 2968 freemem -= 1; 2969 enough = 1; 2970 break; 2971 } 2972 p++; 2973 } 2974 2975 if (!enough) { 2976 page_reclaim_nomem: 2977 /* 2978 * We really can't have page `pp'. 2979 * Time for the no-memory dance with 2980 * page_free(). This is just like 2981 * page_create_wait(). Plus the added 2982 * attraction of releasing whatever mutex 2983 * we held when we were called with in `lock'. 2984 * Page_unlock() will wakeup any thread 2985 * waiting around for this page. 2986 */ 2987 if (lock) { 2988 VM_STAT_ADD(page_reclaim_zero_locked); 2989 mutex_exit(lock); 2990 } 2991 page_unlock(pp); 2992 2993 /* 2994 * get this before we drop all the pcf locks. 2995 */ 2996 mutex_enter(&new_freemem_lock); 2997 2998 p = pcf; 2999 for (i = 0; i < pcf_fanout; i++) { 3000 p->pcf_wait++; 3001 mutex_exit(&p->pcf_lock); 3002 p++; 3003 } 3004 3005 freemem_wait++; 3006 cv_wait(&freemem_cv, &new_freemem_lock); 3007 freemem_wait--; 3008 3009 mutex_exit(&new_freemem_lock); 3010 3011 if (lock) { 3012 mutex_enter(lock); 3013 } 3014 return (0); 3015 } 3016 3017 /* 3018 * The pcf accounting has been done, 3019 * though none of the pcf_wait flags have been set, 3020 * drop the locks and continue on. 3021 */ 3022 while (p >= pcf) { 3023 mutex_exit(&p->pcf_lock); 3024 p--; 3025 } 3026 } 3027 3028 3029 VM_STAT_ADD(pagecnt.pc_reclaim); 3030 3031 /* 3032 * page_list_sub will handle the case where pp is a large page. 3033 * It's possible that the page was promoted while on the freelist 3034 */ 3035 if (PP_ISAGED(pp)) { 3036 page_list_sub(pp, PG_FREE_LIST); 3037 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE, 3038 "page_reclaim_free:pp %p", pp); 3039 } else { 3040 page_list_sub(pp, PG_CACHE_LIST); 3041 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE, 3042 "page_reclaim_cache:pp %p", pp); 3043 } 3044 3045 /* 3046 * clear the p_free & p_age bits since this page is no longer 3047 * on the free list. Notice that there was a brief time where 3048 * a page is marked as free, but is not on the list. 3049 * 3050 * Set the reference bit to protect against immediate pageout. 3051 */ 3052 PP_CLRFREE(pp); 3053 PP_CLRAGED(pp); 3054 page_set_props(pp, P_REF); 3055 3056 CPU_STATS_ENTER_K(); 3057 cpup = CPU; /* get cpup now that CPU cannot change */ 3058 CPU_STATS_ADDQ(cpup, vm, pgrec, 1); 3059 CPU_STATS_ADDQ(cpup, vm, pgfrec, 1); 3060 CPU_STATS_EXIT_K(); 3061 ASSERT(pp->p_szc == 0); 3062 3063 return (1); 3064 } 3065 3066 /* 3067 * Destroy identity of the page and put it back on 3068 * the page free list. Assumes that the caller has 3069 * acquired the "exclusive" lock on the page. 3070 */ 3071 void 3072 page_destroy(page_t *pp, int dontfree) 3073 { 3074 ASSERT((PAGE_EXCL(pp) && 3075 !page_iolock_assert(pp)) || panicstr); 3076 ASSERT(pp->p_slckcnt == 0 || panicstr); 3077 3078 if (pp->p_szc != 0) { 3079 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) || 3080 PP_ISKAS(pp)) { 3081 panic("page_destroy: anon or kernel or no vnode " 3082 "large page %p", (void *)pp); 3083 } 3084 page_demote_vp_pages(pp); 3085 ASSERT(pp->p_szc == 0); 3086 } 3087 3088 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp); 3089 3090 /* 3091 * Unload translations, if any, then hash out the 3092 * page to erase its identity. 3093 */ 3094 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3095 page_hashout(pp, NULL); 3096 3097 if (!dontfree) { 3098 /* 3099 * Acquire the "freemem_lock" for availrmem. 3100 * The page_struct_lock need not be acquired for lckcnt 3101 * and cowcnt since the page has an "exclusive" lock. 
3102 * We are doing a modified version of page_pp_unlock here. 3103 */ 3104 if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) { 3105 mutex_enter(&freemem_lock); 3106 if (pp->p_lckcnt != 0) { 3107 availrmem++; 3108 pages_locked--; 3109 pp->p_lckcnt = 0; 3110 } 3111 if (pp->p_cowcnt != 0) { 3112 availrmem += pp->p_cowcnt; 3113 pages_locked -= pp->p_cowcnt; 3114 pp->p_cowcnt = 0; 3115 } 3116 mutex_exit(&freemem_lock); 3117 } 3118 /* 3119 * Put the page on the "free" list. 3120 */ 3121 page_free(pp, 0); 3122 } 3123 } 3124 3125 void 3126 page_destroy_pages(page_t *pp) 3127 { 3128 3129 page_t *tpp, *rootpp = NULL; 3130 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc); 3131 pgcnt_t i, pglcks = 0; 3132 uint_t szc = pp->p_szc; 3133 3134 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes()); 3135 3136 VM_STAT_ADD(pagecnt.pc_destroy_pages); 3137 3138 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp); 3139 3140 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) { 3141 panic("page_destroy_pages: not root page %p", (void *)pp); 3142 /*NOTREACHED*/ 3143 } 3144 3145 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) { 3146 ASSERT((PAGE_EXCL(tpp) && 3147 !page_iolock_assert(tpp)) || panicstr); 3148 ASSERT(tpp->p_slckcnt == 0 || panicstr); 3149 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 3150 page_hashout(tpp, NULL); 3151 ASSERT(tpp->p_offset == (u_offset_t)-1); 3152 if (tpp->p_lckcnt != 0) { 3153 pglcks++; 3154 tpp->p_lckcnt = 0; 3155 } else if (tpp->p_cowcnt != 0) { 3156 pglcks += tpp->p_cowcnt; 3157 tpp->p_cowcnt = 0; 3158 } 3159 ASSERT(!hat_page_getshare(tpp)); 3160 ASSERT(tpp->p_vnode == NULL); 3161 ASSERT(tpp->p_szc == szc); 3162 3163 PP_SETFREE(tpp); 3164 page_clr_all_props(tpp); 3165 PP_SETAGED(tpp); 3166 ASSERT(tpp->p_next == tpp); 3167 ASSERT(tpp->p_prev == tpp); 3168 page_list_concat(&rootpp, &tpp); 3169 } 3170 3171 ASSERT(rootpp == pp); 3172 if (pglcks != 0) { 3173 mutex_enter(&freemem_lock); 3174 availrmem += pglcks; 3175 mutex_exit(&freemem_lock); 3176 } 3177 3178 page_list_add_pages(rootpp, 0); 3179 page_create_putback(pgcnt); 3180 } 3181 3182 /* 3183 * Similar to page_destroy(), but destroys pages which are 3184 * locked and known to be on the page free list. Since 3185 * the page is known to be free and locked, no one can access 3186 * it. 3187 * 3188 * Also, the number of free pages does not change. 3189 */ 3190 void 3191 page_destroy_free(page_t *pp) 3192 { 3193 ASSERT(PAGE_EXCL(pp)); 3194 ASSERT(PP_ISFREE(pp)); 3195 ASSERT(pp->p_vnode); 3196 ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0); 3197 ASSERT(!hat_page_is_mapped(pp)); 3198 ASSERT(PP_ISAGED(pp) == 0); 3199 ASSERT(pp->p_szc == 0); 3200 3201 VM_STAT_ADD(pagecnt.pc_destroy_free); 3202 page_list_sub(pp, PG_CACHE_LIST); 3203 3204 page_hashout(pp, NULL); 3205 ASSERT(pp->p_vnode == NULL); 3206 ASSERT(pp->p_offset == (u_offset_t)-1); 3207 ASSERT(pp->p_hash == NULL); 3208 3209 PP_SETAGED(pp); 3210 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 3211 page_unlock(pp); 3212 3213 mutex_enter(&new_freemem_lock); 3214 if (freemem_wait) { 3215 cv_signal(&freemem_cv); 3216 } 3217 mutex_exit(&new_freemem_lock); 3218 } 3219 3220 /* 3221 * Rename the page "opp" to have an identity specified 3222 * by [vp, off]. If a page already exists with this name 3223 * it is locked and destroyed. Note that the page's 3224 * translations are not unloaded during the rename. 
3225 * 3226 * This routine is used by the anon layer to "steal" the 3227 * original page and is not unlike destroying a page and 3228 * creating a new page using the same page frame. 3229 * 3230 * XXX -- Could deadlock if caller 1 tries to rename A to B while 3231 * caller 2 tries to rename B to A. 3232 */ 3233 void 3234 page_rename(page_t *opp, vnode_t *vp, u_offset_t off) 3235 { 3236 page_t *pp; 3237 int olckcnt = 0; 3238 int ocowcnt = 0; 3239 kmutex_t *phm; 3240 ulong_t index; 3241 3242 ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp)); 3243 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3244 ASSERT(PP_ISFREE(opp) == 0); 3245 3246 VM_STAT_ADD(page_rename_count); 3247 3248 TRACE_3(TR_FAC_VM, TR_PAGE_RENAME, 3249 "page rename:pp %p vp %p off %llx", opp, vp, off); 3250 3251 /* 3252 * CacheFS may call page_rename for a large NFS page 3253 * when both CacheFS and NFS mount points are used 3254 * by applications. Demote this large page before 3255 * renaming it, to ensure that there are no "partial" 3256 * large pages left lying around. 3257 */ 3258 if (opp->p_szc != 0) { 3259 vnode_t *ovp = opp->p_vnode; 3260 ASSERT(ovp != NULL); 3261 ASSERT(!IS_SWAPFSVP(ovp)); 3262 ASSERT(!VN_ISKAS(ovp)); 3263 page_demote_vp_pages(opp); 3264 ASSERT(opp->p_szc == 0); 3265 } 3266 3267 page_hashout(opp, NULL); 3268 PP_CLRAGED(opp); 3269 3270 /* 3271 * Acquire the appropriate page hash lock, since 3272 * we're going to rename the page. 3273 */ 3274 index = PAGE_HASH_FUNC(vp, off); 3275 phm = PAGE_HASH_MUTEX(index); 3276 mutex_enter(phm); 3277 top: 3278 /* 3279 * Look for an existing page with this name and destroy it if found. 3280 * By holding the page hash lock all the way to the page_hashin() 3281 * call, we are assured that no page can be created with this 3282 * identity. In the case when the phm lock is dropped to undo any 3283 * hat layer mappings, the existing page is held with an "exclusive" 3284 * lock, again preventing another page from being created with 3285 * this identity. 3286 */ 3287 pp = page_hash_search(index, vp, off); 3288 if (pp != NULL) { 3289 VM_STAT_ADD(page_rename_exists); 3290 3291 /* 3292 * As it turns out, this is one of only two places where 3293 * page_lock() needs to hold the passed in lock in the 3294 * successful case. In all of the others, the lock could 3295 * be dropped as soon as the attempt is made to lock 3296 * the page. It is tempting to add yet another arguement, 3297 * PL_KEEP or PL_DROP, to let page_lock know what to do. 3298 */ 3299 if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) { 3300 /* 3301 * Went to sleep because the page could not 3302 * be locked. We were woken up when the page 3303 * was unlocked, or when the page was destroyed. 3304 * In either case, `phm' was dropped while we 3305 * slept. Hence we should not just roar through 3306 * this loop. 3307 */ 3308 goto top; 3309 } 3310 3311 /* 3312 * If an existing page is a large page, then demote 3313 * it to ensure that no "partial" large pages are 3314 * "created" after page_rename. An existing page 3315 * can be a CacheFS page, and can't belong to swapfs. 3316 */ 3317 if (hat_page_is_mapped(pp)) { 3318 /* 3319 * Unload translations. Since we hold the 3320 * exclusive lock on this page, the page 3321 * can not be changed while we drop phm. 3322 * This is also not a lock protocol violation, 3323 * but rather the proper way to do things. 
3324 */ 3325 mutex_exit(phm); 3326 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 3327 if (pp->p_szc != 0) { 3328 ASSERT(!IS_SWAPFSVP(vp)); 3329 ASSERT(!VN_ISKAS(vp)); 3330 page_demote_vp_pages(pp); 3331 ASSERT(pp->p_szc == 0); 3332 } 3333 mutex_enter(phm); 3334 } else if (pp->p_szc != 0) { 3335 ASSERT(!IS_SWAPFSVP(vp)); 3336 ASSERT(!VN_ISKAS(vp)); 3337 mutex_exit(phm); 3338 page_demote_vp_pages(pp); 3339 ASSERT(pp->p_szc == 0); 3340 mutex_enter(phm); 3341 } 3342 page_hashout(pp, phm); 3343 } 3344 /* 3345 * Hash in the page with the new identity. 3346 */ 3347 if (!page_hashin(opp, vp, off, phm)) { 3348 /* 3349 * We were holding phm while we searched for [vp, off] 3350 * and only dropped phm if we found and locked a page. 3351 * If we can't create this page now, then some thing 3352 * is really broken. 3353 */ 3354 panic("page_rename: Can't hash in page: %p", (void *)pp); 3355 /*NOTREACHED*/ 3356 } 3357 3358 ASSERT(MUTEX_HELD(phm)); 3359 mutex_exit(phm); 3360 3361 /* 3362 * Now that we have dropped phm, lets get around to finishing up 3363 * with pp. 3364 */ 3365 if (pp != NULL) { 3366 ASSERT(!hat_page_is_mapped(pp)); 3367 /* for now large pages should not end up here */ 3368 ASSERT(pp->p_szc == 0); 3369 /* 3370 * Save the locks for transfer to the new page and then 3371 * clear them so page_free doesn't think they're important. 3372 * The page_struct_lock need not be acquired for lckcnt and 3373 * cowcnt since the page has an "exclusive" lock. 3374 */ 3375 olckcnt = pp->p_lckcnt; 3376 ocowcnt = pp->p_cowcnt; 3377 pp->p_lckcnt = pp->p_cowcnt = 0; 3378 3379 /* 3380 * Put the page on the "free" list after we drop 3381 * the lock. The less work under the lock the better. 3382 */ 3383 /*LINTED: constant in conditional context*/ 3384 VN_DISPOSE(pp, B_FREE, 0, kcred); 3385 } 3386 3387 /* 3388 * Transfer the lock count from the old page (if any). 3389 * The page_struct_lock need not be acquired for lckcnt and 3390 * cowcnt since the page has an "exclusive" lock. 3391 */ 3392 opp->p_lckcnt += olckcnt; 3393 opp->p_cowcnt += ocowcnt; 3394 } 3395 3396 /* 3397 * low level routine to add page `pp' to the hash and vp chains for [vp, offset] 3398 * 3399 * Pages are normally inserted at the start of a vnode's v_pages list. 3400 * If the vnode is VMODSORT and the page is modified, it goes at the end. 3401 * This can happen when a modified page is relocated for DR. 3402 * 3403 * Returns 1 on success and 0 on failure. 3404 */ 3405 static int 3406 page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset) 3407 { 3408 page_t **listp; 3409 page_t *tp; 3410 ulong_t index; 3411 3412 ASSERT(PAGE_EXCL(pp)); 3413 ASSERT(vp != NULL); 3414 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3415 3416 /* 3417 * Be sure to set these up before the page is inserted on the hash 3418 * list. As soon as the page is placed on the list some other 3419 * thread might get confused and wonder how this page could 3420 * possibly hash to this list. 3421 */ 3422 pp->p_vnode = vp; 3423 pp->p_offset = offset; 3424 3425 /* 3426 * record if this page is on a swap vnode 3427 */ 3428 if ((vp->v_flag & VISSWAP) != 0) 3429 PP_SETSWAP(pp); 3430 3431 index = PAGE_HASH_FUNC(vp, offset); 3432 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index))); 3433 listp = &page_hash[index]; 3434 3435 /* 3436 * If this page is already hashed in, fail this attempt to add it. 
3437 */ 3438 for (tp = *listp; tp != NULL; tp = tp->p_hash) { 3439 if (tp->p_vnode == vp && tp->p_offset == offset) { 3440 pp->p_vnode = NULL; 3441 pp->p_offset = (u_offset_t)(-1); 3442 return (0); 3443 } 3444 } 3445 pp->p_hash = *listp; 3446 *listp = pp; 3447 3448 /* 3449 * Add the page to the vnode's list of pages 3450 */ 3451 if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp)) 3452 listp = &vp->v_pages->p_vpprev->p_vpnext; 3453 else 3454 listp = &vp->v_pages; 3455 3456 page_vpadd(listp, pp); 3457 3458 return (1); 3459 } 3460 3461 /* 3462 * Add page `pp' to both the hash and vp chains for [vp, offset]. 3463 * 3464 * Returns 1 on success and 0 on failure. 3465 * If hold is passed in, it is not dropped. 3466 */ 3467 int 3468 page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold) 3469 { 3470 kmutex_t *phm = NULL; 3471 kmutex_t *vphm; 3472 int rc; 3473 3474 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3475 ASSERT(pp->p_fsdata == 0 || panicstr); 3476 3477 TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN, 3478 "page_hashin:pp %p vp %p offset %llx", 3479 pp, vp, offset); 3480 3481 VM_STAT_ADD(hashin_count); 3482 3483 if (hold != NULL) 3484 phm = hold; 3485 else { 3486 VM_STAT_ADD(hashin_not_held); 3487 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset)); 3488 mutex_enter(phm); 3489 } 3490 3491 vphm = page_vnode_mutex(vp); 3492 mutex_enter(vphm); 3493 rc = page_do_hashin(pp, vp, offset); 3494 mutex_exit(vphm); 3495 if (hold == NULL) 3496 mutex_exit(phm); 3497 if (rc == 0) 3498 VM_STAT_ADD(hashin_already); 3499 return (rc); 3500 } 3501 3502 /* 3503 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3504 * All mutexes must be held 3505 */ 3506 static void 3507 page_do_hashout(page_t *pp) 3508 { 3509 page_t **hpp; 3510 page_t *hp; 3511 vnode_t *vp = pp->p_vnode; 3512 3513 ASSERT(vp != NULL); 3514 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 3515 3516 /* 3517 * First, take pp off of its hash chain. 3518 */ 3519 hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)]; 3520 3521 for (;;) { 3522 hp = *hpp; 3523 if (hp == pp) 3524 break; 3525 if (hp == NULL) { 3526 panic("page_do_hashout"); 3527 /*NOTREACHED*/ 3528 } 3529 hpp = &hp->p_hash; 3530 } 3531 *hpp = pp->p_hash; 3532 3533 /* 3534 * Now remove it from its associated vnode. 3535 */ 3536 if (vp->v_pages) 3537 page_vpsub(&vp->v_pages, pp); 3538 3539 pp->p_hash = NULL; 3540 page_clr_all_props(pp); 3541 PP_CLRSWAP(pp); 3542 pp->p_vnode = NULL; 3543 pp->p_offset = (u_offset_t)-1; 3544 pp->p_fsdata = 0; 3545 } 3546 3547 /* 3548 * Remove page ``pp'' from the hash and vp chains and remove vp association. 3549 * 3550 * When `phm' is non-NULL it contains the address of the mutex protecting the 3551 * hash list pp is on. It is not dropped. 3552 */ 3553 void 3554 page_hashout(page_t *pp, kmutex_t *phm) 3555 { 3556 vnode_t *vp; 3557 ulong_t index; 3558 kmutex_t *nphm; 3559 kmutex_t *vphm; 3560 kmutex_t *sep; 3561 3562 ASSERT(phm != NULL ? 
MUTEX_HELD(phm) : 1); 3563 ASSERT(pp->p_vnode != NULL); 3564 ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr); 3565 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode))); 3566 3567 vp = pp->p_vnode; 3568 3569 TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT, 3570 "page_hashout:pp %p vp %p", pp, vp); 3571 3572 /* Kernel probe */ 3573 TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */, 3574 tnf_opaque, vnode, vp, 3575 tnf_offset, offset, pp->p_offset); 3576 3577 /* 3578 * 3579 */ 3580 VM_STAT_ADD(hashout_count); 3581 index = PAGE_HASH_FUNC(vp, pp->p_offset); 3582 if (phm == NULL) { 3583 VM_STAT_ADD(hashout_not_held); 3584 nphm = PAGE_HASH_MUTEX(index); 3585 mutex_enter(nphm); 3586 } 3587 ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1); 3588 3589 3590 /* 3591 * grab page vnode mutex and remove it... 3592 */ 3593 vphm = page_vnode_mutex(vp); 3594 mutex_enter(vphm); 3595 3596 page_do_hashout(pp); 3597 3598 mutex_exit(vphm); 3599 if (phm == NULL) 3600 mutex_exit(nphm); 3601 3602 /* 3603 * Wake up processes waiting for this page. The page's 3604 * identity has been changed, and is probably not the 3605 * desired page any longer. 3606 */ 3607 sep = page_se_mutex(pp); 3608 mutex_enter(sep); 3609 pp->p_selock &= ~SE_EWANTED; 3610 if (CV_HAS_WAITERS(&pp->p_cv)) 3611 cv_broadcast(&pp->p_cv); 3612 mutex_exit(sep); 3613 } 3614 3615 /* 3616 * Add the page to the front of a linked list of pages 3617 * using the p_next & p_prev pointers for the list. 3618 * The caller is responsible for protecting the list pointers. 3619 */ 3620 void 3621 page_add(page_t **ppp, page_t *pp) 3622 { 3623 ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3624 3625 page_add_common(ppp, pp); 3626 } 3627 3628 3629 3630 /* 3631 * Common code for page_add() and mach_page_add() 3632 */ 3633 void 3634 page_add_common(page_t **ppp, page_t *pp) 3635 { 3636 if (*ppp == NULL) { 3637 pp->p_next = pp->p_prev = pp; 3638 } else { 3639 pp->p_next = *ppp; 3640 pp->p_prev = (*ppp)->p_prev; 3641 (*ppp)->p_prev = pp; 3642 pp->p_prev->p_next = pp; 3643 } 3644 *ppp = pp; 3645 } 3646 3647 3648 /* 3649 * Remove this page from a linked list of pages 3650 * using the p_next & p_prev pointers for the list. 3651 * 3652 * The caller is responsible for protecting the list pointers. 3653 */ 3654 void 3655 page_sub(page_t **ppp, page_t *pp) 3656 { 3657 ASSERT((PP_ISFREE(pp)) ? 1 : 3658 (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp))); 3659 3660 if (*ppp == NULL || pp == NULL) { 3661 panic("page_sub: bad arg(s): pp %p, *ppp %p", 3662 (void *)pp, (void *)(*ppp)); 3663 /*NOTREACHED*/ 3664 } 3665 3666 page_sub_common(ppp, pp); 3667 } 3668 3669 3670 /* 3671 * Common code for page_sub() and mach_page_sub() 3672 */ 3673 void 3674 page_sub_common(page_t **ppp, page_t *pp) 3675 { 3676 if (*ppp == pp) 3677 *ppp = pp->p_next; /* go to next page */ 3678 3679 if (*ppp == pp) 3680 *ppp = NULL; /* page list is gone */ 3681 else { 3682 pp->p_prev->p_next = pp->p_next; 3683 pp->p_next->p_prev = pp->p_prev; 3684 } 3685 pp->p_prev = pp->p_next = pp; /* make pp a list of one */ 3686 } 3687 3688 3689 /* 3690 * Break page list cppp into two lists with npages in the first list. 3691 * The tail is returned in nppp. 
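 *
 * For example (purely illustrative): if *oppp is the circular list
 * A-B-C-D and npages is 2, then on return *oppp is the list A-B and
 * *nppp is the list C-D. With npages of 0 the entire list is handed
 * over to *nppp and *oppp becomes NULL.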
3692 */ 3693 void 3694 page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages) 3695 { 3696 page_t *s1pp = *oppp; 3697 page_t *s2pp; 3698 page_t *e1pp, *e2pp; 3699 long n = 0; 3700 3701 if (s1pp == NULL) { 3702 *nppp = NULL; 3703 return; 3704 } 3705 if (npages == 0) { 3706 *nppp = s1pp; 3707 *oppp = NULL; 3708 return; 3709 } 3710 for (n = 0, s2pp = *oppp; n < npages; n++) { 3711 s2pp = s2pp->p_next; 3712 } 3713 /* Fix head and tail of new lists */ 3714 e1pp = s2pp->p_prev; 3715 e2pp = s1pp->p_prev; 3716 s1pp->p_prev = e1pp; 3717 e1pp->p_next = s1pp; 3718 s2pp->p_prev = e2pp; 3719 e2pp->p_next = s2pp; 3720 3721 /* second list empty */ 3722 if (s2pp == s1pp) { 3723 *oppp = s1pp; 3724 *nppp = NULL; 3725 } else { 3726 *oppp = s1pp; 3727 *nppp = s2pp; 3728 } 3729 } 3730 3731 /* 3732 * Concatenate page list nppp onto the end of list ppp. 3733 */ 3734 void 3735 page_list_concat(page_t **ppp, page_t **nppp) 3736 { 3737 page_t *s1pp, *s2pp, *e1pp, *e2pp; 3738 3739 if (*nppp == NULL) { 3740 return; 3741 } 3742 if (*ppp == NULL) { 3743 *ppp = *nppp; 3744 return; 3745 } 3746 s1pp = *ppp; 3747 e1pp = s1pp->p_prev; 3748 s2pp = *nppp; 3749 e2pp = s2pp->p_prev; 3750 s1pp->p_prev = e2pp; 3751 e2pp->p_next = s1pp; 3752 e1pp->p_next = s2pp; 3753 s2pp->p_prev = e1pp; 3754 } 3755 3756 /* 3757 * return the next page in the page list 3758 */ 3759 page_t * 3760 page_list_next(page_t *pp) 3761 { 3762 return (pp->p_next); 3763 } 3764 3765 3766 /* 3767 * Add the page to the front of the linked list of pages 3768 * using p_vpnext/p_vpprev pointers for the list. 3769 * 3770 * The caller is responsible for protecting the lists. 3771 */ 3772 void 3773 page_vpadd(page_t **ppp, page_t *pp) 3774 { 3775 if (*ppp == NULL) { 3776 pp->p_vpnext = pp->p_vpprev = pp; 3777 } else { 3778 pp->p_vpnext = *ppp; 3779 pp->p_vpprev = (*ppp)->p_vpprev; 3780 (*ppp)->p_vpprev = pp; 3781 pp->p_vpprev->p_vpnext = pp; 3782 } 3783 *ppp = pp; 3784 } 3785 3786 /* 3787 * Remove this page from the linked list of pages 3788 * using p_vpnext/p_vpprev pointers for the list. 3789 * 3790 * The caller is responsible for protecting the lists. 3791 */ 3792 void 3793 page_vpsub(page_t **ppp, page_t *pp) 3794 { 3795 if (*ppp == NULL || pp == NULL) { 3796 panic("page_vpsub: bad arg(s): pp %p, *ppp %p", 3797 (void *)pp, (void *)(*ppp)); 3798 /*NOTREACHED*/ 3799 } 3800 3801 if (*ppp == pp) 3802 *ppp = pp->p_vpnext; /* go to next page */ 3803 3804 if (*ppp == pp) 3805 *ppp = NULL; /* page list is gone */ 3806 else { 3807 pp->p_vpprev->p_vpnext = pp->p_vpnext; 3808 pp->p_vpnext->p_vpprev = pp->p_vpprev; 3809 } 3810 pp->p_vpprev = pp->p_vpnext = pp; /* make pp a list of one */ 3811 } 3812 3813 /* 3814 * Lock a physical page into memory "long term". Used to support "lock 3815 * in memory" functions. Accepts the page to be locked, and a cow variable 3816 * to indicate whether a the lock will travel to the new page during 3817 * a potential copy-on-write. 3818 */ 3819 int 3820 page_pp_lock( 3821 page_t *pp, /* page to be locked */ 3822 int cow, /* cow lock */ 3823 int kernel) /* must succeed -- ignore checking */ 3824 { 3825 int r = 0; /* result -- assume failure */ 3826 3827 ASSERT(PAGE_LOCKED(pp)); 3828 3829 page_struct_lock(pp); 3830 /* 3831 * Acquire the "freemem_lock" for availrmem. 
3832	 */
3833	if (cow) {
3834		mutex_enter(&freemem_lock);
3835		if ((availrmem > pages_pp_maximum) &&
3836		    (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
3837			availrmem--;
3838			pages_locked++;
3839			mutex_exit(&freemem_lock);
3840			r = 1;
3841			if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
3842				cmn_err(CE_WARN,
3843				    "COW lock limit reached on pfn 0x%lx",
3844				    page_pptonum(pp));
3845			}
3846		} else
3847			mutex_exit(&freemem_lock);
3848	} else {
3849		if (pp->p_lckcnt) {
3850			if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
3851				r = 1;
3852				if (++pp->p_lckcnt ==
3853				    (ushort_t)PAGE_LOCK_MAXIMUM) {
3854					cmn_err(CE_WARN, "Page lock limit "
3855					    "reached on pfn 0x%lx",
3856					    page_pptonum(pp));
3857				}
3858			}
3859		} else {
3860			if (kernel) {
3861				/* availrmem accounting done by caller */
3862				++pp->p_lckcnt;
3863				r = 1;
3864			} else {
3865				mutex_enter(&freemem_lock);
3866				if (availrmem > pages_pp_maximum) {
3867					availrmem--;
3868					pages_locked++;
3869					++pp->p_lckcnt;
3870					r = 1;
3871				}
3872				mutex_exit(&freemem_lock);
3873			}
3874		}
3875	}
3876	page_struct_unlock(pp);
3877	return (r);
3878	}
3879
3880	/*
3881	 * Decommit a lock on a physical page frame. Account for cow locks if
3882	 * appropriate.
3883	 */
3884	void
3885	page_pp_unlock(
3886		page_t *pp,			/* page to be unlocked */
3887		int cow,			/* expect cow lock */
3888		int kernel)			/* this was a kernel lock */
3889	{
3890		ASSERT(PAGE_LOCKED(pp));
3891
3892		page_struct_lock(pp);
3893		/*
3894		 * Acquire the "freemem_lock" for availrmem.
3895		 * If cowcnt or lckcnt is already 0 do nothing; i.e., we
3896		 * could be called to unlock even if nothing is locked. This could
3897		 * happen if locked file pages were truncated (removing the lock)
3898		 * and the file was grown again and new pages faulted in; the new
3899		 * pages are unlocked but the segment still thinks they're locked.
3900		 */
3901		if (cow) {
3902			if (pp->p_cowcnt) {
3903				mutex_enter(&freemem_lock);
3904				pp->p_cowcnt--;
3905				availrmem++;
3906				pages_locked--;
3907				mutex_exit(&freemem_lock);
3908			}
3909		} else {
3910			if (pp->p_lckcnt && --pp->p_lckcnt == 0) {
3911				if (!kernel) {
3912					mutex_enter(&freemem_lock);
3913					availrmem++;
3914					pages_locked--;
3915					mutex_exit(&freemem_lock);
3916				}
3917			}
3918		}
3919		page_struct_unlock(pp);
3920	}
3921
3922	/*
3923	 * This routine reserves availrmem for npages.
3924	 * It returns 1 on success or 0 on failure.
3925	 *
3926	 * flags: KM_NOSLEEP or KM_SLEEP
3927	 * cb_wait: called to induce delay when KM_SLEEP reservation requires kmem
3928	 *	    reaping to potentially succeed. If the callback returns 0, the
3929	 *	    reservation attempts will cease to repeat and page_xresv() may
3930	 *	    report a failure. If cb_wait is NULL, the traditional delay(hz >> 2)
3931	 *	    behavior will be used while waiting for a reap.
3932	 */
3933	int
3934	page_xresv(pgcnt_t npages, uint_t flags, int (*cb_wait)(void))
3935	{
3936		mutex_enter(&freemem_lock);
3937		if (availrmem >= tune.t_minarmem + npages) {
3938			availrmem -= npages;
3939			mutex_exit(&freemem_lock);
3940			return (1);
3941		} else if ((flags & KM_NOSLEEP) != 0) {
3942			mutex_exit(&freemem_lock);
3943			return (0);
3944		}
3945		mutex_exit(&freemem_lock);
3946
3947		/*
3948		 * We signal memory pressure to the system by elevating 'needfree'.
3949		 * Processes such as kmem reaping, pageout, and ZFS ARC shrinking can
3950		 * then respond to said pressure by freeing pages.
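	 *
	 * The cb_wait callback described in the header comment above is
	 * invoked from the retry loop below. A minimal illustrative callback
	 * (my_resv_wait is a hypothetical name, not an existing routine)
	 * would simply pause briefly and return nonzero so the loop keeps
	 * trying; returning 0 instead tells page_xresv() to give up:
	 *
	 *	static int
	 *	my_resv_wait(void)
	 *	{
	 *		delay(hz >> 2);
	 *		return (1);
	 *	}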
3951 */ 3952 page_needfree(npages); 3953 int nobail = 1; 3954 do { 3955 kmem_reap(); 3956 if (cb_wait == NULL) { 3957 delay(hz >> 2); 3958 } else { 3959 nobail = cb_wait(); 3960 } 3961 3962 mutex_enter(&freemem_lock); 3963 if (availrmem >= tune.t_minarmem + npages) { 3964 availrmem -= npages; 3965 mutex_exit(&freemem_lock); 3966 page_needfree(-(spgcnt_t)npages); 3967 return (1); 3968 } 3969 mutex_exit(&freemem_lock); 3970 } while (nobail != 0); 3971 page_needfree(-(spgcnt_t)npages); 3972 3973 return (0); 3974 } 3975 3976 /* 3977 * This routine reserves availrmem for npages; 3978 * flags: KM_NOSLEEP or KM_SLEEP 3979 * returns 1 on success or 0 on failure 3980 */ 3981 int 3982 page_resv(pgcnt_t npages, uint_t flags) 3983 { 3984 return (page_xresv(npages, flags, NULL)); 3985 } 3986 3987 /* 3988 * This routine unreserves availrmem for npages; 3989 */ 3990 void 3991 page_unresv(pgcnt_t npages) 3992 { 3993 mutex_enter(&freemem_lock); 3994 availrmem += npages; 3995 mutex_exit(&freemem_lock); 3996 } 3997 3998 /* 3999 * See Statement at the beginning of segvn_lockop() regarding 4000 * the way we handle cowcnts and lckcnts. 4001 * 4002 * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage 4003 * that breaks COW has PROT_WRITE. 4004 * 4005 * Note that, we may also break COW in case we are softlocking 4006 * on read access during physio; 4007 * in this softlock case, the vpage may not have PROT_WRITE. 4008 * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp' 4009 * if the vpage doesn't have PROT_WRITE. 4010 * 4011 * This routine is never called if we are stealing a page 4012 * in anon_private. 4013 * 4014 * The caller subtracted from availrmem for read only mapping. 4015 * if lckcnt is 1 increment availrmem. 4016 */ 4017 void 4018 page_pp_useclaim( 4019 page_t *opp, /* original page frame losing lock */ 4020 page_t *npp, /* new page frame gaining lock */ 4021 uint_t write_perm) /* set if vpage has PROT_WRITE */ 4022 { 4023 int payback = 0; 4024 int nidx, oidx; 4025 4026 ASSERT(PAGE_LOCKED(opp)); 4027 ASSERT(PAGE_LOCKED(npp)); 4028 4029 /* 4030 * Since we have two pages we probably have two locks. We need to take 4031 * them in a defined order to avoid deadlocks. It's also possible they 4032 * both hash to the same lock in which case this is a non-issue. 4033 */ 4034 nidx = PAGE_LLOCK_HASH(PP_PAGEROOT(npp)); 4035 oidx = PAGE_LLOCK_HASH(PP_PAGEROOT(opp)); 4036 if (nidx < oidx) { 4037 page_struct_lock(npp); 4038 page_struct_lock(opp); 4039 } else if (oidx < nidx) { 4040 page_struct_lock(opp); 4041 page_struct_lock(npp); 4042 } else { /* The pages hash to the same lock */ 4043 page_struct_lock(npp); 4044 } 4045 4046 ASSERT(npp->p_cowcnt == 0); 4047 ASSERT(npp->p_lckcnt == 0); 4048 4049 /* Don't use claim if nothing is locked (see page_pp_unlock above) */ 4050 if ((write_perm && opp->p_cowcnt != 0) || 4051 (!write_perm && opp->p_lckcnt != 0)) { 4052 4053 if (write_perm) { 4054 npp->p_cowcnt++; 4055 ASSERT(opp->p_cowcnt != 0); 4056 opp->p_cowcnt--; 4057 } else { 4058 4059 ASSERT(opp->p_lckcnt != 0); 4060 4061 /* 4062 * We didn't need availrmem decremented if p_lckcnt on 4063 * original page is 1. Here, we are unlocking 4064 * read-only copy belonging to original page and 4065 * are locking a copy belonging to new page. 
4066 */ 4067 if (opp->p_lckcnt == 1) 4068 payback = 1; 4069 4070 npp->p_lckcnt++; 4071 opp->p_lckcnt--; 4072 } 4073 } 4074 if (payback) { 4075 mutex_enter(&freemem_lock); 4076 availrmem++; 4077 pages_useclaim--; 4078 mutex_exit(&freemem_lock); 4079 } 4080 4081 if (nidx < oidx) { 4082 page_struct_unlock(opp); 4083 page_struct_unlock(npp); 4084 } else if (oidx < nidx) { 4085 page_struct_unlock(npp); 4086 page_struct_unlock(opp); 4087 } else { /* The pages hash to the same lock */ 4088 page_struct_unlock(npp); 4089 } 4090 } 4091 4092 /* 4093 * Simple claim adjust functions -- used to support changes in 4094 * claims due to changes in access permissions. Used by segvn_setprot(). 4095 */ 4096 int 4097 page_addclaim(page_t *pp) 4098 { 4099 int r = 0; /* result */ 4100 4101 ASSERT(PAGE_LOCKED(pp)); 4102 4103 page_struct_lock(pp); 4104 ASSERT(pp->p_lckcnt != 0); 4105 4106 if (pp->p_lckcnt == 1) { 4107 if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4108 --pp->p_lckcnt; 4109 r = 1; 4110 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4111 cmn_err(CE_WARN, 4112 "COW lock limit reached on pfn 0x%lx", 4113 page_pptonum(pp)); 4114 } 4115 } 4116 } else { 4117 mutex_enter(&freemem_lock); 4118 if ((availrmem > pages_pp_maximum) && 4119 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) { 4120 --availrmem; 4121 ++pages_claimed; 4122 mutex_exit(&freemem_lock); 4123 --pp->p_lckcnt; 4124 r = 1; 4125 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4126 cmn_err(CE_WARN, 4127 "COW lock limit reached on pfn 0x%lx", 4128 page_pptonum(pp)); 4129 } 4130 } else 4131 mutex_exit(&freemem_lock); 4132 } 4133 page_struct_unlock(pp); 4134 return (r); 4135 } 4136 4137 int 4138 page_subclaim(page_t *pp) 4139 { 4140 int r = 0; 4141 4142 ASSERT(PAGE_LOCKED(pp)); 4143 4144 page_struct_lock(pp); 4145 ASSERT(pp->p_cowcnt != 0); 4146 4147 if (pp->p_lckcnt) { 4148 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) { 4149 r = 1; 4150 /* 4151 * for availrmem 4152 */ 4153 mutex_enter(&freemem_lock); 4154 availrmem++; 4155 pages_claimed--; 4156 mutex_exit(&freemem_lock); 4157 4158 pp->p_cowcnt--; 4159 4160 if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4161 cmn_err(CE_WARN, 4162 "Page lock limit reached on pfn 0x%lx", 4163 page_pptonum(pp)); 4164 } 4165 } 4166 } else { 4167 r = 1; 4168 pp->p_cowcnt--; 4169 pp->p_lckcnt++; 4170 } 4171 page_struct_unlock(pp); 4172 return (r); 4173 } 4174 4175 /* 4176 * Variant of page_addclaim(), where ppa[] contains the pages of a single large 4177 * page. 4178 */ 4179 int 4180 page_addclaim_pages(page_t **ppa) 4181 { 4182 pgcnt_t lckpgs = 0, pg_idx; 4183 4184 VM_STAT_ADD(pagecnt.pc_addclaim_pages); 4185 4186 /* 4187 * Only need to take the page struct lock on the large page root. 
4188 */ 4189 page_struct_lock(ppa[0]); 4190 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4191 4192 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4193 ASSERT(ppa[pg_idx]->p_lckcnt != 0); 4194 if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4195 page_struct_unlock(ppa[0]); 4196 return (0); 4197 } 4198 if (ppa[pg_idx]->p_lckcnt > 1) 4199 lckpgs++; 4200 } 4201 4202 if (lckpgs != 0) { 4203 mutex_enter(&freemem_lock); 4204 if (availrmem >= pages_pp_maximum + lckpgs) { 4205 availrmem -= lckpgs; 4206 pages_claimed += lckpgs; 4207 } else { 4208 mutex_exit(&freemem_lock); 4209 page_struct_unlock(ppa[0]); 4210 return (0); 4211 } 4212 mutex_exit(&freemem_lock); 4213 } 4214 4215 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4216 ppa[pg_idx]->p_lckcnt--; 4217 ppa[pg_idx]->p_cowcnt++; 4218 } 4219 page_struct_unlock(ppa[0]); 4220 return (1); 4221 } 4222 4223 /* 4224 * Variant of page_subclaim(), where ppa[] contains the pages of a single large 4225 * page. 4226 */ 4227 int 4228 page_subclaim_pages(page_t **ppa) 4229 { 4230 pgcnt_t ulckpgs = 0, pg_idx; 4231 4232 VM_STAT_ADD(pagecnt.pc_subclaim_pages); 4233 4234 /* 4235 * Only need to take the page struct lock on the large page root. 4236 */ 4237 page_struct_lock(ppa[0]); 4238 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4239 4240 ASSERT(PAGE_LOCKED(ppa[pg_idx])); 4241 ASSERT(ppa[pg_idx]->p_cowcnt != 0); 4242 if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) { 4243 page_struct_unlock(ppa[0]); 4244 return (0); 4245 } 4246 if (ppa[pg_idx]->p_lckcnt != 0) 4247 ulckpgs++; 4248 } 4249 4250 if (ulckpgs != 0) { 4251 mutex_enter(&freemem_lock); 4252 availrmem += ulckpgs; 4253 pages_claimed -= ulckpgs; 4254 mutex_exit(&freemem_lock); 4255 } 4256 4257 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) { 4258 ppa[pg_idx]->p_cowcnt--; 4259 ppa[pg_idx]->p_lckcnt++; 4260 4261 } 4262 page_struct_unlock(ppa[0]); 4263 return (1); 4264 } 4265 4266 page_t * 4267 page_numtopp(pfn_t pfnum, se_t se) 4268 { 4269 page_t *pp; 4270 4271 retry: 4272 pp = page_numtopp_nolock(pfnum); 4273 if (pp == NULL) { 4274 return ((page_t *)NULL); 4275 } 4276 4277 /* 4278 * Acquire the appropriate lock on the page. 4279 */ 4280 while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) { 4281 if (page_pptonum(pp) != pfnum) 4282 goto retry; 4283 continue; 4284 } 4285 4286 if (page_pptonum(pp) != pfnum) { 4287 page_unlock(pp); 4288 goto retry; 4289 } 4290 4291 return (pp); 4292 } 4293 4294 page_t * 4295 page_numtopp_noreclaim(pfn_t pfnum, se_t se) 4296 { 4297 page_t *pp; 4298 4299 retry: 4300 pp = page_numtopp_nolock(pfnum); 4301 if (pp == NULL) { 4302 return ((page_t *)NULL); 4303 } 4304 4305 /* 4306 * Acquire the appropriate lock on the page. 4307 */ 4308 while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) { 4309 if (page_pptonum(pp) != pfnum) 4310 goto retry; 4311 continue; 4312 } 4313 4314 if (page_pptonum(pp) != pfnum) { 4315 page_unlock(pp); 4316 goto retry; 4317 } 4318 4319 return (pp); 4320 } 4321 4322 /* 4323 * This routine is like page_numtopp, but will only return page structs 4324 * for pages which are ok for loading into hardware using the page struct. 4325 */ 4326 page_t * 4327 page_numtopp_nowait(pfn_t pfnum, se_t se) 4328 { 4329 page_t *pp; 4330 4331 retry: 4332 pp = page_numtopp_nolock(pfnum); 4333 if (pp == NULL) { 4334 return ((page_t *)NULL); 4335 } 4336 4337 /* 4338 * Try to acquire the appropriate lock on the page. 
4339 */ 4340 if (PP_ISFREE(pp)) 4341 pp = NULL; 4342 else { 4343 if (!page_trylock(pp, se)) 4344 pp = NULL; 4345 else { 4346 if (page_pptonum(pp) != pfnum) { 4347 page_unlock(pp); 4348 goto retry; 4349 } 4350 if (PP_ISFREE(pp)) { 4351 page_unlock(pp); 4352 pp = NULL; 4353 } 4354 } 4355 } 4356 return (pp); 4357 } 4358 4359 /* 4360 * Returns a count of dirty pages that are in the process 4361 * of being written out. If 'cleanit' is set, try to push the page. 4362 */ 4363 pgcnt_t 4364 page_busy(int cleanit) 4365 { 4366 page_t *page0 = page_first(); 4367 page_t *pp = page0; 4368 pgcnt_t nppbusy = 0; 4369 u_offset_t off; 4370 4371 do { 4372 vnode_t *vp = pp->p_vnode; 4373 /* 4374 * A page is a candidate for syncing if it is: 4375 * 4376 * (a) On neither the freelist nor the cachelist 4377 * (b) Hashed onto a vnode 4378 * (c) Not a kernel page 4379 * (d) Dirty 4380 * (e) Not part of a swapfile 4381 * (f) a page which belongs to a real vnode; eg has a non-null 4382 * v_vfsp pointer. 4383 * (g) Backed by a filesystem which doesn't have a 4384 * stubbed-out sync operation 4385 */ 4386 if (!PP_ISFREE(pp) && vp != NULL && !VN_ISKAS(vp) && 4387 hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL && 4388 vfs_can_sync(vp->v_vfsp)) { 4389 nppbusy++; 4390 4391 if (!cleanit) 4392 continue; 4393 if (!page_trylock(pp, SE_EXCL)) 4394 continue; 4395 4396 if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) || 4397 pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 4398 !(hat_pagesync(pp, 4399 HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) { 4400 page_unlock(pp); 4401 continue; 4402 } 4403 off = pp->p_offset; 4404 VN_HOLD(vp); 4405 page_unlock(pp); 4406 (void) VOP_PUTPAGE(vp, off, PAGESIZE, 4407 B_ASYNC | B_FREE, kcred, NULL); 4408 VN_RELE(vp); 4409 } 4410 } while ((pp = page_next(pp)) != page0); 4411 4412 return (nppbusy); 4413 } 4414 4415 void page_invalidate_pages(void); 4416 4417 /* 4418 * callback handler to vm sub-system 4419 * 4420 * callers make sure no recursive entries to this func. 4421 */ 4422 /*ARGSUSED*/ 4423 boolean_t 4424 callb_vm_cpr(void *arg, int code) 4425 { 4426 if (code == CB_CODE_CPR_CHKPT) 4427 page_invalidate_pages(); 4428 return (B_TRUE); 4429 } 4430 4431 /* 4432 * Invalidate all pages of the system. 4433 * It shouldn't be called until all user page activities are all stopped. 4434 */ 4435 void 4436 page_invalidate_pages() 4437 { 4438 page_t *pp; 4439 page_t *page0; 4440 pgcnt_t nbusypages; 4441 int retry = 0; 4442 const int MAXRETRIES = 4; 4443 top: 4444 /* 4445 * Flush dirty pages and destroy the clean ones. 4446 */ 4447 nbusypages = 0; 4448 4449 pp = page0 = page_first(); 4450 do { 4451 struct vnode *vp; 4452 u_offset_t offset; 4453 int mod; 4454 4455 /* 4456 * skip the page if it has no vnode or the page associated 4457 * with the kernel vnode or prom allocated kernel mem. 4458 */ 4459 if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp)) 4460 continue; 4461 4462 /* 4463 * skip the page which is already free invalidated. 4464 */ 4465 if (PP_ISFREE(pp) && PP_ISAGED(pp)) 4466 continue; 4467 4468 /* 4469 * skip pages that are already locked or can't be "exclusively" 4470 * locked or are already free. After we lock the page, check 4471 * the free and age bits again to be sure it's not destroyed 4472 * yet. 4473 * To achieve max. parallelization, we use page_trylock instead 4474 * of page_lock so that we don't get block on individual pages 4475 * while we have thousands of other pages to process. 
4476 */ 4477 if (!page_trylock(pp, SE_EXCL)) { 4478 nbusypages++; 4479 continue; 4480 } else if (PP_ISFREE(pp)) { 4481 if (!PP_ISAGED(pp)) { 4482 page_destroy_free(pp); 4483 } else { 4484 page_unlock(pp); 4485 } 4486 continue; 4487 } 4488 /* 4489 * Is this page involved in some I/O? shared? 4490 * 4491 * The page_struct_lock need not be acquired to 4492 * examine these fields since the page has an 4493 * "exclusive" lock. 4494 */ 4495 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 4496 page_unlock(pp); 4497 continue; 4498 } 4499 4500 if (vp->v_type == VCHR) { 4501 panic("vp->v_type == VCHR"); 4502 /*NOTREACHED*/ 4503 } 4504 4505 if (!page_try_demote_pages(pp)) { 4506 page_unlock(pp); 4507 continue; 4508 } 4509 4510 /* 4511 * Check the modified bit. Leave the bits alone in hardware 4512 * (they will be modified if we do the putpage). 4513 */ 4514 mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) 4515 & P_MOD); 4516 if (mod) { 4517 offset = pp->p_offset; 4518 /* 4519 * Hold the vnode before releasing the page lock 4520 * to prevent it from being freed and re-used by 4521 * some other thread. 4522 */ 4523 VN_HOLD(vp); 4524 page_unlock(pp); 4525 /* 4526 * No error return is checked here. Callers such as 4527 * cpr deals with the dirty pages at the dump time 4528 * if this putpage fails. 4529 */ 4530 (void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL, 4531 kcred, NULL); 4532 VN_RELE(vp); 4533 } else { 4534 /*LINTED: constant in conditional context*/ 4535 VN_DISPOSE(pp, B_INVAL, 0, kcred); 4536 } 4537 } while ((pp = page_next(pp)) != page0); 4538 if (nbusypages && retry++ < MAXRETRIES) { 4539 delay(1); 4540 goto top; 4541 } 4542 } 4543 4544 /* 4545 * Replace the page "old" with the page "new" on the page hash and vnode lists 4546 * 4547 * the replacement must be done in place, ie the equivalent sequence: 4548 * 4549 * vp = old->p_vnode; 4550 * off = old->p_offset; 4551 * page_do_hashout(old) 4552 * page_do_hashin(new, vp, off) 4553 * 4554 * doesn't work, since 4555 * 1) if old is the only page on the vnode, the v_pages list has a window 4556 * where it looks empty. This will break file system assumptions. 4557 * and 4558 * 2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list. 
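 *
 * Instead, page_do_relocate_hash() below splices 'new' into the hash
 * chain and into the vnode's v_pages list in place of 'old' while both
 * the hash mutex and the vnode mutex are held, so neither of the windows
 * above is ever visible to other threads.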
4559 */ 4560 static void 4561 page_do_relocate_hash(page_t *new, page_t *old) 4562 { 4563 page_t **hash_list; 4564 vnode_t *vp = old->p_vnode; 4565 kmutex_t *sep; 4566 4567 ASSERT(PAGE_EXCL(old)); 4568 ASSERT(PAGE_EXCL(new)); 4569 ASSERT(vp != NULL); 4570 ASSERT(MUTEX_HELD(page_vnode_mutex(vp))); 4571 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset)))); 4572 4573 /* 4574 * First find old page on the page hash list 4575 */ 4576 hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)]; 4577 4578 for (;;) { 4579 if (*hash_list == old) 4580 break; 4581 if (*hash_list == NULL) { 4582 panic("page_do_hashout"); 4583 /*NOTREACHED*/ 4584 } 4585 hash_list = &(*hash_list)->p_hash; 4586 } 4587 4588 /* 4589 * update new and replace old with new on the page hash list 4590 */ 4591 new->p_vnode = old->p_vnode; 4592 new->p_offset = old->p_offset; 4593 new->p_hash = old->p_hash; 4594 *hash_list = new; 4595 4596 if ((new->p_vnode->v_flag & VISSWAP) != 0) 4597 PP_SETSWAP(new); 4598 4599 /* 4600 * replace old with new on the vnode's page list 4601 */ 4602 if (old->p_vpnext == old) { 4603 new->p_vpnext = new; 4604 new->p_vpprev = new; 4605 } else { 4606 new->p_vpnext = old->p_vpnext; 4607 new->p_vpprev = old->p_vpprev; 4608 new->p_vpnext->p_vpprev = new; 4609 new->p_vpprev->p_vpnext = new; 4610 } 4611 if (vp->v_pages == old) 4612 vp->v_pages = new; 4613 4614 /* 4615 * clear out the old page 4616 */ 4617 old->p_hash = NULL; 4618 old->p_vpnext = NULL; 4619 old->p_vpprev = NULL; 4620 old->p_vnode = NULL; 4621 PP_CLRSWAP(old); 4622 old->p_offset = (u_offset_t)-1; 4623 page_clr_all_props(old); 4624 4625 /* 4626 * Wake up processes waiting for this page. The page's 4627 * identity has been changed, and is probably not the 4628 * desired page any longer. 4629 */ 4630 sep = page_se_mutex(old); 4631 mutex_enter(sep); 4632 old->p_selock &= ~SE_EWANTED; 4633 if (CV_HAS_WAITERS(&old->p_cv)) 4634 cv_broadcast(&old->p_cv); 4635 mutex_exit(sep); 4636 } 4637 4638 /* 4639 * This function moves the identity of page "pp_old" to page "pp_new". 4640 * Both pages must be locked on entry. "pp_new" is free, has no identity, 4641 * and need not be hashed out from anywhere. 4642 */ 4643 void 4644 page_relocate_hash(page_t *pp_new, page_t *pp_old) 4645 { 4646 vnode_t *vp = pp_old->p_vnode; 4647 u_offset_t off = pp_old->p_offset; 4648 kmutex_t *phm, *vphm; 4649 4650 /* 4651 * Rehash two pages 4652 */ 4653 ASSERT(PAGE_EXCL(pp_old)); 4654 ASSERT(PAGE_EXCL(pp_new)); 4655 ASSERT(vp != NULL); 4656 ASSERT(pp_new->p_vnode == NULL); 4657 4658 /* 4659 * hashout then hashin while holding the mutexes 4660 */ 4661 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off)); 4662 mutex_enter(phm); 4663 vphm = page_vnode_mutex(vp); 4664 mutex_enter(vphm); 4665 4666 page_do_relocate_hash(pp_new, pp_old); 4667 4668 /* The following comment preserved from page_flip(). */ 4669 pp_new->p_fsdata = pp_old->p_fsdata; 4670 pp_old->p_fsdata = 0; 4671 mutex_exit(vphm); 4672 mutex_exit(phm); 4673 4674 /* 4675 * The page_struct_lock need not be acquired for lckcnt and 4676 * cowcnt since the page has an "exclusive" lock. 4677 */ 4678 ASSERT(pp_new->p_lckcnt == 0); 4679 ASSERT(pp_new->p_cowcnt == 0); 4680 pp_new->p_lckcnt = pp_old->p_lckcnt; 4681 pp_new->p_cowcnt = pp_old->p_cowcnt; 4682 pp_old->p_lckcnt = pp_old->p_cowcnt = 0; 4683 4684 } 4685 4686 /* 4687 * Helper routine used to lock all remaining members of a 4688 * large page. The caller is responsible for passing in a locked 4689 * pp. 
If pp is a large page, then it succeeds in locking all the 4690 * remaining constituent pages or it returns with only the 4691 * original page locked. 4692 * 4693 * Returns 1 on success, 0 on failure. 4694 * 4695 * If success is returned this routine guarantees p_szc for all constituent 4696 * pages of a large page pp belongs to can't change. To achieve this we 4697 * recheck szc of pp after locking all constituent pages and retry if szc 4698 * changed (it could only decrease). Since hat_page_demote() needs an EXCL 4699 * lock on one of constituent pages it can't be running after all constituent 4700 * pages are locked. hat_page_demote() with a lock on a constituent page 4701 * outside of this large page (i.e. pp belonged to a larger large page) is 4702 * already done with all constituent pages of pp since the root's p_szc is 4703 * changed last. Therefore no need to synchronize with hat_page_demote() that 4704 * locked a constituent page outside of pp's current large page. 4705 */ 4706 #ifdef DEBUG 4707 uint32_t gpg_trylock_mtbf = 0; 4708 #endif 4709 4710 int 4711 group_page_trylock(page_t *pp, se_t se) 4712 { 4713 page_t *tpp; 4714 pgcnt_t npgs, i, j; 4715 uint_t pszc = pp->p_szc; 4716 4717 #ifdef DEBUG 4718 if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) { 4719 return (0); 4720 } 4721 #endif 4722 4723 if (pp != PP_GROUPLEADER(pp, pszc)) { 4724 return (0); 4725 } 4726 4727 retry: 4728 ASSERT(PAGE_LOCKED_SE(pp, se)); 4729 ASSERT(!PP_ISFREE(pp)); 4730 if (pszc == 0) { 4731 return (1); 4732 } 4733 npgs = page_get_pagecnt(pszc); 4734 tpp = pp + 1; 4735 for (i = 1; i < npgs; i++, tpp++) { 4736 if (!page_trylock(tpp, se)) { 4737 tpp = pp + 1; 4738 for (j = 1; j < i; j++, tpp++) { 4739 page_unlock(tpp); 4740 } 4741 return (0); 4742 } 4743 } 4744 if (pp->p_szc != pszc) { 4745 ASSERT(pp->p_szc < pszc); 4746 ASSERT(pp->p_vnode != NULL && !PP_ISKAS(pp) && 4747 !IS_SWAPFSVP(pp->p_vnode)); 4748 tpp = pp + 1; 4749 for (i = 1; i < npgs; i++, tpp++) { 4750 page_unlock(tpp); 4751 } 4752 pszc = pp->p_szc; 4753 goto retry; 4754 } 4755 return (1); 4756 } 4757 4758 void 4759 group_page_unlock(page_t *pp) 4760 { 4761 page_t *tpp; 4762 pgcnt_t npgs, i; 4763 4764 ASSERT(PAGE_LOCKED(pp)); 4765 ASSERT(!PP_ISFREE(pp)); 4766 ASSERT(pp == PP_PAGEROOT(pp)); 4767 npgs = page_get_pagecnt(pp->p_szc); 4768 for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) { 4769 page_unlock(tpp); 4770 } 4771 } 4772 4773 /* 4774 * returns 4775 * 0 : on success and *nrelocp is number of relocated PAGESIZE pages 4776 * ERANGE : this is not a base page 4777 * EBUSY : failure to get locks on the page/pages 4778 * ENOMEM : failure to obtain replacement pages 4779 * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel 4780 * EIO : An error occurred while trying to copy the page data 4781 * 4782 * Return with all constituent members of target and replacement 4783 * SE_EXCL locked. It is the callers responsibility to drop the 4784 * locks. 
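 *
 * A hedged caller sketch (not copied from any in-tree caller; it only
 * strings together the return values documented above):
 *
 *	spgcnt_t nreloc;
 *	page_t *repl = NULL;
 *	int err;
 *
 *	err = do_page_relocate(&targ, &repl, 1, &nreloc, NULL);
 *	if (err == 0) {
 *		... nreloc PAGESIZE pages were moved; 'targ' now lists the
 *		    old constituent pages and 'repl' the new ones, all
 *		    still SE_EXCL locked for the caller to drop ...
 *	} else if (err == EBUSY || err == ENOMEM || err == EAGAIN) {
 *		... transient failure, nothing was relocated; retry later ...
 *	} else {
 *		... ERANGE or EIO: give up on relocating this page ...
 *	}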
4785 */ 4786 int 4787 do_page_relocate( 4788 page_t **target, 4789 page_t **replacement, 4790 int grouplock, 4791 spgcnt_t *nrelocp, 4792 lgrp_t *lgrp) 4793 { 4794 page_t *first_repl; 4795 page_t *repl; 4796 page_t *targ; 4797 page_t *pl = NULL; 4798 uint_t ppattr; 4799 pfn_t pfn, repl_pfn; 4800 uint_t szc; 4801 spgcnt_t npgs, i; 4802 int repl_contig = 0; 4803 uint_t flags = 0; 4804 spgcnt_t dofree = 0; 4805 4806 *nrelocp = 0; 4807 4808 #if defined(__sparc) 4809 /* 4810 * We need to wait till OBP has completed 4811 * its boot-time handoff of its resources to the kernel 4812 * before we allow page relocation 4813 */ 4814 if (page_relocate_ready == 0) { 4815 return (EAGAIN); 4816 } 4817 #endif 4818 4819 /* 4820 * If this is not a base page, 4821 * just return with 0x0 pages relocated. 4822 */ 4823 targ = *target; 4824 ASSERT(PAGE_EXCL(targ)); 4825 ASSERT(!PP_ISFREE(targ)); 4826 szc = targ->p_szc; 4827 ASSERT(szc < mmu_page_sizes); 4828 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4829 pfn = targ->p_pagenum; 4830 if (pfn != PFN_BASE(pfn, szc)) { 4831 VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]); 4832 return (ERANGE); 4833 } 4834 4835 if ((repl = *replacement) != NULL && repl->p_szc >= szc) { 4836 repl_pfn = repl->p_pagenum; 4837 if (repl_pfn != PFN_BASE(repl_pfn, szc)) { 4838 VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]); 4839 return (ERANGE); 4840 } 4841 repl_contig = 1; 4842 } 4843 4844 /* 4845 * We must lock all members of this large page or we cannot 4846 * relocate any part of it. 4847 */ 4848 if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) { 4849 VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]); 4850 return (EBUSY); 4851 } 4852 4853 /* 4854 * reread szc it could have been decreased before 4855 * group_page_trylock() was done. 4856 */ 4857 szc = targ->p_szc; 4858 ASSERT(szc < mmu_page_sizes); 4859 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]); 4860 ASSERT(pfn == PFN_BASE(pfn, szc)); 4861 4862 npgs = page_get_pagecnt(targ->p_szc); 4863 4864 if (repl == NULL) { 4865 dofree = npgs; /* Size of target page in MMU pages */ 4866 if (!page_create_wait(dofree, 0)) { 4867 if (grouplock != 0) { 4868 group_page_unlock(targ); 4869 } 4870 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4871 return (ENOMEM); 4872 } 4873 4874 /* 4875 * seg kmem pages require that the target and replacement 4876 * page be the same pagesize. 4877 */ 4878 flags = (VN_ISKAS(targ->p_vnode)) ? 
PGR_SAMESZC : 0; 4879 repl = page_get_replacement_page(targ, lgrp, flags); 4880 if (repl == NULL) { 4881 if (grouplock != 0) { 4882 group_page_unlock(targ); 4883 } 4884 page_create_putback(dofree); 4885 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]); 4886 return (ENOMEM); 4887 } 4888 } 4889 #ifdef DEBUG 4890 else { 4891 ASSERT(PAGE_LOCKED(repl)); 4892 } 4893 #endif /* DEBUG */ 4894 4895 #if defined(__sparc) 4896 /* 4897 * Let hat_page_relocate() complete the relocation if it's kernel page 4898 */ 4899 if (VN_ISKAS(targ->p_vnode)) { 4900 *replacement = repl; 4901 if (hat_page_relocate(target, replacement, nrelocp) != 0) { 4902 if (grouplock != 0) { 4903 group_page_unlock(targ); 4904 } 4905 if (dofree) { 4906 *replacement = NULL; 4907 page_free_replacement_page(repl); 4908 page_create_putback(dofree); 4909 } 4910 VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]); 4911 return (EAGAIN); 4912 } 4913 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 4914 return (0); 4915 } 4916 #else 4917 #if defined(lint) 4918 dofree = dofree; 4919 #endif 4920 #endif 4921 4922 first_repl = repl; 4923 4924 for (i = 0; i < npgs; i++) { 4925 ASSERT(PAGE_EXCL(targ)); 4926 ASSERT(targ->p_slckcnt == 0); 4927 ASSERT(repl->p_slckcnt == 0); 4928 4929 (void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD); 4930 4931 ASSERT(hat_page_getshare(targ) == 0); 4932 ASSERT(!PP_ISFREE(targ)); 4933 ASSERT(targ->p_pagenum == (pfn + i)); 4934 ASSERT(repl_contig == 0 || 4935 repl->p_pagenum == (repl_pfn + i)); 4936 4937 /* 4938 * Copy the page contents and attributes then 4939 * relocate the page in the page hash. 4940 */ 4941 if (ppcopy(targ, repl) == 0) { 4942 targ = *target; 4943 repl = first_repl; 4944 VM_STAT_ADD(vmm_vmstats.ppr_copyfail); 4945 if (grouplock != 0) { 4946 group_page_unlock(targ); 4947 } 4948 if (dofree) { 4949 *replacement = NULL; 4950 page_free_replacement_page(repl); 4951 page_create_putback(dofree); 4952 } 4953 return (EIO); 4954 } 4955 4956 targ++; 4957 if (repl_contig != 0) { 4958 repl++; 4959 } else { 4960 repl = repl->p_next; 4961 } 4962 } 4963 4964 repl = first_repl; 4965 targ = *target; 4966 4967 for (i = 0; i < npgs; i++) { 4968 ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO)); 4969 page_clr_all_props(repl); 4970 page_set_props(repl, ppattr); 4971 page_relocate_hash(repl, targ); 4972 4973 ASSERT(hat_page_getshare(targ) == 0); 4974 ASSERT(hat_page_getshare(repl) == 0); 4975 /* 4976 * Now clear the props on targ, after the 4977 * page_relocate_hash(), they no longer 4978 * have any meaning. 4979 */ 4980 page_clr_all_props(targ); 4981 ASSERT(targ->p_next == targ); 4982 ASSERT(targ->p_prev == targ); 4983 page_list_concat(&pl, &targ); 4984 4985 targ++; 4986 if (repl_contig != 0) { 4987 repl++; 4988 } else { 4989 repl = repl->p_next; 4990 } 4991 } 4992 /* assert that we have come full circle with repl */ 4993 ASSERT(repl_contig == 1 || first_repl == repl); 4994 4995 *target = pl; 4996 if (*replacement == NULL) { 4997 ASSERT(first_repl == repl); 4998 *replacement = repl; 4999 } 5000 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]); 5001 *nrelocp = npgs; 5002 return (0); 5003 } 5004 /* 5005 * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated. 
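 *
 * (A hypothetical caller that wants the old pages freed for it could,
 * for example, use
 *
 *	if (page_relocate(&targ, &repl, 1, 1, &nreloc, NULL) == 0)
 *		... only 'repl' is left for the caller to unlock ...
 *
 * since a nonzero freetarget makes this wrapper free the target pages
 * itself, as the code below shows.)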
5006 */ 5007 int 5008 page_relocate( 5009 page_t **target, 5010 page_t **replacement, 5011 int grouplock, 5012 int freetarget, 5013 spgcnt_t *nrelocp, 5014 lgrp_t *lgrp) 5015 { 5016 spgcnt_t ret; 5017 5018 /* do_page_relocate returns 0 on success or errno value */ 5019 ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp); 5020 5021 if (ret != 0 || freetarget == 0) { 5022 return (ret); 5023 } 5024 if (*nrelocp == 1) { 5025 ASSERT(*target != NULL); 5026 page_free(*target, 1); 5027 } else { 5028 page_t *tpp = *target; 5029 uint_t szc = tpp->p_szc; 5030 pgcnt_t npgs = page_get_pagecnt(szc); 5031 ASSERT(npgs > 1); 5032 ASSERT(szc != 0); 5033 do { 5034 ASSERT(PAGE_EXCL(tpp)); 5035 ASSERT(!hat_page_is_mapped(tpp)); 5036 ASSERT(tpp->p_szc == szc); 5037 PP_SETFREE(tpp); 5038 PP_SETAGED(tpp); 5039 npgs--; 5040 } while ((tpp = tpp->p_next) != *target); 5041 ASSERT(npgs == 0); 5042 page_list_add_pages(*target, 0); 5043 npgs = page_get_pagecnt(szc); 5044 page_create_putback(npgs); 5045 } 5046 return (ret); 5047 } 5048 5049 /* 5050 * it is up to the caller to deal with pcf accounting. 5051 */ 5052 void 5053 page_free_replacement_page(page_t *pplist) 5054 { 5055 page_t *pp; 5056 5057 while (pplist != NULL) { 5058 /* 5059 * pp_targ is a linked list. 5060 */ 5061 pp = pplist; 5062 if (pp->p_szc == 0) { 5063 page_sub(&pplist, pp); 5064 page_clr_all_props(pp); 5065 PP_SETFREE(pp); 5066 PP_SETAGED(pp); 5067 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 5068 page_unlock(pp); 5069 VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]); 5070 } else { 5071 spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc); 5072 page_t *tpp; 5073 page_list_break(&pp, &pplist, curnpgs); 5074 tpp = pp; 5075 do { 5076 ASSERT(PAGE_EXCL(tpp)); 5077 ASSERT(!hat_page_is_mapped(tpp)); 5078 page_clr_all_props(tpp); 5079 PP_SETFREE(tpp); 5080 PP_SETAGED(tpp); 5081 } while ((tpp = tpp->p_next) != pp); 5082 page_list_add_pages(pp, 0); 5083 VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]); 5084 } 5085 } 5086 } 5087 5088 /* 5089 * Relocate target to non-relocatable replacement page. 5090 */ 5091 int 5092 page_relocate_cage(page_t **target, page_t **replacement) 5093 { 5094 page_t *tpp, *rpp; 5095 spgcnt_t pgcnt, npgs; 5096 int result; 5097 5098 tpp = *target; 5099 5100 ASSERT(PAGE_EXCL(tpp)); 5101 ASSERT(tpp->p_szc == 0); 5102 5103 pgcnt = btop(page_get_pagesize(tpp->p_szc)); 5104 5105 do { 5106 (void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC); 5107 rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC); 5108 if (rpp == NULL) { 5109 page_create_putback(pgcnt); 5110 kcage_cageout_wakeup(); 5111 } 5112 } while (rpp == NULL); 5113 5114 ASSERT(PP_ISNORELOC(rpp)); 5115 5116 result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL); 5117 5118 if (result == 0) { 5119 *replacement = rpp; 5120 if (pgcnt != npgs) 5121 panic("page_relocate_cage: partial relocation"); 5122 } 5123 5124 return (result); 5125 } 5126 5127 /* 5128 * Release the page lock on a page, place on cachelist 5129 * tail if no longer mapped. Caller can let us know if 5130 * the page is known to be clean. 
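 *
 * A hedged usage sketch (the PGREL_* handling shown is one reasonable
 * choice, not lifted from an existing caller):
 *
 *	switch (page_release(pp, 1)) {
 *	case PGREL_CLEAN:
 *		... page was disposed of; nothing further to do ...
 *		break;
 *	case PGREL_MOD:
 *		... page is dirty and was only unlocked; push it with
 *		    VOP_PUTPAGE() if it should be cleaned ...
 *		break;
 *	case PGREL_NOTREL:
 *		... page is still mapped, locked or swap-backed; it was
 *		    simply unlocked ...
 *		break;
 *	}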
5131 */ 5132 int 5133 page_release(page_t *pp, int checkmod) 5134 { 5135 int status; 5136 5137 ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) && 5138 (pp->p_vnode != NULL)); 5139 5140 if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) && 5141 ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) && 5142 pp->p_lckcnt == 0 && pp->p_cowcnt == 0 && 5143 !hat_page_is_mapped(pp)) { 5144 5145 /* 5146 * If page is modified, unlock it 5147 * 5148 * (p_nrm & P_MOD) bit has the latest stuff because: 5149 * (1) We found that this page doesn't have any mappings 5150 * _after_ holding SE_EXCL and 5151 * (2) We didn't drop SE_EXCL lock after the check in (1) 5152 */ 5153 if (checkmod && hat_ismod(pp)) { 5154 page_unlock(pp); 5155 status = PGREL_MOD; 5156 } else { 5157 /*LINTED: constant in conditional context*/ 5158 VN_DISPOSE(pp, B_FREE, 0, kcred); 5159 status = PGREL_CLEAN; 5160 } 5161 } else { 5162 page_unlock(pp); 5163 status = PGREL_NOTREL; 5164 } 5165 return (status); 5166 } 5167 5168 /* 5169 * Given a constituent page, try to demote the large page on the freelist. 5170 * 5171 * Returns nonzero if the page could be demoted successfully. Returns with 5172 * the constituent page still locked. 5173 */ 5174 int 5175 page_try_demote_free_pages(page_t *pp) 5176 { 5177 page_t *rootpp = pp; 5178 pfn_t pfn = page_pptonum(pp); 5179 spgcnt_t npgs; 5180 uint_t szc = pp->p_szc; 5181 5182 ASSERT(PP_ISFREE(pp)); 5183 ASSERT(PAGE_EXCL(pp)); 5184 5185 /* 5186 * Adjust rootpp and lock it, if `pp' is not the base 5187 * constituent page. 5188 */ 5189 npgs = page_get_pagecnt(pp->p_szc); 5190 if (npgs == 1) { 5191 return (0); 5192 } 5193 5194 if (!IS_P2ALIGNED(pfn, npgs)) { 5195 pfn = P2ALIGN(pfn, npgs); 5196 rootpp = page_numtopp_nolock(pfn); 5197 } 5198 5199 if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) { 5200 return (0); 5201 } 5202 5203 if (rootpp->p_szc != szc) { 5204 if (pp != rootpp) 5205 page_unlock(rootpp); 5206 return (0); 5207 } 5208 5209 page_demote_free_pages(rootpp); 5210 5211 if (pp != rootpp) 5212 page_unlock(rootpp); 5213 5214 ASSERT(PP_ISFREE(pp)); 5215 ASSERT(PAGE_EXCL(pp)); 5216 return (1); 5217 } 5218 5219 /* 5220 * Given a constituent page, try to demote the large page. 5221 * 5222 * Returns nonzero if the page could be demoted successfully. Returns with 5223 * the constituent page still locked. 5224 */ 5225 int 5226 page_try_demote_pages(page_t *pp) 5227 { 5228 page_t *tpp, *rootpp = pp; 5229 pfn_t pfn = page_pptonum(pp); 5230 spgcnt_t i, npgs; 5231 uint_t szc = pp->p_szc; 5232 vnode_t *vp = pp->p_vnode; 5233 5234 ASSERT(PAGE_EXCL(pp)); 5235 5236 VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]); 5237 5238 if (pp->p_szc == 0) { 5239 VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]); 5240 return (1); 5241 } 5242 5243 if (vp != NULL && !IS_SWAPFSVP(vp) && !VN_ISKAS(vp)) { 5244 VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]); 5245 page_demote_vp_pages(pp); 5246 ASSERT(pp->p_szc == 0); 5247 return (1); 5248 } 5249 5250 /* 5251 * Adjust rootpp if passed in is not the base 5252 * constituent page. 5253 */ 5254 npgs = page_get_pagecnt(pp->p_szc); 5255 ASSERT(npgs > 1); 5256 if (!IS_P2ALIGNED(pfn, npgs)) { 5257 pfn = P2ALIGN(pfn, npgs); 5258 rootpp = page_numtopp_nolock(pfn); 5259 VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]); 5260 ASSERT(rootpp->p_vnode != NULL); 5261 ASSERT(rootpp->p_szc == szc); 5262 } 5263 5264 /* 5265 * We can't demote kernel pages since we can't hat_unload() 5266 * the mappings. 
5267 */ 5268 if (VN_ISKAS(rootpp->p_vnode)) 5269 return (0); 5270 5271 /* 5272 * Attempt to lock all constituent pages except the page passed 5273 * in since it's already locked. 5274 */ 5275 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5276 ASSERT(!PP_ISFREE(tpp)); 5277 ASSERT(tpp->p_vnode != NULL); 5278 5279 if (tpp != pp && !page_trylock(tpp, SE_EXCL)) 5280 break; 5281 ASSERT(tpp->p_szc == rootpp->p_szc); 5282 ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i); 5283 } 5284 5285 /* 5286 * If we failed to lock them all then unlock what we have 5287 * locked so far and bail. 5288 */ 5289 if (i < npgs) { 5290 tpp = rootpp; 5291 while (i-- > 0) { 5292 if (tpp != pp) 5293 page_unlock(tpp); 5294 tpp++; 5295 } 5296 VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]); 5297 return (0); 5298 } 5299 5300 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5301 ASSERT(PAGE_EXCL(tpp)); 5302 ASSERT(tpp->p_slckcnt == 0); 5303 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD); 5304 tpp->p_szc = 0; 5305 } 5306 5307 /* 5308 * Unlock all pages except the page passed in. 5309 */ 5310 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) { 5311 ASSERT(!hat_page_is_mapped(tpp)); 5312 if (tpp != pp) 5313 page_unlock(tpp); 5314 } 5315 5316 VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]); 5317 return (1); 5318 } 5319 5320 /* 5321 * Called by page_free() and page_destroy() to demote the page size code 5322 * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non zero 5323 * p_szc on free list, neither can we just clear p_szc of a single page_t 5324 * within a large page since it will break other code that relies on p_szc 5325 * being the same for all page_t's of a large page). Anonymous pages should 5326 * never end up here because anon_map_getpages() cannot deal with p_szc 5327 * changes after a single constituent page is locked. While anonymous or 5328 * kernel large pages are demoted or freed the entire large page at a time 5329 * with all constituent pages locked EXCL for the file system pages we 5330 * have to be able to demote a large page (i.e. decrease all constituent pages 5331 * p_szc) with only just an EXCL lock on one of constituent pages. The reason 5332 * we can easily deal with anonymous page demotion the entire large page at a 5333 * time is that those operation originate at address space level and concern 5334 * the entire large page region with actual demotion only done when pages are 5335 * not shared with any other processes (therefore we can always get EXCL lock 5336 * on all anonymous constituent pages after clearing segment page 5337 * cache). However file system pages can be truncated or invalidated at a 5338 * PAGESIZE level from the file system side and end up in page_free() or 5339 * page_destroy() (we also allow only part of the large page to be SOFTLOCKed 5340 * and therefore pageout should be able to demote a large page by EXCL locking 5341 * any constituent page that is not under SOFTLOCK). In those cases we cannot 5342 * rely on being able to lock EXCL all constituent pages. 5343 * 5344 * To prevent szc changes on file system pages one has to lock all constituent 5345 * pages at least SHARED (or call page_szc_lock()). The only subsystem that 5346 * doesn't rely on locking all constituent pages (or using page_szc_lock()) to 5347 * prevent szc changes is hat layer that uses its own page level mlist 5348 * locks. hat assumes that szc doesn't change after mlist lock for a page is 5349 * taken. 
Therefore we need to change szc under hat level locks if we only 5350 * have an EXCL lock on a single constituent page and hat still references any 5351 * of constituent pages. (Note we can't "ignore" hat layer by simply 5352 * hat_pageunload() all constituent pages without having EXCL locks on all of 5353 * constituent pages). We use hat_page_demote() call to safely demote szc of 5354 * all constituent pages under hat locks when we only have an EXCL lock on one 5355 * of constituent pages. 5356 * 5357 * This routine calls page_szc_lock() before calling hat_page_demote() to 5358 * allow segvn in one special case not to lock all constituent pages SHARED 5359 * before calling hat_memload_array() that relies on p_szc not changing even 5360 * before hat level mlist lock is taken. In that case segvn uses 5361 * page_szc_lock() to prevent hat_page_demote() changing p_szc values. 5362 * 5363 * Anonymous or kernel page demotion still has to lock all pages exclusively 5364 * and do hat_pageunload() on all constituent pages before demoting the page 5365 * therefore there's no need for anonymous or kernel page demotion to use 5366 * hat_page_demote() mechanism. 5367 * 5368 * hat_page_demote() removes all large mappings that map pp and then decreases 5369 * p_szc starting from the last constituent page of the large page. By working 5370 * from the tail of a large page in pfn decreasing order allows one looking at 5371 * the root page to know that hat_page_demote() is done for root's szc area. 5372 * e.g. if a root page has szc 1 one knows it only has to lock all constituent 5373 * pages within szc 1 area to prevent szc changes because hat_page_demote() 5374 * that started on this page when it had szc > 1 is done for this szc 1 area. 5375 * 5376 * We are guaranteed that all constituent pages of pp's large page belong to 5377 * the same vnode with the consecutive offsets increasing in the direction of 5378 * the pfn i.e. the identity of constituent pages can't change until their 5379 * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove 5380 * large mappings to pp even though we don't lock any constituent page except 5381 * pp (i.e. we won't unload e.g. kernel locked page). 
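 *
 * (For the "lock all constituent pages at least SHARED" alternative
 * mentioned above, group_page_trylock() earlier in this file illustrates
 * the constituent-locking pattern, including the re-check of p_szc once
 * all constituents are locked in case a demotion raced in.)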
5382 */ 5383 static void 5384 page_demote_vp_pages(page_t *pp) 5385 { 5386 kmutex_t *mtx; 5387 5388 ASSERT(PAGE_EXCL(pp)); 5389 ASSERT(!PP_ISFREE(pp)); 5390 ASSERT(pp->p_vnode != NULL); 5391 ASSERT(!IS_SWAPFSVP(pp->p_vnode)); 5392 ASSERT(!PP_ISKAS(pp)); 5393 5394 VM_STAT_ADD(pagecnt.pc_demote_pages[0]); 5395 5396 mtx = page_szc_lock(pp); 5397 if (mtx != NULL) { 5398 hat_page_demote(pp); 5399 mutex_exit(mtx); 5400 } 5401 ASSERT(pp->p_szc == 0); 5402 } 5403 5404 /* 5405 * Mark any existing pages for migration in the given range 5406 */ 5407 void 5408 page_mark_migrate(struct seg *seg, caddr_t addr, size_t len, 5409 struct anon_map *amp, ulong_t anon_index, vnode_t *vp, 5410 u_offset_t vnoff, int rflag) 5411 { 5412 struct anon *ap; 5413 vnode_t *curvp; 5414 lgrp_t *from; 5415 pgcnt_t nlocked; 5416 u_offset_t off; 5417 pfn_t pfn; 5418 size_t pgsz; 5419 size_t segpgsz; 5420 pgcnt_t pages; 5421 uint_t pszc; 5422 page_t *pp0, *pp; 5423 caddr_t va; 5424 ulong_t an_idx; 5425 anon_sync_obj_t cookie; 5426 5427 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 5428 5429 /* 5430 * Don't do anything if don't need to do lgroup optimizations 5431 * on this system 5432 */ 5433 if (!lgrp_optimizations()) 5434 return; 5435 5436 /* 5437 * Align address and length to (potentially large) page boundary 5438 */ 5439 segpgsz = page_get_pagesize(seg->s_szc); 5440 addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz); 5441 if (rflag) 5442 len = P2ROUNDUP(len, segpgsz); 5443 5444 /* 5445 * Do one (large) page at a time 5446 */ 5447 va = addr; 5448 while (va < addr + len) { 5449 /* 5450 * Lookup (root) page for vnode and offset corresponding to 5451 * this virtual address 5452 * Try anonmap first since there may be copy-on-write 5453 * pages, but initialize vnode pointer and offset using 5454 * vnode arguments just in case there isn't an amp. 
5455 */ 5456 curvp = vp; 5457 off = vnoff + va - seg->s_base; 5458 if (amp) { 5459 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5460 an_idx = anon_index + seg_page(seg, va); 5461 anon_array_enter(amp, an_idx, &cookie); 5462 ap = anon_get_ptr(amp->ahp, an_idx); 5463 if (ap) 5464 swap_xlate(ap, &curvp, &off); 5465 anon_array_exit(&cookie); 5466 ANON_LOCK_EXIT(&->a_rwlock); 5467 } 5468 5469 pp = NULL; 5470 if (curvp) 5471 pp = page_lookup(curvp, off, SE_SHARED); 5472 5473 /* 5474 * If there isn't a page at this virtual address, 5475 * skip to next page 5476 */ 5477 if (pp == NULL) { 5478 va += PAGESIZE; 5479 continue; 5480 } 5481 5482 /* 5483 * Figure out which lgroup this page is in for kstats 5484 */ 5485 pfn = page_pptonum(pp); 5486 from = lgrp_pfn_to_lgrp(pfn); 5487 5488 /* 5489 * Get page size, and round up and skip to next page boundary 5490 * if unaligned address 5491 */ 5492 pszc = pp->p_szc; 5493 pgsz = page_get_pagesize(pszc); 5494 pages = btop(pgsz); 5495 if (!IS_P2ALIGNED(va, pgsz) || 5496 !IS_P2ALIGNED(pfn, pages) || 5497 pgsz > segpgsz) { 5498 pgsz = MIN(pgsz, segpgsz); 5499 page_unlock(pp); 5500 pages = btop(P2END((uintptr_t)va, pgsz) - 5501 (uintptr_t)va); 5502 va = (caddr_t)P2END((uintptr_t)va, pgsz); 5503 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, pages); 5504 continue; 5505 } 5506 5507 /* 5508 * Upgrade to exclusive lock on page 5509 */ 5510 if (!page_tryupgrade(pp)) { 5511 page_unlock(pp); 5512 va += pgsz; 5513 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 5514 btop(pgsz)); 5515 continue; 5516 } 5517 5518 pp0 = pp++; 5519 nlocked = 1; 5520 5521 /* 5522 * Lock constituent pages if this is large page 5523 */ 5524 if (pages > 1) { 5525 /* 5526 * Lock all constituents except root page, since it 5527 * should be locked already. 5528 */ 5529 for (; nlocked < pages; nlocked++) { 5530 if (!page_trylock(pp, SE_EXCL)) { 5531 break; 5532 } 5533 if (PP_ISFREE(pp) || 5534 pp->p_szc != pszc) { 5535 /* 5536 * hat_page_demote() raced in with us. 5537 */ 5538 ASSERT(!IS_SWAPFSVP(curvp)); 5539 page_unlock(pp); 5540 break; 5541 } 5542 pp++; 5543 } 5544 } 5545 5546 /* 5547 * If all constituent pages couldn't be locked, 5548 * unlock pages locked so far and skip to next page. 5549 */ 5550 if (nlocked < pages) { 5551 while (pp0 < pp) { 5552 page_unlock(pp0++); 5553 } 5554 va += pgsz; 5555 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, 5556 btop(pgsz)); 5557 continue; 5558 } 5559 5560 /* 5561 * hat_page_demote() can no longer happen 5562 * since last cons page had the right p_szc after 5563 * all cons pages were locked. all cons pages 5564 * should now have the same p_szc. 
5565 */ 5566 5567 /* 5568 * All constituent pages locked successfully, so mark 5569 * large page for migration and unload the mappings of 5570 * constituent pages, so a fault will occur on any part of the 5571 * large page 5572 */ 5573 PP_SETMIGRATE(pp0); 5574 while (pp0 < pp) { 5575 (void) hat_pageunload(pp0, HAT_FORCE_PGUNLOAD); 5576 ASSERT(hat_page_getshare(pp0) == 0); 5577 page_unlock(pp0++); 5578 } 5579 lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked); 5580 5581 va += pgsz; 5582 } 5583 } 5584 5585 /* 5586 * Migrate any pages that have been marked for migration in the given range 5587 */ 5588 void 5589 page_migrate( 5590 struct seg *seg, 5591 caddr_t addr, 5592 page_t **ppa, 5593 pgcnt_t npages) 5594 { 5595 lgrp_t *from; 5596 lgrp_t *to; 5597 page_t *newpp; 5598 page_t *pp; 5599 pfn_t pfn; 5600 size_t pgsz; 5601 spgcnt_t page_cnt; 5602 spgcnt_t i; 5603 uint_t pszc; 5604 5605 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); 5606 5607 while (npages > 0) { 5608 pp = *ppa; 5609 pszc = pp->p_szc; 5610 pgsz = page_get_pagesize(pszc); 5611 page_cnt = btop(pgsz); 5612 5613 /* 5614 * Check to see whether this page is marked for migration 5615 * 5616 * Assume that root page of large page is marked for 5617 * migration and none of the other constituent pages 5618 * are marked. This really simplifies clearing the 5619 * migrate bit by not having to clear it from each 5620 * constituent page. 5621 * 5622 * note we don't want to relocate an entire large page if 5623 * someone is only using one subpage. 5624 */ 5625 if (npages < page_cnt) 5626 break; 5627 5628 /* 5629 * Is it marked for migration? 5630 */ 5631 if (!PP_ISMIGRATE(pp)) 5632 goto next; 5633 5634 /* 5635 * Determine lgroups that page is being migrated between 5636 */ 5637 pfn = page_pptonum(pp); 5638 if (!IS_P2ALIGNED(pfn, page_cnt)) { 5639 break; 5640 } 5641 from = lgrp_pfn_to_lgrp(pfn); 5642 to = lgrp_mem_choose(seg, addr, pgsz); 5643 5644 /* 5645 * Need to get exclusive lock's to migrate 5646 */ 5647 for (i = 0; i < page_cnt; i++) { 5648 ASSERT(PAGE_LOCKED(ppa[i])); 5649 if (page_pptonum(ppa[i]) != pfn + i || 5650 ppa[i]->p_szc != pszc) { 5651 break; 5652 } 5653 if (!page_tryupgrade(ppa[i])) { 5654 lgrp_stat_add(from->lgrp_id, 5655 LGRP_PM_FAIL_LOCK_PGS, 5656 page_cnt); 5657 break; 5658 } 5659 5660 /* 5661 * Check to see whether we are trying to migrate 5662 * page to lgroup where it is allocated already. 5663 * If so, clear the migrate bit and skip to next 5664 * page. 5665 */ 5666 if (i == 0 && to == from) { 5667 PP_CLRMIGRATE(ppa[0]); 5668 page_downgrade(ppa[0]); 5669 goto next; 5670 } 5671 } 5672 5673 /* 5674 * If all constituent pages couldn't be locked, 5675 * unlock pages locked so far and skip to next page. 
5676 */ 5677 if (i != page_cnt) { 5678 while (--i != -1) { 5679 page_downgrade(ppa[i]); 5680 } 5681 goto next; 5682 } 5683 5684 (void) page_create_wait(page_cnt, PG_WAIT); 5685 newpp = page_get_replacement_page(pp, to, PGR_SAMESZC); 5686 if (newpp == NULL) { 5687 page_create_putback(page_cnt); 5688 for (i = 0; i < page_cnt; i++) { 5689 page_downgrade(ppa[i]); 5690 } 5691 lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS, 5692 page_cnt); 5693 goto next; 5694 } 5695 ASSERT(newpp->p_szc == pszc); 5696 /* 5697 * Clear migrate bit and relocate page 5698 */ 5699 PP_CLRMIGRATE(pp); 5700 if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) { 5701 panic("page_migrate: page_relocate failed"); 5702 } 5703 ASSERT(page_cnt * PAGESIZE == pgsz); 5704 5705 /* 5706 * Keep stats for number of pages migrated from and to 5707 * each lgroup 5708 */ 5709 lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt); 5710 lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt); 5711 /* 5712 * update the page_t array we were passed in and 5713 * unlink constituent pages of a large page. 5714 */ 5715 for (i = 0; i < page_cnt; ++i, ++pp) { 5716 ASSERT(PAGE_EXCL(newpp)); 5717 ASSERT(newpp->p_szc == pszc); 5718 ppa[i] = newpp; 5719 pp = newpp; 5720 page_sub(&newpp, pp); 5721 page_downgrade(pp); 5722 } 5723 ASSERT(newpp == NULL); 5724 next: 5725 addr += pgsz; 5726 ppa += page_cnt; 5727 npages -= page_cnt; 5728 } 5729 } 5730 5731 uint_t page_reclaim_maxcnt = 60; /* max total iterations */ 5732 uint_t page_reclaim_nofree_maxcnt = 3; /* max iterations without progress */ 5733 /* 5734 * Reclaim/reserve availrmem for npages. 5735 * If there is not enough memory start reaping seg, kmem caches. 5736 * Start pageout scanner (via page_needfree()). 5737 * Exit after ~ MAX_CNT s regardless of how much memory has been released. 5738 * Note: There is no guarantee that any availrmem will be freed as 5739 * this memory typically is locked (kernel heap) or reserved for swap. 5740 * Also due to memory fragmentation kmem allocator may not be able 5741 * to free any memory (single user allocated buffer will prevent 5742 * freeing slab or a page). 5743 */ 5744 int 5745 page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust) 5746 { 5747 int i = 0; 5748 int i_nofree = 0; 5749 int ret = 0; 5750 pgcnt_t deficit; 5751 pgcnt_t old_availrmem = 0; 5752 5753 mutex_enter(&freemem_lock); 5754 while (availrmem < tune.t_minarmem + npages + epages && 5755 i++ < page_reclaim_maxcnt) { 5756 /* ensure we made some progress in the last few iterations */ 5757 if (old_availrmem < availrmem) { 5758 old_availrmem = availrmem; 5759 i_nofree = 0; 5760 } else if (i_nofree++ >= page_reclaim_nofree_maxcnt) { 5761 break; 5762 } 5763 5764 deficit = tune.t_minarmem + npages + epages - availrmem; 5765 mutex_exit(&freemem_lock); 5766 page_needfree(deficit); 5767 kmem_reap(); 5768 delay(hz); 5769 page_needfree(-(spgcnt_t)deficit); 5770 mutex_enter(&freemem_lock); 5771 } 5772 5773 if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) { 5774 availrmem -= npages; 5775 ret = 1; 5776 } 5777 5778 mutex_exit(&freemem_lock); 5779 5780 return (ret); 5781 } 5782 5783 /* 5784 * Search the memory segments to locate the desired page. Within a 5785 * segment, pages increase linearly with one page structure per 5786 * physical page frame (size PAGESIZE). The search begins 5787 * with the segment that was accessed last, to take advantage of locality. 
5788 * If the hint misses, we start from the beginning of the sorted memseg list 5789 */ 5790 5791 5792 /* 5793 * Some data structures for pfn to pp lookup. 5794 */ 5795 ulong_t mhash_per_slot; 5796 struct memseg *memseg_hash[N_MEM_SLOTS]; 5797 5798 page_t * 5799 page_numtopp_nolock(pfn_t pfnum) 5800 { 5801 struct memseg *seg; 5802 page_t *pp; 5803 vm_cpu_data_t *vc; 5804 5805 /* 5806 * We need to disable kernel preemption while referencing the 5807 * cpu_vm_data field in order to prevent us from being switched to 5808 * another cpu and trying to reference it after it has been freed. 5809 * This will keep us on cpu and prevent it from being removed while 5810 * we are still on it. 5811 * 5812 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg 5813 * which is being resued by DR who will flush those references 5814 * before modifying the reused memseg. See memseg_cpu_vm_flush(). 5815 */ 5816 kpreempt_disable(); 5817 vc = CPU->cpu_vm_data; 5818 ASSERT(vc != NULL); 5819 5820 MEMSEG_STAT_INCR(nsearch); 5821 5822 /* Try last winner first */ 5823 if (((seg = vc->vc_pnum_memseg) != NULL) && 5824 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5825 MEMSEG_STAT_INCR(nlastwon); 5826 pp = seg->pages + (pfnum - seg->pages_base); 5827 if (pp->p_pagenum == pfnum) { 5828 kpreempt_enable(); 5829 return ((page_t *)pp); 5830 } 5831 } 5832 5833 /* Else Try hash */ 5834 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 5835 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5836 MEMSEG_STAT_INCR(nhashwon); 5837 vc->vc_pnum_memseg = seg; 5838 pp = seg->pages + (pfnum - seg->pages_base); 5839 if (pp->p_pagenum == pfnum) { 5840 kpreempt_enable(); 5841 return ((page_t *)pp); 5842 } 5843 } 5844 5845 /* Else Brute force */ 5846 for (seg = memsegs; seg != NULL; seg = seg->next) { 5847 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5848 vc->vc_pnum_memseg = seg; 5849 pp = seg->pages + (pfnum - seg->pages_base); 5850 if (pp->p_pagenum == pfnum) { 5851 kpreempt_enable(); 5852 return ((page_t *)pp); 5853 } 5854 } 5855 } 5856 vc->vc_pnum_memseg = NULL; 5857 kpreempt_enable(); 5858 MEMSEG_STAT_INCR(nnotfound); 5859 return ((page_t *)NULL); 5860 5861 } 5862 5863 struct memseg * 5864 page_numtomemseg_nolock(pfn_t pfnum) 5865 { 5866 struct memseg *seg; 5867 page_t *pp; 5868 5869 /* 5870 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg 5871 * which is being resued by DR who will flush those references 5872 * before modifying the reused memseg. See memseg_cpu_vm_flush(). 5873 */ 5874 kpreempt_disable(); 5875 /* Try hash */ 5876 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) && 5877 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) { 5878 pp = seg->pages + (pfnum - seg->pages_base); 5879 if (pp->p_pagenum == pfnum) { 5880 kpreempt_enable(); 5881 return (seg); 5882 } 5883 } 5884 5885 /* Else Brute force */ 5886 for (seg = memsegs; seg != NULL; seg = seg->next) { 5887 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) { 5888 pp = seg->pages + (pfnum - seg->pages_base); 5889 if (pp->p_pagenum == pfnum) { 5890 kpreempt_enable(); 5891 return (seg); 5892 } 5893 } 5894 } 5895 kpreempt_enable(); 5896 return ((struct memseg *)NULL); 5897 } 5898 5899 /* 5900 * Given a page and a count return the page struct that is 5901 * n structs away from the current one in the global page 5902 * list. 5903 * 5904 * This function wraps to the first page upon 5905 * reaching the end of the memseg list. 
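 *
 * The canonical full walk built on these primitives (page_busy() above
 * uses exactly this shape) is:
 *
 *	page_t *page0 = page_first();
 *	page_t *pp = page0;
 *
 *	do {
 *		... examine pp ...
 *	} while ((pp = page_next(pp)) != page0);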
5906 */ 5907 page_t * 5908 page_nextn(page_t *pp, ulong_t n) 5909 { 5910 struct memseg *seg; 5911 page_t *ppn; 5912 vm_cpu_data_t *vc; 5913 5914 /* 5915 * We need to disable kernel preemption while referencing the 5916 * cpu_vm_data field in order to prevent us from being switched to 5917 * another cpu and trying to reference it after it has been freed. 5918 * This will keep us on cpu and prevent it from being removed while 5919 * we are still on it. 5920 * 5921 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg 5922 * which is being resued by DR who will flush those references 5923 * before modifying the reused memseg. See memseg_cpu_vm_flush(). 5924 */ 5925 kpreempt_disable(); 5926 vc = (vm_cpu_data_t *)CPU->cpu_vm_data; 5927 5928 ASSERT(vc != NULL); 5929 5930 if (((seg = vc->vc_pnext_memseg) == NULL) || 5931 (seg->pages_base == seg->pages_end) || 5932 !(pp >= seg->pages && pp < seg->epages)) { 5933 5934 for (seg = memsegs; seg; seg = seg->next) { 5935 if (pp >= seg->pages && pp < seg->epages) 5936 break; 5937 } 5938 5939 if (seg == NULL) { 5940 /* Memory delete got in, return something valid. */ 5941 /* TODO: fix me. */ 5942 seg = memsegs; 5943 pp = seg->pages; 5944 } 5945 } 5946 5947 /* check for wraparound - possible if n is large */ 5948 while ((ppn = (pp + n)) >= seg->epages || ppn < pp) { 5949 n -= seg->epages - pp; 5950 seg = seg->next; 5951 if (seg == NULL) 5952 seg = memsegs; 5953 pp = seg->pages; 5954 } 5955 vc->vc_pnext_memseg = seg; 5956 kpreempt_enable(); 5957 return (ppn); 5958 } 5959 5960 /* 5961 * Initialize for a loop using page_next_scan_large(). 5962 */ 5963 page_t * 5964 page_next_scan_init(void **cookie) 5965 { 5966 ASSERT(cookie != NULL); 5967 *cookie = (void *)memsegs; 5968 return ((page_t *)memsegs->pages); 5969 } 5970 5971 /* 5972 * Return the next page in a scan of page_t's, assuming we want 5973 * to skip over sub-pages within larger page sizes. 5974 * 5975 * The cookie is used to keep track of the current memseg. 5976 */ 5977 page_t * 5978 page_next_scan_large( 5979 page_t *pp, 5980 ulong_t *n, 5981 void **cookie) 5982 { 5983 struct memseg *seg = (struct memseg *)*cookie; 5984 page_t *new_pp; 5985 ulong_t cnt; 5986 pfn_t pfn; 5987 5988 5989 /* 5990 * get the count of page_t's to skip based on the page size 5991 */ 5992 ASSERT(pp != NULL); 5993 if (pp->p_szc == 0) { 5994 cnt = 1; 5995 } else { 5996 pfn = page_pptonum(pp); 5997 cnt = page_get_pagecnt(pp->p_szc); 5998 cnt -= pfn & (cnt - 1); 5999 } 6000 *n += cnt; 6001 new_pp = pp + cnt; 6002 6003 /* 6004 * Catch if we went past the end of the current memory segment. If so, 6005 * just move to the next segment with pages. 6006 */ 6007 if (new_pp >= seg->epages || seg->pages_base == seg->pages_end) { 6008 do { 6009 seg = seg->next; 6010 if (seg == NULL) 6011 seg = memsegs; 6012 } while (seg->pages_base == seg->pages_end); 6013 new_pp = seg->pages; 6014 *cookie = (void *)seg; 6015 } 6016 6017 return (new_pp); 6018 } 6019 6020 6021 /* 6022 * Returns next page in list. Note: this function wraps 6023 * to the first page in the list upon reaching the end 6024 * of the list. Callers should be aware of this fact. 6025 */ 6026 6027 /* We should change this be a #define */ 6028 6029 page_t * 6030 page_next(page_t *pp) 6031 { 6032 return (page_nextn(pp, 1)); 6033 } 6034 6035 page_t * 6036 page_first() 6037 { 6038 return ((page_t *)memsegs->pages); 6039 } 6040 6041 6042 /* 6043 * This routine is called at boot with the initial memory configuration 6044 * and when memory is added or removed. 
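 *
 * The hash built here is consumed by page_numtopp_nolock() above; each
 * slot covers mhash_per_slot pfns, so a successful lookup is essentially
 *
 *	seg = memseg_hash[MEMSEG_PFN_HASH(pfn)];
 *	if (seg != NULL && pfn >= seg->pages_base && pfn < seg->pages_end)
 *		pp = seg->pages + (pfn - seg->pages_base);
 *
 * with a linear scan of the memseg list as the fallback when the slot
 * does not cover the pfn (a sketch of the existing lookup, shown here
 * only for context).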
6045 */ 6046 void 6047 build_pfn_hash() 6048 { 6049 pfn_t cur; 6050 pgcnt_t index; 6051 struct memseg *pseg; 6052 int i; 6053 6054 /* 6055 * Clear memseg_hash array. 6056 * Since memory add/delete is designed to operate concurrently 6057 * with normal operation, the hash rebuild must be able to run 6058 * concurrently with page_numtopp_nolock(). To support this 6059 * functionality, assignments to memseg_hash array members must 6060 * be done atomically. 6061 * 6062 * NOTE: bzero() does not currently guarantee this for kernel 6063 * threads, and cannot be used here. 6064 */ 6065 for (i = 0; i < N_MEM_SLOTS; i++) 6066 memseg_hash[i] = NULL; 6067 6068 hat_kpm_mseghash_clear(N_MEM_SLOTS); 6069 6070 /* 6071 * Physmax is the last valid pfn. 6072 */ 6073 mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT; 6074 for (pseg = memsegs; pseg != NULL; pseg = pseg->next) { 6075 index = MEMSEG_PFN_HASH(pseg->pages_base); 6076 cur = pseg->pages_base; 6077 do { 6078 if (index >= N_MEM_SLOTS) 6079 index = MEMSEG_PFN_HASH(cur); 6080 6081 if (memseg_hash[index] == NULL || 6082 memseg_hash[index]->pages_base > pseg->pages_base) { 6083 memseg_hash[index] = pseg; 6084 hat_kpm_mseghash_update(index, pseg); 6085 } 6086 cur += mhash_per_slot; 6087 index++; 6088 } while (cur < pseg->pages_end); 6089 } 6090 } 6091 6092 /* 6093 * Return the pagenum for the pp 6094 */ 6095 pfn_t 6096 page_pptonum(page_t *pp) 6097 { 6098 return (pp->p_pagenum); 6099 } 6100 6101 /* 6102 * interface to the referenced and modified etc bits 6103 * in the PSM part of the page struct 6104 * when no locking is desired. 6105 */ 6106 void 6107 page_set_props(page_t *pp, uint_t flags) 6108 { 6109 ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0); 6110 pp->p_nrm |= (uchar_t)flags; 6111 } 6112 6113 void 6114 page_clr_all_props(page_t *pp) 6115 { 6116 pp->p_nrm = 0; 6117 } 6118 6119 /* 6120 * Clear p_lckcnt and p_cowcnt, adjusting freemem if required. 6121 */ 6122 int 6123 page_clear_lck_cow(page_t *pp, int adjust) 6124 { 6125 int f_amount; 6126 6127 ASSERT(PAGE_EXCL(pp)); 6128 6129 /* 6130 * The page_struct_lock need not be acquired here since 6131 * we require the caller hold the page exclusively locked. 6132 */ 6133 f_amount = 0; 6134 if (pp->p_lckcnt) { 6135 f_amount = 1; 6136 pp->p_lckcnt = 0; 6137 } 6138 if (pp->p_cowcnt) { 6139 f_amount += pp->p_cowcnt; 6140 pp->p_cowcnt = 0; 6141 } 6142 6143 if (adjust && f_amount) { 6144 mutex_enter(&freemem_lock); 6145 availrmem += f_amount; 6146 mutex_exit(&freemem_lock); 6147 } 6148 6149 return (f_amount); 6150 } 6151 6152 /* 6153 * The following functions is called from free_vp_pages() 6154 * for an inexact estimate of a newly free'd page... 6155 */ 6156 ulong_t 6157 page_share_cnt(page_t *pp) 6158 { 6159 return (hat_page_getshare(pp)); 6160 } 6161 6162 int 6163 page_isshared(page_t *pp) 6164 { 6165 return (hat_page_checkshare(pp, 1)); 6166 } 6167 6168 int 6169 page_isfree(page_t *pp) 6170 { 6171 return (PP_ISFREE(pp)); 6172 } 6173 6174 int 6175 page_isref(page_t *pp) 6176 { 6177 return (hat_page_getattr(pp, P_REF)); 6178 } 6179 6180 int 6181 page_ismod(page_t *pp) 6182 { 6183 return (hat_page_getattr(pp, P_MOD)); 6184 } 6185 6186 /* 6187 * The following code all currently relates to the page capture logic: 6188 * 6189 * This logic is used for cases where there is a desire to claim a certain 6190 * physical page in the system for the caller. 
As it may not be possible 6191 * to capture the page immediately, the p_toxic bits are used in the page 6192 * structure to indicate that someone wants to capture this page. When the 6193 * page gets unlocked, the toxic flag will be noted and an attempt to capture 6194 * the page will be made. If it is successful, the original callers callback 6195 * will be called with the page to do with it what they please. 6196 * 6197 * There is also an async thread which wakes up to attempt to capture 6198 * pages occasionally which have the capture bit set. All of the pages which 6199 * need to be captured asynchronously have been inserted into the 6200 * page_capture_hash and thus this thread walks that hash list. Items in the 6201 * hash have an expiration time so this thread handles that as well by removing 6202 * the item from the hash if it has expired. 6203 * 6204 * Some important things to note are: 6205 * - if the PR_CAPTURE bit is set on a page, then the page is in the 6206 * page_capture_hash. The page_capture_hash_head.pchh_mutex is needed 6207 * to set and clear this bit, and while the lock is held is the only time 6208 * you can add or remove an entry from the hash. 6209 * - the PR_CAPTURE bit can only be set and cleared while holding the 6210 * page_capture_hash_head.pchh_mutex 6211 * - the t_flag field of the thread struct is used with the T_CAPTURING 6212 * flag to prevent recursion while dealing with large pages. 6213 * - pages which need to be retired never expire on the page_capture_hash. 6214 */ 6215 6216 static void page_capture_thread(void); 6217 static kthread_t *pc_thread_id; 6218 kcondvar_t pc_cv; 6219 static kmutex_t pc_thread_mutex; 6220 static clock_t pc_thread_shortwait; 6221 static clock_t pc_thread_longwait; 6222 static int pc_thread_retry; 6223 6224 struct page_capture_callback pc_cb[PC_NUM_CALLBACKS]; 6225 6226 /* Note that this is a circular linked list */ 6227 typedef struct page_capture_hash_bucket { 6228 page_t *pp; 6229 uchar_t szc; 6230 uchar_t pri; 6231 uint_t flags; 6232 clock_t expires; /* lbolt at which this request expires. */ 6233 void *datap; /* Cached data passed in for callback */ 6234 struct page_capture_hash_bucket *next; 6235 struct page_capture_hash_bucket *prev; 6236 } page_capture_hash_bucket_t; 6237 6238 #define PC_PRI_HI 0 /* capture now */ 6239 #define PC_PRI_LO 1 /* capture later */ 6240 #define PC_NUM_PRI 2 6241 6242 #define PAGE_CAPTURE_PRIO(pp) (PP_ISRAF(pp) ? PC_PRI_LO : PC_PRI_HI) 6243 6244 6245 /* 6246 * Each hash bucket will have it's own mutex and two lists which are: 6247 * active (0): represents requests which have not been processed by 6248 * the page_capture async thread yet. 6249 * walked (1): represents requests which have been processed by the 6250 * page_capture async thread within it's given walk of this bucket. 6251 * 6252 * These are all needed so that we can synchronize all async page_capture 6253 * events. When the async thread moves to a new bucket, it will append the 6254 * walked list to the active list and walk each item one at a time, moving it 6255 * from the active list to the walked list. Thus if there is an async request 6256 * outstanding for a given page, it will always be in one of the two lists. 6257 * New requests will always be added to the active list. 6258 * If we were not able to capture a page before the request expired, we'd free 6259 * up the request structure which would indicate to page_capture that there is 6260 * no longer a need for the given page, and clear the PR_CAPTURE flag if 6261 * possible. 
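 *
 * A hedged sketch of the consumer flow implied above (the index,
 * duration, flag encoding and callback body are hypothetical; only the
 * function names come from this file):
 *
 *	page_capture_register_callback(index, duration, my_cb);
 *	...
 *	ret = page_trycapture(pp, pp->p_szc, 1 << index, datap);
 *	...
 *	page_capture_unregister_callback(index);
 *
 * If the immediate attempt fails, the request is queued on the
 * page_capture_hash and the async thread keeps retrying it until it
 * succeeds or expires.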
6262 */ 6263 typedef struct page_capture_hash_head { 6264 kmutex_t pchh_mutex; 6265 uint_t num_pages[PC_NUM_PRI]; 6266 page_capture_hash_bucket_t lists[2]; /* sentinel nodes */ 6267 } page_capture_hash_head_t; 6268 6269 #ifdef DEBUG 6270 #define NUM_PAGE_CAPTURE_BUCKETS 4 6271 #else 6272 #define NUM_PAGE_CAPTURE_BUCKETS 64 6273 #endif 6274 6275 page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS]; 6276 6277 /* for now use a very simple hash based upon the size of a page struct */ 6278 #define PAGE_CAPTURE_HASH(pp) \ 6279 ((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1))) 6280 6281 extern pgcnt_t swapfs_minfree; 6282 6283 int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap); 6284 6285 /* 6286 * a callback function is required for page capture requests. 6287 */ 6288 void 6289 page_capture_register_callback(uint_t index, clock_t duration, 6290 int (*cb_func)(page_t *, void *, uint_t)) 6291 { 6292 ASSERT(pc_cb[index].cb_active == 0); 6293 ASSERT(cb_func != NULL); 6294 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 6295 pc_cb[index].duration = duration; 6296 pc_cb[index].cb_func = cb_func; 6297 pc_cb[index].cb_active = 1; 6298 rw_exit(&pc_cb[index].cb_rwlock); 6299 } 6300 6301 void 6302 page_capture_unregister_callback(uint_t index) 6303 { 6304 int i, j; 6305 struct page_capture_hash_bucket *bp1; 6306 struct page_capture_hash_bucket *bp2; 6307 struct page_capture_hash_bucket *head = NULL; 6308 uint_t flags = (1 << index); 6309 6310 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER); 6311 ASSERT(pc_cb[index].cb_active == 1); 6312 pc_cb[index].duration = 0; /* Paranoia */ 6313 pc_cb[index].cb_func = NULL; /* Paranoia */ 6314 pc_cb[index].cb_active = 0; 6315 rw_exit(&pc_cb[index].cb_rwlock); 6316 6317 /* 6318 * Just move all the entries to a private list which we can walk 6319 * through without the need to hold any locks. 6320 * No more requests can get added to the hash lists for this consumer 6321 * as the cb_active field for the callback has been cleared. 6322 */ 6323 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 6324 mutex_enter(&page_capture_hash[i].pchh_mutex); 6325 for (j = 0; j < 2; j++) { 6326 bp1 = page_capture_hash[i].lists[j].next; 6327 /* walk through all but first (sentinel) element */ 6328 while (bp1 != &page_capture_hash[i].lists[j]) { 6329 bp2 = bp1; 6330 if (bp2->flags & flags) { 6331 bp1 = bp2->next; 6332 bp1->prev = bp2->prev; 6333 bp2->prev->next = bp1; 6334 bp2->next = head; 6335 head = bp2; 6336 /* 6337 * Clear the PR_CAPTURE bit as we 6338 * hold appropriate locks here. 6339 */ 6340 page_clrtoxic(head->pp, PR_CAPTURE); 6341 page_capture_hash[i]. 6342 num_pages[bp2->pri]--; 6343 continue; 6344 } 6345 bp1 = bp1->next; 6346 } 6347 } 6348 mutex_exit(&page_capture_hash[i].pchh_mutex); 6349 } 6350 6351 while (head != NULL) { 6352 bp1 = head; 6353 head = head->next; 6354 kmem_free(bp1, sizeof (*bp1)); 6355 } 6356 } 6357 6358 6359 /* 6360 * Find pp in the active list and move it to the walked list if it 6361 * exists. 6362 * Note that most often pp should be at the front of the active list 6363 * as it is currently used and thus there is no other sort of optimization 6364 * being done here as this is a linked list data structure. 6365 * Returns 1 on successful move or 0 if page could not be found. 
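 * (List index 0 is the `active' list and index 1 is the `walked' list; see
 * the description above page_capture_hash_head_t.)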
6366 */ 6367 static int 6368 page_capture_move_to_walked(page_t *pp) 6369 { 6370 page_capture_hash_bucket_t *bp; 6371 int index; 6372 6373 index = PAGE_CAPTURE_HASH(pp); 6374 6375 mutex_enter(&page_capture_hash[index].pchh_mutex); 6376 bp = page_capture_hash[index].lists[0].next; 6377 while (bp != &page_capture_hash[index].lists[0]) { 6378 if (bp->pp == pp) { 6379 /* Remove from old list */ 6380 bp->next->prev = bp->prev; 6381 bp->prev->next = bp->next; 6382 6383 /* Add to new list */ 6384 bp->next = page_capture_hash[index].lists[1].next; 6385 bp->prev = &page_capture_hash[index].lists[1]; 6386 page_capture_hash[index].lists[1].next = bp; 6387 bp->next->prev = bp; 6388 6389 /* 6390 * There is a small probability of page on a free 6391 * list being retired while being allocated 6392 * and before P_RAF is set on it. The page may 6393 * end up marked as high priority request instead 6394 * of low priority request. 6395 * If P_RAF page is not marked as low priority request 6396 * change it to low priority request. 6397 */ 6398 page_capture_hash[index].num_pages[bp->pri]--; 6399 bp->pri = PAGE_CAPTURE_PRIO(pp); 6400 page_capture_hash[index].num_pages[bp->pri]++; 6401 mutex_exit(&page_capture_hash[index].pchh_mutex); 6402 return (1); 6403 } 6404 bp = bp->next; 6405 } 6406 mutex_exit(&page_capture_hash[index].pchh_mutex); 6407 return (0); 6408 } 6409 6410 /* 6411 * Add a new entry to the page capture hash. The only case where a new 6412 * entry is not added is when the page capture consumer is no longer registered. 6413 * In this case, we'll silently not add the page to the hash. We know that 6414 * page retire will always be registered for the case where we are currently 6415 * unretiring a page and thus there are no conflicts. 6416 */ 6417 static void 6418 page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap) 6419 { 6420 page_capture_hash_bucket_t *bp1; 6421 page_capture_hash_bucket_t *bp2; 6422 int index; 6423 int cb_index; 6424 int i; 6425 uchar_t pri; 6426 #ifdef DEBUG 6427 page_capture_hash_bucket_t *tp1; 6428 int l; 6429 #endif 6430 6431 ASSERT(!(flags & CAPTURE_ASYNC)); 6432 6433 bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP); 6434 6435 bp1->pp = pp; 6436 bp1->szc = szc; 6437 bp1->flags = flags; 6438 bp1->datap = datap; 6439 6440 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6441 if ((flags >> cb_index) & 1) { 6442 break; 6443 } 6444 } 6445 6446 ASSERT(cb_index != PC_NUM_CALLBACKS); 6447 6448 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6449 if (pc_cb[cb_index].cb_active) { 6450 if (pc_cb[cb_index].duration == -1) { 6451 bp1->expires = (clock_t)-1; 6452 } else { 6453 bp1->expires = ddi_get_lbolt() + 6454 pc_cb[cb_index].duration; 6455 } 6456 } else { 6457 /* There's no callback registered so don't add to the hash */ 6458 rw_exit(&pc_cb[cb_index].cb_rwlock); 6459 kmem_free(bp1, sizeof (*bp1)); 6460 return; 6461 } 6462 6463 index = PAGE_CAPTURE_HASH(pp); 6464 6465 /* 6466 * Only allow capture flag to be modified under this mutex. 6467 * Prevents multiple entries for same page getting added. 
6468 */ 6469 mutex_enter(&page_capture_hash[index].pchh_mutex); 6470 6471 /* 6472 * if not already on the hash, set capture bit and add to the hash 6473 */ 6474 if (!(pp->p_toxic & PR_CAPTURE)) { 6475 #ifdef DEBUG 6476 /* Check for duplicate entries */ 6477 for (l = 0; l < 2; l++) { 6478 tp1 = page_capture_hash[index].lists[l].next; 6479 while (tp1 != &page_capture_hash[index].lists[l]) { 6480 if (tp1->pp == pp) { 6481 panic("page pp 0x%p already on hash " 6482 "at 0x%p\n", 6483 (void *)pp, (void *)tp1); 6484 } 6485 tp1 = tp1->next; 6486 } 6487 } 6488 6489 #endif 6490 page_settoxic(pp, PR_CAPTURE); 6491 pri = PAGE_CAPTURE_PRIO(pp); 6492 bp1->pri = pri; 6493 bp1->next = page_capture_hash[index].lists[0].next; 6494 bp1->prev = &page_capture_hash[index].lists[0]; 6495 bp1->next->prev = bp1; 6496 page_capture_hash[index].lists[0].next = bp1; 6497 page_capture_hash[index].num_pages[pri]++; 6498 if (flags & CAPTURE_RETIRE) { 6499 page_retire_incr_pend_count(datap); 6500 } 6501 mutex_exit(&page_capture_hash[index].pchh_mutex); 6502 rw_exit(&pc_cb[cb_index].cb_rwlock); 6503 cv_signal(&pc_cv); 6504 return; 6505 } 6506 6507 /* 6508 * A page retire request will replace any other request. 6509 * A second physmem request which is for a different process than 6510 * the currently registered one will be dropped as there is 6511 * no way to hold the private data for both calls. 6512 * In the future, once there are more callers, this will have to 6513 * be worked out better as there needs to be private storage for 6514 * at least each type of caller (maybe have datap be an array of 6515 * *void's so that we can index based upon callers index). 6516 */ 6517 6518 /* walk hash list to update expire time */ 6519 for (i = 0; i < 2; i++) { 6520 bp2 = page_capture_hash[index].lists[i].next; 6521 while (bp2 != &page_capture_hash[index].lists[i]) { 6522 if (bp2->pp == pp) { 6523 if (flags & CAPTURE_RETIRE) { 6524 if (!(bp2->flags & CAPTURE_RETIRE)) { 6525 page_retire_incr_pend_count( 6526 datap); 6527 bp2->flags = flags; 6528 bp2->expires = bp1->expires; 6529 bp2->datap = datap; 6530 } 6531 } else { 6532 ASSERT(flags & CAPTURE_PHYSMEM); 6533 if (!(bp2->flags & CAPTURE_RETIRE) && 6534 (datap == bp2->datap)) { 6535 bp2->expires = bp1->expires; 6536 } 6537 } 6538 mutex_exit(&page_capture_hash[index]. 6539 pchh_mutex); 6540 rw_exit(&pc_cb[cb_index].cb_rwlock); 6541 kmem_free(bp1, sizeof (*bp1)); 6542 return; 6543 } 6544 bp2 = bp2->next; 6545 } 6546 } 6547 6548 /* 6549 * the PR_CAPTURE flag is protected by the page_capture_hash mutexes 6550 * and thus it either has to be set or not set and can't change 6551 * while holding the mutex above. 6552 */ 6553 panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n", 6554 (void *)pp); 6555 } 6556 6557 /* 6558 * We have a page in our hands, lets try and make it ours by turning 6559 * it into a clean page like it had just come off the freelists. 6560 * 6561 * Returns 0 on success, with the page still EXCL locked. 
6562 * On failure, the page will be unlocked, and returns EAGAIN 6563 */ 6564 static int 6565 page_capture_clean_page(page_t *pp) 6566 { 6567 page_t *newpp; 6568 int skip_unlock = 0; 6569 spgcnt_t count; 6570 page_t *tpp; 6571 int ret = 0; 6572 int extra; 6573 6574 ASSERT(PAGE_EXCL(pp)); 6575 ASSERT(!PP_RETIRED(pp)); 6576 ASSERT(curthread->t_flag & T_CAPTURING); 6577 6578 if (PP_ISFREE(pp)) { 6579 if (!page_reclaim(pp, NULL)) { 6580 skip_unlock = 1; 6581 ret = EAGAIN; 6582 goto cleanup; 6583 } 6584 ASSERT(pp->p_szc == 0); 6585 if (pp->p_vnode != NULL) { 6586 /* 6587 * Since this page came from the 6588 * cachelist, we must destroy the 6589 * old vnode association. 6590 */ 6591 page_hashout(pp, NULL); 6592 } 6593 goto cleanup; 6594 } 6595 6596 /* 6597 * If we know page_relocate will fail, skip it 6598 * It could still fail due to a UE on another page but we 6599 * can't do anything about that. 6600 */ 6601 if (pp->p_toxic & PR_UE) { 6602 goto skip_relocate; 6603 } 6604 6605 /* 6606 * It's possible that pages can not have a vnode as fsflush comes 6607 * through and cleans up these pages. It's ugly but that's how it is. 6608 */ 6609 if (pp->p_vnode == NULL) { 6610 goto skip_relocate; 6611 } 6612 6613 /* 6614 * Page was not free, so lets try to relocate it. 6615 * page_relocate only works with root pages, so if this is not a root 6616 * page, we need to demote it to try and relocate it. 6617 * Unfortunately this is the best we can do right now. 6618 */ 6619 newpp = NULL; 6620 if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) { 6621 if (page_try_demote_pages(pp) == 0) { 6622 ret = EAGAIN; 6623 goto cleanup; 6624 } 6625 } 6626 ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL); 6627 if (ret == 0) { 6628 page_t *npp; 6629 /* unlock the new page(s) */ 6630 while (count-- > 0) { 6631 ASSERT(newpp != NULL); 6632 npp = newpp; 6633 page_sub(&newpp, npp); 6634 page_unlock(npp); 6635 } 6636 ASSERT(newpp == NULL); 6637 /* 6638 * Check to see if the page we have is too large. 6639 * If so, demote it freeing up the extra pages. 6640 */ 6641 if (pp->p_szc > 0) { 6642 /* For now demote extra pages to szc == 0 */ 6643 extra = page_get_pagecnt(pp->p_szc) - 1; 6644 while (extra > 0) { 6645 tpp = pp->p_next; 6646 page_sub(&pp, tpp); 6647 tpp->p_szc = 0; 6648 page_free(tpp, 1); 6649 extra--; 6650 } 6651 /* Make sure to set our page to szc 0 as well */ 6652 ASSERT(pp->p_next == pp && pp->p_prev == pp); 6653 pp->p_szc = 0; 6654 } 6655 goto cleanup; 6656 } else if (ret == EIO) { 6657 ret = EAGAIN; 6658 goto cleanup; 6659 } else { 6660 /* 6661 * Need to reset return type as we failed to relocate the page 6662 * but that does not mean that some of the next steps will not 6663 * work. 6664 */ 6665 ret = 0; 6666 } 6667 6668 skip_relocate: 6669 6670 if (pp->p_szc > 0) { 6671 if (page_try_demote_pages(pp) == 0) { 6672 ret = EAGAIN; 6673 goto cleanup; 6674 } 6675 } 6676 6677 ASSERT(pp->p_szc == 0); 6678 6679 if (hat_ismod(pp)) { 6680 ret = EAGAIN; 6681 goto cleanup; 6682 } 6683 if (PP_ISKAS(pp)) { 6684 ret = EAGAIN; 6685 goto cleanup; 6686 } 6687 if (pp->p_lckcnt || pp->p_cowcnt) { 6688 ret = EAGAIN; 6689 goto cleanup; 6690 } 6691 6692 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 6693 ASSERT(!hat_page_is_mapped(pp)); 6694 6695 if (hat_ismod(pp)) { 6696 /* 6697 * This is a semi-odd case as the page is now modified but not 6698 * mapped as we just unloaded the mappings above. 
6699 */ 6700 ret = EAGAIN; 6701 goto cleanup; 6702 } 6703 if (pp->p_vnode != NULL) { 6704 page_hashout(pp, NULL); 6705 } 6706 6707 /* 6708 * At this point, the page should be in a clean state and 6709 * we can do whatever we want with it. 6710 */ 6711 6712 cleanup: 6713 if (ret != 0) { 6714 if (!skip_unlock) { 6715 page_unlock(pp); 6716 } 6717 } else { 6718 ASSERT(pp->p_szc == 0); 6719 ASSERT(PAGE_EXCL(pp)); 6720 6721 pp->p_next = pp; 6722 pp->p_prev = pp; 6723 } 6724 return (ret); 6725 } 6726 6727 /* 6728 * Various callers of page_trycapture() can have different restrictions upon 6729 * what memory they have access to. 6730 * Returns 0 on success, with the following error codes on failure: 6731 * EPERM - The requested page is long term locked, and thus repeated 6732 * requests to capture this page will likely fail. 6733 * ENOMEM - There was not enough free memory in the system to safely 6734 * map the requested page. 6735 * ENOENT - The requested page was inside the kernel cage, and the 6736 * CAPTURE_GET_CAGE flag was not set. 6737 */ 6738 int 6739 page_capture_pre_checks(page_t *pp, uint_t flags) 6740 { 6741 ASSERT(pp != NULL); 6742 6743 #if defined(__sparc) 6744 if (pp->p_vnode == &promvp) { 6745 return (EPERM); 6746 } 6747 6748 if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE) && 6749 (flags & CAPTURE_PHYSMEM)) { 6750 return (ENOENT); 6751 } 6752 6753 if (PP_ISNORELOCKERNEL(pp)) { 6754 return (EPERM); 6755 } 6756 #else 6757 if (PP_ISKAS(pp)) { 6758 return (EPERM); 6759 } 6760 #endif /* __sparc */ 6761 6762 /* only physmem currently has the restrictions checked below */ 6763 if (!(flags & CAPTURE_PHYSMEM)) { 6764 return (0); 6765 } 6766 6767 if (availrmem < swapfs_minfree) { 6768 /* 6769 * We won't try to capture this page as we are 6770 * running low on memory. 6771 */ 6772 return (ENOMEM); 6773 } 6774 return (0); 6775 } 6776 6777 /* 6778 * Once we have a page in our mitts, go ahead and complete the capture 6779 * operation. 6780 * Returns 1 on failure where the page is no longer needed. 6781 * Returns 0 on success. 6782 * Returns -1 if there was a transient failure. 6783 * Failure cases must release the SE_EXCL lock on pp (usually via page_free). 6784 */ 6785 int 6786 page_capture_take_action(page_t *pp, uint_t flags, void *datap) 6787 { 6788 int cb_index; 6789 int ret = 0; 6790 page_capture_hash_bucket_t *bp1; 6791 page_capture_hash_bucket_t *bp2; 6792 int index; 6793 int found = 0; 6794 int i; 6795 6796 ASSERT(PAGE_EXCL(pp)); 6797 ASSERT(curthread->t_flag & T_CAPTURING); 6798 6799 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6800 if ((flags >> cb_index) & 1) { 6801 break; 6802 } 6803 } 6804 ASSERT(cb_index < PC_NUM_CALLBACKS); 6805 6806 /* 6807 * Remove the entry from the page_capture hash, but don't free it yet 6808 * as we may need to put it back. 6809 * Since we own the page at this point in time, we should find it 6810 * in the hash if this is an ASYNC call. If we don't, it's likely 6811 * that the page_capture_async() thread decided that this request 6812 * had expired, in which case we just continue on. 6813 */ 6814 if (flags & CAPTURE_ASYNC) { 6815 6816 index = PAGE_CAPTURE_HASH(pp); 6817 6818 mutex_enter(&page_capture_hash[index].pchh_mutex); 6819 for (i = 0; i < 2 && !found; i++) { 6820 bp1 = page_capture_hash[index].lists[i].next; 6821 while (bp1 != &page_capture_hash[index].lists[i]) { 6822 if (bp1->pp == pp) { 6823 bp1->next->prev = bp1->prev; 6824 bp1->prev->next = bp1->next; 6825 page_capture_hash[index].
6826 num_pages[bp1->pri]--; 6827 page_clrtoxic(pp, PR_CAPTURE); 6828 found = 1; 6829 break; 6830 } 6831 bp1 = bp1->next; 6832 } 6833 } 6834 mutex_exit(&page_capture_hash[index].pchh_mutex); 6835 } 6836 6837 /* Synchronize with the unregister func. */ 6838 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER); 6839 if (!pc_cb[cb_index].cb_active) { 6840 page_free(pp, 1); 6841 rw_exit(&pc_cb[cb_index].cb_rwlock); 6842 if (found) { 6843 kmem_free(bp1, sizeof (*bp1)); 6844 } 6845 return (1); 6846 } 6847 6848 /* 6849 * We need to remove the entry from the page capture hash and turn off 6850 * the PR_CAPTURE bit before calling the callback. We'll need to cache 6851 * the entry here, and then based upon the return value, cleanup 6852 * appropriately or re-add it to the hash, making sure that someone else 6853 * hasn't already done so. 6854 * It should be rare for the callback to fail and thus it's ok for 6855 * the failure path to be a bit complicated as the success path is 6856 * cleaner and the locking rules are easier to follow. 6857 */ 6858 6859 ret = pc_cb[cb_index].cb_func(pp, datap, flags); 6860 6861 rw_exit(&pc_cb[cb_index].cb_rwlock); 6862 6863 /* 6864 * If this was an ASYNC request, we need to cleanup the hash if the 6865 * callback was successful or if the request was no longer valid. 6866 * For non-ASYNC requests, we return failure to map and the caller 6867 * will take care of adding the request to the hash. 6868 * Note also that the callback itself is responsible for the page 6869 * at this point in time in terms of locking ... The most common 6870 * case for the failure path should just be a page_free. 6871 */ 6872 if (ret >= 0) { 6873 if (found) { 6874 if (bp1->flags & CAPTURE_RETIRE) { 6875 page_retire_decr_pend_count(datap); 6876 } 6877 kmem_free(bp1, sizeof (*bp1)); 6878 } 6879 return (ret); 6880 } 6881 if (!found) { 6882 return (ret); 6883 } 6884 6885 ASSERT(flags & CAPTURE_ASYNC); 6886 6887 /* 6888 * Check for expiration time first as we can just free it up if it's 6889 * expired. 6890 */ 6891 if (ddi_get_lbolt() > bp1->expires && bp1->expires != -1) { 6892 kmem_free(bp1, sizeof (*bp1)); 6893 return (ret); 6894 } 6895 6896 /* 6897 * The callback failed and there used to be an entry in the hash for 6898 * this page, so we need to add it back to the hash. 6899 */ 6900 mutex_enter(&page_capture_hash[index].pchh_mutex); 6901 if (!(pp->p_toxic & PR_CAPTURE)) { 6902 /* just add bp1 back to head of walked list */ 6903 page_settoxic(pp, PR_CAPTURE); 6904 bp1->next = page_capture_hash[index].lists[1].next; 6905 bp1->prev = &page_capture_hash[index].lists[1]; 6906 bp1->next->prev = bp1; 6907 bp1->pri = PAGE_CAPTURE_PRIO(pp); 6908 page_capture_hash[index].lists[1].next = bp1; 6909 page_capture_hash[index].num_pages[bp1->pri]++; 6910 mutex_exit(&page_capture_hash[index].pchh_mutex); 6911 return (ret); 6912 } 6913 6914 /* 6915 * Otherwise there was a new capture request added to list 6916 * Need to make sure that our original data is represented if 6917 * appropriate. 
6918 */ 6919 for (i = 0; i < 2; i++) { 6920 bp2 = page_capture_hash[index].lists[i].next; 6921 while (bp2 != &page_capture_hash[index].lists[i]) { 6922 if (bp2->pp == pp) { 6923 if (bp1->flags & CAPTURE_RETIRE) { 6924 if (!(bp2->flags & CAPTURE_RETIRE)) { 6925 bp2->szc = bp1->szc; 6926 bp2->flags = bp1->flags; 6927 bp2->expires = bp1->expires; 6928 bp2->datap = bp1->datap; 6929 } 6930 } else { 6931 ASSERT(bp1->flags & CAPTURE_PHYSMEM); 6932 if (!(bp2->flags & CAPTURE_RETIRE)) { 6933 bp2->szc = bp1->szc; 6934 bp2->flags = bp1->flags; 6935 bp2->expires = bp1->expires; 6936 bp2->datap = bp1->datap; 6937 } 6938 } 6939 page_capture_hash[index].num_pages[bp2->pri]--; 6940 bp2->pri = PAGE_CAPTURE_PRIO(pp); 6941 page_capture_hash[index].num_pages[bp2->pri]++; 6942 mutex_exit(&page_capture_hash[index]. 6943 pchh_mutex); 6944 kmem_free(bp1, sizeof (*bp1)); 6945 return (ret); 6946 } 6947 bp2 = bp2->next; 6948 } 6949 } 6950 panic("PR_CAPTURE set but not on hash for pp 0x%p\n", (void *)pp); 6951 /*NOTREACHED*/ 6952 } 6953 6954 /* 6955 * Try to capture the given page for the caller specified in the flags 6956 * parameter. The page will either be captured and handed over to the 6957 * appropriate callback, or will be queued up in the page capture hash 6958 * to be captured asynchronously. 6959 * If the current request is due to an async capture, the page must be 6960 * exclusively locked before calling this function. 6961 * Currently szc must be 0 but in the future this should be expandable to 6962 * other page sizes. 6963 * Returns 0 on success, with the following error codes on failure: 6964 * EPERM - The requested page is long term locked, and thus repeated 6965 * requests to capture this page will likely fail. 6966 * ENOMEM - There was not enough free memory in the system to safely 6967 * map the requested page. 6968 * ENOENT - The requested page was inside the kernel cage, and the 6969 * CAPTURE_GET_CAGE flag was not set. 6970 * EAGAIN - The requested page could not be capturead at this point in 6971 * time but future requests will likely work. 6972 * EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED flag 6973 * was not set. 6974 */ 6975 int 6976 page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 6977 { 6978 int ret; 6979 int cb_index; 6980 6981 if (flags & CAPTURE_ASYNC) { 6982 ASSERT(PAGE_EXCL(pp)); 6983 goto async; 6984 } 6985 6986 /* Make sure there's enough availrmem ... */ 6987 ret = page_capture_pre_checks(pp, flags); 6988 if (ret != 0) { 6989 return (ret); 6990 } 6991 6992 if (!page_trylock(pp, SE_EXCL)) { 6993 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) { 6994 if ((flags >> cb_index) & 1) { 6995 break; 6996 } 6997 } 6998 ASSERT(cb_index < PC_NUM_CALLBACKS); 6999 ret = EAGAIN; 7000 /* Special case for retired pages */ 7001 if (PP_RETIRED(pp)) { 7002 if (flags & CAPTURE_GET_RETIRED) { 7003 if (!page_unretire_pp(pp, PR_UNR_TEMP)) { 7004 /* 7005 * Need to set capture bit and add to 7006 * hash so that the page will be 7007 * retired when freed. 
7008 */ 7009 page_capture_add_hash(pp, szc, 7010 CAPTURE_RETIRE, NULL); 7011 ret = 0; 7012 goto own_page; 7013 } 7014 } else { 7015 return (EBUSY); 7016 } 7017 } 7018 page_capture_add_hash(pp, szc, flags, datap); 7019 return (ret); 7020 } 7021 7022 async: 7023 ASSERT(PAGE_EXCL(pp)); 7024 7025 /* Need to check for physmem async requests that availrmem is sane */ 7026 if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) == 7027 (CAPTURE_ASYNC | CAPTURE_PHYSMEM) && 7028 (availrmem < swapfs_minfree)) { 7029 page_unlock(pp); 7030 return (ENOMEM); 7031 } 7032 7033 ret = page_capture_clean_page(pp); 7034 7035 if (ret != 0) { 7036 /* We failed to get the page, so lets add it to the hash */ 7037 if (!(flags & CAPTURE_ASYNC)) { 7038 page_capture_add_hash(pp, szc, flags, datap); 7039 } 7040 return (ret); 7041 } 7042 7043 own_page: 7044 ASSERT(PAGE_EXCL(pp)); 7045 ASSERT(pp->p_szc == 0); 7046 7047 /* Call the callback */ 7048 ret = page_capture_take_action(pp, flags, datap); 7049 7050 if (ret == 0) { 7051 return (0); 7052 } 7053 7054 /* 7055 * Note that in the failure cases from page_capture_take_action, the 7056 * EXCL lock will have already been dropped. 7057 */ 7058 if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) { 7059 page_capture_add_hash(pp, szc, flags, datap); 7060 } 7061 return (EAGAIN); 7062 } 7063 7064 int 7065 page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap) 7066 { 7067 int ret; 7068 7069 curthread->t_flag |= T_CAPTURING; 7070 ret = page_itrycapture(pp, szc, flags, datap); 7071 curthread->t_flag &= ~T_CAPTURING; /* xor works as we know its set */ 7072 return (ret); 7073 } 7074 7075 /* 7076 * When unlocking a page which has the PR_CAPTURE bit set, this routine 7077 * gets called to try and capture the page. 7078 */ 7079 void 7080 page_unlock_capture(page_t *pp) 7081 { 7082 page_capture_hash_bucket_t *bp; 7083 int index; 7084 int i; 7085 uint_t szc; 7086 uint_t flags = 0; 7087 void *datap; 7088 kmutex_t *mp; 7089 extern vnode_t retired_pages; 7090 7091 /* 7092 * We need to protect against a possible deadlock here where we own 7093 * the vnode page hash mutex and want to acquire it again as there 7094 * are locations in the code, where we unlock a page while holding 7095 * the mutex which can lead to the page being captured and eventually 7096 * end up here. As we may be hashing out the old page and hashing into 7097 * the retire vnode, we need to make sure we don't own them. 7098 * Other callbacks who do hash operations also need to make sure that 7099 * before they hashin to a vnode that they do not currently own the 7100 * vphm mutex otherwise there will be a panic. 7101 */ 7102 if (mutex_owned(page_vnode_mutex(&retired_pages))) { 7103 page_unlock_nocapture(pp); 7104 return; 7105 } 7106 if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) { 7107 page_unlock_nocapture(pp); 7108 return; 7109 } 7110 7111 index = PAGE_CAPTURE_HASH(pp); 7112 7113 mp = &page_capture_hash[index].pchh_mutex; 7114 mutex_enter(mp); 7115 for (i = 0; i < 2; i++) { 7116 bp = page_capture_hash[index].lists[i].next; 7117 while (bp != &page_capture_hash[index].lists[i]) { 7118 if (bp->pp == pp) { 7119 szc = bp->szc; 7120 flags = bp->flags | CAPTURE_ASYNC; 7121 datap = bp->datap; 7122 mutex_exit(mp); 7123 (void) page_trycapture(pp, szc, flags, datap); 7124 return; 7125 } 7126 bp = bp->next; 7127 } 7128 } 7129 7130 /* Failed to find page in hash so clear flags and unlock it. 
*/ 7131 page_clrtoxic(pp, PR_CAPTURE); 7132 page_unlock(pp); 7133 7134 mutex_exit(mp); 7135 } 7136 7137 void 7138 page_capture_init() 7139 { 7140 int i; 7141 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7142 page_capture_hash[i].lists[0].next = 7143 &page_capture_hash[i].lists[0]; 7144 page_capture_hash[i].lists[0].prev = 7145 &page_capture_hash[i].lists[0]; 7146 page_capture_hash[i].lists[1].next = 7147 &page_capture_hash[i].lists[1]; 7148 page_capture_hash[i].lists[1].prev = 7149 &page_capture_hash[i].lists[1]; 7150 } 7151 7152 pc_thread_shortwait = 23 * hz; 7153 pc_thread_longwait = 1201 * hz; 7154 pc_thread_retry = 3; 7155 mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL); 7156 cv_init(&pc_cv, NULL, CV_DEFAULT, NULL); 7157 pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0, 7158 TS_RUN, minclsyspri); 7159 } 7160 7161 /* 7162 * It is necessary to scrub any failing pages prior to reboot in order to 7163 * prevent a latent error trap from occurring on the next boot. 7164 */ 7165 void 7166 page_retire_mdboot() 7167 { 7168 page_t *pp; 7169 int i, j; 7170 page_capture_hash_bucket_t *bp; 7171 uchar_t pri; 7172 7173 /* walk lists looking for pages to scrub */ 7174 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7175 for (pri = 0; pri < PC_NUM_PRI; pri++) { 7176 if (page_capture_hash[i].num_pages[pri] != 0) { 7177 break; 7178 } 7179 } 7180 if (pri == PC_NUM_PRI) 7181 continue; 7182 7183 mutex_enter(&page_capture_hash[i].pchh_mutex); 7184 7185 for (j = 0; j < 2; j++) { 7186 bp = page_capture_hash[i].lists[j].next; 7187 while (bp != &page_capture_hash[i].lists[j]) { 7188 pp = bp->pp; 7189 if (PP_TOXIC(pp)) { 7190 if (page_trylock(pp, SE_EXCL)) { 7191 PP_CLRFREE(pp); 7192 pagescrub(pp, 0, PAGESIZE); 7193 page_unlock(pp); 7194 } 7195 } 7196 bp = bp->next; 7197 } 7198 } 7199 mutex_exit(&page_capture_hash[i].pchh_mutex); 7200 } 7201 } 7202 7203 /* 7204 * Walk the page_capture_hash trying to capture pages and also cleanup old 7205 * entries which have expired. 
7206 */ 7207 void 7208 page_capture_async() 7209 { 7210 page_t *pp; 7211 int i; 7212 int ret; 7213 page_capture_hash_bucket_t *bp1, *bp2; 7214 uint_t szc; 7215 uint_t flags; 7216 void *datap; 7217 uchar_t pri; 7218 7219 /* If there are outstanding pages to be captured, get to work */ 7220 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7221 for (pri = 0; pri < PC_NUM_PRI; pri++) { 7222 if (page_capture_hash[i].num_pages[pri] != 0) 7223 break; 7224 } 7225 if (pri == PC_NUM_PRI) 7226 continue; 7227 7228 /* Append list 1 to list 0 and then walk through list 0 */ 7229 mutex_enter(&page_capture_hash[i].pchh_mutex); 7230 bp1 = &page_capture_hash[i].lists[1]; 7231 bp2 = bp1->next; 7232 if (bp1 != bp2) { 7233 bp1->prev->next = page_capture_hash[i].lists[0].next; 7234 bp2->prev = &page_capture_hash[i].lists[0]; 7235 page_capture_hash[i].lists[0].next->prev = bp1->prev; 7236 page_capture_hash[i].lists[0].next = bp2; 7237 bp1->next = bp1; 7238 bp1->prev = bp1; 7239 } 7240 7241 /* list[1] will be empty now */ 7242 7243 bp1 = page_capture_hash[i].lists[0].next; 7244 while (bp1 != &page_capture_hash[i].lists[0]) { 7245 /* Check expiration time */ 7246 if ((ddi_get_lbolt() > bp1->expires && 7247 bp1->expires != -1) || 7248 page_deleted(bp1->pp)) { 7249 page_capture_hash[i].lists[0].next = bp1->next; 7250 bp1->next->prev = 7251 &page_capture_hash[i].lists[0]; 7252 page_capture_hash[i].num_pages[bp1->pri]--; 7253 7254 /* 7255 * We can safely remove the PR_CAPTURE bit 7256 * without holding the EXCL lock on the page 7257 * as the PR_CAPTURE bit requires that the 7258 * page_capture_hash[].pchh_mutex be held 7259 * to modify it. 7260 */ 7261 page_clrtoxic(bp1->pp, PR_CAPTURE); 7262 mutex_exit(&page_capture_hash[i].pchh_mutex); 7263 kmem_free(bp1, sizeof (*bp1)); 7264 mutex_enter(&page_capture_hash[i].pchh_mutex); 7265 bp1 = page_capture_hash[i].lists[0].next; 7266 continue; 7267 } 7268 pp = bp1->pp; 7269 szc = bp1->szc; 7270 flags = bp1->flags; 7271 datap = bp1->datap; 7272 mutex_exit(&page_capture_hash[i].pchh_mutex); 7273 if (page_trylock(pp, SE_EXCL)) { 7274 ret = page_trycapture(pp, szc, 7275 flags | CAPTURE_ASYNC, datap); 7276 } else { 7277 ret = 1; /* move to walked hash */ 7278 } 7279 7280 if (ret != 0) { 7281 /* Move to walked hash */ 7282 (void) page_capture_move_to_walked(pp); 7283 } 7284 mutex_enter(&page_capture_hash[i].pchh_mutex); 7285 bp1 = page_capture_hash[i].lists[0].next; 7286 } 7287 7288 mutex_exit(&page_capture_hash[i].pchh_mutex); 7289 } 7290 } 7291 7292 /* 7293 * This function is called by the page_capture_thread, and is needed 7294 * in order to initiate aio cleanup, so that pages used in aio 7295 * will be unlocked and subsequently retired by page_capture_thread. 7296 */ 7297 static int 7298 do_aio_cleanup(void) 7299 { 7300 proc_t *procp; 7301 int (*aio_cleanup_dr_delete_memory)(proc_t *); 7302 int cleaned = 0; 7303 7304 if (modload("sys", "kaio") == -1) { 7305 cmn_err(CE_WARN, "do_aio_cleanup: cannot load kaio"); 7306 return (0); 7307 } 7308 /* 7309 * We use the aio_cleanup_dr_delete_memory function to 7310 * initiate the actual clean up; this function will wake 7311 * up the per-process aio_cleanup_thread.
7312 */ 7313 aio_cleanup_dr_delete_memory = (int (*)(proc_t *)) 7314 modgetsymvalue("aio_cleanup_dr_delete_memory", 0); 7315 if (aio_cleanup_dr_delete_memory == NULL) { 7316 cmn_err(CE_WARN, 7317 "aio_cleanup_dr_delete_memory not found in kaio"); 7318 return (0); 7319 } 7320 mutex_enter(&pidlock); 7321 for (procp = practive; (procp != NULL); procp = procp->p_next) { 7322 mutex_enter(&procp->p_lock); 7323 if (procp->p_aio != NULL) { 7324 /* cleanup proc's outstanding kaio */ 7325 cleaned += (*aio_cleanup_dr_delete_memory)(procp); 7326 } 7327 mutex_exit(&procp->p_lock); 7328 } 7329 mutex_exit(&pidlock); 7330 return (cleaned); 7331 } 7332 7333 /* 7334 * helper function for page_capture_thread 7335 */ 7336 static void 7337 page_capture_handle_outstanding(void) 7338 { 7339 int ntry; 7340 7341 /* Reap pages before attempting to capture pages */ 7342 kmem_reap(); 7343 7344 if ((page_retire_pend_count() > page_retire_pend_kas_count()) && 7345 hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 7346 /* 7347 * Note: Purging only for platforms that support 7348 * ISM hat_pageunload() - mainly SPARC. On x86/x64 7349 * platforms ISM pages are SE_SHARED locked until destroyed. 7350 */ 7351 7352 /* disable and purge seg_pcache */ 7353 (void) seg_p_disable(); 7354 for (ntry = 0; ntry < pc_thread_retry; ntry++) { 7355 if (!page_retire_pend_count()) 7356 break; 7357 if (do_aio_cleanup()) { 7358 /* 7359 * allow the apps' cleanup threads 7360 * to run 7361 */ 7362 delay(pc_thread_shortwait); 7363 } 7364 page_capture_async(); 7365 } 7366 /* reenable seg_pcache */ 7367 seg_p_enable(); 7368 7369 /* completed what can be done. break out */ 7370 return; 7371 } 7372 7373 /* 7374 * For kernel pages and/or unsupported HAT_DYNAMIC_ISM_UNMAP, reap 7375 * and then attempt to capture. 7376 */ 7377 seg_preap(); 7378 page_capture_async(); 7379 } 7380 7381 /* 7382 * The page_capture_thread loops forever, looking to see if there are 7383 * pages still waiting to be captured. 7384 */ 7385 static void 7386 page_capture_thread(void) 7387 { 7388 callb_cpr_t c; 7389 int i; 7390 int high_pri_pages; 7391 int low_pri_pages; 7392 clock_t timeout; 7393 7394 CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr, "page_capture"); 7395 7396 mutex_enter(&pc_thread_mutex); 7397 for (;;) { 7398 high_pri_pages = 0; 7399 low_pri_pages = 0; 7400 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) { 7401 high_pri_pages += 7402 page_capture_hash[i].num_pages[PC_PRI_HI]; 7403 low_pri_pages += 7404 page_capture_hash[i].num_pages[PC_PRI_LO]; 7405 } 7406 7407 timeout = pc_thread_longwait; 7408 if (high_pri_pages != 0) { 7409 timeout = pc_thread_shortwait; 7410 page_capture_handle_outstanding(); 7411 } else if (low_pri_pages != 0) { 7412 page_capture_async(); 7413 } 7414 CALLB_CPR_SAFE_BEGIN(&c); 7415 (void) cv_reltimedwait(&pc_cv, &pc_thread_mutex, 7416 timeout, TR_CLOCK_TICK); 7417 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex); 7418 } 7419 /*NOTREACHED*/ 7420 } 7421 /* 7422 * Attempt to locate a bucket that has enough pages to satisfy the request. 7423 * The initial check is done without the lock to avoid unneeded contention. 7424 * The function returns 1 if enough pages were found, else 0 if it could not 7425 * find enough pages in a bucket. 7426 */ 7427 static int 7428 pcf_decrement_bucket(pgcnt_t npages) 7429 { 7430 struct pcf *p; 7431 struct pcf *q; 7432 int i; 7433 7434 p = &pcf[PCF_INDEX()]; 7435 q = &pcf[pcf_fanout]; 7436 for (i = 0; i < pcf_fanout; i++) { 7437 if (p->pcf_count > npages) { 7438 /* 7439 * a good one to try.
7440 */ 7441 mutex_enter(&p->pcf_lock); 7442 if (p->pcf_count > npages) { 7443 p->pcf_count -= (uint_t)npages; 7444 /* 7445 * freemem is not protected by any lock. 7446 * Thus, we cannot have any assertion 7447 * containing freemem here. 7448 */ 7449 freemem -= npages; 7450 mutex_exit(&p->pcf_lock); 7451 return (1); 7452 } 7453 mutex_exit(&p->pcf_lock); 7454 } 7455 p++; 7456 if (p >= q) { 7457 p = pcf; 7458 } 7459 } 7460 return (0); 7461 } 7462 7463 /* 7464 * Arguments: 7465 * pcftotal_ret: If the value is not NULL and we have walked all the 7466 * buckets but did not find enough pages then it will 7467 * be set to the total number of pages in all the pcf 7468 * buckets. 7469 * npages: Is the number of pages we have been requested to 7470 * find. 7471 * unlock: If set to 0 we will leave the buckets locked if the 7472 * requested number of pages are not found. 7473 * 7474 * Go and try to satisfy the page request from any number of buckets. 7475 * This can be a very expensive operation as we have to lock the buckets 7476 * we are checking (and keep them locked), starting at bucket 0. 7477 * 7478 * The function returns 1 if enough pages were found, else 0 if it could not 7479 * find enough pages in the buckets. 7480 * 7481 */ 7482 static int 7483 pcf_decrement_multiple(pgcnt_t *pcftotal_ret, pgcnt_t npages, int unlock) 7484 { 7485 struct pcf *p; 7486 pgcnt_t pcftotal; 7487 int i; 7488 7489 p = pcf; 7490 /* try to collect pages from several pcf bins */ 7491 for (pcftotal = 0, i = 0; i < pcf_fanout; i++) { 7492 mutex_enter(&p->pcf_lock); 7493 pcftotal += p->pcf_count; 7494 if (pcftotal >= npages) { 7495 /* 7496 * Wow! There are enough pages laying around 7497 * to satisfy the request. Do the accounting, 7498 * drop the locks we acquired, and go back. 7499 * 7500 * freemem is not protected by any lock. So, 7501 * we cannot have any assertion containing 7502 * freemem. 7503 */ 7504 freemem -= npages; 7505 while (p >= pcf) { 7506 if (p->pcf_count <= npages) { 7507 npages -= p->pcf_count; 7508 p->pcf_count = 0; 7509 } else { 7510 p->pcf_count -= (uint_t)npages; 7511 npages = 0; 7512 } 7513 mutex_exit(&p->pcf_lock); 7514 p--; 7515 } 7516 ASSERT(npages == 0); 7517 return (1); 7518 } 7519 p++; 7520 } 7521 if (unlock) { 7522 /* failed to collect pages - release the locks */ 7523 while (--p >= pcf) { 7524 mutex_exit(&p->pcf_lock); 7525 } 7526 } 7527 if (pcftotal_ret != NULL) 7528 *pcftotal_ret = pcftotal; 7529 return (0); 7530 } 7531
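/*
 * Illustrative sketch only (not part of the original source): a hypothetical
 * helper showing how the two routines above are typically composed by a
 * caller.  The cheap single-bucket path is tried first; only if that fails
 * do we pay for the multi-bucket walk, asking pcf_decrement_multiple() to
 * drop its locks (unlock == 1) when it cannot find enough pages.  The name
 * pcf_try_decrement is a placeholder used only for this example.
 */
static int
pcf_try_decrement(pgcnt_t npages)
{
	pgcnt_t pcftotal;

	/* Fast path: a single bucket had enough pages. */
	if (pcf_decrement_bucket(npages))
		return (1);

	/* Slow path: gather pages from several buckets, unlocking on failure. */
	if (pcf_decrement_multiple(&pcftotal, npages, 1))
		return (1);

	/* Not enough free pages across all buckets (only pcftotal available). */
	return (0);
}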