/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */


/*
 * This file contains common functions to access and manage the page lists.
 * Many of these routines originated from platform dependent modules
 * (sun4/vm/vm_dep.c, i86pc/vm/vm_machdep.c) and were modified to function in
 * a platform independent manner.
 *
 * vm/vm_dep.h provides for platform specific support.
 */

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/sysmacros.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <sys/vmsystm.h>
#include <sys/memnode.h>
#include <vm/vm_dep.h>
#include <sys/lgrp.h>
#include <sys/mem_config.h>
#include <sys/callb.h>
#include <sys/mem_cage.h>
#include <sys/sdt.h>

extern uint_t	vac_colors;

#define	MAX_PRAGMA_ALIGN	128

/* vm_cpu_data0 for the boot cpu before kmem is initialized */

#if L2CACHE_ALIGN_MAX <= MAX_PRAGMA_ALIGN
#pragma align L2CACHE_ALIGN_MAX(vm_cpu_data0)
#else
#pragma align MAX_PRAGMA_ALIGN(vm_cpu_data0)
#endif
char		vm_cpu_data0[VM_CPU_DATA_PADSIZE];

/*
 * number of page colors equivalent to requested color in page_get routines.
 * If set, keeps large pages intact longer and keeps MPO allocation
 * from the local mnode in favor of acquiring the 'correct' page color from
 * a demoted large page or from a remote mnode.
 */
uint_t	colorequiv;

/*
 * color equivalency mask for each page size.
 * Mask is computed based on cpu L2$ way sizes and colorequiv global.
 * High 4 bits determine the number of high order bits of the color to ignore.
 * Low 4 bits determine the number of low order bits of the color to ignore
 * (only relevant for hashed index based page coloring).
 */
uchar_t colorequivszc[MMU_PAGE_SIZES];

/*
 * if set, specifies the percentage of large pages that are free from within
 * a large page region before attempting to lock those pages for
 * page_get_contig_pages processing.
 *
 * Should be turned on when kpr is available, when page_trylock_contig_pages
 * can be more selective.
 */

int	ptcpthreshold;

/*
 * Limit page get contig page search based on failure cnts in pgcpfailcnt[].
 * Enabled by default via pgcplimitsearch.
 *
 * pgcpfailcnt[] is bounded by PGCPFAILMAX (>= 1/2 of installed
 * memory). When reached, pgcpfailcnt[] is reset to 1/2 of this upper
 * bound. This upper bound range guarantees:
 *	- all large page 'slots' will be searched over time
 *	- at least one large page candidate is considered on each pgcp call
 *	- count doesn't wrap around to 0
 */
pgcnt_t	pgcpfailcnt[MMU_PAGE_SIZES];
int	pgcplimitsearch = 1;

#define	PGCPFAILMAX		(1 << (highbit(physinstalled) - 1))
#define	SETPGCPFAILCNT(szc)						\
	if (++pgcpfailcnt[szc] >= PGCPFAILMAX)				\
		pgcpfailcnt[szc] = PGCPFAILMAX / 2;

#ifdef VM_STATS
struct vmm_vmstats_str  vmm_vmstats;

#endif /* VM_STATS */

#if defined(__sparc)
#define	LPGCREATE	0
#else
/* enable page_get_contig_pages */
#define	LPGCREATE	1
#endif

int pg_contig_disable;
int pg_lpgcreate_nocage = LPGCREATE;

/*
 * page_freelist_split pfn flag to signify no lo or hi pfn requirement.
 */
#define	PFNNULL		0

/* Flags involved in promotion and demotion routines */
#define	PC_FREE		0x1	/* put page on freelist */
#define	PC_ALLOC	0x2	/* return page for allocation */

/*
 * Flag for page_demote to be used with PC_FREE to denote that we don't care
 * what the color is as the color parameter to the function is ignored.
 */
#define	PC_NO_COLOR	(-1)

/* mtype value for page_promote to use when mtype does not matter */
#define	PC_MTYPE_ANY	(-1)

/*
 * page counters candidates info
 * See page_ctrs_cands comment below for more details.
 * fields are as follows:
 *	pcc_pages_free:		# pages which freelist coalesce can create
 *	pcc_color_free:		pointer to page free counts per color
 */
typedef struct pcc_info {
	pgcnt_t	pcc_pages_free;
	pgcnt_t	*pcc_color_free;
	uint_t	pad[12];
} pcc_info_t;

/*
 * On big machines it can take a long time to check page_counters
 * arrays. page_ctrs_cands is a summary array whose elements are a dynamically
 * updated sum of all elements of the corresponding page_counters arrays.
 * page_freelist_coalesce() searches page_counters only if an appropriate
 * element of page_ctrs_cands array is greater than 0.
 *
 * page_ctrs_cands is indexed by mutex (i), region (r), mnode (m), mrange (g)
 */
pcc_info_t **page_ctrs_cands[NPC_MUTEX][MMU_PAGE_SIZES];

/*
 * Return in val the total number of free pages which can be created
 * for the given mnode (m), mrange (g), and region size (r)
 */
#define	PGCTRS_CANDS_GETVALUE(m, g, r, val) {				\
	int i;								\
	val = 0;							\
	for (i = 0; i < NPC_MUTEX; i++) {				\
		val += page_ctrs_cands[i][(r)][(m)][(g)].pcc_pages_free; \
	}								\
}

/*
 * Return in val the total number of free pages which can be created
 * for the given mnode (m), mrange (g), region size (r), and color (c)
 */
#define	PGCTRS_CANDS_GETVALUECOLOR(m, g, r, c, val) {			\
	int i;								\
	val = 0;							\
	ASSERT((c) < PAGE_GET_PAGECOLORS(r));				\
	for (i = 0; i < NPC_MUTEX; i++) {				\
		val +=							\
		    page_ctrs_cands[i][(r)][(m)][(g)].pcc_color_free[(c)]; \
	}								\
}
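
/*
 * Illustrative sketch (editor's example, not part of the original source):
 * a caller deciding whether a coalesce attempt at region size r can
 * possibly succeed for a given mnode and mrange might use the macro
 * above roughly as follows ("pgfree" is a hypothetical local):
 *
 *	pgcnt_t pgfree;
 *
 *	PGCTRS_CANDS_GETVALUE(mnode, mrange, r, pgfree);
 *	if (pgfree == 0)
 *		return (NULL);
 *
 * page_freelist_coalesce() performs this style of pre-check so that the
 * expensive page_counters scan is skipped when it cannot succeed.
 */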
/*
 * We can only allow a single thread to update a counter within the physical
 * range of the largest supported page size. That is the finest granularity
 * possible since the counter values are dependent on each other
 * as you move across region sizes. PP_CTR_LOCK_INDX is used to determine the
 * ctr_mutex lock index for a particular physical range.
 */
static kmutex_t	*ctr_mutex[NPC_MUTEX];

#define	PP_CTR_LOCK_INDX(pp)						\
	(((pp)->p_pagenum >>						\
	    (PAGE_BSZS_SHIFT(mmu_page_sizes - 1))) & (NPC_MUTEX - 1))

#define	INVALID_COLOR	0xffffffff
#define	INVALID_MASK	0xffffffff

/*
 * Local function prototypes.
 */

void page_ctr_add(int, int, page_t *, int);
void page_ctr_add_internal(int, int, page_t *, int);
void page_ctr_sub(int, int, page_t *, int);
void page_ctr_sub_internal(int, int, page_t *, int);
void page_freelist_lock(int);
void page_freelist_unlock(int);
page_t *page_promote(int, pfn_t, uchar_t, int, int);
page_t *page_demote(int, pfn_t, pfn_t, uchar_t, uchar_t, int, int);
page_t *page_freelist_split(uchar_t,
    uint_t, int, int, pfn_t, pfn_t, page_list_walker_t *);
page_t *page_get_mnode_cachelist(uint_t, uint_t, int, int);
static int page_trylock_cons(page_t *pp, se_t se);

/*
 * The page_counters array below is used to keep track of free contiguous
 * physical memory. A hw_page_map_t will be allocated per mnode per szc.
 * This contains an array of counters, the size of the array, a shift value
 * used to convert a pagenum into a counter array index or vice versa, as
 * well as a cache of the last successful index to be promoted to a larger
 * page size. As an optimization, we keep track of the last successful index
 * to be promoted per page color for the given size region, and this is
 * allocated dynamically based upon the number of colors for a given
 * region size.
 *
 * Conceptually, the page counters are represented as:
 *
 *	page_counters[region_size][mnode]
 *
 *	region_size:	size code of a candidate larger page made up
 *			of contiguous free smaller pages.
 *
 *	page_counters[region_size][mnode].hpm_counters[index]:
 *		represents how many (region_size - 1) pages either
 *		exist or can be created within the given index range.
 *
 * Let's look at a sparc example:
 *	If we want to create a free 512k page, we look at region_size 2
 *	for the mnode we want. We calculate the index and look at a specific
 *	hpm_counters location. If we see 8 (FULL_REGION_CNT on sparc) at
 *	this location, it means that 8 64k pages either exist or can be
 *	created from 8K pages in order to make a single free 512k page at
 *	the given index. Note that when a region is full, it will contribute
 *	to the counts in the region above it. Thus we will not know what page
 *	size the free pages will be which can be promoted to this new free
 *	page unless we look at all regions below the current region.
 */
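
/*
 * A small worked example (editor's illustration, hypothetical numbers):
 * assume 8K base pages, 64K pages at region_size 1 and 512K pages at
 * region_size 2, with FULL_REGION_CNT of 8 at both levels. Freeing the
 * last free 8K page of a 64K region drives that region's size-1 counter
 * to 8 (full), which in turn increments the size-2 counter covering it.
 * Once the size-2 counter also reaches 8, a free 512K page can be
 * promoted at that index, although the counters alone do not say whether
 * it will be assembled from free 64K pages, free 8K pages, or a mix.
 */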
/*
 * Note: hpmctr_t is defined in platform vm_dep.h
 * hw_page_map_t contains all the information needed for the page_counters
 * logic. The fields are as follows:
 *
 *	hpm_counters:	dynamically allocated array to hold counter data
 *	hpm_entries:	entries in hpm_counters
 *	hpm_shift:	shift for pnum/array index conv
 *	hpm_base:	PFN mapped to counter index 0
 *	hpm_color_current:	last index in counter array for this color at
 *				which we successfully created a large page
 */
typedef struct hw_page_map {
	hpmctr_t	*hpm_counters;
	size_t		hpm_entries;
	int		hpm_shift;
	pfn_t		hpm_base;
	size_t		*hpm_color_current[MAX_MNODE_MRANGES];
#if defined(__sparc)
	uint_t		pad[4];
#endif
} hw_page_map_t;

/*
 * Element zero is not used, but is allocated for convenience.
 */
static hw_page_map_t *page_counters[MMU_PAGE_SIZES];

/*
 * Cached value of MNODE_RANGE_CNT(mnode).
 * This is a function call in x86.
 */
static int mnode_nranges[MAX_MEM_NODES];
static int mnode_maxmrange[MAX_MEM_NODES];

/*
 * The following macros are convenient ways to get access to the individual
 * elements of the page_counters arrays. They can be used on both
 * the left side and right side of equations.
 */
#define	PAGE_COUNTERS(mnode, rg_szc, idx)			\
	(page_counters[(rg_szc)][(mnode)].hpm_counters[(idx)])

#define	PAGE_COUNTERS_COUNTERS(mnode, rg_szc)			\
	(page_counters[(rg_szc)][(mnode)].hpm_counters)

#define	PAGE_COUNTERS_SHIFT(mnode, rg_szc)			\
	(page_counters[(rg_szc)][(mnode)].hpm_shift)

#define	PAGE_COUNTERS_ENTRIES(mnode, rg_szc)			\
	(page_counters[(rg_szc)][(mnode)].hpm_entries)

#define	PAGE_COUNTERS_BASE(mnode, rg_szc)			\
	(page_counters[(rg_szc)][(mnode)].hpm_base)

#define	PAGE_COUNTERS_CURRENT_COLOR_ARRAY(mnode, rg_szc, g)	\
	(page_counters[(rg_szc)][(mnode)].hpm_color_current[(g)])

#define	PAGE_COUNTERS_CURRENT_COLOR(mnode, rg_szc, color, mrange)	\
	(page_counters[(rg_szc)][(mnode)].				\
	hpm_color_current[(mrange)][(color)])

#define	PNUM_TO_IDX(mnode, rg_szc, pnum)			\
	(((pnum) - PAGE_COUNTERS_BASE((mnode), (rg_szc))) >>	\
	PAGE_COUNTERS_SHIFT((mnode), (rg_szc)))

#define	IDX_TO_PNUM(mnode, rg_szc, index)			\
	(PAGE_COUNTERS_BASE((mnode), (rg_szc)) +		\
	((index) << PAGE_COUNTERS_SHIFT((mnode), (rg_szc))))
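
/*
 * Worked example (editor's illustration with hypothetical values): if
 * hpm_base is 0x10000 and hpm_shift is 3 (a region covers 8 base pages),
 * then for pfn 0x10028:
 *
 *	idx  = (0x10028 - 0x10000) >> 3 = 5
 *	pnum = 0x10000 + (5 << 3) = 0x10028
 *
 * A non-aligned pfn such as 0x1002a also maps to idx 5, but IDX_TO_PNUM()
 * always returns the first pfn of the region, so the two macros are exact
 * inverses only for region aligned pfns. That is the identity the ASSERTs
 * in page_ctrs_alloc() below verify against r_base.
 */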
/*
 * Protects the hpm_counters and hpm_color_current memory from changing while
 * looking at page counters information.
 * Grab the write lock to modify what these fields point at.
 * Grab the read lock to prevent any pointers from changing.
 * The write lock can not be held during memory allocation due to a possible
 * recursion deadlock with trying to grab the read lock while the
 * write lock is already held.
 */
krwlock_t page_ctrs_rwlock[MAX_MEM_NODES];


/*
 * initialize cpu_vm_data to point at cache aligned vm_cpu_data_t.
 */
void
cpu_vm_data_init(struct cpu *cp)
{
	if (cp == CPU0) {
		cp->cpu_vm_data = (void *)&vm_cpu_data0;
	} else {
		void	*kmptr;
		int	align;
		size_t	sz;

		align = (L2CACHE_ALIGN) ? L2CACHE_ALIGN : L2CACHE_ALIGN_MAX;
		sz = P2ROUNDUP(sizeof (vm_cpu_data_t), align) + align;
		kmptr = kmem_zalloc(sz, KM_SLEEP);
		cp->cpu_vm_data = (void *) P2ROUNDUP((uintptr_t)kmptr, align);
		((vm_cpu_data_t *)cp->cpu_vm_data)->vc_kmptr = kmptr;
		((vm_cpu_data_t *)cp->cpu_vm_data)->vc_kmsize = sz;
	}
}

/*
 * free cpu_vm_data
 */
void
cpu_vm_data_destroy(struct cpu *cp)
{
	if (cp->cpu_seqid && cp->cpu_vm_data) {
		ASSERT(cp != CPU0);
		kmem_free(((vm_cpu_data_t *)cp->cpu_vm_data)->vc_kmptr,
		    ((vm_cpu_data_t *)cp->cpu_vm_data)->vc_kmsize);
	}
	cp->cpu_vm_data = NULL;
}


/*
 * page size to page size code
 */
int
page_szc(size_t pagesize)
{
	int	i = 0;

	while (hw_page_array[i].hp_size) {
		if (pagesize == hw_page_array[i].hp_size)
			return (i);
		i++;
	}
	return (-1);
}

/*
 * page size to page size code with the restriction that it be a supported
 * user page size. If it's not a supported user page size, -1 will be returned.
 */
int
page_szc_user_filtered(size_t pagesize)
{
	int szc = page_szc(pagesize);
	if ((szc != -1) && (SZC_2_USERSZC(szc) != -1)) {
		return (szc);
	}
	return (-1);
}

/*
 * Return how many page sizes are available for the user to use. This is
 * what the hardware supports and not based upon how the OS implements the
 * support of different page sizes.
 *
 * If legacy is non-zero, return the number of pagesizes available to legacy
 * applications. The number of legacy page sizes might be less than the
 * exported user page sizes. This is to prevent legacy applications that
 * use the largest page size returned from getpagesizes(3c) from inadvertently
 * using the 'new' large pagesizes.
 */
uint_t
page_num_user_pagesizes(int legacy)
{
	if (legacy)
		return (mmu_legacy_page_sizes);
	return (mmu_exported_page_sizes);
}

uint_t
page_num_pagesizes(void)
{
	return (mmu_page_sizes);
}

/*
 * returns the count of the number of base pagesize pages associated with szc
 */
pgcnt_t
page_get_pagecnt(uint_t szc)
{
	if (szc >= mmu_page_sizes)
		panic("page_get_pagecnt: out of range %d", szc);
	return (hw_page_array[szc].hp_pgcnt);
}

size_t
page_get_pagesize(uint_t szc)
{
	if (szc >= mmu_page_sizes)
		panic("page_get_pagesize: out of range %d", szc);
	return (hw_page_array[szc].hp_size);
}
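
/*
 * Usage sketch (editor's example): the two directions of the size/szc
 * mapping compose; on a machine whose smallest page is 8K (hypothetical
 * here) one would see
 *
 *	page_szc(8192)		== 0
 *	page_get_pagesize(0)	== 8192
 *
 * page_szc() returns -1 for a size the hardware does not support, while
 * page_get_pagesize() panics on an out of range size code, so callers
 * holding an untrusted size should validate it with page_szc() first.
 */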
/*
 * Return the size of a page based upon the index passed in. An index of
 * zero refers to the smallest page size in the system, and as index increases
 * it refers to the next larger supported page size in the system.
 * Note that szc and userszc may not be the same due to unsupported szc's on
 * some systems.
 */
size_t
page_get_user_pagesize(uint_t userszc)
{
	uint_t szc = USERSZC_2_SZC(userszc);

	if (szc >= mmu_page_sizes)
		panic("page_get_user_pagesize: out of range %d", szc);
	return (hw_page_array[szc].hp_size);
}

uint_t
page_get_shift(uint_t szc)
{
	if (szc >= mmu_page_sizes)
		panic("page_get_shift: out of range %d", szc);
	return (PAGE_GET_SHIFT(szc));
}

uint_t
page_get_pagecolors(uint_t szc)
{
	if (szc >= mmu_page_sizes)
		panic("page_get_pagecolors: out of range %d", szc);
	return (PAGE_GET_PAGECOLORS(szc));
}

/*
 * this assigns the desired equivalent color after a split
 */
uint_t
page_correct_color(uchar_t szc, uchar_t nszc, uint_t color,
    uint_t ncolor, uint_t ceq_mask)
{
	ASSERT(nszc > szc);
	ASSERT(szc < mmu_page_sizes);
	ASSERT(color < PAGE_GET_PAGECOLORS(szc));
	ASSERT(ncolor < PAGE_GET_PAGECOLORS(nszc));

	color &= ceq_mask;
	ncolor = PAGE_CONVERT_COLOR(ncolor, szc, nszc);
	return (color | (ncolor & ~ceq_mask));
}
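
/*
 * Worked example (editor's illustration, hypothetical values): with
 * ceq_mask 0x0f, requested color 0x25, and a parent color that converts
 * (via PAGE_CONVERT_COLOR()) to 0x73, the function returns
 *
 *	(0x25 & 0x0f) | (0x73 & ~0x0f) = 0x05 | 0x70 = 0x75
 *
 * i.e. the equivalency bits come from the caller's requested color and
 * the remaining bits are inherited from the page being split.
 */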
/*
 * The interleaved_mnodes flag is set when mnodes overlap in
 * the physbase..physmax range, but have disjoint slices.
 * In this case hpm_counters is shared by all mnodes.
 * This flag is set dynamically by the platform.
 */
int interleaved_mnodes = 0;

/*
 * Called by startup().
 * Size up the per page size free list counters based on physmax
 * of each node and max_mem_nodes.
 *
 * If interleaved_mnodes is set we need to find the first mnode that
 * exists. hpm_counters for the first mnode will then be shared by
 * all other mnodes. If interleaved_mnodes is not set, just set
 * first=mnode each time. That means there will be no sharing.
 */
size_t
page_ctrs_sz(void)
{
	int	r;		/* region size */
	int	mnode;
	int	firstmn;	/* first mnode that exists */
	int	nranges;
	pfn_t	physbase;
	pfn_t	physmax;
	uint_t	ctrs_sz = 0;
	int	i;
	pgcnt_t colors_per_szc[MMU_PAGE_SIZES];

	/*
	 * We need to determine how many page colors there are for each
	 * page size in order to allocate memory for any color specific
	 * arrays.
	 */
	for (i = 0; i < mmu_page_sizes; i++) {
		colors_per_szc[i] = PAGE_GET_PAGECOLORS(i);
	}

	for (firstmn = -1, mnode = 0; mnode < max_mem_nodes; mnode++) {

		pgcnt_t r_pgcnt;
		pfn_t   r_base;
		pgcnt_t r_align;

		if (mem_node_config[mnode].exists == 0)
			continue;

		HPM_COUNTERS_LIMITS(mnode, physbase, physmax, firstmn);
		nranges = MNODE_RANGE_CNT(mnode);
		mnode_nranges[mnode] = nranges;
		mnode_maxmrange[mnode] = MNODE_MAX_MRANGE(mnode);

		/*
		 * determine size needed for page counter arrays with
		 * base aligned to large page size.
		 */
		for (r = 1; r < mmu_page_sizes; r++) {
			/* add in space for hpm_color_current */
			ctrs_sz += sizeof (size_t) *
			    colors_per_szc[r] * nranges;

			if (firstmn != mnode)
				continue;

			/* add in space for hpm_counters */
			r_align = page_get_pagecnt(r);
			r_base = physbase;
			r_base &= ~(r_align - 1);
			r_pgcnt = howmany(physmax - r_base + 1, r_align);

			/*
			 * Round up to always allocate on pointer sized
			 * boundaries.
			 */
			ctrs_sz += P2ROUNDUP((r_pgcnt * sizeof (hpmctr_t)),
			    sizeof (hpmctr_t *));
		}
	}

	for (r = 1; r < mmu_page_sizes; r++) {
		ctrs_sz += (max_mem_nodes * sizeof (hw_page_map_t));
	}

	/* add in space for page_ctrs_cands and pcc_color_free */
	ctrs_sz += sizeof (pcc_info_t *) * max_mem_nodes *
	    mmu_page_sizes * NPC_MUTEX;

	for (mnode = 0; mnode < max_mem_nodes; mnode++) {

		if (mem_node_config[mnode].exists == 0)
			continue;

		nranges = mnode_nranges[mnode];
		ctrs_sz += sizeof (pcc_info_t) * nranges *
		    mmu_page_sizes * NPC_MUTEX;
		for (r = 1; r < mmu_page_sizes; r++) {
			ctrs_sz += sizeof (pgcnt_t) * nranges *
			    colors_per_szc[r] * NPC_MUTEX;
		}
	}

	/* ctr_mutex */
	ctrs_sz += (max_mem_nodes * NPC_MUTEX * sizeof (kmutex_t));

	/* size for page list counts */
	PLCNT_SZ(ctrs_sz);

	/*
	 * add some slop for roundups. page_ctrs_alloc will roundup the start
	 * address of the counters to ecache_alignsize boundary for every
	 * memory node.
	 */
	return (ctrs_sz + max_mem_nodes * L2CACHE_ALIGN);
}
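
/*
 * Sketch of the startup handshake (editor's note, simplified): platform
 * startup sizes the counter area first and then hands one boot-time
 * allocation of that size to page_ctrs_alloc(), roughly
 *
 *	sz = page_ctrs_sz();
 *	alloc_base = (some boot allocator)(sz);
 *	alloc_base = page_ctrs_alloc(alloc_base);
 *
 * so page_ctrs_alloc() below must carve the area in the same order, and
 * with the same rounding, that page_ctrs_sz() used when computing the
 * size; the L2CACHE_ALIGN slop added above absorbs the per-mnode
 * roundups done during carving.
 */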
caddr_t
page_ctrs_alloc(caddr_t alloc_base)
{
	int	mnode;
	int	mrange, nranges;
	int	r;		/* region size */
	int	i;
	int	firstmn;	/* first mnode that exists */
	pfn_t	physbase;
	pfn_t	physmax;
	pgcnt_t colors_per_szc[MMU_PAGE_SIZES];

	/*
	 * We need to determine how many page colors there are for each
	 * page size in order to allocate memory for any color specific
	 * arrays.
	 */
	for (i = 0; i < mmu_page_sizes; i++) {
		colors_per_szc[i] = PAGE_GET_PAGECOLORS(i);
	}

	for (r = 1; r < mmu_page_sizes; r++) {
		page_counters[r] = (hw_page_map_t *)alloc_base;
		alloc_base += (max_mem_nodes * sizeof (hw_page_map_t));
	}

	/* page_ctrs_cands and pcc_color_free array */
	for (i = 0; i < NPC_MUTEX; i++) {
		for (r = 1; r < mmu_page_sizes; r++) {

			page_ctrs_cands[i][r] = (pcc_info_t **)alloc_base;
			alloc_base += sizeof (pcc_info_t *) * max_mem_nodes;

			for (mnode = 0; mnode < max_mem_nodes; mnode++) {
				pcc_info_t	*pi;

				if (mem_node_config[mnode].exists == 0)
					continue;

				nranges = mnode_nranges[mnode];

				pi = (pcc_info_t *)alloc_base;
				alloc_base += sizeof (pcc_info_t) * nranges;
				page_ctrs_cands[i][r][mnode] = pi;

				for (mrange = 0; mrange < nranges; mrange++) {
					pi->pcc_color_free =
					    (pgcnt_t *)alloc_base;
					alloc_base += sizeof (pgcnt_t) *
					    colors_per_szc[r];
					pi++;
				}
			}
		}
	}

	/* ctr_mutex */
	for (i = 0; i < NPC_MUTEX; i++) {
		ctr_mutex[i] = (kmutex_t *)alloc_base;
		alloc_base += (max_mem_nodes * sizeof (kmutex_t));
	}

	/* initialize page list counts */
	PLCNT_INIT(alloc_base);

	for (firstmn = -1, mnode = 0; mnode < max_mem_nodes; mnode++) {

		pgcnt_t r_pgcnt;
		pfn_t	r_base;
		pgcnt_t r_align;
		int	r_shift;
		int	nranges = mnode_nranges[mnode];

		if (mem_node_config[mnode].exists == 0)
			continue;

		HPM_COUNTERS_LIMITS(mnode, physbase, physmax, firstmn);

		for (r = 1; r < mmu_page_sizes; r++) {
			/*
			 * the page_counters base has to be aligned to the
			 * page count of page size code r, otherwise the
			 * counts will cross large page boundaries.
			 */
			r_align = page_get_pagecnt(r);
			r_base = physbase;
			/* base needs to be aligned - lower to aligned value */
			r_base &= ~(r_align - 1);
			r_pgcnt = howmany(physmax - r_base + 1, r_align);
			r_shift = PAGE_BSZS_SHIFT(r);

			PAGE_COUNTERS_SHIFT(mnode, r) = r_shift;
			PAGE_COUNTERS_ENTRIES(mnode, r) = r_pgcnt;
			PAGE_COUNTERS_BASE(mnode, r) = r_base;
			for (mrange = 0; mrange < nranges; mrange++) {
				PAGE_COUNTERS_CURRENT_COLOR_ARRAY(mnode,
				    r, mrange) = (size_t *)alloc_base;
				alloc_base += sizeof (size_t) *
				    colors_per_szc[r];
			}
			for (i = 0; i < colors_per_szc[r]; i++) {
				uint_t color_mask = colors_per_szc[r] - 1;
				pfn_t  pfnum = r_base;
				size_t idx;
				int mrange;
				MEM_NODE_ITERATOR_DECL(it);

				MEM_NODE_ITERATOR_INIT(pfnum, mnode, r, &it);
				if (pfnum == (pfn_t)-1) {
					idx = 0;
				} else {
					PAGE_NEXT_PFN_FOR_COLOR(pfnum, r, i,
					    color_mask, color_mask, &it);
					idx = PNUM_TO_IDX(mnode, r, pfnum);
					idx = (idx >= r_pgcnt) ? 0 : idx;
				}
				for (mrange = 0; mrange < nranges; mrange++) {
					PAGE_COUNTERS_CURRENT_COLOR(mnode,
					    r, i, mrange) = idx;
				}
			}

			/* hpm_counters may be shared by all mnodes */
			if (firstmn == mnode) {
				PAGE_COUNTERS_COUNTERS(mnode, r) =
				    (hpmctr_t *)alloc_base;
				alloc_base +=
				    P2ROUNDUP((sizeof (hpmctr_t) * r_pgcnt),
				    sizeof (hpmctr_t *));
			} else {
				PAGE_COUNTERS_COUNTERS(mnode, r) =
				    PAGE_COUNTERS_COUNTERS(firstmn, r);
			}

			/*
			 * Verify that PNUM_TO_IDX and IDX_TO_PNUM
			 * satisfy the identity requirement.
			 * We should be able to go from one to the other
			 * and get consistent values.
			 */
			ASSERT(PNUM_TO_IDX(mnode, r,
			    (IDX_TO_PNUM(mnode, r, 0))) == 0);
			ASSERT(IDX_TO_PNUM(mnode, r,
			    (PNUM_TO_IDX(mnode, r, r_base))) == r_base);
		}
		/*
		 * Roundup the start address of the page_counters to
		 * cache aligned boundary for every memory node.
		 * page_ctrs_sz() has added some slop for these roundups.
		 */
		alloc_base = (caddr_t)P2ROUNDUP((uintptr_t)alloc_base,
		    L2CACHE_ALIGN);
	}

	/* Initialize other page counter specific data structures. */
	for (mnode = 0; mnode < MAX_MEM_NODES; mnode++) {
		rw_init(&page_ctrs_rwlock[mnode], NULL, RW_DEFAULT, NULL);
	}

	return (alloc_base);
}

/*
 * Functions to adjust region counters for each size free list.
 * Caller is responsible to acquire the ctr_mutex lock if necessary and
 * thus can be called during startup without locks.
 */
/* ARGSUSED */
void
page_ctr_add_internal(int mnode, int mtype, page_t *pp, int flags)
{
	ssize_t		r;	/* region size */
	ssize_t		idx;
	pfn_t		pfnum;
	int		lckidx;

	ASSERT(mnode == PP_2_MEM_NODE(pp));
	ASSERT(mtype == PP_2_MTYPE(pp));

	ASSERT(pp->p_szc < mmu_page_sizes);

	PLCNT_INCR(pp, mnode, mtype, pp->p_szc, flags);

	/* no counter update needed for largest page size */
	if (pp->p_szc >= mmu_page_sizes - 1) {
		return;
	}

	r = pp->p_szc + 1;
	pfnum = pp->p_pagenum;
	lckidx = PP_CTR_LOCK_INDX(pp);

	/*
	 * Increment the count of free pages for the current
	 * region. Continue looping up in region size incrementing
	 * count if the preceding region is full.
	 */
	while (r < mmu_page_sizes) {
		idx = PNUM_TO_IDX(mnode, r, pfnum);

		ASSERT(idx < PAGE_COUNTERS_ENTRIES(mnode, r));
		ASSERT(PAGE_COUNTERS(mnode, r, idx) < FULL_REGION_CNT(r));

		if (++PAGE_COUNTERS(mnode, r, idx) != FULL_REGION_CNT(r)) {
			break;
		} else {
			int root_mtype = PP_2_MTYPE(PP_GROUPLEADER(pp, r));
			pcc_info_t *cand = &page_ctrs_cands[lckidx][r][mnode]
			    [MTYPE_2_MRANGE(mnode, root_mtype)];

			cand->pcc_pages_free++;
			cand->pcc_color_free[PP_2_BIN_SZC(pp, r)]++;
		}
		r++;
	}
}

void
page_ctr_add(int mnode, int mtype, page_t *pp, int flags)
{
	int		lckidx = PP_CTR_LOCK_INDX(pp);
	kmutex_t	*lock = &ctr_mutex[lckidx][mnode];

	mutex_enter(lock);
	page_ctr_add_internal(mnode, mtype, pp, flags);
	mutex_exit(lock);
}

void
page_ctr_sub_internal(int mnode, int mtype, page_t *pp, int flags)
{
	int		lckidx;
	ssize_t		r;	/* region size */
	ssize_t		idx;
	pfn_t		pfnum;

	ASSERT(mnode == PP_2_MEM_NODE(pp));
	ASSERT(mtype == PP_2_MTYPE(pp));

	ASSERT(pp->p_szc < mmu_page_sizes);

	PLCNT_DECR(pp, mnode, mtype, pp->p_szc, flags);

	/* no counter update needed for largest page size */
	if (pp->p_szc >= mmu_page_sizes - 1) {
		return;
	}

	r = pp->p_szc + 1;
	pfnum = pp->p_pagenum;
	lckidx = PP_CTR_LOCK_INDX(pp);

	/*
	 * Decrement the count of free pages for the current
	 * region. Continue looping up in region size decrementing
	 * count if the preceding region was full.
	 */
	while (r < mmu_page_sizes) {
		idx = PNUM_TO_IDX(mnode, r, pfnum);

		ASSERT(idx < PAGE_COUNTERS_ENTRIES(mnode, r));
		ASSERT(PAGE_COUNTERS(mnode, r, idx) > 0);

		if (--PAGE_COUNTERS(mnode, r, idx) != FULL_REGION_CNT(r) - 1) {
			break;
		} else {
			int root_mtype = PP_2_MTYPE(PP_GROUPLEADER(pp, r));
			pcc_info_t *cand = &page_ctrs_cands[lckidx][r][mnode]
			    [MTYPE_2_MRANGE(mnode, root_mtype)];

			ASSERT(cand->pcc_pages_free != 0);
			ASSERT(cand->pcc_color_free[PP_2_BIN_SZC(pp, r)] != 0);

			cand->pcc_pages_free--;
			cand->pcc_color_free[PP_2_BIN_SZC(pp, r)]--;
		}
		r++;
	}
}

void
page_ctr_sub(int mnode, int mtype, page_t *pp, int flags)
{
	int		lckidx = PP_CTR_LOCK_INDX(pp);
	kmutex_t	*lock = &ctr_mutex[lckidx][mnode];

	mutex_enter(lock);
	page_ctr_sub_internal(mnode, mtype, pp, flags);
	mutex_exit(lock);
}
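
/*
 * Worked example of the cascade (editor's illustration, hypothetical
 * numbers): freeing an 8K page whose size-1 region counter stood at
 * FULL_REGION_CNT(1) - 1 drives that counter to full, so the loop in
 * page_ctr_add_internal() continues to size 2 and increments that
 * region's counter too (along with the matching pcc_pages_free and
 * pcc_color_free candidate counts). page_ctr_sub_internal() mirrors
 * this exactly: a counter dropping from full propagates a decrement
 * upward. That is why both loops stop at the first region that is not
 * (or was not) full.
 */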
/*
 * Adjust page counters following a memory attach, since typically the
 * size of the array needs to change, and the PFN to counter index
 * mapping needs to change.
 *
 * It is possible this mnode did not exist at startup. In that case
 * allocate pcc_info_t and pcc_color_free arrays. Also, allow for nranges
 * to change (a theoretical possibility on x86), which means pcc_color_free
 * arrays must be extended.
 */
uint_t
page_ctrs_adjust(int mnode)
{
	pgcnt_t npgs;
	int	r;		/* region size */
	int	i;
	size_t	pcsz, old_csz;
	hpmctr_t *new_ctr, *old_ctr;
	pfn_t	oldbase, newbase;
	pfn_t	physbase, physmax;
	size_t	old_npgs;
	hpmctr_t *ctr_cache[MMU_PAGE_SIZES];
	size_t	size_cache[MMU_PAGE_SIZES];
	size_t	*color_cache[MMU_PAGE_SIZES][MAX_MNODE_MRANGES];
	size_t	*old_color_array[MAX_MNODE_MRANGES];
	pgcnt_t	colors_per_szc[MMU_PAGE_SIZES];
	pcc_info_t **cands_cache;
	pcc_info_t *old_pi, *pi;
	pgcnt_t *pgcntp;
	int nr, old_nranges, mrange, nranges = MNODE_RANGE_CNT(mnode);
	int cands_cache_nranges;
	int old_maxmrange, new_maxmrange;
	int rc = 0;

	cands_cache = kmem_zalloc(sizeof (pcc_info_t *) * NPC_MUTEX *
	    MMU_PAGE_SIZES, KM_NOSLEEP);
	if (cands_cache == NULL)
		return (ENOMEM);

	i = -1;
	HPM_COUNTERS_LIMITS(mnode, physbase, physmax, i);

	newbase = physbase & ~PC_BASE_ALIGN_MASK;
	npgs = roundup(physmax, PC_BASE_ALIGN) - newbase;

	/* prepare to free non-null pointers on the way out */
	cands_cache_nranges = nranges;
	bzero(ctr_cache, sizeof (ctr_cache));
	bzero(color_cache, sizeof (color_cache));

	/*
	 * We need to determine how many page colors there are for each
	 * page size in order to allocate memory for any color specific
	 * arrays.
	 */
	for (r = 0; r < mmu_page_sizes; r++) {
		colors_per_szc[r] = PAGE_GET_PAGECOLORS(r);
	}

	/*
	 * Preallocate all of the new hpm_counters arrays as we can't
	 * hold the page_ctrs_rwlock as a writer and allocate memory.
	 * If we can't allocate all of the arrays, undo our work so far
	 * and return failure.
	 */
	for (r = 1; r < mmu_page_sizes; r++) {
		pcsz = npgs >> PAGE_BSZS_SHIFT(r);
		size_cache[r] = pcsz;
		ctr_cache[r] = kmem_zalloc(pcsz *
		    sizeof (hpmctr_t), KM_NOSLEEP);
		if (ctr_cache[r] == NULL) {
			rc = ENOMEM;
			goto cleanup;
		}
	}

	/*
	 * Preallocate all of the new color current arrays as we can't
	 * hold the page_ctrs_rwlock as a writer and allocate memory.
	 * If we can't allocate all of the arrays, undo our work so far
	 * and return failure.
	 */
	for (r = 1; r < mmu_page_sizes; r++) {
		for (mrange = 0; mrange < nranges; mrange++) {
			color_cache[r][mrange] = kmem_zalloc(sizeof (size_t) *
			    colors_per_szc[r], KM_NOSLEEP);
			if (color_cache[r][mrange] == NULL) {
				rc = ENOMEM;
				goto cleanup;
			}
		}
	}

	/*
	 * Preallocate all of the new pcc_info_t arrays as we can't
	 * hold the page_ctrs_rwlock as a writer and allocate memory.
	 * If we can't allocate all of the arrays, undo our work so far
	 * and return failure.
	 */
	for (r = 1; r < mmu_page_sizes; r++) {
		for (i = 0; i < NPC_MUTEX; i++) {
			pi = kmem_zalloc(nranges * sizeof (pcc_info_t),
			    KM_NOSLEEP);
			if (pi == NULL) {
				rc = ENOMEM;
				goto cleanup;
			}
			cands_cache[i * MMU_PAGE_SIZES + r] = pi;

			for (mrange = 0; mrange < nranges; mrange++, pi++) {
				pgcntp = kmem_zalloc(colors_per_szc[r] *
				    sizeof (pgcnt_t), KM_NOSLEEP);
				if (pgcntp == NULL) {
					rc = ENOMEM;
					goto cleanup;
				}
				pi->pcc_color_free = pgcntp;
			}
		}
	}

	/*
	 * Grab the write lock to prevent others from walking these arrays
	 * while we are modifying them.
	 */
	PAGE_CTRS_WRITE_LOCK(mnode);

	old_nranges = mnode_nranges[mnode];
	cands_cache_nranges = old_nranges;
	mnode_nranges[mnode] = nranges;
	old_maxmrange = mnode_maxmrange[mnode];
	mnode_maxmrange[mnode] = MNODE_MAX_MRANGE(mnode);
	new_maxmrange = mnode_maxmrange[mnode];

	for (r = 1; r < mmu_page_sizes; r++) {
		PAGE_COUNTERS_SHIFT(mnode, r) = PAGE_BSZS_SHIFT(r);
		old_ctr = PAGE_COUNTERS_COUNTERS(mnode, r);
		old_csz = PAGE_COUNTERS_ENTRIES(mnode, r);
		oldbase = PAGE_COUNTERS_BASE(mnode, r);
		old_npgs = old_csz << PAGE_COUNTERS_SHIFT(mnode, r);
		for (mrange = 0; mrange < MAX_MNODE_MRANGES; mrange++) {
			old_color_array[mrange] =
			    PAGE_COUNTERS_CURRENT_COLOR_ARRAY(mnode,
			    r, mrange);
		}

		pcsz = npgs >> PAGE_COUNTERS_SHIFT(mnode, r);
		new_ctr = ctr_cache[r];
		ctr_cache[r] = NULL;
		if (old_ctr != NULL &&
		    (oldbase + old_npgs > newbase) &&
		    (newbase + npgs > oldbase)) {
			/*
			 * Map the intersection of the old and new
			 * counters into the new array.
			 */
			size_t offset;
			if (newbase > oldbase) {
				offset = (newbase - oldbase) >>
				    PAGE_COUNTERS_SHIFT(mnode, r);
				bcopy(old_ctr + offset, new_ctr,
				    MIN(pcsz, (old_csz - offset)) *
				    sizeof (hpmctr_t));
			} else {
				offset = (oldbase - newbase) >>
				    PAGE_COUNTERS_SHIFT(mnode, r);
				bcopy(old_ctr, new_ctr + offset,
				    MIN(pcsz - offset, old_csz) *
				    sizeof (hpmctr_t));
			}
		}

		PAGE_COUNTERS_COUNTERS(mnode, r) = new_ctr;
		PAGE_COUNTERS_ENTRIES(mnode, r) = pcsz;
		PAGE_COUNTERS_BASE(mnode, r) = newbase;

		/* update shared hpm_counters in other mnodes */
		if (interleaved_mnodes) {
			for (i = 0; i < max_mem_nodes; i++) {
				if (i == mnode)
					continue;
				if (mem_node_config[i].exists == 0)
					continue;
				ASSERT(PAGE_COUNTERS_COUNTERS(i, r) == old_ctr);
				PAGE_COUNTERS_COUNTERS(i, r) = new_ctr;
				PAGE_COUNTERS_ENTRIES(i, r) = pcsz;
				PAGE_COUNTERS_BASE(i, r) = newbase;
			}
		}

		for (mrange = 0; mrange < MAX_MNODE_MRANGES; mrange++) {
			PAGE_COUNTERS_CURRENT_COLOR_ARRAY(mnode, r, mrange) =
			    color_cache[r][mrange];
			color_cache[r][mrange] = NULL;
		}
		/*
		 * for now, just reset on these events as it's probably
		 * not worthwhile to try and optimize this.
		 */
		for (i = 0; i < colors_per_szc[r]; i++) {
			uint_t color_mask = colors_per_szc[r] - 1;
			int mlo = interleaved_mnodes ? 0 : mnode;
			int mhi = interleaved_mnodes ? max_mem_nodes :
			    (mnode + 1);
			int m;
			pfn_t  pfnum = newbase;
			size_t idx;
			MEM_NODE_ITERATOR_DECL(it);

			for (m = mlo; m < mhi; m++) {
				if (mem_node_config[m].exists == 0)
					continue;
				MEM_NODE_ITERATOR_INIT(pfnum, m, r, &it);
				if (pfnum == (pfn_t)-1) {
					idx = 0;
				} else {
					PAGE_NEXT_PFN_FOR_COLOR(pfnum, r, i,
					    color_mask, color_mask, &it);
					idx = PNUM_TO_IDX(m, r, pfnum);
					idx = (idx < pcsz) ? idx : 0;
				}
				for (mrange = 0; mrange < nranges; mrange++) {
					PAGE_COUNTERS_CURRENT_COLOR(m,
					    r, i, mrange) = idx;
				}
			}
		}

		/* cache info for freeing out of the critical path */
		if ((caddr_t)old_ctr >= kernelheap &&
		    (caddr_t)old_ctr < ekernelheap) {
			ctr_cache[r] = old_ctr;
			size_cache[r] = old_csz;
		}
		for (mrange = 0; mrange < MAX_MNODE_MRANGES; mrange++) {
			size_t *tmp = old_color_array[mrange];
			if ((caddr_t)tmp >= kernelheap &&
			    (caddr_t)tmp < ekernelheap) {
				color_cache[r][mrange] = tmp;
			}
		}
		/*
		 * Verify that PNUM_TO_IDX and IDX_TO_PNUM
		 * satisfy the identity requirement.
		 * We should be able to go from one to the other
		 * and get consistent values.
		 */
		ASSERT(PNUM_TO_IDX(mnode, r,
		    (IDX_TO_PNUM(mnode, r, 0))) == 0);
		ASSERT(IDX_TO_PNUM(mnode, r,
		    (PNUM_TO_IDX(mnode, r, newbase))) == newbase);

		/* pcc_info_t and pcc_color_free */
		for (i = 0; i < NPC_MUTEX; i++) {
			pcc_info_t *epi;
			pcc_info_t *eold_pi;

			pi = cands_cache[i * MMU_PAGE_SIZES + r];
			old_pi = page_ctrs_cands[i][r][mnode];
			page_ctrs_cands[i][r][mnode] = pi;
			cands_cache[i * MMU_PAGE_SIZES + r] = old_pi;

			/* preserve old pcc_color_free values, if any */
			if (old_pi == NULL)
				continue;

			/*
			 * when/if x86 does DR, must account for
			 * possible change in range index when
			 * preserving pcc_info
			 */
			epi = &pi[nranges];
			eold_pi = &old_pi[old_nranges];
			if (new_maxmrange > old_maxmrange) {
				pi += new_maxmrange - old_maxmrange;
			} else if (new_maxmrange < old_maxmrange) {
				old_pi += old_maxmrange - new_maxmrange;
			}
			for (; pi < epi && old_pi < eold_pi; pi++, old_pi++) {
				pcc_info_t tmp = *pi;
				*pi = *old_pi;
				*old_pi = tmp;
			}
		}
	}
	PAGE_CTRS_WRITE_UNLOCK(mnode);

	/*
	 * Now that we have dropped the write lock, it is safe to free all
	 * of the memory we have cached above.
	 * We come through here to free memory when pre-alloc fails, and also
	 * to free old pointers which were recorded while locked.
	 */
cleanup:
	for (r = 1; r < mmu_page_sizes; r++) {
		if (ctr_cache[r] != NULL) {
			kmem_free(ctr_cache[r],
			    size_cache[r] * sizeof (hpmctr_t));
		}
		for (mrange = 0; mrange < MAX_MNODE_MRANGES; mrange++) {
			if (color_cache[r][mrange] != NULL) {
				kmem_free(color_cache[r][mrange],
				    colors_per_szc[r] * sizeof (size_t));
			}
		}
		for (i = 0; i < NPC_MUTEX; i++) {
			pi = cands_cache[i * MMU_PAGE_SIZES + r];
			if (pi == NULL)
				continue;
			nr = cands_cache_nranges;
			for (mrange = 0; mrange < nr; mrange++, pi++) {
				pgcntp = pi->pcc_color_free;
				if (pgcntp == NULL)
					continue;
				if ((caddr_t)pgcntp >= kernelheap &&
				    (caddr_t)pgcntp < ekernelheap) {
					kmem_free(pgcntp,
					    colors_per_szc[r] *
					    sizeof (pgcnt_t));
				}
			}
			pi = cands_cache[i * MMU_PAGE_SIZES + r];
			if ((caddr_t)pi >= kernelheap &&
			    (caddr_t)pi < ekernelheap) {
				kmem_free(pi, nr * sizeof (pcc_info_t));
			}
		}
	}

	kmem_free(cands_cache,
	    sizeof (pcc_info_t *) * NPC_MUTEX * MMU_PAGE_SIZES);
	return (rc);
}


#ifdef DEBUG

/*
 * confirm pp is a large page corresponding to szc
 */
void
chk_lpg(page_t *pp, uchar_t szc)
{
	spgcnt_t npgs = page_get_pagecnt(pp->p_szc);
	uint_t noreloc;

	if (npgs == 1) {
		ASSERT(pp->p_szc == 0);
		ASSERT(pp->p_next == pp);
		ASSERT(pp->p_prev == pp);
		return;
	}

	ASSERT(pp->p_vpnext == pp || pp->p_vpnext == NULL);
	ASSERT(pp->p_vpprev == pp || pp->p_vpprev == NULL);

	ASSERT(IS_P2ALIGNED(pp->p_pagenum, npgs));
	ASSERT(pp->p_pagenum == (pp->p_next->p_pagenum - 1));
	ASSERT(pp->p_prev->p_pagenum == (pp->p_pagenum + (npgs - 1)));
	ASSERT(pp->p_prev == (pp + (npgs - 1)));

	/*
	 * Check list of pages.
	 */
	noreloc = PP_ISNORELOC(pp);
	while (npgs--) {
		if (npgs != 0) {
			ASSERT(pp->p_pagenum == pp->p_next->p_pagenum - 1);
			ASSERT(pp->p_next == (pp + 1));
		}
		ASSERT(pp->p_szc == szc);
		ASSERT(PP_ISFREE(pp));
		ASSERT(PP_ISAGED(pp));
		ASSERT(pp->p_vpnext == pp || pp->p_vpnext == NULL);
		ASSERT(pp->p_vpprev == pp || pp->p_vpprev == NULL);
		ASSERT(pp->p_vnode == NULL);
		ASSERT(PP_ISNORELOC(pp) == noreloc);

		pp = pp->p_next;
	}
}
#endif /* DEBUG */

void
page_freelist_lock(int mnode)
{
	int i;
	for (i = 0; i < NPC_MUTEX; i++) {
		mutex_enter(FPC_MUTEX(mnode, i));
		mutex_enter(CPC_MUTEX(mnode, i));
	}
}

void
page_freelist_unlock(int mnode)
{
	int i;
	for (i = 0; i < NPC_MUTEX; i++) {
		mutex_exit(FPC_MUTEX(mnode, i));
		mutex_exit(CPC_MUTEX(mnode, i));
	}
}
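
/*
 * Usage sketch (editor's example): promotion and demotion of free pages
 * happen with the whole freelist of an mnode locked, e.g.
 *
 *	page_freelist_lock(mnode);
 *	(void) page_demote(mnode, PFN_BASE(pfn, szc), 0, szc, 0,
 *	    PC_NO_COLOR, PC_FREE);
 *	page_freelist_unlock(mnode);
 *
 * as done by page_promote_size() and page_list_sub() below. Because both
 * functions take every FPC/CPC mutex in the same order, the pair is safe
 * against concurrent callers.
 */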
/*
 * add pp to the specified page list. Defaults to head of the page list
 * unless PG_LIST_TAIL is specified.
 */
void
page_list_add(page_t *pp, int flags)
{
	page_t		**ppp;
	kmutex_t	*pcm;
	uint_t		bin, mtype;
	int		mnode;

	ASSERT(PAGE_EXCL(pp) || (flags & PG_LIST_ISINIT));
	ASSERT(PP_ISFREE(pp));
	ASSERT(!hat_page_is_mapped(pp));
	ASSERT(hat_page_getshare(pp) == 0);

	/*
	 * Large pages should be freed via page_list_add_pages().
	 */
	ASSERT(pp->p_szc == 0);

	/*
	 * Don't need to lock the freelist first here
	 * because the page isn't on the freelist yet.
	 * This means p_szc can't change on us.
	 */

	bin = PP_2_BIN(pp);
	mnode = PP_2_MEM_NODE(pp);
	mtype = PP_2_MTYPE(pp);

	if (flags & PG_LIST_ISINIT) {
		/*
		 * PG_LIST_ISINIT is set during system startup (i.e. single
		 * threaded), add a page to the free list and add to the
		 * free region counters w/o any locking
		 */
		ppp = &PAGE_FREELISTS(mnode, 0, bin, mtype);

		/* inline version of page_add() */
		if (*ppp != NULL) {
			pp->p_next = *ppp;
			pp->p_prev = (*ppp)->p_prev;
			(*ppp)->p_prev = pp;
			pp->p_prev->p_next = pp;
		} else
			*ppp = pp;

		page_ctr_add_internal(mnode, mtype, pp, flags);
		VM_STAT_ADD(vmm_vmstats.pladd_free[0]);
	} else {
		pcm = PC_BIN_MUTEX(mnode, bin, flags);

		if (flags & PG_FREE_LIST) {
			VM_STAT_ADD(vmm_vmstats.pladd_free[0]);
			ASSERT(PP_ISAGED(pp));
			ppp = &PAGE_FREELISTS(mnode, 0, bin, mtype);

		} else {
			VM_STAT_ADD(vmm_vmstats.pladd_cache);
			ASSERT(pp->p_vnode);
			ASSERT((pp->p_offset & PAGEOFFSET) == 0);
			ppp = &PAGE_CACHELISTS(mnode, bin, mtype);
		}
		mutex_enter(pcm);
		page_add(ppp, pp);

		if (flags & PG_LIST_TAIL)
			*ppp = (*ppp)->p_next;
		/*
		 * Add counters before releasing pcm mutex to avoid a race with
		 * page_freelist_coalesce and page_freelist_split.
		 */
		page_ctr_add(mnode, mtype, pp, flags);
		mutex_exit(pcm);
	}


#if defined(__sparc)
	if (PP_ISNORELOC(pp)) {
		kcage_freemem_add(1);
	}
#endif
	/*
	 * It is up to the caller to unlock the page!
	 */
	ASSERT(PAGE_EXCL(pp) || (flags & PG_LIST_ISINIT));
}
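
/*
 * Usage sketch (editor's example): a PAGESIZE page that has just been
 * freed and is still held SE_EXCL might be queued at the tail of its
 * freelist with
 *
 *	page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
 *
 * while a page still associated with a vnode goes on the cache list via
 * PG_CACHE_LIST. PG_LIST_ISINIT is reserved for single threaded startup,
 * where the lock-free path above is safe.
 */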
#ifdef __sparc
/*
 * This routine is only used by kcage_init during system startup.
 * It performs the function of page_list_sub/PP_SETNORELOC/page_list_add
 * without the overhead of taking locks and updating counters.
 */
void
page_list_noreloc_startup(page_t *pp)
{
	page_t		**ppp;
	uint_t		bin;
	int		mnode;
	int		mtype;
	int		flags = 0;

	/*
	 * If this is a large page on the freelist then
	 * break it up into smaller pages.
	 */
	if (pp->p_szc != 0)
		page_boot_demote(pp);

	/*
	 * Get list page is currently on.
	 */
	bin = PP_2_BIN(pp);
	mnode = PP_2_MEM_NODE(pp);
	mtype = PP_2_MTYPE(pp);
	ASSERT(mtype == MTYPE_RELOC);
	ASSERT(pp->p_szc == 0);

	if (PP_ISAGED(pp)) {
		ppp = &PAGE_FREELISTS(mnode, 0, bin, mtype);
		flags |= PG_FREE_LIST;
	} else {
		ppp = &PAGE_CACHELISTS(mnode, bin, mtype);
		flags |= PG_CACHE_LIST;
	}

	ASSERT(*ppp != NULL);

	/*
	 * Delete page from current list.
	 */
	if (*ppp == pp)
		*ppp = pp->p_next;		/* go to next page */
	if (*ppp == pp) {
		*ppp = NULL;			/* page list is gone */
	} else {
		pp->p_prev->p_next = pp->p_next;
		pp->p_next->p_prev = pp->p_prev;
	}

	/*
	 * Decrement page counters
	 */
	page_ctr_sub_internal(mnode, mtype, pp, flags);

	/*
	 * Set no reloc for cage initted pages.
	 */
	PP_SETNORELOC(pp);

	mtype = PP_2_MTYPE(pp);
	ASSERT(mtype == MTYPE_NORELOC);

	/*
	 * Get new list for page.
	 */
	if (PP_ISAGED(pp)) {
		ppp = &PAGE_FREELISTS(mnode, 0, bin, mtype);
	} else {
		ppp = &PAGE_CACHELISTS(mnode, bin, mtype);
	}

	/*
	 * Insert page on new list.
	 */
	if (*ppp == NULL) {
		*ppp = pp;
		pp->p_next = pp->p_prev = pp;
	} else {
		pp->p_next = *ppp;
		pp->p_prev = (*ppp)->p_prev;
		(*ppp)->p_prev = pp;
		pp->p_prev->p_next = pp;
	}

	/*
	 * Increment page counters
	 */
	page_ctr_add_internal(mnode, mtype, pp, flags);

	/*
	 * Update cage freemem counter
	 */
	atomic_add_long(&kcage_freemem, 1);
}
#else /* __sparc */

/* ARGSUSED */
void
page_list_noreloc_startup(page_t *pp)
{
	panic("page_list_noreloc_startup: should be here only for sparc");
}
#endif

void
page_list_add_pages(page_t *pp, int flags)
{
	kmutex_t *pcm;
	pgcnt_t	pgcnt;
	uint_t	bin, mtype, i;
	int	mnode;

	/* default to freelist/head */
	ASSERT((flags & (PG_CACHE_LIST | PG_LIST_TAIL)) == 0);

	CHK_LPG(pp, pp->p_szc);
	VM_STAT_ADD(vmm_vmstats.pladd_free[pp->p_szc]);

	bin = PP_2_BIN(pp);
	mnode = PP_2_MEM_NODE(pp);
	mtype = PP_2_MTYPE(pp);

	if (flags & PG_LIST_ISINIT) {
		ASSERT(pp->p_szc == mmu_page_sizes - 1);
		page_vpadd(&PAGE_FREELISTS(mnode, pp->p_szc, bin, mtype), pp);
		ASSERT(!PP_ISNORELOC(pp));
		PLCNT_INCR(pp, mnode, mtype, pp->p_szc, flags);
	} else {

		ASSERT(pp->p_szc != 0 && pp->p_szc < mmu_page_sizes);

		pcm = PC_BIN_MUTEX(mnode, bin, PG_FREE_LIST);

		mutex_enter(pcm);
		page_vpadd(&PAGE_FREELISTS(mnode, pp->p_szc, bin, mtype), pp);
		page_ctr_add(mnode, mtype, pp, PG_FREE_LIST);
		mutex_exit(pcm);

		pgcnt = page_get_pagecnt(pp->p_szc);
#if defined(__sparc)
		if (PP_ISNORELOC(pp))
			kcage_freemem_add(pgcnt);
#endif
		for (i = 0; i < pgcnt; i++, pp++)
			page_unlock_nocapture(pp);
	}
}

/*
 * During boot, need to demote a large page to base
 * pagesize pages for seg_kmem for use in boot_alloc()
 */
void
page_boot_demote(page_t *pp)
{
	ASSERT(pp->p_szc != 0);
	ASSERT(PP_ISFREE(pp));
	ASSERT(PP_ISAGED(pp));

	(void) page_demote(PP_2_MEM_NODE(pp),
	    PFN_BASE(pp->p_pagenum, pp->p_szc), 0, pp->p_szc, 0, PC_NO_COLOR,
	    PC_FREE);

	ASSERT(PP_ISFREE(pp));
	ASSERT(PP_ISAGED(pp));
	ASSERT(pp->p_szc == 0);
}
/*
 * Take a particular page off of whatever freelist the page
 * is claimed to be on.
 *
 * NOTE: Only used for PAGESIZE pages.
 */
void
page_list_sub(page_t *pp, int flags)
{
	int		bin;
	uint_t		mtype;
	int		mnode;
	kmutex_t	*pcm;
	page_t		**ppp;

	ASSERT(PAGE_EXCL(pp));
	ASSERT(PP_ISFREE(pp));

	/*
	 * The p_szc field can only be changed by page_promote()
	 * and page_demote(). Only free pages can be promoted and
	 * demoted and the free list MUST be locked during these
	 * operations. So to prevent a race in page_list_sub()
	 * between computing which bin of the freelist lock to
	 * grab and actually grabbing the lock we check again that
	 * the bin we locked is still the correct one. Notice that
	 * the p_szc field could have actually changed on us but
	 * if the bin happens to still be the same we are safe.
	 */
try_again:
	bin = PP_2_BIN(pp);
	mnode = PP_2_MEM_NODE(pp);
	pcm = PC_BIN_MUTEX(mnode, bin, flags);
	mutex_enter(pcm);
	if (PP_2_BIN(pp) != bin) {
		mutex_exit(pcm);
		goto try_again;
	}
	mtype = PP_2_MTYPE(pp);

	if (flags & PG_FREE_LIST) {
		VM_STAT_ADD(vmm_vmstats.plsub_free[0]);
		ASSERT(PP_ISAGED(pp));
		ppp = &PAGE_FREELISTS(mnode, pp->p_szc, bin, mtype);
	} else {
		VM_STAT_ADD(vmm_vmstats.plsub_cache);
		ASSERT(!PP_ISAGED(pp));
		ppp = &PAGE_CACHELISTS(mnode, bin, mtype);
	}

	/*
	 * Common PAGESIZE case.
	 *
	 * Note that we locked the freelist. This prevents
	 * any page promotion/demotion operations. Therefore
	 * the p_szc will not change until we drop pcm mutex.
	 */
	if (pp->p_szc == 0) {
		page_sub(ppp, pp);
		/*
		 * Subtract counters before releasing pcm mutex
		 * to avoid race with page_freelist_coalesce.
		 */
		page_ctr_sub(mnode, mtype, pp, flags);
		mutex_exit(pcm);

#if defined(__sparc)
		if (PP_ISNORELOC(pp)) {
			kcage_freemem_sub(1);
		}
#endif
		return;
	}

	/*
	 * Large pages on the cache list are not supported.
	 */
	if (flags & PG_CACHE_LIST)
		panic("page_list_sub: large page on cachelist");

	/*
	 * Slow but rare.
	 *
	 * Somebody wants this particular page which is part
	 * of a large page. In this case we just demote the page
	 * if it's on the freelist.
	 *
	 * We have to drop pcm before locking the entire freelist.
	 * Once we have re-locked the freelist check to make sure
	 * the page hasn't already been demoted or completely
	 * freed.
	 */
	mutex_exit(pcm);
	page_freelist_lock(mnode);
	if (pp->p_szc != 0) {
		/*
		 * Large page is on freelist.
		 */
		(void) page_demote(mnode, PFN_BASE(pp->p_pagenum, pp->p_szc),
		    0, pp->p_szc, 0, PC_NO_COLOR, PC_FREE);
	}
	ASSERT(PP_ISFREE(pp));
	ASSERT(PP_ISAGED(pp));
	ASSERT(pp->p_szc == 0);

	/*
	 * Subtract counters before releasing pcm mutex
	 * to avoid race with page_freelist_coalesce.
	 */
	bin = PP_2_BIN(pp);
	mtype = PP_2_MTYPE(pp);
	ppp = &PAGE_FREELISTS(mnode, pp->p_szc, bin, mtype);

	page_sub(ppp, pp);
	page_ctr_sub(mnode, mtype, pp, flags);
	page_freelist_unlock(mnode);

#if defined(__sparc)
	if (PP_ISNORELOC(pp)) {
		kcage_freemem_sub(1);
	}
#endif
}
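
/*
 * Editor's sketch of the retry pattern used by page_list_sub() above and
 * page_list_sub_pages() below (illustrative only):
 *
 *	try_again:
 *		bin = PP_2_BIN(pp);
 *		pcm = PC_BIN_MUTEX(mnode, bin, flags);
 *		mutex_enter(pcm);
 *		if (PP_2_BIN(pp) != bin) {
 *			mutex_exit(pcm);
 *			goto try_again;
 *		}
 *
 * The bin is computed from p_szc before the bin mutex is held, so a
 * concurrent promote/demote can move the page in that window; once the
 * matching bin mutex is held, p_szc (and hence the bin) is stable. This
 * is cheaper than always taking the full freelist lock.
 */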
void
page_list_sub_pages(page_t *pp, uint_t szc)
{
	kmutex_t *pcm;
	uint_t	bin, mtype;
	int	mnode;

	ASSERT(PAGE_EXCL(pp));
	ASSERT(PP_ISFREE(pp));
	ASSERT(PP_ISAGED(pp));

	/*
	 * See comment in page_list_sub().
	 */
try_again:
	bin = PP_2_BIN(pp);
	mnode = PP_2_MEM_NODE(pp);
	pcm = PC_BIN_MUTEX(mnode, bin, PG_FREE_LIST);
	mutex_enter(pcm);
	if (PP_2_BIN(pp) != bin) {
		mutex_exit(pcm);
		goto try_again;
	}

	/*
	 * If we're called with a page larger than szc or it got
	 * promoted above szc before we locked the freelist then
	 * drop pcm and re-lock entire freelist. If page still larger
	 * than szc then demote it.
	 */
	if (pp->p_szc > szc) {
		mutex_exit(pcm);
		pcm = NULL;
		page_freelist_lock(mnode);
		if (pp->p_szc > szc) {
			VM_STAT_ADD(vmm_vmstats.plsubpages_szcbig);
			(void) page_demote(mnode,
			    PFN_BASE(pp->p_pagenum, pp->p_szc), 0,
			    pp->p_szc, szc, PC_NO_COLOR, PC_FREE);
		}
		bin = PP_2_BIN(pp);
	}
	ASSERT(PP_ISFREE(pp));
	ASSERT(PP_ISAGED(pp));
	ASSERT(pp->p_szc <= szc);
	ASSERT(pp == PP_PAGEROOT(pp));

	VM_STAT_ADD(vmm_vmstats.plsub_free[pp->p_szc]);

	mtype = PP_2_MTYPE(pp);
	if (pp->p_szc != 0) {
		page_vpsub(&PAGE_FREELISTS(mnode, pp->p_szc, bin, mtype), pp);
		CHK_LPG(pp, pp->p_szc);
	} else {
		VM_STAT_ADD(vmm_vmstats.plsubpages_szc0);
		page_sub(&PAGE_FREELISTS(mnode, pp->p_szc, bin, mtype), pp);
	}
	page_ctr_sub(mnode, mtype, pp, PG_FREE_LIST);

	if (pcm != NULL) {
		mutex_exit(pcm);
	} else {
		page_freelist_unlock(mnode);
	}

#if defined(__sparc)
	if (PP_ISNORELOC(pp)) {
		pgcnt_t	pgcnt;

		pgcnt = page_get_pagecnt(pp->p_szc);
		kcage_freemem_sub(pgcnt);
	}
#endif
}

/*
 * Add the page to the front of a linked list of pages
 * using the p_next & p_prev pointers for the list.
 * The caller is responsible for protecting the list pointers.
 */
void
mach_page_add(page_t **ppp, page_t *pp)
{
	if (*ppp == NULL) {
		pp->p_next = pp->p_prev = pp;
	} else {
		pp->p_next = *ppp;
		pp->p_prev = (*ppp)->p_prev;
		(*ppp)->p_prev = pp;
		pp->p_prev->p_next = pp;
	}
	*ppp = pp;
}

/*
 * Remove this page from a linked list of pages
 * using the p_next & p_prev pointers for the list.
 *
 * The caller is responsible for protecting the list pointers.
 */
void
mach_page_sub(page_t **ppp, page_t *pp)
{
	ASSERT(PP_ISFREE(pp));

	if (*ppp == NULL || pp == NULL)
		panic("mach_page_sub");

	if (*ppp == pp)
		*ppp = pp->p_next;		/* go to next page */

	if (*ppp == pp)
		*ppp = NULL;			/* page list is gone */
	else {
		pp->p_prev->p_next = pp->p_next;
		pp->p_next->p_prev = pp->p_prev;
	}
	pp->p_prev = pp->p_next = pp;		/* make pp a list of one */
}
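
/*
 * Editor's illustration of the invariant mach_page_add()/mach_page_sub()
 * maintain: the lists are circular and doubly linked, so a one element
 * list points at itself. A hypothetical three page list headed by *ppp:
 *
 *	*ppp -> A <-> B <-> C
 *		^___________^	(C->p_next == A, A->p_prev == C)
 *
 * mach_page_sub(ppp, A) advances *ppp to B and leaves A as a list of one
 * (A->p_next == A->p_prev == A), matching the last line of
 * mach_page_sub() above.
 */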
/*
 * Routine fsflush uses to gradually coalesce the free list into larger pages.
 */
void
page_promote_size(page_t *pp, uint_t cur_szc)
{
	pfn_t pfn;
	int mnode;
	int idx;
	int new_szc = cur_szc + 1;
	int full = FULL_REGION_CNT(new_szc);

	pfn = page_pptonum(pp);
	mnode = PFN_2_MEM_NODE(pfn);

	page_freelist_lock(mnode);

	idx = PNUM_TO_IDX(mnode, new_szc, pfn);
	if (PAGE_COUNTERS(mnode, new_szc, idx) == full)
		(void) page_promote(mnode, pfn, new_szc, PC_FREE, PC_MTYPE_ANY);

	page_freelist_unlock(mnode);
}

static uint_t page_promote_err;
static uint_t page_promote_noreloc_err;

/*
 * Create a single larger page (of szc new_szc) from smaller contiguous pages
 * for the given mnode starting at pfnum. Pages involved are on the freelist
 * before the call and may be returned to the caller if requested, otherwise
 * they will be placed back on the freelist.
 * If flags is PC_ALLOC, then the large page will be returned to the user in
 * a state which is consistent with a page being taken off the freelist. If
 * we failed to lock the new large page, then we will return NULL to the
 * caller and put the large page on the freelist instead.
 * If flags is PC_FREE, then the large page will be placed on the freelist,
 * and NULL will be returned.
 * The caller is responsible for locking the freelist as well as any other
 * accounting which needs to be done for a returned page.
 *
 * RFE: For performance pass in pp instead of pfnum so
 *	we can avoid excessive calls to page_numtopp_nolock().
 *	This would depend on an assumption that all contiguous
 *	pages are in the same memseg so we can just add/dec
 *	our pp.
 *
 * Lock ordering:
 *
 *	There is a potential but rare deadlock situation
 *	for page promotion and demotion operations. The problem
 *	is there are two paths into the freelist manager and
 *	they have different lock orders:
 *
 *	page_create()
 *		lock freelist
 *		page_lock(EXCL)
 *		unlock freelist
 *		return
 *		caller drops page_lock
 *
 *	page_free() and page_reclaim()
 *		caller grabs page_lock(EXCL)
 *
 *		lock freelist
 *		unlock freelist
 *		drop page_lock
 *
 *	What prevents a thread in page_create() from deadlocking
 *	with a thread freeing or reclaiming the same page is the
 *	page_trylock() in page_get_freelist(). If the trylock fails
 *	it skips the page.
 *
 *	The lock ordering for promotion and demotion is the same as
 *	for page_create(). Since the same deadlock could occur during
 *	page promotion and freeing or reclaiming of a page on the
 *	cache list we might have to fail the operation and undo what
 *	we have done so far. Again this is rare.
 */
page_t *
page_promote(int mnode, pfn_t pfnum, uchar_t new_szc, int flags, int mtype)
{
	page_t		*pp, *pplist, *tpp, *start_pp;
	pgcnt_t		new_npgs, npgs;
	uint_t		bin;
	pgcnt_t		tmpnpgs, pages_left;
	uint_t		noreloc;
	int		which_list;
	ulong_t		index;
	kmutex_t	*phm;

	/*
	 * General algorithm:
	 * Find the starting page
	 * Walk each page struct removing it from the freelist,
	 * and linking it to all the other pages removed.
	 * Once all pages are off the freelist,
	 * walk the list, modifying p_szc to new_szc and what
	 * ever other info needs to be done to create a large free page.
	 * According to the flags, either return the page or put it
	 * on the freelist.
	 */

	start_pp = page_numtopp_nolock(pfnum);
	ASSERT(start_pp && (start_pp->p_pagenum == pfnum));
	new_npgs = page_get_pagecnt(new_szc);
	ASSERT(IS_P2ALIGNED(pfnum, new_npgs));

	/* don't return page of the wrong mtype */
	if (mtype != PC_MTYPE_ANY && mtype != PP_2_MTYPE(start_pp))
		return (NULL);

	/*
	 * Loop through smaller pages to confirm that all pages
	 * give the same result for PP_ISNORELOC().
	 * We can check this reliably here as the protocol for setting
	 * P_NORELOC requires pages to be taken off the free list first.
	 */
	noreloc = PP_ISNORELOC(start_pp);
	for (pp = start_pp + new_npgs; --pp > start_pp; ) {
		if (noreloc != PP_ISNORELOC(pp)) {
			page_promote_noreloc_err++;
			page_promote_err++;
			return (NULL);
		}
	}

	pages_left = new_npgs;
	pplist = NULL;
	pp = start_pp;

	/*
	 * Loop around coalescing the smaller pages into a big page.
	 */
*/ 1966 while (pages_left) { 1967 /* 1968 * Remove from the freelist. 1969 */ 1970 ASSERT(PP_ISFREE(pp)); 1971 bin = PP_2_BIN(pp); 1972 ASSERT(mnode == PP_2_MEM_NODE(pp)); 1973 mtype = PP_2_MTYPE(pp); 1974 if (PP_ISAGED(pp)) { 1975 1976 /* 1977 * PG_FREE_LIST 1978 */ 1979 if (pp->p_szc) { 1980 page_vpsub(&PAGE_FREELISTS(mnode, 1981 pp->p_szc, bin, mtype), pp); 1982 } else { 1983 mach_page_sub(&PAGE_FREELISTS(mnode, 0, 1984 bin, mtype), pp); 1985 } 1986 which_list = PG_FREE_LIST; 1987 } else { 1988 ASSERT(pp->p_szc == 0); 1989 1990 /* 1991 * PG_CACHE_LIST 1992 * 1993 * Since this page comes from the 1994 * cachelist, we must destroy the 1995 * vnode association. 1996 */ 1997 if (!page_trylock(pp, SE_EXCL)) { 1998 goto fail_promote; 1999 } 2000 2001 /* 2002 * We need to be careful not to deadlock 2003 * with another thread in page_lookup(). 2004 * The page_lookup() thread could be holding 2005 * the same phm that we need if the two 2006 * pages happen to hash to the same phm lock. 2007 * At this point we have locked the entire 2008 * freelist and page_lookup() could be trying 2009 * to grab a freelist lock. 2010 */ 2011 index = PAGE_HASH_FUNC(pp->p_vnode, pp->p_offset); 2012 phm = PAGE_HASH_MUTEX(index); 2013 if (!mutex_tryenter(phm)) { 2014 page_unlock_nocapture(pp); 2015 goto fail_promote; 2016 } 2017 2018 mach_page_sub(&PAGE_CACHELISTS(mnode, bin, mtype), pp); 2019 page_hashout(pp, phm); 2020 mutex_exit(phm); 2021 PP_SETAGED(pp); 2022 page_unlock_nocapture(pp); 2023 which_list = PG_CACHE_LIST; 2024 } 2025 page_ctr_sub(mnode, mtype, pp, which_list); 2026 2027 /* 2028 * Concatenate the smaller page(s) onto 2029 * the large page list. 2030 */ 2031 tmpnpgs = npgs = page_get_pagecnt(pp->p_szc); 2032 pages_left -= npgs; 2033 tpp = pp; 2034 while (npgs--) { 2035 tpp->p_szc = new_szc; 2036 tpp = tpp->p_next; 2037 } 2038 page_list_concat(&pplist, &pp); 2039 pp += tmpnpgs; 2040 } 2041 CHK_LPG(pplist, new_szc); 2042 2043 /* 2044 * Return the page to the user, if requested, 2045 * in the properly locked state. 2046 */ 2047 if (flags == PC_ALLOC && (page_trylock_cons(pplist, SE_EXCL))) { 2048 return (pplist); 2049 } 2050 2051 /* 2052 * Otherwise place the new large page on the freelist. 2053 */ 2054 bin = PP_2_BIN(pplist); 2055 mnode = PP_2_MEM_NODE(pplist); 2056 mtype = PP_2_MTYPE(pplist); 2057 page_vpadd(&PAGE_FREELISTS(mnode, new_szc, bin, mtype), pplist); 2058 2059 page_ctr_add(mnode, mtype, pplist, PG_FREE_LIST); 2060 return (NULL); 2061 2062 fail_promote: 2063 /* 2064 * A thread must have still been freeing or 2065 * reclaiming the page on the cachelist. 2066 * To prevent a deadlock, undo what we have 2067 * done so far and return failure. This 2068 * situation can only happen while promoting 2069 * PAGESIZE pages. 2070 */ 2071 page_promote_err++; 2072 while (pplist) { 2073 pp = pplist; 2074 mach_page_sub(&pplist, pp); 2075 pp->p_szc = 0; 2076 bin = PP_2_BIN(pp); 2077 mtype = PP_2_MTYPE(pp); 2078 mach_page_add(&PAGE_FREELISTS(mnode, 0, bin, mtype), pp); 2079 page_ctr_add(mnode, mtype, pp, PG_FREE_LIST); 2080 } 2081 return (NULL); 2082 2083 } 2084 2085 /* 2086 * Break up a large page into smaller size pages. 2087 * Pages involved are on the freelist before the call and may 2088 * be returned to the caller if requested; otherwise they will 2089 * be placed back on the freelist. 2090 * The caller is responsible for locking the freelist as well as any other 2091 * accounting which needs to be done for a returned page.
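 * (Worked example, not in the original: with 8K base pages, demoting a 4M
 * page to new_szc == 0 frees its 512 8K constituents one at a time, while
 * demoting it to 64K pages instead uses page_list_break() to peel off
 * 8-page sublists.)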
2092 * If flags is not PC_ALLOC, the color argument is ignored; technically 2093 * any value may be passed in, but PC_NO_COLOR is the convention 2094 * and should be used for clarity's sake. 2095 * If a page is returned (PC_ALLOC), its pfn is < pfnmax whenever pfnmax is nonzero. 2096 */ 2097 page_t * 2098 page_demote(int mnode, pfn_t pfnum, pfn_t pfnmax, uchar_t cur_szc, 2099 uchar_t new_szc, int color, int flags) 2100 { 2101 page_t *pp, *pplist, *npplist; 2102 pgcnt_t npgs, n; 2103 uint_t bin; 2104 uint_t mtype; 2105 page_t *ret_pp = NULL; 2106 2107 ASSERT(cur_szc != 0); 2108 ASSERT(new_szc < cur_szc); 2109 2110 pplist = page_numtopp_nolock(pfnum); 2111 ASSERT(pplist != NULL); 2112 2113 ASSERT(pplist->p_szc == cur_szc); 2114 2115 bin = PP_2_BIN(pplist); 2116 ASSERT(mnode == PP_2_MEM_NODE(pplist)); 2117 mtype = PP_2_MTYPE(pplist); 2118 page_vpsub(&PAGE_FREELISTS(mnode, cur_szc, bin, mtype), pplist); 2119 2120 CHK_LPG(pplist, cur_szc); 2121 page_ctr_sub(mnode, mtype, pplist, PG_FREE_LIST); 2122 2123 /* 2124 * Number of PAGESIZE pages for smaller new_szc 2125 * page. 2126 */ 2127 npgs = page_get_pagecnt(new_szc); 2128 2129 while (pplist) { 2130 pp = pplist; 2131 2132 ASSERT(pp->p_szc == cur_szc); 2133 2134 /* 2135 * We break it up into either PAGESIZE pages or larger sublists. 2136 */ 2137 if (npgs == 1) { /* PAGESIZE case */ 2138 mach_page_sub(&pplist, pp); 2139 ASSERT(pp->p_szc == cur_szc); 2140 ASSERT(new_szc == 0); 2141 ASSERT(mnode == PP_2_MEM_NODE(pp)); 2142 pp->p_szc = new_szc; 2143 bin = PP_2_BIN(pp); 2144 if ((bin == color) && (flags == PC_ALLOC) && 2145 (ret_pp == NULL) && (pfnmax == 0 || 2146 pp->p_pagenum < pfnmax) && 2147 page_trylock_cons(pp, SE_EXCL)) { 2148 ret_pp = pp; 2149 } else { 2150 mtype = PP_2_MTYPE(pp); 2151 mach_page_add(&PAGE_FREELISTS(mnode, 0, bin, 2152 mtype), pp); 2153 page_ctr_add(mnode, mtype, pp, PG_FREE_LIST); 2154 } 2155 } else { 2156 page_t *try_to_return_this_page = NULL; 2157 int count = 0; 2158 2159 /* 2160 * Break down into smaller lists of pages. 2161 */ 2162 page_list_break(&pplist, &npplist, npgs); 2163 2164 pp = pplist; 2165 n = npgs; 2166 while (n--) { 2167 ASSERT(pp->p_szc == cur_szc); 2168 /* 2169 * Check whether all the pages in this list 2170 * fit the request criteria. 2171 */ 2172 if (pfnmax == 0 || pp->p_pagenum < pfnmax) { 2173 count++; 2174 } 2175 pp->p_szc = new_szc; 2176 pp = pp->p_next; 2177 } 2178 2179 if (count == npgs && 2180 (pfnmax == 0 || pp->p_pagenum < pfnmax)) { 2181 try_to_return_this_page = pp; 2182 } 2183 2184 CHK_LPG(pplist, new_szc); 2185 2186 bin = PP_2_BIN(pplist); 2187 if (try_to_return_this_page) 2188 ASSERT(mnode == 2189 PP_2_MEM_NODE(try_to_return_this_page)); 2190 if ((bin == color) && (flags == PC_ALLOC) && 2191 (ret_pp == NULL) && try_to_return_this_page && 2192 page_trylock_cons(try_to_return_this_page, 2193 SE_EXCL)) { 2194 ret_pp = try_to_return_this_page; 2195 } else { 2196 mtype = PP_2_MTYPE(pp); 2197 page_vpadd(&PAGE_FREELISTS(mnode, new_szc, 2198 bin, mtype), pplist); 2199 2200 page_ctr_add(mnode, mtype, pplist, 2201 PG_FREE_LIST); 2202 } 2203 pplist = npplist; 2204 } 2205 } 2206 return (ret_pp); 2207 } 2208 2209 int mpss_coalesce_disable = 0; 2210 2211 /* 2212 * Coalesce free pages into a page of the given szc and color if possible. 2213 * Return the pointer to the page created; otherwise, return NULL. 2214 * 2215 * If pfnhi is non-zero, search for large page with pfn range less than pfnhi.
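 * (Worked example, illustrative: with 16 page colors and ceq_mask == 0xc,
 * a request for color 4 is satisfied by any of colors 4 through 7, since
 * only the bits set in ceq_mask take part in the color comparison.)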
2216 */ 2217 page_t * 2218 page_freelist_coalesce(int mnode, uchar_t szc, uint_t color, uint_t ceq_mask, 2219 int mtype, pfn_t pfnhi) 2220 { 2221 int r = szc; /* region size */ 2222 int mrange; 2223 uint_t full, bin, color_mask, wrap = 0; 2224 pfn_t pfnum, lo, hi; 2225 size_t len, idx, idx0; 2226 pgcnt_t cands = 0, szcpgcnt = page_get_pagecnt(szc); 2227 page_t *ret_pp; 2228 MEM_NODE_ITERATOR_DECL(it); 2229 #if defined(__sparc) 2230 pfn_t pfnum0, nlo, nhi; 2231 #endif 2232 2233 if (mpss_coalesce_disable) { 2234 ASSERT(szc < MMU_PAGE_SIZES); 2235 VM_STAT_ADD(vmm_vmstats.page_ctrs_coalesce[szc][0]); 2236 return (NULL); 2237 } 2238 2239 ASSERT(szc < mmu_page_sizes); 2240 color_mask = PAGE_GET_PAGECOLORS(szc) - 1; 2241 ASSERT(ceq_mask <= color_mask); 2242 ASSERT(color <= color_mask); 2243 color &= ceq_mask; 2244 2245 /* Prevent page_counters dynamic memory from being freed */ 2246 rw_enter(&page_ctrs_rwlock[mnode], RW_READER); 2247 2248 mrange = MTYPE_2_MRANGE(mnode, mtype); 2249 ASSERT(mrange < mnode_nranges[mnode]); 2250 VM_STAT_ADD(vmm_vmstats.page_ctrs_coalesce[r][mrange]); 2251 2252 /* get pfn range for mtype */ 2253 len = PAGE_COUNTERS_ENTRIES(mnode, r); 2254 #if defined(__sparc) 2255 lo = PAGE_COUNTERS_BASE(mnode, r); 2256 hi = IDX_TO_PNUM(mnode, r, len); 2257 #else 2258 MNODETYPE_2_PFN(mnode, mtype, lo, hi); 2259 hi++; 2260 #endif 2261 2262 /* use lower limit if given */ 2263 if (pfnhi != PFNNULL && pfnhi < hi) 2264 hi = pfnhi; 2265 2266 /* round to szcpgcnt boundaries */ 2267 lo = P2ROUNDUP(lo, szcpgcnt); 2268 MEM_NODE_ITERATOR_INIT(lo, mnode, szc, &it); 2269 if (lo == (pfn_t)-1) { 2270 rw_exit(&page_ctrs_rwlock[mnode]); 2271 return (NULL); 2272 } 2273 hi = hi & ~(szcpgcnt - 1); 2274 2275 /* set lo to the closest pfn of the right color */ 2276 if (((PFN_2_COLOR(lo, szc, &it) ^ color) & ceq_mask) || 2277 (interleaved_mnodes && PFN_2_MEM_NODE(lo) != mnode)) { 2278 PAGE_NEXT_PFN_FOR_COLOR(lo, szc, color, ceq_mask, color_mask, 2279 &it); 2280 } 2281 2282 if (hi <= lo) { 2283 rw_exit(&page_ctrs_rwlock[mnode]); 2284 return (NULL); 2285 } 2286 2287 full = FULL_REGION_CNT(r); 2288 2289 /* calculate the number of page candidates and initial search index */ 2290 bin = color; 2291 idx0 = (size_t)(-1); 2292 do { 2293 pgcnt_t acand; 2294 2295 PGCTRS_CANDS_GETVALUECOLOR(mnode, mrange, r, bin, acand); 2296 if (acand) { 2297 idx = PAGE_COUNTERS_CURRENT_COLOR(mnode, 2298 r, bin, mrange); 2299 idx0 = MIN(idx0, idx); 2300 cands += acand; 2301 } 2302 bin = ADD_MASKED(bin, 1, ceq_mask, color_mask); 2303 } while (bin != color); 2304 2305 if (cands == 0) { 2306 VM_STAT_ADD(vmm_vmstats.page_ctrs_cands_skip[r][mrange]); 2307 rw_exit(&page_ctrs_rwlock[mnode]); 2308 return (NULL); 2309 } 2310 2311 pfnum = IDX_TO_PNUM(mnode, r, idx0); 2312 if (pfnum < lo || pfnum >= hi) { 2313 pfnum = lo; 2314 } else { 2315 MEM_NODE_ITERATOR_INIT(pfnum, mnode, szc, &it); 2316 if (pfnum == (pfn_t)-1) { 2317 pfnum = lo; 2318 MEM_NODE_ITERATOR_INIT(pfnum, mnode, szc, &it); 2319 ASSERT(pfnum != (pfn_t)-1); 2320 } else if ((PFN_2_COLOR(pfnum, szc, &it) ^ color) & ceq_mask || 2321 (interleaved_mnodes && PFN_2_MEM_NODE(pfnum) != mnode)) { 2322 /* invalid color, get the closest correct pfn */ 2323 PAGE_NEXT_PFN_FOR_COLOR(pfnum, szc, color, ceq_mask, 2324 color_mask, &it); 2325 if (pfnum >= hi) { 2326 pfnum = lo; 2327 MEM_NODE_ITERATOR_INIT(pfnum, mnode, szc, &it); 2328 } 2329 } 2330 } 2331 2332 /* set starting index */ 2333 idx0 = PNUM_TO_IDX(mnode, r, pfnum); 2334 ASSERT(idx0 < len); 2335 2336 #if defined(__sparc) 2337 pfnum0 = pfnum; 
/* page corresponding to idx0 */ 2338 nhi = 0; /* search kcage ranges */ 2339 #endif 2340 2341 for (idx = idx0; wrap == 0 || (idx < idx0 && wrap < 2); ) { 2342 2343 #if defined(__sparc) 2344 /* 2345 * Find lowest intersection of kcage ranges and mnode. 2346 * MTYPE_NORELOC means look in the cage, otherwise outside. 2347 */ 2348 if (nhi <= pfnum) { 2349 if (kcage_next_range(mtype == MTYPE_NORELOC, pfnum, 2350 (wrap == 0 ? hi : pfnum0), &nlo, &nhi)) 2351 goto wrapit; 2352 2353 /* jump to the next page in the range */ 2354 if (pfnum < nlo) { 2355 pfnum = P2ROUNDUP(nlo, szcpgcnt); 2356 MEM_NODE_ITERATOR_INIT(pfnum, mnode, szc, &it); 2357 idx = PNUM_TO_IDX(mnode, r, pfnum); 2358 if (idx >= len || pfnum >= hi) 2359 goto wrapit; 2360 if ((PFN_2_COLOR(pfnum, szc, &it) ^ color) & 2361 ceq_mask) 2362 goto next; 2363 if (interleaved_mnodes && 2364 PFN_2_MEM_NODE(pfnum) != mnode) 2365 goto next; 2366 } 2367 } 2368 #endif 2369 2370 if (PAGE_COUNTERS(mnode, r, idx) != full) 2371 goto next; 2372 2373 /* 2374 * RFE: For performance maybe we can do something less 2375 * brutal than locking the entire freelist. So far 2376 * this doesn't seem to be a performance problem? 2377 */ 2378 page_freelist_lock(mnode); 2379 if (PAGE_COUNTERS(mnode, r, idx) == full) { 2380 ret_pp = 2381 page_promote(mnode, pfnum, r, PC_ALLOC, mtype); 2382 if (ret_pp != NULL) { 2383 VM_STAT_ADD(vmm_vmstats.pfc_coalok[r][mrange]); 2384 PAGE_COUNTERS_CURRENT_COLOR(mnode, r, 2385 PFN_2_COLOR(pfnum, szc, &it), mrange) = idx; 2386 page_freelist_unlock(mnode); 2387 rw_exit(&page_ctrs_rwlock[mnode]); 2388 #if defined(__sparc) 2389 if (PP_ISNORELOC(ret_pp)) { 2390 pgcnt_t npgs; 2391 2392 npgs = page_get_pagecnt(ret_pp->p_szc); 2393 kcage_freemem_sub(npgs); 2394 } 2395 #endif 2396 return (ret_pp); 2397 } 2398 } else { 2399 VM_STAT_ADD(vmm_vmstats.page_ctrs_changed[r][mrange]); 2400 } 2401 2402 page_freelist_unlock(mnode); 2403 /* 2404 * No point looking for another page if we've 2405 * already tried all of the ones that 2406 * page_ctr_cands indicated. Stash off where we left 2407 * off. 2408 * Note: this is not exact since we don't hold the 2409 * page_freelist_locks before we initially get the 2410 * value of cands for performance reasons, but should 2411 * be a decent approximation. 2412 */ 2413 if (--cands == 0) { 2414 PAGE_COUNTERS_CURRENT_COLOR(mnode, r, color, mrange) = 2415 idx; 2416 break; 2417 } 2418 next: 2419 PAGE_NEXT_PFN_FOR_COLOR(pfnum, szc, color, ceq_mask, 2420 color_mask, &it); 2421 idx = PNUM_TO_IDX(mnode, r, pfnum); 2422 if (idx >= len || pfnum >= hi) { 2423 wrapit: 2424 pfnum = lo; 2425 MEM_NODE_ITERATOR_INIT(pfnum, mnode, szc, &it); 2426 idx = PNUM_TO_IDX(mnode, r, pfnum); 2427 wrap++; 2428 #if defined(__sparc) 2429 nhi = 0; /* search kcage ranges */ 2430 #endif 2431 } 2432 } 2433 2434 rw_exit(&page_ctrs_rwlock[mnode]); 2435 VM_STAT_ADD(vmm_vmstats.page_ctrs_failed[r][mrange]); 2436 return (NULL); 2437 } 2438 2439 /* 2440 * For the given mnode, promote as many small pages to large pages as possible. 2441 * mnode can be -1, which means do them all 2442 */ 2443 void 2444 page_freelist_coalesce_all(int mnode) 2445 { 2446 int r; /* region size */ 2447 int idx, full; 2448 size_t len; 2449 int doall = interleaved_mnodes || mnode < 0; 2450 int mlo = doall ? 0 : mnode; 2451 int mhi = doall ? max_mem_nodes : (mnode + 1); 2452 2453 VM_STAT_ADD(vmm_vmstats.page_ctrs_coalesce_all); 2454 2455 if (mpss_coalesce_disable) { 2456 return; 2457 } 2458 2459 /* 2460 * Lock the entire freelist and coalesce what we can. 
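 * (Illustrative note: every mnode's page_ctrs_rwlock and freelist lock is
 * taken up front, in ascending mnode order, so the nested loops below can
 * promote pages without any further lock juggling.)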
2461 * 2462 * Always promote to the largest page possible 2463 * first to reduce the number of page promotions. 2464 */ 2465 for (mnode = mlo; mnode < mhi; mnode++) { 2466 rw_enter(&page_ctrs_rwlock[mnode], RW_READER); 2467 page_freelist_lock(mnode); 2468 } 2469 for (r = mmu_page_sizes - 1; r > 0; r--) { 2470 for (mnode = mlo; mnode < mhi; mnode++) { 2471 pgcnt_t cands = 0; 2472 int mrange, nranges = mnode_nranges[mnode]; 2473 2474 for (mrange = 0; mrange < nranges; mrange++) { 2475 PGCTRS_CANDS_GETVALUE(mnode, mrange, r, cands); 2476 if (cands != 0) 2477 break; 2478 } 2479 if (cands == 0) { 2480 VM_STAT_ADD(vmm_vmstats. 2481 page_ctrs_cands_skip_all); 2482 continue; 2483 } 2484 2485 full = FULL_REGION_CNT(r); 2486 len = PAGE_COUNTERS_ENTRIES(mnode, r); 2487 2488 for (idx = 0; idx < len; idx++) { 2489 if (PAGE_COUNTERS(mnode, r, idx) == full) { 2490 pfn_t pfnum = 2491 IDX_TO_PNUM(mnode, r, idx); 2492 int tmnode = interleaved_mnodes ? 2493 PFN_2_MEM_NODE(pfnum) : mnode; 2494 2495 ASSERT(pfnum >= 2496 mem_node_config[tmnode].physbase && 2497 pfnum < 2498 mem_node_config[tmnode].physmax); 2499 2500 (void) page_promote(tmnode, 2501 pfnum, r, PC_FREE, PC_MTYPE_ANY); 2502 } 2503 } 2504 /* shared hpm_counters covers all mnodes, so we quit */ 2505 if (interleaved_mnodes) 2506 break; 2507 } 2508 } 2509 for (mnode = mlo; mnode < mhi; mnode++) { 2510 page_freelist_unlock(mnode); 2511 rw_exit(&page_ctrs_rwlock[mnode]); 2512 } 2513 } 2514 2515 /* 2516 * This is where all polices for moving pages around 2517 * to different page size free lists is implemented. 2518 * Returns 1 on success, 0 on failure. 2519 * 2520 * So far these are the priorities for this algorithm in descending 2521 * order: 2522 * 2523 * 1) When servicing a request try to do so with a free page 2524 * from next size up. Helps defer fragmentation as long 2525 * as possible. 2526 * 2527 * 2) Page coalesce on demand. Only when a freelist 2528 * larger than PAGESIZE is empty and step 1 2529 * will not work since all larger size lists are 2530 * also empty. 2531 * 2532 * If pfnhi is non-zero, search for large page with pfn range less than pfnhi. 2533 */ 2534 2535 page_t * 2536 page_freelist_split(uchar_t szc, uint_t color, int mnode, int mtype, 2537 pfn_t pfnlo, pfn_t pfnhi, page_list_walker_t *plw) 2538 { 2539 uchar_t nszc = szc + 1; 2540 uint_t bin, sbin, bin_prev; 2541 page_t *pp, *firstpp; 2542 page_t *ret_pp = NULL; 2543 uint_t color_mask; 2544 2545 if (nszc == mmu_page_sizes) 2546 return (NULL); 2547 2548 ASSERT(nszc < mmu_page_sizes); 2549 color_mask = PAGE_GET_PAGECOLORS(nszc) - 1; 2550 bin = sbin = PAGE_GET_NSZ_COLOR(szc, color); 2551 bin_prev = (plw->plw_bin_split_prev == color) ? INVALID_COLOR : 2552 PAGE_GET_NSZ_COLOR(szc, plw->plw_bin_split_prev); 2553 2554 VM_STAT_ADD(vmm_vmstats.pfs_req[szc]); 2555 /* 2556 * First try to break up a larger page to fill current size freelist. 2557 */ 2558 while (plw->plw_bins[nszc] != 0) { 2559 2560 ASSERT(nszc < mmu_page_sizes); 2561 2562 /* 2563 * If page found then demote it. 2564 */ 2565 if (PAGE_FREELISTS(mnode, nszc, bin, mtype)) { 2566 page_freelist_lock(mnode); 2567 firstpp = pp = PAGE_FREELISTS(mnode, nszc, bin, mtype); 2568 2569 /* 2570 * If pfnhi is not PFNNULL, look for large page below 2571 * pfnhi. PFNNULL signifies no pfn requirement. 
2572 */ 2573 if (pp && 2574 ((pfnhi != PFNNULL && pp->p_pagenum >= pfnhi) || 2575 (pfnlo != PFNNULL && pp->p_pagenum < pfnlo))) { 2576 do { 2577 pp = pp->p_vpnext; 2578 if (pp == firstpp) { 2579 pp = NULL; 2580 break; 2581 } 2582 } while ((pfnhi != PFNNULL && 2583 pp->p_pagenum >= pfnhi) || 2584 (pfnlo != PFNNULL && 2585 pp->p_pagenum < pfnlo)); 2586 2587 if (pfnhi != PFNNULL && pp != NULL) 2588 ASSERT(pp->p_pagenum < pfnhi); 2589 2590 if (pfnlo != PFNNULL && pp != NULL) 2591 ASSERT(pp->p_pagenum >= pfnlo); 2592 } 2593 if (pp) { 2594 uint_t ccolor = page_correct_color(szc, nszc, 2595 color, bin, plw->plw_ceq_mask[szc]); 2596 2597 ASSERT(pp->p_szc == nszc); 2598 VM_STAT_ADD(vmm_vmstats.pfs_demote[nszc]); 2599 ret_pp = page_demote(mnode, pp->p_pagenum, 2600 pfnhi, pp->p_szc, szc, ccolor, PC_ALLOC); 2601 if (ret_pp) { 2602 page_freelist_unlock(mnode); 2603 #if defined(__sparc) 2604 if (PP_ISNORELOC(ret_pp)) { 2605 pgcnt_t npgs; 2606 2607 npgs = page_get_pagecnt( 2608 ret_pp->p_szc); 2609 kcage_freemem_sub(npgs); 2610 } 2611 #endif 2612 return (ret_pp); 2613 } 2614 } 2615 page_freelist_unlock(mnode); 2616 } 2617 2618 /* loop through next size bins */ 2619 bin = ADD_MASKED(bin, 1, plw->plw_ceq_mask[nszc], color_mask); 2620 plw->plw_bins[nszc]--; 2621 2622 if (bin == sbin) { 2623 uchar_t nnszc = nszc + 1; 2624 2625 /* we are done with this page size - check next */ 2626 if (plw->plw_bins[nnszc] == 0) 2627 /* we have already checked next size bins */ 2628 break; 2629 2630 bin = sbin = PAGE_GET_NSZ_COLOR(nszc, bin); 2631 if (bin_prev != INVALID_COLOR) { 2632 bin_prev = PAGE_GET_NSZ_COLOR(nszc, bin_prev); 2633 if (!((bin ^ bin_prev) & 2634 plw->plw_ceq_mask[nnszc])) 2635 break; 2636 } 2637 ASSERT(nnszc < mmu_page_sizes); 2638 color_mask = PAGE_GET_PAGECOLORS(nnszc) - 1; 2639 nszc = nnszc; 2640 ASSERT(nszc < mmu_page_sizes); 2641 } 2642 } 2643 2644 return (ret_pp); 2645 } 2646 2647 /* 2648 * Helper routine used only by the freelist code to lock 2649 * a page. If the page is a large page then it succeeds in 2650 * locking all the constituent pages or none at all. 2651 * Returns 1 on sucess, 0 on failure. 2652 */ 2653 static int 2654 page_trylock_cons(page_t *pp, se_t se) 2655 { 2656 page_t *tpp, *first_pp = pp; 2657 2658 /* 2659 * Fail if can't lock first or only page. 2660 */ 2661 if (!page_trylock(pp, se)) { 2662 return (0); 2663 } 2664 2665 /* 2666 * PAGESIZE: common case. 2667 */ 2668 if (pp->p_szc == 0) { 2669 return (1); 2670 } 2671 2672 /* 2673 * Large page case. 2674 */ 2675 tpp = pp->p_next; 2676 while (tpp != pp) { 2677 if (!page_trylock(tpp, se)) { 2678 /* 2679 * On failure unlock what we have locked so far. 2680 * We want to avoid attempting to capture these 2681 * pages as the pcm mutex may be held which could 2682 * lead to a recursive mutex panic. 2683 */ 2684 while (first_pp != tpp) { 2685 page_unlock_nocapture(first_pp); 2686 first_pp = first_pp->p_next; 2687 } 2688 return (0); 2689 } 2690 tpp = tpp->p_next; 2691 } 2692 return (1); 2693 } 2694 2695 /* 2696 * init context for walking page lists 2697 * Called when a page of the given szc in unavailable. Sets markers 2698 * for the beginning of the search to detect when search has 2699 * completed a full cycle. Sets flags for splitting larger pages 2700 * and coalescing smaller pages. Page walking procedes until a page 2701 * of the desired equivalent color is found. 
2702 */ 2703 void 2704 page_list_walk_init(uchar_t szc, uint_t flags, uint_t bin, int can_split, 2705 int use_ceq, page_list_walker_t *plw) 2706 { 2707 uint_t nszc, ceq_mask, colors; 2708 uchar_t ceq = use_ceq ? colorequivszc[szc] : 0; 2709 2710 ASSERT(szc < mmu_page_sizes); 2711 colors = PAGE_GET_PAGECOLORS(szc); 2712 2713 plw->plw_colors = colors; 2714 plw->plw_color_mask = colors - 1; 2715 plw->plw_bin_marker = plw->plw_bin0 = bin; 2716 plw->plw_bin_split_prev = bin; 2717 plw->plw_bin_step = (szc == 0) ? vac_colors : 1; 2718 2719 /* 2720 * if vac aliasing is possible make sure lower order color 2721 * bits are never ignored 2722 */ 2723 if (vac_colors > 1) 2724 ceq &= 0xf0; 2725 2726 /* 2727 * calculate the number of non-equivalent colors and 2728 * color equivalency mask 2729 */ 2730 plw->plw_ceq_dif = colors >> ((ceq >> 4) + (ceq & 0xf)); 2731 ASSERT(szc > 0 || plw->plw_ceq_dif >= vac_colors); 2732 ASSERT(plw->plw_ceq_dif > 0); 2733 plw->plw_ceq_mask[szc] = (plw->plw_ceq_dif - 1) << (ceq & 0xf); 2734 2735 if (flags & PG_MATCH_COLOR) { 2736 if (cpu_page_colors < 0) { 2737 /* 2738 * this is a heterogeneous machine with different CPUs 2739 * having different size e$ (not supported for ni2/rock 2740 */ 2741 uint_t cpucolors = CPUSETSIZE() >> PAGE_GET_SHIFT(szc); 2742 cpucolors = MAX(cpucolors, 1); 2743 ceq_mask = plw->plw_color_mask & (cpucolors - 1); 2744 plw->plw_ceq_mask[szc] = 2745 MIN(ceq_mask, plw->plw_ceq_mask[szc]); 2746 } 2747 plw->plw_ceq_dif = 1; 2748 } 2749 2750 /* we can split pages in the freelist, but not the cachelist */ 2751 if (can_split) { 2752 plw->plw_do_split = (szc + 1 < mmu_page_sizes) ? 1 : 0; 2753 2754 /* set next szc color masks and number of free list bins */ 2755 for (nszc = szc + 1; nszc < mmu_page_sizes; nszc++, szc++) { 2756 plw->plw_ceq_mask[nszc] = PAGE_GET_NSZ_MASK(szc, 2757 plw->plw_ceq_mask[szc]); 2758 plw->plw_bins[nszc] = PAGE_GET_PAGECOLORS(nszc); 2759 } 2760 plw->plw_ceq_mask[nszc] = INVALID_MASK; 2761 plw->plw_bins[nszc] = 0; 2762 2763 } else { 2764 ASSERT(szc == 0); 2765 plw->plw_do_split = 0; 2766 plw->plw_bins[1] = 0; 2767 plw->plw_ceq_mask[1] = INVALID_MASK; 2768 } 2769 } 2770 2771 /* 2772 * set mark to flag where next split should occur 2773 */ 2774 #define PAGE_SET_NEXT_SPLIT_MARKER(szc, nszc, bin, plw) { \ 2775 uint_t bin_nsz = PAGE_GET_NSZ_COLOR(szc, bin); \ 2776 uint_t bin0_nsz = PAGE_GET_NSZ_COLOR(szc, plw->plw_bin0); \ 2777 uint_t neq_mask = ~plw->plw_ceq_mask[nszc] & plw->plw_color_mask; \ 2778 plw->plw_split_next = \ 2779 INC_MASKED(bin_nsz, neq_mask, plw->plw_color_mask); \ 2780 if (!((plw->plw_split_next ^ bin0_nsz) & plw->plw_ceq_mask[nszc])) { \ 2781 plw->plw_split_next = \ 2782 INC_MASKED(plw->plw_split_next, \ 2783 neq_mask, plw->plw_color_mask); \ 2784 } \ 2785 } 2786 2787 uint_t 2788 page_list_walk_next_bin(uchar_t szc, uint_t bin, page_list_walker_t *plw) 2789 { 2790 uint_t neq_mask = ~plw->plw_ceq_mask[szc] & plw->plw_color_mask; 2791 uint_t bin0_nsz, nbin_nsz, nbin0, nbin; 2792 uchar_t nszc = szc + 1; 2793 2794 nbin = ADD_MASKED(bin, 2795 plw->plw_bin_step, neq_mask, plw->plw_color_mask); 2796 2797 if (plw->plw_do_split) { 2798 plw->plw_bin_split_prev = bin; 2799 PAGE_SET_NEXT_SPLIT_MARKER(szc, nszc, bin, plw); 2800 plw->plw_do_split = 0; 2801 } 2802 2803 if (szc == 0) { 2804 if (plw->plw_count != 0 || plw->plw_ceq_dif == vac_colors) { 2805 if (nbin == plw->plw_bin0 && 2806 (vac_colors == 1 || nbin != plw->plw_bin_marker)) { 2807 nbin = ADD_MASKED(nbin, plw->plw_bin_step, 2808 neq_mask, plw->plw_color_mask); 2809 
plw->plw_bin_split_prev = plw->plw_bin0; 2810 } 2811 2812 if (vac_colors > 1 && nbin == plw->plw_bin_marker) { 2813 plw->plw_bin_marker = 2814 nbin = INC_MASKED(nbin, neq_mask, 2815 plw->plw_color_mask); 2816 plw->plw_bin_split_prev = plw->plw_bin0; 2817 /* 2818 * large pages all have the same vac color 2819 * so by now we should be done with next 2820 * size page splitting process 2821 */ 2822 ASSERT(plw->plw_bins[1] == 0); 2823 plw->plw_do_split = 0; 2824 return (nbin); 2825 } 2826 2827 } else { 2828 uint_t bin_jump = (vac_colors == 1) ? 2829 (BIN_STEP & ~3) - (plw->plw_bin0 & 3) : BIN_STEP; 2830 2831 bin_jump &= ~(vac_colors - 1); 2832 2833 nbin0 = ADD_MASKED(plw->plw_bin0, bin_jump, neq_mask, 2834 plw->plw_color_mask); 2835 2836 if ((nbin0 ^ plw->plw_bin0) & plw->plw_ceq_mask[szc]) { 2837 2838 plw->plw_bin_marker = nbin = nbin0; 2839 2840 if (plw->plw_bins[nszc] != 0) { 2841 /* 2842 * check if next page size bin is the 2843 * same as the next page size bin for 2844 * bin0 2845 */ 2846 nbin_nsz = PAGE_GET_NSZ_COLOR(szc, 2847 nbin); 2848 bin0_nsz = PAGE_GET_NSZ_COLOR(szc, 2849 plw->plw_bin0); 2850 2851 if ((bin0_nsz ^ nbin_nsz) & 2852 plw->plw_ceq_mask[nszc]) 2853 plw->plw_do_split = 1; 2854 } 2855 return (nbin); 2856 } 2857 } 2858 } 2859 2860 if (plw->plw_bins[nszc] != 0) { 2861 nbin_nsz = PAGE_GET_NSZ_COLOR(szc, nbin); 2862 if (!((plw->plw_split_next ^ nbin_nsz) & 2863 plw->plw_ceq_mask[nszc])) 2864 plw->plw_do_split = 1; 2865 } 2866 2867 return (nbin); 2868 } 2869 2870 page_t * 2871 page_get_mnode_freelist(int mnode, uint_t bin, int mtype, uchar_t szc, 2872 uint_t flags) 2873 { 2874 kmutex_t *pcm; 2875 page_t *pp, *first_pp; 2876 uint_t sbin; 2877 int plw_initialized; 2878 page_list_walker_t plw; 2879 2880 ASSERT(szc < mmu_page_sizes); 2881 2882 VM_STAT_ADD(vmm_vmstats.pgmf_alloc[szc]); 2883 2884 MTYPE_START(mnode, mtype, flags); 2885 if (mtype < 0) { /* mnode does not have memory in mtype range */ 2886 VM_STAT_ADD(vmm_vmstats.pgmf_allocempty[szc]); 2887 return (NULL); 2888 } 2889 try_again: 2890 2891 plw_initialized = 0; 2892 plw.plw_ceq_dif = 1; 2893 2894 /* 2895 * Only hold one freelist lock at a time, that way we 2896 * can start anywhere and not have to worry about lock 2897 * ordering. 2898 */ 2899 for (plw.plw_count = 0; 2900 plw.plw_count < plw.plw_ceq_dif; plw.plw_count++) { 2901 sbin = bin; 2902 do { 2903 if (!PAGE_FREELISTS(mnode, szc, bin, mtype)) 2904 goto bin_empty_1; 2905 2906 pcm = PC_BIN_MUTEX(mnode, bin, PG_FREE_LIST); 2907 mutex_enter(pcm); 2908 pp = PAGE_FREELISTS(mnode, szc, bin, mtype); 2909 if (pp == NULL) 2910 goto bin_empty_0; 2911 2912 /* 2913 * These were set before the page 2914 * was put on the free list, 2915 * they must still be set. 2916 */ 2917 ASSERT(PP_ISFREE(pp)); 2918 ASSERT(PP_ISAGED(pp)); 2919 ASSERT(pp->p_vnode == NULL); 2920 ASSERT(pp->p_hash == NULL); 2921 ASSERT(pp->p_offset == (u_offset_t)-1); 2922 ASSERT(pp->p_szc == szc); 2923 ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode); 2924 2925 /* 2926 * Walk down the hash chain. 2927 * 8k pages are linked on p_next 2928 * and p_prev fields. Large pages 2929 * are a contiguous group of 2930 * constituent pages linked together 2931 * on their p_next and p_prev fields. 2932 * The large pages are linked together 2933 * on the hash chain using p_vpnext 2934 * p_vpprev of the base constituent 2935 * page of each large page. 
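 * (Illustrative note: the constituents of one large page form their own
 * circular p_next/p_prev ring, while only each base constituent page sits
 * on the freelist bin's p_vpnext/p_vpprev ring.)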
2936 */ 2937 first_pp = pp; 2938 while (!page_trylock_cons(pp, SE_EXCL)) { 2939 if (szc == 0) { 2940 pp = pp->p_next; 2941 } else { 2942 pp = pp->p_vpnext; 2943 } 2944 2945 ASSERT(PP_ISFREE(pp)); 2946 ASSERT(PP_ISAGED(pp)); 2947 ASSERT(pp->p_vnode == NULL); 2948 ASSERT(pp->p_hash == NULL); 2949 ASSERT(pp->p_offset == (u_offset_t)-1); 2950 ASSERT(pp->p_szc == szc); 2951 ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode); 2952 2953 if (pp == first_pp) 2954 goto bin_empty_0; 2955 } 2956 2957 ASSERT(pp != NULL); 2958 ASSERT(mtype == PP_2_MTYPE(pp)); 2959 ASSERT(pp->p_szc == szc); 2960 if (szc == 0) { 2961 page_sub(&PAGE_FREELISTS(mnode, 2962 szc, bin, mtype), pp); 2963 } else { 2964 page_vpsub(&PAGE_FREELISTS(mnode, 2965 szc, bin, mtype), pp); 2966 CHK_LPG(pp, szc); 2967 } 2968 page_ctr_sub(mnode, mtype, pp, PG_FREE_LIST); 2969 2970 if ((PP_ISFREE(pp) == 0) || (PP_ISAGED(pp) == 0)) 2971 panic("free page is not. pp %p", (void *)pp); 2972 mutex_exit(pcm); 2973 2974 #if defined(__sparc) 2975 ASSERT(!kcage_on || PP_ISNORELOC(pp) || 2976 (flags & PG_NORELOC) == 0); 2977 2978 if (PP_ISNORELOC(pp)) 2979 kcage_freemem_sub(page_get_pagecnt(szc)); 2980 #endif 2981 VM_STAT_ADD(vmm_vmstats.pgmf_allocok[szc]); 2982 return (pp); 2983 2984 bin_empty_0: 2985 mutex_exit(pcm); 2986 bin_empty_1: 2987 if (plw_initialized == 0) { 2988 page_list_walk_init(szc, flags, bin, 1, 1, 2989 &plw); 2990 plw_initialized = 1; 2991 ASSERT(plw.plw_colors <= 2992 PAGE_GET_PAGECOLORS(szc)); 2993 ASSERT(plw.plw_colors > 0); 2994 ASSERT((plw.plw_colors & 2995 (plw.plw_colors - 1)) == 0); 2996 ASSERT(bin < plw.plw_colors); 2997 ASSERT(plw.plw_ceq_mask[szc] < plw.plw_colors); 2998 } 2999 /* calculate the next bin with equivalent color */ 3000 bin = ADD_MASKED(bin, plw.plw_bin_step, 3001 plw.plw_ceq_mask[szc], plw.plw_color_mask); 3002 } while (sbin != bin); 3003 3004 /* 3005 * color bins are all empty if color match. Try and 3006 * satisfy the request by breaking up or coalescing 3007 * pages from a different size freelist of the correct 3008 * color that satisfies the ORIGINAL color requested. 3009 * If that fails then try pages of the same size but 3010 * different colors assuming we are not called with 3011 * PG_MATCH_COLOR. 3012 */ 3013 if (plw.plw_do_split && 3014 (pp = page_freelist_split(szc, bin, mnode, 3015 mtype, PFNNULL, PFNNULL, &plw)) != NULL) 3016 return (pp); 3017 3018 if (szc > 0 && (pp = page_freelist_coalesce(mnode, szc, 3019 bin, plw.plw_ceq_mask[szc], mtype, PFNNULL)) != NULL) 3020 return (pp); 3021 3022 if (plw.plw_ceq_dif > 1) 3023 bin = page_list_walk_next_bin(szc, bin, &plw); 3024 } 3025 3026 /* if allowed, cycle through additional mtypes */ 3027 MTYPE_NEXT(mnode, mtype, flags); 3028 if (mtype >= 0) 3029 goto try_again; 3030 3031 VM_STAT_ADD(vmm_vmstats.pgmf_allocfailed[szc]); 3032 3033 return (NULL); 3034 } 3035 3036 /* 3037 * Returns the count of free pages for 'pp' with size code 'szc'. 3038 * Note: This function does not return an exact value as the page freelist 3039 * locks are not held and thus the values in the page_counters may be 3040 * changing as we walk through the data. 
3041 */ 3042 static int 3043 page_freecnt(int mnode, page_t *pp, uchar_t szc) 3044 { 3045 pgcnt_t pgfree; 3046 pgcnt_t cnt; 3047 ssize_t r = szc; /* region size */ 3048 ssize_t idx; 3049 int i; 3050 int full, range; 3051 3052 /* Make sure pagenum passed in is aligned properly */ 3053 ASSERT((pp->p_pagenum & (PNUM_SIZE(szc) - 1)) == 0); 3054 ASSERT(szc > 0); 3055 3056 /* Prevent page_counters dynamic memory from being freed */ 3057 rw_enter(&page_ctrs_rwlock[mnode], RW_READER); 3058 idx = PNUM_TO_IDX(mnode, r, pp->p_pagenum); 3059 cnt = PAGE_COUNTERS(mnode, r, idx); 3060 pgfree = cnt << PNUM_SHIFT(r - 1); 3061 range = FULL_REGION_CNT(szc); 3062 3063 /* Check for completely full region */ 3064 if (cnt == range) { 3065 rw_exit(&page_ctrs_rwlock[mnode]); 3066 return (pgfree); 3067 } 3068 3069 while (--r > 0) { 3070 idx = PNUM_TO_IDX(mnode, r, pp->p_pagenum); 3071 full = FULL_REGION_CNT(r); 3072 for (i = 0; i < range; i++, idx++) { 3073 cnt = PAGE_COUNTERS(mnode, r, idx); 3074 /* 3075 * If cnt here is full, that means we have already 3076 * accounted for these pages earlier. 3077 */ 3078 if (cnt != full) { 3079 pgfree += (cnt << PNUM_SHIFT(r - 1)); 3080 } 3081 } 3082 range *= full; 3083 } 3084 rw_exit(&page_ctrs_rwlock[mnode]); 3085 return (pgfree); 3086 } 3087 3088 /* 3089 * Called from page_geti_contig_pages to exclusively lock constituent pages 3090 * starting from 'spp' for page size code 'szc'. 3091 * 3092 * If 'ptcpthreshold' is set, the number of free pages needed in the 'szc' 3093 * region needs to be greater than or equal to the threshold. 3094 */ 3095 static int 3096 page_trylock_contig_pages(int mnode, page_t *spp, uchar_t szc, int flags) 3097 { 3098 pgcnt_t pgcnt = PNUM_SIZE(szc); 3099 pgcnt_t pgfree, i; 3100 page_t *pp; 3101 3102 VM_STAT_ADD(vmm_vmstats.ptcp[szc]); 3103 3104 3105 if ((ptcpthreshold == 0) || (flags & PGI_PGCPHIPRI)) 3106 goto skipptcpcheck; 3107 /* 3108 * check if there are sufficient free pages available before attempting 3109 * to trylock. Count is approximate as page counters can change. 3110 */ 3111 pgfree = page_freecnt(mnode, spp, szc); 3112 3113 /* attempt to trylock if there are sufficient already free pages */ 3114 if (pgfree < pgcnt/ptcpthreshold) { 3115 VM_STAT_ADD(vmm_vmstats.ptcpfreethresh[szc]); 3116 return (0); 3117 } 3118 3119 skipptcpcheck: 3120 3121 for (i = 0; i < pgcnt; i++) { 3122 pp = &spp[i]; 3123 if (!page_trylock(pp, SE_EXCL)) { 3124 VM_STAT_ADD(vmm_vmstats.ptcpfailexcl[szc]); 3125 while (--i != (pgcnt_t)-1) { 3126 pp = &spp[i]; 3127 ASSERT(PAGE_EXCL(pp)); 3128 page_unlock_nocapture(pp); 3129 } 3130 return (0); 3131 } 3132 ASSERT(spp[i].p_pagenum == spp->p_pagenum + i); 3133 if ((pp->p_szc > szc || (szc && pp->p_szc == szc)) && 3134 !PP_ISFREE(pp)) { 3135 VM_STAT_ADD(vmm_vmstats.ptcpfailszc[szc]); 3136 ASSERT(i == 0); 3137 page_unlock_nocapture(pp); 3138 return (0); 3139 } 3140 if (PP_ISNORELOC(pp)) { 3141 VM_STAT_ADD(vmm_vmstats.ptcpfailcage[szc]); 3142 while (i != (pgcnt_t)-1) { 3143 pp = &spp[i]; 3144 ASSERT(PAGE_EXCL(pp)); 3145 page_unlock_nocapture(pp); 3146 i--; 3147 } 3148 return (0); 3149 } 3150 } 3151 VM_STAT_ADD(vmm_vmstats.ptcpok[szc]); 3152 return (1); 3153 } 3154 3155 /* 3156 * Claim large page pointed to by 'pp'. 'pp' is the starting set 3157 * of 'szc' constituent pages that had been locked exclusively previously. 3158 * Will attempt to relocate constituent pages in use. 
3159 */ 3160 static page_t * 3161 page_claim_contig_pages(page_t *pp, uchar_t szc, int flags) 3162 { 3163 spgcnt_t pgcnt, npgs, i; 3164 page_t *targpp, *rpp, *hpp; 3165 page_t *replpp = NULL; 3166 page_t *pplist = NULL; 3167 3168 ASSERT(pp != NULL); 3169 3170 pgcnt = page_get_pagecnt(szc); 3171 while (pgcnt) { 3172 ASSERT(PAGE_EXCL(pp)); 3173 ASSERT(!PP_ISNORELOC(pp)); 3174 if (PP_ISFREE(pp)) { 3175 /* 3176 * If this is a PG_FREE_LIST page then its 3177 * size code can change underneath us due to 3178 * page promotion or demotion. As an optimzation 3179 * use page_list_sub_pages() instead of 3180 * page_list_sub(). 3181 */ 3182 if (PP_ISAGED(pp)) { 3183 page_list_sub_pages(pp, szc); 3184 if (pp->p_szc == szc) { 3185 return (pp); 3186 } 3187 ASSERT(pp->p_szc < szc); 3188 npgs = page_get_pagecnt(pp->p_szc); 3189 hpp = pp; 3190 for (i = 0; i < npgs; i++, pp++) { 3191 pp->p_szc = szc; 3192 } 3193 page_list_concat(&pplist, &hpp); 3194 pgcnt -= npgs; 3195 continue; 3196 } 3197 ASSERT(!PP_ISAGED(pp)); 3198 ASSERT(pp->p_szc == 0); 3199 page_list_sub(pp, PG_CACHE_LIST); 3200 page_hashout(pp, NULL); 3201 PP_SETAGED(pp); 3202 pp->p_szc = szc; 3203 page_list_concat(&pplist, &pp); 3204 pp++; 3205 pgcnt--; 3206 continue; 3207 } 3208 npgs = page_get_pagecnt(pp->p_szc); 3209 3210 /* 3211 * page_create_wait freemem accounting done by caller of 3212 * page_get_freelist and not necessary to call it prior to 3213 * calling page_get_replacement_page. 3214 * 3215 * page_get_replacement_page can call page_get_contig_pages 3216 * to acquire a large page (szc > 0); the replacement must be 3217 * smaller than the contig page size to avoid looping or 3218 * szc == 0 and PGI_PGCPSZC0 is set. 3219 */ 3220 if (pp->p_szc < szc || (szc == 0 && (flags & PGI_PGCPSZC0))) { 3221 replpp = page_get_replacement_page(pp, NULL, 0); 3222 if (replpp) { 3223 npgs = page_get_pagecnt(pp->p_szc); 3224 ASSERT(npgs <= pgcnt); 3225 targpp = pp; 3226 } 3227 } 3228 3229 /* 3230 * If replacement is NULL or do_page_relocate fails, fail 3231 * coalescing of pages. 3232 */ 3233 if (replpp == NULL || (do_page_relocate(&targpp, &replpp, 0, 3234 &npgs, NULL) != 0)) { 3235 /* 3236 * Unlock un-processed target list 3237 */ 3238 while (pgcnt--) { 3239 ASSERT(PAGE_EXCL(pp)); 3240 page_unlock_nocapture(pp); 3241 pp++; 3242 } 3243 /* 3244 * Free the processed target list. 
3245 */ 3246 while (pplist) { 3247 pp = pplist; 3248 page_sub(&pplist, pp); 3249 ASSERT(PAGE_EXCL(pp)); 3250 ASSERT(pp->p_szc == szc); 3251 ASSERT(PP_ISFREE(pp)); 3252 ASSERT(PP_ISAGED(pp)); 3253 pp->p_szc = 0; 3254 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL); 3255 page_unlock_nocapture(pp); 3256 } 3257 3258 if (replpp != NULL) 3259 page_free_replacement_page(replpp); 3260 3261 return (NULL); 3262 } 3263 ASSERT(pp == targpp); 3264 3265 /* LINTED */ 3266 ASSERT(hpp = pp); /* That's right, it's an assignment */ 3267 3268 pp += npgs; 3269 pgcnt -= npgs; 3270 3271 while (npgs--) { 3272 ASSERT(PAGE_EXCL(targpp)); 3273 ASSERT(!PP_ISFREE(targpp)); 3274 ASSERT(!PP_ISNORELOC(targpp)); 3275 PP_SETFREE(targpp); 3276 ASSERT(PP_ISAGED(targpp)); 3277 ASSERT(targpp->p_szc < szc || (szc == 0 && 3278 (flags & PGI_PGCPSZC0))); 3279 targpp->p_szc = szc; 3280 targpp = targpp->p_next; 3281 3282 rpp = replpp; 3283 ASSERT(rpp != NULL); 3284 page_sub(&replpp, rpp); 3285 ASSERT(PAGE_EXCL(rpp)); 3286 ASSERT(!PP_ISFREE(rpp)); 3287 page_unlock_nocapture(rpp); 3288 } 3289 ASSERT(targpp == hpp); 3290 ASSERT(replpp == NULL); 3291 page_list_concat(&pplist, &targpp); 3292 } 3293 CHK_LPG(pplist, szc); 3294 return (pplist); 3295 } 3296 3297 /* 3298 * Trim kernel cage from pfnlo-pfnhi and store result in lo-hi. Return code 3299 * of 0 means nothing left after trim. 3300 */ 3301 int 3302 trimkcage(struct memseg *mseg, pfn_t *lo, pfn_t *hi, pfn_t pfnlo, pfn_t pfnhi) 3303 { 3304 pfn_t kcagepfn; 3305 int decr; 3306 int rc = 0; 3307 3308 if (PP_ISNORELOC(mseg->pages)) { 3309 if (PP_ISNORELOC(mseg->epages - 1) == 0) { 3310 3311 /* lower part of this mseg inside kernel cage */ 3312 decr = kcage_current_pfn(&kcagepfn); 3313 3314 /* kernel cage may have transitioned past mseg */ 3315 if (kcagepfn >= mseg->pages_base && 3316 kcagepfn < mseg->pages_end) { 3317 ASSERT(decr == 0); 3318 *lo = MAX(kcagepfn, pfnlo); 3319 *hi = MIN(pfnhi, (mseg->pages_end - 1)); 3320 rc = 1; 3321 } 3322 } 3323 /* else entire mseg in the cage */ 3324 } else { 3325 if (PP_ISNORELOC(mseg->epages - 1)) { 3326 3327 /* upper part of this mseg inside kernel cage */ 3328 decr = kcage_current_pfn(&kcagepfn); 3329 3330 /* kernel cage may have transitioned past mseg */ 3331 if (kcagepfn >= mseg->pages_base && 3332 kcagepfn < mseg->pages_end) { 3333 ASSERT(decr); 3334 *hi = MIN(kcagepfn, pfnhi); 3335 *lo = MAX(pfnlo, mseg->pages_base); 3336 rc = 1; 3337 } 3338 } else { 3339 /* entire mseg outside of kernel cage */ 3340 *lo = MAX(pfnlo, mseg->pages_base); 3341 *hi = MIN(pfnhi, (mseg->pages_end - 1)); 3342 rc = 1; 3343 } 3344 } 3345 return (rc); 3346 } 3347 3348 /* 3349 * called from page_get_contig_pages to search 'pfnlo' thru 'pfnhi' to claim a 3350 * page with size code 'szc'. Claiming such a page requires acquiring 3351 * exclusive locks on all constituent pages (page_trylock_contig_pages), 3352 * relocating pages in use and concatenating these constituent pages into a 3353 * large page. 3354 * 3355 * The page lists do not have such a large page and page_freelist_split has 3356 * already failed to demote larger pages and/or coalesce smaller free pages. 3357 * 3358 * 'flags' may specify PG_COLOR_MATCH which would limit the search of large 3359 * pages with the same color as 'bin'. 3360 * 3361 * 'pfnflag' specifies the subset of the pfn range to search. 
3362 */ 3363 3364 static page_t * 3365 page_geti_contig_pages(int mnode, uint_t bin, uchar_t szc, int flags, 3366 pfn_t pfnlo, pfn_t pfnhi, pgcnt_t pfnflag) 3367 { 3368 struct memseg *mseg; 3369 pgcnt_t szcpgcnt = page_get_pagecnt(szc); 3370 pgcnt_t szcpgmask = szcpgcnt - 1; 3371 pfn_t randpfn; 3372 page_t *pp, *randpp, *endpp; 3373 uint_t colors, ceq_mask; 3374 /* LINTED : set but not used in function */ 3375 uint_t color_mask; 3376 pfn_t hi, lo; 3377 uint_t skip; 3378 MEM_NODE_ITERATOR_DECL(it); 3379 3380 ASSERT(szc != 0 || (flags & PGI_PGCPSZC0)); 3381 3382 pfnlo = P2ROUNDUP(pfnlo, szcpgcnt); 3383 3384 if ((pfnhi - pfnlo) + 1 < szcpgcnt || pfnlo >= pfnhi) 3385 return (NULL); 3386 3387 ASSERT(szc < mmu_page_sizes); 3388 3389 colors = PAGE_GET_PAGECOLORS(szc); 3390 color_mask = colors - 1; 3391 if ((colors > 1) && (flags & PG_MATCH_COLOR)) { 3392 uchar_t ceq = colorequivszc[szc]; 3393 uint_t ceq_dif = colors >> ((ceq >> 4) + (ceq & 0xf)); 3394 3395 ASSERT(ceq_dif > 0); 3396 ceq_mask = (ceq_dif - 1) << (ceq & 0xf); 3397 } else { 3398 ceq_mask = 0; 3399 } 3400 3401 ASSERT(bin < colors); 3402 3403 /* clear "non-significant" color bits */ 3404 bin &= ceq_mask; 3405 3406 /* 3407 * trim the pfn range to search based on pfnflag. pfnflag is set 3408 * when there have been previous page_get_contig_page failures to 3409 * limit the search. 3410 * 3411 * The high bit in pfnflag specifies the number of 'slots' in the 3412 * pfn range and the remainder of pfnflag specifies which slot. 3413 * For example, a value of 1010b would mean the second slot of 3414 * the pfn range that has been divided into 8 slots. 3415 */ 3416 if (pfnflag > 1) { 3417 int slots = 1 << (highbit(pfnflag) - 1); 3418 int slotid = pfnflag & (slots - 1); 3419 pgcnt_t szcpages; 3420 int slotlen; 3421 3422 pfnhi = P2ALIGN((pfnhi + 1), szcpgcnt) - 1; 3423 szcpages = ((pfnhi - pfnlo) + 1) / szcpgcnt; 3424 slotlen = howmany(szcpages, slots); 3425 /* skip if 'slotid' slot is empty */ 3426 if (slotid * slotlen >= szcpages) 3427 return (NULL); 3428 pfnlo = pfnlo + (((slotid * slotlen) % szcpages) * szcpgcnt); 3429 ASSERT(pfnlo < pfnhi); 3430 if (pfnhi > pfnlo + (slotlen * szcpgcnt)) 3431 pfnhi = pfnlo + (slotlen * szcpgcnt) - 1; 3432 } 3433 3434 memsegs_lock(0); 3435 3436 /* 3437 * loop through memsegs to look for contig page candidates 3438 */ 3439 3440 for (mseg = memsegs; mseg != NULL; mseg = mseg->next) { 3441 if (pfnhi < mseg->pages_base || pfnlo >= mseg->pages_end) { 3442 /* no overlap */ 3443 continue; 3444 } 3445 3446 if (mseg->pages_end - mseg->pages_base < szcpgcnt) 3447 /* mseg too small */ 3448 continue; 3449 3450 /* 3451 * trim off kernel cage pages from pfn range and check for 3452 * a trimmed pfn range returned that does not span the 3453 * desired large page size. 3454 */ 3455 if (kcage_on) { 3456 if (trimkcage(mseg, &lo, &hi, pfnlo, pfnhi) == 0 || 3457 lo >= hi || ((hi - lo) + 1) < szcpgcnt) 3458 continue; 3459 } else { 3460 lo = MAX(pfnlo, mseg->pages_base); 3461 hi = MIN(pfnhi, (mseg->pages_end - 1)); 3462 } 3463 3464 /* round to szcpgcnt boundaries */ 3465 lo = P2ROUNDUP(lo, szcpgcnt); 3466 3467 MEM_NODE_ITERATOR_INIT(lo, mnode, szc, &it); 3468 hi = P2ALIGN((hi + 1), szcpgcnt) - 1; 3469 3470 if (hi <= lo) 3471 continue; 3472 3473 /* 3474 * set lo to point to the pfn for the desired bin. 
Large 3475 * page sizes may only have a single page color 3476 */ 3477 skip = szcpgcnt; 3478 if (ceq_mask > 0 || interleaved_mnodes) { 3479 /* set lo to point at appropriate color */ 3480 if (((PFN_2_COLOR(lo, szc, &it) ^ bin) & ceq_mask) || 3481 (interleaved_mnodes && 3482 PFN_2_MEM_NODE(lo) != mnode)) { 3483 PAGE_NEXT_PFN_FOR_COLOR(lo, szc, bin, ceq_mask, 3484 color_mask, &it); 3485 } 3486 if (hi <= lo) 3487 /* mseg cannot satisfy color request */ 3488 continue; 3489 } 3490 3491 /* randomly choose a point between lo and hi to begin search */ 3492 3493 randpfn = (pfn_t)GETTICK(); 3494 randpfn = ((randpfn % (hi - lo)) + lo) & ~(skip - 1); 3495 MEM_NODE_ITERATOR_INIT(randpfn, mnode, szc, &it); 3496 if (ceq_mask || interleaved_mnodes || randpfn == (pfn_t)-1) { 3497 if (randpfn != (pfn_t)-1) { 3498 PAGE_NEXT_PFN_FOR_COLOR(randpfn, szc, bin, 3499 ceq_mask, color_mask, &it); 3500 } 3501 if (randpfn >= hi) { 3502 randpfn = lo; 3503 MEM_NODE_ITERATOR_INIT(randpfn, mnode, szc, 3504 &it); 3505 } 3506 } 3507 randpp = mseg->pages + (randpfn - mseg->pages_base); 3508 3509 ASSERT(randpp->p_pagenum == randpfn); 3510 3511 pp = randpp; 3512 endpp = mseg->pages + (hi - mseg->pages_base) + 1; 3513 3514 ASSERT(randpp + szcpgcnt <= endpp); 3515 3516 do { 3517 ASSERT(!(pp->p_pagenum & szcpgmask)); 3518 ASSERT(((PP_2_BIN(pp) ^ bin) & ceq_mask) == 0); 3519 3520 if (page_trylock_contig_pages(mnode, pp, szc, flags)) { 3521 /* pages unlocked by page_claim on failure */ 3522 if (page_claim_contig_pages(pp, szc, flags)) { 3523 memsegs_unlock(0); 3524 return (pp); 3525 } 3526 } 3527 3528 if (ceq_mask == 0 && !interleaved_mnodes) { 3529 pp += skip; 3530 } else { 3531 pfn_t pfn = pp->p_pagenum; 3532 3533 PAGE_NEXT_PFN_FOR_COLOR(pfn, szc, bin, 3534 ceq_mask, color_mask, &it); 3535 if (pfn == (pfn_t)-1) { 3536 pp = endpp; 3537 } else { 3538 pp = mseg->pages + 3539 (pfn - mseg->pages_base); 3540 } 3541 } 3542 if (pp >= endpp) { 3543 /* start from the beginning */ 3544 MEM_NODE_ITERATOR_INIT(lo, mnode, szc, &it); 3545 pp = mseg->pages + (lo - mseg->pages_base); 3546 ASSERT(pp->p_pagenum == lo); 3547 ASSERT(pp + szcpgcnt <= endpp); 3548 } 3549 } while (pp != randpp); 3550 } 3551 memsegs_unlock(0); 3552 return (NULL); 3553 } 3554 3555 3556 /* 3557 * controlling routine that searches through physical memory in an attempt to 3558 * claim a large page based on the input parameters when such a page could 3559 * not be found on the page free lists. 3560 * 3561 * calls page_geti_contig_pages with an initial pfn range from the mnode 3562 * and mtype. page_geti_contig_pages will trim off the parts of the pfn range 3563 * that overlap with the kernel cage or do not match the requested page 3564 * color if PG_MATCH_COLOR is set. Since this search is very expensive, 3565 * page_geti_contig_pages may further limit the search range based on 3566 * previous failure counts (pgcpfailcnt[]). 3567 * 3568 * for PGI_PGCPSZC0 requests, page_get_contig_pages will relocate a base 3569 * pagesize page that satisfies mtype.
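 * (Illustrative: each page_get_contig_pages() failure grows pgcpfailcnt[szc]
 * via SETPGCPFAILCNT, shrinking the slot searched next time, while each
 * success halves the count, doubling the searched portion of the pfn range
 * again.)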
3570 */ 3571 page_t * 3572 page_get_contig_pages(int mnode, uint_t bin, int mtype, uchar_t szc, 3573 uint_t flags) 3574 { 3575 pfn_t pfnlo, pfnhi; /* contig pages pfn range */ 3576 page_t *pp; 3577 pgcnt_t pfnflag = 0; /* no limit on search if 0 */ 3578 3579 VM_STAT_ADD(vmm_vmstats.pgcp_alloc[szc]); 3580 3581 /* no allocations from cage */ 3582 flags |= PGI_NOCAGE; 3583 3584 /* LINTED */ 3585 MTYPE_START(mnode, mtype, flags); 3586 if (mtype < 0) { /* mnode does not have memory in mtype range */ 3587 VM_STAT_ADD(vmm_vmstats.pgcp_allocempty[szc]); 3588 return (NULL); 3589 } 3590 3591 ASSERT(szc > 0 || (flags & PGI_PGCPSZC0)); 3592 3593 /* do not limit search and ignore color if hi pri */ 3594 3595 if (pgcplimitsearch && ((flags & PGI_PGCPHIPRI) == 0)) 3596 pfnflag = pgcpfailcnt[szc]; 3597 3598 /* remove color match to improve chances */ 3599 3600 if (flags & PGI_PGCPHIPRI || pfnflag) 3601 flags &= ~PG_MATCH_COLOR; 3602 3603 do { 3604 /* get pfn range based on mnode and mtype */ 3605 MNODETYPE_2_PFN(mnode, mtype, pfnlo, pfnhi); 3606 3607 ASSERT(pfnhi >= pfnlo); 3608 3609 pp = page_geti_contig_pages(mnode, bin, szc, flags, 3610 pfnlo, pfnhi, pfnflag); 3611 3612 if (pp != NULL) { 3613 pfnflag = pgcpfailcnt[szc]; 3614 if (pfnflag) { 3615 /* double the search size */ 3616 pgcpfailcnt[szc] = pfnflag >> 1; 3617 } 3618 VM_STAT_ADD(vmm_vmstats.pgcp_allocok[szc]); 3619 return (pp); 3620 } 3621 MTYPE_NEXT(mnode, mtype, flags); 3622 } while (mtype >= 0); 3623 3624 VM_STAT_ADD(vmm_vmstats.pgcp_allocfailed[szc]); 3625 return (NULL); 3626 } 3627 3628 #if defined(__i386) || defined(__amd64) 3629 /* 3630 * Determine the likelihood of finding/coalescing a szc page. 3631 * Return 0 if the likelihood is small otherwise return 1. 3632 * 3633 * For now, be conservative and check only 1g pages and return 0 3634 * if there had been previous coalescing failures and the szc pages 3635 * needed to satisfy request would exhaust most of freemem. 3636 */ 3637 int 3638 page_chk_freelist(uint_t szc) 3639 { 3640 pgcnt_t pgcnt; 3641 3642 if (szc <= 1) 3643 return (1); 3644 3645 pgcnt = page_get_pagecnt(szc); 3646 if (pgcpfailcnt[szc] && pgcnt + throttlefree >= freemem) { 3647 VM_STAT_ADD(vmm_vmstats.pcf_deny[szc]); 3648 return (0); 3649 } 3650 VM_STAT_ADD(vmm_vmstats.pcf_allow[szc]); 3651 return (1); 3652 } 3653 #endif 3654 3655 /* 3656 * Find the `best' page on the freelist for this (vp,off) (as,vaddr) pair. 3657 * 3658 * Does its own locking and accounting. 3659 * If PG_MATCH_COLOR is set, then NULL will be returned if there are no 3660 * pages of the proper color even if there are pages of a different color. 3661 * 3662 * Finds a page, removes it, THEN locks it. 3663 */ 3664 3665 /*ARGSUSED*/ 3666 page_t * 3667 page_get_freelist(struct vnode *vp, u_offset_t off, struct seg *seg, 3668 caddr_t vaddr, size_t size, uint_t flags, struct lgrp *lgrp) 3669 { 3670 struct as *as = seg->s_as; 3671 page_t *pp = NULL; 3672 ulong_t bin; 3673 uchar_t szc; 3674 int mnode; 3675 int mtype; 3676 page_t *(*page_get_func)(int, uint_t, int, uchar_t, uint_t); 3677 lgrp_mnode_cookie_t lgrp_cookie; 3678 3679 page_get_func = page_get_mnode_freelist; 3680 3681 /* 3682 * If we aren't passed a specific lgroup, or passed a freed lgrp 3683 * assume we wish to allocate near to the current thread's home. 
3684 */ 3685 if (!LGRP_EXISTS(lgrp)) 3686 lgrp = lgrp_home_lgrp(); 3687 3688 if (kcage_on) { 3689 if ((flags & (PG_NORELOC | PG_PANIC)) == PG_NORELOC && 3690 kcage_freemem < kcage_throttlefree + btop(size) && 3691 curthread != kcage_cageout_thread) { 3692 /* 3693 * Set a "reserve" of kcage_throttlefree pages for 3694 * PG_PANIC and cageout thread allocations. 3695 * 3696 * Everybody else has to serialize in 3697 * page_create_get_something() to get a cage page, so 3698 * that we don't deadlock cageout! 3699 */ 3700 return (NULL); 3701 } 3702 } else { 3703 flags &= ~PG_NORELOC; 3704 flags |= PGI_NOCAGE; 3705 } 3706 3707 /* LINTED */ 3708 MTYPE_INIT(mtype, vp, vaddr, flags, size); 3709 3710 /* 3711 * Convert size to page size code. 3712 */ 3713 if ((szc = page_szc(size)) == (uchar_t)-1) 3714 panic("page_get_freelist: illegal page size request"); 3715 ASSERT(szc < mmu_page_sizes); 3716 3717 VM_STAT_ADD(vmm_vmstats.pgf_alloc[szc]); 3718 3719 /* LINTED */ 3720 AS_2_BIN(as, seg, vp, vaddr, bin, szc); 3721 3722 ASSERT(bin < PAGE_GET_PAGECOLORS(szc)); 3723 3724 /* 3725 * Try to get a local page first, but try remote if we can't 3726 * get a page of the right color. 3727 */ 3728 pgretry: 3729 LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp, LGRP_SRCH_LOCAL); 3730 while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) { 3731 pp = page_get_func(mnode, bin, mtype, szc, flags); 3732 if (pp != NULL) { 3733 VM_STAT_ADD(vmm_vmstats.pgf_allocok[szc]); 3734 DTRACE_PROBE4(page__get, 3735 lgrp_t *, lgrp, 3736 int, mnode, 3737 ulong_t, bin, 3738 uint_t, flags); 3739 return (pp); 3740 } 3741 } 3742 ASSERT(pp == NULL); 3743 3744 /* 3745 * for non-SZC0 PAGESIZE requests, check cachelist before checking 3746 * remote free lists. Caller expected to call page_get_cachelist which 3747 * will check local cache lists and remote free lists. 3748 */ 3749 if (szc == 0 && ((flags & PGI_PGCPSZC0) == 0)) { 3750 VM_STAT_ADD(vmm_vmstats.pgf_allocdeferred); 3751 return (NULL); 3752 } 3753 3754 ASSERT(szc > 0 || (flags & PGI_PGCPSZC0)); 3755 3756 lgrp_stat_add(lgrp->lgrp_id, LGRP_NUM_ALLOC_FAIL, 1); 3757 3758 if (!(flags & PG_LOCAL)) { 3759 /* 3760 * Try to get a non-local freelist page. 3761 */ 3762 LGRP_MNODE_COOKIE_UPGRADE(lgrp_cookie); 3763 while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) { 3764 pp = page_get_func(mnode, bin, mtype, szc, flags); 3765 if (pp != NULL) { 3766 DTRACE_PROBE4(page__get, 3767 lgrp_t *, lgrp, 3768 int, mnode, 3769 ulong_t, bin, 3770 uint_t, flags); 3771 VM_STAT_ADD(vmm_vmstats.pgf_allocokrem[szc]); 3772 return (pp); 3773 } 3774 } 3775 ASSERT(pp == NULL); 3776 } 3777 3778 /* 3779 * when the cage is off chances are page_get_contig_pages() will fail 3780 * to lock a large page chunk therefore when the cage is off it's not 3781 * called by default. this can be changed via /etc/system. 3782 * 3783 * page_get_contig_pages() also called to acquire a base pagesize page 3784 * for page_create_get_something(). 3785 */ 3786 if (!(flags & PG_NORELOC) && (pg_contig_disable == 0) && 3787 (kcage_on || pg_lpgcreate_nocage || szc == 0) && 3788 (page_get_func != page_get_contig_pages)) { 3789 3790 VM_STAT_ADD(vmm_vmstats.pgf_allocretry[szc]); 3791 page_get_func = page_get_contig_pages; 3792 goto pgretry; 3793 } 3794 3795 if (!(flags & PG_LOCAL) && pgcplimitsearch && 3796 page_get_func == page_get_contig_pages) 3797 SETPGCPFAILCNT(szc); 3798 3799 VM_STAT_ADD(vmm_vmstats.pgf_allocfailed[szc]); 3800 return (NULL); 3801 } 3802 3803 /* 3804 * Find the `best' page on the cachelist for this (vp,off) (as,vaddr) pair. 
3805 * 3806 * Does its own locking. 3807 * If PG_MATCH_COLOR is set, then NULL will be returned if there are no 3808 * pages of the proper color even if there are pages of a different color. 3809 * Otherwise, scan the bins for ones with pages. For each bin with pages, 3810 * try to lock one of them. If no page can be locked, try the 3811 * next bin. Return NULL if a page can not be found and locked. 3812 * 3813 * Finds a pages, trys to lock it, then removes it. 3814 */ 3815 3816 /*ARGSUSED*/ 3817 page_t * 3818 page_get_cachelist(struct vnode *vp, u_offset_t off, struct seg *seg, 3819 caddr_t vaddr, uint_t flags, struct lgrp *lgrp) 3820 { 3821 page_t *pp; 3822 struct as *as = seg->s_as; 3823 ulong_t bin; 3824 /*LINTED*/ 3825 int mnode; 3826 int mtype; 3827 lgrp_mnode_cookie_t lgrp_cookie; 3828 3829 /* 3830 * If we aren't passed a specific lgroup, or pasased a freed lgrp 3831 * assume we wish to allocate near to the current thread's home. 3832 */ 3833 if (!LGRP_EXISTS(lgrp)) 3834 lgrp = lgrp_home_lgrp(); 3835 3836 if (!kcage_on) { 3837 flags &= ~PG_NORELOC; 3838 flags |= PGI_NOCAGE; 3839 } 3840 3841 if ((flags & (PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == PG_NORELOC && 3842 kcage_freemem <= kcage_throttlefree) { 3843 /* 3844 * Reserve kcage_throttlefree pages for critical kernel 3845 * threads. 3846 * 3847 * Everybody else has to go to page_create_get_something() 3848 * to get a cage page, so we don't deadlock cageout. 3849 */ 3850 return (NULL); 3851 } 3852 3853 /* LINTED */ 3854 AS_2_BIN(as, seg, vp, vaddr, bin, 0); 3855 3856 ASSERT(bin < PAGE_GET_PAGECOLORS(0)); 3857 3858 /* LINTED */ 3859 MTYPE_INIT(mtype, vp, vaddr, flags, MMU_PAGESIZE); 3860 3861 VM_STAT_ADD(vmm_vmstats.pgc_alloc); 3862 3863 /* 3864 * Try local cachelists first 3865 */ 3866 LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp, LGRP_SRCH_LOCAL); 3867 while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) { 3868 pp = page_get_mnode_cachelist(bin, flags, mnode, mtype); 3869 if (pp != NULL) { 3870 VM_STAT_ADD(vmm_vmstats.pgc_allocok); 3871 DTRACE_PROBE4(page__get, 3872 lgrp_t *, lgrp, 3873 int, mnode, 3874 ulong_t, bin, 3875 uint_t, flags); 3876 return (pp); 3877 } 3878 } 3879 3880 lgrp_stat_add(lgrp->lgrp_id, LGRP_NUM_ALLOC_FAIL, 1); 3881 3882 /* 3883 * Try freelists/cachelists that are farther away 3884 * This is our only chance to allocate remote pages for PAGESIZE 3885 * requests. 
3886 */ 3887 LGRP_MNODE_COOKIE_UPGRADE(lgrp_cookie); 3888 while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) { 3889 pp = page_get_mnode_freelist(mnode, bin, mtype, 3890 0, flags); 3891 if (pp != NULL) { 3892 VM_STAT_ADD(vmm_vmstats.pgc_allocokdeferred); 3893 DTRACE_PROBE4(page__get, 3894 lgrp_t *, lgrp, 3895 int, mnode, 3896 ulong_t, bin, 3897 uint_t, flags); 3898 return (pp); 3899 } 3900 pp = page_get_mnode_cachelist(bin, flags, mnode, mtype); 3901 if (pp != NULL) { 3902 VM_STAT_ADD(vmm_vmstats.pgc_allocokrem); 3903 DTRACE_PROBE4(page__get, 3904 lgrp_t *, lgrp, 3905 int, mnode, 3906 ulong_t, bin, 3907 uint_t, flags); 3908 return (pp); 3909 } 3910 } 3911 3912 VM_STAT_ADD(vmm_vmstats.pgc_allocfailed); 3913 return (NULL); 3914 } 3915 3916 page_t * 3917 page_get_mnode_cachelist(uint_t bin, uint_t flags, int mnode, int mtype) 3918 { 3919 kmutex_t *pcm; 3920 page_t *pp, *first_pp; 3921 uint_t sbin; 3922 int plw_initialized; 3923 page_list_walker_t plw; 3924 3925 VM_STAT_ADD(vmm_vmstats.pgmc_alloc); 3926 3927 /* LINTED */ 3928 MTYPE_START(mnode, mtype, flags); 3929 if (mtype < 0) { /* mnode does not have memory in mtype range */ 3930 VM_STAT_ADD(vmm_vmstats.pgmc_allocempty); 3931 return (NULL); 3932 } 3933 3934 try_again: 3935 3936 plw_initialized = 0; 3937 plw.plw_ceq_dif = 1; 3938 3939 /* 3940 * Only hold one cachelist lock at a time, that way we 3941 * can start anywhere and not have to worry about lock 3942 * ordering. 3943 */ 3944 3945 for (plw.plw_count = 0; 3946 plw.plw_count < plw.plw_ceq_dif; plw.plw_count++) { 3947 sbin = bin; 3948 do { 3949 3950 if (!PAGE_CACHELISTS(mnode, bin, mtype)) 3951 goto bin_empty_1; 3952 pcm = PC_BIN_MUTEX(mnode, bin, PG_CACHE_LIST); 3953 mutex_enter(pcm); 3954 pp = PAGE_CACHELISTS(mnode, bin, mtype); 3955 if (pp == NULL) 3956 goto bin_empty_0; 3957 3958 first_pp = pp; 3959 ASSERT(pp->p_vnode); 3960 ASSERT(PP_ISAGED(pp) == 0); 3961 ASSERT(pp->p_szc == 0); 3962 ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode); 3963 while (!page_trylock(pp, SE_EXCL)) { 3964 pp = pp->p_next; 3965 ASSERT(pp->p_szc == 0); 3966 if (pp == first_pp) { 3967 /* 3968 * We have searched the complete list! 3969 * And all of them (might only be one) 3970 * are locked. This can happen since 3971 * these pages can also be found via 3972 * the hash list. When found via the 3973 * hash list, they are locked first, 3974 * then removed. We give up to let the 3975 * other thread run. 3976 */ 3977 pp = NULL; 3978 break; 3979 } 3980 ASSERT(pp->p_vnode); 3981 ASSERT(PP_ISFREE(pp)); 3982 ASSERT(PP_ISAGED(pp) == 0); 3983 ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == 3984 mnode); 3985 } 3986 3987 if (pp) { 3988 page_t **ppp; 3989 /* 3990 * Found and locked a page. 3991 * Pull it off the list. 3992 */ 3993 ASSERT(mtype == PP_2_MTYPE(pp)); 3994 ppp = &PAGE_CACHELISTS(mnode, bin, mtype); 3995 page_sub(ppp, pp); 3996 /* 3997 * Subtract counters before releasing pcm mutex 3998 * to avoid a race with page_freelist_coalesce 3999 * and page_freelist_split. 4000 */ 4001 page_ctr_sub(mnode, mtype, pp, PG_CACHE_LIST); 4002 mutex_exit(pcm); 4003 ASSERT(pp->p_vnode); 4004 ASSERT(PP_ISAGED(pp) == 0); 4005 #if defined(__sparc) 4006 ASSERT(!kcage_on || 4007 (flags & PG_NORELOC) == 0 || 4008 PP_ISNORELOC(pp)); 4009 if (PP_ISNORELOC(pp)) { 4010 kcage_freemem_sub(1); 4011 } 4012 #endif 4013 VM_STAT_ADD(vmm_vmstats. 
#ifdef DEBUG
#define	REPL_PAGE_STATS
#endif /* DEBUG */

#ifdef REPL_PAGE_STATS
struct repl_page_stats {
	uint_t	ngets;
	uint_t	ngets_noreloc;
	uint_t	npgr_noreloc;
	uint_t	nnopage_first;
	uint_t	nnopage;
	uint_t	nhashout;
	uint_t	nnofree;
	uint_t	nnext_pp;
} repl_page_stats;
#define	REPL_STAT_INCR(v)	atomic_add_32(&repl_page_stats.v, 1)
#else /* REPL_PAGE_STATS */
#define	REPL_STAT_INCR(v)
#endif /* REPL_PAGE_STATS */

int	pgrppgcp;
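/*
 * Example (not part of the original source): on a DEBUG kernel,
 * REPL_STAT_INCR(ngets) expands to
 *
 *	atomic_add_32(&repl_page_stats.ngets, 1);
 *
 * while on a non-DEBUG kernel it expands to nothing, so the counters
 * cost nothing in production builds.
 */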
/*
 * The freemem accounting must be done by the caller.
 * First we try to get a replacement page of the same size as like_pp;
 * if that is not possible, we just get a set of discontiguous
 * PAGESIZE pages.
 */
page_t *
page_get_replacement_page(page_t *orig_like_pp, struct lgrp *lgrp_target,
    uint_t pgrflags)
{
	page_t		*like_pp;
	page_t		*pp, *pplist;
	page_t		*pl = NULL;
	ulong_t		bin;
	int		mnode, page_mnode;
	int		szc;
	spgcnt_t	npgs, pg_cnt;
	pfn_t		pfnum;
	int		mtype;
	int		flags = 0;
	lgrp_mnode_cookie_t	lgrp_cookie;
	lgrp_t		*lgrp;

	REPL_STAT_INCR(ngets);
	like_pp = orig_like_pp;
	ASSERT(PAGE_EXCL(like_pp));

	szc = like_pp->p_szc;
	npgs = page_get_pagecnt(szc);
	/*
	 * Now we reset like_pp to the base page_t.
	 * That way, we won't walk past the end of this 'szc' page.
	 */
	pfnum = PFN_BASE(like_pp->p_pagenum, szc);
	like_pp = page_numtopp_nolock(pfnum);
	ASSERT(like_pp->p_szc == szc);

	if (PP_ISNORELOC(like_pp)) {
		ASSERT(kcage_on);
		REPL_STAT_INCR(ngets_noreloc);
		flags = PGI_RELOCONLY;
	} else if (pgrflags & PGR_NORELOC) {
		ASSERT(kcage_on);
		REPL_STAT_INCR(npgr_noreloc);
		flags = PG_NORELOC;
	}

	/*
	 * Kernel pages must always be replaced with the same size
	 * pages, since we cannot properly handle demotion of kernel
	 * pages.
	 */
	if (PP_ISKAS(like_pp))
		pgrflags |= PGR_SAMESZC;

	/* LINTED */
	MTYPE_PGR_INIT(mtype, flags, like_pp, page_mnode, npgs);

	while (npgs) {
		pplist = NULL;
		for (;;) {
			pg_cnt = page_get_pagecnt(szc);
			bin = PP_2_BIN(like_pp);
			ASSERT(like_pp->p_szc == orig_like_pp->p_szc);
			ASSERT(pg_cnt <= npgs);

			/*
			 * If an lgroup was specified, try to get the
			 * page from that lgroup.
			 * NOTE: Must be careful with code below because
			 * lgroup may disappear and reappear since there
			 * is no locking for lgroup here.
			 */
			if (LGRP_EXISTS(lgrp_target)) {
				/*
				 * Keep local variable for lgroup separate
				 * from lgroup argument since this code should
				 * only be exercised when lgroup argument
				 * exists.
				 */
				lgrp = lgrp_target;

				/* Try the lgroup's freelists first */
				LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp,
				    LGRP_SRCH_LOCAL);
				while ((pplist == NULL) &&
				    (mnode = lgrp_memnode_choose(&lgrp_cookie))
				    != -1) {
					pplist =
					    page_get_mnode_freelist(mnode, bin,
					    mtype, szc, flags);
				}

				/*
				 * Now try its cachelists if this is a
				 * small page. Don't need to do it for
				 * larger ones since page_freelist_coalesce()
				 * already failed.
				 */
				if (pplist != NULL || szc != 0)
					break;

				/* Now try its cachelists */
				LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp,
				    LGRP_SRCH_LOCAL);

				while ((pplist == NULL) &&
				    (mnode = lgrp_memnode_choose(&lgrp_cookie))
				    != -1) {
					pplist =
					    page_get_mnode_cachelist(bin, flags,
					    mnode, mtype);
				}
				if (pplist != NULL) {
					page_hashout(pplist, NULL);
					PP_SETAGED(pplist);
					REPL_STAT_INCR(nhashout);
					break;
				}
				/* Done looking in this lgroup. Bail out. */
				break;
			}

			/*
			 * No lgroup was specified (or the lgroup was removed
			 * by DR), so just try to get the page as close to
			 * like_pp's mnode as possible.
			 * First try the local freelist...
			 */
			mnode = PP_2_MEM_NODE(like_pp);
			pplist = page_get_mnode_freelist(mnode, bin,
			    mtype, szc, flags);
			if (pplist != NULL)
				break;

			REPL_STAT_INCR(nnofree);

			/*
			 * ...then the local cachelist. Don't need to do it for
			 * larger pages because page_freelist_coalesce()
			 * already failed there anyway.
			 */
			if (szc == 0) {
				pplist = page_get_mnode_cachelist(bin, flags,
				    mnode, mtype);
				if (pplist != NULL) {
					page_hashout(pplist, NULL);
					PP_SETAGED(pplist);
					REPL_STAT_INCR(nhashout);
					break;
				}
			}

			/* Now try remote freelists */
			page_mnode = mnode;
			lgrp =
			    lgrp_hand_to_lgrp(MEM_NODE_2_LGRPHAND(page_mnode));
			LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp,
			    LGRP_SRCH_HIER);
			while (pplist == NULL &&
			    (mnode = lgrp_memnode_choose(&lgrp_cookie))
			    != -1) {
				/*
				 * Skip local mnode.
				 */
				if ((mnode == page_mnode) ||
				    (mem_node_config[mnode].exists == 0))
					continue;

				pplist = page_get_mnode_freelist(mnode,
				    bin, mtype, szc, flags);
			}

			if (pplist != NULL)
				break;

			/* Now try remote cachelists */
			LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp,
			    LGRP_SRCH_HIER);
			while (pplist == NULL && szc == 0) {
				mnode = lgrp_memnode_choose(&lgrp_cookie);
				if (mnode == -1)
					break;
				/*
				 * Skip local mnode.
				 */
				if ((mnode == page_mnode) ||
				    (mem_node_config[mnode].exists == 0))
					continue;

				pplist = page_get_mnode_cachelist(bin,
				    flags, mnode, mtype);

				if (pplist != NULL) {
					page_hashout(pplist, NULL);
					PP_SETAGED(pplist);
					REPL_STAT_INCR(nhashout);
					break;
				}
			}

			/*
			 * Break out of the inner loop under the following
			 * cases:
			 * - If we successfully got a page.
			 * - If pgrflags specified only returning a specific
			 *   page size and we could not find that page size.
			 * - If we could not satisfy the request with PAGESIZE
			 *   or larger pages.
			 */
			if (pplist != NULL || szc == 0)
				break;

			if ((pgrflags & PGR_SAMESZC) || pgrppgcp) {
				/* try to find contig page */

				LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp,
				    LGRP_SRCH_HIER);

				while ((pplist == NULL) &&
				    (mnode =
				    lgrp_memnode_choose(&lgrp_cookie))
				    != -1) {
					pplist = page_get_contig_pages(
					    mnode, bin, mtype, szc,
					    flags | PGI_PGCPHIPRI);
				}
				break;
			}

			/*
			 * The correct thing to do here is try the next
			 * page size down using szc--. Due to a bug
			 * with the processing of HAT_RELOAD_SHARE
			 * where the sfmmu_ttecnt arrays of all
			 * hats sharing an ISM segment don't get updated,
			 * using intermediate size pages for relocation
			 * can lead to continuous page faults.
			 */
			szc = 0;
		}

		if (pplist != NULL) {
			DTRACE_PROBE4(page__get,
			    lgrp_t *, lgrp,
			    int, mnode,
			    ulong_t, bin,
			    uint_t, flags);

			while (pplist != NULL && pg_cnt--) {
				ASSERT(pplist != NULL);
				pp = pplist;
				page_sub(&pplist, pp);
				PP_CLRFREE(pp);
				PP_CLRAGED(pp);
				page_list_concat(&pl, &pp);
				npgs--;
				like_pp = like_pp + 1;
				REPL_STAT_INCR(nnext_pp);
			}
			ASSERT(pg_cnt == 0);
		} else {
			break;
		}
	}

	if (npgs) {
		/*
		 * We were unable to allocate the necessary number
		 * of pages.
		 * We need to free up any pl.
		 */
		REPL_STAT_INCR(nnopage);
		page_free_replacement_page(pl);
		return (NULL);
	} else {
		return (pl);
	}
}
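/*
 * Illustrative sketch (not part of the original source): a typical use
 * of page_get_replacement_page() when relocating a page.  The caller
 * must hold 'targ' SE_EXCL and do its own freemem accounting; the
 * helper name below is hypothetical.
 */
#if 0	/* example only, not compiled */
static page_t *
example_get_repl(page_t *targ)
{
	page_t *repl;

	ASSERT(PAGE_EXCL(targ));

	/* no lgroup preference, no special replacement constraints */
	repl = page_get_replacement_page(targ, NULL, 0);
	if (repl == NULL)
		return (NULL);	/* caller falls back or fails */

	/*
	 * repl is a list of page_get_pagecnt(targ->p_szc) pages: either
	 * one page of the same size or discontiguous PAGESIZE pages.
	 */
	return (repl);
}
#endif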
/*
 * Demote a free large page to its constituent pages.
 */
void
page_demote_free_pages(page_t *pp)
{
	int mnode;

	ASSERT(pp != NULL);
	ASSERT(PAGE_LOCKED(pp));
	ASSERT(PP_ISFREE(pp));
	ASSERT(pp->p_szc != 0 && pp->p_szc < mmu_page_sizes);

	mnode = PP_2_MEM_NODE(pp);
	page_freelist_lock(mnode);
	if (pp->p_szc != 0) {
		(void) page_demote(mnode, PFN_BASE(pp->p_pagenum,
		    pp->p_szc), 0, pp->p_szc, 0, PC_NO_COLOR, PC_FREE);
	}
	page_freelist_unlock(mnode);
	ASSERT(pp->p_szc == 0);
}

/*
 * Factor in colorequiv to check additional 'equivalent' bins.
 * colorequiv may be set in /etc/system.
 */
void
page_set_colorequiv_arr(void)
{
	if (colorequiv > 1) {
		int i;
		uint_t sv_a = lowbit(colorequiv) - 1;

		if (sv_a > 15)
			sv_a = 15;

		for (i = 0; i < MMU_PAGE_SIZES; i++) {
			uint_t colors;
			uint_t a = sv_a;

			if ((colors = hw_page_array[i].hp_colors) <= 1) {
				continue;
			}
			while ((colors >> a) == 0)
				a--;
			if ((a << 4) > colorequivszc[i]) {
				colorequivszc[i] = (a << 4);
			}
		}
	}
}
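/*
 * Worked example (not part of the original source): with colorequiv set
 * to 4 in /etc/system, sv_a = lowbit(4) - 1 = 2.  For a page size with
 * hw_page_array[i].hp_colors == 16, (16 >> 2) is nonzero, so a stays 2
 * and colorequivszc[i] becomes (2 << 4) == 0x20, i.e. the top nibble
 * tells the page_get routines to treat colors that differ only in their
 * two high-order color bits as equivalent.
 */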