/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/buf.h>
#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/disp.h>
#include <sys/vm.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cred.h>
#include <sys/exec.h>
#include <sys/exechdr.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_vn.h>
#include <vm/page.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>

#include <sys/cpu.h>
#include <sys/vm_machparam.h>
#include <sys/memlist.h>
#include <sys/bootconf.h> /* XXX the memlist stuff belongs in memlist_plat.h */
#include <vm/hat_i86.h>
#include <sys/x86_archext.h>
#include <sys/elf_386.h>
#include <sys/cmn_err.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>

#include <sys/vtrace.h>
#include <sys/ddidmareq.h>
#include <sys/promif.h>
#include <sys/memnode.h>
#include <sys/stack.h>
#include <util/qsort.h>
#include <sys/taskq.h>

#ifdef __xpv

#include <sys/hypervisor.h>
#include <sys/xen_mmu.h>
#include <sys/balloon_impl.h>

/*
 * domain 0 pages usable for DMA are pre-allocated and kept in
 * distinct lists, ordered by increasing mfn.
 */
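/* io_pool_lock protects the two I/O pool lists and their counters below. */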
static kmutex_t io_pool_lock;
static page_t *io_pool_4g;	/* pool for 32 bit dma limited devices */
static page_t *io_pool_16m;	/* pool for 24 bit dma limited legacy devices */
static long io_pool_cnt;
static long io_pool_cnt_max = 0;
#define	DEFAULT_IO_POOL_MIN	128
static long io_pool_cnt_min = DEFAULT_IO_POOL_MIN;
static long io_pool_cnt_lowater = 0;
static long io_pool_shrink_attempts; /* how many times did we try to shrink */
static long io_pool_shrinks;	/* how many times did we really shrink */
static long io_pool_grows;	/* how many times did we grow */
static mfn_t start_mfn = 1;
static caddr_t io_pool_kva;	/* use to alloc pages when needed */

static int create_contig_pfnlist(uint_t);

/*
 * percentage of phys mem to hold in the i/o pool
 */
#define	DEFAULT_IO_POOL_PCT	2
static long io_pool_physmem_pct = DEFAULT_IO_POOL_PCT;
static void page_io_pool_sub(page_t **, page_t *, page_t *);

#endif /* __xpv */

uint_t vac_colors = 1;

int largepagesupport = 0;
extern uint_t page_create_new;
extern uint_t page_create_exists;
extern uint_t page_create_putbacks;
/*
 * Allow users to disable the kernel's use of SSE.
 */
extern int use_sse_pagecopy, use_sse_pagezero;

/*
 * combined memory ranges from mnode and memranges[] to manage single
 * mnode/mtype dimension in the page lists.
 */
typedef struct {
	pfn_t	mnr_pfnlo;
	pfn_t	mnr_pfnhi;
	int	mnr_mnode;
	int	mnr_memrange;		/* index into memranges[] */
	/* maintain page list stats */
	pgcnt_t	mnr_mt_clpgcnt;		/* cache list cnt */
	pgcnt_t	mnr_mt_flpgcnt[MMU_PAGE_SIZES];	/* free list cnt per szc */
	pgcnt_t	mnr_mt_totcnt;		/* sum of cache and free lists */
#ifdef DEBUG
	struct mnr_mts {		/* mnode/mtype szc stats */
		pgcnt_t	mnr_mts_pgcnt;
		int	mnr_mts_colors;
		pgcnt_t	*mnr_mtsc_pgcnt;
	}	*mnr_mts;
#endif
} mnoderange_t;

#define	MEMRANGEHI(mtype)						\
	((mtype > 0) ? memranges[mtype - 1] - 1: physmax)
#define	MEMRANGELO(mtype)	(memranges[mtype])

#define	MTYPE_FREEMEM(mt)	(mnoderanges[mt].mnr_mt_totcnt)

/*
 * As the PC architecture evolved, memory was clumped into several
 * ranges so that various historical I/O devices could do DMA.
 * < 16Meg - ISA bus
 * < 2Gig - ???
 * < 4Gig - PCI bus or drivers that don't understand PAE mode
 *
 * These are listed in reverse order, so that we can skip over unused
 * ranges on machines with small memories.
 *
 * For now under the Hypervisor, we'll only ever have one memrange.
 */
#define	PFN_4GIG	0x100000
#define	PFN_16MEG	0x1000
static pfn_t arch_memranges[NUM_MEM_RANGES] = {
	PFN_4GIG,	/* pfn range for 4G and above */
	0x80000,	/* pfn range for 2G-4G */
	PFN_16MEG,	/* pfn range for 16M-2G */
	0x00000,	/* pfn range for 0-16M */
};
pfn_t *memranges = &arch_memranges[0];
int nranges = NUM_MEM_RANGES;

/*
 * This combines mem_node_config and memranges into one data
 * structure to be used for page list management.
 */
mnoderange_t	*mnoderanges;
int		mnoderangecnt;
int		mtype4g;

/*
 * 4g memory management variables for systems with more than 4g of memory:
 *
 * physical memory below 4g is required for 32bit dma devices and, currently,
 * for kmem memory.  On systems with more than 4g of memory, the pool of memory
 * below 4g can be depleted without any paging activity given that there is
 * likely to be sufficient memory above 4g.
 *
 * physmax4g is set true if the largest pfn is over 4g. The rest of the
 * 4g memory management code is enabled only when physmax4g is true.
 *
 * maxmem4g is the count of the maximum number of pages on the page lists
 * with physical addresses below 4g. It can be a lot less than 4g given that
 * BIOS may reserve large chunks of space below 4g for hot plug pci devices,
 * agp aperture etc.
 *
 * freemem4g maintains the count of the number of available pages on the
 * page lists with physical addresses below 4g.
 *
 * DESFREE4G specifies the desired amount of below 4g memory. It defaults to
 * 6% (desfree4gshift = 4) of maxmem4g.
 *
 * RESTRICT4G_ALLOC returns true if freemem4g falls below DESFREE4G
 * and the amount of physical memory above 4g is greater than freemem4g.
 * In this case, page_get_* routines will restrict below 4g allocations
 * for requests that don't specifically require it.
 */

#define	LOTSFREE4G	(maxmem4g >> lotsfree4gshift)
#define	DESFREE4G	(maxmem4g >> desfree4gshift)

#define	RESTRICT4G_ALLOC					\
	(physmax4g && (freemem4g < DESFREE4G) && ((freemem4g << 1) < freemem))

static pgcnt_t	maxmem4g;
static pgcnt_t	freemem4g;
static int	physmax4g;
static int	desfree4gshift = 4;	/* maxmem4g shift to derive DESFREE4G */
static int	lotsfree4gshift = 3;

/*
 * 16m memory management:
 *
 * reserve some amount of physical memory below 16m for legacy devices.
 *
 * RESTRICT16M_ALLOC returns true if there are sufficient free pages above
 * 16m or if the 16m pool drops below DESFREE16M.
 *
 * In this case, general page allocations via page_get_{free,cache}list
 * routines will be restricted from allocating from the 16m pool. Allocations
 * that require specific pfn ranges (page_get_anylist) and PG_PANIC allocations
 * are not restricted.
 */

#define	FREEMEM16M	MTYPE_FREEMEM(0)
#define	DESFREE16M	desfree16m
#define	RESTRICT16M_ALLOC(freemem, pgcnt, flags)		\
	((freemem != 0) && ((flags & PG_PANIC) == 0) &&		\
	    ((freemem >= (FREEMEM16M)) ||			\
	    (FREEMEM16M < (DESFREE16M + pgcnt))))

static pgcnt_t	desfree16m = 0x380;

/*
 * This can be patched via /etc/system to allow old non-PAE aware device
 * drivers to use kmem_alloc'd memory on 32 bit systems with > 4Gig RAM.
 */
int restricted_kmemalloc = 0;

#ifdef VM_STATS
struct {
	ulong_t	pga_alloc;
	ulong_t	pga_notfullrange;
	ulong_t	pga_nulldmaattr;
	ulong_t	pga_allocok;
	ulong_t	pga_allocfailed;
	ulong_t	pgma_alloc;
	ulong_t	pgma_allocok;
	ulong_t	pgma_allocfailed;
	ulong_t	pgma_allocempty;
} pga_vmstats;
#endif

uint_t mmu_page_sizes;

/* How many page sizes the users can see */
uint_t mmu_exported_page_sizes;

/* page sizes that legacy applications can see */
uint_t mmu_legacy_page_sizes;

/*
 * Number of pages in 1 GB.  Don't enable automatic large pages if we have
 * fewer than this many pages.
 */
pgcnt_t shm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
pgcnt_t privm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);

/*
 * Maximum and default segment size tunables for user private
 * and shared anon memory, and user text and initialized data.
 * These can be patched via /etc/system to allow large pages
 * to be used for mapping application private and shared anon memory.
 */
size_t mcntl0_lpsize = MMU_PAGESIZE;
size_t max_uheap_lpsize = MMU_PAGESIZE;
size_t default_uheap_lpsize = MMU_PAGESIZE;
size_t max_ustack_lpsize = MMU_PAGESIZE;
size_t default_ustack_lpsize = MMU_PAGESIZE;
size_t max_privmap_lpsize = MMU_PAGESIZE;
size_t max_uidata_lpsize = MMU_PAGESIZE;
size_t max_utext_lpsize = MMU_PAGESIZE;
size_t max_shm_lpsize = MMU_PAGESIZE;


/*
 * initialized by page_coloring_init().
 */
uint_t	page_colors;
uint_t	page_colors_mask;
uint_t	page_coloring_shift;
int	cpu_page_colors;
static uint_t	l2_colors;

/*
 * Page freelists and cachelists are dynamically allocated once mnoderangecnt
 * and page_colors are calculated from the l2 cache n-way set size.  Within a
 * mnode range, the page freelist and cachelist are hashed into bins based on
 * color. This makes it easier to search for a page within a specific memory
 * range.
 */
#define	PAGE_COLORS_MIN	16

page_t ****page_freelists;
page_t ***page_cachelists;


/*
 * Used by page layer to know about page sizes
 */
hw_pagesize_t hw_page_array[MAX_NUM_LEVEL + 1];

kmutex_t	*fpc_mutex[NPC_MUTEX];
kmutex_t	*cpc_mutex[NPC_MUTEX];

/*
 * Only let one thread at a time try to coalesce large pages, to
 * prevent them from working against each other.
 */
static kmutex_t	contig_lock;
#define	CONTIG_LOCK()	mutex_enter(&contig_lock);
#define	CONTIG_UNLOCK()	mutex_exit(&contig_lock);

#define	PFN_16M		(mmu_btop((uint64_t)0x1000000))

/*
 * Return the optimum page size for a given mapping
 */
/*ARGSUSED*/
size_t
map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len, int memcntl)
{
	level_t l = 0;
	size_t pgsz = MMU_PAGESIZE;
	size_t max_lpsize;
	uint_t mszc;

	ASSERT(maptype != MAPPGSZ_VA);

	if (maptype != MAPPGSZ_ISM && physmem < privm_lpg_min_physmem) {
		return (MMU_PAGESIZE);
	}

	switch (maptype) {
	case MAPPGSZ_HEAP:
	case MAPPGSZ_STK:
		max_lpsize = memcntl ? mcntl0_lpsize : (maptype ==
		    MAPPGSZ_HEAP ? max_uheap_lpsize : max_ustack_lpsize);
		if (max_lpsize == MMU_PAGESIZE) {
			return (MMU_PAGESIZE);
		}
		if (len == 0) {
			len = (maptype == MAPPGSZ_HEAP) ? p->p_brkbase +
			    p->p_brksize - p->p_bssbase : p->p_stksize;
		}
		len = (maptype == MAPPGSZ_HEAP) ? MAX(len,
		    default_uheap_lpsize) : MAX(len, default_ustack_lpsize);

		/*
		 * use the page size that best fits len
		 */
		for (l = mmu.umax_page_level; l > 0; --l) {
			if (LEVEL_SIZE(l) > max_lpsize || len < LEVEL_SIZE(l)) {
				continue;
			} else {
				pgsz = LEVEL_SIZE(l);
			}
			break;
		}

		mszc = (maptype == MAPPGSZ_HEAP ? p->p_brkpageszc :
		    p->p_stkpageszc);
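		/*
		 * With no specific address hint, don't pick a size smaller
		 * than the segment's existing page size code.
		 */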
		if (addr == 0 && (pgsz < hw_page_array[mszc].hp_size)) {
			pgsz = hw_page_array[mszc].hp_size;
		}
		return (pgsz);

	case MAPPGSZ_ISM:
		for (l = mmu.umax_page_level; l > 0; --l) {
			if (len >= LEVEL_SIZE(l))
				return (LEVEL_SIZE(l));
		}
		return (LEVEL_SIZE(0));
	}
	return (pgsz);
}

static uint_t
map_szcvec(caddr_t addr, size_t size, uintptr_t off, size_t max_lpsize,
    size_t min_physmem)
{
	caddr_t eaddr = addr + size;
	uint_t szcvec = 0;
	caddr_t raddr;
	caddr_t readdr;
	size_t	pgsz;
	int i;

	if (physmem < min_physmem || max_lpsize <= MMU_PAGESIZE) {
		return (0);
	}

	for (i = mmu_exported_page_sizes - 1; i > 0; i--) {
		pgsz = page_get_pagesize(i);
		if (pgsz > max_lpsize) {
			continue;
		}
		raddr = (caddr_t)P2ROUNDUP((uintptr_t)addr, pgsz);
		readdr = (caddr_t)P2ALIGN((uintptr_t)eaddr, pgsz);
		if (raddr < addr || raddr >= readdr) {
			continue;
		}
		if (P2PHASE((uintptr_t)addr ^ off, pgsz)) {
			continue;
		}
		/*
		 * Set szcvec to the remaining page sizes.
		 */
		szcvec = ((1 << (i + 1)) - 1) & ~1;
		break;
	}
	return (szcvec);
}

/*
 * Return a bit vector of large page size codes that
 * can be used to map [addr, addr + len) region.
 */
/*ARGSUSED*/
uint_t
map_pgszcvec(caddr_t addr, size_t size, uintptr_t off, int flags, int type,
    int memcntl)
{
	size_t max_lpsize = mcntl0_lpsize;

	if (mmu.max_page_level == 0)
		return (0);

	if (flags & MAP_TEXT) {
		if (!memcntl)
			max_lpsize = max_utext_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    shm_lpg_min_physmem));

	} else if (flags & MAP_INITDATA) {
		if (!memcntl)
			max_lpsize = max_uidata_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    privm_lpg_min_physmem));

	} else if (type == MAPPGSZC_SHM) {
		if (!memcntl)
			max_lpsize = max_shm_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    shm_lpg_min_physmem));

	} else if (type == MAPPGSZC_HEAP) {
		if (!memcntl)
			max_lpsize = max_uheap_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    privm_lpg_min_physmem));

	} else if (type == MAPPGSZC_STACK) {
		if (!memcntl)
			max_lpsize = max_ustack_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    privm_lpg_min_physmem));

	} else {
		if (!memcntl)
			max_lpsize = max_privmap_lpsize;
		return (map_szcvec(addr, size, off, max_lpsize,
		    privm_lpg_min_physmem));
	}
}

/*
 * Handle a pagefault.
 */
faultcode_t
pagefault(
	caddr_t addr,
	enum fault_type type,
	enum seg_rw rw,
	int iskernel)
{
	struct as *as;
	struct hat *hat;
	struct proc *p;
	kthread_t *t;
	faultcode_t res;
	caddr_t base;
	size_t len;
	int err;
	int mapped_red;
	uintptr_t ea;

	ASSERT_STACK_ALIGNED();

	if (INVALID_VADDR(addr))
		return (FC_NOMAP);

	mapped_red = segkp_map_red();

	if (iskernel) {
		as = &kas;
		hat = as->a_hat;
	} else {
		t = curthread;
		p = ttoproc(t);
		as = p->p_as;
		hat = as->a_hat;
	}

	/*
	 * Dispatch pagefault.
	 */
	res = as_fault(hat, as, addr, 1, type, rw);

	/*
	 * If this isn't a potential unmapped hole in the user's
	 * UNIX data or stack segments, just return status info.
	 */
	if (res != FC_NOMAP || iskernel)
		goto out;

	/*
	 * Check to see if we happened to fault on a currently unmapped
	 * part of the UNIX data or stack segments.  If so, create a zfod
	 * mapping there and then try calling the fault routine again.
	 */
	base = p->p_brkbase;
	len = p->p_brksize;

	if (addr < base || addr >= base + len) {	/* data seg? */
		base = (caddr_t)p->p_usrstack - p->p_stksize;
		len = p->p_stksize;
		if (addr < base || addr >= p->p_usrstack) {	/* stack seg? */
			/* not in either UNIX data or stack segments */
			res = FC_NOMAP;
			goto out;
		}
	}

	/*
	 * The rest of this function implements 3.X, 4.X and 5.X compatibility.
	 * This code is probably not needed anymore.
	 */
	if (p->p_model == DATAMODEL_ILP32) {

		/* expand the gap to the page boundaries on each side */
		ea = P2ROUNDUP((uintptr_t)base + len, MMU_PAGESIZE);
		base = (caddr_t)P2ALIGN((uintptr_t)base, MMU_PAGESIZE);
		len = ea - (uintptr_t)base;

		as_rangelock(as);
		if (as_gap(as, MMU_PAGESIZE, &base, &len, AH_CONTAIN, addr) ==
		    0) {
			err = as_map(as, base, len, segvn_create, zfod_argsp);
			as_rangeunlock(as);
			if (err) {
				res = FC_MAKE_ERR(err);
				goto out;
			}
		} else {
			/*
			 * This page is already mapped by another thread after
			 * we returned from as_fault() above.  We just fall
			 * through to as_fault() below.
			 */
			as_rangeunlock(as);
		}

		res = as_fault(hat, as, addr, 1, F_INVAL, rw);
	}

out:
	if (mapped_red)
		segkp_unmap_red();

	return (res);
}

void
map_addr(caddr_t *addrp, size_t len, offset_t off, int vacalign, uint_t flags)
{
	struct proc *p = curproc;
	caddr_t userlimit = (flags & _MAP_LOW32) ?
	    (caddr_t)_userlimit32 : p->p_as->a_userlimit;

	map_addr_proc(addrp, len, off, vacalign, userlimit, curproc, flags);
}

/*ARGSUSED*/
int
map_addr_vacalign_check(caddr_t addr, u_offset_t off)
{
	return (0);
}

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user.  We will pick an address
 * range which is the highest available below userlimit.
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion.  We decide to completely ignore this hint.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space or else an address that is currently
 *	not mapped for len bytes with a page of red zone on either side.
 *
 * vacalign is not needed on x86 (it's for virtually addressed caches).
 */
/*ARGSUSED*/
void
map_addr_proc(
	caddr_t *addrp,
	size_t len,
	offset_t off,
	int vacalign,
	caddr_t userlimit,
	struct proc *p,
	uint_t flags)
{
	struct as *as = p->p_as;
	caddr_t addr;
	caddr_t base;
	size_t slen;
	size_t align_amount;

	ASSERT32(userlimit == as->a_userlimit);

	base = p->p_brkbase;
#if defined(__amd64)
	/*
	 * XX64 Yes, this needs more work.
	 */
	if (p->p_model == DATAMODEL_NATIVE) {
		if (userlimit < as->a_userlimit) {
			/*
			 * This happens when a program wants to map
			 * something in a range that's accessible to a
			 * program in a smaller address space.  For example,
			 * a 64-bit program calling mmap32(2) to guarantee
			 * that the returned address is below 4Gbytes.
			 */
			ASSERT((uintptr_t)userlimit < ADDRESS_C(0xffffffff));

			if (userlimit > base)
				slen = userlimit - base;
			else {
				*addrp = NULL;
				return;
			}
		} else {
			/*
			 * XX64 This layout is probably wrong .. but in
			 * the event we make the amd64 address space look
			 * like sparcv9 i.e. with the stack -above- the
			 * heap, this bit of code might even be correct.
			 */
			slen = p->p_usrstack - base -
			    (((size_t)rctl_enforced_value(
			    rctlproc_legacy[RLIMIT_STACK],
			    p->p_rctls, p) + PAGEOFFSET) & PAGEMASK);
		}
	} else
#endif
		slen = userlimit - base;

	len = (len + PAGEOFFSET) & PAGEMASK;

	/*
	 * Redzone for each side of the request. This is done to leave
	 * one page unmapped between segments. This is not required, but
	 * it's useful for the user because if their program strays across
	 * a segment boundary, it will catch a fault immediately making
	 * debugging a little easier.
	 */
	len += 2 * MMU_PAGESIZE;

	/*
	 * figure out what the alignment should be
	 *
	 * XX64 -- is there an ELF_AMD64_MAXPGSZ or is it the same????
	 */
	if (len <= ELF_386_MAXPGSZ) {
		/*
		 * Align virtual addresses to ensure that ELF shared libraries
		 * are mapped with the appropriate alignment constraints by
		 * the run-time linker.
		 */
		align_amount = ELF_386_MAXPGSZ;
	} else {
		int l = mmu.umax_page_level;

		while (l && len < LEVEL_SIZE(l))
			--l;

		align_amount = LEVEL_SIZE(l);
	}

	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount))
		align_amount = (uintptr_t)*addrp;

	len += align_amount;

	/*
	 * Look for a large enough hole starting below userlimit.
	 * After finding it, use the upper part.  Addition of PAGESIZE
	 * is for the redzone as described above.
	 */
	if (as_gap(as, len, &base, &slen, AH_HI, NULL) == 0) {
		caddr_t as_addr;

		addr = base + slen - len + MMU_PAGESIZE;
		as_addr = addr;
		/*
		 * Round address DOWN to the alignment amount,
		 * add the offset, and if this address is less
		 * than the original address, add alignment amount.
		 */
		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1)));
		addr += (uintptr_t)(off & (align_amount - 1));
		if (addr < as_addr)
			addr += align_amount;

		ASSERT(addr <= (as_addr + align_amount));
		ASSERT(((uintptr_t)addr & (align_amount - 1)) ==
		    ((uintptr_t)(off & (align_amount - 1))));
		*addrp = addr;
	} else {
		*addrp = NULL;	/* no more virtual space */
	}
}

/*
 * Determine whether [base, base+len] contains a valid range of
 * addresses at least minlen long. base and len are adjusted if
 * required to provide a valid range.
 */
/*ARGSUSED3*/
int
valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
{
	uintptr_t hi, lo;

	lo = (uintptr_t)*basep;
	hi = lo + *lenp;

	/*
	 * If hi rolled over the top, try cutting back.
	 */
	if (hi < lo) {
		if (0 - lo + hi < minlen)
			return (0);
		if (0 - lo < minlen)
			return (0);
		*lenp = 0 - lo;
	} else if (hi - lo < minlen) {
		return (0);
	}
#if defined(__amd64)
	/*
	 * Deal with a possible hole in the address range between
	 * hole_start and hole_end that should never be mapped.
	 */
	if (lo < hole_start) {
		if (hi > hole_start) {
			if (hi < hole_end) {
				hi = hole_start;
			} else {
				/* lo < hole_start && hi >= hole_end */
				if (dir == AH_LO) {
					/*
					 * prefer lowest range
					 */
					if (hole_start - lo >= minlen)
						hi = hole_start;
					else if (hi - hole_end >= minlen)
						lo = hole_end;
					else
						return (0);
				} else {
					/*
					 * prefer highest range
					 */
					if (hi - hole_end >= minlen)
						lo = hole_end;
					else if (hole_start - lo >= minlen)
						hi = hole_start;
					else
						return (0);
				}
			}
		}
	} else {
		/* lo >= hole_start */
		if (hi < hole_end)
			return (0);
		if (lo < hole_end)
			lo = hole_end;
	}

	if (hi - lo < minlen)
		return (0);

	*basep = (caddr_t)lo;
	*lenp = hi - lo;
#endif
	return (1);
}

/*
 * Determine whether [addr, addr+len] are valid user addresses.
 */
/*ARGSUSED*/
int
valid_usr_range(caddr_t addr, size_t len, uint_t prot, struct as *as,
    caddr_t userlimit)
{
	caddr_t eaddr = addr + len;

	if (eaddr <= addr || addr >= userlimit || eaddr > userlimit)
		return (RANGE_BADADDR);

#if defined(__amd64)
	/*
	 * Check for the VA hole
	 */
	if (eaddr > (caddr_t)hole_start && addr < (caddr_t)hole_end)
		return (RANGE_BADADDR);
#endif

	return (RANGE_OKAY);
}

/*
 * Return 1 if the page frame is onboard memory, else 0.
 */
int
pf_is_memory(pfn_t pf)
{
	if (pfn_is_foreign(pf))
		return (0);
	return (address_in_memlist(phys_install, pfn_to_pa(pf), 1));
}

/*
 * return the memrange containing pfn
 */
int
memrange_num(pfn_t pfn)
{
	int n;

	for (n = 0; n < nranges - 1; ++n) {
		if (pfn >= memranges[n])
			break;
	}
	return (n);
}

/*
 * return the mnoderange containing pfn
 */
/*ARGSUSED*/
int
pfn_2_mtype(pfn_t pfn)
{
#if defined(__xpv)
	return (0);
#else
	int	n;

	for (n = mnoderangecnt - 1; n >= 0; n--) {
		if (pfn >= mnoderanges[n].mnr_pfnlo) {
			break;
		}
	}
	return (n);
#endif
}

#if !defined(__xpv)
/*
 * is_contigpage_free:
 *	returns a page list of contiguous pages. It minimally has to return
 *	minctg pages. Caller determines minctg based on the scatter-gather
 *	list length.
 *
 *	pfnp is set to the next page frame to search on return.
 */
static page_t *
is_contigpage_free(
	pfn_t *pfnp,
	pgcnt_t *pgcnt,
	pgcnt_t minctg,
	uint64_t pfnseg,
	int iolock)
{
	int	i = 0;
	pfn_t	pfn = *pfnp;
	page_t	*pp;
	page_t	*plist = NULL;

	/*
	 * fail if pfn + minctg crosses a segment boundary.
	 * Adjust for next starting pfn to begin at segment boundary.
	 */

	if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg)) {
		*pfnp = roundup(*pfnp, pfnseg + 1);
		return (NULL);
	}

	do {
retry:
		pp = page_numtopp_nolock(pfn + i);
		if ((pp == NULL) ||
		    (page_trylock(pp, SE_EXCL) == 0)) {
			(*pfnp)++;
			break;
		}
		if (page_pptonum(pp) != pfn + i) {
			page_unlock(pp);
			goto retry;
		}

		if (!(PP_ISFREE(pp))) {
			page_unlock(pp);
			(*pfnp)++;
			break;
		}

		if (!PP_ISAGED(pp)) {
			page_list_sub(pp, PG_CACHE_LIST);
			page_hashout(pp, (kmutex_t *)NULL);
		} else {
			page_list_sub(pp, PG_FREE_LIST);
		}

		if (iolock)
			page_io_lock(pp);
		page_list_concat(&plist, &pp);

		/*
		 * exit loop when pgcnt satisfied or segment boundary reached.
		 */

	} while ((++i < *pgcnt) && ((pfn + i) & pfnseg));

	*pfnp += i;		/* set to next pfn to search */

	if (i >= minctg) {
		*pgcnt -= i;
		return (plist);
	}

	/*
	 * failure: minctg not satisfied.
	 *
	 * if next request crosses segment boundary, set next pfn
	 * to search from the segment boundary.
	 */
	if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg))
		*pfnp = roundup(*pfnp, pfnseg + 1);

	/* clean up any pages already allocated */

	while (plist) {
		pp = plist;
		page_sub(&plist, pp);
		page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
		if (iolock)
			page_io_unlock(pp);
		page_unlock(pp);
	}

	return (NULL);
}
#endif	/* !__xpv */

/*
 * verify that pages being returned from allocator have correct DMA attribute
 */
#ifndef DEBUG
#define	check_dma(a, b, c) (0)
#else
static void
check_dma(ddi_dma_attr_t *dma_attr, page_t *pp, int cnt)
{
	if (dma_attr == NULL)
		return;

	while (cnt-- > 0) {
		if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) <
		    dma_attr->dma_attr_addr_lo)
			panic("PFN (pp=%p) below dma_attr_addr_lo", pp);
		if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) >=
		    dma_attr->dma_attr_addr_hi)
			panic("PFN (pp=%p) above dma_attr_addr_hi", pp);
		pp = pp->p_next;
	}
}
#endif

#if !defined(__xpv)
static page_t *
page_get_contigpage(pgcnt_t *pgcnt, ddi_dma_attr_t *mattr, int iolock)
{
	pfn_t		pfn;
	int		sgllen;
	uint64_t	pfnseg;
	pgcnt_t		minctg;
	page_t		*pplist = NULL, *plist;
	uint64_t	lo, hi;
	pgcnt_t		pfnalign = 0;
	static pfn_t	startpfn;
	static pgcnt_t	lastctgcnt;
	uintptr_t	align;

	CONTIG_LOCK();

	if (mattr) {
		lo = mmu_btop((mattr->dma_attr_addr_lo + MMU_PAGEOFFSET));
		hi = mmu_btop(mattr->dma_attr_addr_hi);
		if (hi >= physmax)
			hi = physmax - 1;
		sgllen = mattr->dma_attr_sgllen;
		pfnseg = mmu_btop(mattr->dma_attr_seg);

		align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
		if (align > MMU_PAGESIZE)
			pfnalign = mmu_btop(align);

		/*
		 * in order to satisfy the request, must minimally
		 * acquire minctg contiguous pages
		 */
		minctg = howmany(*pgcnt, sgllen);

		ASSERT(hi >= lo);

		/*
		 * start from where the last search left off if
		 * minctg >= lastctgcnt
		 */
		if (minctg < lastctgcnt || startpfn < lo || startpfn > hi)
			startpfn = lo;
	} else {
		hi = physmax - 1;
		lo = 0;
		sgllen = 1;
		pfnseg = mmu.highest_pfn;
		minctg = *pgcnt;

		if (minctg < lastctgcnt)
			startpfn = lo;
	}
	lastctgcnt = minctg;

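	/* the DMA segment must be able to hold at least minctg pages */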
	ASSERT(pfnseg + 1 >= (uint64_t)minctg);

	/* conserve 16m memory - start search above 16m when possible */
	if (hi > PFN_16M && startpfn < PFN_16M)
		startpfn = PFN_16M;

	pfn = startpfn;
	if (pfnalign)
		pfn = P2ROUNDUP(pfn, pfnalign);

	while (pfn + minctg - 1 <= hi) {

		plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
		if (plist) {
			page_list_concat(&pplist, &plist);
			sgllen--;
			/*
			 * return when contig pages no longer needed
			 */
			if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
				startpfn = pfn;
				CONTIG_UNLOCK();
				check_dma(mattr, pplist, *pgcnt);
				return (pplist);
			}
			minctg = howmany(*pgcnt, sgllen);
		}
		if (pfnalign)
			pfn = P2ROUNDUP(pfn, pfnalign);
	}

	/* cannot find contig pages in specified range */
	if (startpfn == lo) {
		CONTIG_UNLOCK();
		return (NULL);
	}

	/* did not start with lo previously */
	pfn = lo;
	if (pfnalign)
		pfn = P2ROUNDUP(pfn, pfnalign);

	/* allow search to go above startpfn */
	while (pfn < startpfn) {

		plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
		if (plist != NULL) {

			page_list_concat(&pplist, &plist);
			sgllen--;

			/*
			 * return when contig pages no longer needed
			 */
			if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
				startpfn = pfn;
				CONTIG_UNLOCK();
				check_dma(mattr, pplist, *pgcnt);
				return (pplist);
			}
			minctg = howmany(*pgcnt, sgllen);
		}
		if (pfnalign)
			pfn = P2ROUNDUP(pfn, pfnalign);
	}
	CONTIG_UNLOCK();
	return (NULL);
}
#endif	/* !__xpv */

/*
 * mnode_range_cnt() calculates the number of memory ranges for mnode and
 * memranges[]. Used to determine the size of page lists and mnoderanges.
 */
int
mnode_range_cnt(int mnode)
{
#if defined(__xpv)
	ASSERT(mnode == 0);
	return (1);
#else	/* __xpv */
	int	mri;
	int	mnrcnt = 0;

	if (mem_node_config[mnode].exists != 0) {
		mri = nranges - 1;

		/* find the memranges index containing the start of the mnode */

		while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
			mri--;

		/*
		 * increment mnode range counter when memranges or mnode
		 * boundary is reached.
		 */
		while (mri >= 0 &&
		    mem_node_config[mnode].physmax >= MEMRANGELO(mri)) {
			mnrcnt++;
			if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
				mri--;
			else
				break;
		}
	}
	ASSERT(mnrcnt <= MAX_MNODE_MRANGES);
	return (mnrcnt);
#endif	/* __xpv */
}

/*
 * mnode_range_setup() initializes mnoderanges.
 */
void
mnode_range_setup(mnoderange_t *mnoderanges)
{
	int	mnode, mri;

	for (mnode = 0; mnode < max_mem_nodes; mnode++) {
		if (mem_node_config[mnode].exists == 0)
			continue;

		mri = nranges - 1;

		while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
			mri--;

		while (mri >= 0 && mem_node_config[mnode].physmax >=
		    MEMRANGELO(mri)) {
			mnoderanges->mnr_pfnlo = MAX(MEMRANGELO(mri),
			    mem_node_config[mnode].physbase);
			mnoderanges->mnr_pfnhi = MIN(MEMRANGEHI(mri),
			    mem_node_config[mnode].physmax);
			mnoderanges->mnr_mnode = mnode;
			mnoderanges->mnr_memrange = mri;
			mnoderanges++;
			if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
				mri--;
			else
				break;
		}
	}
}

/*ARGSUSED*/
int
mtype_init(vnode_t *vp, caddr_t vaddr, uint_t *flags, size_t pgsz)
{
	int mtype = mnoderangecnt - 1;

#if !defined(__xpv)
#if defined(__i386)
	/*
	 * set the mtype range
	 * - kmem requests need to be below 4g if restricted_kmemalloc is set.
	 * - for non kmem requests, set range to above 4g if memory below 4g
	 *   runs low.
	 */
	if (restricted_kmemalloc && VN_ISKAS(vp) &&
	    (caddr_t)(vaddr) >= kernelheap &&
	    (caddr_t)(vaddr) < ekernelheap) {
		ASSERT(physmax4g);
		mtype = mtype4g;
		if (RESTRICT16M_ALLOC(freemem4g - btop(pgsz),
		    btop(pgsz), *flags)) {
			*flags |= PGI_MT_RANGE16M;
		} else {
			VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
			VM_STAT_COND_ADD((*flags & PG_PANIC),
			    vmm_vmstats.pgpanicalloc);
			*flags |= PGI_MT_RANGE0;
		}
		return (mtype);
	}
#endif	/* __i386 */

	if (RESTRICT4G_ALLOC) {
		VM_STAT_ADD(vmm_vmstats.restrict4gcnt);
		/* here only for > 4g systems */
		*flags |= PGI_MT_RANGE4G;
	} else if (RESTRICT16M_ALLOC(freemem, btop(pgsz), *flags)) {
		*flags |= PGI_MT_RANGE16M;
	} else {
		VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
		VM_STAT_COND_ADD((*flags & PG_PANIC), vmm_vmstats.pgpanicalloc);
		*flags |= PGI_MT_RANGE0;
	}
#endif	/* !__xpv */
	return (mtype);
}


/* mtype init for page_get_replacement_page */
/*ARGSUSED*/
int
mtype_pgr_init(int *flags, page_t *pp, int mnode, pgcnt_t pgcnt)
{
	int mtype = mnoderangecnt - 1;
#if !defined(__xpv)
	if (RESTRICT16M_ALLOC(freemem, pgcnt, *flags)) {
		*flags |= PGI_MT_RANGE16M;
	} else {
		VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
		*flags |= PGI_MT_RANGE0;
	}
#endif
	return (mtype);
}

/*
 * Determine if the mnode range specified in mtype contains memory belonging
 * to memory node mnode.  If flags & PGI_MT_RANGE is set then mtype contains
 * the range of indices from high pfn to 0, 16m or 4g.
 *
 * Return the first mnode range type index found, or -1 if none is found.
 */
int
mtype_func(int mnode, int mtype, uint_t flags)
{
	if (flags & PGI_MT_RANGE) {
		int	mtlim = 0;

		if (flags & PGI_MT_NEXT)
			mtype--;
		if (flags & PGI_MT_RANGE4G)
			mtlim = mtype4g + 1;	/* exclude 0-4g range */
		else if (flags & PGI_MT_RANGE16M)
			mtlim = 1;		/* exclude 0-16m range */
		while (mtype >= mtlim) {
			if (mnoderanges[mtype].mnr_mnode == mnode)
				return (mtype);
			mtype--;
		}
	} else if (mnoderanges[mtype].mnr_mnode == mnode) {
		return (mtype);
	}
	return (-1);
}

/*
 * Update the page list max counts with the pfn range specified by the
 * input parameters.  Called from add_physmem() when physical memory with
 * page_t's is initially added to the page lists.
 */
void
mtype_modify_max(pfn_t startpfn, long cnt)
{
	int	mtype = 0;
	pfn_t	endpfn = startpfn + cnt, pfn;
	pgcnt_t	inc;

	ASSERT(cnt > 0);

	if (!physmax4g)
		return;

	for (pfn = startpfn; pfn < endpfn; ) {
		if (pfn <= mnoderanges[mtype].mnr_pfnhi) {
			if (endpfn < mnoderanges[mtype].mnr_pfnhi) {
				inc = endpfn - pfn;
			} else {
				inc = mnoderanges[mtype].mnr_pfnhi - pfn + 1;
			}
			if (mtype <= mtype4g)
				maxmem4g += inc;
			pfn += inc;
		}
		mtype++;
		ASSERT(mtype < mnoderangecnt || pfn >= endpfn);
	}
}

int
mtype_2_mrange(int mtype)
{
	return (mnoderanges[mtype].mnr_memrange);
}

void
mnodetype_2_pfn(int mnode, int mtype, pfn_t *pfnlo, pfn_t *pfnhi)
{
	ASSERT(mnoderanges[mtype].mnr_mnode == mnode);
	*pfnlo = mnoderanges[mtype].mnr_pfnlo;
	*pfnhi = mnoderanges[mtype].mnr_pfnhi;
}

size_t
plcnt_sz(size_t ctrs_sz)
{
#ifdef DEBUG
	int	szc, colors;

	ctrs_sz += mnoderangecnt * sizeof (struct mnr_mts) * mmu_page_sizes;
	for (szc = 0; szc < mmu_page_sizes; szc++) {
		colors = page_get_pagecolors(szc);
		ctrs_sz += mnoderangecnt * sizeof (pgcnt_t) * colors;
	}
#endif
	return (ctrs_sz);
}

caddr_t
plcnt_init(caddr_t addr)
{
#ifdef DEBUG
	int	mt, szc, colors;

	for (mt = 0; mt < mnoderangecnt; mt++) {
		mnoderanges[mt].mnr_mts = (struct mnr_mts *)addr;
		addr += (sizeof (struct mnr_mts) * mmu_page_sizes);
		for (szc = 0; szc < mmu_page_sizes; szc++) {
			colors = page_get_pagecolors(szc);
			mnoderanges[mt].mnr_mts[szc].mnr_mts_colors = colors;
			mnoderanges[mt].mnr_mts[szc].mnr_mtsc_pgcnt =
			    (pgcnt_t *)addr;
			addr += (sizeof (pgcnt_t) * colors);
		}
	}
#endif
	return (addr);
}

void
plcnt_inc_dec(page_t *pp, int mtype, int szc, long cnt, int flags)
{
#ifdef DEBUG
	int	bin = PP_2_BIN(pp);

	atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mts_pgcnt, cnt);
	atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mtsc_pgcnt[bin],
	    cnt);
#endif
	ASSERT(mtype == PP_2_MTYPE(pp));
	if (physmax4g && mtype <= mtype4g)
		atomic_add_long(&freemem4g, cnt);
	if (flags & PG_CACHE_LIST)
		atomic_add_long(&mnoderanges[mtype].mnr_mt_clpgcnt, cnt);
	else
		atomic_add_long(&mnoderanges[mtype].mnr_mt_flpgcnt[szc], cnt);
	atomic_add_long(&mnoderanges[mtype].mnr_mt_totcnt, cnt);
}

/*
 * Returns the free page count for mnode
 */
int
mnode_pgcnt(int mnode)
{
	int	mtype = mnoderangecnt - 1;
	int	flags = PGI_MT_RANGE0;
	pgcnt_t	pgcnt = 0;

	mtype = mtype_func(mnode, mtype, flags);

	while (mtype != -1) {
		pgcnt += MTYPE_FREEMEM(mtype);
		mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT);
	}
	return (pgcnt);
}

/*
 * Initialize page coloring variables based on the l2 cache parameters.
 * Calculate and return memory needed for page coloring data structures.
 */
size_t
page_coloring_init(uint_t l2_sz, int l2_linesz, int l2_assoc)
{
	size_t	colorsz = 0;
	int	i;
	int	colors;

#if defined(__xpv)
	/*
	 * Hypervisor domains currently don't have any concept of NUMA.
	 * Hence we'll act like there is only 1 memrange.
	 */
	i = memrange_num(1);
#else	/* !__xpv */
	/*
	 * Reduce the memory range lists if we don't have large amounts
	 * of memory. This avoids searching known empty free lists.
	 */
	i = memrange_num(physmax);
#if defined(__i386)
	if (i > 0)
		restricted_kmemalloc = 0;
#endif
	/* physmax greater than 4g */
	if (i == 0)
		physmax4g = 1;
#endif	/* !__xpv */
	memranges += i;
	nranges -= i;

	ASSERT(mmu_page_sizes <= MMU_PAGE_SIZES);

	ASSERT(ISP2(l2_sz));
	ASSERT(ISP2(l2_linesz));
	ASSERT(l2_sz > MMU_PAGESIZE);

	/* l2_assoc is 0 for fully associative l2 cache */
	if (l2_assoc)
		l2_colors = MAX(1, l2_sz / (l2_assoc * MMU_PAGESIZE));
	else
		l2_colors = 1;

	/* for scalability, configure at least PAGE_COLORS_MIN color bins */
	page_colors = MAX(l2_colors, PAGE_COLORS_MIN);

	/*
	 * cpu_page_colors is non-zero when a page color may be spread across
	 * multiple bins.
	 */
	if (l2_colors < page_colors)
		cpu_page_colors = l2_colors;

	ASSERT(ISP2(page_colors));

	page_colors_mask = page_colors - 1;

	ASSERT(ISP2(CPUSETSIZE()));
	page_coloring_shift = lowbit(CPUSETSIZE());

	/* initialize number of colors per page size */
	for (i = 0; i <= mmu.max_page_level; i++) {
		hw_page_array[i].hp_size = LEVEL_SIZE(i);
		hw_page_array[i].hp_shift = LEVEL_SHIFT(i);
		hw_page_array[i].hp_pgcnt = LEVEL_SIZE(i) >> LEVEL_SHIFT(0);
		hw_page_array[i].hp_colors = (page_colors_mask >>
		    (hw_page_array[i].hp_shift - hw_page_array[0].hp_shift))
		    + 1;
		colorequivszc[i] = 0;
	}

	/*
	 * The value of cpu_page_colors determines if additional color bins
	 * need to be checked for a particular color in the page_get routines.
	 */
	if (cpu_page_colors != 0) {

		int a = lowbit(page_colors) - lowbit(cpu_page_colors);
		ASSERT(a > 0);
		ASSERT(a < 16);

		for (i = 0; i <= mmu.max_page_level; i++) {
			if ((colors = hw_page_array[i].hp_colors) <= 1) {
				colorequivszc[i] = 0;
				continue;
			}
			while ((colors >> a) == 0)
				a--;
			ASSERT(a >= 0);

			/* higher 4 bits encode the color equiv mask */
			colorequivszc[i] = (a << 4);
		}
	}

	/* factor in colorequiv to check additional 'equivalent' bins. */
	if (colorequiv > 1) {

		int a = lowbit(colorequiv) - 1;
		if (a > 15)
			a = 15;

		for (i = 0; i <= mmu.max_page_level; i++) {
			if ((colors = hw_page_array[i].hp_colors) <= 1) {
				continue;
			}
			while ((colors >> a) == 0)
				a--;
			if ((a << 4) > colorequivszc[i]) {
				colorequivszc[i] = (a << 4);
			}
		}
	}

	/* size for mnoderanges */
	for (mnoderangecnt = 0, i = 0; i < max_mem_nodes; i++)
		mnoderangecnt += mnode_range_cnt(i);
	colorsz = mnoderangecnt * sizeof (mnoderange_t);

	/* size for fpc_mutex and cpc_mutex */
	colorsz += (2 * max_mem_nodes * sizeof (kmutex_t) * NPC_MUTEX);

	/* size of page_freelists */
	colorsz += mnoderangecnt * sizeof (page_t ***);
	colorsz += mnoderangecnt * mmu_page_sizes * sizeof (page_t **);

	for (i = 0; i < mmu_page_sizes; i++) {
		colors = page_get_pagecolors(i);
		colorsz += mnoderangecnt * colors * sizeof (page_t *);
	}

	/* size of page_cachelists */
	colorsz += mnoderangecnt * sizeof (page_t **);
	colorsz += mnoderangecnt * page_colors * sizeof (page_t *);

	return (colorsz);
}

/*
 * Called once at startup to configure page coloring data structures and
 * do the 1st page_free()/page_freelist_add().
 */
void
page_coloring_setup(caddr_t pcmemaddr)
{
	int	i;
	int	j;
	int	k;
	caddr_t	addr;
	int	colors;

	/*
	 * do page coloring setup
	 */
	addr = pcmemaddr;

	mnoderanges = (mnoderange_t *)addr;
	addr += (mnoderangecnt * sizeof (mnoderange_t));

	mnode_range_setup(mnoderanges);

	if (physmax4g)
		mtype4g = pfn_2_mtype(0xfffff);

	for (k = 0; k < NPC_MUTEX; k++) {
		fpc_mutex[k] = (kmutex_t *)addr;
		addr += (max_mem_nodes * sizeof (kmutex_t));
	}
	for (k = 0; k < NPC_MUTEX; k++) {
		cpc_mutex[k] = (kmutex_t *)addr;
		addr += (max_mem_nodes * sizeof (kmutex_t));
	}
	page_freelists = (page_t ****)addr;
	addr += (mnoderangecnt * sizeof (page_t ***));

	page_cachelists = (page_t ***)addr;
	addr += (mnoderangecnt * sizeof (page_t **));

	for (i = 0; i < mnoderangecnt; i++) {
		page_freelists[i] = (page_t ***)addr;
		addr += (mmu_page_sizes * sizeof (page_t **));

		for (j = 0; j < mmu_page_sizes; j++) {
			colors = page_get_pagecolors(j);
			page_freelists[i][j] = (page_t **)addr;
			addr += (colors * sizeof (page_t *));
		}
		page_cachelists[i] = (page_t **)addr;
		addr += (page_colors * sizeof (page_t *));
	}
}

#if defined(__xpv)
/*
 * Give back 10% of the io_pool pages to the free list.
 * Don't shrink the pool below some absolute minimum.
 */
static void
page_io_pool_shrink()
{
	int retcnt;
	page_t *pp, *pp_first, *pp_last, **curpool;
	mfn_t mfn;
	int bothpools = 0;

	mutex_enter(&io_pool_lock);
	io_pool_shrink_attempts++;	/* should be a kstat? */
	retcnt = io_pool_cnt / 10;
	if (io_pool_cnt - retcnt < io_pool_cnt_min)
		retcnt = io_pool_cnt - io_pool_cnt_min;
	if (retcnt <= 0)
		goto done;
	io_pool_shrinks++;	/* should be a kstat? */
	curpool = &io_pool_4g;
domore:
	/*
	 * Loop through taking pages from the end of the list
	 * (highest mfns) until the amount to return is reached.
	 */
	for (pp = *curpool; pp && retcnt > 0; ) {
		pp_first = pp_last = pp->p_prev;
		if (pp_first == *curpool)
			break;
		retcnt--;
		io_pool_cnt--;
		page_io_pool_sub(curpool, pp_first, pp_last);
		if ((mfn = pfn_to_mfn(pp->p_pagenum)) < start_mfn)
			start_mfn = mfn;
		page_free(pp_first, 1);
		pp = *curpool;
	}
	if (retcnt != 0 && !bothpools) {
		/*
		 * If not enough found in less constrained pool try the
		 * more constrained one.
		 */
		curpool = &io_pool_16m;
		bothpools = 1;
		goto domore;
	}
done:
	mutex_exit(&io_pool_lock);
}

#endif	/* __xpv */

uint_t
page_create_update_flags_x86(uint_t flags)
{
#if defined(__xpv)
	/*
	 * Check if this is an urgent allocation and free pages are depleted.
	 */
	if (!(flags & PG_WAIT) && freemem < desfree)
		page_io_pool_shrink();
#else /* !__xpv */
	/*
	 * page_create_get_something may call this because 4g memory may be
	 * depleted. Set flags to allow for relocation of base page below
	 * 4g if necessary.
	 */
	if (physmax4g)
		flags |= (PGI_PGCPSZC0 | PGI_PGCPHIPRI);
#endif /* __xpv */
	return (flags);
}

/*ARGSUSED*/
int
bp_color(struct buf *bp)
{
	return (0);
}

#if defined(__xpv)

/*
 * Take pages out of an io_pool
 */
static void
page_io_pool_sub(page_t **poolp, page_t *pp_first, page_t *pp_last)
{
	if (*poolp == pp_first) {
		*poolp = pp_last->p_next;
		if (*poolp == pp_first)
			*poolp = NULL;
	}
	pp_first->p_prev->p_next = pp_last->p_next;
	pp_last->p_next->p_prev = pp_first->p_prev;
	pp_first->p_prev = pp_last;
	pp_last->p_next = pp_first;
}

/*
 * Put a page on the io_pool list. The list is ordered by increasing MFN.
 */
static void
page_io_pool_add(page_t **poolp, page_t *pp)
{
	page_t	*look;
	mfn_t	mfn = mfn_list[pp->p_pagenum];

	if (*poolp == NULL) {
		*poolp = pp;
		pp->p_next = pp;
		pp->p_prev = pp;
		return;
	}

	/*
	 * Since we try to take pages from the high end of the pool,
	 * chances are good that the pages to be put on the list will
	 * go at or near the end of the list.  So start at the end and
	 * work backwards.
	 */
	look = (*poolp)->p_prev;
	while (mfn < mfn_list[look->p_pagenum]) {
		look = look->p_prev;
		if (look == (*poolp)->p_prev)
			break;	/* backed all the way to front of list */
	}

	/* insert after look */
	pp->p_prev = look;
	pp->p_next = look->p_next;
	pp->p_next->p_prev = pp;
	look->p_next = pp;
	if (mfn < mfn_list[(*poolp)->p_pagenum]) {
		/*
		 * We inserted a new first list element;
		 * adjust the pool pointer to the newly inserted element.
		 */
		*poolp = pp;
	}
}

/*
 * Add a page to the io_pool.  Setting the force flag will force the page
 * into the io_pool no matter what.
 */
static void
add_page_to_pool(page_t *pp, int force)
{
	page_t *highest;
	page_t *freep = NULL;

	mutex_enter(&io_pool_lock);
	/*
	 * Always keep the scarce low memory pages
	 */
	if (mfn_list[pp->p_pagenum] < PFN_16MEG) {
		++io_pool_cnt;
		page_io_pool_add(&io_pool_16m, pp);
		goto done;
	}
	if (io_pool_cnt < io_pool_cnt_max || force) {
		++io_pool_cnt;
		page_io_pool_add(&io_pool_4g, pp);
	} else {
		highest = io_pool_4g->p_prev;
		if (mfn_list[pp->p_pagenum] < mfn_list[highest->p_pagenum]) {
			page_io_pool_sub(&io_pool_4g, highest, highest);
			page_io_pool_add(&io_pool_4g, pp);
			freep = highest;
		} else {
			freep = pp;
		}
	}
done:
	mutex_exit(&io_pool_lock);
	if (freep)
		page_free(freep, 1);
}


int contig_pfn_cnt;	/* no of pfns in the contig pfn list */
int contig_pfn_max;	/* capacity of the contig pfn list */
int next_alloc_pfn;	/* next position in list to start a contig search */
int contig_pfnlist_updates;	/* pfn list update count */
int contig_pfnlist_locked;	/* contig pfn list locked against use */
int contig_pfnlist_builds;	/* how many times have we (re)built list */
int contig_pfnlist_buildfailed;	/* how many times has list build failed */
int create_contig_pending;	/* nonzero means taskq creating contig list */
pfn_t *contig_pfn_list = NULL;	/* list of contig pfns in ascending mfn order */

/*
 * Function to use in sorting a list of pfns by their underlying mfns.
 */
static int
mfn_compare(const void *pfnp1, const void *pfnp2)
{
	mfn_t mfn1 = mfn_list[*(pfn_t *)pfnp1];
	mfn_t mfn2 = mfn_list[*(pfn_t *)pfnp2];

	if (mfn1 > mfn2)
		return (1);
	if (mfn1 < mfn2)
		return (-1);
	return (0);
}

/*
 * Compact the contig_pfn_list by tossing all the non-contiguous
 * elements from the list.
 */
static void
compact_contig_pfn_list(void)
{
	pfn_t pfn, lapfn, prev_lapfn;
	mfn_t mfn;
	int i, newcnt = 0;

	prev_lapfn = 0;
	for (i = 0; i < contig_pfn_cnt - 1; i++) {
		pfn = contig_pfn_list[i];
		lapfn = contig_pfn_list[i + 1];
		mfn = mfn_list[pfn];
		/*
		 * See if next pfn is for a contig mfn
		 */
		if (mfn_list[lapfn] != mfn + 1)
			continue;
		/*
		 * pfn and lookahead are both put in list
		 * unless pfn is the previous lookahead.
		 */
		if (pfn != prev_lapfn)
			contig_pfn_list[newcnt++] = pfn;
		contig_pfn_list[newcnt++] = lapfn;
		prev_lapfn = lapfn;
	}
	for (i = newcnt; i < contig_pfn_cnt; i++)
		contig_pfn_list[i] = 0;
	contig_pfn_cnt = newcnt;
}

/*ARGSUSED*/
static void
call_create_contiglist(void *arg)
{
	mutex_enter(&io_pool_lock);
	(void) create_contig_pfnlist(PG_WAIT);
	create_contig_pending = 0;
	mutex_exit(&io_pool_lock);
}

/*
 * Create list of freelist pfns that have underlying
 * contiguous mfns.  The list is kept in ascending mfn order.
 * Returns 1 if the list was created, else 0.
 */
static int
create_contig_pfnlist(uint_t flags)
{
	pfn_t pfn;
	page_t *pp;

	if (contig_pfn_list != NULL)
		return (1);
	ASSERT(!contig_pfnlist_locked);
	contig_pfn_max = freemem + (freemem / 10);
	contig_pfn_list = kmem_zalloc(contig_pfn_max * sizeof (pfn_t),
	    (flags & PG_WAIT) ? KM_SLEEP : KM_NOSLEEP);
	if (contig_pfn_list == NULL) {
		/*
		 * If we could not create the contig list (because
		 * we could not sleep for memory), dispatch a taskq that
		 * can sleep to get the memory.
		 */
		if (!create_contig_pending) {
			if (taskq_dispatch(system_taskq, call_create_contiglist,
			    NULL, TQ_NOSLEEP) != NULL)
				create_contig_pending = 1;
		}
		contig_pfnlist_buildfailed++;	/* count list build failures */
		return (0);
	}
	ASSERT(contig_pfn_cnt == 0);
	for (pfn = 0; pfn < mfn_count; pfn++) {
		pp = page_numtopp_nolock(pfn);
		if (pp == NULL || !PP_ISFREE(pp))
			continue;
		contig_pfn_list[contig_pfn_cnt] = pfn;
		if (++contig_pfn_cnt == contig_pfn_max)
			break;
	}
	qsort(contig_pfn_list, contig_pfn_cnt, sizeof (pfn_t), mfn_compare);
	compact_contig_pfn_list();
	/*
	 * Make sure next search of the newly created contiguous pfn
	 * list starts at the beginning of the list.
	 */
	next_alloc_pfn = 0;
	contig_pfnlist_builds++;	/* count list builds */
	return (1);
}


/*
 * Toss the current contig pfnlist.  Someone is about to do a massive
 * update to pfn<->mfn mappings.  So we have them destroy the list and lock
 * it till they are done with their update.
 */
void
clear_and_lock_contig_pfnlist()
{
	pfn_t *listp = NULL;
	size_t listsize;

	mutex_enter(&io_pool_lock);
	ASSERT(!contig_pfnlist_locked);
	if (contig_pfn_list != NULL) {
		listp = contig_pfn_list;
		listsize = contig_pfn_max * sizeof (pfn_t);
		contig_pfn_list = NULL;
		contig_pfn_max = contig_pfn_cnt = 0;
	}
	contig_pfnlist_locked = 1;
	mutex_exit(&io_pool_lock);
	if (listp != NULL)
		kmem_free(listp, listsize);
}

/*
 * Unlock the contig_pfn_list.  The next attempted use of it will cause
 * it to be re-created.
 */
void
unlock_contig_pfnlist()
{
	mutex_enter(&io_pool_lock);
	ASSERT(contig_pfnlist_locked);
	contig_pfnlist_locked = 0;
	mutex_exit(&io_pool_lock);
}

/*
 * Update the contiguous pfn list in response to a pfn <-> mfn reassignment
 */
void
update_contig_pfnlist(pfn_t pfn, mfn_t oldmfn, mfn_t newmfn)
{
	int probe_hi, probe_lo, probe_pos, insert_after, insert_point;
	pfn_t probe_pfn;
	mfn_t probe_mfn;

	if (contig_pfn_list == NULL)
		return;
	mutex_enter(&io_pool_lock);
	contig_pfnlist_updates++;
	/*
	 * Find the pfn in the current list.  Use a binary chop to locate it.
	 */
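	/* the list is in ascending mfn order, so bisect using the old mfn */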
	probe_hi = contig_pfn_cnt - 1;
	probe_lo = 0;
	probe_pos = (probe_hi + probe_lo) / 2;
	while ((probe_pfn = contig_pfn_list[probe_pos]) != pfn) {
		if (probe_pos == probe_lo) { /* pfn not in list */
			probe_pos = -1;
			break;
		}
		if (pfn_to_mfn(probe_pfn) <= oldmfn)
			probe_lo = probe_pos;
		else
			probe_hi = probe_pos;
		probe_pos = (probe_hi + probe_lo) / 2;
	}
	if (probe_pos >= 0) { /* remove pfn from list */
		contig_pfn_cnt--;
		ovbcopy(&contig_pfn_list[probe_pos + 1],
		    &contig_pfn_list[probe_pos],
		    (contig_pfn_cnt - probe_pos) * sizeof (pfn_t));
	}
	if (newmfn == MFN_INVALID)
		goto done;
	/*
	 * Check if new mfn has adjacent mfns in the list
	 */
	probe_hi = contig_pfn_cnt - 1;
	probe_lo = 0;
	insert_after = -2;
	do {
		probe_pos = (probe_hi + probe_lo) / 2;
		probe_mfn = pfn_to_mfn(contig_pfn_list[probe_pos]);
		if (newmfn == probe_mfn + 1)
			insert_after = probe_pos;
		else if (newmfn == probe_mfn - 1)
			insert_after = probe_pos - 1;
		if (probe_pos == probe_lo)
			break;
		if (probe_mfn <= newmfn)
			probe_lo = probe_pos;
		else
			probe_hi = probe_pos;
	} while (insert_after == -2);
	/*
	 * If there is space in the list and there are adjacent mfns,
	 * insert the pfn into its proper place in the list.
	 */
	if (insert_after != -2 && contig_pfn_cnt + 1 <= contig_pfn_max) {
		insert_point = insert_after + 1;
		ovbcopy(&contig_pfn_list[insert_point],
		    &contig_pfn_list[insert_point + 1],
		    (contig_pfn_cnt - insert_point) * sizeof (pfn_t));
		contig_pfn_list[insert_point] = pfn;
		contig_pfn_cnt++;
	}
done:
	mutex_exit(&io_pool_lock);
}

/*
 * Called to (re-)populate the io_pool from the free page lists.
 */
long
populate_io_pool(void)
{
	pfn_t pfn;
	mfn_t mfn, max_mfn;
	page_t *pp;

	/*
	 * Figure out the bounds of the pool on first invocation.
	 * We use a percentage of memory for the io pool size.
	 * We allow that to shrink, but not to less than a fixed minimum.
	 */
	if (io_pool_cnt_max == 0) {
		io_pool_cnt_max = physmem / (100 / io_pool_physmem_pct);
		io_pool_cnt_lowater = io_pool_cnt_max;
		/*
		 * This is the first time in populate_io_pool, grab a va to use
		 * when we need to allocate pages.
		 */
		io_pool_kva = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
	}
	/*
	 * If we are out of pages in the pool, then grow the size of the pool
	 */
	if (io_pool_cnt == 0) {
		io_pool_cnt_max += io_pool_cnt_max / 20; /* grow by 5% */
		io_pool_grows++;	/* should be a kstat? */
	}

	/*
	 * Get highest mfn on this platform, but limit to the 32 bit DMA max.
	 */
	(void) mfn_to_pfn(start_mfn);
	max_mfn = MIN(cached_max_mfn, PFN_4GIG);
	for (mfn = start_mfn; mfn < max_mfn; start_mfn = ++mfn) {
		pfn = mfn_to_pfn(mfn);
		if (pfn & PFN_IS_FOREIGN_MFN)
			continue;
		/*
		 * try to allocate it from free pages
		 */
		pp = page_numtopp_alloc(pfn);
		if (pp == NULL)
			continue;
		PP_CLRFREE(pp);
		add_page_to_pool(pp, 1);
		if (io_pool_cnt >= io_pool_cnt_max)
			break;
	}

	return (io_pool_cnt);
}

/*
 * Destroy a page that was being used for DMA I/O. It may or
 * may not actually go back to the io_pool.
 */
2121 */ 2122 void 2123 page_destroy_io(page_t *pp) 2124 { 2125 mfn_t mfn = mfn_list[pp->p_pagenum]; 2126 2127 /* 2128 * When the page was alloc'd a reservation was made, release it now 2129 */ 2130 page_unresv(1); 2131 /* 2132 * Unload translations, if any, then hash out the 2133 * page to erase its identity. 2134 */ 2135 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD); 2136 page_hashout(pp, NULL); 2137 2138 /* 2139 * If the page came from the free lists, just put it back to them. 2140 * DomU pages always go on the free lists as well. 2141 */ 2142 if (!DOMAIN_IS_INITDOMAIN(xen_info) || mfn >= PFN_4GIG) { 2143 page_free(pp, 1); 2144 return; 2145 } 2146 2147 add_page_to_pool(pp, 0); 2148 } 2149 2150 2151 long contig_searches; /* count of times contig pages requested */ 2152 long contig_search_restarts; /* count of contig ranges tried */ 2153 long contig_search_failed; /* count of contig alloc failures */ 2154 2155 /* 2156 * Look thru the contiguous pfns that are not part of the io_pool for 2157 * contiguous free pages. Return a list of the found pages or NULL. 2158 */ 2159 page_t * 2160 find_contig_free(uint_t bytes, uint_t flags) 2161 { 2162 page_t *pp, *plist = NULL; 2163 mfn_t mfn, prev_mfn; 2164 pfn_t pfn; 2165 int pages_needed, pages_requested; 2166 int search_start; 2167 2168 /* 2169 * create the contig pfn list if not already done 2170 */ 2171 if (contig_pfn_list == NULL) { 2172 if (contig_pfnlist_locked) { 2173 return (NULL); 2174 } else { 2175 if (!create_contig_pfnlist(flags)) 2176 return (NULL); 2177 } 2178 } 2179 contig_searches++; 2180 /* 2181 * Search contiguous pfn list for physically contiguous pages not in 2182 * the io_pool. Start the search where the last search left off. 2183 */ 2184 pages_requested = pages_needed = mmu_btop(bytes); 2185 search_start = next_alloc_pfn; 2186 prev_mfn = 0; 2187 while (pages_needed) { 2188 pfn = contig_pfn_list[next_alloc_pfn]; 2189 mfn = pfn_to_mfn(pfn); 2190 if ((prev_mfn == 0 || mfn == prev_mfn + 1) && 2191 (pp = page_numtopp_alloc(pfn)) != NULL) { 2192 PP_CLRFREE(pp); 2193 page_io_pool_add(&plist, pp); 2194 pages_needed--; 2195 prev_mfn = mfn; 2196 } else { 2197 contig_search_restarts++; 2198 /* 2199 * free partial page list 2200 */ 2201 while (plist != NULL) { 2202 pp = plist; 2203 page_io_pool_sub(&plist, pp, pp); 2204 page_free(pp, 1); 2205 } 2206 pages_needed = pages_requested; 2207 prev_mfn = 0; 2208 } 2209 if (++next_alloc_pfn == contig_pfn_cnt) 2210 next_alloc_pfn = 0; 2211 if (next_alloc_pfn == search_start) 2212 break; /* all pfns searched */ 2213 } 2214 if (pages_needed) { 2215 contig_search_failed++; 2216 /* 2217 * Failed to find enough contig pages. 2218 * free partial page list 2219 */ 2220 while (plist != NULL) { 2221 pp = plist; 2222 page_io_pool_sub(&plist, pp, pp); 2223 page_free(pp, 1); 2224 } 2225 } 2226 return (plist); 2227 } 2228 2229 /* 2230 * Allocator for domain 0 I/O pages. We match the required 2231 * DMA attributes and contiguity constraints. 
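 * The search order is: the normal page lists for unconstrained requests, then free contiguous pages, then the io_pool (replenishing it if necessary), and finally a page exchange with the hypervisor.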
2232 */ 2233 /*ARGSUSED*/ 2234 page_t * 2235 page_create_io( 2236 struct vnode *vp, 2237 u_offset_t off, 2238 uint_t bytes, 2239 uint_t flags, 2240 struct as *as, 2241 caddr_t vaddr, 2242 ddi_dma_attr_t *mattr) 2243 { 2244 mfn_t max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL); 2245 page_t *pp_first; /* list to return */ 2246 page_t *pp_last; /* last in list to return */ 2247 page_t *pp, **poolp, **pplist = NULL, *expp; 2248 int i, extpages = 0, npages = 0, contig, anyaddr, extra; 2249 mfn_t lo_mfn; 2250 mfn_t hi_mfn; 2251 mfn_t mfn, tmfn; 2252 mfn_t *mfnlist = 0; 2253 pgcnt_t pfnalign = 0; 2254 int align, order, nbits, extents; 2255 uint64_t pfnseg; 2256 int attempt = 0, is_domu = 0; 2257 int asked_hypervisor = 0; 2258 uint_t kflags; 2259 2260 ASSERT(mattr != NULL); 2261 lo_mfn = mmu_btop(mattr->dma_attr_addr_lo); 2262 hi_mfn = mmu_btop(mattr->dma_attr_addr_hi); 2263 align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer); 2264 if (align > MMU_PAGESIZE) 2265 pfnalign = mmu_btop(align); 2266 pfnseg = mmu_btop(mattr->dma_attr_seg); 2267 2268 /* 2269 * Clear the contig flag if only one page is needed. 2270 */ 2271 contig = (flags & PG_PHYSCONTIG); 2272 flags &= ~PG_PHYSCONTIG; 2273 bytes = P2ROUNDUP(bytes, MMU_PAGESIZE); 2274 if (bytes == MMU_PAGESIZE) 2275 contig = 0; 2276 2277 /* 2278 * Check if any old page in the system is fine. 2279 * DomU should always go down this path. 2280 */ 2281 is_domu = !DOMAIN_IS_INITDOMAIN(xen_info); 2282 anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn && !pfnalign; 2283 if ((!contig && anyaddr) || is_domu) { 2284 pp = page_create_va(vp, off, bytes, flags, &kvseg, vaddr); 2285 if (pp) 2286 return (pp); 2287 else if (is_domu) 2288 return (NULL); /* no memory available */ 2289 } 2290 /* 2291 * DomU should never reach here 2292 */ 2293 try_again: 2294 /* 2295 * We could just want unconstrained but contig pages. 2296 */ 2297 if (anyaddr && contig && pfnseg >= max_mfn) { 2298 /* 2299 * Look for free contig pages to satisfy the request. 2300 */ 2301 mutex_enter(&io_pool_lock); 2302 pp_first = find_contig_free(bytes, flags); 2303 mutex_exit(&io_pool_lock); 2304 if (pp_first != NULL) 2305 goto done; 2306 } 2307 /* 2308 * See if we want pages for a legacy device 2309 */ 2310 if (hi_mfn < PFN_16MEG) 2311 poolp = &io_pool_16m; 2312 else 2313 poolp = &io_pool_4g; 2314 try_smaller: 2315 /* 2316 * Take pages from I/O pool. We'll use pages from the highest MFN 2317 * range possible. 2318 */ 2319 pp_first = pp_last = NULL; 2320 npages = mmu_btop(bytes); 2321 mutex_enter(&io_pool_lock); 2322 for (pp = *poolp; pp && npages > 0; ) { 2323 pp = pp->p_prev; 2324 2325 /* 2326 * skip pages above allowable range 2327 */ 2328 mfn = mfn_list[pp->p_pagenum]; 2329 if (hi_mfn < mfn) 2330 goto skip; 2331 2332 /* 2333 * stop at pages below allowable range 2334 */ 2335 if (lo_mfn > mfn) 2336 break; 2337 restart: 2338 if (pp_last == NULL) { 2339 /* 2340 * Check alignment 2341 */ 2342 tmfn = mfn - (npages - 1); 2343 if (pfnalign) { 2344 if (tmfn != P2ROUNDUP(tmfn, pfnalign)) 2345 goto skip; /* not properly aligned */ 2346 } 2347 /* 2348 * Check segment 2349 */ 2350 if ((mfn & pfnseg) < (tmfn & pfnseg)) 2351 goto skip; /* crosses segment boundary */ 2352 /* 2353 * Start building page list 2354 */ 2355 pp_first = pp_last = pp; 2356 npages--; 2357 } else { 2358 /* 2359 * check physical contiguity if required 2360 */ 2361 if (contig && 2362 mfn_list[pp_first->p_pagenum] != mfn + 1) { 2363 /* 2364 * not a contiguous page, restart list. 
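 * (the pool is walked from high mfn to low via p_prev, so each page added here must have an mfn exactly one below that of the current list head)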
2365 */ 2366 pp_last = NULL; 2367 npages = mmu_btop(bytes); 2368 goto restart; 2369 } else { /* add page to list */ 2370 pp_first = pp; 2371 --npages; 2372 } 2373 } 2374 skip: 2375 if (pp == *poolp) 2376 break; 2377 } 2378 2379 /* 2380 * If we didn't find memory, try the more constrained pool, then 2381 * sweep free pages into the DMA pool and try again. If we fail 2382 * repeatedly, ask the Hypervisor for help. 2383 */ 2384 if (npages != 0) { 2385 mutex_exit(&io_pool_lock); 2386 /* 2387 * If we were looking in the less constrained pool and didn't 2388 * find pages, try the more constrained pool. 2389 */ 2390 if (poolp == &io_pool_4g) { 2391 poolp = &io_pool_16m; 2392 goto try_smaller; 2393 } 2394 kmem_reap(); 2395 if (++attempt < 4) { 2396 /* 2397 * Grab some more io_pool pages 2398 */ 2399 (void) populate_io_pool(); 2400 goto try_again; 2401 } 2402 2403 if (asked_hypervisor++) 2404 return (NULL); /* really out of luck */ 2405 /* 2406 * Hypervisor exchange doesn't handle segment or alignment 2407 * constraints. 2408 */ 2409 if (mattr->dma_attr_seg < mattr->dma_attr_addr_hi || pfnalign) 2410 return (NULL); 2411 /* 2412 * Try exchanging pages with the hypervisor. 2413 */ 2414 npages = mmu_btop(bytes); 2415 kflags = flags & PG_WAIT ? KM_SLEEP : KM_NOSLEEP; 2416 /* 2417 * Hypervisor will allocate extents; if we want contig pages, the 2418 * extent must be >= npages. 2419 */ 2420 if (contig) { 2421 order = highbit(npages) - 1; 2422 if (npages & ((1 << order) - 1)) 2423 order++; 2424 extpages = 1 << order; 2425 } else { 2426 order = 0; 2427 extpages = npages; 2428 } 2429 if (extpages > npages) { 2430 extra = extpages - npages; 2431 if (!page_resv(extra, kflags)) 2432 return (NULL); 2433 } 2434 pplist = kmem_alloc(extpages * sizeof (page_t *), kflags); 2435 if (pplist == NULL) 2436 goto fail; 2437 mfnlist = kmem_alloc(extpages * sizeof (mfn_t), kflags); 2438 if (mfnlist == NULL) 2439 goto fail; 2440 pp = page_create_va(vp, off, npages * PAGESIZE, flags, 2441 &kvseg, vaddr); 2442 if (pp == NULL) 2443 goto fail; 2444 pp_first = pp; 2445 if (extpages > npages) { 2446 /* 2447 * fill out the rest of extent pages to swap with the 2448 * hypervisor 2449 */ 2450 for (i = 0; i < extra; i++) { 2451 expp = page_create_va(vp, 2452 (u_offset_t)(uintptr_t)io_pool_kva, 2453 PAGESIZE, flags, &kvseg, io_pool_kva); 2454 if (expp == NULL) 2455 goto balloon_fail; 2456 (void) hat_pageunload(expp, HAT_FORCE_PGUNLOAD); 2457 page_io_unlock(expp); 2458 page_hashout(expp, NULL); 2459 page_io_lock(expp); 2460 /* 2461 * add page to end of list 2462 */ 2463 expp->p_prev = pp_first->p_prev; 2464 expp->p_next = pp_first; 2465 expp->p_prev->p_next = expp; 2466 pp_first->p_prev = expp; 2467 } 2468 2469 } 2470 for (i = 0; i < extpages; i++) { 2471 pplist[i] = pp; 2472 pp = pp->p_next; 2473 } 2474 nbits = highbit(mattr->dma_attr_addr_hi); 2475 extents = contig ?
1 : npages; 2476 if (balloon_replace_pages(extents, pplist, nbits, order, 2477 mfnlist) != extents) 2478 goto balloon_fail; 2479 2480 kmem_free(pplist, extpages * sizeof (page_t *)); 2481 kmem_free(mfnlist, extpages * sizeof (mfn_t)); 2482 /* 2483 * Return any excess pages to free list 2484 */ 2485 if (extpages > npages) { 2486 for (i = 0; i < extra; i++) { 2487 pp = pp_first->p_prev; 2488 page_sub(&pp_first, pp); 2489 page_io_unlock(pp); 2490 page_unresv(1); 2491 page_free(pp, 1); 2492 } 2493 } 2494 check_dma(mattr, pp_first, mmu_btop(bytes)); 2495 return (pp_first); 2496 } 2497 2498 /* 2499 * Found the pages, now snip them from the list 2500 */ 2501 page_io_pool_sub(poolp, pp_first, pp_last); 2502 io_pool_cnt -= mmu_btop(bytes); 2503 if (io_pool_cnt < io_pool_cnt_lowater) 2504 io_pool_cnt_lowater = io_pool_cnt; /* io pool low water mark */ 2505 mutex_exit(&io_pool_lock); 2506 done: 2507 check_dma(mattr, pp_first, mmu_btop(bytes)); 2508 pp = pp_first; 2509 do { 2510 if (!page_hashin(pp, vp, off, NULL)) { 2511 panic("pg_create_io: hashin failed pp %p, vp %p," 2512 " off %llx", 2513 (void *)pp, (void *)vp, off); 2514 } 2515 off += MMU_PAGESIZE; 2516 PP_CLRFREE(pp); 2517 PP_CLRAGED(pp); 2518 page_set_props(pp, P_REF); 2519 page_io_lock(pp); 2520 pp = pp->p_next; 2521 } while (pp != pp_first); 2522 return (pp_first); 2523 balloon_fail: 2524 /* 2525 * Return pages to free list and return failure 2526 */ 2527 while (pp_first != NULL) { 2528 pp = pp_first; 2529 page_sub(&pp_first, pp); 2530 page_io_unlock(pp); 2531 if (pp->p_vnode != NULL) 2532 page_hashout(pp, NULL); 2533 page_free(pp, 1); 2534 } 2535 fail: 2536 if (pplist) 2537 kmem_free(pplist, extpages * sizeof (page_t *)); 2538 if (mfnlist) 2539 kmem_free(mfnlist, extpages * sizeof (mfn_t)); 2540 page_unresv(extpages - npages); 2541 return (NULL); 2542 } 2543 2544 /* 2545 * Lock and return the page with the highest mfn that we can find. last_mfn 2546 * holds the last one found, so the next search can start from there. We 2547 * also keep a counter so that we don't loop forever if the machine has no 2548 * free pages. 2549 * 2550 * This is called from the balloon thread to find pages to give away. new_high 2551 * is used when new mfn's have been added to the system - we will reset our 2552 * search if the new mfn's are higher than our current search position. 2553 */ 2554 page_t * 2555 page_get_high_mfn(mfn_t new_high) 2556 { 2557 static mfn_t last_mfn = 0; 2558 pfn_t pfn; 2559 page_t *pp; 2560 ulong_t loop_count = 0; 2561 2562 if (new_high > last_mfn) 2563 last_mfn = new_high; 2564 2565 for (; loop_count < mfn_count; loop_count++, last_mfn--) { 2566 if (last_mfn == 0) { 2567 last_mfn = cached_max_mfn; 2568 } 2569 2570 pfn = mfn_to_pfn(last_mfn); 2571 if (pfn & PFN_IS_FOREIGN_MFN) 2572 continue; 2573 2574 /* See if the page is free. If so, lock it. 
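 * page_numtopp_alloc() returns the page exclusively locked (see the ASSERTs below), or NULL if it cannot be claimed.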
*/ 2575 pp = page_numtopp_alloc(pfn); 2576 if (pp == NULL) 2577 continue; 2578 PP_CLRFREE(pp); 2579 2580 ASSERT(PAGE_EXCL(pp)); 2581 ASSERT(pp->p_vnode == NULL); 2582 ASSERT(!hat_page_is_mapped(pp)); 2583 last_mfn--; 2584 return (pp); 2585 } 2586 return (NULL); 2587 } 2588 2589 #else /* !__xpv */ 2590 2591 /* 2592 * get a page from any list with the given mnode 2593 */ 2594 static page_t * 2595 page_get_mnode_anylist(ulong_t origbin, uchar_t szc, uint_t flags, 2596 int mnode, int mtype, ddi_dma_attr_t *dma_attr) 2597 { 2598 kmutex_t *pcm; 2599 int i; 2600 page_t *pp; 2601 page_t *first_pp; 2602 uint64_t pgaddr; 2603 ulong_t bin; 2604 int mtypestart; 2605 int plw_initialized; 2606 page_list_walker_t plw; 2607 2608 VM_STAT_ADD(pga_vmstats.pgma_alloc); 2609 2610 ASSERT((flags & PG_MATCH_COLOR) == 0); 2611 ASSERT(szc == 0); 2612 ASSERT(dma_attr != NULL); 2613 2614 MTYPE_START(mnode, mtype, flags); 2615 if (mtype < 0) { 2616 VM_STAT_ADD(pga_vmstats.pgma_allocempty); 2617 return (NULL); 2618 } 2619 2620 mtypestart = mtype; 2621 2622 bin = origbin; 2623 2624 /* 2625 * check up to page_colors + 1 bins - origbin may be checked twice 2626 * because of BIN_STEP skip 2627 */ 2628 do { 2629 plw_initialized = 0; 2630 2631 for (plw.plw_count = 0; 2632 plw.plw_count < page_colors; plw.plw_count++) { 2633 2634 if (PAGE_FREELISTS(mnode, szc, bin, mtype) == NULL) 2635 goto nextfreebin; 2636 2637 pcm = PC_BIN_MUTEX(mnode, bin, PG_FREE_LIST); 2638 mutex_enter(pcm); 2639 pp = PAGE_FREELISTS(mnode, szc, bin, mtype); 2640 first_pp = pp; 2641 while (pp != NULL) { 2642 if (page_trylock(pp, SE_EXCL) == 0) { 2643 pp = pp->p_next; 2644 if (pp == first_pp) { 2645 pp = NULL; 2646 } 2647 continue; 2648 } 2649 2650 ASSERT(PP_ISFREE(pp)); 2651 ASSERT(PP_ISAGED(pp)); 2652 ASSERT(pp->p_vnode == NULL); 2653 ASSERT(pp->p_hash == NULL); 2654 ASSERT(pp->p_offset == (u_offset_t)-1); 2655 ASSERT(pp->p_szc == szc); 2656 ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode); 2657 /* check if page within DMA attributes */ 2658 pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum)); 2659 if ((pgaddr >= dma_attr->dma_attr_addr_lo) && 2660 (pgaddr + MMU_PAGESIZE - 1 <= 2661 dma_attr->dma_attr_addr_hi)) { 2662 break; 2663 } 2664 2665 /* continue looking */ 2666 page_unlock(pp); 2667 pp = pp->p_next; 2668 if (pp == first_pp) 2669 pp = NULL; 2670 2671 } 2672 if (pp != NULL) { 2673 ASSERT(mtype == PP_2_MTYPE(pp)); 2674 ASSERT(pp->p_szc == 0); 2675 2676 /* found a page with specified DMA attributes */ 2677 page_sub(&PAGE_FREELISTS(mnode, szc, bin, 2678 mtype), pp); 2679 page_ctr_sub(mnode, mtype, pp, PG_FREE_LIST); 2680 2681 if ((PP_ISFREE(pp) == 0) || 2682 (PP_ISAGED(pp) == 0)) { 2683 cmn_err(CE_PANIC, "page %p is not free", 2684 (void *)pp); 2685 } 2686 2687 mutex_exit(pcm); 2688 check_dma(dma_attr, pp, 1); 2689 VM_STAT_ADD(pga_vmstats.pgma_allocok); 2690 return (pp); 2691 } 2692 mutex_exit(pcm); 2693 nextfreebin: 2694 if (plw_initialized == 0) { 2695 page_list_walk_init(szc, 0, bin, 1, 0, &plw); 2696 ASSERT(plw.plw_ceq_dif == page_colors); 2697 plw_initialized = 1; 2698 } 2699 2700 if (plw.plw_do_split) { 2701 pp = page_freelist_split(szc, bin, mnode, 2702 mtype, 2703 mmu_btop(dma_attr->dma_attr_addr_hi + 1), 2704 &plw); 2705 if (pp != NULL) 2706 return (pp); 2707 } 2708 2709 bin = page_list_walk_next_bin(szc, bin, &plw); 2710 } 2711 2712 MTYPE_NEXT(mnode, mtype, flags); 2713 } while (mtype >= 0); 2714 2715 /* failed to find a page in the freelist; try it in the cachelist */ 2716 2717 /* reset mtype start for cachelist search */ 2718 mtype = mtypestart; 
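/* the cachelist pass below walks the same mnode/mtype ranges, stepping bins by BIN_STEP first and then one at a time */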
2719 ASSERT(mtype >= 0); 2720 2721 /* start with the bin of matching color */ 2722 bin = origbin; 2723 2724 do { 2725 for (i = 0; i <= page_colors; i++) { 2726 if (PAGE_CACHELISTS(mnode, bin, mtype) == NULL) 2727 goto nextcachebin; 2728 pcm = PC_BIN_MUTEX(mnode, bin, PG_CACHE_LIST); 2729 mutex_enter(pcm); 2730 pp = PAGE_CACHELISTS(mnode, bin, mtype); 2731 first_pp = pp; 2732 while (pp != NULL) { 2733 if (page_trylock(pp, SE_EXCL) == 0) { 2734 pp = pp->p_next; 2735 if (pp == first_pp) 2736 break; 2737 continue; 2738 } 2739 ASSERT(pp->p_vnode); 2740 ASSERT(PP_ISAGED(pp) == 0); 2741 ASSERT(pp->p_szc == 0); 2742 ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode); 2743 2744 /* check if page within DMA attributes */ 2745 2746 pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum)); 2747 if ((pgaddr >= dma_attr->dma_attr_addr_lo) && 2748 (pgaddr + MMU_PAGESIZE - 1 <= 2749 dma_attr->dma_attr_addr_hi)) { 2750 break; 2751 } 2752 2753 /* continue looking */ 2754 page_unlock(pp); 2755 pp = pp->p_next; 2756 if (pp == first_pp) 2757 pp = NULL; 2758 } 2759 2760 if (pp != NULL) { 2761 ASSERT(mtype == PP_2_MTYPE(pp)); 2762 ASSERT(pp->p_szc == 0); 2763 2764 /* found a page with specified DMA attributes */ 2765 page_sub(&PAGE_CACHELISTS(mnode, bin, 2766 mtype), pp); 2767 page_ctr_sub(mnode, mtype, pp, PG_CACHE_LIST); 2768 2769 mutex_exit(pcm); 2770 ASSERT(pp->p_vnode); 2771 ASSERT(PP_ISAGED(pp) == 0); 2772 check_dma(dma_attr, pp, 1); 2773 VM_STAT_ADD(pga_vmstats.pgma_allocok); 2774 return (pp); 2775 } 2776 mutex_exit(pcm); 2777 nextcachebin: 2778 bin += (i == 0) ? BIN_STEP : 1; 2779 bin &= page_colors_mask; 2780 } 2781 MTYPE_NEXT(mnode, mtype, flags); 2782 } while (mtype >= 0); 2783 2784 VM_STAT_ADD(pga_vmstats.pgma_allocfailed); 2785 return (NULL); 2786 } 2787 2788 /* 2789 * This function is similar to page_get_freelist()/page_get_cachelist() 2790 * but it searches both the lists to find a page with the specified 2791 * color (or no color) and DMA attributes. The search is done in the 2792 * freelist first and then in the cache list within the highest memory 2793 * range (based on DMA attributes) before searching in the lower 2794 * memory ranges. 2795 * 2796 * Note: This function is called only by page_create_io(). 2797 */ 2798 /*ARGSUSED*/ 2799 static page_t * 2800 page_get_anylist(struct vnode *vp, u_offset_t off, struct as *as, caddr_t vaddr, 2801 size_t size, uint_t flags, ddi_dma_attr_t *dma_attr, lgrp_t *lgrp) 2802 { 2803 uint_t bin; 2804 int mtype; 2805 page_t *pp; 2806 int n; 2807 int m; 2808 int szc; 2809 int fullrange; 2810 int mnode; 2811 int local_failed_stat = 0; 2812 lgrp_mnode_cookie_t lgrp_cookie; 2813 2814 VM_STAT_ADD(pga_vmstats.pga_alloc); 2815 2816 /* only base pagesize currently supported */ 2817 if (size != MMU_PAGESIZE) 2818 return (NULL); 2819 2820 /* 2821 * If we're passed a specific lgroup, we use it. Otherwise, 2822 * assume first-touch placement is desired. 2823 */ 2824 if (!LGRP_EXISTS(lgrp)) 2825 lgrp = lgrp_home_lgrp(); 2826 2827 /* LINTED */ 2828 AS_2_BIN(as, seg, vp, vaddr, bin, 0); 2829 2830 /* 2831 * Only hold one freelist or cachelist lock at a time, that way we 2832 * can start anywhere and not have to worry about lock 2833 * ordering. 2834 */ 2835 if (dma_attr == NULL) { 2836 n = 0; 2837 m = mnoderangecnt - 1; 2838 fullrange = 1; 2839 VM_STAT_ADD(pga_vmstats.pga_nulldmaattr); 2840 } else { 2841 pfn_t pfnlo = mmu_btop(dma_attr->dma_attr_addr_lo); 2842 pfn_t pfnhi = mmu_btop(dma_attr->dma_attr_addr_hi); 2843 2844 /* 2845 * We can guarantee alignment only for page boundary. 
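 * Requests with dma_attr_align greater than MMU_PAGESIZE are therefore rejected just below.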
2846 */ 2847 if (dma_attr->dma_attr_align > MMU_PAGESIZE) 2848 return (NULL); 2849 2850 n = pfn_2_mtype(pfnlo); 2851 m = pfn_2_mtype(pfnhi); 2852 2853 fullrange = ((pfnlo == mnoderanges[n].mnr_pfnlo) && 2854 (pfnhi >= mnoderanges[m].mnr_pfnhi)); 2855 } 2856 VM_STAT_COND_ADD(fullrange == 0, pga_vmstats.pga_notfullrange); 2857 2858 if (n > m) 2859 return (NULL); 2860 2861 szc = 0; 2862 2863 /* cycling thru mtype handled by RANGE0 if n == 0 */ 2864 if (n == 0) { 2865 flags |= PGI_MT_RANGE0; 2866 n = m; 2867 } 2868 2869 /* 2870 * Try local memory node first, but try remote if we can't 2871 * get a page of the right color. 2872 */ 2873 LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp, LGRP_SRCH_HIER); 2874 while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) { 2875 /* 2876 * allocate pages from high pfn to low. 2877 */ 2878 for (mtype = m; mtype >= n; mtype--) { 2879 if (fullrange != 0) { 2880 pp = page_get_mnode_freelist(mnode, 2881 bin, mtype, szc, flags); 2882 if (pp == NULL) { 2883 pp = page_get_mnode_cachelist( 2884 bin, flags, mnode, mtype); 2885 } 2886 } else { 2887 pp = page_get_mnode_anylist(bin, szc, 2888 flags, mnode, mtype, dma_attr); 2889 } 2890 if (pp != NULL) { 2891 VM_STAT_ADD(pga_vmstats.pga_allocok); 2892 check_dma(dma_attr, pp, 1); 2893 return (pp); 2894 } 2895 } 2896 if (!local_failed_stat) { 2897 lgrp_stat_add(lgrp->lgrp_id, LGRP_NUM_ALLOC_FAIL, 1); 2898 local_failed_stat = 1; 2899 } 2900 } 2901 VM_STAT_ADD(pga_vmstats.pga_allocfailed); 2902 2903 return (NULL); 2904 } 2905 2906 /* 2907 * page_create_io() 2908 * 2909 * This function is a copy of page_create_va() with an additional 2910 * argument 'mattr' that specifies DMA memory requirements to 2911 * the page list functions. This function is used by the segkmem 2912 * allocator, so it is used only to create new pages (i.e., PG_EXCL is 2913 * set). 2914 * 2915 * Note: This interface is currently used by the x86 PSM only and is 2916 * not fully specified, so the commitment level is only that of a 2917 * private interface specific to x86. This interface uses the PSM 2918 * specific page_get_anylist() interface. 2919 */ 2920 2921 #define PAGE_HASH_SEARCH(index, pp, vp, off) { \ 2922 for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \ 2923 if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \ 2924 break; \ 2925 } \ 2926 } 2927 2928 2929 page_t * 2930 page_create_io( 2931 struct vnode *vp, 2932 u_offset_t off, 2933 uint_t bytes, 2934 uint_t flags, 2935 struct as *as, 2936 caddr_t vaddr, 2937 ddi_dma_attr_t *mattr) /* DMA memory attributes if any */ 2938 { 2939 page_t *plist = NULL; 2940 uint_t plist_len = 0; 2941 pgcnt_t npages; 2942 page_t *npp = NULL; 2943 uint_t pages_req; 2944 page_t *pp; 2945 kmutex_t *phm = NULL; 2946 uint_t index; 2947 2948 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START, 2949 "page_create_start:vp %p off %llx bytes %u flags %x", 2950 vp, off, bytes, flags); 2951 2952 ASSERT((flags & ~(PG_EXCL | PG_WAIT | PG_PHYSCONTIG)) == 0); 2953 2954 pages_req = npages = mmu_btopr(bytes); 2955 2956 /* 2957 * Do the freemem and pcf accounting. 2958 */ 2959 if (!page_create_wait(npages, flags)) { 2960 return (NULL); 2961 } 2962 2963 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS, 2964 "page_create_success:vp %p off %llx", vp, off); 2965 2966 /* 2967 * If satisfying this request has left us with too little 2968 * memory, start the wheels turning to get some back. The 2969 * first clause of the test prevents waking up the pageout 2970 * daemon in situations where it would decide that there's 2971 * nothing to do.
2972 */ 2973 if (nscan < desscan && freemem < minfree) { 2974 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL, 2975 "pageout_cv_signal:freemem %ld", freemem); 2976 cv_signal(&proc_pageout->p_cv); 2977 } 2978 2979 if (flags & PG_PHYSCONTIG) { 2980 2981 plist = page_get_contigpage(&npages, mattr, 1); 2982 if (plist == NULL) { 2983 page_create_putback(npages); 2984 return (NULL); 2985 } 2986 2987 pp = plist; 2988 2989 do { 2990 if (!page_hashin(pp, vp, off, NULL)) { 2991 panic("pg_creat_io: hashin failed %p %p %llx", 2992 (void *)pp, (void *)vp, off); 2993 } 2994 VM_STAT_ADD(page_create_new); 2995 off += MMU_PAGESIZE; 2996 PP_CLRFREE(pp); 2997 PP_CLRAGED(pp); 2998 page_set_props(pp, P_REF); 2999 pp = pp->p_next; 3000 } while (pp != plist); 3001 3002 if (!npages) { 3003 check_dma(mattr, plist, pages_req); 3004 return (plist); 3005 } else { 3006 vaddr += (pages_req - npages) << MMU_PAGESHIFT; 3007 } 3008 3009 /* 3010 * fall-thru: 3011 * 3012 * page_get_contigpage returns when npages <= sgllen. 3013 * Grab the rest of the non-contig pages below from anylist. 3014 */ 3015 } 3016 3017 /* 3018 * Loop around collecting the requested number of pages. 3019 * Most of the time, we have to `create' a new page. With 3020 * this in mind, pull the page off the free list before 3021 * getting the hash lock. This will minimize the hash 3022 * lock hold time, nesting, and the like. If it turns 3023 * out we don't need the page, we put it back at the end. 3024 */ 3025 while (npages--) { 3026 phm = NULL; 3027 3028 index = PAGE_HASH_FUNC(vp, off); 3029 top: 3030 ASSERT(phm == NULL); 3031 ASSERT(index == PAGE_HASH_FUNC(vp, off)); 3032 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp))); 3033 3034 if (npp == NULL) { 3035 /* 3036 * Try to get the page of any color either from 3037 * the freelist or from the cache list. 3038 */ 3039 npp = page_get_anylist(vp, off, as, vaddr, MMU_PAGESIZE, 3040 flags & ~PG_MATCH_COLOR, mattr, NULL); 3041 if (npp == NULL) { 3042 if (mattr == NULL) { 3043 /* 3044 * Not looking for a special page; 3045 * panic! 3046 */ 3047 panic("no page found %d", (int)npages); 3048 } 3049 /* 3050 * No page found! This can happen 3051 * if we are looking for a page 3052 * within a specific memory range 3053 * for DMA purposes. If PG_WAIT is 3054 * specified then we wait for a 3055 * while and then try again. The 3056 * wait could be forever if we 3057 * don't get the page(s) we need. 3058 * 3059 * Note: XXX We really need a mechanism 3060 * to wait for pages in the desired 3061 * range. For now, we wait for any 3062 * pages and see if we can use it. 3063 */ 3064 3065 if ((mattr != NULL) && (flags & PG_WAIT)) { 3066 delay(10); 3067 goto top; 3068 } 3069 goto fail; /* undo accounting stuff */ 3070 } 3071 3072 if (PP_ISAGED(npp) == 0) { 3073 /* 3074 * Since this page came from the 3075 * cachelist, we must destroy the 3076 * old vnode association. 3077 */ 3078 page_hashout(npp, (kmutex_t *)NULL); 3079 } 3080 } 3081 3082 /* 3083 * We own this page! 3084 */ 3085 ASSERT(PAGE_EXCL(npp)); 3086 ASSERT(npp->p_vnode == NULL); 3087 ASSERT(!hat_page_is_mapped(npp)); 3088 PP_CLRFREE(npp); 3089 PP_CLRAGED(npp); 3090 3091 /* 3092 * Here we have a page in our hot little mits and are 3093 * just waiting to stuff it on the appropriate lists. 3094 * Get the mutex and check to see if it really does 3095 * not exist. 
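 * If some other thread already hashed in a page at this (vp, off), we drop our page and bail out via the fail label below.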
3096 */ 3097 phm = PAGE_HASH_MUTEX(index); 3098 mutex_enter(phm); 3099 PAGE_HASH_SEARCH(index, pp, vp, off); 3100 if (pp == NULL) { 3101 VM_STAT_ADD(page_create_new); 3102 pp = npp; 3103 npp = NULL; 3104 if (!page_hashin(pp, vp, off, phm)) { 3105 /* 3106 * Since we hold the page hash mutex and 3107 * just searched for this page, page_hashin 3108 * had better not fail. If it does, that 3109 * means some thread did not follow the 3110 * page hash mutex rules. Panic now and 3111 * get it over with. As usual, go down 3112 * holding all the locks. 3113 */ 3114 ASSERT(MUTEX_HELD(phm)); 3115 panic("page_create: hashin fail %p %p %llx %p", 3116 (void *)pp, (void *)vp, off, (void *)phm); 3117 3118 } 3119 ASSERT(MUTEX_HELD(phm)); 3120 mutex_exit(phm); 3121 phm = NULL; 3122 3123 /* 3124 * Hat layer locking need not be done to set 3125 * the following bits since the page is not hashed 3126 * and was on the free list (i.e., had no mappings). 3127 * 3128 * Set the reference bit to protect 3129 * against immediate pageout 3130 * 3131 * XXXmh modify freelist code to set reference 3132 * bit so we don't have to do it here. 3133 */ 3134 page_set_props(pp, P_REF); 3135 } else { 3136 ASSERT(MUTEX_HELD(phm)); 3137 mutex_exit(phm); 3138 phm = NULL; 3139 /* 3140 * NOTE: This should not happen for pages associated 3141 * with kernel vnode 'kvp'. 3142 */ 3143 /* XX64 - to debug why this happens! */ 3144 ASSERT(!VN_ISKAS(vp)); 3145 if (VN_ISKAS(vp)) 3146 cmn_err(CE_NOTE, 3147 "page_create: page not expected " 3148 "in hash list for kernel vnode - pp 0x%p", 3149 (void *)pp); 3150 VM_STAT_ADD(page_create_exists); 3151 goto fail; 3152 } 3153 3154 /* 3155 * Got a page! It is locked. Acquire the i/o 3156 * lock since we are going to use the p_next and 3157 * p_prev fields to link the requested pages together. 3158 */ 3159 page_io_lock(pp); 3160 page_add(&plist, pp); 3161 plist = plist->p_next; 3162 off += MMU_PAGESIZE; 3163 vaddr += MMU_PAGESIZE; 3164 } 3165 3166 check_dma(mattr, plist, pages_req); 3167 return (plist); 3168 3169 fail: 3170 if (npp != NULL) { 3171 /* 3172 * Did not need this page after all. 3173 * Put it back on the free list. 3174 */ 3175 VM_STAT_ADD(page_create_putbacks); 3176 PP_SETFREE(npp); 3177 PP_SETAGED(npp); 3178 npp->p_offset = (u_offset_t)-1; 3179 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL); 3180 page_unlock(npp); 3181 } 3182 3183 /* 3184 * Give up the pages we already got. 3185 */ 3186 while (plist != NULL) { 3187 pp = plist; 3188 page_sub(&plist, pp); 3189 page_io_unlock(pp); 3190 plist_len++; 3191 /*LINTED: constant in conditional ctx*/ 3192 VN_DISPOSE(pp, B_INVAL, 0, kcred); 3193 } 3194 3195 /* 3196 * VN_DISPOSE does freemem accounting for the pages in plist 3197 * by calling page_free. So, we need to undo the pcf accounting 3198 * for only the remaining pages. 3199 */ 3200 VM_STAT_ADD(page_create_putbacks); 3201 page_create_putback(pages_req - plist_len); 3202 3203 return (NULL); 3204 } 3205 #endif /* !__xpv */ 3206 3207 3208 /* 3209 * Copy the data from the physical page represented by "frompp" to 3210 * that represented by "topp". ppcopy uses CPU->cpu_caddr1 and 3211 * CPU->cpu_caddr2. It assumes that no one uses either map at interrupt 3212 * level and no one sleeps with an active mapping there. 3213 * 3214 * Note that the ref/mod bits in the page_t's are not affected by 3215 * this operation, hence it is up to the caller to update them appropriately.
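 * Returns 1 on success, or 0 if a fault was taken while copying.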
3216 */ 3217 int 3218 ppcopy(page_t *frompp, page_t *topp) 3219 { 3220 caddr_t pp_addr1; 3221 caddr_t pp_addr2; 3222 hat_mempte_t pte1; 3223 hat_mempte_t pte2; 3224 kmutex_t *ppaddr_mutex; 3225 label_t ljb; 3226 int ret = 1; 3227 3228 ASSERT_STACK_ALIGNED(); 3229 ASSERT(PAGE_LOCKED(frompp)); 3230 ASSERT(PAGE_LOCKED(topp)); 3231 3232 if (kpm_enable) { 3233 pp_addr1 = hat_kpm_page2va(frompp, 0); 3234 pp_addr2 = hat_kpm_page2va(topp, 0); 3235 kpreempt_disable(); 3236 } else { 3237 /* 3238 * disable pre-emption so that CPU can't change 3239 */ 3240 kpreempt_disable(); 3241 3242 pp_addr1 = CPU->cpu_caddr1; 3243 pp_addr2 = CPU->cpu_caddr2; 3244 pte1 = CPU->cpu_caddr1pte; 3245 pte2 = CPU->cpu_caddr2pte; 3246 3247 ppaddr_mutex = &CPU->cpu_ppaddr_mutex; 3248 mutex_enter(ppaddr_mutex); 3249 3250 hat_mempte_remap(page_pptonum(frompp), pp_addr1, pte1, 3251 PROT_READ | HAT_STORECACHING_OK, HAT_LOAD_NOCONSIST); 3252 hat_mempte_remap(page_pptonum(topp), pp_addr2, pte2, 3253 PROT_READ | PROT_WRITE | HAT_STORECACHING_OK, 3254 HAT_LOAD_NOCONSIST); 3255 } 3256 3257 if (on_fault(&ljb)) { 3258 ret = 0; 3259 goto faulted; 3260 } 3261 if (use_sse_pagecopy) 3262 #ifdef __xpv 3263 page_copy_no_xmm(pp_addr2, pp_addr1); 3264 #else 3265 hwblkpagecopy(pp_addr1, pp_addr2); 3266 #endif 3267 else 3268 bcopy(pp_addr1, pp_addr2, PAGESIZE); 3269 3270 no_fault(); 3271 faulted: 3272 if (!kpm_enable) { 3273 #ifdef __xpv 3274 /* 3275 * We can't leave unused mappings laying about under the 3276 * hypervisor, so blow them away. 3277 */ 3278 if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr1, 0, 3279 UVMF_INVLPG | UVMF_LOCAL) < 0) 3280 panic("HYPERVISOR_update_va_mapping() failed"); 3281 if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0, 3282 UVMF_INVLPG | UVMF_LOCAL) < 0) 3283 panic("HYPERVISOR_update_va_mapping() failed"); 3284 #endif 3285 mutex_exit(ppaddr_mutex); 3286 } 3287 kpreempt_enable(); 3288 return (ret); 3289 } 3290 3291 void 3292 pagezero(page_t *pp, uint_t off, uint_t len) 3293 { 3294 ASSERT(PAGE_LOCKED(pp)); 3295 pfnzero(page_pptonum(pp), off, len); 3296 } 3297 3298 /* 3299 * Zero the physical page from off to off + len given by pfn 3300 * without changing the reference and modified bits of page. 3301 * 3302 * We use this using CPU private page address #2, see ppcopy() for more info. 3303 * pfnzero() must not be called at interrupt level. 3304 */ 3305 void 3306 pfnzero(pfn_t pfn, uint_t off, uint_t len) 3307 { 3308 caddr_t pp_addr2; 3309 hat_mempte_t pte2; 3310 kmutex_t *ppaddr_mutex = NULL; 3311 3312 ASSERT_STACK_ALIGNED(); 3313 ASSERT(len <= MMU_PAGESIZE); 3314 ASSERT(off <= MMU_PAGESIZE); 3315 ASSERT(off + len <= MMU_PAGESIZE); 3316 3317 if (kpm_enable && !pfn_is_foreign(pfn)) { 3318 pp_addr2 = hat_kpm_pfn2va(pfn); 3319 kpreempt_disable(); 3320 } else { 3321 kpreempt_disable(); 3322 3323 pp_addr2 = CPU->cpu_caddr2; 3324 pte2 = CPU->cpu_caddr2pte; 3325 3326 ppaddr_mutex = &CPU->cpu_ppaddr_mutex; 3327 mutex_enter(ppaddr_mutex); 3328 3329 hat_mempte_remap(pfn, pp_addr2, pte2, 3330 PROT_READ | PROT_WRITE | HAT_STORECACHING_OK, 3331 HAT_LOAD_NOCONSIST); 3332 } 3333 3334 if (use_sse_pagezero) { 3335 #ifdef __xpv 3336 uint_t rem; 3337 3338 /* 3339 * zero a byte at a time until properly aligned for 3340 * block_zero_no_xmm(). 3341 */ 3342 while (!P2NPHASE(off, ((uint_t)BLOCKZEROALIGN)) && len-- > 0) 3343 pp_addr2[off++] = 0; 3344 3345 /* 3346 * Now use faster block_zero_no_xmm() for any range 3347 * that is properly aligned and sized. 
3348 */ 3349 rem = P2PHASE(len, ((uint_t)BLOCKZEROALIGN)); 3350 len -= rem; 3351 if (len != 0) { 3352 block_zero_no_xmm(pp_addr2 + off, len); 3353 off += len; 3354 } 3355 3356 /* 3357 * zero remainder with byte stores. 3358 */ 3359 while (rem-- > 0) 3360 pp_addr2[off++] = 0; 3361 #else 3362 hwblkclr(pp_addr2 + off, len); 3363 #endif 3364 } else { 3365 bzero(pp_addr2 + off, len); 3366 } 3367 3368 if (!kpm_enable || pfn_is_foreign(pfn)) { 3369 #ifdef __xpv 3370 /* 3371 * On the hypervisor this page might get used for a page 3372 * table before any intervening change to this mapping, 3373 * so blow it away. 3374 */ 3375 if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0, 3376 UVMF_INVLPG) < 0) 3377 panic("HYPERVISOR_update_va_mapping() failed"); 3378 #endif 3379 mutex_exit(ppaddr_mutex); 3380 } 3381 3382 kpreempt_enable(); 3383 } 3384 3385 /* 3386 * Platform-dependent page scrub call. 3387 */ 3388 void 3389 pagescrub(page_t *pp, uint_t off, uint_t len) 3390 { 3391 /* 3392 * For now, we rely on the fact that pagezero() will 3393 * always clear UEs. 3394 */ 3395 pagezero(pp, off, len); 3396 } 3397 3398 /* 3399 * set up two private addresses for use on a given CPU for use in ppcopy() 3400 */ 3401 void 3402 setup_vaddr_for_ppcopy(struct cpu *cpup) 3403 { 3404 void *addr; 3405 hat_mempte_t pte_pa; 3406 3407 addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP); 3408 pte_pa = hat_mempte_setup(addr); 3409 cpup->cpu_caddr1 = addr; 3410 cpup->cpu_caddr1pte = pte_pa; 3411 3412 addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP); 3413 pte_pa = hat_mempte_setup(addr); 3414 cpup->cpu_caddr2 = addr; 3415 cpup->cpu_caddr2pte = pte_pa; 3416 3417 mutex_init(&cpup->cpu_ppaddr_mutex, NULL, MUTEX_DEFAULT, NULL); 3418 } 3419 3420 /* 3421 * Undo setup_vaddr_for_ppcopy 3422 */ 3423 void 3424 teardown_vaddr_for_ppcopy(struct cpu *cpup) 3425 { 3426 mutex_destroy(&cpup->cpu_ppaddr_mutex); 3427 3428 hat_mempte_release(cpup->cpu_caddr2, cpup->cpu_caddr2pte); 3429 cpup->cpu_caddr2pte = 0; 3430 vmem_free(heap_arena, cpup->cpu_caddr2, mmu_ptob(1)); 3431 cpup->cpu_caddr2 = 0; 3432 3433 hat_mempte_release(cpup->cpu_caddr1, cpup->cpu_caddr1pte); 3434 cpup->cpu_caddr1pte = 0; 3435 vmem_free(heap_arena, cpup->cpu_caddr1, mmu_ptob(1)); 3436 cpup->cpu_caddr1 = 0; 3437 } 3438 3439 /* 3440 * Create the pageout scanner thread. The thread has to 3441 * start at procedure with process pp and priority pri. 3442 */ 3443 void 3444 pageout_init(void (*procedure)(), proc_t *pp, pri_t pri) 3445 { 3446 (void) thread_create(NULL, 0, procedure, NULL, 0, pp, TS_RUN, pri); 3447 } 3448 3449 /* 3450 * Function for flushing D-cache when performing module relocations 3451 * to an alternate mapping. Unnecessary on Intel / AMD platforms. 3452 */ 3453 void 3454 dcache_flushall() 3455 {} 3456 3457 size_t 3458 exec_get_spslew(void) 3459 { 3460 return (0); 3461 } 3462 3463 /* 3464 * Allocate a memory page. The argument 'seed' can be any pseudo-random 3465 * number to vary where the pages come from. This is quite a hacked up 3466 * method -- it works for now, but really needs to be fixed up a bit. 3467 * 3468 * We currently use page_create_va() on the kvp with fake offsets, 3469 * segments and virt address. This is pretty bogus, but was copied from the 3470 * old hat_i86.c code. A better approach would be to specify either mnode 3471 * random or mnode local and takes a page from whatever color has the MOST 3472 * available - this would have a minimal impact on page coloring. 
*/ 3474 page_t * 3475 page_get_physical(uintptr_t seed) 3476 { 3477 page_t *pp; 3478 u_offset_t offset; 3479 static struct seg tmpseg; 3480 static uintptr_t ctr = 0; 3481 3482 /* 3483 * This code is gross; we really need a simpler page allocator. 3484 * 3485 * We need to assign an offset for the page to call page_create_va(). 3486 * To avoid conflicts with other pages, we get creative with the offset. 3487 * For 32 bits, we pick an offset > 4Gig. 3488 * For 64 bits, pick an offset somewhere in the VA hole. 3489 */ 3490 offset = seed; 3491 if (offset > kernelbase) 3492 offset -= kernelbase; 3493 offset <<= MMU_PAGESHIFT; 3494 #if defined(__amd64) 3495 offset += mmu.hole_start; /* something in VA hole */ 3496 #else 3497 offset += 1ULL << 40; /* something > 4 Gig */ 3498 #endif 3499 3500 if (page_resv(1, KM_NOSLEEP) == 0) 3501 return (NULL); 3502 3503 #ifdef DEBUG 3504 pp = page_exists(&kvp, offset); 3505 if (pp != NULL) 3506 panic("page already exists %p", pp); 3507 #endif 3508 3509 pp = page_create_va(&kvp, offset, MMU_PAGESIZE, PG_EXCL, 3510 &tmpseg, (caddr_t)(ctr += MMU_PAGESIZE)); /* changing VA usage */ 3511 if (pp == NULL) 3512 return (NULL); 3513 page_io_unlock(pp); 3514 page_hashout(pp, NULL); 3515 return (pp); 3516 } 3517