1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/param.h> 29 #include <sys/user.h> 30 #include <sys/mman.h> 31 #include <sys/kmem.h> 32 #include <sys/sysmacros.h> 33 #include <sys/cmn_err.h> 34 #include <sys/systm.h> 35 #include <sys/tuneable.h> 36 #include <vm/hat.h> 37 #include <vm/seg.h> 38 #include <vm/as.h> 39 #include <vm/anon.h> 40 #include <vm/page.h> 41 #include <sys/buf.h> 42 #include <sys/swap.h> 43 #include <sys/atomic.h> 44 #include <vm/seg_spt.h> 45 #include <sys/debug.h> 46 #include <sys/vtrace.h> 47 #include <sys/shm.h> 48 #include <sys/lgrp.h> 49 #include <sys/vmsystm.h> 50 51 #include <sys/tnf_probe.h> 52 53 #define SEGSPTADDR (caddr_t)0x0 54 55 /* 56 * # pages used for spt 57 */ 58 static size_t spt_used; 59 60 /* 61 * segspt_minfree is the memory left for system after ISM 62 * locked its pages; it is set up to 5% of availrmem in 63 * sptcreate when ISM is created. ISM should not use more 64 * than ~90% of availrmem; if it does, then the performance 65 * of the system may decrease. Machines with large memories may 66 * be able to use up more memory for ISM so we set the default 67 * segspt_minfree to 5% (which gives ISM max 95% of availrmem. 68 * If somebody wants even more memory for ISM (risking hanging 69 * the system) they can patch the segspt_minfree to smaller number. 
70 */ 71 pgcnt_t segspt_minfree = 0; 72 73 static int segspt_create(struct seg *seg, caddr_t argsp); 74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize); 75 static void segspt_free(struct seg *seg); 76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len); 77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr); 78 79 static void 80 segspt_badop() 81 { 82 panic("segspt_badop called"); 83 /*NOTREACHED*/ 84 } 85 86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop 87 88 struct seg_ops segspt_ops = { 89 SEGSPT_BADOP(int), /* dup */ 90 segspt_unmap, 91 segspt_free, 92 SEGSPT_BADOP(int), /* fault */ 93 SEGSPT_BADOP(faultcode_t), /* faulta */ 94 SEGSPT_BADOP(int), /* setprot */ 95 SEGSPT_BADOP(int), /* checkprot */ 96 SEGSPT_BADOP(int), /* kluster */ 97 SEGSPT_BADOP(size_t), /* swapout */ 98 SEGSPT_BADOP(int), /* sync */ 99 SEGSPT_BADOP(size_t), /* incore */ 100 SEGSPT_BADOP(int), /* lockop */ 101 SEGSPT_BADOP(int), /* getprot */ 102 SEGSPT_BADOP(u_offset_t), /* getoffset */ 103 SEGSPT_BADOP(int), /* gettype */ 104 SEGSPT_BADOP(int), /* getvp */ 105 SEGSPT_BADOP(int), /* advise */ 106 SEGSPT_BADOP(void), /* dump */ 107 SEGSPT_BADOP(int), /* pagelock */ 108 SEGSPT_BADOP(int), /* setpgsz */ 109 SEGSPT_BADOP(int), /* getmemid */ 110 segspt_getpolicy, /* getpolicy */ 111 SEGSPT_BADOP(int), /* capable */ 112 }; 113 114 static int segspt_shmdup(struct seg *seg, struct seg *newseg); 115 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize); 116 static void segspt_shmfree(struct seg *seg); 117 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg, 118 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw); 119 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr); 120 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr, 121 register size_t len, register uint_t prot); 122 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, 123 uint_t prot); 124 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta); 125 static size_t segspt_shmswapout(struct seg *seg); 126 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, 127 register char *vec); 128 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len, 129 int attr, uint_t flags); 130 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len, 131 int attr, int op, ulong_t *lockmap, size_t pos); 132 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, 133 uint_t *protv); 134 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr); 135 static int segspt_shmgettype(struct seg *seg, caddr_t addr); 136 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp); 137 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, 138 uint_t behav); 139 static void segspt_shmdump(struct seg *seg); 140 static int segspt_shmpagelock(struct seg *, caddr_t, size_t, 141 struct page ***, enum lock_type, enum seg_rw); 142 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t); 143 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *); 144 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t); 145 static int segspt_shmcapable(struct seg *, segcapability_t); 146 147 struct seg_ops segspt_shmops = { 148 segspt_shmdup, 149 segspt_shmunmap, 150 segspt_shmfree, 151 segspt_shmfault, 152 segspt_shmfaulta, 153 segspt_shmsetprot, 154 segspt_shmcheckprot, 
155 segspt_shmkluster, 156 segspt_shmswapout, 157 segspt_shmsync, 158 segspt_shmincore, 159 segspt_shmlockop, 160 segspt_shmgetprot, 161 segspt_shmgetoffset, 162 segspt_shmgettype, 163 segspt_shmgetvp, 164 segspt_shmadvise, /* advise */ 165 segspt_shmdump, 166 segspt_shmpagelock, 167 segspt_shmsetpgsz, 168 segspt_shmgetmemid, 169 segspt_shmgetpolicy, 170 segspt_shmcapable, 171 }; 172 173 static void segspt_purge(struct seg *seg); 174 static int segspt_reclaim(struct seg *, caddr_t, size_t, struct page **, 175 enum seg_rw); 176 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len, 177 page_t **ppa); 178 179 180 181 /*ARGSUSED*/ 182 int 183 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp, 184 uint_t prot, uint_t flags, uint_t share_szc) 185 { 186 int err; 187 struct as *newas; 188 struct segspt_crargs sptcargs; 189 190 #ifdef DEBUG 191 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */, 192 tnf_ulong, size, size ); 193 #endif 194 if (segspt_minfree == 0) /* leave min 5% of availrmem for */ 195 segspt_minfree = availrmem/20; /* for the system */ 196 197 if (!hat_supported(HAT_SHARED_PT, (void *)0)) 198 return (EINVAL); 199 200 /* 201 * get a new as for this shared memory segment 202 */ 203 newas = as_alloc(); 204 sptcargs.amp = amp; 205 sptcargs.prot = prot; 206 sptcargs.flags = flags; 207 sptcargs.szc = share_szc; 208 209 /* 210 * create a shared page table (spt) segment 211 */ 212 213 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) { 214 as_free(newas); 215 return (err); 216 } 217 *sptseg = sptcargs.seg_spt; 218 return (0); 219 } 220 221 void 222 sptdestroy(struct as *as, struct anon_map *amp) 223 { 224 225 #ifdef DEBUG 226 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */); 227 #endif 228 (void) as_unmap(as, SEGSPTADDR, amp->size); 229 as_free(as); 230 } 231 232 /* 233 * called from seg_free(). 234 * free (i.e., unlock, unmap, return to free list) 235 * all the pages in the given seg. 236 */ 237 void 238 segspt_free(struct seg *seg) 239 { 240 struct spt_data *sptd = (struct spt_data *)seg->s_data; 241 242 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 243 244 if (sptd != NULL) { 245 if (sptd->spt_realsize) 246 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize); 247 248 if (sptd->spt_ppa_lckcnt) 249 kmem_free(sptd->spt_ppa_lckcnt, 250 sizeof (*sptd->spt_ppa_lckcnt) 251 * btopr(sptd->spt_amp->size)); 252 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp)); 253 mutex_destroy(&sptd->spt_lock); 254 kmem_free(sptd, sizeof (*sptd)); 255 } 256 } 257 258 /*ARGSUSED*/ 259 static int 260 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr, 261 uint_t flags) 262 { 263 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 264 265 return (0); 266 } 267 268 /*ARGSUSED*/ 269 static size_t 270 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec) 271 { 272 caddr_t eo_seg; 273 pgcnt_t npages; 274 struct shm_data *shmd = (struct shm_data *)seg->s_data; 275 struct seg *sptseg; 276 struct spt_data *sptd; 277 278 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 279 #ifdef lint 280 seg = seg; 281 #endif 282 sptseg = shmd->shm_sptseg; 283 sptd = sptseg->s_data; 284 285 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 286 eo_seg = addr + len; 287 while (addr < eo_seg) { 288 /* page exists, and it's locked. 
*/ 289 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED | 290 SEG_PAGE_ANON; 291 addr += PAGESIZE; 292 } 293 return (len); 294 } else { 295 struct anon_map *amp = shmd->shm_amp; 296 struct anon *ap; 297 page_t *pp; 298 pgcnt_t anon_index; 299 struct vnode *vp; 300 u_offset_t off; 301 ulong_t i; 302 int ret; 303 anon_sync_obj_t cookie; 304 305 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK); 306 anon_index = seg_page(seg, addr); 307 npages = btopr(len); 308 if (anon_index + npages > btopr(shmd->shm_amp->size)) { 309 return (EINVAL); 310 } 311 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 312 for (i = 0; i < npages; i++, anon_index++) { 313 ret = 0; 314 anon_array_enter(amp, anon_index, &cookie); 315 ap = anon_get_ptr(amp->ahp, anon_index); 316 if (ap != NULL) { 317 swap_xlate(ap, &vp, &off); 318 anon_array_exit(&cookie); 319 pp = page_lookup_nowait(vp, off, SE_SHARED); 320 if (pp != NULL) { 321 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON; 322 page_unlock(pp); 323 } 324 } else { 325 anon_array_exit(&cookie); 326 } 327 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) { 328 ret |= SEG_PAGE_LOCKED; 329 } 330 *vec++ = (char)ret; 331 } 332 ANON_LOCK_EXIT(&->a_rwlock); 333 return (len); 334 } 335 } 336 337 static int 338 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize) 339 { 340 size_t share_size; 341 342 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 343 344 /* 345 * seg.s_size may have been rounded up to the largest page size 346 * in shmat(). 347 * XXX This should be cleanedup. sptdestroy should take a length 348 * argument which should be the same as sptcreate. Then 349 * this rounding would not be needed (or is done in shm.c) 350 * Only the check for full segment will be needed. 351 * 352 * XXX -- shouldn't raddr == 0 always? These tests don't seem 353 * to be useful at all. 354 */ 355 share_size = page_get_pagesize(seg->s_szc); 356 ssize = P2ROUNDUP(ssize, share_size); 357 358 if (raddr == seg->s_base && ssize == seg->s_size) { 359 seg_free(seg); 360 return (0); 361 } else 362 return (EINVAL); 363 } 364 365 int 366 segspt_create(struct seg *seg, caddr_t argsp) 367 { 368 int err; 369 caddr_t addr = seg->s_base; 370 struct spt_data *sptd; 371 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp; 372 struct anon_map *amp = sptcargs->amp; 373 struct cred *cred = CRED(); 374 ulong_t i, j, anon_index = 0; 375 pgcnt_t npages = btopr(amp->size); 376 struct vnode *vp; 377 page_t **ppa; 378 uint_t hat_flags; 379 380 /* 381 * We are holding the a_lock on the underlying dummy as, 382 * so we can make calls to the HAT layer. 
383 */ 384 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 385 386 #ifdef DEBUG 387 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */, 388 tnf_opaque, addr, addr, 389 tnf_ulong, len, seg->s_size); 390 #endif 391 if ((sptcargs->flags & SHM_PAGEABLE) == 0) { 392 if (err = anon_swap_adjust(npages)) 393 return (err); 394 } 395 err = ENOMEM; 396 397 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL) 398 goto out1; 399 400 if ((sptcargs->flags & SHM_PAGEABLE) == 0) { 401 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages), 402 KM_NOSLEEP)) == NULL) 403 goto out2; 404 } 405 406 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL); 407 408 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL) 409 goto out3; 410 411 seg->s_ops = &segspt_ops; 412 sptd->spt_vp = vp; 413 sptd->spt_amp = amp; 414 sptd->spt_prot = sptcargs->prot; 415 sptd->spt_flags = sptcargs->flags; 416 seg->s_data = (caddr_t)sptd; 417 sptd->spt_ppa = NULL; 418 sptd->spt_ppa_lckcnt = NULL; 419 seg->s_szc = sptcargs->szc; 420 421 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 422 amp->a_szc = seg->s_szc; 423 ANON_LOCK_EXIT(&->a_rwlock); 424 425 /* 426 * Set policy to affect initial allocation of pages in 427 * anon_map_createpages() 428 */ 429 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index, 430 NULL, 0, ptob(npages)); 431 432 if (sptcargs->flags & SHM_PAGEABLE) { 433 size_t share_sz; 434 pgcnt_t new_npgs, more_pgs; 435 struct anon_hdr *nahp; 436 437 share_sz = page_get_pagesize(seg->s_szc); 438 if (!IS_P2ALIGNED(amp->size, share_sz)) { 439 /* 440 * We are rounding up the size of the anon array 441 * on 4 M boundary because we always create 4 M 442 * of page(s) when locking, faulting pages and we 443 * don't have to check for all corner cases e.g. 444 * if there is enough space to allocate 4 M 445 * page. 446 */ 447 new_npgs = btop(P2ROUNDUP(amp->size, share_sz)); 448 more_pgs = new_npgs - npages; 449 450 if (anon_resv(ptob(more_pgs)) == 0) { 451 err = ENOMEM; 452 goto out4; 453 } 454 nahp = anon_create(new_npgs, ANON_SLEEP); 455 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 456 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages, 457 ANON_SLEEP); 458 anon_release(amp->ahp, npages); 459 amp->ahp = nahp; 460 amp->swresv = amp->size = ptob(new_npgs); 461 ANON_LOCK_EXIT(&->a_rwlock); 462 npages = new_npgs; 463 } 464 465 sptd->spt_ppa_lckcnt = kmem_zalloc(npages * 466 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP); 467 sptd->spt_pcachecnt = 0; 468 sptd->spt_realsize = ptob(npages); 469 sptcargs->seg_spt = seg; 470 return (0); 471 } 472 473 /* 474 * get array of pages for each anon slot in amp 475 */ 476 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa, 477 seg, addr, S_CREATE, cred)) != 0) 478 goto out4; 479 480 /* 481 * addr is initial address corresponding to the first page on ppa list 482 */ 483 for (i = 0; i < npages; i++) { 484 /* attempt to lock all pages */ 485 if (!page_pp_lock(ppa[i], 0, 1)) { 486 /* 487 * if unable to lock any page, unlock all 488 * of them and return error 489 */ 490 for (j = 0; j < i; j++) 491 page_pp_unlock(ppa[j], 0, 1); 492 for (i = 0; i < npages; i++) { 493 page_unlock(ppa[i]); 494 } 495 err = ENOMEM; 496 goto out4; 497 } 498 } 499 500 /* 501 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 502 * for the entire life of the segment. For example platforms 503 * that do not support Dynamic Reconfiguration. 
504 */ 505 hat_flags = HAT_LOAD_SHARE; 506 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) 507 hat_flags |= HAT_LOAD_LOCK; 508 509 hat_memload_array(seg->s_as->a_hat, addr, ptob(npages), 510 ppa, sptd->spt_prot, hat_flags); 511 512 /* 513 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 514 * we will leave the pages locked SE_SHARED for the life 515 * of the ISM segment. This will prevent any calls to 516 * hat_pageunload() on this ISM segment for those platforms. 517 */ 518 if (!(hat_flags & HAT_LOAD_LOCK)) { 519 /* 520 * On platforms that support HAT_DYNAMIC_ISM_UNMAP, 521 * we no longer need to hold the SE_SHARED lock on the pages, 522 * since L_PAGELOCK and F_SOFTLOCK calls will grab the 523 * SE_SHARED lock on the pages as necessary. 524 */ 525 for (i = 0; i < npages; i++) 526 page_unlock(ppa[i]); 527 } 528 sptd->spt_pcachecnt = 0; 529 kmem_free(ppa, ((sizeof (page_t *)) * npages)); 530 sptd->spt_realsize = ptob(npages); 531 atomic_add_long(&spt_used, npages); 532 sptcargs->seg_spt = seg; 533 return (0); 534 535 out4: 536 seg->s_data = NULL; 537 kmem_free(vp, sizeof (*vp)); 538 out3: 539 mutex_destroy(&sptd->spt_lock); 540 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 541 kmem_free(ppa, (sizeof (*ppa) * npages)); 542 out2: 543 kmem_free(sptd, sizeof (*sptd)); 544 out1: 545 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 546 anon_swap_restore(npages); 547 return (err); 548 } 549 550 /*ARGSUSED*/ 551 void 552 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len) 553 { 554 struct page *pp; 555 struct spt_data *sptd = (struct spt_data *)seg->s_data; 556 pgcnt_t npages; 557 ulong_t anon_idx; 558 struct anon_map *amp; 559 struct anon *ap; 560 struct vnode *vp; 561 u_offset_t off; 562 uint_t hat_flags; 563 int root = 0; 564 pgcnt_t pgs, curnpgs = 0; 565 page_t *rootpp; 566 567 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 568 569 len = P2ROUNDUP(len, PAGESIZE); 570 571 npages = btop(len); 572 573 hat_flags = HAT_UNLOAD_UNLOCK; 574 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) || 575 (sptd->spt_flags & SHM_PAGEABLE)) { 576 hat_flags = HAT_UNLOAD; 577 } 578 579 hat_unload(seg->s_as->a_hat, addr, len, hat_flags); 580 581 amp = sptd->spt_amp; 582 if (sptd->spt_flags & SHM_PAGEABLE) 583 npages = btop(amp->size); 584 585 ASSERT(amp); 586 for (anon_idx = 0; anon_idx < npages; anon_idx++) { 587 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 588 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) { 589 panic("segspt_free_pages: null app"); 590 /*NOTREACHED*/ 591 } 592 } else { 593 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx)) 594 == NULL) 595 continue; 596 } 597 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0); 598 swap_xlate(ap, &vp, &off); 599 600 /* 601 * If this platform supports HAT_DYNAMIC_ISM_UNMAP, 602 * the pages won't be having SE_SHARED lock at this 603 * point. 604 * 605 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 606 * the pages are still held SE_SHARED locked from the 607 * original segspt_create() 608 * 609 * Our goal is to get SE_EXCL lock on each page, remove 610 * permanent lock on it and invalidate the page. 
611 */ 612 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 613 if (hat_flags == HAT_UNLOAD) 614 pp = page_lookup(vp, off, SE_EXCL); 615 else { 616 if ((pp = page_find(vp, off)) == NULL) { 617 panic("segspt_free_pages: " 618 "page not locked"); 619 /*NOTREACHED*/ 620 } 621 if (!page_tryupgrade(pp)) { 622 page_unlock(pp); 623 pp = page_lookup(vp, off, SE_EXCL); 624 } 625 } 626 if (pp == NULL) { 627 panic("segspt_free_pages: " 628 "page not in the system"); 629 /*NOTREACHED*/ 630 } 631 page_pp_unlock(pp, 0, 1); 632 } else { 633 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL) 634 continue; 635 page_pp_unlock(pp, 0, 0); 636 } 637 /* 638 * It's logical to invalidate the pages here as in most cases 639 * these were created by segspt. 640 */ 641 if (pp->p_szc != 0) { 642 /* 643 * For DISM swap is released in shm_rm_amp. 644 */ 645 if ((sptd->spt_flags & SHM_PAGEABLE) == 0 && 646 ap->an_pvp != NULL) { 647 panic("segspt_free_pages: pvp non NULL"); 648 /*NOTREACHED*/ 649 } 650 if (root == 0) { 651 ASSERT(curnpgs == 0); 652 root = 1; 653 rootpp = pp; 654 pgs = curnpgs = page_get_pagecnt(pp->p_szc); 655 ASSERT(pgs > 1); 656 ASSERT(IS_P2ALIGNED(pgs, pgs)); 657 ASSERT(!(page_pptonum(pp) & (pgs - 1))); 658 curnpgs--; 659 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) { 660 ASSERT(curnpgs == 1); 661 ASSERT(page_pptonum(pp) == 662 page_pptonum(rootpp) + (pgs - 1)); 663 page_destroy_pages(rootpp); 664 root = 0; 665 curnpgs = 0; 666 } else { 667 ASSERT(curnpgs > 1); 668 ASSERT(page_pptonum(pp) == 669 page_pptonum(rootpp) + (pgs - curnpgs)); 670 curnpgs--; 671 } 672 } else { 673 if (root != 0 || curnpgs != 0) { 674 panic("segspt_free_pages: bad large page"); 675 /*NOTREACHED*/ 676 } 677 /*LINTED: constant in conditional context */ 678 VN_DISPOSE(pp, B_INVAL, 0, kcred); 679 } 680 } 681 682 if (root != 0 || curnpgs != 0) { 683 panic("segspt_free_pages: bad large page"); 684 /*NOTREACHED*/ 685 } 686 687 /* 688 * mark that pages have been released 689 */ 690 sptd->spt_realsize = 0; 691 692 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 693 atomic_add_long(&spt_used, -npages); 694 anon_swap_restore(npages); 695 } 696 } 697 698 /* 699 * Get memory allocation policy info for specified address in given segment 700 */ 701 static lgrp_mem_policy_info_t * 702 segspt_getpolicy(struct seg *seg, caddr_t addr) 703 { 704 struct anon_map *amp; 705 ulong_t anon_index; 706 lgrp_mem_policy_info_t *policy_info; 707 struct spt_data *spt_data; 708 709 ASSERT(seg != NULL); 710 711 /* 712 * Get anon_map from segspt 713 * 714 * Assume that no lock needs to be held on anon_map, since 715 * it should be protected by its reference count which must be 716 * nonzero for an existing segment 717 * Need to grab readers lock on policy tree though 718 */ 719 spt_data = (struct spt_data *)seg->s_data; 720 if (spt_data == NULL) 721 return (NULL); 722 amp = spt_data->spt_amp; 723 ASSERT(amp->refcnt != 0); 724 725 /* 726 * Get policy info 727 * 728 * Assume starting anon index of 0 729 */ 730 anon_index = seg_page(seg, addr); 731 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0); 732 733 return (policy_info); 734 } 735 736 /* 737 * DISM only. 738 * Return locked pages over a given range. 739 * 740 * We will cache all DISM locked pages and save the pplist for the 741 * entire segment in the ppa field of the underlying DISM segment structure. 742 * Later, during a call to segspt_reclaim() we will use this ppa array 743 * to page_unlock() all of the pages and then we will free this ppa list. 
744 */ 745 /*ARGSUSED*/ 746 static int 747 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len, 748 struct page ***ppp, enum lock_type type, enum seg_rw rw) 749 { 750 struct shm_data *shmd = (struct shm_data *)seg->s_data; 751 struct seg *sptseg = shmd->shm_sptseg; 752 struct spt_data *sptd = sptseg->s_data; 753 pgcnt_t pg_idx, npages, tot_npages, npgs; 754 struct page **pplist, **pl, **ppa, *pp; 755 struct anon_map *amp; 756 spgcnt_t an_idx; 757 int ret = ENOTSUP; 758 uint_t pl_built = 0; 759 struct anon *ap; 760 struct vnode *vp; 761 u_offset_t off; 762 pgcnt_t claim_availrmem = 0; 763 uint_t szc; 764 765 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 766 767 /* 768 * We want to lock/unlock the entire ISM segment. Therefore, 769 * we will be using the underlying sptseg and it's base address 770 * and length for the caching arguments. 771 */ 772 ASSERT(sptseg); 773 ASSERT(sptd); 774 775 pg_idx = seg_page(seg, addr); 776 npages = btopr(len); 777 778 /* 779 * check if the request is larger than number of pages covered 780 * by amp 781 */ 782 if (pg_idx + npages > btopr(sptd->spt_amp->size)) { 783 *ppp = NULL; 784 return (ENOTSUP); 785 } 786 787 if (type == L_PAGEUNLOCK) { 788 ASSERT(sptd->spt_ppa != NULL); 789 790 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 791 sptd->spt_ppa, sptd->spt_prot, segspt_reclaim); 792 793 /* 794 * If someone is blocked while unmapping, we purge 795 * segment page cache and thus reclaim pplist synchronously 796 * without waiting for seg_pasync_thread. This speeds up 797 * unmapping in cases where munmap(2) is called, while 798 * raw async i/o is still in progress or where a thread 799 * exits on data fault in a multithreaded application. 800 */ 801 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 802 segspt_purge(seg); 803 } 804 return (0); 805 } else if (type == L_PAGERECLAIM) { 806 ASSERT(sptd->spt_ppa != NULL); 807 (void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size, 808 sptd->spt_ppa, sptd->spt_prot); 809 return (0); 810 } 811 812 if (sptd->spt_flags & DISM_PPA_CHANGED) { 813 segspt_purge(seg); 814 /* 815 * for DISM ppa needs to be rebuild since 816 * number of locked pages could be changed 817 */ 818 *ppp = NULL; 819 return (ENOTSUP); 820 } 821 822 /* 823 * First try to find pages in segment page cache, without 824 * holding the segment lock. 825 */ 826 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 827 sptd->spt_prot); 828 if (pplist != NULL) { 829 ASSERT(sptd->spt_ppa != NULL); 830 ASSERT(sptd->spt_ppa == pplist); 831 ppa = sptd->spt_ppa; 832 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 833 if (ppa[an_idx] == NULL) { 834 seg_pinactive(seg, seg->s_base, 835 sptd->spt_amp->size, ppa, 836 sptd->spt_prot, segspt_reclaim); 837 *ppp = NULL; 838 return (ENOTSUP); 839 } 840 if ((szc = ppa[an_idx]->p_szc) != 0) { 841 npgs = page_get_pagecnt(szc); 842 an_idx = P2ROUNDUP(an_idx + 1, npgs); 843 } else { 844 an_idx++; 845 } 846 } 847 /* 848 * Since we cache the entire DISM segment, we want to 849 * set ppp to point to the first slot that corresponds 850 * to the requested addr, i.e. pg_idx. 851 */ 852 *ppp = &(sptd->spt_ppa[pg_idx]); 853 return (0); 854 } 855 856 /* The L_PAGELOCK case... 
*/ 857 mutex_enter(&sptd->spt_lock); 858 /* 859 * try to find pages in segment page cache with mutex 860 */ 861 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 862 sptd->spt_prot); 863 if (pplist != NULL) { 864 ASSERT(sptd->spt_ppa != NULL); 865 ASSERT(sptd->spt_ppa == pplist); 866 ppa = sptd->spt_ppa; 867 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 868 if (ppa[an_idx] == NULL) { 869 mutex_exit(&sptd->spt_lock); 870 seg_pinactive(seg, seg->s_base, 871 sptd->spt_amp->size, ppa, 872 sptd->spt_prot, segspt_reclaim); 873 *ppp = NULL; 874 return (ENOTSUP); 875 } 876 if ((szc = ppa[an_idx]->p_szc) != 0) { 877 npgs = page_get_pagecnt(szc); 878 an_idx = P2ROUNDUP(an_idx + 1, npgs); 879 } else { 880 an_idx++; 881 } 882 } 883 /* 884 * Since we cache the entire DISM segment, we want to 885 * set ppp to point to the first slot that corresponds 886 * to the requested addr, i.e. pg_idx. 887 */ 888 mutex_exit(&sptd->spt_lock); 889 *ppp = &(sptd->spt_ppa[pg_idx]); 890 return (0); 891 } 892 if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) == 893 SEGP_FAIL) { 894 mutex_exit(&sptd->spt_lock); 895 *ppp = NULL; 896 return (ENOTSUP); 897 } 898 899 /* 900 * No need to worry about protections because DISM pages are always rw. 901 */ 902 pl = pplist = NULL; 903 amp = sptd->spt_amp; 904 905 /* 906 * Do we need to build the ppa array? 907 */ 908 if (sptd->spt_ppa == NULL) { 909 pgcnt_t lpg_cnt = 0; 910 911 pl_built = 1; 912 tot_npages = btopr(sptd->spt_amp->size); 913 914 ASSERT(sptd->spt_pcachecnt == 0); 915 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP); 916 pl = pplist; 917 918 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 919 for (an_idx = 0; an_idx < tot_npages; ) { 920 ap = anon_get_ptr(amp->ahp, an_idx); 921 /* 922 * Cache only mlocked pages. For large pages 923 * if one (constituent) page is mlocked 924 * all pages for that large page 925 * are cached also. This is for quick 926 * lookups of ppa array; 927 */ 928 if ((ap != NULL) && (lpg_cnt != 0 || 929 (sptd->spt_ppa_lckcnt[an_idx] != 0))) { 930 931 swap_xlate(ap, &vp, &off); 932 pp = page_lookup(vp, off, SE_SHARED); 933 ASSERT(pp != NULL); 934 if (lpg_cnt == 0) { 935 lpg_cnt++; 936 /* 937 * For a small page, we are done -- 938 * lpg_count is reset to 0 below. 939 * 940 * For a large page, we are guaranteed 941 * to find the anon structures of all 942 * constituent pages and a non-zero 943 * lpg_cnt ensures that we don't test 944 * for mlock for these. We are done 945 * when lpg_count reaches (npgs + 1). 946 * If we are not the first constituent 947 * page, restart at the first one. 948 */ 949 npgs = page_get_pagecnt(pp->p_szc); 950 if (!IS_P2ALIGNED(an_idx, npgs)) { 951 an_idx = P2ALIGN(an_idx, npgs); 952 page_unlock(pp); 953 continue; 954 } 955 } 956 if (++lpg_cnt > npgs) 957 lpg_cnt = 0; 958 959 /* 960 * availrmem is decremented only 961 * for unlocked pages 962 */ 963 if (sptd->spt_ppa_lckcnt[an_idx] == 0) 964 claim_availrmem++; 965 pplist[an_idx] = pp; 966 } 967 an_idx++; 968 } 969 ANON_LOCK_EXIT(&->a_rwlock); 970 971 mutex_enter(&freemem_lock); 972 if (availrmem < tune.t_minarmem + claim_availrmem) { 973 mutex_exit(&freemem_lock); 974 ret = FC_MAKE_ERR(ENOMEM); 975 claim_availrmem = 0; 976 goto insert_fail; 977 } else { 978 availrmem -= claim_availrmem; 979 } 980 mutex_exit(&freemem_lock); 981 982 sptd->spt_ppa = pl; 983 } else { 984 /* 985 * We already have a valid ppa[]. 
986 */ 987 pl = sptd->spt_ppa; 988 } 989 990 ASSERT(pl != NULL); 991 992 ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size, 993 pl, sptd->spt_prot, SEGP_FORCE_WIRED | SEGP_ASYNC_FLUSH, 994 segspt_reclaim); 995 if (ret == SEGP_FAIL) { 996 /* 997 * seg_pinsert failed. We return 998 * ENOTSUP, so that the as_pagelock() code will 999 * then try the slower F_SOFTLOCK path. 1000 */ 1001 if (pl_built) { 1002 /* 1003 * No one else has referenced the ppa[]. 1004 * We created it and we need to destroy it. 1005 */ 1006 sptd->spt_ppa = NULL; 1007 } 1008 ret = ENOTSUP; 1009 goto insert_fail; 1010 } 1011 1012 /* 1013 * In either case, we increment softlockcnt on the 'real' segment. 1014 */ 1015 sptd->spt_pcachecnt++; 1016 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1017 1018 ppa = sptd->spt_ppa; 1019 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 1020 if (ppa[an_idx] == NULL) { 1021 mutex_exit(&sptd->spt_lock); 1022 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 1023 pl, sptd->spt_prot, segspt_reclaim); 1024 *ppp = NULL; 1025 return (ENOTSUP); 1026 } 1027 if ((szc = ppa[an_idx]->p_szc) != 0) { 1028 npgs = page_get_pagecnt(szc); 1029 an_idx = P2ROUNDUP(an_idx + 1, npgs); 1030 } else { 1031 an_idx++; 1032 } 1033 } 1034 /* 1035 * We can now drop the sptd->spt_lock since the ppa[] 1036 * exists and he have incremented pacachecnt. 1037 */ 1038 mutex_exit(&sptd->spt_lock); 1039 1040 /* 1041 * Since we cache the entire segment, we want to 1042 * set ppp to point to the first slot that corresponds 1043 * to the requested addr, i.e. pg_idx. 1044 */ 1045 *ppp = &(sptd->spt_ppa[pg_idx]); 1046 return (ret); 1047 1048 insert_fail: 1049 /* 1050 * We will only reach this code if we tried and failed. 1051 * 1052 * And we can drop the lock on the dummy seg, once we've failed 1053 * to set up a new ppa[]. 1054 */ 1055 mutex_exit(&sptd->spt_lock); 1056 1057 if (pl_built) { 1058 mutex_enter(&freemem_lock); 1059 availrmem += claim_availrmem; 1060 mutex_exit(&freemem_lock); 1061 1062 /* 1063 * We created pl and we need to destroy it. 1064 */ 1065 pplist = pl; 1066 for (an_idx = 0; an_idx < tot_npages; an_idx++) { 1067 if (pplist[an_idx] != NULL) 1068 page_unlock(pplist[an_idx]); 1069 } 1070 kmem_free(pl, sizeof (page_t *) * tot_npages); 1071 } 1072 1073 if (shmd->shm_softlockcnt <= 0) { 1074 if (AS_ISUNMAPWAIT(seg->s_as)) { 1075 mutex_enter(&seg->s_as->a_contents); 1076 if (AS_ISUNMAPWAIT(seg->s_as)) { 1077 AS_CLRUNMAPWAIT(seg->s_as); 1078 cv_broadcast(&seg->s_as->a_cv); 1079 } 1080 mutex_exit(&seg->s_as->a_contents); 1081 } 1082 } 1083 *ppp = NULL; 1084 return (ret); 1085 } 1086 1087 1088 1089 /* 1090 * return locked pages over a given range. 1091 * 1092 * We will cache the entire ISM segment and save the pplist for the 1093 * entire segment in the ppa field of the underlying ISM segment structure. 1094 * Later, during a call to segspt_reclaim() we will use this ppa array 1095 * to page_unlock() all of the pages and then we will free this ppa list. 
1096 */ 1097 /*ARGSUSED*/ 1098 static int 1099 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len, 1100 struct page ***ppp, enum lock_type type, enum seg_rw rw) 1101 { 1102 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1103 struct seg *sptseg = shmd->shm_sptseg; 1104 struct spt_data *sptd = sptseg->s_data; 1105 pgcnt_t np, page_index, npages; 1106 caddr_t a, spt_base; 1107 struct page **pplist, **pl, *pp; 1108 struct anon_map *amp; 1109 ulong_t anon_index; 1110 int ret = ENOTSUP; 1111 uint_t pl_built = 0; 1112 struct anon *ap; 1113 struct vnode *vp; 1114 u_offset_t off; 1115 1116 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1117 1118 /* 1119 * We want to lock/unlock the entire ISM segment. Therefore, 1120 * we will be using the underlying sptseg and it's base address 1121 * and length for the caching arguments. 1122 */ 1123 ASSERT(sptseg); 1124 ASSERT(sptd); 1125 1126 if (sptd->spt_flags & SHM_PAGEABLE) { 1127 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw)); 1128 } 1129 1130 page_index = seg_page(seg, addr); 1131 npages = btopr(len); 1132 1133 /* 1134 * check if the request is larger than number of pages covered 1135 * by amp 1136 */ 1137 if (page_index + npages > btopr(sptd->spt_amp->size)) { 1138 *ppp = NULL; 1139 return (ENOTSUP); 1140 } 1141 1142 if (type == L_PAGEUNLOCK) { 1143 1144 ASSERT(sptd->spt_ppa != NULL); 1145 1146 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 1147 sptd->spt_ppa, sptd->spt_prot, segspt_reclaim); 1148 1149 /* 1150 * If someone is blocked while unmapping, we purge 1151 * segment page cache and thus reclaim pplist synchronously 1152 * without waiting for seg_pasync_thread. This speeds up 1153 * unmapping in cases where munmap(2) is called, while 1154 * raw async i/o is still in progress or where a thread 1155 * exits on data fault in a multithreaded application. 1156 */ 1157 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 1158 segspt_purge(seg); 1159 } 1160 return (0); 1161 } else if (type == L_PAGERECLAIM) { 1162 ASSERT(sptd->spt_ppa != NULL); 1163 1164 (void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size, 1165 sptd->spt_ppa, sptd->spt_prot); 1166 return (0); 1167 } 1168 1169 /* 1170 * First try to find pages in segment page cache, without 1171 * holding the segment lock. 1172 */ 1173 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 1174 sptd->spt_prot); 1175 if (pplist != NULL) { 1176 ASSERT(sptd->spt_ppa == pplist); 1177 ASSERT(sptd->spt_ppa[page_index]); 1178 /* 1179 * Since we cache the entire ISM segment, we want to 1180 * set ppp to point to the first slot that corresponds 1181 * to the requested addr, i.e. page_index. 1182 */ 1183 *ppp = &(sptd->spt_ppa[page_index]); 1184 return (0); 1185 } 1186 1187 /* The L_PAGELOCK case... */ 1188 mutex_enter(&sptd->spt_lock); 1189 1190 /* 1191 * try to find pages in segment page cache 1192 */ 1193 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 1194 sptd->spt_prot); 1195 if (pplist != NULL) { 1196 ASSERT(sptd->spt_ppa == pplist); 1197 /* 1198 * Since we cache the entire segment, we want to 1199 * set ppp to point to the first slot that corresponds 1200 * to the requested addr, i.e. page_index. 
1201 */ 1202 mutex_exit(&sptd->spt_lock); 1203 *ppp = &(sptd->spt_ppa[page_index]); 1204 return (0); 1205 } 1206 1207 if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) == 1208 SEGP_FAIL) { 1209 mutex_exit(&sptd->spt_lock); 1210 *ppp = NULL; 1211 return (ENOTSUP); 1212 } 1213 1214 /* 1215 * No need to worry about protections because ISM pages 1216 * are always rw. 1217 */ 1218 pl = pplist = NULL; 1219 1220 /* 1221 * Do we need to build the ppa array? 1222 */ 1223 if (sptd->spt_ppa == NULL) { 1224 ASSERT(sptd->spt_ppa == pplist); 1225 1226 spt_base = sptseg->s_base; 1227 pl_built = 1; 1228 1229 /* 1230 * availrmem is decremented once during anon_swap_adjust() 1231 * and is incremented during the anon_unresv(), which is 1232 * called from shm_rm_amp() when the segment is destroyed. 1233 */ 1234 amp = sptd->spt_amp; 1235 ASSERT(amp != NULL); 1236 1237 /* pcachecnt is protected by sptd->spt_lock */ 1238 ASSERT(sptd->spt_pcachecnt == 0); 1239 pplist = kmem_zalloc(sizeof (page_t *) 1240 * btopr(sptd->spt_amp->size), KM_SLEEP); 1241 pl = pplist; 1242 1243 anon_index = seg_page(sptseg, spt_base); 1244 1245 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1246 for (a = spt_base; a < (spt_base + sptd->spt_amp->size); 1247 a += PAGESIZE, anon_index++, pplist++) { 1248 ap = anon_get_ptr(amp->ahp, anon_index); 1249 ASSERT(ap != NULL); 1250 swap_xlate(ap, &vp, &off); 1251 pp = page_lookup(vp, off, SE_SHARED); 1252 ASSERT(pp != NULL); 1253 *pplist = pp; 1254 } 1255 ANON_LOCK_EXIT(&->a_rwlock); 1256 1257 if (a < (spt_base + sptd->spt_amp->size)) { 1258 ret = ENOTSUP; 1259 goto insert_fail; 1260 } 1261 sptd->spt_ppa = pl; 1262 } else { 1263 /* 1264 * We already have a valid ppa[]. 1265 */ 1266 pl = sptd->spt_ppa; 1267 } 1268 1269 ASSERT(pl != NULL); 1270 1271 ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size, 1272 pl, sptd->spt_prot, SEGP_FORCE_WIRED, segspt_reclaim); 1273 if (ret == SEGP_FAIL) { 1274 /* 1275 * seg_pinsert failed. We return 1276 * ENOTSUP, so that the as_pagelock() code will 1277 * then try the slower F_SOFTLOCK path. 1278 */ 1279 if (pl_built) { 1280 /* 1281 * No one else has referenced the ppa[]. 1282 * We created it and we need to destroy it. 1283 */ 1284 sptd->spt_ppa = NULL; 1285 } 1286 ret = ENOTSUP; 1287 goto insert_fail; 1288 } 1289 1290 /* 1291 * In either case, we increment softlockcnt on the 'real' segment. 1292 */ 1293 sptd->spt_pcachecnt++; 1294 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1295 1296 /* 1297 * We can now drop the sptd->spt_lock since the ppa[] 1298 * exists and he have incremented pacachecnt. 1299 */ 1300 mutex_exit(&sptd->spt_lock); 1301 1302 /* 1303 * Since we cache the entire segment, we want to 1304 * set ppp to point to the first slot that corresponds 1305 * to the requested addr, i.e. page_index. 1306 */ 1307 *ppp = &(sptd->spt_ppa[page_index]); 1308 return (ret); 1309 1310 insert_fail: 1311 /* 1312 * We will only reach this code if we tried and failed. 1313 * 1314 * And we can drop the lock on the dummy seg, once we've failed 1315 * to set up a new ppa[]. 1316 */ 1317 mutex_exit(&sptd->spt_lock); 1318 1319 if (pl_built) { 1320 /* 1321 * We created pl and we need to destroy it. 
1322 */ 1323 pplist = pl; 1324 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT); 1325 while (np) { 1326 page_unlock(*pplist); 1327 np--; 1328 pplist++; 1329 } 1330 kmem_free(pl, sizeof (page_t *) * 1331 btopr(sptd->spt_amp->size)); 1332 } 1333 if (shmd->shm_softlockcnt <= 0) { 1334 if (AS_ISUNMAPWAIT(seg->s_as)) { 1335 mutex_enter(&seg->s_as->a_contents); 1336 if (AS_ISUNMAPWAIT(seg->s_as)) { 1337 AS_CLRUNMAPWAIT(seg->s_as); 1338 cv_broadcast(&seg->s_as->a_cv); 1339 } 1340 mutex_exit(&seg->s_as->a_contents); 1341 } 1342 } 1343 *ppp = NULL; 1344 return (ret); 1345 } 1346 1347 /* 1348 * purge any cached pages in the I/O page cache 1349 */ 1350 static void 1351 segspt_purge(struct seg *seg) 1352 { 1353 seg_ppurge(seg); 1354 } 1355 1356 static int 1357 segspt_reclaim(struct seg *seg, caddr_t addr, size_t len, struct page **pplist, 1358 enum seg_rw rw) 1359 { 1360 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1361 struct seg *sptseg; 1362 struct spt_data *sptd; 1363 pgcnt_t npages, i, free_availrmem = 0; 1364 int done = 0; 1365 1366 #ifdef lint 1367 addr = addr; 1368 #endif 1369 sptseg = shmd->shm_sptseg; 1370 sptd = sptseg->s_data; 1371 npages = (len >> PAGESHIFT); 1372 ASSERT(npages); 1373 ASSERT(sptd->spt_pcachecnt != 0); 1374 ASSERT(sptd->spt_ppa == pplist); 1375 ASSERT(npages == btopr(sptd->spt_amp->size)); 1376 1377 /* 1378 * Acquire the lock on the dummy seg and destroy the 1379 * ppa array IF this is the last pcachecnt. 1380 */ 1381 mutex_enter(&sptd->spt_lock); 1382 if (--sptd->spt_pcachecnt == 0) { 1383 for (i = 0; i < npages; i++) { 1384 if (pplist[i] == NULL) { 1385 continue; 1386 } 1387 if (rw == S_WRITE) { 1388 hat_setrefmod(pplist[i]); 1389 } else { 1390 hat_setref(pplist[i]); 1391 } 1392 if ((sptd->spt_flags & SHM_PAGEABLE) && 1393 (sptd->spt_ppa_lckcnt[i] == 0)) 1394 free_availrmem++; 1395 page_unlock(pplist[i]); 1396 } 1397 if (sptd->spt_flags & SHM_PAGEABLE) { 1398 mutex_enter(&freemem_lock); 1399 availrmem += free_availrmem; 1400 mutex_exit(&freemem_lock); 1401 } 1402 /* 1403 * Since we want to cach/uncache the entire ISM segment, 1404 * we will track the pplist in a segspt specific field 1405 * ppa, that is initialized at the time we add an entry to 1406 * the cache. 1407 */ 1408 ASSERT(sptd->spt_pcachecnt == 0); 1409 kmem_free(pplist, sizeof (page_t *) * npages); 1410 sptd->spt_ppa = NULL; 1411 sptd->spt_flags &= ~DISM_PPA_CHANGED; 1412 done = 1; 1413 } 1414 mutex_exit(&sptd->spt_lock); 1415 /* 1416 * Now decrement softlockcnt. 1417 */ 1418 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1); 1419 1420 if (shmd->shm_softlockcnt <= 0) { 1421 if (AS_ISUNMAPWAIT(seg->s_as)) { 1422 mutex_enter(&seg->s_as->a_contents); 1423 if (AS_ISUNMAPWAIT(seg->s_as)) { 1424 AS_CLRUNMAPWAIT(seg->s_as); 1425 cv_broadcast(&seg->s_as->a_cv); 1426 } 1427 mutex_exit(&seg->s_as->a_contents); 1428 } 1429 } 1430 return (done); 1431 } 1432 1433 /* 1434 * Do a F_SOFTUNLOCK call over the range requested. 1435 * The range must have already been F_SOFTLOCK'ed. 1436 * 1437 * The calls to acquire and release the anon map lock mutex were 1438 * removed in order to avoid a deadly embrace during a DR 1439 * memory delete operation. (Eg. 
DR blocks while waiting for a 1440 * exclusive lock on a page that is being used for kaio; the 1441 * thread that will complete the kaio and call segspt_softunlock 1442 * blocks on the anon map lock; another thread holding the anon 1443 * map lock blocks on another page lock via the segspt_shmfault 1444 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.) 1445 * 1446 * The appropriateness of the removal is based upon the following: 1447 * 1. If we are holding a segment's reader lock and the page is held 1448 * shared, then the corresponding element in anonmap which points to 1449 * anon struct cannot change and there is no need to acquire the 1450 * anonymous map lock. 1451 * 2. Threads in segspt_softunlock have a reader lock on the segment 1452 * and already have the shared page lock, so we are guaranteed that 1453 * the anon map slot cannot change and therefore can call anon_get_ptr() 1454 * without grabbing the anonymous map lock. 1455 * 3. Threads that softlock a shared page break copy-on-write, even if 1456 * its a read. Thus cow faults can be ignored with respect to soft 1457 * unlocking, since the breaking of cow means that the anon slot(s) will 1458 * not be shared. 1459 */ 1460 static void 1461 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr, 1462 size_t len, enum seg_rw rw) 1463 { 1464 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1465 struct seg *sptseg; 1466 struct spt_data *sptd; 1467 page_t *pp; 1468 caddr_t adr; 1469 struct vnode *vp; 1470 u_offset_t offset; 1471 ulong_t anon_index; 1472 struct anon_map *amp; /* XXX - for locknest */ 1473 struct anon *ap = NULL; 1474 pgcnt_t npages; 1475 1476 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1477 1478 sptseg = shmd->shm_sptseg; 1479 sptd = sptseg->s_data; 1480 1481 /* 1482 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 1483 * and therefore their pages are SE_SHARED locked 1484 * for the entire life of the segment. 1485 */ 1486 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) && 1487 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) { 1488 goto softlock_decrement; 1489 } 1490 1491 /* 1492 * Any thread is free to do a page_find and 1493 * page_unlock() on the pages within this seg. 1494 * 1495 * We are already holding the as->a_lock on the user's 1496 * real segment, but we need to hold the a_lock on the 1497 * underlying dummy as. This is mostly to satisfy the 1498 * underlying HAT layer. 1499 */ 1500 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1501 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len); 1502 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1503 1504 amp = sptd->spt_amp; 1505 ASSERT(amp != NULL); 1506 anon_index = seg_page(sptseg, sptseg_addr); 1507 1508 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) { 1509 ap = anon_get_ptr(amp->ahp, anon_index++); 1510 ASSERT(ap != NULL); 1511 swap_xlate(ap, &vp, &offset); 1512 1513 /* 1514 * Use page_find() instead of page_lookup() to 1515 * find the page since we know that it has a 1516 * "shared" lock. 
1517 */ 1518 pp = page_find(vp, offset); 1519 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1)); 1520 if (pp == NULL) { 1521 panic("segspt_softunlock: " 1522 "addr %p, ap %p, vp %p, off %llx", 1523 (void *)adr, (void *)ap, (void *)vp, offset); 1524 /*NOTREACHED*/ 1525 } 1526 1527 if (rw == S_WRITE) { 1528 hat_setrefmod(pp); 1529 } else if (rw != S_OTHER) { 1530 hat_setref(pp); 1531 } 1532 page_unlock(pp); 1533 } 1534 1535 softlock_decrement: 1536 npages = btopr(len); 1537 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages); 1538 if (shmd->shm_softlockcnt == 0) { 1539 /* 1540 * All SOFTLOCKS are gone. Wakeup any waiting 1541 * unmappers so they can try again to unmap. 1542 * Check for waiters first without the mutex 1543 * held so we don't always grab the mutex on 1544 * softunlocks. 1545 */ 1546 if (AS_ISUNMAPWAIT(seg->s_as)) { 1547 mutex_enter(&seg->s_as->a_contents); 1548 if (AS_ISUNMAPWAIT(seg->s_as)) { 1549 AS_CLRUNMAPWAIT(seg->s_as); 1550 cv_broadcast(&seg->s_as->a_cv); 1551 } 1552 mutex_exit(&seg->s_as->a_contents); 1553 } 1554 } 1555 } 1556 1557 int 1558 segspt_shmattach(struct seg *seg, caddr_t *argsp) 1559 { 1560 struct shm_data *shmd_arg = (struct shm_data *)argsp; 1561 struct shm_data *shmd; 1562 struct anon_map *shm_amp = shmd_arg->shm_amp; 1563 struct spt_data *sptd; 1564 int error = 0; 1565 1566 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1567 1568 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); 1569 if (shmd == NULL) 1570 return (ENOMEM); 1571 1572 shmd->shm_sptas = shmd_arg->shm_sptas; 1573 shmd->shm_amp = shm_amp; 1574 shmd->shm_sptseg = shmd_arg->shm_sptseg; 1575 1576 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0, 1577 NULL, 0, seg->s_size); 1578 1579 seg->s_data = (void *)shmd; 1580 seg->s_ops = &segspt_shmops; 1581 seg->s_szc = shmd->shm_sptseg->s_szc; 1582 sptd = shmd->shm_sptseg->s_data; 1583 1584 if (sptd->spt_flags & SHM_PAGEABLE) { 1585 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size), 1586 KM_NOSLEEP)) == NULL) { 1587 seg->s_data = (void *)NULL; 1588 kmem_free(shmd, (sizeof (*shmd))); 1589 return (ENOMEM); 1590 } 1591 shmd->shm_lckpgs = 0; 1592 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 1593 if ((error = hat_share(seg->s_as->a_hat, seg->s_base, 1594 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1595 seg->s_size, seg->s_szc)) != 0) { 1596 kmem_free(shmd->shm_vpage, 1597 btopr(shm_amp->size)); 1598 } 1599 } 1600 } else { 1601 error = hat_share(seg->s_as->a_hat, seg->s_base, 1602 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1603 seg->s_size, seg->s_szc); 1604 } 1605 if (error) { 1606 seg->s_szc = 0; 1607 seg->s_data = (void *)NULL; 1608 kmem_free(shmd, (sizeof (*shmd))); 1609 } else { 1610 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1611 shm_amp->refcnt++; 1612 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1613 } 1614 return (error); 1615 } 1616 1617 int 1618 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) 1619 { 1620 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1621 int reclaim = 1; 1622 1623 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1624 retry: 1625 if (shmd->shm_softlockcnt > 0) { 1626 if (reclaim == 1) { 1627 segspt_purge(seg); 1628 reclaim = 0; 1629 goto retry; 1630 } 1631 return (EAGAIN); 1632 } 1633 1634 if (ssize != seg->s_size) { 1635 #ifdef DEBUG 1636 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n", 1637 ssize, seg->s_size); 1638 #endif 1639 return (EINVAL); 1640 } 1641 1642 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, 
MC_UNLOCK, 1643 NULL, 0); 1644 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc); 1645 1646 seg_free(seg); 1647 1648 return (0); 1649 } 1650 1651 void 1652 segspt_shmfree(struct seg *seg) 1653 { 1654 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1655 struct anon_map *shm_amp = shmd->shm_amp; 1656 1657 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1658 1659 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0, 1660 MC_UNLOCK, NULL, 0); 1661 1662 /* 1663 * Need to increment refcnt when attaching 1664 * and decrement when detaching because of dup(). 1665 */ 1666 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1667 shm_amp->refcnt--; 1668 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1669 1670 if (shmd->shm_vpage) { /* only for DISM */ 1671 kmem_free(shmd->shm_vpage, btopr(shm_amp->size)); 1672 shmd->shm_vpage = NULL; 1673 } 1674 kmem_free(shmd, sizeof (*shmd)); 1675 } 1676 1677 /*ARGSUSED*/ 1678 int 1679 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 1680 { 1681 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1682 1683 /* 1684 * Shared page table is more than shared mapping. 1685 * Individual process sharing page tables can't change prot 1686 * because there is only one set of page tables. 1687 * This will be allowed after private page table is 1688 * supported. 1689 */ 1690 /* need to return correct status error? */ 1691 return (0); 1692 } 1693 1694 1695 faultcode_t 1696 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr, 1697 size_t len, enum fault_type type, enum seg_rw rw) 1698 { 1699 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1700 struct seg *sptseg = shmd->shm_sptseg; 1701 struct as *curspt = shmd->shm_sptas; 1702 struct spt_data *sptd = sptseg->s_data; 1703 pgcnt_t npages; 1704 size_t share_sz, size; 1705 caddr_t segspt_addr, shm_addr; 1706 page_t **ppa; 1707 int i; 1708 ulong_t an_idx = 0; 1709 int err = 0; 1710 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0); 1711 1712 #ifdef lint 1713 hat = hat; 1714 #endif 1715 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1716 1717 /* 1718 * Because of the way spt is implemented 1719 * the realsize of the segment does not have to be 1720 * equal to the segment size itself. The segment size is 1721 * often in multiples of a page size larger than PAGESIZE. 1722 * The realsize is rounded up to the nearest PAGESIZE 1723 * based on what the user requested. This is a bit of 1724 * ungliness that is historical but not easily fixed 1725 * without re-designing the higher levels of ISM. 1726 */ 1727 ASSERT(addr >= seg->s_base); 1728 if (((addr + len) - seg->s_base) > sptd->spt_realsize) 1729 return (FC_NOMAP); 1730 /* 1731 * For all of the following cases except F_PROT, we need to 1732 * make any necessary adjustments to addr and len 1733 * and get all of the necessary page_t's into an array called ppa[]. 1734 * 1735 * The code in shmat() forces base addr and len of ISM segment 1736 * to be aligned to largest page size supported. Therefore, 1737 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large 1738 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 1739 * in large pagesize chunks, or else we will screw up the HAT 1740 * layer by calling hat_memload_array() with differing page sizes 1741 * over a given virtual range. 
1742 */ 1743 share_sz = page_get_pagesize(sptseg->s_szc); 1744 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz); 1745 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), share_sz); 1746 npages = btopr(size); 1747 1748 /* 1749 * Now we need to convert from addr in segshm to addr in segspt. 1750 */ 1751 an_idx = seg_page(seg, shm_addr); 1752 segspt_addr = sptseg->s_base + ptob(an_idx); 1753 1754 ASSERT((segspt_addr + ptob(npages)) <= 1755 (sptseg->s_base + sptd->spt_realsize)); 1756 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size)); 1757 1758 switch (type) { 1759 1760 case F_SOFTLOCK: 1761 1762 mutex_enter(&freemem_lock); 1763 if (availrmem < tune.t_minarmem + npages) { 1764 mutex_exit(&freemem_lock); 1765 return (FC_MAKE_ERR(ENOMEM)); 1766 } else { 1767 availrmem -= npages; 1768 } 1769 mutex_exit(&freemem_lock); 1770 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages); 1771 /* 1772 * Fall through to the F_INVAL case to load up the hat layer 1773 * entries with the HAT_LOAD_LOCK flag. 1774 */ 1775 /* FALLTHRU */ 1776 case F_INVAL: 1777 1778 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 1779 return (FC_NOMAP); 1780 1781 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP); 1782 1783 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa); 1784 if (err != 0) { 1785 if (type == F_SOFTLOCK) { 1786 mutex_enter(&freemem_lock); 1787 availrmem += npages; 1788 mutex_exit(&freemem_lock); 1789 atomic_add_long((ulong_t *)( 1790 &(shmd->shm_softlockcnt)), -npages); 1791 } 1792 goto dism_err; 1793 } 1794 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1795 if (type == F_SOFTLOCK) { 1796 1797 /* 1798 * Load up the translation keeping it 1799 * locked and don't unlock the page. 1800 */ 1801 hat_memload_array(sptseg->s_as->a_hat, segspt_addr, 1802 size, ppa, sptd->spt_prot, 1803 HAT_LOAD_LOCK | HAT_LOAD_SHARE); 1804 } else { 1805 if (hat == seg->s_as->a_hat) { 1806 1807 /* 1808 * Migrate pages marked for migration 1809 */ 1810 if (lgrp_optimizations()) 1811 page_migrate(seg, shm_addr, ppa, 1812 npages); 1813 1814 /* CPU HAT */ 1815 hat_memload_array(sptseg->s_as->a_hat, 1816 segspt_addr, size, ppa, sptd->spt_prot, 1817 HAT_LOAD_SHARE); 1818 } else { 1819 /* XHAT. Pass real address */ 1820 hat_memload_array(hat, shm_addr, 1821 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE); 1822 } 1823 1824 /* 1825 * And now drop the SE_SHARED lock(s). 1826 */ 1827 if (dyn_ism_unmap) { 1828 for (i = 0; i < npages; i++) { 1829 page_unlock(ppa[i]); 1830 } 1831 } 1832 } 1833 1834 if (!dyn_ism_unmap) { 1835 if (hat_share(seg->s_as->a_hat, shm_addr, 1836 curspt->a_hat, segspt_addr, ptob(npages), 1837 seg->s_szc) != 0) { 1838 panic("hat_share err in DISM fault"); 1839 /* NOTREACHED */ 1840 } 1841 if (type == F_INVAL) { 1842 for (i = 0; i < npages; i++) { 1843 page_unlock(ppa[i]); 1844 } 1845 } 1846 } 1847 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1848 dism_err: 1849 kmem_free(ppa, npages * sizeof (page_t *)); 1850 return (err); 1851 1852 case F_SOFTUNLOCK: 1853 1854 mutex_enter(&freemem_lock); 1855 availrmem += npages; 1856 mutex_exit(&freemem_lock); 1857 1858 /* 1859 * This is a bit ugly, we pass in the real seg pointer, 1860 * but the segspt_addr is the virtual address within the 1861 * dummy seg. 
1862 */ 1863 segspt_softunlock(seg, segspt_addr, size, rw); 1864 return (0); 1865 1866 case F_PROT: 1867 1868 /* 1869 * This takes care of the unusual case where a user 1870 * allocates a stack in shared memory and a register 1871 * window overflow is written to that stack page before 1872 * it is otherwise modified. 1873 * 1874 * We can get away with this because ISM segments are 1875 * always rw. Other than this unusual case, there 1876 * should be no instances of protection violations. 1877 */ 1878 return (0); 1879 1880 default: 1881 #ifdef DEBUG 1882 panic("segspt_dismfault default type?"); 1883 #else 1884 return (FC_NOMAP); 1885 #endif 1886 } 1887 } 1888 1889 1890 faultcode_t 1891 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr, 1892 size_t len, enum fault_type type, enum seg_rw rw) 1893 { 1894 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1895 struct seg *sptseg = shmd->shm_sptseg; 1896 struct as *curspt = shmd->shm_sptas; 1897 struct spt_data *sptd = sptseg->s_data; 1898 pgcnt_t npages; 1899 size_t share_size, size; 1900 caddr_t sptseg_addr, shm_addr; 1901 page_t *pp, **ppa; 1902 int i; 1903 u_offset_t offset; 1904 ulong_t anon_index = 0; 1905 struct vnode *vp; 1906 struct anon_map *amp; /* XXX - for locknest */ 1907 struct anon *ap = NULL; 1908 anon_sync_obj_t cookie; 1909 1910 #ifdef lint 1911 hat = hat; 1912 #endif 1913 1914 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1915 1916 if (sptd->spt_flags & SHM_PAGEABLE) { 1917 return (segspt_dismfault(hat, seg, addr, len, type, rw)); 1918 } 1919 1920 /* 1921 * Because of the way spt is implemented 1922 * the realsize of the segment does not have to be 1923 * equal to the segment size itself. The segment size is 1924 * often in multiples of a page size larger than PAGESIZE. 1925 * The realsize is rounded up to the nearest PAGESIZE 1926 * based on what the user requested. This is a bit of 1927 * ungliness that is historical but not easily fixed 1928 * without re-designing the higher levels of ISM. 1929 */ 1930 ASSERT(addr >= seg->s_base); 1931 if (((addr + len) - seg->s_base) > sptd->spt_realsize) 1932 return (FC_NOMAP); 1933 /* 1934 * For all of the following cases except F_PROT, we need to 1935 * make any necessary adjustments to addr and len 1936 * and get all of the necessary page_t's into an array called ppa[]. 1937 * 1938 * The code in shmat() forces base addr and len of ISM segment 1939 * to be aligned to largest page size supported. Therefore, 1940 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large 1941 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 1942 * in large pagesize chunks, or else we will screw up the HAT 1943 * layer by calling hat_memload_array() with differing page sizes 1944 * over a given virtual range. 1945 */ 1946 share_size = page_get_pagesize(sptseg->s_szc); 1947 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size); 1948 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), share_size); 1949 npages = btopr(size); 1950 1951 /* 1952 * Now we need to convert from addr in segshm to addr in segspt. 1953 */ 1954 anon_index = seg_page(seg, shm_addr); 1955 sptseg_addr = sptseg->s_base + ptob(anon_index); 1956 1957 /* 1958 * And now we may have to adjust npages downward if we have 1959 * exceeded the realsize of the segment or initial anon 1960 * allocations. 
	 */
	if ((sptseg_addr + ptob(npages)) >
	    (sptseg->s_base + sptd->spt_realsize))
		size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;

	npages = btopr(size);

	ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
	ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);

	switch (type) {

	case F_SOFTLOCK:

		/*
		 * availrmem is decremented once during anon_swap_adjust()
		 * and is incremented during the anon_unresv(), which is
		 * called from shm_rm_amp() when the segment is destroyed.
		 */
		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
		/*
		 * Some platforms assume that ISM pages are SE_SHARED
		 * locked for the entire life of the segment.
		 */
		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
			return (0);
		/*
		 * Fall through to the F_INVAL case to load up the hat layer
		 * entries with the HAT_LOAD_LOCK flag.
		 */

		/* FALLTHRU */
	case F_INVAL:

		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
			return (FC_NOMAP);

		/*
		 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
		 * may still rely on this call to hat_share(). That
		 * would imply that those hats can fault on a
		 * HAT_LOAD_LOCK translation, which would seem
		 * contradictory.
		 */
		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
			if (hat_share(seg->s_as->a_hat, seg->s_base,
			    curspt->a_hat, sptseg->s_base,
			    sptseg->s_size, sptseg->s_szc) != 0) {
				panic("hat_share error in ISM fault");
				/*NOTREACHED*/
			}
			return (0);
		}
		ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);

		/*
		 * I see no need to lock the real seg,
		 * here, because all of our work will be on the underlying
		 * dummy seg.
		 *
		 * sptseg_addr and npages now account for large pages.
		 */
		amp = sptd->spt_amp;
		ASSERT(amp != NULL);
		anon_index = seg_page(sptseg, sptseg_addr);

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++) {
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index++);
			ASSERT(ap != NULL);
			swap_xlate(ap, &vp, &offset);
			anon_array_exit(&cookie);
			pp = page_lookup(vp, offset, SE_SHARED);
			ASSERT(pp != NULL);
			ppa[i] = pp;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		ASSERT(i == npages);

		/*
		 * We are already holding the as->a_lock on the user's
		 * real segment, but we need to hold the a_lock on the
		 * underlying dummy as. This is mostly to satisfy the
		 * underlying HAT layer.
		 */
		AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
		if (type == F_SOFTLOCK) {
			/*
			 * Load up the translation keeping it
			 * locked and don't unlock the page.
			 */
			hat_memload_array(sptseg->s_as->a_hat, sptseg_addr,
			    ptob(npages), ppa, sptd->spt_prot,
			    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
		} else {
			if (hat == seg->s_as->a_hat) {

				/*
				 * Migrate pages marked for migration.
				 */
				if (lgrp_optimizations())
					page_migrate(seg, shm_addr, ppa,
					    npages);

				/* CPU HAT */
				hat_memload_array(sptseg->s_as->a_hat,
				    sptseg_addr, ptob(npages), ppa,
				    sptd->spt_prot, HAT_LOAD_SHARE);
			} else {
				/* XHAT. Pass real address */
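				/*
				 * For an XHAT the translation is loaded at
				 * the shm segment's own (real) virtual
				 * address rather than at the spt dummy
				 * segment address used for the CPU HAT above.
				 */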
				hat_memload_array(hat, shm_addr,
				    ptob(npages), ppa, sptd->spt_prot,
				    HAT_LOAD_SHARE);
			}

			/*
			 * And now drop the SE_SHARED lock(s).
			 */
			for (i = 0; i < npages; i++)
				page_unlock(ppa[i]);
		}
		AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);

		kmem_free(ppa, sizeof (page_t *) * npages);
		return (0);

	case F_SOFTUNLOCK:

		/*
		 * This is a bit ugly, we pass in the real seg pointer,
		 * but the sptseg_addr is the virtual address within the
		 * dummy seg.
		 */
		segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
		return (0);

	case F_PROT:

		/*
		 * This takes care of the unusual case where a user
		 * allocates a stack in shared memory and a register
		 * window overflow is written to that stack page before
		 * it is otherwise modified.
		 *
		 * We can get away with this because ISM segments are
		 * always rw. Other than this unusual case, there
		 * should be no instances of protection violations.
		 */
		return (0);

	default:
#ifdef DEBUG
		cmn_err(CE_WARN, "segspt_shmfault default type?");
#endif
		return (FC_NOMAP);
	}
}

/*ARGSUSED*/
static faultcode_t
segspt_shmfaulta(struct seg *seg, caddr_t addr)
{
	return (0);
}

/*ARGSUSED*/
static int
segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

/*ARGSUSED*/
static size_t
segspt_shmswapout(struct seg *seg)
{
	return (0);
}

/*
 * duplicate the shared page tables
 */
int
segspt_shmdup(struct seg *seg, struct seg *newseg)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon_map *amp = shmd->shm_amp;
	struct shm_data *shmd_new;
	struct seg *spt_seg = shmd->shm_sptseg;
	struct spt_data *sptd = spt_seg->s_data;
	int error = 0;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
	newseg->s_data = (void *)shmd_new;
	shmd_new->shm_sptas = shmd->shm_sptas;
	shmd_new->shm_amp = amp;
	shmd_new->shm_sptseg = shmd->shm_sptseg;
	newseg->s_ops = &segspt_shmops;
	newseg->s_szc = seg->s_szc;
	ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	amp->refcnt++;
	ANON_LOCK_EXIT(&amp->a_rwlock);

	if (sptd->spt_flags & SHM_PAGEABLE) {
		shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
		shmd_new->shm_lckpgs = 0;
		if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
			if ((error = hat_share(newseg->s_as->a_hat,
			    newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
			    seg->s_size, seg->s_szc)) != 0) {
				kmem_free(shmd_new->shm_vpage,
				    btopr(amp->size));
			}
		}
		return (error);
	} else {
		return (hat_share(newseg->s_as->a_hat, newseg->s_base,
		    shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
		    seg->s_szc));

	}
}

/*ARGSUSED*/
int
segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * ISM segment is always rw.
	 */
	return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
}

/*
 * Return an array of locked large pages, for empty slots allocate
 * private zero-filled anon pages.
 */
static int
spt_anon_getpages(
	struct seg *sptseg,
	caddr_t sptaddr,
	size_t len,
	page_t *ppa[])
{
	struct spt_data *sptd = sptseg->s_data;
	struct anon_map *amp = sptd->spt_amp;
	enum seg_rw rw = sptd->spt_prot;
	uint_t szc = sptseg->s_szc;
	size_t pg_sz, share_sz = page_get_pagesize(szc);
	pgcnt_t lp_npgs;
	caddr_t lp_addr, e_sptaddr;
	uint_t vpprot, ppa_szc = 0;
	struct vpage *vpage = NULL;
	ulong_t j, ppa_idx;
	int err, ierr = 0;
	pgcnt_t an_idx;
	anon_sync_obj_t cookie;

	ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
	ASSERT(len != 0);

	pg_sz = share_sz;
	lp_npgs = btop(pg_sz);
	lp_addr = sptaddr;
	e_sptaddr = sptaddr + len;
	an_idx = seg_page(sptseg, sptaddr);
	ppa_idx = 0;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	/*CONSTCOND*/
	while (1) {
		for (; lp_addr < e_sptaddr;
		    an_idx += lp_npgs, lp_addr += pg_sz,
		    ppa_idx += lp_npgs) {

			anon_array_enter(amp, an_idx, &cookie);
			ppa_szc = (uint_t)-1;
			ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
			    lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
			    &ppa_szc, vpage, rw, 0, segvn_anypgsz, kcred);
			anon_array_exit(&cookie);

			if (ierr != 0) {
				if (ierr > 0) {
					err = FC_MAKE_ERR(ierr);
					goto lpgs_err;
				}
				break;
			}
		}
		if (lp_addr == e_sptaddr) {
			break;
		}
		ASSERT(lp_addr < e_sptaddr);

		/*
		 * ierr == -1 means we failed to allocate a large page.
		 * so do a size down operation.
		 *
		 * ierr == -2 means some other process that privately shares
		 * pages with this process has allocated a larger page and we
		 * need to retry with larger pages. So do a size up
		 * operation. This relies on the fact that large pages are
		 * never partially shared i.e. if we share any constituent
		 * page of a large page with another process we must share the
		 * entire large page. Note this cannot happen for SOFTLOCK
		 * case, unless current address (lpaddr) is at the beginning
		 * of the next page size boundary because the other process
		 * couldn't have relocated locked pages.
		 */
		ASSERT(ierr == -1 || ierr == -2);
		if (segvn_anypgsz) {
			ASSERT(ierr == -2 || szc != 0);
			ASSERT(ierr == -1 || szc < sptseg->s_szc);
			szc = (ierr == -1) ? szc - 1 : szc + 1;
		} else {
			/*
			 * For faults and segvn_anypgsz == 0
			 * we need to be careful not to loop forever
			 * if existing page is found with szc other
			 * than 0 or seg->s_szc. This could be due
			 * to page relocations on behalf of DR or
			 * more likely large page creation. For this
			 * case simply re-size to existing page's szc
			 * if returned by anon_map_getpages().
			 */
			if (ppa_szc == (uint_t)-1) {
				szc = (ierr == -1) ? 0 : sptseg->s_szc;
			} else {
				ASSERT(ppa_szc <= sptseg->s_szc);
				ASSERT(ierr == -2 || ppa_szc < szc);
				ASSERT(ierr == -1 || ppa_szc > szc);
				szc = ppa_szc;
			}
		}
		pg_sz = page_get_pagesize(szc);
		lp_npgs = btop(pg_sz);
		ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
	return (0);

lpgs_err:
	ANON_LOCK_EXIT(&amp->a_rwlock);
	for (j = 0; j < ppa_idx; j++)
		page_unlock(ppa[j]);
	return (err);
}

int
spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
    page_t **ppa, ulong_t *lockmap, size_t pos)
{
	struct shm_data *shmd = seg->s_data;
	struct spt_data *sptd = shmd->shm_sptseg->s_data;
	ulong_t i;
	int kernel;

	for (i = 0; i < npages; anon_index++, pos++, i++) {
		if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
			if (sptd->spt_ppa_lckcnt[anon_index] <
			    (ushort_t)DISM_LOCK_MAX) {
				if (++sptd->spt_ppa_lckcnt[anon_index] ==
				    (ushort_t)DISM_LOCK_MAX) {
					cmn_err(CE_WARN,
					    "DISM page lock limit "
					    "reached on DISM offset 0x%lx\n",
					    anon_index << PAGESHIFT);
				}
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[anon_index]) ? 1 : 0;
				if (!page_pp_lock(ppa[i], 0, kernel)) {
					/* unlock rest of the pages */
					for (; i < npages; i++)
						page_unlock(ppa[i]);
					sptd->spt_ppa_lckcnt[anon_index]--;
					return (EAGAIN);
				}
				shmd->shm_lckpgs++;
				shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
				if (lockmap != NULL)
					BT_SET(lockmap, pos);
			}
		}
		page_unlock(ppa[i]);
	}
	return (0);
}

/*ARGSUSED*/
static int
segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos)
{
	struct shm_data *shmd = seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t npages, a_npages;
	page_t **ppa;
	pgcnt_t an_idx, a_an_idx, ppa_idx;
	caddr_t spt_addr, a_addr;	/* spt and aligned address */
	size_t a_len;			/* aligned len */
	size_t share_sz;
	ulong_t i;
	int sts = 0;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		return (0);
	}

	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	an_idx = seg_page(seg, addr);
	npages = btopr(len);

	if (an_idx + npages > btopr(shmd->shm_amp->size)) {
		return (ENOMEM);
	}

	if (op == MC_LOCK) {
		/*
		 * Need to align addr and size request if they are not
		 * aligned so we can always allocate large page(s) however
		 * we only lock what was requested in initial request.
		 */
		share_sz = page_get_pagesize(sptseg->s_szc);
		a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
		a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
		    share_sz);
		a_npages = btop(a_len);
		a_an_idx = seg_page(seg, a_addr);
		spt_addr = sptseg->s_base + ptob(a_an_idx);
		ppa_idx = an_idx - a_an_idx;

		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
		    KM_NOSLEEP)) == NULL) {
			return (ENOMEM);
		}

		/*
		 * Don't cache any new pages for IO and
		 * flush any cached pages.
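		 *
		 * Setting DISM_PPA_CHANGED below tells the pagelock path
		 * (see the _dismpagelock() reference further down) that its
		 * cached spt_ppa page array is stale, since the lock state
		 * of these pages is about to change.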
		 */
		mutex_enter(&sptd->spt_lock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
		if (sts != 0) {
			mutex_exit(&sptd->spt_lock);
			kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
			return (sts);
		}

		sts = spt_lockpages(seg, an_idx, npages,
		    &ppa[ppa_idx], lockmap, pos);
		/*
		 * unlock remaining pages for requests which are not
		 * aligned or not in 4 M chunks
		 */
		for (i = 0; i < ppa_idx; i++)
			page_unlock(ppa[i]);
		for (i = ppa_idx + npages; i < a_npages; i++)
			page_unlock(ppa[i]);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);

		kmem_free(ppa, ((sizeof (page_t *)) * a_npages));

	} else if (op == MC_UNLOCK) {	/* unlock */
		struct anon_map *amp;
		struct anon *ap;
		struct vnode *vp;
		u_offset_t off;
		struct page *pp;
		int kernel;
		anon_sync_obj_t cookie;

		amp = sptd->spt_amp;
		mutex_enter(&sptd->spt_lock);
		if (shmd->shm_lckpgs == 0) {
			mutex_exit(&sptd->spt_lock);
			return (0);
		}
		/*
		 * Don't cache new IO pages.
		 */
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, an_idx++) {
			if (shmd->shm_vpage[an_idx] & DISM_PG_LOCKED) {
				anon_array_enter(amp, an_idx, &cookie);
				ap = anon_get_ptr(amp->ahp, an_idx);
				ASSERT(ap);
				ASSERT(sptd->spt_ppa_lckcnt[an_idx] > 0);

				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup(vp, off, SE_SHARED);
				ASSERT(pp);
				/*
				 * the availrmem is decremented only for
				 * pages which are not in seg pcache,
				 * for pages in seg pcache availrmem was
				 * decremented in _dismpagelock() (if
				 * they were not locked here)
				 */
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[an_idx]) ? 1 : 0;
				page_pp_unlock(pp, 0, kernel);
				page_unlock(pp);
				shmd->shm_vpage[an_idx] &= ~DISM_PG_LOCKED;
				sptd->spt_ppa_lckcnt[an_idx]--;
				shmd->shm_lckpgs--;
			}
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);
	}
	return (sts);
}

/*ARGSUSED*/
int
segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * ISM segment is always rw.
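	 * Every page in the range therefore reports the same protections,
	 * taken from spt_prot.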
	 */
	while (--pgno >= 0)
		*protv++ = sptd->spt_prot;
	return (0);
}

/*ARGSUSED*/
u_offset_t
segspt_shmgetoffset(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/* Offset does not matter in ISM memory */

	return ((u_offset_t)0);
}

/* ARGSUSED */
int
segspt_shmgettype(struct seg *seg, caddr_t addr)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * The shared memory mapping is always MAP_SHARED, SWAP is only
	 * reserved for DISM
	 */
	return (MAP_SHARED |
	    ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
}

/*ARGSUSED*/
int
segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	*vpp = sptd->spt_vp;
	return (0);
}

/*ARGSUSED*/
static int
segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	struct anon_map *amp;
	pgcnt_t pg_idx;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	if (behav == MADV_FREE) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
			return (0);

		amp = sptd->spt_amp;
		pg_idx = seg_page(seg, addr);

		mutex_enter(&sptd->spt_lock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);

		/*
		 * Purge all DISM cached pages
		 */
		seg_ppurge_seg(segspt_reclaim);

		mutex_enter(&sptd->spt_lock);
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		anon_disclaim(amp, pg_idx, len, ANON_PGLOOKUP_BLK);
		ANON_LOCK_EXIT(&amp->a_rwlock);
		mutex_exit(&sptd->spt_lock);
	} else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
	    behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
		int		already_set;
		ulong_t		anon_index;
		lgrp_mem_policy_t	policy;
		caddr_t		shm_addr;
		size_t		share_size;
		size_t		size;
		struct seg	*sptseg = shmd->shm_sptseg;
		caddr_t		sptseg_addr;

		/*
		 * Align address and length to page size of underlying segment
		 */
		share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
		shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
		size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
		    share_size);

		amp = shmd->shm_amp;
		anon_index = seg_page(seg, shm_addr);

		/*
		 * And now we may have to adjust size downward if we have
		 * exceeded the realsize of the segment or initial anon
		 * allocations.
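		 * (This mirrors the clipping done in segspt_shmfault()
		 * above.)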
		 */
		sptseg_addr = sptseg->s_base + ptob(anon_index);
		if ((sptseg_addr + size) >
		    (sptseg->s_base + sptd->spt_realsize))
			size = (sptseg->s_base + sptd->spt_realsize) -
			    sptseg_addr;

		/*
		 * Set memory allocation policy for this segment
		 */
		policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
		already_set = lgrp_shm_policy_set(policy, amp, anon_index,
		    NULL, 0, len);

		/*
		 * If random memory allocation policy set already,
		 * don't bother reapplying it.
		 */
		if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
			return (0);

		/*
		 * Mark any existing pages in the given range for
		 * migration, flushing the I/O page cache, and using
		 * underlying segment to calculate anon index and get
		 * anonmap and vnode pointer from
		 */
		if (shmd->shm_softlockcnt > 0)
			segspt_purge(seg);

		page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
	}

	return (0);
}

/*ARGSUSED*/
void
segspt_shmdump(struct seg *seg)
{
	/* no-op for ISM segment */
}

/*ARGSUSED*/
static int
segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	return (ENOTSUP);
}

/*
 * get a memory ID for an addr in a given segment
 */
static int
segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon *ap;
	size_t anon_index;
	struct anon_map *amp = shmd->shm_amp;
	struct spt_data *sptd = shmd->shm_sptseg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	anon_sync_obj_t cookie;

	anon_index = seg_page(seg, addr);

	if (addr > (seg->s_base + sptd->spt_realsize)) {
		return (EFAULT);
	}

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	anon_array_enter(amp, anon_index, &cookie);
	ap = anon_get_ptr(amp->ahp, anon_index);
	if (ap == NULL) {
		struct page *pp;
		caddr_t spt_addr = sptseg->s_base + ptob(anon_index);

		pp = anon_zero(sptseg, spt_addr, &ap, kcred);
		if (pp == NULL) {
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			return (ENOMEM);
		}
		(void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
		page_unlock(pp);
	}
	anon_array_exit(&cookie);
	ANON_LOCK_EXIT(&amp->a_rwlock);
	memidp->val[0] = (uintptr_t)ap;
	memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
	return (0);
}

/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map *amp;
	ulong_t anon_index;
	lgrp_mem_policy_info_t *policy_info;
	struct shm_data *shm_data;

	ASSERT(seg != NULL);

	/*
	 * Get anon_map from segshm
	 *
	 * Assume that no lock needs to be held on anon_map, since
	 * it should be protected by its reference count which must be
	 * nonzero for an existing segment
	 * Need to grab readers lock on policy tree though
	 */
	shm_data = (struct shm_data *)seg->s_data;
	if (shm_data == NULL)
		return (NULL);
	amp = shm_data->shm_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Get policy info
	 *
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

	return (policy_info);
}

/*ARGSUSED*/
static int
segspt_shmcapable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
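
/*
 * Illustrative userland sketch of how the entry points above are reached;
 * the helper name is made up and error handling and the memory-locking
 * privilege are omitted.  Attaching with SHM_SHARE_MMU gives ISM, whose
 * faults arrive via segspt_shmfault(); attaching with SHM_PAGEABLE gives
 * DISM, whose faults are forwarded to segspt_dismfault() and whose pages
 * are locked and unlocked through segspt_shmlockop() (MC_LOCK/MC_UNLOCK)
 * when the application calls mlock()/munlock().
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *	#include <sys/mman.h>
 *
 *	void
 *	dism_example(size_t len)
 *	{
 *		int id = shmget(IPC_PRIVATE, len, IPC_CREAT | 0600);
 *		caddr_t va = shmat(id, NULL, SHM_PAGEABLE);
 *
 *		// mlock(3C) drives segspt_shmlockop(MC_LOCK) for DISM
 *		(void) mlock(va, len);
 *		(void) munlock(va, len);
 *		(void) shmdt(va);
 *	}
 */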