1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include <sys/param.h> 30 #include <sys/user.h> 31 #include <sys/mman.h> 32 #include <sys/kmem.h> 33 #include <sys/sysmacros.h> 34 #include <sys/cmn_err.h> 35 #include <sys/systm.h> 36 #include <sys/tuneable.h> 37 #include <vm/hat.h> 38 #include <vm/seg.h> 39 #include <vm/as.h> 40 #include <vm/anon.h> 41 #include <vm/page.h> 42 #include <sys/buf.h> 43 #include <sys/swap.h> 44 #include <sys/atomic.h> 45 #include <vm/seg_spt.h> 46 #include <sys/debug.h> 47 #include <sys/vtrace.h> 48 #include <sys/shm.h> 49 #include <sys/lgrp.h> 50 #include <sys/vmsystm.h> 51 52 #include <sys/tnf_probe.h> 53 54 #define SEGSPTADDR (caddr_t)0x0 55 56 /* 57 * # pages used for spt 58 */ 59 static size_t spt_used; 60 61 /* 62 * segspt_minfree is the memory left for system after ISM 63 * locked its pages; it is set up to 5% of availrmem in 64 * sptcreate when ISM is created. ISM should not use more 65 * than ~90% of availrmem; if it does, then the performance 66 * of the system may decrease. Machines with large memories may 67 * be able to use up more memory for ISM so we set the default 68 * segspt_minfree to 5% (which gives ISM max 95% of availrmem. 69 * If somebody wants even more memory for ISM (risking hanging 70 * the system) they can patch the segspt_minfree to smaller number. 71 */ 72 pgcnt_t segspt_minfree = 0; 73 74 static int segspt_create(struct seg *seg, caddr_t argsp); 75 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize); 76 static void segspt_free(struct seg *seg); 77 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len); 78 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr); 79 80 static void 81 segspt_badop() 82 { 83 panic("segspt_badop called"); 84 /*NOTREACHED*/ 85 } 86 87 #define SEGSPT_BADOP(t) (t(*)())segspt_badop 88 89 struct seg_ops segspt_ops = { 90 SEGSPT_BADOP(int), /* dup */ 91 segspt_unmap, 92 segspt_free, 93 SEGSPT_BADOP(int), /* fault */ 94 SEGSPT_BADOP(faultcode_t), /* faulta */ 95 SEGSPT_BADOP(int), /* setprot */ 96 SEGSPT_BADOP(int), /* checkprot */ 97 SEGSPT_BADOP(int), /* kluster */ 98 SEGSPT_BADOP(size_t), /* swapout */ 99 SEGSPT_BADOP(int), /* sync */ 100 SEGSPT_BADOP(size_t), /* incore */ 101 SEGSPT_BADOP(int), /* lockop */ 102 SEGSPT_BADOP(int), /* getprot */ 103 SEGSPT_BADOP(u_offset_t), /* getoffset */ 104 SEGSPT_BADOP(int), /* gettype */ 105 SEGSPT_BADOP(int), /* getvp */ 106 SEGSPT_BADOP(int), /* advise */ 107 SEGSPT_BADOP(void), /* dump */ 108 SEGSPT_BADOP(int), /* pagelock */ 109 SEGSPT_BADOP(int), /* setpgsz */ 110 SEGSPT_BADOP(int), /* getmemid */ 111 segspt_getpolicy, /* getpolicy */ 112 SEGSPT_BADOP(int), /* capable */ 113 }; 114 115 static int segspt_shmdup(struct seg *seg, struct seg *newseg); 116 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize); 117 static void segspt_shmfree(struct seg *seg); 118 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg, 119 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw); 120 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr); 121 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr, 122 register size_t len, register uint_t prot); 123 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, 124 uint_t prot); 125 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta); 126 static size_t segspt_shmswapout(struct seg *seg); 127 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, 128 register char *vec); 129 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len, 130 int attr, uint_t flags); 131 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len, 132 int attr, int op, ulong_t *lockmap, size_t pos); 133 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, 134 uint_t *protv); 135 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr); 136 static int segspt_shmgettype(struct seg *seg, caddr_t addr); 137 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp); 138 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, 139 uint_t behav); 140 static void segspt_shmdump(struct seg *seg); 141 static int segspt_shmpagelock(struct seg *, caddr_t, size_t, 142 struct page ***, enum lock_type, enum seg_rw); 143 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t); 144 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *); 145 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t); 146 static int segspt_shmcapable(struct seg *, segcapability_t); 147 148 struct seg_ops segspt_shmops = { 149 segspt_shmdup, 150 segspt_shmunmap, 151 segspt_shmfree, 152 segspt_shmfault, 153 segspt_shmfaulta, 154 segspt_shmsetprot, 155 segspt_shmcheckprot, 156 segspt_shmkluster, 157 segspt_shmswapout, 158 segspt_shmsync, 159 segspt_shmincore, 160 segspt_shmlockop, 161 segspt_shmgetprot, 162 segspt_shmgetoffset, 163 segspt_shmgettype, 164 segspt_shmgetvp, 165 segspt_shmadvise, /* advise */ 166 segspt_shmdump, 167 segspt_shmpagelock, 168 segspt_shmsetpgsz, 169 segspt_shmgetmemid, 170 segspt_shmgetpolicy, 171 segspt_shmcapable, 172 }; 173 174 static void segspt_purge(struct seg *seg); 175 static int segspt_reclaim(struct seg *, caddr_t, size_t, struct page **, 176 enum seg_rw); 177 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len, 178 page_t **ppa); 179 180 181 182 /*ARGSUSED*/ 183 int 184 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp, 185 uint_t prot, uint_t flags, uint_t share_szc) 186 { 187 int err; 188 struct as *newas; 189 struct segspt_crargs sptcargs; 190 191 #ifdef DEBUG 192 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */, 193 tnf_ulong, size, size ); 194 #endif 195 if (segspt_minfree == 0) /* leave min 5% of availrmem for */ 196 segspt_minfree = availrmem/20; /* for the system */ 197 198 if (!hat_supported(HAT_SHARED_PT, (void *)0)) 199 return (EINVAL); 200 201 /* 202 * get a new as for this shared memory segment 203 */ 204 newas = as_alloc(); 205 sptcargs.amp = amp; 206 sptcargs.prot = prot; 207 sptcargs.flags = flags; 208 sptcargs.szc = share_szc; 209 210 /* 211 * create a shared page table (spt) segment 212 */ 213 214 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) { 215 as_free(newas); 216 return (err); 217 } 218 *sptseg = sptcargs.seg_spt; 219 return (0); 220 } 221 222 void 223 sptdestroy(struct as *as, struct anon_map *amp) 224 { 225 226 #ifdef DEBUG 227 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */); 228 #endif 229 (void) as_unmap(as, SEGSPTADDR, amp->size); 230 as_free(as); 231 } 232 233 /* 234 * called from seg_free(). 235 * free (i.e., unlock, unmap, return to free list) 236 * all the pages in the given seg. 237 */ 238 void 239 segspt_free(struct seg *seg) 240 { 241 struct spt_data *sptd = (struct spt_data *)seg->s_data; 242 243 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 244 245 if (sptd != NULL) { 246 if (sptd->spt_realsize) 247 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize); 248 249 if (sptd->spt_ppa_lckcnt) 250 kmem_free(sptd->spt_ppa_lckcnt, 251 sizeof (*sptd->spt_ppa_lckcnt) 252 * btopr(sptd->spt_amp->size)); 253 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp)); 254 mutex_destroy(&sptd->spt_lock); 255 kmem_free(sptd, sizeof (*sptd)); 256 } 257 } 258 259 /*ARGSUSED*/ 260 static int 261 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr, 262 uint_t flags) 263 { 264 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 265 266 return (0); 267 } 268 269 /*ARGSUSED*/ 270 static size_t 271 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec) 272 { 273 caddr_t eo_seg; 274 pgcnt_t npages; 275 struct shm_data *shmd = (struct shm_data *)seg->s_data; 276 struct seg *sptseg; 277 struct spt_data *sptd; 278 279 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 280 #ifdef lint 281 seg = seg; 282 #endif 283 sptseg = shmd->shm_sptseg; 284 sptd = sptseg->s_data; 285 286 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 287 eo_seg = addr + len; 288 while (addr < eo_seg) { 289 /* page exists, and it's locked. */ 290 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED | 291 SEG_PAGE_ANON; 292 addr += PAGESIZE; 293 } 294 return (len); 295 } else { 296 struct anon_map *amp = shmd->shm_amp; 297 struct anon *ap; 298 page_t *pp; 299 pgcnt_t anon_index; 300 struct vnode *vp; 301 u_offset_t off; 302 ulong_t i; 303 int ret; 304 anon_sync_obj_t cookie; 305 306 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK); 307 anon_index = seg_page(seg, addr); 308 npages = btopr(len); 309 if (anon_index + npages > btopr(shmd->shm_amp->size)) { 310 return (EINVAL); 311 } 312 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 313 for (i = 0; i < npages; i++, anon_index++) { 314 ret = 0; 315 anon_array_enter(amp, anon_index, &cookie); 316 ap = anon_get_ptr(amp->ahp, anon_index); 317 if (ap != NULL) { 318 swap_xlate(ap, &vp, &off); 319 anon_array_exit(&cookie); 320 pp = page_lookup_nowait(vp, off, SE_SHARED); 321 if (pp != NULL) { 322 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON; 323 page_unlock(pp); 324 } 325 } else { 326 anon_array_exit(&cookie); 327 } 328 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) { 329 ret |= SEG_PAGE_LOCKED; 330 } 331 *vec++ = (char)ret; 332 } 333 ANON_LOCK_EXIT(&->a_rwlock); 334 return (len); 335 } 336 } 337 338 static int 339 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize) 340 { 341 size_t share_size; 342 343 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 344 345 /* 346 * seg.s_size may have been rounded up to the largest page size 347 * in shmat(). 348 * XXX This should be cleanedup. sptdestroy should take a length 349 * argument which should be the same as sptcreate. Then 350 * this rounding would not be needed (or is done in shm.c) 351 * Only the check for full segment will be needed. 352 * 353 * XXX -- shouldn't raddr == 0 always? These tests don't seem 354 * to be useful at all. 355 */ 356 share_size = page_get_pagesize(seg->s_szc); 357 ssize = P2ROUNDUP(ssize, share_size); 358 359 if (raddr == seg->s_base && ssize == seg->s_size) { 360 seg_free(seg); 361 return (0); 362 } else 363 return (EINVAL); 364 } 365 366 int 367 segspt_create(struct seg *seg, caddr_t argsp) 368 { 369 int err; 370 caddr_t addr = seg->s_base; 371 struct spt_data *sptd; 372 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp; 373 struct anon_map *amp = sptcargs->amp; 374 struct cred *cred = CRED(); 375 ulong_t i, j, anon_index = 0; 376 pgcnt_t npages = btopr(amp->size); 377 struct vnode *vp; 378 page_t **ppa; 379 uint_t hat_flags; 380 381 /* 382 * We are holding the a_lock on the underlying dummy as, 383 * so we can make calls to the HAT layer. 384 */ 385 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 386 387 #ifdef DEBUG 388 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */, 389 tnf_opaque, addr, addr, 390 tnf_ulong, len, seg->s_size); 391 #endif 392 if ((sptcargs->flags & SHM_PAGEABLE) == 0) { 393 if (err = anon_swap_adjust(npages)) 394 return (err); 395 } 396 err = ENOMEM; 397 398 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL) 399 goto out1; 400 401 if ((sptcargs->flags & SHM_PAGEABLE) == 0) { 402 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages), 403 KM_NOSLEEP)) == NULL) 404 goto out2; 405 } 406 407 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL); 408 409 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL) 410 goto out3; 411 412 seg->s_ops = &segspt_ops; 413 sptd->spt_vp = vp; 414 sptd->spt_amp = amp; 415 sptd->spt_prot = sptcargs->prot; 416 sptd->spt_flags = sptcargs->flags; 417 seg->s_data = (caddr_t)sptd; 418 sptd->spt_ppa = NULL; 419 sptd->spt_ppa_lckcnt = NULL; 420 seg->s_szc = sptcargs->szc; 421 422 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 423 amp->a_szc = seg->s_szc; 424 ANON_LOCK_EXIT(&->a_rwlock); 425 426 /* 427 * Set policy to affect initial allocation of pages in 428 * anon_map_createpages() 429 */ 430 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index, 431 NULL, 0, ptob(npages)); 432 433 if (sptcargs->flags & SHM_PAGEABLE) { 434 size_t share_sz; 435 pgcnt_t new_npgs, more_pgs; 436 struct anon_hdr *nahp; 437 438 share_sz = page_get_pagesize(seg->s_szc); 439 if (!IS_P2ALIGNED(amp->size, share_sz)) { 440 /* 441 * We are rounding up the size of the anon array 442 * on 4 M boundary because we always create 4 M 443 * of page(s) when locking, faulting pages and we 444 * don't have to check for all corner cases e.g. 445 * if there is enough space to allocate 4 M 446 * page. 447 */ 448 new_npgs = btop(P2ROUNDUP(amp->size, share_sz)); 449 more_pgs = new_npgs - npages; 450 451 if (anon_resv(ptob(more_pgs)) == 0) { 452 err = ENOMEM; 453 goto out4; 454 } 455 nahp = anon_create(new_npgs, ANON_SLEEP); 456 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 457 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages, 458 ANON_SLEEP); 459 anon_release(amp->ahp, npages); 460 amp->ahp = nahp; 461 amp->swresv = amp->size = ptob(new_npgs); 462 ANON_LOCK_EXIT(&->a_rwlock); 463 npages = new_npgs; 464 } 465 466 sptd->spt_ppa_lckcnt = kmem_zalloc(npages * 467 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP); 468 sptd->spt_pcachecnt = 0; 469 sptd->spt_realsize = ptob(npages); 470 sptcargs->seg_spt = seg; 471 return (0); 472 } 473 474 /* 475 * get array of pages for each anon slot in amp 476 */ 477 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa, 478 seg, addr, S_CREATE, cred)) != 0) 479 goto out4; 480 481 /* 482 * addr is initial address corresponding to the first page on ppa list 483 */ 484 for (i = 0; i < npages; i++) { 485 /* attempt to lock all pages */ 486 if (!page_pp_lock(ppa[i], 0, 1)) { 487 /* 488 * if unable to lock any page, unlock all 489 * of them and return error 490 */ 491 for (j = 0; j < i; j++) 492 page_pp_unlock(ppa[j], 0, 1); 493 for (i = 0; i < npages; i++) { 494 page_unlock(ppa[i]); 495 } 496 err = ENOMEM; 497 goto out4; 498 } 499 } 500 501 /* 502 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 503 * for the entire life of the segment. For example platforms 504 * that do not support Dynamic Reconfiguration. 505 */ 506 hat_flags = HAT_LOAD_SHARE; 507 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) 508 hat_flags |= HAT_LOAD_LOCK; 509 510 hat_memload_array(seg->s_as->a_hat, addr, ptob(npages), 511 ppa, sptd->spt_prot, hat_flags); 512 513 /* 514 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 515 * we will leave the pages locked SE_SHARED for the life 516 * of the ISM segment. This will prevent any calls to 517 * hat_pageunload() on this ISM segment for those platforms. 518 */ 519 if (!(hat_flags & HAT_LOAD_LOCK)) { 520 /* 521 * On platforms that support HAT_DYNAMIC_ISM_UNMAP, 522 * we no longer need to hold the SE_SHARED lock on the pages, 523 * since L_PAGELOCK and F_SOFTLOCK calls will grab the 524 * SE_SHARED lock on the pages as necessary. 525 */ 526 for (i = 0; i < npages; i++) 527 page_unlock(ppa[i]); 528 } 529 sptd->spt_pcachecnt = 0; 530 kmem_free(ppa, ((sizeof (page_t *)) * npages)); 531 sptd->spt_realsize = ptob(npages); 532 atomic_add_long(&spt_used, npages); 533 sptcargs->seg_spt = seg; 534 return (0); 535 536 out4: 537 seg->s_data = NULL; 538 kmem_free(vp, sizeof (*vp)); 539 out3: 540 mutex_destroy(&sptd->spt_lock); 541 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 542 kmem_free(ppa, (sizeof (*ppa) * npages)); 543 out2: 544 kmem_free(sptd, sizeof (*sptd)); 545 out1: 546 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 547 anon_swap_restore(npages); 548 return (err); 549 } 550 551 /*ARGSUSED*/ 552 void 553 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len) 554 { 555 struct page *pp; 556 struct spt_data *sptd = (struct spt_data *)seg->s_data; 557 pgcnt_t npages; 558 ulong_t anon_idx; 559 struct anon_map *amp; 560 struct anon *ap; 561 struct vnode *vp; 562 u_offset_t off; 563 uint_t hat_flags; 564 int root = 0; 565 pgcnt_t pgs, curnpgs = 0; 566 page_t *rootpp; 567 568 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 569 570 len = P2ROUNDUP(len, PAGESIZE); 571 572 npages = btop(len); 573 574 hat_flags = HAT_UNLOAD_UNLOCK; 575 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) || 576 (sptd->spt_flags & SHM_PAGEABLE)) { 577 hat_flags = HAT_UNLOAD; 578 } 579 580 hat_unload(seg->s_as->a_hat, addr, len, hat_flags); 581 582 amp = sptd->spt_amp; 583 if (sptd->spt_flags & SHM_PAGEABLE) 584 npages = btop(amp->size); 585 586 ASSERT(amp); 587 for (anon_idx = 0; anon_idx < npages; anon_idx++) { 588 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 589 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) { 590 panic("segspt_free_pages: null app"); 591 /*NOTREACHED*/ 592 } 593 } else { 594 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx)) 595 == NULL) 596 continue; 597 } 598 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0); 599 swap_xlate(ap, &vp, &off); 600 601 /* 602 * If this platform supports HAT_DYNAMIC_ISM_UNMAP, 603 * the pages won't be having SE_SHARED lock at this 604 * point. 605 * 606 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 607 * the pages are still held SE_SHARED locked from the 608 * original segspt_create() 609 * 610 * Our goal is to get SE_EXCL lock on each page, remove 611 * permanent lock on it and invalidate the page. 612 */ 613 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 614 if (hat_flags == HAT_UNLOAD) 615 pp = page_lookup(vp, off, SE_EXCL); 616 else { 617 if ((pp = page_find(vp, off)) == NULL) { 618 panic("segspt_free_pages: " 619 "page not locked"); 620 /*NOTREACHED*/ 621 } 622 if (!page_tryupgrade(pp)) { 623 page_unlock(pp); 624 pp = page_lookup(vp, off, SE_EXCL); 625 } 626 } 627 if (pp == NULL) { 628 panic("segspt_free_pages: " 629 "page not in the system"); 630 /*NOTREACHED*/ 631 } 632 page_pp_unlock(pp, 0, 1); 633 } else { 634 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL) 635 continue; 636 page_pp_unlock(pp, 0, 0); 637 } 638 /* 639 * It's logical to invalidate the pages here as in most cases 640 * these were created by segspt. 641 */ 642 if (pp->p_szc != 0) { 643 /* 644 * For DISM swap is released in shm_rm_amp. 645 */ 646 if ((sptd->spt_flags & SHM_PAGEABLE) == 0 && 647 ap->an_pvp != NULL) { 648 panic("segspt_free_pages: pvp non NULL"); 649 /*NOTREACHED*/ 650 } 651 if (root == 0) { 652 ASSERT(curnpgs == 0); 653 root = 1; 654 rootpp = pp; 655 pgs = curnpgs = page_get_pagecnt(pp->p_szc); 656 ASSERT(pgs > 1); 657 ASSERT(IS_P2ALIGNED(pgs, pgs)); 658 ASSERT(!(page_pptonum(pp) & (pgs - 1))); 659 curnpgs--; 660 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) { 661 ASSERT(curnpgs == 1); 662 ASSERT(page_pptonum(pp) == 663 page_pptonum(rootpp) + (pgs - 1)); 664 page_destroy_pages(rootpp); 665 root = 0; 666 curnpgs = 0; 667 } else { 668 ASSERT(curnpgs > 1); 669 ASSERT(page_pptonum(pp) == 670 page_pptonum(rootpp) + (pgs - curnpgs)); 671 curnpgs--; 672 } 673 } else { 674 if (root != 0 || curnpgs != 0) { 675 panic("segspt_free_pages: bad large page"); 676 /*NOTREACHED*/ 677 } 678 /*LINTED: constant in conditional context */ 679 VN_DISPOSE(pp, B_INVAL, 0, kcred); 680 } 681 } 682 683 if (root != 0 || curnpgs != 0) { 684 panic("segspt_free_pages: bad large page"); 685 /*NOTREACHED*/ 686 } 687 688 /* 689 * mark that pages have been released 690 */ 691 sptd->spt_realsize = 0; 692 693 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 694 atomic_add_long(&spt_used, -npages); 695 anon_swap_restore(npages); 696 } 697 } 698 699 /* 700 * Get memory allocation policy info for specified address in given segment 701 */ 702 static lgrp_mem_policy_info_t * 703 segspt_getpolicy(struct seg *seg, caddr_t addr) 704 { 705 struct anon_map *amp; 706 ulong_t anon_index; 707 lgrp_mem_policy_info_t *policy_info; 708 struct spt_data *spt_data; 709 710 ASSERT(seg != NULL); 711 712 /* 713 * Get anon_map from segspt 714 * 715 * Assume that no lock needs to be held on anon_map, since 716 * it should be protected by its reference count which must be 717 * nonzero for an existing segment 718 * Need to grab readers lock on policy tree though 719 */ 720 spt_data = (struct spt_data *)seg->s_data; 721 if (spt_data == NULL) 722 return (NULL); 723 amp = spt_data->spt_amp; 724 ASSERT(amp->refcnt != 0); 725 726 /* 727 * Get policy info 728 * 729 * Assume starting anon index of 0 730 */ 731 anon_index = seg_page(seg, addr); 732 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0); 733 734 return (policy_info); 735 } 736 737 /* 738 * DISM only. 739 * Return locked pages over a given range. 740 * 741 * We will cache all DISM locked pages and save the pplist for the 742 * entire segment in the ppa field of the underlying DISM segment structure. 743 * Later, during a call to segspt_reclaim() we will use this ppa array 744 * to page_unlock() all of the pages and then we will free this ppa list. 745 */ 746 /*ARGSUSED*/ 747 static int 748 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len, 749 struct page ***ppp, enum lock_type type, enum seg_rw rw) 750 { 751 struct shm_data *shmd = (struct shm_data *)seg->s_data; 752 struct seg *sptseg = shmd->shm_sptseg; 753 struct spt_data *sptd = sptseg->s_data; 754 pgcnt_t pg_idx, npages, tot_npages, npgs; 755 struct page **pplist, **pl, **ppa, *pp; 756 struct anon_map *amp; 757 spgcnt_t an_idx; 758 int ret = ENOTSUP; 759 uint_t pl_built = 0; 760 struct anon *ap; 761 struct vnode *vp; 762 u_offset_t off; 763 pgcnt_t claim_availrmem = 0; 764 uint_t szc; 765 766 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 767 768 /* 769 * We want to lock/unlock the entire ISM segment. Therefore, 770 * we will be using the underlying sptseg and it's base address 771 * and length for the caching arguments. 772 */ 773 ASSERT(sptseg); 774 ASSERT(sptd); 775 776 pg_idx = seg_page(seg, addr); 777 npages = btopr(len); 778 779 /* 780 * check if the request is larger than number of pages covered 781 * by amp 782 */ 783 if (pg_idx + npages > btopr(sptd->spt_amp->size)) { 784 *ppp = NULL; 785 return (ENOTSUP); 786 } 787 788 if (type == L_PAGEUNLOCK) { 789 ASSERT(sptd->spt_ppa != NULL); 790 791 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 792 sptd->spt_ppa, sptd->spt_prot, segspt_reclaim); 793 794 /* 795 * If someone is blocked while unmapping, we purge 796 * segment page cache and thus reclaim pplist synchronously 797 * without waiting for seg_pasync_thread. This speeds up 798 * unmapping in cases where munmap(2) is called, while 799 * raw async i/o is still in progress or where a thread 800 * exits on data fault in a multithreaded application. 801 */ 802 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 803 segspt_purge(seg); 804 } 805 return (0); 806 } else if (type == L_PAGERECLAIM) { 807 ASSERT(sptd->spt_ppa != NULL); 808 (void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size, 809 sptd->spt_ppa, sptd->spt_prot); 810 return (0); 811 } 812 813 if (sptd->spt_flags & DISM_PPA_CHANGED) { 814 segspt_purge(seg); 815 /* 816 * for DISM ppa needs to be rebuild since 817 * number of locked pages could be changed 818 */ 819 *ppp = NULL; 820 return (ENOTSUP); 821 } 822 823 /* 824 * First try to find pages in segment page cache, without 825 * holding the segment lock. 826 */ 827 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 828 sptd->spt_prot); 829 if (pplist != NULL) { 830 ASSERT(sptd->spt_ppa != NULL); 831 ASSERT(sptd->spt_ppa == pplist); 832 ppa = sptd->spt_ppa; 833 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 834 if (ppa[an_idx] == NULL) { 835 seg_pinactive(seg, seg->s_base, 836 sptd->spt_amp->size, ppa, 837 sptd->spt_prot, segspt_reclaim); 838 *ppp = NULL; 839 return (ENOTSUP); 840 } 841 if ((szc = ppa[an_idx]->p_szc) != 0) { 842 npgs = page_get_pagecnt(szc); 843 an_idx = P2ROUNDUP(an_idx + 1, npgs); 844 } else { 845 an_idx++; 846 } 847 } 848 /* 849 * Since we cache the entire DISM segment, we want to 850 * set ppp to point to the first slot that corresponds 851 * to the requested addr, i.e. pg_idx. 852 */ 853 *ppp = &(sptd->spt_ppa[pg_idx]); 854 return (0); 855 } 856 857 /* The L_PAGELOCK case... */ 858 mutex_enter(&sptd->spt_lock); 859 /* 860 * try to find pages in segment page cache with mutex 861 */ 862 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 863 sptd->spt_prot); 864 if (pplist != NULL) { 865 ASSERT(sptd->spt_ppa != NULL); 866 ASSERT(sptd->spt_ppa == pplist); 867 ppa = sptd->spt_ppa; 868 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 869 if (ppa[an_idx] == NULL) { 870 mutex_exit(&sptd->spt_lock); 871 seg_pinactive(seg, seg->s_base, 872 sptd->spt_amp->size, ppa, 873 sptd->spt_prot, segspt_reclaim); 874 *ppp = NULL; 875 return (ENOTSUP); 876 } 877 if ((szc = ppa[an_idx]->p_szc) != 0) { 878 npgs = page_get_pagecnt(szc); 879 an_idx = P2ROUNDUP(an_idx + 1, npgs); 880 } else { 881 an_idx++; 882 } 883 } 884 /* 885 * Since we cache the entire DISM segment, we want to 886 * set ppp to point to the first slot that corresponds 887 * to the requested addr, i.e. pg_idx. 888 */ 889 mutex_exit(&sptd->spt_lock); 890 *ppp = &(sptd->spt_ppa[pg_idx]); 891 return (0); 892 } 893 if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) == 894 SEGP_FAIL) { 895 mutex_exit(&sptd->spt_lock); 896 *ppp = NULL; 897 return (ENOTSUP); 898 } 899 900 /* 901 * No need to worry about protections because DISM pages are always rw. 902 */ 903 pl = pplist = NULL; 904 amp = sptd->spt_amp; 905 906 /* 907 * Do we need to build the ppa array? 908 */ 909 if (sptd->spt_ppa == NULL) { 910 pgcnt_t lpg_cnt = 0; 911 912 pl_built = 1; 913 tot_npages = btopr(sptd->spt_amp->size); 914 915 ASSERT(sptd->spt_pcachecnt == 0); 916 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP); 917 pl = pplist; 918 919 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 920 for (an_idx = 0; an_idx < tot_npages; ) { 921 ap = anon_get_ptr(amp->ahp, an_idx); 922 /* 923 * Cache only mlocked pages. For large pages 924 * if one (constituent) page is mlocked 925 * all pages for that large page 926 * are cached also. This is for quick 927 * lookups of ppa array; 928 */ 929 if ((ap != NULL) && (lpg_cnt != 0 || 930 (sptd->spt_ppa_lckcnt[an_idx] != 0))) { 931 932 swap_xlate(ap, &vp, &off); 933 pp = page_lookup(vp, off, SE_SHARED); 934 ASSERT(pp != NULL); 935 if (lpg_cnt == 0) { 936 npgs = page_get_pagecnt(pp->p_szc); 937 if (!IS_P2ALIGNED(an_idx, npgs)) { 938 an_idx = P2ALIGN(an_idx, npgs); 939 page_unlock(pp); 940 continue; 941 } 942 } 943 if (++lpg_cnt == npgs) 944 lpg_cnt = 0; 945 946 /* 947 * availrmem is decremented only 948 * for unlocked pages 949 */ 950 if (sptd->spt_ppa_lckcnt[an_idx] == 0) 951 claim_availrmem++; 952 pplist[an_idx] = pp; 953 } 954 an_idx++; 955 } 956 ANON_LOCK_EXIT(&->a_rwlock); 957 958 mutex_enter(&freemem_lock); 959 if (availrmem < tune.t_minarmem + claim_availrmem) { 960 mutex_exit(&freemem_lock); 961 ret = FC_MAKE_ERR(ENOMEM); 962 claim_availrmem = 0; 963 goto insert_fail; 964 } else { 965 availrmem -= claim_availrmem; 966 } 967 mutex_exit(&freemem_lock); 968 969 sptd->spt_ppa = pl; 970 } else { 971 /* 972 * We already have a valid ppa[]. 973 */ 974 pl = sptd->spt_ppa; 975 } 976 977 ASSERT(pl != NULL); 978 979 ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size, 980 pl, sptd->spt_prot, SEGP_FORCE_WIRED | SEGP_ASYNC_FLUSH, 981 segspt_reclaim); 982 if (ret == SEGP_FAIL) { 983 /* 984 * seg_pinsert failed. We return 985 * ENOTSUP, so that the as_pagelock() code will 986 * then try the slower F_SOFTLOCK path. 987 */ 988 if (pl_built) { 989 /* 990 * No one else has referenced the ppa[]. 991 * We created it and we need to destroy it. 992 */ 993 sptd->spt_ppa = NULL; 994 } 995 ret = ENOTSUP; 996 goto insert_fail; 997 } 998 999 /* 1000 * In either case, we increment softlockcnt on the 'real' segment. 1001 */ 1002 sptd->spt_pcachecnt++; 1003 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1004 1005 ppa = sptd->spt_ppa; 1006 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 1007 if (ppa[an_idx] == NULL) { 1008 mutex_exit(&sptd->spt_lock); 1009 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 1010 pl, sptd->spt_prot, segspt_reclaim); 1011 *ppp = NULL; 1012 return (ENOTSUP); 1013 } 1014 if ((szc = ppa[an_idx]->p_szc) != 0) { 1015 npgs = page_get_pagecnt(szc); 1016 an_idx = P2ROUNDUP(an_idx + 1, npgs); 1017 } else { 1018 an_idx++; 1019 } 1020 } 1021 /* 1022 * We can now drop the sptd->spt_lock since the ppa[] 1023 * exists and he have incremented pacachecnt. 1024 */ 1025 mutex_exit(&sptd->spt_lock); 1026 1027 /* 1028 * Since we cache the entire segment, we want to 1029 * set ppp to point to the first slot that corresponds 1030 * to the requested addr, i.e. pg_idx. 1031 */ 1032 *ppp = &(sptd->spt_ppa[pg_idx]); 1033 return (ret); 1034 1035 insert_fail: 1036 /* 1037 * We will only reach this code if we tried and failed. 1038 * 1039 * And we can drop the lock on the dummy seg, once we've failed 1040 * to set up a new ppa[]. 1041 */ 1042 mutex_exit(&sptd->spt_lock); 1043 1044 if (pl_built) { 1045 mutex_enter(&freemem_lock); 1046 availrmem += claim_availrmem; 1047 mutex_exit(&freemem_lock); 1048 1049 /* 1050 * We created pl and we need to destroy it. 1051 */ 1052 pplist = pl; 1053 for (an_idx = 0; an_idx < tot_npages; an_idx++) { 1054 if (pplist[an_idx] != NULL) 1055 page_unlock(pplist[an_idx]); 1056 } 1057 kmem_free(pl, sizeof (page_t *) * tot_npages); 1058 } 1059 1060 if (shmd->shm_softlockcnt <= 0) { 1061 if (AS_ISUNMAPWAIT(seg->s_as)) { 1062 mutex_enter(&seg->s_as->a_contents); 1063 if (AS_ISUNMAPWAIT(seg->s_as)) { 1064 AS_CLRUNMAPWAIT(seg->s_as); 1065 cv_broadcast(&seg->s_as->a_cv); 1066 } 1067 mutex_exit(&seg->s_as->a_contents); 1068 } 1069 } 1070 *ppp = NULL; 1071 return (ret); 1072 } 1073 1074 1075 1076 /* 1077 * return locked pages over a given range. 1078 * 1079 * We will cache the entire ISM segment and save the pplist for the 1080 * entire segment in the ppa field of the underlying ISM segment structure. 1081 * Later, during a call to segspt_reclaim() we will use this ppa array 1082 * to page_unlock() all of the pages and then we will free this ppa list. 1083 */ 1084 /*ARGSUSED*/ 1085 static int 1086 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len, 1087 struct page ***ppp, enum lock_type type, enum seg_rw rw) 1088 { 1089 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1090 struct seg *sptseg = shmd->shm_sptseg; 1091 struct spt_data *sptd = sptseg->s_data; 1092 pgcnt_t np, page_index, npages; 1093 caddr_t a, spt_base; 1094 struct page **pplist, **pl, *pp; 1095 struct anon_map *amp; 1096 ulong_t anon_index; 1097 int ret = ENOTSUP; 1098 uint_t pl_built = 0; 1099 struct anon *ap; 1100 struct vnode *vp; 1101 u_offset_t off; 1102 1103 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1104 1105 /* 1106 * We want to lock/unlock the entire ISM segment. Therefore, 1107 * we will be using the underlying sptseg and it's base address 1108 * and length for the caching arguments. 1109 */ 1110 ASSERT(sptseg); 1111 ASSERT(sptd); 1112 1113 if (sptd->spt_flags & SHM_PAGEABLE) { 1114 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw)); 1115 } 1116 1117 page_index = seg_page(seg, addr); 1118 npages = btopr(len); 1119 1120 /* 1121 * check if the request is larger than number of pages covered 1122 * by amp 1123 */ 1124 if (page_index + npages > btopr(sptd->spt_amp->size)) { 1125 *ppp = NULL; 1126 return (ENOTSUP); 1127 } 1128 1129 if (type == L_PAGEUNLOCK) { 1130 1131 ASSERT(sptd->spt_ppa != NULL); 1132 1133 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 1134 sptd->spt_ppa, sptd->spt_prot, segspt_reclaim); 1135 1136 /* 1137 * If someone is blocked while unmapping, we purge 1138 * segment page cache and thus reclaim pplist synchronously 1139 * without waiting for seg_pasync_thread. This speeds up 1140 * unmapping in cases where munmap(2) is called, while 1141 * raw async i/o is still in progress or where a thread 1142 * exits on data fault in a multithreaded application. 1143 */ 1144 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 1145 segspt_purge(seg); 1146 } 1147 return (0); 1148 } else if (type == L_PAGERECLAIM) { 1149 ASSERT(sptd->spt_ppa != NULL); 1150 1151 (void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size, 1152 sptd->spt_ppa, sptd->spt_prot); 1153 return (0); 1154 } 1155 1156 /* 1157 * First try to find pages in segment page cache, without 1158 * holding the segment lock. 1159 */ 1160 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 1161 sptd->spt_prot); 1162 if (pplist != NULL) { 1163 ASSERT(sptd->spt_ppa == pplist); 1164 ASSERT(sptd->spt_ppa[page_index]); 1165 /* 1166 * Since we cache the entire ISM segment, we want to 1167 * set ppp to point to the first slot that corresponds 1168 * to the requested addr, i.e. page_index. 1169 */ 1170 *ppp = &(sptd->spt_ppa[page_index]); 1171 return (0); 1172 } 1173 1174 /* The L_PAGELOCK case... */ 1175 mutex_enter(&sptd->spt_lock); 1176 1177 /* 1178 * try to find pages in segment page cache 1179 */ 1180 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 1181 sptd->spt_prot); 1182 if (pplist != NULL) { 1183 ASSERT(sptd->spt_ppa == pplist); 1184 /* 1185 * Since we cache the entire segment, we want to 1186 * set ppp to point to the first slot that corresponds 1187 * to the requested addr, i.e. page_index. 1188 */ 1189 mutex_exit(&sptd->spt_lock); 1190 *ppp = &(sptd->spt_ppa[page_index]); 1191 return (0); 1192 } 1193 1194 if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) == 1195 SEGP_FAIL) { 1196 mutex_exit(&sptd->spt_lock); 1197 *ppp = NULL; 1198 return (ENOTSUP); 1199 } 1200 1201 /* 1202 * No need to worry about protections because ISM pages 1203 * are always rw. 1204 */ 1205 pl = pplist = NULL; 1206 1207 /* 1208 * Do we need to build the ppa array? 1209 */ 1210 if (sptd->spt_ppa == NULL) { 1211 ASSERT(sptd->spt_ppa == pplist); 1212 1213 spt_base = sptseg->s_base; 1214 pl_built = 1; 1215 1216 /* 1217 * availrmem is decremented once during anon_swap_adjust() 1218 * and is incremented during the anon_unresv(), which is 1219 * called from shm_rm_amp() when the segment is destroyed. 1220 */ 1221 amp = sptd->spt_amp; 1222 ASSERT(amp != NULL); 1223 1224 /* pcachecnt is protected by sptd->spt_lock */ 1225 ASSERT(sptd->spt_pcachecnt == 0); 1226 pplist = kmem_zalloc(sizeof (page_t *) 1227 * btopr(sptd->spt_amp->size), KM_SLEEP); 1228 pl = pplist; 1229 1230 anon_index = seg_page(sptseg, spt_base); 1231 1232 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1233 for (a = spt_base; a < (spt_base + sptd->spt_amp->size); 1234 a += PAGESIZE, anon_index++, pplist++) { 1235 ap = anon_get_ptr(amp->ahp, anon_index); 1236 ASSERT(ap != NULL); 1237 swap_xlate(ap, &vp, &off); 1238 pp = page_lookup(vp, off, SE_SHARED); 1239 ASSERT(pp != NULL); 1240 *pplist = pp; 1241 } 1242 ANON_LOCK_EXIT(&->a_rwlock); 1243 1244 if (a < (spt_base + sptd->spt_amp->size)) { 1245 ret = ENOTSUP; 1246 goto insert_fail; 1247 } 1248 sptd->spt_ppa = pl; 1249 } else { 1250 /* 1251 * We already have a valid ppa[]. 1252 */ 1253 pl = sptd->spt_ppa; 1254 } 1255 1256 ASSERT(pl != NULL); 1257 1258 ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size, 1259 pl, sptd->spt_prot, SEGP_FORCE_WIRED, segspt_reclaim); 1260 if (ret == SEGP_FAIL) { 1261 /* 1262 * seg_pinsert failed. We return 1263 * ENOTSUP, so that the as_pagelock() code will 1264 * then try the slower F_SOFTLOCK path. 1265 */ 1266 if (pl_built) { 1267 /* 1268 * No one else has referenced the ppa[]. 1269 * We created it and we need to destroy it. 1270 */ 1271 sptd->spt_ppa = NULL; 1272 } 1273 ret = ENOTSUP; 1274 goto insert_fail; 1275 } 1276 1277 /* 1278 * In either case, we increment softlockcnt on the 'real' segment. 1279 */ 1280 sptd->spt_pcachecnt++; 1281 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1282 1283 /* 1284 * We can now drop the sptd->spt_lock since the ppa[] 1285 * exists and he have incremented pacachecnt. 1286 */ 1287 mutex_exit(&sptd->spt_lock); 1288 1289 /* 1290 * Since we cache the entire segment, we want to 1291 * set ppp to point to the first slot that corresponds 1292 * to the requested addr, i.e. page_index. 1293 */ 1294 *ppp = &(sptd->spt_ppa[page_index]); 1295 return (ret); 1296 1297 insert_fail: 1298 /* 1299 * We will only reach this code if we tried and failed. 1300 * 1301 * And we can drop the lock on the dummy seg, once we've failed 1302 * to set up a new ppa[]. 1303 */ 1304 mutex_exit(&sptd->spt_lock); 1305 1306 if (pl_built) { 1307 /* 1308 * We created pl and we need to destroy it. 1309 */ 1310 pplist = pl; 1311 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT); 1312 while (np) { 1313 page_unlock(*pplist); 1314 np--; 1315 pplist++; 1316 } 1317 kmem_free(pl, sizeof (page_t *) * 1318 btopr(sptd->spt_amp->size)); 1319 } 1320 if (shmd->shm_softlockcnt <= 0) { 1321 if (AS_ISUNMAPWAIT(seg->s_as)) { 1322 mutex_enter(&seg->s_as->a_contents); 1323 if (AS_ISUNMAPWAIT(seg->s_as)) { 1324 AS_CLRUNMAPWAIT(seg->s_as); 1325 cv_broadcast(&seg->s_as->a_cv); 1326 } 1327 mutex_exit(&seg->s_as->a_contents); 1328 } 1329 } 1330 *ppp = NULL; 1331 return (ret); 1332 } 1333 1334 /* 1335 * purge any cached pages in the I/O page cache 1336 */ 1337 static void 1338 segspt_purge(struct seg *seg) 1339 { 1340 seg_ppurge(seg); 1341 } 1342 1343 static int 1344 segspt_reclaim(struct seg *seg, caddr_t addr, size_t len, struct page **pplist, 1345 enum seg_rw rw) 1346 { 1347 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1348 struct seg *sptseg; 1349 struct spt_data *sptd; 1350 pgcnt_t npages, i, free_availrmem = 0; 1351 int done = 0; 1352 1353 #ifdef lint 1354 addr = addr; 1355 #endif 1356 sptseg = shmd->shm_sptseg; 1357 sptd = sptseg->s_data; 1358 npages = (len >> PAGESHIFT); 1359 ASSERT(npages); 1360 ASSERT(sptd->spt_pcachecnt != 0); 1361 ASSERT(sptd->spt_ppa == pplist); 1362 ASSERT(npages == btopr(sptd->spt_amp->size)); 1363 1364 /* 1365 * Acquire the lock on the dummy seg and destroy the 1366 * ppa array IF this is the last pcachecnt. 1367 */ 1368 mutex_enter(&sptd->spt_lock); 1369 if (--sptd->spt_pcachecnt == 0) { 1370 for (i = 0; i < npages; i++) { 1371 if (pplist[i] == NULL) { 1372 continue; 1373 } 1374 if (rw == S_WRITE) { 1375 hat_setrefmod(pplist[i]); 1376 } else { 1377 hat_setref(pplist[i]); 1378 } 1379 if ((sptd->spt_flags & SHM_PAGEABLE) && 1380 (sptd->spt_ppa_lckcnt[i] == 0)) 1381 free_availrmem++; 1382 page_unlock(pplist[i]); 1383 } 1384 if (sptd->spt_flags & SHM_PAGEABLE) { 1385 mutex_enter(&freemem_lock); 1386 availrmem += free_availrmem; 1387 mutex_exit(&freemem_lock); 1388 } 1389 /* 1390 * Since we want to cach/uncache the entire ISM segment, 1391 * we will track the pplist in a segspt specific field 1392 * ppa, that is initialized at the time we add an entry to 1393 * the cache. 1394 */ 1395 ASSERT(sptd->spt_pcachecnt == 0); 1396 kmem_free(pplist, sizeof (page_t *) * npages); 1397 sptd->spt_ppa = NULL; 1398 sptd->spt_flags &= ~DISM_PPA_CHANGED; 1399 done = 1; 1400 } 1401 mutex_exit(&sptd->spt_lock); 1402 /* 1403 * Now decrement softlockcnt. 1404 */ 1405 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1); 1406 1407 if (shmd->shm_softlockcnt <= 0) { 1408 if (AS_ISUNMAPWAIT(seg->s_as)) { 1409 mutex_enter(&seg->s_as->a_contents); 1410 if (AS_ISUNMAPWAIT(seg->s_as)) { 1411 AS_CLRUNMAPWAIT(seg->s_as); 1412 cv_broadcast(&seg->s_as->a_cv); 1413 } 1414 mutex_exit(&seg->s_as->a_contents); 1415 } 1416 } 1417 return (done); 1418 } 1419 1420 /* 1421 * Do a F_SOFTUNLOCK call over the range requested. 1422 * The range must have already been F_SOFTLOCK'ed. 1423 * 1424 * The calls to acquire and release the anon map lock mutex were 1425 * removed in order to avoid a deadly embrace during a DR 1426 * memory delete operation. (Eg. DR blocks while waiting for a 1427 * exclusive lock on a page that is being used for kaio; the 1428 * thread that will complete the kaio and call segspt_softunlock 1429 * blocks on the anon map lock; another thread holding the anon 1430 * map lock blocks on another page lock via the segspt_shmfault 1431 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.) 1432 * 1433 * The appropriateness of the removal is based upon the following: 1434 * 1. If we are holding a segment's reader lock and the page is held 1435 * shared, then the corresponding element in anonmap which points to 1436 * anon struct cannot change and there is no need to acquire the 1437 * anonymous map lock. 1438 * 2. Threads in segspt_softunlock have a reader lock on the segment 1439 * and already have the shared page lock, so we are guaranteed that 1440 * the anon map slot cannot change and therefore can call anon_get_ptr() 1441 * without grabbing the anonymous map lock. 1442 * 3. Threads that softlock a shared page break copy-on-write, even if 1443 * its a read. Thus cow faults can be ignored with respect to soft 1444 * unlocking, since the breaking of cow means that the anon slot(s) will 1445 * not be shared. 1446 */ 1447 static void 1448 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr, 1449 size_t len, enum seg_rw rw) 1450 { 1451 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1452 struct seg *sptseg; 1453 struct spt_data *sptd; 1454 page_t *pp; 1455 caddr_t adr; 1456 struct vnode *vp; 1457 u_offset_t offset; 1458 ulong_t anon_index; 1459 struct anon_map *amp; /* XXX - for locknest */ 1460 struct anon *ap = NULL; 1461 pgcnt_t npages; 1462 1463 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1464 1465 sptseg = shmd->shm_sptseg; 1466 sptd = sptseg->s_data; 1467 1468 /* 1469 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 1470 * and therefore their pages are SE_SHARED locked 1471 * for the entire life of the segment. 1472 */ 1473 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) && 1474 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) { 1475 goto softlock_decrement; 1476 } 1477 1478 /* 1479 * Any thread is free to do a page_find and 1480 * page_unlock() on the pages within this seg. 1481 * 1482 * We are already holding the as->a_lock on the user's 1483 * real segment, but we need to hold the a_lock on the 1484 * underlying dummy as. This is mostly to satisfy the 1485 * underlying HAT layer. 1486 */ 1487 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1488 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len); 1489 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1490 1491 amp = sptd->spt_amp; 1492 ASSERT(amp != NULL); 1493 anon_index = seg_page(sptseg, sptseg_addr); 1494 1495 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) { 1496 ap = anon_get_ptr(amp->ahp, anon_index++); 1497 ASSERT(ap != NULL); 1498 swap_xlate(ap, &vp, &offset); 1499 1500 /* 1501 * Use page_find() instead of page_lookup() to 1502 * find the page since we know that it has a 1503 * "shared" lock. 1504 */ 1505 pp = page_find(vp, offset); 1506 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1)); 1507 if (pp == NULL) { 1508 panic("segspt_softunlock: " 1509 "addr %p, ap %p, vp %p, off %llx", 1510 (void *)adr, (void *)ap, (void *)vp, offset); 1511 /*NOTREACHED*/ 1512 } 1513 1514 if (rw == S_WRITE) { 1515 hat_setrefmod(pp); 1516 } else if (rw != S_OTHER) { 1517 hat_setref(pp); 1518 } 1519 page_unlock(pp); 1520 } 1521 1522 softlock_decrement: 1523 npages = btopr(len); 1524 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages); 1525 if (shmd->shm_softlockcnt == 0) { 1526 /* 1527 * All SOFTLOCKS are gone. Wakeup any waiting 1528 * unmappers so they can try again to unmap. 1529 * Check for waiters first without the mutex 1530 * held so we don't always grab the mutex on 1531 * softunlocks. 1532 */ 1533 if (AS_ISUNMAPWAIT(seg->s_as)) { 1534 mutex_enter(&seg->s_as->a_contents); 1535 if (AS_ISUNMAPWAIT(seg->s_as)) { 1536 AS_CLRUNMAPWAIT(seg->s_as); 1537 cv_broadcast(&seg->s_as->a_cv); 1538 } 1539 mutex_exit(&seg->s_as->a_contents); 1540 } 1541 } 1542 } 1543 1544 int 1545 segspt_shmattach(struct seg *seg, caddr_t *argsp) 1546 { 1547 struct shm_data *shmd_arg = (struct shm_data *)argsp; 1548 struct shm_data *shmd; 1549 struct anon_map *shm_amp = shmd_arg->shm_amp; 1550 struct spt_data *sptd; 1551 int error = 0; 1552 1553 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1554 1555 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); 1556 if (shmd == NULL) 1557 return (ENOMEM); 1558 1559 shmd->shm_sptas = shmd_arg->shm_sptas; 1560 shmd->shm_amp = shm_amp; 1561 shmd->shm_sptseg = shmd_arg->shm_sptseg; 1562 1563 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0, 1564 NULL, 0, seg->s_size); 1565 1566 seg->s_data = (void *)shmd; 1567 seg->s_ops = &segspt_shmops; 1568 seg->s_szc = shmd->shm_sptseg->s_szc; 1569 sptd = shmd->shm_sptseg->s_data; 1570 1571 if (sptd->spt_flags & SHM_PAGEABLE) { 1572 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size), 1573 KM_NOSLEEP)) == NULL) { 1574 seg->s_data = (void *)NULL; 1575 kmem_free(shmd, (sizeof (*shmd))); 1576 return (ENOMEM); 1577 } 1578 shmd->shm_lckpgs = 0; 1579 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 1580 if ((error = hat_share(seg->s_as->a_hat, seg->s_base, 1581 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1582 seg->s_size, seg->s_szc)) != 0) { 1583 kmem_free(shmd->shm_vpage, 1584 btopr(shm_amp->size)); 1585 } 1586 } 1587 } else { 1588 error = hat_share(seg->s_as->a_hat, seg->s_base, 1589 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1590 seg->s_size, seg->s_szc); 1591 } 1592 if (error) { 1593 seg->s_szc = 0; 1594 seg->s_data = (void *)NULL; 1595 kmem_free(shmd, (sizeof (*shmd))); 1596 } else { 1597 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1598 shm_amp->refcnt++; 1599 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1600 } 1601 return (error); 1602 } 1603 1604 int 1605 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) 1606 { 1607 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1608 int reclaim = 1; 1609 1610 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1611 retry: 1612 if (shmd->shm_softlockcnt > 0) { 1613 if (reclaim == 1) { 1614 segspt_purge(seg); 1615 reclaim = 0; 1616 goto retry; 1617 } 1618 return (EAGAIN); 1619 } 1620 1621 if (ssize != seg->s_size) { 1622 #ifdef DEBUG 1623 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n", 1624 ssize, seg->s_size); 1625 #endif 1626 return (EINVAL); 1627 } 1628 1629 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK, 1630 NULL, 0); 1631 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc); 1632 1633 seg_free(seg); 1634 1635 return (0); 1636 } 1637 1638 void 1639 segspt_shmfree(struct seg *seg) 1640 { 1641 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1642 struct anon_map *shm_amp = shmd->shm_amp; 1643 1644 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1645 1646 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0, 1647 MC_UNLOCK, NULL, 0); 1648 1649 /* 1650 * Need to increment refcnt when attaching 1651 * and decrement when detaching because of dup(). 1652 */ 1653 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1654 shm_amp->refcnt--; 1655 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1656 1657 if (shmd->shm_vpage) { /* only for DISM */ 1658 kmem_free(shmd->shm_vpage, btopr(shm_amp->size)); 1659 shmd->shm_vpage = NULL; 1660 } 1661 kmem_free(shmd, sizeof (*shmd)); 1662 } 1663 1664 /*ARGSUSED*/ 1665 int 1666 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 1667 { 1668 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1669 1670 /* 1671 * Shared page table is more than shared mapping. 1672 * Individual process sharing page tables can't change prot 1673 * because there is only one set of page tables. 1674 * This will be allowed after private page table is 1675 * supported. 1676 */ 1677 /* need to return correct status error? */ 1678 return (0); 1679 } 1680 1681 1682 faultcode_t 1683 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr, 1684 size_t len, enum fault_type type, enum seg_rw rw) 1685 { 1686 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1687 struct seg *sptseg = shmd->shm_sptseg; 1688 struct as *curspt = shmd->shm_sptas; 1689 struct spt_data *sptd = sptseg->s_data; 1690 pgcnt_t npages; 1691 size_t share_sz, size; 1692 caddr_t segspt_addr, shm_addr; 1693 page_t **ppa; 1694 int i; 1695 ulong_t an_idx = 0; 1696 int err = 0; 1697 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0); 1698 1699 #ifdef lint 1700 hat = hat; 1701 #endif 1702 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1703 1704 /* 1705 * Because of the way spt is implemented 1706 * the realsize of the segment does not have to be 1707 * equal to the segment size itself. The segment size is 1708 * often in multiples of a page size larger than PAGESIZE. 1709 * The realsize is rounded up to the nearest PAGESIZE 1710 * based on what the user requested. This is a bit of 1711 * ungliness that is historical but not easily fixed 1712 * without re-designing the higher levels of ISM. 1713 */ 1714 ASSERT(addr >= seg->s_base); 1715 if (((addr + len) - seg->s_base) > sptd->spt_realsize) 1716 return (FC_NOMAP); 1717 /* 1718 * For all of the following cases except F_PROT, we need to 1719 * make any necessary adjustments to addr and len 1720 * and get all of the necessary page_t's into an array called ppa[]. 1721 * 1722 * The code in shmat() forces base addr and len of ISM segment 1723 * to be aligned to largest page size supported. Therefore, 1724 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large 1725 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 1726 * in large pagesize chunks, or else we will screw up the HAT 1727 * layer by calling hat_memload_array() with differing page sizes 1728 * over a given virtual range. 1729 */ 1730 share_sz = page_get_pagesize(sptseg->s_szc); 1731 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz); 1732 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), share_sz); 1733 npages = btopr(size); 1734 1735 /* 1736 * Now we need to convert from addr in segshm to addr in segspt. 1737 */ 1738 an_idx = seg_page(seg, shm_addr); 1739 segspt_addr = sptseg->s_base + ptob(an_idx); 1740 1741 ASSERT((segspt_addr + ptob(npages)) <= 1742 (sptseg->s_base + sptd->spt_realsize)); 1743 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size)); 1744 1745 switch (type) { 1746 1747 case F_SOFTLOCK: 1748 1749 mutex_enter(&freemem_lock); 1750 if (availrmem < tune.t_minarmem + npages) { 1751 mutex_exit(&freemem_lock); 1752 return (FC_MAKE_ERR(ENOMEM)); 1753 } else { 1754 availrmem -= npages; 1755 } 1756 mutex_exit(&freemem_lock); 1757 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages); 1758 /* 1759 * Fall through to the F_INVAL case to load up the hat layer 1760 * entries with the HAT_LOAD_LOCK flag. 1761 */ 1762 /* FALLTHRU */ 1763 case F_INVAL: 1764 1765 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 1766 return (FC_NOMAP); 1767 1768 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP); 1769 1770 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa); 1771 if (err != 0) { 1772 if (type == F_SOFTLOCK) { 1773 mutex_enter(&freemem_lock); 1774 availrmem += npages; 1775 mutex_exit(&freemem_lock); 1776 atomic_add_long((ulong_t *)( 1777 &(shmd->shm_softlockcnt)), -npages); 1778 } 1779 goto dism_err; 1780 } 1781 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1782 if (type == F_SOFTLOCK) { 1783 1784 /* 1785 * Load up the translation keeping it 1786 * locked and don't unlock the page. 1787 */ 1788 hat_memload_array(sptseg->s_as->a_hat, segspt_addr, 1789 size, ppa, sptd->spt_prot, 1790 HAT_LOAD_LOCK | HAT_LOAD_SHARE); 1791 } else { 1792 if (hat == seg->s_as->a_hat) { 1793 1794 /* 1795 * Migrate pages marked for migration 1796 */ 1797 if (lgrp_optimizations()) 1798 page_migrate(seg, shm_addr, ppa, 1799 npages); 1800 1801 /* CPU HAT */ 1802 hat_memload_array(sptseg->s_as->a_hat, 1803 segspt_addr, size, ppa, sptd->spt_prot, 1804 HAT_LOAD_SHARE); 1805 } else { 1806 /* XHAT. Pass real address */ 1807 hat_memload_array(hat, shm_addr, 1808 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE); 1809 } 1810 1811 /* 1812 * And now drop the SE_SHARED lock(s). 1813 */ 1814 if (dyn_ism_unmap) { 1815 for (i = 0; i < npages; i++) { 1816 page_unlock(ppa[i]); 1817 } 1818 } 1819 } 1820 1821 if (!dyn_ism_unmap) { 1822 if (hat_share(seg->s_as->a_hat, shm_addr, 1823 curspt->a_hat, segspt_addr, ptob(npages), 1824 seg->s_szc) != 0) { 1825 panic("hat_share err in DISM fault"); 1826 /* NOTREACHED */ 1827 } 1828 if (type == F_INVAL) { 1829 for (i = 0; i < npages; i++) { 1830 page_unlock(ppa[i]); 1831 } 1832 } 1833 } 1834 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1835 dism_err: 1836 kmem_free(ppa, npages * sizeof (page_t *)); 1837 return (err); 1838 1839 case F_SOFTUNLOCK: 1840 1841 mutex_enter(&freemem_lock); 1842 availrmem += npages; 1843 mutex_exit(&freemem_lock); 1844 1845 /* 1846 * This is a bit ugly, we pass in the real seg pointer, 1847 * but the segspt_addr is the virtual address within the 1848 * dummy seg. 1849 */ 1850 segspt_softunlock(seg, segspt_addr, size, rw); 1851 return (0); 1852 1853 case F_PROT: 1854 1855 /* 1856 * This takes care of the unusual case where a user 1857 * allocates a stack in shared memory and a register 1858 * window overflow is written to that stack page before 1859 * it is otherwise modified. 1860 * 1861 * We can get away with this because ISM segments are 1862 * always rw. Other than this unusual case, there 1863 * should be no instances of protection violations. 1864 */ 1865 return (0); 1866 1867 default: 1868 #ifdef DEBUG 1869 panic("segspt_dismfault default type?"); 1870 #else 1871 return (FC_NOMAP); 1872 #endif 1873 } 1874 } 1875 1876 1877 faultcode_t 1878 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr, 1879 size_t len, enum fault_type type, enum seg_rw rw) 1880 { 1881 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1882 struct seg *sptseg = shmd->shm_sptseg; 1883 struct as *curspt = shmd->shm_sptas; 1884 struct spt_data *sptd = sptseg->s_data; 1885 pgcnt_t npages; 1886 size_t share_size, size; 1887 caddr_t sptseg_addr, shm_addr; 1888 page_t *pp, **ppa; 1889 int i; 1890 u_offset_t offset; 1891 ulong_t anon_index = 0; 1892 struct vnode *vp; 1893 struct anon_map *amp; /* XXX - for locknest */ 1894 struct anon *ap = NULL; 1895 anon_sync_obj_t cookie; 1896 1897 #ifdef lint 1898 hat = hat; 1899 #endif 1900 1901 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1902 1903 if (sptd->spt_flags & SHM_PAGEABLE) { 1904 return (segspt_dismfault(hat, seg, addr, len, type, rw)); 1905 } 1906 1907 /* 1908 * Because of the way spt is implemented 1909 * the realsize of the segment does not have to be 1910 * equal to the segment size itself. The segment size is 1911 * often in multiples of a page size larger than PAGESIZE. 1912 * The realsize is rounded up to the nearest PAGESIZE 1913 * based on what the user requested. This is a bit of 1914 * ungliness that is historical but not easily fixed 1915 * without re-designing the higher levels of ISM. 1916 */ 1917 ASSERT(addr >= seg->s_base); 1918 if (((addr + len) - seg->s_base) > sptd->spt_realsize) 1919 return (FC_NOMAP); 1920 /* 1921 * For all of the following cases except F_PROT, we need to 1922 * make any necessary adjustments to addr and len 1923 * and get all of the necessary page_t's into an array called ppa[]. 1924 * 1925 * The code in shmat() forces base addr and len of ISM segment 1926 * to be aligned to largest page size supported. Therefore, 1927 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large 1928 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 1929 * in large pagesize chunks, or else we will screw up the HAT 1930 * layer by calling hat_memload_array() with differing page sizes 1931 * over a given virtual range. 1932 */ 1933 share_size = page_get_pagesize(sptseg->s_szc); 1934 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size); 1935 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), share_size); 1936 npages = btopr(size); 1937 1938 /* 1939 * Now we need to convert from addr in segshm to addr in segspt. 1940 */ 1941 anon_index = seg_page(seg, shm_addr); 1942 sptseg_addr = sptseg->s_base + ptob(anon_index); 1943 1944 /* 1945 * And now we may have to adjust npages downward if we have 1946 * exceeded the realsize of the segment or initial anon 1947 * allocations. 1948 */ 1949 if ((sptseg_addr + ptob(npages)) > 1950 (sptseg->s_base + sptd->spt_realsize)) 1951 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr; 1952 1953 npages = btopr(size); 1954 1955 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size)); 1956 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0); 1957 1958 switch (type) { 1959 1960 case F_SOFTLOCK: 1961 1962 /* 1963 * availrmem is decremented once during anon_swap_adjust() 1964 * and is incremented during the anon_unresv(), which is 1965 * called from shm_rm_amp() when the segment is destroyed. 1966 */ 1967 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages); 1968 /* 1969 * Some platforms assume that ISM pages are SE_SHARED 1970 * locked for the entire life of the segment. 1971 */ 1972 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) 1973 return (0); 1974 /* 1975 * Fall through to the F_INVAL case to load up the hat layer 1976 * entries with the HAT_LOAD_LOCK flag. 1977 */ 1978 1979 /* FALLTHRU */ 1980 case F_INVAL: 1981 1982 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 1983 return (FC_NOMAP); 1984 1985 /* 1986 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP 1987 * may still rely on this call to hat_share(). That 1988 * would imply that those hat's can fault on a 1989 * HAT_LOAD_LOCK translation, which would seem 1990 * contradictory. 1991 */ 1992 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 1993 if (hat_share(seg->s_as->a_hat, seg->s_base, 1994 curspt->a_hat, sptseg->s_base, 1995 sptseg->s_size, sptseg->s_szc) != 0) { 1996 panic("hat_share error in ISM fault"); 1997 /*NOTREACHED*/ 1998 } 1999 return (0); 2000 } 2001 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP); 2002 2003 /* 2004 * I see no need to lock the real seg, 2005 * here, because all of our work will be on the underlying 2006 * dummy seg. 2007 * 2008 * sptseg_addr and npages now account for large pages. 2009 */ 2010 amp = sptd->spt_amp; 2011 ASSERT(amp != NULL); 2012 anon_index = seg_page(sptseg, sptseg_addr); 2013 2014 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2015 for (i = 0; i < npages; i++) { 2016 anon_array_enter(amp, anon_index, &cookie); 2017 ap = anon_get_ptr(amp->ahp, anon_index++); 2018 ASSERT(ap != NULL); 2019 swap_xlate(ap, &vp, &offset); 2020 anon_array_exit(&cookie); 2021 pp = page_lookup(vp, offset, SE_SHARED); 2022 ASSERT(pp != NULL); 2023 ppa[i] = pp; 2024 } 2025 ANON_LOCK_EXIT(&->a_rwlock); 2026 ASSERT(i == npages); 2027 2028 /* 2029 * We are already holding the as->a_lock on the user's 2030 * real segment, but we need to hold the a_lock on the 2031 * underlying dummy as. This is mostly to satisfy the 2032 * underlying HAT layer. 2033 */ 2034 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 2035 if (type == F_SOFTLOCK) { 2036 /* 2037 * Load up the translation keeping it 2038 * locked and don't unlock the page. 2039 */ 2040 hat_memload_array(sptseg->s_as->a_hat, sptseg_addr, 2041 ptob(npages), ppa, sptd->spt_prot, 2042 HAT_LOAD_LOCK | HAT_LOAD_SHARE); 2043 } else { 2044 if (hat == seg->s_as->a_hat) { 2045 2046 /* 2047 * Migrate pages marked for migration. 2048 */ 2049 if (lgrp_optimizations()) 2050 page_migrate(seg, shm_addr, ppa, 2051 npages); 2052 2053 /* CPU HAT */ 2054 hat_memload_array(sptseg->s_as->a_hat, 2055 sptseg_addr, ptob(npages), ppa, 2056 sptd->spt_prot, HAT_LOAD_SHARE); 2057 } else { 2058 /* XHAT. Pass real address */ 2059 hat_memload_array(hat, shm_addr, 2060 ptob(npages), ppa, sptd->spt_prot, 2061 HAT_LOAD_SHARE); 2062 } 2063 2064 /* 2065 * And now drop the SE_SHARED lock(s). 2066 */ 2067 for (i = 0; i < npages; i++) 2068 page_unlock(ppa[i]); 2069 } 2070 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 2071 2072 kmem_free(ppa, sizeof (page_t *) * npages); 2073 return (0); 2074 case F_SOFTUNLOCK: 2075 2076 /* 2077 * This is a bit ugly, we pass in the real seg pointer, 2078 * but the sptseg_addr is the virtual address within the 2079 * dummy seg. 2080 */ 2081 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw); 2082 return (0); 2083 2084 case F_PROT: 2085 2086 /* 2087 * This takes care of the unusual case where a user 2088 * allocates a stack in shared memory and a register 2089 * window overflow is written to that stack page before 2090 * it is otherwise modified. 2091 * 2092 * We can get away with this because ISM segments are 2093 * always rw. Other than this unusual case, there 2094 * should be no instances of protection violations. 2095 */ 2096 return (0); 2097 2098 default: 2099 #ifdef DEBUG 2100 cmn_err(CE_WARN, "segspt_shmfault default type?"); 2101 #endif 2102 return (FC_NOMAP); 2103 } 2104 } 2105 2106 /*ARGSUSED*/ 2107 static faultcode_t 2108 segspt_shmfaulta(struct seg *seg, caddr_t addr) 2109 { 2110 return (0); 2111 } 2112 2113 /*ARGSUSED*/ 2114 static int 2115 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta) 2116 { 2117 return (0); 2118 } 2119 2120 /*ARGSUSED*/ 2121 static size_t 2122 segspt_shmswapout(struct seg *seg) 2123 { 2124 return (0); 2125 } 2126 2127 /* 2128 * duplicate the shared page tables 2129 */ 2130 int 2131 segspt_shmdup(struct seg *seg, struct seg *newseg) 2132 { 2133 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2134 struct anon_map *amp = shmd->shm_amp; 2135 struct shm_data *shmd_new; 2136 struct seg *spt_seg = shmd->shm_sptseg; 2137 struct spt_data *sptd = spt_seg->s_data; 2138 int error = 0; 2139 2140 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 2141 2142 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP); 2143 newseg->s_data = (void *)shmd_new; 2144 shmd_new->shm_sptas = shmd->shm_sptas; 2145 shmd_new->shm_amp = amp; 2146 shmd_new->shm_sptseg = shmd->shm_sptseg; 2147 newseg->s_ops = &segspt_shmops; 2148 newseg->s_szc = seg->s_szc; 2149 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc); 2150 2151 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2152 amp->refcnt++; 2153 ANON_LOCK_EXIT(&->a_rwlock); 2154 2155 if (sptd->spt_flags & SHM_PAGEABLE) { 2156 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP); 2157 shmd_new->shm_lckpgs = 0; 2158 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 2159 if ((error = hat_share(newseg->s_as->a_hat, 2160 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR, 2161 seg->s_size, seg->s_szc)) != 0) { 2162 kmem_free(shmd_new->shm_vpage, 2163 btopr(amp->size)); 2164 } 2165 } 2166 return (error); 2167 } else { 2168 return (hat_share(newseg->s_as->a_hat, newseg->s_base, 2169 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size, 2170 seg->s_szc)); 2171 2172 } 2173 } 2174 2175 /*ARGSUSED*/ 2176 int 2177 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot) 2178 { 2179 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2180 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2181 2182 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2183 2184 /* 2185 * ISM segment is always rw. 2186 */ 2187 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0); 2188 } 2189 2190 /* 2191 * Return an array of locked large pages, for empty slots allocate 2192 * private zero-filled anon pages. 2193 */ 2194 static int 2195 spt_anon_getpages( 2196 struct seg *sptseg, 2197 caddr_t sptaddr, 2198 size_t len, 2199 page_t *ppa[]) 2200 { 2201 struct spt_data *sptd = sptseg->s_data; 2202 struct anon_map *amp = sptd->spt_amp; 2203 enum seg_rw rw = sptd->spt_prot; 2204 uint_t szc = sptseg->s_szc; 2205 size_t pg_sz, share_sz = page_get_pagesize(szc); 2206 pgcnt_t lp_npgs; 2207 caddr_t lp_addr, e_sptaddr; 2208 uint_t vpprot, ppa_szc = 0; 2209 struct vpage *vpage = NULL; 2210 ulong_t j, ppa_idx; 2211 int err, ierr = 0; 2212 pgcnt_t an_idx; 2213 anon_sync_obj_t cookie; 2214 2215 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz)); 2216 ASSERT(len != 0); 2217 2218 pg_sz = share_sz; 2219 lp_npgs = btop(pg_sz); 2220 lp_addr = sptaddr; 2221 e_sptaddr = sptaddr + len; 2222 an_idx = seg_page(sptseg, sptaddr); 2223 ppa_idx = 0; 2224 2225 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2226 /*CONSTCOND*/ 2227 while (1) { 2228 for (; lp_addr < e_sptaddr; 2229 an_idx += lp_npgs, lp_addr += pg_sz, 2230 ppa_idx += lp_npgs) { 2231 2232 anon_array_enter(amp, an_idx, &cookie); 2233 ppa_szc = (uint_t)-1; 2234 ierr = anon_map_getpages(amp, an_idx, szc, sptseg, 2235 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx], 2236 &ppa_szc, vpage, rw, 0, segvn_anypgsz, kcred); 2237 anon_array_exit(&cookie); 2238 2239 if (ierr != 0) { 2240 if (ierr > 0) { 2241 err = FC_MAKE_ERR(ierr); 2242 goto lpgs_err; 2243 } 2244 break; 2245 } 2246 } 2247 if (lp_addr == e_sptaddr) { 2248 break; 2249 } 2250 ASSERT(lp_addr < e_sptaddr); 2251 2252 /* 2253 * ierr == -1 means we failed to allocate a large page. 2254 * so do a size down operation. 2255 * 2256 * ierr == -2 means some other process that privately shares 2257 * pages with this process has allocated a larger page and we 2258 * need to retry with larger pages. So do a size up 2259 * operation. This relies on the fact that large pages are 2260 * never partially shared i.e. if we share any constituent 2261 * page of a large page with another process we must share the 2262 * entire large page. Note this cannot happen for SOFTLOCK 2263 * case, unless current address (lpaddr) is at the beginning 2264 * of the next page size boundary because the other process 2265 * couldn't have relocated locked pages. 2266 */ 2267 ASSERT(ierr == -1 || ierr == -2); 2268 if (segvn_anypgsz) { 2269 ASSERT(ierr == -2 || szc != 0); 2270 ASSERT(ierr == -1 || szc < sptseg->s_szc); 2271 szc = (ierr == -1) ? szc - 1 : szc + 1; 2272 } else { 2273 /* 2274 * For faults and segvn_anypgsz == 0 2275 * we need to be careful not to loop forever 2276 * if existing page is found with szc other 2277 * than 0 or seg->s_szc. This could be due 2278 * to page relocations on behalf of DR or 2279 * more likely large page creation. For this 2280 * case simply re-size to existing page's szc 2281 * if returned by anon_map_getpages(). 2282 */ 2283 if (ppa_szc == (uint_t)-1) { 2284 szc = (ierr == -1) ? 0 : sptseg->s_szc; 2285 } else { 2286 ASSERT(ppa_szc <= sptseg->s_szc); 2287 ASSERT(ierr == -2 || ppa_szc < szc); 2288 ASSERT(ierr == -1 || ppa_szc > szc); 2289 szc = ppa_szc; 2290 } 2291 } 2292 pg_sz = page_get_pagesize(szc); 2293 lp_npgs = btop(pg_sz); 2294 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz)); 2295 } 2296 ANON_LOCK_EXIT(&->a_rwlock); 2297 return (0); 2298 2299 lpgs_err: 2300 ANON_LOCK_EXIT(&->a_rwlock); 2301 for (j = 0; j < ppa_idx; j++) 2302 page_unlock(ppa[j]); 2303 return (err); 2304 } 2305 2306 int 2307 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages, 2308 page_t **ppa, ulong_t *lockmap, size_t pos) 2309 { 2310 struct shm_data *shmd = seg->s_data; 2311 struct spt_data *sptd = shmd->shm_sptseg->s_data; 2312 ulong_t i; 2313 int kernel; 2314 2315 for (i = 0; i < npages; anon_index++, pos++, i++) { 2316 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) { 2317 if (sptd->spt_ppa_lckcnt[anon_index] < 2318 (ushort_t)DISM_LOCK_MAX) { 2319 if (++sptd->spt_ppa_lckcnt[anon_index] == 2320 (ushort_t)DISM_LOCK_MAX) { 2321 cmn_err(CE_WARN, 2322 "DISM page lock limit " 2323 "reached on DISM offset 0x%lx\n", 2324 anon_index << PAGESHIFT); 2325 } 2326 kernel = (sptd->spt_ppa && 2327 sptd->spt_ppa[anon_index]) ? 1 : 0; 2328 if (!page_pp_lock(ppa[i], 0, kernel)) { 2329 /* unlock rest of the pages */ 2330 for (; i < npages; i++) 2331 page_unlock(ppa[i]); 2332 sptd->spt_ppa_lckcnt[anon_index]--; 2333 return (EAGAIN); 2334 } 2335 shmd->shm_lckpgs++; 2336 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED; 2337 if (lockmap != NULL) 2338 BT_SET(lockmap, pos); 2339 } 2340 } 2341 page_unlock(ppa[i]); 2342 } 2343 return (0); 2344 } 2345 2346 /*ARGSUSED*/ 2347 static int 2348 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len, 2349 int attr, int op, ulong_t *lockmap, size_t pos) 2350 { 2351 struct shm_data *shmd = seg->s_data; 2352 struct seg *sptseg = shmd->shm_sptseg; 2353 struct spt_data *sptd = sptseg->s_data; 2354 pgcnt_t npages, a_npages; 2355 page_t **ppa; 2356 pgcnt_t an_idx, a_an_idx, ppa_idx; 2357 caddr_t spt_addr, a_addr; /* spt and aligned address */ 2358 size_t a_len; /* aligned len */ 2359 size_t share_sz; 2360 ulong_t i; 2361 int sts = 0; 2362 2363 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2364 2365 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 2366 return (0); 2367 } 2368 2369 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK); 2370 an_idx = seg_page(seg, addr); 2371 npages = btopr(len); 2372 2373 if (an_idx + npages > btopr(shmd->shm_amp->size)) { 2374 return (ENOMEM); 2375 } 2376 2377 if (op == MC_LOCK) { 2378 /* 2379 * Need to align addr and size request if they are not 2380 * aligned so we can always allocate large page(s) however 2381 * we only lock what was requested in initial request. 2382 */ 2383 share_sz = page_get_pagesize(sptseg->s_szc); 2384 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz); 2385 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)), 2386 share_sz); 2387 a_npages = btop(a_len); 2388 a_an_idx = seg_page(seg, a_addr); 2389 spt_addr = sptseg->s_base + ptob(a_an_idx); 2390 ppa_idx = an_idx - a_an_idx; 2391 2392 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages), 2393 KM_NOSLEEP)) == NULL) { 2394 return (ENOMEM); 2395 } 2396 2397 /* 2398 * Don't cache any new pages for IO and 2399 * flush any cached pages. 2400 */ 2401 mutex_enter(&sptd->spt_lock); 2402 if (sptd->spt_ppa != NULL) 2403 sptd->spt_flags |= DISM_PPA_CHANGED; 2404 2405 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa); 2406 if (sts != 0) { 2407 mutex_exit(&sptd->spt_lock); 2408 kmem_free(ppa, ((sizeof (page_t *)) * a_npages)); 2409 return (sts); 2410 } 2411 2412 sts = spt_lockpages(seg, an_idx, npages, 2413 &ppa[ppa_idx], lockmap, pos); 2414 /* 2415 * unlock remaining pages for requests which are not 2416 * aligned or not in 4 M chunks 2417 */ 2418 for (i = 0; i < ppa_idx; i++) 2419 page_unlock(ppa[i]); 2420 for (i = ppa_idx + npages; i < a_npages; i++) 2421 page_unlock(ppa[i]); 2422 if (sptd->spt_ppa != NULL) 2423 sptd->spt_flags |= DISM_PPA_CHANGED; 2424 mutex_exit(&sptd->spt_lock); 2425 2426 kmem_free(ppa, ((sizeof (page_t *)) * a_npages)); 2427 2428 } else if (op == MC_UNLOCK) { /* unlock */ 2429 struct anon_map *amp; 2430 struct anon *ap; 2431 struct vnode *vp; 2432 u_offset_t off; 2433 struct page *pp; 2434 int kernel; 2435 anon_sync_obj_t cookie; 2436 2437 amp = sptd->spt_amp; 2438 mutex_enter(&sptd->spt_lock); 2439 if (shmd->shm_lckpgs == 0) { 2440 mutex_exit(&sptd->spt_lock); 2441 return (0); 2442 } 2443 /* 2444 * Don't cache new IO pages. 2445 */ 2446 if (sptd->spt_ppa != NULL) 2447 sptd->spt_flags |= DISM_PPA_CHANGED; 2448 2449 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2450 for (i = 0; i < npages; i++, an_idx++) { 2451 if (shmd->shm_vpage[an_idx] & DISM_PG_LOCKED) { 2452 anon_array_enter(amp, an_idx, &cookie); 2453 ap = anon_get_ptr(amp->ahp, an_idx); 2454 ASSERT(ap); 2455 ASSERT(sptd->spt_ppa_lckcnt[an_idx] > 0); 2456 2457 swap_xlate(ap, &vp, &off); 2458 anon_array_exit(&cookie); 2459 pp = page_lookup(vp, off, SE_SHARED); 2460 ASSERT(pp); 2461 /* 2462 * the availrmem is decremented only for 2463 * pages which are not in seg pcache, 2464 * for pages in seg pcache availrmem was 2465 * decremented in _dismpagelock() (if 2466 * they were not locked here) 2467 */ 2468 kernel = (sptd->spt_ppa && 2469 sptd->spt_ppa[an_idx]) ? 1 : 0; 2470 page_pp_unlock(pp, 0, kernel); 2471 page_unlock(pp); 2472 shmd->shm_vpage[an_idx] &= ~DISM_PG_LOCKED; 2473 sptd->spt_ppa_lckcnt[an_idx]--; 2474 shmd->shm_lckpgs--; 2475 } 2476 } 2477 ANON_LOCK_EXIT(&->a_rwlock); 2478 if (sptd->spt_ppa != NULL) 2479 sptd->spt_flags |= DISM_PPA_CHANGED; 2480 mutex_exit(&sptd->spt_lock); 2481 } 2482 return (sts); 2483 } 2484 2485 /*ARGSUSED*/ 2486 int 2487 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 2488 { 2489 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2490 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2491 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1; 2492 2493 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2494 2495 /* 2496 * ISM segment is always rw. 2497 */ 2498 while (--pgno >= 0) 2499 *protv++ = sptd->spt_prot; 2500 return (0); 2501 } 2502 2503 /*ARGSUSED*/ 2504 u_offset_t 2505 segspt_shmgetoffset(struct seg *seg, caddr_t addr) 2506 { 2507 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2508 2509 /* Offset does not matter in ISM memory */ 2510 2511 return ((u_offset_t)0); 2512 } 2513 2514 /* ARGSUSED */ 2515 int 2516 segspt_shmgettype(struct seg *seg, caddr_t addr) 2517 { 2518 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2519 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2520 2521 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2522 2523 /* 2524 * The shared memory mapping is always MAP_SHARED, SWAP is only 2525 * reserved for DISM 2526 */ 2527 return (MAP_SHARED | 2528 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE)); 2529 } 2530 2531 /*ARGSUSED*/ 2532 int 2533 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 2534 { 2535 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2536 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2537 2538 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2539 2540 *vpp = sptd->spt_vp; 2541 return (0); 2542 } 2543 2544 /*ARGSUSED*/ 2545 static int 2546 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 2547 { 2548 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2549 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2550 struct anon_map *amp; 2551 pgcnt_t pg_idx; 2552 2553 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2554 2555 if (behav == MADV_FREE) { 2556 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) 2557 return (0); 2558 2559 amp = sptd->spt_amp; 2560 pg_idx = seg_page(seg, addr); 2561 2562 mutex_enter(&sptd->spt_lock); 2563 if (sptd->spt_ppa != NULL) 2564 sptd->spt_flags |= DISM_PPA_CHANGED; 2565 mutex_exit(&sptd->spt_lock); 2566 2567 /* 2568 * Purge all DISM cached pages 2569 */ 2570 seg_ppurge_seg(segspt_reclaim); 2571 2572 mutex_enter(&sptd->spt_lock); 2573 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2574 anon_disclaim(amp, pg_idx, len, ANON_PGLOOKUP_BLK); 2575 ANON_LOCK_EXIT(&->a_rwlock); 2576 mutex_exit(&sptd->spt_lock); 2577 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP || 2578 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) { 2579 int already_set; 2580 ulong_t anon_index; 2581 lgrp_mem_policy_t policy; 2582 caddr_t shm_addr; 2583 size_t share_size; 2584 size_t size; 2585 struct seg *sptseg = shmd->shm_sptseg; 2586 caddr_t sptseg_addr; 2587 2588 /* 2589 * Align address and length to page size of underlying segment 2590 */ 2591 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc); 2592 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size); 2593 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), 2594 share_size); 2595 2596 amp = shmd->shm_amp; 2597 anon_index = seg_page(seg, shm_addr); 2598 2599 /* 2600 * And now we may have to adjust size downward if we have 2601 * exceeded the realsize of the segment or initial anon 2602 * allocations. 2603 */ 2604 sptseg_addr = sptseg->s_base + ptob(anon_index); 2605 if ((sptseg_addr + size) > 2606 (sptseg->s_base + sptd->spt_realsize)) 2607 size = (sptseg->s_base + sptd->spt_realsize) - 2608 sptseg_addr; 2609 2610 /* 2611 * Set memory allocation policy for this segment 2612 */ 2613 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED); 2614 already_set = lgrp_shm_policy_set(policy, amp, anon_index, 2615 NULL, 0, len); 2616 2617 /* 2618 * If random memory allocation policy set already, 2619 * don't bother reapplying it. 2620 */ 2621 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 2622 return (0); 2623 2624 /* 2625 * Mark any existing pages in the given range for 2626 * migration, flushing the I/O page cache, and using 2627 * underlying segment to calculate anon index and get 2628 * anonmap and vnode pointer from 2629 */ 2630 if (shmd->shm_softlockcnt > 0) 2631 segspt_purge(seg); 2632 2633 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0); 2634 } 2635 2636 return (0); 2637 } 2638 2639 /*ARGSUSED*/ 2640 void 2641 segspt_shmdump(struct seg *seg) 2642 { 2643 /* no-op for ISM segment */ 2644 } 2645 2646 /*ARGSUSED*/ 2647 static faultcode_t 2648 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 2649 { 2650 return (ENOTSUP); 2651 } 2652 2653 /* 2654 * get a memory ID for an addr in a given segment 2655 */ 2656 static int 2657 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp) 2658 { 2659 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2660 struct anon *ap; 2661 size_t anon_index; 2662 struct anon_map *amp = shmd->shm_amp; 2663 struct spt_data *sptd = shmd->shm_sptseg->s_data; 2664 struct seg *sptseg = shmd->shm_sptseg; 2665 anon_sync_obj_t cookie; 2666 2667 anon_index = seg_page(seg, addr); 2668 2669 if (addr > (seg->s_base + sptd->spt_realsize)) { 2670 return (EFAULT); 2671 } 2672 2673 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2674 anon_array_enter(amp, anon_index, &cookie); 2675 ap = anon_get_ptr(amp->ahp, anon_index); 2676 if (ap == NULL) { 2677 struct page *pp; 2678 caddr_t spt_addr = sptseg->s_base + ptob(anon_index); 2679 2680 pp = anon_zero(sptseg, spt_addr, &ap, kcred); 2681 if (pp == NULL) { 2682 anon_array_exit(&cookie); 2683 ANON_LOCK_EXIT(&->a_rwlock); 2684 return (ENOMEM); 2685 } 2686 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 2687 page_unlock(pp); 2688 } 2689 anon_array_exit(&cookie); 2690 ANON_LOCK_EXIT(&->a_rwlock); 2691 memidp->val[0] = (uintptr_t)ap; 2692 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET; 2693 return (0); 2694 } 2695 2696 /* 2697 * Get memory allocation policy info for specified address in given segment 2698 */ 2699 static lgrp_mem_policy_info_t * 2700 segspt_shmgetpolicy(struct seg *seg, caddr_t addr) 2701 { 2702 struct anon_map *amp; 2703 ulong_t anon_index; 2704 lgrp_mem_policy_info_t *policy_info; 2705 struct shm_data *shm_data; 2706 2707 ASSERT(seg != NULL); 2708 2709 /* 2710 * Get anon_map from segshm 2711 * 2712 * Assume that no lock needs to be held on anon_map, since 2713 * it should be protected by its reference count which must be 2714 * nonzero for an existing segment 2715 * Need to grab readers lock on policy tree though 2716 */ 2717 shm_data = (struct shm_data *)seg->s_data; 2718 if (shm_data == NULL) 2719 return (NULL); 2720 amp = shm_data->shm_amp; 2721 ASSERT(amp->refcnt != 0); 2722 2723 /* 2724 * Get policy info 2725 * 2726 * Assume starting anon index of 0 2727 */ 2728 anon_index = seg_page(seg, addr); 2729 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0); 2730 2731 return (policy_info); 2732 } 2733 2734 /*ARGSUSED*/ 2735 static int 2736 segspt_shmcapable(struct seg *seg, segcapability_t capability) 2737 { 2738 return (0); 2739 } 2740