1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/param.h> 29 #include <sys/user.h> 30 #include <sys/mman.h> 31 #include <sys/kmem.h> 32 #include <sys/sysmacros.h> 33 #include <sys/cmn_err.h> 34 #include <sys/systm.h> 35 #include <sys/tuneable.h> 36 #include <vm/hat.h> 37 #include <vm/seg.h> 38 #include <vm/as.h> 39 #include <vm/anon.h> 40 #include <vm/page.h> 41 #include <sys/buf.h> 42 #include <sys/swap.h> 43 #include <sys/atomic.h> 44 #include <vm/seg_spt.h> 45 #include <sys/debug.h> 46 #include <sys/vtrace.h> 47 #include <sys/shm.h> 48 #include <sys/shm_impl.h> 49 #include <sys/lgrp.h> 50 #include <sys/vmsystm.h> 51 #include <sys/policy.h> 52 #include <sys/project.h> 53 #include <sys/tnf_probe.h> 54 #include <sys/zone.h> 55 56 #define SEGSPTADDR (caddr_t)0x0 57 58 /* 59 * # pages used for spt 60 */ 61 size_t spt_used; 62 63 /* 64 * segspt_minfree is the memory left for system after ISM 65 * locked its pages; it is set up to 5% of availrmem in 66 * sptcreate when ISM is created. ISM should not use more 67 * than ~90% of availrmem; if it does, then the performance 68 * of the system may decrease. Machines with large memories may 69 * be able to use up more memory for ISM so we set the default 70 * segspt_minfree to 5% (which gives ISM max 95% of availrmem. 71 * If somebody wants even more memory for ISM (risking hanging 72 * the system) they can patch the segspt_minfree to smaller number. 
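 *
 * As an illustration (hypothetical numbers): on a system where
 * availrmem is 1,000,000 pages, sptcreate() will set segspt_minfree
 * to availrmem / 20 = 50,000 pages, so ISM may lock up to roughly
 * 950,000 pages (95% of availrmem) before hitting this floor.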
73 */ 74 pgcnt_t segspt_minfree = 0; 75 76 static int segspt_create(struct seg *seg, caddr_t argsp); 77 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize); 78 static void segspt_free(struct seg *seg); 79 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len); 80 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr); 81 82 static void 83 segspt_badop() 84 { 85 panic("segspt_badop called"); 86 /*NOTREACHED*/ 87 } 88 89 #define SEGSPT_BADOP(t) (t(*)())segspt_badop 90 91 struct seg_ops segspt_ops = { 92 SEGSPT_BADOP(int), /* dup */ 93 segspt_unmap, 94 segspt_free, 95 SEGSPT_BADOP(int), /* fault */ 96 SEGSPT_BADOP(faultcode_t), /* faulta */ 97 SEGSPT_BADOP(int), /* setprot */ 98 SEGSPT_BADOP(int), /* checkprot */ 99 SEGSPT_BADOP(int), /* kluster */ 100 SEGSPT_BADOP(size_t), /* swapout */ 101 SEGSPT_BADOP(int), /* sync */ 102 SEGSPT_BADOP(size_t), /* incore */ 103 SEGSPT_BADOP(int), /* lockop */ 104 SEGSPT_BADOP(int), /* getprot */ 105 SEGSPT_BADOP(u_offset_t), /* getoffset */ 106 SEGSPT_BADOP(int), /* gettype */ 107 SEGSPT_BADOP(int), /* getvp */ 108 SEGSPT_BADOP(int), /* advise */ 109 SEGSPT_BADOP(void), /* dump */ 110 SEGSPT_BADOP(int), /* pagelock */ 111 SEGSPT_BADOP(int), /* setpgsz */ 112 SEGSPT_BADOP(int), /* getmemid */ 113 segspt_getpolicy, /* getpolicy */ 114 SEGSPT_BADOP(int), /* capable */ 115 }; 116 117 static int segspt_shmdup(struct seg *seg, struct seg *newseg); 118 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize); 119 static void segspt_shmfree(struct seg *seg); 120 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg, 121 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw); 122 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr); 123 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr, 124 register size_t len, register uint_t prot); 125 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, 126 uint_t prot); 127 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta); 128 static size_t segspt_shmswapout(struct seg *seg); 129 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, 130 register char *vec); 131 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len, 132 int attr, uint_t flags); 133 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len, 134 int attr, int op, ulong_t *lockmap, size_t pos); 135 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, 136 uint_t *protv); 137 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr); 138 static int segspt_shmgettype(struct seg *seg, caddr_t addr); 139 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp); 140 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, 141 uint_t behav); 142 static void segspt_shmdump(struct seg *seg); 143 static int segspt_shmpagelock(struct seg *, caddr_t, size_t, 144 struct page ***, enum lock_type, enum seg_rw); 145 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t); 146 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *); 147 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t); 148 static int segspt_shmcapable(struct seg *, segcapability_t); 149 150 struct seg_ops segspt_shmops = { 151 segspt_shmdup, 152 segspt_shmunmap, 153 segspt_shmfree, 154 segspt_shmfault, 155 segspt_shmfaulta, 156 segspt_shmsetprot, 157 
segspt_shmcheckprot, 158 segspt_shmkluster, 159 segspt_shmswapout, 160 segspt_shmsync, 161 segspt_shmincore, 162 segspt_shmlockop, 163 segspt_shmgetprot, 164 segspt_shmgetoffset, 165 segspt_shmgettype, 166 segspt_shmgetvp, 167 segspt_shmadvise, /* advise */ 168 segspt_shmdump, 169 segspt_shmpagelock, 170 segspt_shmsetpgsz, 171 segspt_shmgetmemid, 172 segspt_shmgetpolicy, 173 segspt_shmcapable, 174 }; 175 176 static void segspt_purge(struct seg *seg); 177 static int segspt_reclaim(void *, caddr_t, size_t, struct page **, 178 enum seg_rw, int); 179 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len, 180 page_t **ppa); 181 182 183 184 /*ARGSUSED*/ 185 int 186 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp, 187 uint_t prot, uint_t flags, uint_t share_szc) 188 { 189 int err; 190 struct as *newas; 191 struct segspt_crargs sptcargs; 192 193 #ifdef DEBUG 194 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */, 195 tnf_ulong, size, size ); 196 #endif 197 if (segspt_minfree == 0) /* leave min 5% of availrmem for */ 198 segspt_minfree = availrmem/20; /* for the system */ 199 200 if (!hat_supported(HAT_SHARED_PT, (void *)0)) 201 return (EINVAL); 202 203 /* 204 * get a new as for this shared memory segment 205 */ 206 newas = as_alloc(); 207 newas->a_proc = NULL; 208 sptcargs.amp = amp; 209 sptcargs.prot = prot; 210 sptcargs.flags = flags; 211 sptcargs.szc = share_szc; 212 /* 213 * create a shared page table (spt) segment 214 */ 215 216 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) { 217 as_free(newas); 218 return (err); 219 } 220 *sptseg = sptcargs.seg_spt; 221 return (0); 222 } 223 224 void 225 sptdestroy(struct as *as, struct anon_map *amp) 226 { 227 228 #ifdef DEBUG 229 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */); 230 #endif 231 (void) as_unmap(as, SEGSPTADDR, amp->size); 232 as_free(as); 233 } 234 235 /* 236 * called from seg_free(). 237 * free (i.e., unlock, unmap, return to free list) 238 * all the pages in the given seg. 239 */ 240 void 241 segspt_free(struct seg *seg) 242 { 243 struct spt_data *sptd = (struct spt_data *)seg->s_data; 244 245 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 246 247 if (sptd != NULL) { 248 if (sptd->spt_realsize) 249 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize); 250 251 if (sptd->spt_ppa_lckcnt) 252 kmem_free(sptd->spt_ppa_lckcnt, 253 sizeof (*sptd->spt_ppa_lckcnt) 254 * btopr(sptd->spt_amp->size)); 255 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp)); 256 cv_destroy(&sptd->spt_cv); 257 mutex_destroy(&sptd->spt_lock); 258 kmem_free(sptd, sizeof (*sptd)); 259 } 260 } 261 262 /*ARGSUSED*/ 263 static int 264 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr, 265 uint_t flags) 266 { 267 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 268 269 return (0); 270 } 271 272 /*ARGSUSED*/ 273 static size_t 274 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec) 275 { 276 caddr_t eo_seg; 277 pgcnt_t npages; 278 struct shm_data *shmd = (struct shm_data *)seg->s_data; 279 struct seg *sptseg; 280 struct spt_data *sptd; 281 282 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 283 #ifdef lint 284 seg = seg; 285 #endif 286 sptseg = shmd->shm_sptseg; 287 sptd = sptseg->s_data; 288 289 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 290 eo_seg = addr + len; 291 while (addr < eo_seg) { 292 /* page exists, and it's locked. 
*/ 293 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED | 294 SEG_PAGE_ANON; 295 addr += PAGESIZE; 296 } 297 return (len); 298 } else { 299 struct anon_map *amp = shmd->shm_amp; 300 struct anon *ap; 301 page_t *pp; 302 pgcnt_t anon_index; 303 struct vnode *vp; 304 u_offset_t off; 305 ulong_t i; 306 int ret; 307 anon_sync_obj_t cookie; 308 309 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK); 310 anon_index = seg_page(seg, addr); 311 npages = btopr(len); 312 if (anon_index + npages > btopr(shmd->shm_amp->size)) { 313 return (EINVAL); 314 } 315 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 316 for (i = 0; i < npages; i++, anon_index++) { 317 ret = 0; 318 anon_array_enter(amp, anon_index, &cookie); 319 ap = anon_get_ptr(amp->ahp, anon_index); 320 if (ap != NULL) { 321 swap_xlate(ap, &vp, &off); 322 anon_array_exit(&cookie); 323 pp = page_lookup_nowait(vp, off, SE_SHARED); 324 if (pp != NULL) { 325 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON; 326 page_unlock(pp); 327 } 328 } else { 329 anon_array_exit(&cookie); 330 } 331 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) { 332 ret |= SEG_PAGE_LOCKED; 333 } 334 *vec++ = (char)ret; 335 } 336 ANON_LOCK_EXIT(&amp->a_rwlock); 337 return (len); 338 } 339 } 340 341 static int 342 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize) 343 { 344 size_t share_size; 345 346 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 347 348 /* 349 * seg.s_size may have been rounded up to the largest page size 350 * in shmat(). 351 * XXX This should be cleaned up. sptdestroy should take a length 352 * argument which should be the same as sptcreate. Then 353 * this rounding would not be needed (or is done in shm.c) 354 * Only the check for full segment will be needed. 355 * 356 * XXX -- shouldn't raddr == 0 always? These tests don't seem 357 * to be useful at all. 358 */ 359 share_size = page_get_pagesize(seg->s_szc); 360 ssize = P2ROUNDUP(ssize, share_size); 361 362 if (raddr == seg->s_base && ssize == seg->s_size) { 363 seg_free(seg); 364 return (0); 365 } else 366 return (EINVAL); 367 } 368 369 int 370 segspt_create(struct seg *seg, caddr_t argsp) 371 { 372 int err; 373 caddr_t addr = seg->s_base; 374 struct spt_data *sptd; 375 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp; 376 struct anon_map *amp = sptcargs->amp; 377 struct kshmid *sp = amp->a_sp; 378 struct cred *cred = CRED(); 379 ulong_t i, j, anon_index = 0; 380 pgcnt_t npages = btopr(amp->size); 381 struct vnode *vp; 382 page_t **ppa; 383 uint_t hat_flags; 384 size_t pgsz; 385 pgcnt_t pgcnt; 386 caddr_t a; 387 pgcnt_t pidx; 388 size_t sz; 389 proc_t *procp = curproc; 390 rctl_qty_t lockedbytes = 0; 391 kproject_t *proj; 392 393 /* 394 * We are holding the a_lock on the underlying dummy as, 395 * so we can make calls to the HAT layer. 
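 *
 * The dummy as was allocated by sptcreate() with as_alloc(), and we
 * are entered through as_map(), which acquires that dummy as's
 * a_lock as writer before calling the segment create routine (the
 * ASSERT below depends on this).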
*/ 397 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 398 ASSERT(sp != NULL); 399 400 #ifdef DEBUG 401 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */, 402 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size); 403 #endif 404 if ((sptcargs->flags & SHM_PAGEABLE) == 0) { 405 if (err = anon_swap_adjust(npages)) 406 return (err); 407 } 408 err = ENOMEM; 409 410 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL) 411 goto out1; 412 413 if ((sptcargs->flags & SHM_PAGEABLE) == 0) { 414 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages), 415 KM_NOSLEEP)) == NULL) 416 goto out2; 417 } 418 419 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL); 420 421 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL) 422 goto out3; 423 424 seg->s_ops = &segspt_ops; 425 sptd->spt_vp = vp; 426 sptd->spt_amp = amp; 427 sptd->spt_prot = sptcargs->prot; 428 sptd->spt_flags = sptcargs->flags; 429 seg->s_data = (caddr_t)sptd; 430 sptd->spt_ppa = NULL; 431 sptd->spt_ppa_lckcnt = NULL; 432 seg->s_szc = sptcargs->szc; 433 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL); 434 sptd->spt_gen = 0; 435 436 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 437 if (seg->s_szc > amp->a_szc) { 438 amp->a_szc = seg->s_szc; 439 } 440 ANON_LOCK_EXIT(&amp->a_rwlock); 441 442 /* 443 * Set policy to affect initial allocation of pages in 444 * anon_map_createpages() 445 */ 446 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index, 447 NULL, 0, ptob(npages)); 448 449 if (sptcargs->flags & SHM_PAGEABLE) { 450 size_t share_sz; 451 pgcnt_t new_npgs, more_pgs; 452 struct anon_hdr *nahp; 453 zone_t *zone; 454 455 share_sz = page_get_pagesize(seg->s_szc); 456 if (!IS_P2ALIGNED(amp->size, share_sz)) { 457 /* 458 * We are rounding up the size of the anon array 459 * on a 4 M boundary because we always create 4 M 460 * pages when locking and faulting them in, so we 461 * don't have to check for all corner cases, e.g. 462 * whether there is enough space to allocate a 463 * 4 M page. 464 */ 465 new_npgs = btop(P2ROUNDUP(amp->size, share_sz)); 466 more_pgs = new_npgs - npages; 467 468 /* 469 * The zone will never be NULL, as a fully created 470 * shm always has an owning zone. 
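 *
 * The extra pages added by this rounding are charged to that zone
 * by the anon_resv_zone() call below, so the round-up does not
 * escape the zone's anon reservation accounting.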
*/ 472 zone = sp->shm_perm.ipc_zone; 473 ASSERT(zone != NULL); 474 if (anon_resv_zone(ptob(more_pgs), zone) == 0) { 475 err = ENOMEM; 476 goto out4; 477 } 478 479 nahp = anon_create(new_npgs, ANON_SLEEP); 480 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 481 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages, 482 ANON_SLEEP); 483 anon_release(amp->ahp, npages); 484 amp->ahp = nahp; 485 ASSERT(amp->swresv == ptob(npages)); 486 amp->swresv = amp->size = ptob(new_npgs); 487 ANON_LOCK_EXIT(&amp->a_rwlock); 488 npages = new_npgs; 489 } 490 491 sptd->spt_ppa_lckcnt = kmem_zalloc(npages * 492 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP); 493 sptd->spt_pcachecnt = 0; 494 sptd->spt_realsize = ptob(npages); 495 sptcargs->seg_spt = seg; 496 return (0); 497 } 498 499 /* 500 * get array of pages for each anon slot in amp 501 */ 502 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa, 503 seg, addr, S_CREATE, cred)) != 0) 504 goto out4; 505 506 mutex_enter(&sp->shm_mlock); 507 508 /* May be partially locked, so count bytes to charge for locking */ 509 for (i = 0; i < npages; i++) 510 if (ppa[i]->p_lckcnt == 0) 511 lockedbytes += PAGESIZE; 512 513 proj = sp->shm_perm.ipc_proj; 514 515 if (lockedbytes > 0) { 516 mutex_enter(&procp->p_lock); 517 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) { 518 mutex_exit(&procp->p_lock); 519 mutex_exit(&sp->shm_mlock); 520 for (i = 0; i < npages; i++) 521 page_unlock(ppa[i]); 522 err = ENOMEM; 523 goto out4; 524 } 525 mutex_exit(&procp->p_lock); 526 } 527 528 /* 529 * addr is initial address corresponding to the first page on ppa list 530 */ 531 for (i = 0; i < npages; i++) { 532 /* attempt to lock all pages */ 533 if (page_pp_lock(ppa[i], 0, 1) == 0) { 534 /* 535 * if unable to lock any page, unlock all 536 * of them and return error 537 */ 538 for (j = 0; j < i; j++) 539 page_pp_unlock(ppa[j], 0, 1); 540 for (i = 0; i < npages; i++) 541 page_unlock(ppa[i]); 542 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0); 543 mutex_exit(&sp->shm_mlock); 544 err = ENOMEM; 545 goto out4; 546 } 547 } 548 mutex_exit(&sp->shm_mlock); 549 550 /* 551 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 552 * for the entire life of the segment, for example platforms 553 * that do not support Dynamic Reconfiguration. 554 */ 555 hat_flags = HAT_LOAD_SHARE; 556 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) 557 hat_flags |= HAT_LOAD_LOCK; 558 559 /* 560 * Load translations one large page at a time 561 * to make sure we don't create mappings bigger than 562 * the segment's size code, in case the underlying pages 563 * are shared with a segvn segment that uses a bigger 564 * size code than we do. 565 */ 566 pgsz = page_get_pagesize(seg->s_szc); 567 pgcnt = page_get_pagecnt(seg->s_szc); 568 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) { 569 sz = MIN(pgsz, ptob(npages - pidx)); 570 hat_memload_array(seg->s_as->a_hat, a, sz, 571 &ppa[pidx], sptd->spt_prot, hat_flags); 572 } 573 574 /* 575 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 576 * we will leave the pages locked SE_SHARED for the life 577 * of the ISM segment. This will prevent any calls to 578 * hat_pageunload() on this ISM segment for those platforms. 579 */ 580 if (!(hat_flags & HAT_LOAD_LOCK)) { 581 /* 582 * On platforms that support HAT_DYNAMIC_ISM_UNMAP, 583 * we no longer need to hold the SE_SHARED lock on the pages, 584 * since L_PAGELOCK and F_SOFTLOCK calls will grab the 585 * SE_SHARED lock on the pages as necessary. 
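 *
 * (The loop below drops those SE_SHARED locks; if the pages are
 * needed again, the L_PAGELOCK path in segspt_shmpagelock() and the
 * F_SOFTLOCK path in segspt_shmfault() re-acquire SE_SHARED via
 * page_lookup().)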
586 */ 587 for (i = 0; i < npages; i++) 588 page_unlock(ppa[i]); 589 } 590 sptd->spt_pcachecnt = 0; 591 kmem_free(ppa, ((sizeof (page_t *)) * npages)); 592 sptd->spt_realsize = ptob(npages); 593 atomic_add_long(&spt_used, npages); 594 sptcargs->seg_spt = seg; 595 return (0); 596 597 out4: 598 seg->s_data = NULL; 599 kmem_free(vp, sizeof (*vp)); 600 cv_destroy(&sptd->spt_cv); 601 out3: 602 mutex_destroy(&sptd->spt_lock); 603 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 604 kmem_free(ppa, (sizeof (*ppa) * npages)); 605 out2: 606 kmem_free(sptd, sizeof (*sptd)); 607 out1: 608 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 609 anon_swap_restore(npages); 610 return (err); 611 } 612 613 /*ARGSUSED*/ 614 void 615 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len) 616 { 617 struct page *pp; 618 struct spt_data *sptd = (struct spt_data *)seg->s_data; 619 pgcnt_t npages; 620 ulong_t anon_idx; 621 struct anon_map *amp; 622 struct anon *ap; 623 struct vnode *vp; 624 u_offset_t off; 625 uint_t hat_flags; 626 int root = 0; 627 pgcnt_t pgs, curnpgs = 0; 628 page_t *rootpp; 629 rctl_qty_t unlocked_bytes = 0; 630 kproject_t *proj; 631 kshmid_t *sp; 632 633 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 634 635 len = P2ROUNDUP(len, PAGESIZE); 636 637 npages = btop(len); 638 639 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP; 640 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) || 641 (sptd->spt_flags & SHM_PAGEABLE)) { 642 hat_flags = HAT_UNLOAD_UNMAP; 643 } 644 645 hat_unload(seg->s_as->a_hat, addr, len, hat_flags); 646 647 amp = sptd->spt_amp; 648 if (sptd->spt_flags & SHM_PAGEABLE) 649 npages = btop(amp->size); 650 651 ASSERT(amp != NULL); 652 653 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 654 sp = amp->a_sp; 655 proj = sp->shm_perm.ipc_proj; 656 mutex_enter(&sp->shm_mlock); 657 } 658 for (anon_idx = 0; anon_idx < npages; anon_idx++) { 659 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 660 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) { 661 panic("segspt_free_pages: null app"); 662 /*NOTREACHED*/ 663 } 664 } else { 665 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx)) 666 == NULL) 667 continue; 668 } 669 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0); 670 swap_xlate(ap, &vp, &off); 671 672 /* 673 * If this platform supports HAT_DYNAMIC_ISM_UNMAP, 674 * the pages won't be having SE_SHARED lock at this 675 * point. 676 * 677 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 678 * the pages are still held SE_SHARED locked from the 679 * original segspt_create() 680 * 681 * Our goal is to get SE_EXCL lock on each page, remove 682 * permanent lock on it and invalidate the page. 683 */ 684 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 685 if (hat_flags == HAT_UNLOAD_UNMAP) 686 pp = page_lookup(vp, off, SE_EXCL); 687 else { 688 if ((pp = page_find(vp, off)) == NULL) { 689 panic("segspt_free_pages: " 690 "page not locked"); 691 /*NOTREACHED*/ 692 } 693 if (!page_tryupgrade(pp)) { 694 page_unlock(pp); 695 pp = page_lookup(vp, off, SE_EXCL); 696 } 697 } 698 if (pp == NULL) { 699 panic("segspt_free_pages: " 700 "page not in the system"); 701 /*NOTREACHED*/ 702 } 703 ASSERT(pp->p_lckcnt > 0); 704 page_pp_unlock(pp, 0, 1); 705 if (pp->p_lckcnt == 0) 706 unlocked_bytes += PAGESIZE; 707 } else { 708 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL) 709 continue; 710 } 711 /* 712 * It's logical to invalidate the pages here as in most cases 713 * these were created by segspt. 
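 *
 * Small pages are freed right away with VN_DISPOSE(B_INVAL);
 * constituent pages of a large page are walked until the whole set
 * has been seen and are then destroyed together through
 * page_destroy_pages() on the root page.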
714 */ 715 if (pp->p_szc != 0) { 716 if (root == 0) { 717 ASSERT(curnpgs == 0); 718 root = 1; 719 rootpp = pp; 720 pgs = curnpgs = page_get_pagecnt(pp->p_szc); 721 ASSERT(pgs > 1); 722 ASSERT(IS_P2ALIGNED(pgs, pgs)); 723 ASSERT(!(page_pptonum(pp) & (pgs - 1))); 724 curnpgs--; 725 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) { 726 ASSERT(curnpgs == 1); 727 ASSERT(page_pptonum(pp) == 728 page_pptonum(rootpp) + (pgs - 1)); 729 page_destroy_pages(rootpp); 730 root = 0; 731 curnpgs = 0; 732 } else { 733 ASSERT(curnpgs > 1); 734 ASSERT(page_pptonum(pp) == 735 page_pptonum(rootpp) + (pgs - curnpgs)); 736 curnpgs--; 737 } 738 } else { 739 if (root != 0 || curnpgs != 0) { 740 panic("segspt_free_pages: bad large page"); 741 /*NOTREACHED*/ 742 } 743 /*LINTED: constant in conditional context */ 744 VN_DISPOSE(pp, B_INVAL, 0, kcred); 745 } 746 } 747 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 748 if (unlocked_bytes > 0) 749 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0); 750 mutex_exit(&sp->shm_mlock); 751 } 752 if (root != 0 || curnpgs != 0) { 753 panic("segspt_free_pages: bad large page"); 754 /*NOTREACHED*/ 755 } 756 757 /* 758 * mark that pages have been released 759 */ 760 sptd->spt_realsize = 0; 761 762 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 763 atomic_add_long(&spt_used, -npages); 764 anon_swap_restore(npages); 765 } 766 } 767 768 /* 769 * Get memory allocation policy info for specified address in given segment 770 */ 771 static lgrp_mem_policy_info_t * 772 segspt_getpolicy(struct seg *seg, caddr_t addr) 773 { 774 struct anon_map *amp; 775 ulong_t anon_index; 776 lgrp_mem_policy_info_t *policy_info; 777 struct spt_data *spt_data; 778 779 ASSERT(seg != NULL); 780 781 /* 782 * Get anon_map from segspt 783 * 784 * Assume that no lock needs to be held on anon_map, since 785 * it should be protected by its reference count which must be 786 * nonzero for an existing segment 787 * Need to grab readers lock on policy tree though 788 */ 789 spt_data = (struct spt_data *)seg->s_data; 790 if (spt_data == NULL) 791 return (NULL); 792 amp = spt_data->spt_amp; 793 ASSERT(amp->refcnt != 0); 794 795 /* 796 * Get policy info 797 * 798 * Assume starting anon index of 0 799 */ 800 anon_index = seg_page(seg, addr); 801 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0); 802 803 return (policy_info); 804 } 805 806 /* 807 * DISM only. 808 * Return locked pages over a given range. 809 * 810 * We will cache all DISM locked pages and save the pplist for the 811 * entire segment in the ppa field of the underlying DISM segment structure. 812 * Later, during a call to segspt_reclaim() we will use this ppa array 813 * to page_unlock() all of the pages and then we will free this ppa list. 814 */ 815 /*ARGSUSED*/ 816 static int 817 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len, 818 struct page ***ppp, enum lock_type type, enum seg_rw rw) 819 { 820 struct shm_data *shmd = (struct shm_data *)seg->s_data; 821 struct seg *sptseg = shmd->shm_sptseg; 822 struct spt_data *sptd = sptseg->s_data; 823 pgcnt_t pg_idx, npages, tot_npages, npgs; 824 struct page **pplist, **pl, **ppa, *pp; 825 struct anon_map *amp; 826 spgcnt_t an_idx; 827 int ret = ENOTSUP; 828 uint_t pl_built = 0; 829 struct anon *ap; 830 struct vnode *vp; 831 u_offset_t off; 832 pgcnt_t claim_availrmem = 0; 833 uint_t szc; 834 835 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 836 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); 837 838 /* 839 * We want to lock/unlock the entire ISM segment. 
Therefore, 840 * we will be using the underlying sptseg and it's base address 841 * and length for the caching arguments. 842 */ 843 ASSERT(sptseg); 844 ASSERT(sptd); 845 846 pg_idx = seg_page(seg, addr); 847 npages = btopr(len); 848 849 /* 850 * check if the request is larger than number of pages covered 851 * by amp 852 */ 853 if (pg_idx + npages > btopr(sptd->spt_amp->size)) { 854 *ppp = NULL; 855 return (ENOTSUP); 856 } 857 858 if (type == L_PAGEUNLOCK) { 859 ASSERT(sptd->spt_ppa != NULL); 860 861 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size, 862 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim); 863 864 /* 865 * If someone is blocked while unmapping, we purge 866 * segment page cache and thus reclaim pplist synchronously 867 * without waiting for seg_pasync_thread. This speeds up 868 * unmapping in cases where munmap(2) is called, while 869 * raw async i/o is still in progress or where a thread 870 * exits on data fault in a multithreaded application. 871 */ 872 if ((sptd->spt_flags & DISM_PPA_CHANGED) || 873 (AS_ISUNMAPWAIT(seg->s_as) && 874 shmd->shm_softlockcnt > 0)) { 875 segspt_purge(seg); 876 } 877 return (0); 878 } 879 880 /* The L_PAGELOCK case ... */ 881 882 if (sptd->spt_flags & DISM_PPA_CHANGED) { 883 segspt_purge(seg); 884 /* 885 * for DISM ppa needs to be rebuild since 886 * number of locked pages could be changed 887 */ 888 *ppp = NULL; 889 return (ENOTSUP); 890 } 891 892 /* 893 * First try to find pages in segment page cache, without 894 * holding the segment lock. 895 */ 896 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size, 897 S_WRITE, SEGP_FORCE_WIRED); 898 if (pplist != NULL) { 899 ASSERT(sptd->spt_ppa != NULL); 900 ASSERT(sptd->spt_ppa == pplist); 901 ppa = sptd->spt_ppa; 902 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 903 if (ppa[an_idx] == NULL) { 904 seg_pinactive(seg, NULL, seg->s_base, 905 sptd->spt_amp->size, ppa, 906 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim); 907 *ppp = NULL; 908 return (ENOTSUP); 909 } 910 if ((szc = ppa[an_idx]->p_szc) != 0) { 911 npgs = page_get_pagecnt(szc); 912 an_idx = P2ROUNDUP(an_idx + 1, npgs); 913 } else { 914 an_idx++; 915 } 916 } 917 /* 918 * Since we cache the entire DISM segment, we want to 919 * set ppp to point to the first slot that corresponds 920 * to the requested addr, i.e. pg_idx. 921 */ 922 *ppp = &(sptd->spt_ppa[pg_idx]); 923 return (0); 924 } 925 926 mutex_enter(&sptd->spt_lock); 927 /* 928 * try to find pages in segment page cache with mutex 929 */ 930 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size, 931 S_WRITE, SEGP_FORCE_WIRED); 932 if (pplist != NULL) { 933 ASSERT(sptd->spt_ppa != NULL); 934 ASSERT(sptd->spt_ppa == pplist); 935 ppa = sptd->spt_ppa; 936 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 937 if (ppa[an_idx] == NULL) { 938 mutex_exit(&sptd->spt_lock); 939 seg_pinactive(seg, NULL, seg->s_base, 940 sptd->spt_amp->size, ppa, 941 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim); 942 *ppp = NULL; 943 return (ENOTSUP); 944 } 945 if ((szc = ppa[an_idx]->p_szc) != 0) { 946 npgs = page_get_pagecnt(szc); 947 an_idx = P2ROUNDUP(an_idx + 1, npgs); 948 } else { 949 an_idx++; 950 } 951 } 952 /* 953 * Since we cache the entire DISM segment, we want to 954 * set ppp to point to the first slot that corresponds 955 * to the requested addr, i.e. pg_idx. 
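 *
 * For instance, a request that starts three pages past seg->s_base
 * gives pg_idx == 3 via seg_page(), so the caller gets a pointer to
 * sptd->spt_ppa[3] and can walk the cached pages from there.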
*/ 957 mutex_exit(&sptd->spt_lock); 958 *ppp = &(sptd->spt_ppa[pg_idx]); 959 return (0); 960 } 961 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size, 962 SEGP_FORCE_WIRED) == SEGP_FAIL) { 963 mutex_exit(&sptd->spt_lock); 964 *ppp = NULL; 965 return (ENOTSUP); 966 } 967 968 /* 969 * No need to worry about protections because DISM pages are always rw. 970 */ 971 pl = pplist = NULL; 972 amp = sptd->spt_amp; 973 974 /* 975 * Do we need to build the ppa array? 976 */ 977 if (sptd->spt_ppa == NULL) { 978 pgcnt_t lpg_cnt = 0; 979 980 pl_built = 1; 981 tot_npages = btopr(sptd->spt_amp->size); 982 983 ASSERT(sptd->spt_pcachecnt == 0); 984 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP); 985 pl = pplist; 986 987 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 988 for (an_idx = 0; an_idx < tot_npages; ) { 989 ap = anon_get_ptr(amp->ahp, an_idx); 990 /* 991 * Cache only mlocked pages. For large pages, 992 * if one (constituent) page is mlocked, 993 * all pages for that large page 994 * are cached also. This is for quick 995 * lookups of the ppa array. 996 */ 997 if ((ap != NULL) && (lpg_cnt != 0 || 998 (sptd->spt_ppa_lckcnt[an_idx] != 0))) { 999 1000 swap_xlate(ap, &vp, &off); 1001 pp = page_lookup(vp, off, SE_SHARED); 1002 ASSERT(pp != NULL); 1003 if (lpg_cnt == 0) { 1004 lpg_cnt++; 1005 /* 1006 * For a small page, we are done -- 1007 * lpg_cnt is reset to 0 below. 1008 * 1009 * For a large page, we are guaranteed 1010 * to find the anon structures of all 1011 * constituent pages and a non-zero 1012 * lpg_cnt ensures that we don't test 1013 * for mlock for these. We are done 1014 * when lpg_cnt reaches (npgs + 1). 1015 * If we are not the first constituent 1016 * page, restart at the first one. 1017 */ 1018 npgs = page_get_pagecnt(pp->p_szc); 1019 if (!IS_P2ALIGNED(an_idx, npgs)) { 1020 an_idx = P2ALIGN(an_idx, npgs); 1021 page_unlock(pp); 1022 continue; 1023 } 1024 } 1025 if (++lpg_cnt > npgs) 1026 lpg_cnt = 0; 1027 1028 /* 1029 * availrmem is decremented only 1030 * for unlocked pages 1031 */ 1032 if (sptd->spt_ppa_lckcnt[an_idx] == 0) 1033 claim_availrmem++; 1034 pplist[an_idx] = pp; 1035 } 1036 an_idx++; 1037 } 1038 ANON_LOCK_EXIT(&amp->a_rwlock); 1039 1040 if (claim_availrmem) { 1041 mutex_enter(&freemem_lock); 1042 if (availrmem < tune.t_minarmem + claim_availrmem) { 1043 mutex_exit(&freemem_lock); 1044 ret = ENOTSUP; 1045 claim_availrmem = 0; 1046 goto insert_fail; 1047 } else { 1048 availrmem -= claim_availrmem; 1049 } 1050 mutex_exit(&freemem_lock); 1051 } 1052 1053 sptd->spt_ppa = pl; 1054 } else { 1055 /* 1056 * We already have a valid ppa[]. 1057 */ 1058 pl = sptd->spt_ppa; 1059 } 1060 1061 ASSERT(pl != NULL); 1062 1063 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size, 1064 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED, 1065 segspt_reclaim); 1066 if (ret == SEGP_FAIL) { 1067 /* 1068 * seg_pinsert failed. We return 1069 * ENOTSUP, so that the as_pagelock() code will 1070 * then try the slower F_SOFTLOCK path. 1071 */ 1072 if (pl_built) { 1073 /* 1074 * No one else has referenced the ppa[]. 1075 * We created it and we need to destroy it. 1076 */ 1077 sptd->spt_ppa = NULL; 1078 } 1079 ret = ENOTSUP; 1080 goto insert_fail; 1081 } 1082 1083 /* 1084 * In either case, we increment softlockcnt on the 'real' segment. 
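 *
 * The matching decrement is done in segspt_reclaim() when the
 * cached ppa list is finally released, so the count goes back down
 * once the cache entry is torn down.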
1085 */ 1086 sptd->spt_pcachecnt++; 1087 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1088 1089 ppa = sptd->spt_ppa; 1090 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 1091 if (ppa[an_idx] == NULL) { 1092 mutex_exit(&sptd->spt_lock); 1093 seg_pinactive(seg, NULL, seg->s_base, 1094 sptd->spt_amp->size, 1095 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim); 1096 *ppp = NULL; 1097 return (ENOTSUP); 1098 } 1099 if ((szc = ppa[an_idx]->p_szc) != 0) { 1100 npgs = page_get_pagecnt(szc); 1101 an_idx = P2ROUNDUP(an_idx + 1, npgs); 1102 } else { 1103 an_idx++; 1104 } 1105 } 1106 /* 1107 * We can now drop the sptd->spt_lock since the ppa[] 1108 * exists and he have incremented pacachecnt. 1109 */ 1110 mutex_exit(&sptd->spt_lock); 1111 1112 /* 1113 * Since we cache the entire segment, we want to 1114 * set ppp to point to the first slot that corresponds 1115 * to the requested addr, i.e. pg_idx. 1116 */ 1117 *ppp = &(sptd->spt_ppa[pg_idx]); 1118 return (0); 1119 1120 insert_fail: 1121 /* 1122 * We will only reach this code if we tried and failed. 1123 * 1124 * And we can drop the lock on the dummy seg, once we've failed 1125 * to set up a new ppa[]. 1126 */ 1127 mutex_exit(&sptd->spt_lock); 1128 1129 if (pl_built) { 1130 if (claim_availrmem) { 1131 mutex_enter(&freemem_lock); 1132 availrmem += claim_availrmem; 1133 mutex_exit(&freemem_lock); 1134 } 1135 1136 /* 1137 * We created pl and we need to destroy it. 1138 */ 1139 pplist = pl; 1140 for (an_idx = 0; an_idx < tot_npages; an_idx++) { 1141 if (pplist[an_idx] != NULL) 1142 page_unlock(pplist[an_idx]); 1143 } 1144 kmem_free(pl, sizeof (page_t *) * tot_npages); 1145 } 1146 1147 if (shmd->shm_softlockcnt <= 0) { 1148 if (AS_ISUNMAPWAIT(seg->s_as)) { 1149 mutex_enter(&seg->s_as->a_contents); 1150 if (AS_ISUNMAPWAIT(seg->s_as)) { 1151 AS_CLRUNMAPWAIT(seg->s_as); 1152 cv_broadcast(&seg->s_as->a_cv); 1153 } 1154 mutex_exit(&seg->s_as->a_contents); 1155 } 1156 } 1157 *ppp = NULL; 1158 return (ret); 1159 } 1160 1161 1162 1163 /* 1164 * return locked pages over a given range. 1165 * 1166 * We will cache the entire ISM segment and save the pplist for the 1167 * entire segment in the ppa field of the underlying ISM segment structure. 1168 * Later, during a call to segspt_reclaim() we will use this ppa array 1169 * to page_unlock() all of the pages and then we will free this ppa list. 1170 */ 1171 /*ARGSUSED*/ 1172 static int 1173 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len, 1174 struct page ***ppp, enum lock_type type, enum seg_rw rw) 1175 { 1176 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1177 struct seg *sptseg = shmd->shm_sptseg; 1178 struct spt_data *sptd = sptseg->s_data; 1179 pgcnt_t np, page_index, npages; 1180 caddr_t a, spt_base; 1181 struct page **pplist, **pl, *pp; 1182 struct anon_map *amp; 1183 ulong_t anon_index; 1184 int ret = ENOTSUP; 1185 uint_t pl_built = 0; 1186 struct anon *ap; 1187 struct vnode *vp; 1188 u_offset_t off; 1189 1190 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1191 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); 1192 1193 1194 /* 1195 * We want to lock/unlock the entire ISM segment. Therefore, 1196 * we will be using the underlying sptseg and it's base address 1197 * and length for the caching arguments. 
1198 */ 1199 ASSERT(sptseg); 1200 ASSERT(sptd); 1201 1202 if (sptd->spt_flags & SHM_PAGEABLE) { 1203 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw)); 1204 } 1205 1206 page_index = seg_page(seg, addr); 1207 npages = btopr(len); 1208 1209 /* 1210 * check if the request is larger than number of pages covered 1211 * by amp 1212 */ 1213 if (page_index + npages > btopr(sptd->spt_amp->size)) { 1214 *ppp = NULL; 1215 return (ENOTSUP); 1216 } 1217 1218 if (type == L_PAGEUNLOCK) { 1219 1220 ASSERT(sptd->spt_ppa != NULL); 1221 1222 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size, 1223 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim); 1224 1225 /* 1226 * If someone is blocked while unmapping, we purge 1227 * segment page cache and thus reclaim pplist synchronously 1228 * without waiting for seg_pasync_thread. This speeds up 1229 * unmapping in cases where munmap(2) is called, while 1230 * raw async i/o is still in progress or where a thread 1231 * exits on data fault in a multithreaded application. 1232 */ 1233 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 1234 segspt_purge(seg); 1235 } 1236 return (0); 1237 } 1238 1239 /* The L_PAGELOCK case... */ 1240 1241 /* 1242 * First try to find pages in segment page cache, without 1243 * holding the segment lock. 1244 */ 1245 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size, 1246 S_WRITE, SEGP_FORCE_WIRED); 1247 if (pplist != NULL) { 1248 ASSERT(sptd->spt_ppa == pplist); 1249 ASSERT(sptd->spt_ppa[page_index]); 1250 /* 1251 * Since we cache the entire ISM segment, we want to 1252 * set ppp to point to the first slot that corresponds 1253 * to the requested addr, i.e. page_index. 1254 */ 1255 *ppp = &(sptd->spt_ppa[page_index]); 1256 return (0); 1257 } 1258 1259 mutex_enter(&sptd->spt_lock); 1260 1261 /* 1262 * try to find pages in segment page cache 1263 */ 1264 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size, 1265 S_WRITE, SEGP_FORCE_WIRED); 1266 if (pplist != NULL) { 1267 ASSERT(sptd->spt_ppa == pplist); 1268 /* 1269 * Since we cache the entire segment, we want to 1270 * set ppp to point to the first slot that corresponds 1271 * to the requested addr, i.e. page_index. 1272 */ 1273 mutex_exit(&sptd->spt_lock); 1274 *ppp = &(sptd->spt_ppa[page_index]); 1275 return (0); 1276 } 1277 1278 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size, 1279 SEGP_FORCE_WIRED) == SEGP_FAIL) { 1280 mutex_exit(&sptd->spt_lock); 1281 *ppp = NULL; 1282 return (ENOTSUP); 1283 } 1284 1285 /* 1286 * No need to worry about protections because ISM pages 1287 * are always rw. 1288 */ 1289 pl = pplist = NULL; 1290 1291 /* 1292 * Do we need to build the ppa array? 1293 */ 1294 if (sptd->spt_ppa == NULL) { 1295 ASSERT(sptd->spt_ppa == pplist); 1296 1297 spt_base = sptseg->s_base; 1298 pl_built = 1; 1299 1300 /* 1301 * availrmem is decremented once during anon_swap_adjust() 1302 * and is incremented during the anon_unresv(), which is 1303 * called from shm_rm_amp() when the segment is destroyed. 
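 *
 * (That is also why, unlike the DISM path in segspt_dismpagelock(),
 * the loop below does not adjust availrmem while it builds the ppa
 * list.)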
*/ 1305 amp = sptd->spt_amp; 1306 ASSERT(amp != NULL); 1307 1308 /* pcachecnt is protected by sptd->spt_lock */ 1309 ASSERT(sptd->spt_pcachecnt == 0); 1310 pplist = kmem_zalloc(sizeof (page_t *) 1311 * btopr(sptd->spt_amp->size), KM_SLEEP); 1312 pl = pplist; 1313 1314 anon_index = seg_page(sptseg, spt_base); 1315 1316 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 1317 for (a = spt_base; a < (spt_base + sptd->spt_amp->size); 1318 a += PAGESIZE, anon_index++, pplist++) { 1319 ap = anon_get_ptr(amp->ahp, anon_index); 1320 ASSERT(ap != NULL); 1321 swap_xlate(ap, &vp, &off); 1322 pp = page_lookup(vp, off, SE_SHARED); 1323 ASSERT(pp != NULL); 1324 *pplist = pp; 1325 } 1326 ANON_LOCK_EXIT(&amp->a_rwlock); 1327 1328 if (a < (spt_base + sptd->spt_amp->size)) { 1329 ret = ENOTSUP; 1330 goto insert_fail; 1331 } 1332 sptd->spt_ppa = pl; 1333 } else { 1334 /* 1335 * We already have a valid ppa[]. 1336 */ 1337 pl = sptd->spt_ppa; 1338 } 1339 1340 ASSERT(pl != NULL); 1341 1342 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size, 1343 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED, 1344 segspt_reclaim); 1345 if (ret == SEGP_FAIL) { 1346 /* 1347 * seg_pinsert failed. We return 1348 * ENOTSUP, so that the as_pagelock() code will 1349 * then try the slower F_SOFTLOCK path. 1350 */ 1351 if (pl_built) { 1352 /* 1353 * No one else has referenced the ppa[]. 1354 * We created it and we need to destroy it. 1355 */ 1356 sptd->spt_ppa = NULL; 1357 } 1358 ret = ENOTSUP; 1359 goto insert_fail; 1360 } 1361 1362 /* 1363 * In either case, we increment softlockcnt on the 'real' segment. 1364 */ 1365 sptd->spt_pcachecnt++; 1366 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1367 1368 /* 1369 * We can now drop the sptd->spt_lock since the ppa[] 1370 * exists and we have incremented pcachecnt. 1371 */ 1372 mutex_exit(&sptd->spt_lock); 1373 1374 /* 1375 * Since we cache the entire segment, we want to 1376 * set ppp to point to the first slot that corresponds 1377 * to the requested addr, i.e. page_index. 1378 */ 1379 *ppp = &(sptd->spt_ppa[page_index]); 1380 return (0); 1381 1382 insert_fail: 1383 /* 1384 * We will only reach this code if we tried and failed. 1385 * 1386 * And we can drop the lock on the dummy seg, once we've failed 1387 * to set up a new ppa[]. 1388 */ 1389 mutex_exit(&sptd->spt_lock); 1390 1391 if (pl_built) { 1392 /* 1393 * We created pl and we need to destroy it. 
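 *
 * Only the pages that were actually looked up before the failure
 * are unlocked here; np below is computed from how far the fill
 * loop advanced (a - spt_base).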
1394 */ 1395 pplist = pl; 1396 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT); 1397 while (np) { 1398 page_unlock(*pplist); 1399 np--; 1400 pplist++; 1401 } 1402 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size)); 1403 } 1404 if (shmd->shm_softlockcnt <= 0) { 1405 if (AS_ISUNMAPWAIT(seg->s_as)) { 1406 mutex_enter(&seg->s_as->a_contents); 1407 if (AS_ISUNMAPWAIT(seg->s_as)) { 1408 AS_CLRUNMAPWAIT(seg->s_as); 1409 cv_broadcast(&seg->s_as->a_cv); 1410 } 1411 mutex_exit(&seg->s_as->a_contents); 1412 } 1413 } 1414 *ppp = NULL; 1415 return (ret); 1416 } 1417 1418 /* 1419 * purge any cached pages in the I/O page cache 1420 */ 1421 static void 1422 segspt_purge(struct seg *seg) 1423 { 1424 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED); 1425 } 1426 1427 static int 1428 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 1429 enum seg_rw rw, int async) 1430 { 1431 struct seg *seg = (struct seg *)ptag; 1432 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1433 struct seg *sptseg; 1434 struct spt_data *sptd; 1435 pgcnt_t npages, i, free_availrmem = 0; 1436 int done = 0; 1437 1438 #ifdef lint 1439 addr = addr; 1440 #endif 1441 sptseg = shmd->shm_sptseg; 1442 sptd = sptseg->s_data; 1443 npages = (len >> PAGESHIFT); 1444 ASSERT(npages); 1445 ASSERT(sptd->spt_pcachecnt != 0); 1446 ASSERT(sptd->spt_ppa == pplist); 1447 ASSERT(npages == btopr(sptd->spt_amp->size)); 1448 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1449 1450 /* 1451 * Acquire the lock on the dummy seg and destroy the 1452 * ppa array IF this is the last pcachecnt. 1453 */ 1454 mutex_enter(&sptd->spt_lock); 1455 if (--sptd->spt_pcachecnt == 0) { 1456 for (i = 0; i < npages; i++) { 1457 if (pplist[i] == NULL) { 1458 continue; 1459 } 1460 if (rw == S_WRITE) { 1461 hat_setrefmod(pplist[i]); 1462 } else { 1463 hat_setref(pplist[i]); 1464 } 1465 if ((sptd->spt_flags & SHM_PAGEABLE) && 1466 (sptd->spt_ppa_lckcnt[i] == 0)) 1467 free_availrmem++; 1468 page_unlock(pplist[i]); 1469 } 1470 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) { 1471 mutex_enter(&freemem_lock); 1472 availrmem += free_availrmem; 1473 mutex_exit(&freemem_lock); 1474 } 1475 /* 1476 * Since we want to cach/uncache the entire ISM segment, 1477 * we will track the pplist in a segspt specific field 1478 * ppa, that is initialized at the time we add an entry to 1479 * the cache. 1480 */ 1481 ASSERT(sptd->spt_pcachecnt == 0); 1482 kmem_free(pplist, sizeof (page_t *) * npages); 1483 sptd->spt_ppa = NULL; 1484 sptd->spt_flags &= ~DISM_PPA_CHANGED; 1485 sptd->spt_gen++; 1486 cv_broadcast(&sptd->spt_cv); 1487 done = 1; 1488 } 1489 mutex_exit(&sptd->spt_lock); 1490 1491 /* 1492 * If we are pcache async thread or called via seg_ppurge_wiredpp() we 1493 * may not hold AS lock (in this case async argument is not 0). This 1494 * means if softlockcnt drops to 0 after the decrement below address 1495 * space may get freed. We can't allow it since after softlock 1496 * derement to 0 we still need to access as structure for possible 1497 * wakeup of unmap waiters. To prevent the disappearance of as we take 1498 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes 1499 * this mutex as a barrier to make sure this routine completes before 1500 * segment is freed. 1501 * 1502 * The second complication we have to deal with in async case is a 1503 * possibility of missed wake up of unmap wait thread. 
When we don't 1504 * hold as lock here we may take a_contents lock before unmap wait 1505 * thread that was first to see softlockcnt was still not 0. As a 1506 * result we'll fail to wake up an unmap wait thread. To avoid this 1507 * race we set nounmapwait flag in as structure if we drop softlockcnt 1508 * to 0 if async is not 0. unmapwait thread 1509 * will not block if this flag is set. 1510 */ 1511 if (async) 1512 mutex_enter(&shmd->shm_segfree_syncmtx); 1513 1514 /* 1515 * Now decrement softlockcnt. 1516 */ 1517 ASSERT(shmd->shm_softlockcnt > 0); 1518 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1); 1519 1520 if (shmd->shm_softlockcnt <= 0) { 1521 if (async || AS_ISUNMAPWAIT(seg->s_as)) { 1522 mutex_enter(&seg->s_as->a_contents); 1523 if (async) 1524 AS_SETNOUNMAPWAIT(seg->s_as); 1525 if (AS_ISUNMAPWAIT(seg->s_as)) { 1526 AS_CLRUNMAPWAIT(seg->s_as); 1527 cv_broadcast(&seg->s_as->a_cv); 1528 } 1529 mutex_exit(&seg->s_as->a_contents); 1530 } 1531 } 1532 1533 if (async) 1534 mutex_exit(&shmd->shm_segfree_syncmtx); 1535 1536 return (done); 1537 } 1538 1539 /* 1540 * Do a F_SOFTUNLOCK call over the range requested. 1541 * The range must have already been F_SOFTLOCK'ed. 1542 * 1543 * The calls to acquire and release the anon map lock mutex were 1544 * removed in order to avoid a deadly embrace during a DR 1545 * memory delete operation. (Eg. DR blocks while waiting for a 1546 * exclusive lock on a page that is being used for kaio; the 1547 * thread that will complete the kaio and call segspt_softunlock 1548 * blocks on the anon map lock; another thread holding the anon 1549 * map lock blocks on another page lock via the segspt_shmfault 1550 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.) 1551 * 1552 * The appropriateness of the removal is based upon the following: 1553 * 1. If we are holding a segment's reader lock and the page is held 1554 * shared, then the corresponding element in anonmap which points to 1555 * anon struct cannot change and there is no need to acquire the 1556 * anonymous map lock. 1557 * 2. Threads in segspt_softunlock have a reader lock on the segment 1558 * and already have the shared page lock, so we are guaranteed that 1559 * the anon map slot cannot change and therefore can call anon_get_ptr() 1560 * without grabbing the anonymous map lock. 1561 * 3. Threads that softlock a shared page break copy-on-write, even if 1562 * its a read. Thus cow faults can be ignored with respect to soft 1563 * unlocking, since the breaking of cow means that the anon slot(s) will 1564 * not be shared. 1565 */ 1566 static void 1567 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr, 1568 size_t len, enum seg_rw rw) 1569 { 1570 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1571 struct seg *sptseg; 1572 struct spt_data *sptd; 1573 page_t *pp; 1574 caddr_t adr; 1575 struct vnode *vp; 1576 u_offset_t offset; 1577 ulong_t anon_index; 1578 struct anon_map *amp; /* XXX - for locknest */ 1579 struct anon *ap = NULL; 1580 pgcnt_t npages; 1581 1582 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1583 1584 sptseg = shmd->shm_sptseg; 1585 sptd = sptseg->s_data; 1586 1587 /* 1588 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 1589 * and therefore their pages are SE_SHARED locked 1590 * for the entire life of the segment. 
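 *
 * For such segments (ISM without HAT_DYNAMIC_ISM_UNMAP) there is
 * nothing to unload or unlock here, so we only drop the softlock
 * count; see the softlock_decrement label below.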
1591 */ 1592 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) && 1593 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) { 1594 goto softlock_decrement; 1595 } 1596 1597 /* 1598 * Any thread is free to do a page_find and 1599 * page_unlock() on the pages within this seg. 1600 * 1601 * We are already holding the as->a_lock on the user's 1602 * real segment, but we need to hold the a_lock on the 1603 * underlying dummy as. This is mostly to satisfy the 1604 * underlying HAT layer. 1605 */ 1606 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1607 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len); 1608 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1609 1610 amp = sptd->spt_amp; 1611 ASSERT(amp != NULL); 1612 anon_index = seg_page(sptseg, sptseg_addr); 1613 1614 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) { 1615 ap = anon_get_ptr(amp->ahp, anon_index++); 1616 ASSERT(ap != NULL); 1617 swap_xlate(ap, &vp, &offset); 1618 1619 /* 1620 * Use page_find() instead of page_lookup() to 1621 * find the page since we know that it has a 1622 * "shared" lock. 1623 */ 1624 pp = page_find(vp, offset); 1625 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1)); 1626 if (pp == NULL) { 1627 panic("segspt_softunlock: " 1628 "addr %p, ap %p, vp %p, off %llx", 1629 (void *)adr, (void *)ap, (void *)vp, offset); 1630 /*NOTREACHED*/ 1631 } 1632 1633 if (rw == S_WRITE) { 1634 hat_setrefmod(pp); 1635 } else if (rw != S_OTHER) { 1636 hat_setref(pp); 1637 } 1638 page_unlock(pp); 1639 } 1640 1641 softlock_decrement: 1642 npages = btopr(len); 1643 ASSERT(shmd->shm_softlockcnt >= npages); 1644 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages); 1645 if (shmd->shm_softlockcnt == 0) { 1646 /* 1647 * All SOFTLOCKS are gone. Wakeup any waiting 1648 * unmappers so they can try again to unmap. 1649 * Check for waiters first without the mutex 1650 * held so we don't always grab the mutex on 1651 * softunlocks. 
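 *
 * Hence the check below is repeated under a_contents before the
 * flag is cleared and a_cv is broadcast, to close the window
 * between the unlocked test and the wakeup.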
1652 */ 1653 if (AS_ISUNMAPWAIT(seg->s_as)) { 1654 mutex_enter(&seg->s_as->a_contents); 1655 if (AS_ISUNMAPWAIT(seg->s_as)) { 1656 AS_CLRUNMAPWAIT(seg->s_as); 1657 cv_broadcast(&seg->s_as->a_cv); 1658 } 1659 mutex_exit(&seg->s_as->a_contents); 1660 } 1661 } 1662 } 1663 1664 int 1665 segspt_shmattach(struct seg *seg, caddr_t *argsp) 1666 { 1667 struct shm_data *shmd_arg = (struct shm_data *)argsp; 1668 struct shm_data *shmd; 1669 struct anon_map *shm_amp = shmd_arg->shm_amp; 1670 struct spt_data *sptd; 1671 int error = 0; 1672 1673 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1674 1675 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); 1676 if (shmd == NULL) 1677 return (ENOMEM); 1678 1679 shmd->shm_sptas = shmd_arg->shm_sptas; 1680 shmd->shm_amp = shm_amp; 1681 shmd->shm_sptseg = shmd_arg->shm_sptseg; 1682 1683 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0, 1684 NULL, 0, seg->s_size); 1685 1686 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL); 1687 1688 seg->s_data = (void *)shmd; 1689 seg->s_ops = &segspt_shmops; 1690 seg->s_szc = shmd->shm_sptseg->s_szc; 1691 sptd = shmd->shm_sptseg->s_data; 1692 1693 if (sptd->spt_flags & SHM_PAGEABLE) { 1694 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size), 1695 KM_NOSLEEP)) == NULL) { 1696 seg->s_data = (void *)NULL; 1697 kmem_free(shmd, (sizeof (*shmd))); 1698 return (ENOMEM); 1699 } 1700 shmd->shm_lckpgs = 0; 1701 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 1702 if ((error = hat_share(seg->s_as->a_hat, seg->s_base, 1703 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1704 seg->s_size, seg->s_szc)) != 0) { 1705 kmem_free(shmd->shm_vpage, 1706 btopr(shm_amp->size)); 1707 } 1708 } 1709 } else { 1710 error = hat_share(seg->s_as->a_hat, seg->s_base, 1711 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1712 seg->s_size, seg->s_szc); 1713 } 1714 if (error) { 1715 seg->s_szc = 0; 1716 seg->s_data = (void *)NULL; 1717 kmem_free(shmd, (sizeof (*shmd))); 1718 } else { 1719 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1720 shm_amp->refcnt++; 1721 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1722 } 1723 return (error); 1724 } 1725 1726 int 1727 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) 1728 { 1729 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1730 int reclaim = 1; 1731 1732 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1733 retry: 1734 if (shmd->shm_softlockcnt > 0) { 1735 if (reclaim == 1) { 1736 segspt_purge(seg); 1737 reclaim = 0; 1738 goto retry; 1739 } 1740 return (EAGAIN); 1741 } 1742 1743 if (ssize != seg->s_size) { 1744 #ifdef DEBUG 1745 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n", 1746 ssize, seg->s_size); 1747 #endif 1748 return (EINVAL); 1749 } 1750 1751 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK, 1752 NULL, 0); 1753 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc); 1754 1755 seg_free(seg); 1756 1757 return (0); 1758 } 1759 1760 void 1761 segspt_shmfree(struct seg *seg) 1762 { 1763 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1764 struct anon_map *shm_amp = shmd->shm_amp; 1765 1766 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1767 1768 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0, 1769 MC_UNLOCK, NULL, 0); 1770 1771 /* 1772 * Need to increment refcnt when attaching 1773 * and decrement when detaching because of dup(). 
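 *
 * The matching increment is done at the end of segspt_shmattach(),
 * under the same anon map write lock.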
1774 */ 1775 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1776 shm_amp->refcnt--; 1777 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1778 1779 if (shmd->shm_vpage) { /* only for DISM */ 1780 kmem_free(shmd->shm_vpage, btopr(shm_amp->size)); 1781 shmd->shm_vpage = NULL; 1782 } 1783 1784 /* 1785 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's 1786 * still working with this segment without holding as lock. 1787 */ 1788 ASSERT(shmd->shm_softlockcnt == 0); 1789 mutex_enter(&shmd->shm_segfree_syncmtx); 1790 mutex_destroy(&shmd->shm_segfree_syncmtx); 1791 1792 kmem_free(shmd, sizeof (*shmd)); 1793 } 1794 1795 /*ARGSUSED*/ 1796 int 1797 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 1798 { 1799 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1800 1801 /* 1802 * Shared page table is more than shared mapping. 1803 * Individual process sharing page tables can't change prot 1804 * because there is only one set of page tables. 1805 * This will be allowed after private page table is 1806 * supported. 1807 */ 1808 /* need to return correct status error? */ 1809 return (0); 1810 } 1811 1812 1813 faultcode_t 1814 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr, 1815 size_t len, enum fault_type type, enum seg_rw rw) 1816 { 1817 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1818 struct seg *sptseg = shmd->shm_sptseg; 1819 struct as *curspt = shmd->shm_sptas; 1820 struct spt_data *sptd = sptseg->s_data; 1821 pgcnt_t npages; 1822 size_t size; 1823 caddr_t segspt_addr, shm_addr; 1824 page_t **ppa; 1825 int i; 1826 ulong_t an_idx = 0; 1827 int err = 0; 1828 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0); 1829 size_t pgsz; 1830 pgcnt_t pgcnt; 1831 caddr_t a; 1832 pgcnt_t pidx; 1833 1834 #ifdef lint 1835 hat = hat; 1836 #endif 1837 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1838 1839 /* 1840 * Because of the way spt is implemented 1841 * the realsize of the segment does not have to be 1842 * equal to the segment size itself. The segment size is 1843 * often in multiples of a page size larger than PAGESIZE. 1844 * The realsize is rounded up to the nearest PAGESIZE 1845 * based on what the user requested. This is a bit of 1846 * ungliness that is historical but not easily fixed 1847 * without re-designing the higher levels of ISM. 1848 */ 1849 ASSERT(addr >= seg->s_base); 1850 if (((addr + len) - seg->s_base) > sptd->spt_realsize) 1851 return (FC_NOMAP); 1852 /* 1853 * For all of the following cases except F_PROT, we need to 1854 * make any necessary adjustments to addr and len 1855 * and get all of the necessary page_t's into an array called ppa[]. 1856 * 1857 * The code in shmat() forces base addr and len of ISM segment 1858 * to be aligned to largest page size supported. Therefore, 1859 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large 1860 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 1861 * in large pagesize chunks, or else we will screw up the HAT 1862 * layer by calling hat_memload_array() with differing page sizes 1863 * over a given virtual range. 1864 */ 1865 pgsz = page_get_pagesize(sptseg->s_szc); 1866 pgcnt = page_get_pagecnt(sptseg->s_szc); 1867 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); 1868 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz); 1869 npages = btopr(size); 1870 1871 /* 1872 * Now we need to convert from addr in segshm to addr in segspt. 
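 *
 * seg_page() gives the page index of shm_addr within the user's
 * shm segment; the same index into the dummy spt segment yields the
 * address to fault on. For example (illustrative), a fault five
 * pages past the attach address resolves to sptseg->s_base + ptob(5).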
1873 */ 1874 an_idx = seg_page(seg, shm_addr); 1875 segspt_addr = sptseg->s_base + ptob(an_idx); 1876 1877 ASSERT((segspt_addr + ptob(npages)) <= 1878 (sptseg->s_base + sptd->spt_realsize)); 1879 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size)); 1880 1881 switch (type) { 1882 1883 case F_SOFTLOCK: 1884 1885 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages); 1886 /* 1887 * Fall through to the F_INVAL case to load up the hat layer 1888 * entries with the HAT_LOAD_LOCK flag. 1889 */ 1890 /* FALLTHRU */ 1891 case F_INVAL: 1892 1893 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 1894 return (FC_NOMAP); 1895 1896 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP); 1897 1898 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa); 1899 if (err != 0) { 1900 if (type == F_SOFTLOCK) { 1901 atomic_add_long((ulong_t *)( 1902 &(shmd->shm_softlockcnt)), -npages); 1903 } 1904 goto dism_err; 1905 } 1906 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1907 a = segspt_addr; 1908 pidx = 0; 1909 if (type == F_SOFTLOCK) { 1910 1911 /* 1912 * Load up the translation keeping it 1913 * locked and don't unlock the page. 1914 */ 1915 for (; pidx < npages; a += pgsz, pidx += pgcnt) { 1916 hat_memload_array(sptseg->s_as->a_hat, 1917 a, pgsz, &ppa[pidx], sptd->spt_prot, 1918 HAT_LOAD_LOCK | HAT_LOAD_SHARE); 1919 } 1920 } else { 1921 if (hat == seg->s_as->a_hat) { 1922 1923 /* 1924 * Migrate pages marked for migration 1925 */ 1926 if (lgrp_optimizations()) 1927 page_migrate(seg, shm_addr, ppa, 1928 npages); 1929 1930 /* CPU HAT */ 1931 for (; pidx < npages; 1932 a += pgsz, pidx += pgcnt) { 1933 hat_memload_array(sptseg->s_as->a_hat, 1934 a, pgsz, &ppa[pidx], 1935 sptd->spt_prot, 1936 HAT_LOAD_SHARE); 1937 } 1938 } else { 1939 /* XHAT. Pass real address */ 1940 hat_memload_array(hat, shm_addr, 1941 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE); 1942 } 1943 1944 /* 1945 * And now drop the SE_SHARED lock(s). 1946 */ 1947 if (dyn_ism_unmap) { 1948 for (i = 0; i < npages; i++) { 1949 page_unlock(ppa[i]); 1950 } 1951 } 1952 } 1953 1954 if (!dyn_ism_unmap) { 1955 if (hat_share(seg->s_as->a_hat, shm_addr, 1956 curspt->a_hat, segspt_addr, ptob(npages), 1957 seg->s_szc) != 0) { 1958 panic("hat_share err in DISM fault"); 1959 /* NOTREACHED */ 1960 } 1961 if (type == F_INVAL) { 1962 for (i = 0; i < npages; i++) { 1963 page_unlock(ppa[i]); 1964 } 1965 } 1966 } 1967 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1968 dism_err: 1969 kmem_free(ppa, npages * sizeof (page_t *)); 1970 return (err); 1971 1972 case F_SOFTUNLOCK: 1973 1974 /* 1975 * This is a bit ugly, we pass in the real seg pointer, 1976 * but the segspt_addr is the virtual address within the 1977 * dummy seg. 1978 */ 1979 segspt_softunlock(seg, segspt_addr, size, rw); 1980 return (0); 1981 1982 case F_PROT: 1983 1984 /* 1985 * This takes care of the unusual case where a user 1986 * allocates a stack in shared memory and a register 1987 * window overflow is written to that stack page before 1988 * it is otherwise modified. 1989 * 1990 * We can get away with this because ISM segments are 1991 * always rw. Other than this unusual case, there 1992 * should be no instances of protection violations. 
1993 */ 1994 return (0); 1995
1996 default: 1997 #ifdef DEBUG 1998 panic("segspt_dismfault default type?"); 1999 #else 2000 return (FC_NOMAP); 2001 #endif 2002 } 2003 } 2004 2005
2006 faultcode_t 2007 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr, 2008 size_t len, enum fault_type type, enum seg_rw rw) 2009 {
2010 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2011 struct seg *sptseg = shmd->shm_sptseg; 2012 struct as *curspt = shmd->shm_sptas; 2013 struct spt_data *sptd = sptseg->s_data;
2014 pgcnt_t npages; 2015 size_t size; 2016 caddr_t sptseg_addr, shm_addr; 2017 page_t *pp, **ppa; 2018 int i; 2019 u_offset_t offset; 2020 ulong_t anon_index = 0;
2021 struct vnode *vp; 2022 struct anon_map *amp; /* XXX - for locknest */ 2023 struct anon *ap = NULL; 2024 size_t pgsz; 2025 pgcnt_t pgcnt; 2026 caddr_t a; 2027 pgcnt_t pidx; 2028 size_t sz; 2029
2030 #ifdef lint 2031 hat = hat; 2032 #endif 2033
2034 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2035
2036 if (sptd->spt_flags & SHM_PAGEABLE) { 2037 return (segspt_dismfault(hat, seg, addr, len, type, rw)); 2038 } 2039
2040 /* 2041 * Because of the way spt is implemented 2042 * the realsize of the segment does not have to be 2043 * equal to the segment size itself. The segment size is
2044 * often in multiples of a page size larger than PAGESIZE. 2045 * The realsize is rounded up to the nearest PAGESIZE 2046 * based on what the user requested. This is a bit of
2047 * ugliness that is historical but not easily fixed 2048 * without re-designing the higher levels of ISM. 2049 */
2050 ASSERT(addr >= seg->s_base); 2051 if (((addr + len) - seg->s_base) > sptd->spt_realsize) 2052 return (FC_NOMAP);
2053 /* 2054 * For all of the following cases except F_PROT, we need to 2055 * make any necessary adjustments to addr and len 2056 * and get all of the necessary page_t's into an array called ppa[]. 2057 *
2058 * The code in shmat() forces base addr and len of ISM segment 2059 * to be aligned to largest page size supported. Therefore, 2060 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2061 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 2062 * in large pagesize chunks, or else we will screw up the HAT 2063 * layer by calling hat_memload_array() with differing page sizes
2064 * over a given virtual range. 2065 */ 2066 pgsz = page_get_pagesize(sptseg->s_szc); 2067 pgcnt = page_get_pagecnt(sptseg->s_szc);
2068 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); 2069 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz); 2070 npages = btopr(size); 2071
2072 /* 2073 * Now we need to convert from addr in segshm to addr in segspt. 2074 */ 2075 anon_index = seg_page(seg, shm_addr); 2076 sptseg_addr = sptseg->s_base + ptob(anon_index); 2077
2078 /* 2079 * And now we may have to adjust npages downward if we have 2080 * exceeded the realsize of the segment or initial anon 2081 * allocations. 2082 */
2083 if ((sptseg_addr + ptob(npages)) > 2084 (sptseg->s_base + sptd->spt_realsize)) 2085 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr; 2086
2087 npages = btopr(size); 2088
2089 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size)); 2090 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0); 2091
2092 switch (type) { 2093
2094 case F_SOFTLOCK: 2095
2096 /* 2097 * availrmem is decremented once during anon_swap_adjust() 2098 * and is incremented during the anon_unresv(), which is 2099 * called from shm_rm_amp() when the segment is destroyed.
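 * Hence only the per-mapping shm_softlockcnt is bumped here; availrmem is
 * not adjusted again on this path.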
2100 */ 2101 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2102 /* 2103 * Some platforms assume that ISM pages are SE_SHARED 2104 * locked for the entire life of the segment. 2105 */
2106 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) 2107 return (0);
2108 /* 2109 * Fall through to the F_INVAL case to load up the hat layer 2110 * entries with the HAT_LOAD_LOCK flag. 2111 */ 2112
2113 /* FALLTHRU */ 2114 case F_INVAL: 2115
2116 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 2117 return (FC_NOMAP); 2118
2119 /* 2120 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP 2121 * may still rely on this call to hat_share(). That
2122 * would imply that those hats can fault on a 2123 * HAT_LOAD_LOCK translation, which would seem 2124 * contradictory. 2125 */
2126 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 2127 if (hat_share(seg->s_as->a_hat, seg->s_base, 2128 curspt->a_hat, sptseg->s_base,
2129 sptseg->s_size, sptseg->s_szc) != 0) { 2130 panic("hat_share error in ISM fault"); 2131 /*NOTREACHED*/ 2132 } 2133 return (0); 2134 }
2135 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP); 2136
2137 /* 2138 * I see no need to lock the real seg 2139 * here, because all of our work will be on the underlying 2140 * dummy seg. 2141 *
2142 * sptseg_addr and npages now account for large pages. 2143 */ 2144 amp = sptd->spt_amp; 2145 ASSERT(amp != NULL); 2146 anon_index = seg_page(sptseg, sptseg_addr); 2147
2148 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2149 for (i = 0; i < npages; i++) { 2150 ap = anon_get_ptr(amp->ahp, anon_index++); 2151 ASSERT(ap != NULL); 2152 swap_xlate(ap, &vp, &offset);
2153 pp = page_lookup(vp, offset, SE_SHARED); 2154 ASSERT(pp != NULL); 2155 ppa[i] = pp; 2156 }
2157 ANON_LOCK_EXIT(&amp->a_rwlock); 2158 ASSERT(i == npages); 2159
2160 /* 2161 * We are already holding the as->a_lock on the user's 2162 * real segment, but we need to hold the a_lock on the 2163 * underlying dummy as. This is mostly to satisfy the
2164 * underlying HAT layer. 2165 */ 2166 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 2167 a = sptseg_addr; 2168 pidx = 0;
2169 if (type == F_SOFTLOCK) { 2170 /* 2171 * Load up the translation keeping it 2172 * locked and don't unlock the page. 2173 */
2174 for (; pidx < npages; a += pgsz, pidx += pgcnt) { 2175 sz = MIN(pgsz, ptob(npages - pidx)); 2176 hat_memload_array(sptseg->s_as->a_hat, a,
2177 sz, &ppa[pidx], sptd->spt_prot, 2178 HAT_LOAD_LOCK | HAT_LOAD_SHARE); 2179 } 2180 } else { 2181 if (hat == seg->s_as->a_hat) { 2182
2183 /* 2184 * Migrate pages marked for migration. 2185 */ 2186 if (lgrp_optimizations()) 2187 page_migrate(seg, shm_addr, ppa, 2188 npages); 2189
2190 /* CPU HAT */ 2191 for (; pidx < npages; 2192 a += pgsz, pidx += pgcnt) { 2193 sz = MIN(pgsz, ptob(npages - pidx));
2194 hat_memload_array(sptseg->s_as->a_hat, 2195 a, sz, &ppa[pidx], 2196 sptd->spt_prot, HAT_LOAD_SHARE); 2197 } 2198 } else {
2199 /* XHAT. Pass real address */ 2200 hat_memload_array(hat, shm_addr, 2201 ptob(npages), ppa, sptd->spt_prot, 2202 HAT_LOAD_SHARE); 2203 } 2204
2205 /* 2206 * And now drop the SE_SHARED lock(s). 2207 */ 2208 for (i = 0; i < npages; i++) 2209 page_unlock(ppa[i]); 2210 }
2211 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 2212
2213 kmem_free(ppa, sizeof (page_t *) * npages); 2214 return (0); 2215 case F_SOFTUNLOCK: 2216
2217 /* 2218 * This is a bit ugly, we pass in the real seg pointer, 2219 * but the sptseg_addr is the virtual address within the 2220 * dummy seg.
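 * The DISM fault path in segspt_dismfault() above handles F_SOFTUNLOCK the
 * same way.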
2221 */ 2222 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw); 2223 return (0); 2224
2225 case F_PROT: 2226
2227 /* 2228 * This takes care of the unusual case where a user 2229 * allocates a stack in shared memory and a register 2230 * window overflow is written to that stack page before
2231 * it is otherwise modified. 2232 * 2233 * We can get away with this because ISM segments are 2234 * always rw. Other than this unusual case, there
2235 * should be no instances of protection violations. 2236 */ 2237 return (0); 2238
2239 default: 2240 #ifdef DEBUG 2241 cmn_err(CE_WARN, "segspt_shmfault default type?"); 2242 #endif 2243 return (FC_NOMAP); 2244 } 2245 } 2246
2247 /*ARGSUSED*/ 2248 static faultcode_t 2249 segspt_shmfaulta(struct seg *seg, caddr_t addr) 2250 { 2251 return (0); 2252 } 2253
2254 /*ARGSUSED*/ 2255 static int 2256 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta) 2257 { 2258 return (0); 2259 } 2260
2261 /*ARGSUSED*/ 2262 static size_t 2263 segspt_shmswapout(struct seg *seg) 2264 { 2265 return (0); 2266 } 2267
2268 /* 2269 * duplicate the shared page tables 2270 */ 2271 int 2272 segspt_shmdup(struct seg *seg, struct seg *newseg) 2273 {
2274 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2275 struct anon_map *amp = shmd->shm_amp; 2276 struct shm_data *shmd_new; 2277 struct seg *spt_seg = shmd->shm_sptseg;
2278 struct spt_data *sptd = spt_seg->s_data; 2279 int error = 0; 2280
2281 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 2282
2283 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP); 2284 newseg->s_data = (void *)shmd_new; 2285 shmd_new->shm_sptas = shmd->shm_sptas; 2286 shmd_new->shm_amp = amp;
2287 shmd_new->shm_sptseg = shmd->shm_sptseg; 2288 newseg->s_ops = &segspt_shmops; 2289 newseg->s_szc = seg->s_szc; 2290 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc); 2291
2292 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 2293 amp->refcnt++; 2294 ANON_LOCK_EXIT(&amp->a_rwlock); 2295
2296 if (sptd->spt_flags & SHM_PAGEABLE) { 2297 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP); 2298 shmd_new->shm_lckpgs = 0;
2299 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 2300 if ((error = hat_share(newseg->s_as->a_hat, 2301 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2302 seg->s_size, seg->s_szc)) != 0) { 2303 kmem_free(shmd_new->shm_vpage, 2304 btopr(amp->size)); 2305 } 2306 } 2307 return (error); 2308 } else {
2309 return (hat_share(newseg->s_as->a_hat, newseg->s_base, 2310 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size, 2311 seg->s_szc)); 2312 2313 } 2314 } 2315
2316 /*ARGSUSED*/ 2317 int 2318 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot) 2319 {
2320 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2321 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2322
2323 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2324
2325 /* 2326 * ISM segment is always rw. 2327 */ 2328 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0); 2329 } 2330
2331 /* 2332 * Return an array of locked large pages; for empty slots allocate 2333 * private zero-filled anon pages.
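 * On success every entry of ppa[] refers to a page held SE_SHARED; on
 * failure any pages already obtained are unlocked before the error is
 * returned.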
2334 */ 2335 static int 2336 spt_anon_getpages( 2337 struct seg *sptseg, 2338 caddr_t sptaddr, 2339 size_t len, 2340 page_t *ppa[]) 2341 {
2342 struct spt_data *sptd = sptseg->s_data; 2343 struct anon_map *amp = sptd->spt_amp; 2344 enum seg_rw rw = sptd->spt_prot; 2345 uint_t szc = sptseg->s_szc;
2346 size_t pg_sz, share_sz = page_get_pagesize(szc); 2347 pgcnt_t lp_npgs; 2348 caddr_t lp_addr, e_sptaddr; 2349 uint_t vpprot, ppa_szc = 0; 2350 struct vpage *vpage = NULL;
2351 ulong_t j, ppa_idx; 2352 int err, ierr = 0; 2353 pgcnt_t an_idx; 2354 anon_sync_obj_t cookie; 2355 int anon_locked = 0; 2356 pgcnt_t amp_pgs; 2357 2358
2359 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz)); 2360 ASSERT(len != 0); 2361
2362 pg_sz = share_sz; 2363 lp_npgs = btop(pg_sz); 2364 lp_addr = sptaddr; 2365 e_sptaddr = sptaddr + len; 2366 an_idx = seg_page(sptseg, sptaddr); 2367 ppa_idx = 0; 2368
2369 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 2370
2371 amp_pgs = page_get_pagecnt(amp->a_szc); 2372
2373 /*CONSTCOND*/ 2374 while (1) { 2375 for (; lp_addr < e_sptaddr; 2376 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) { 2377
2378 /* 2379 * If we're currently locked, and we get to a new 2380 * page, unlock our current anon chunk. 2381 */
2382 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) { 2383 anon_array_exit(&cookie); 2384 anon_locked = 0; 2385 }
2386 if (!anon_locked) { 2387 anon_array_enter(amp, an_idx, &cookie); 2388 anon_locked = 1; 2389 } 2390 ppa_szc = (uint_t)-1;
2391 ierr = anon_map_getpages(amp, an_idx, szc, sptseg, 2392 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx], 2393 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred); 2394
2395 if (ierr != 0) { 2396 if (ierr > 0) { 2397 err = FC_MAKE_ERR(ierr); 2398 goto lpgs_err; 2399 } 2400 break; 2401 } 2402 }
2403 if (lp_addr == e_sptaddr) { 2404 break; 2405 } 2406 ASSERT(lp_addr < e_sptaddr); 2407
2408 /* 2409 * ierr == -1 means we failed to allocate a large page, 2410 * so do a size down operation. 2411 *
2412 * ierr == -2 means some other process that privately shares 2413 * pages with this process has allocated a larger page and we 2414 * need to retry with larger pages. So do a size up
2415 * operation. This relies on the fact that large pages are 2416 * never partially shared i.e. if we share any constituent 2417 * page of a large page with another process we must share the
2418 * entire large page. Note this cannot happen for SOFTLOCK 2419 * case, unless current address (lpaddr) is at the beginning 2420 * of the next page size boundary because the other process
2421 * couldn't have relocated locked pages. 2422 */ 2423 ASSERT(ierr == -1 || ierr == -2);
2424 if (segvn_anypgsz) { 2425 ASSERT(ierr == -2 || szc != 0); 2426 ASSERT(ierr == -1 || szc < sptseg->s_szc); 2427 szc = (ierr == -1) ? szc - 1 : szc + 1; 2428 } else {
2429 /* 2430 * For faults and segvn_anypgsz == 0 2431 * we need to be careful not to loop forever 2432 * if existing page is found with szc other
2433 * than 0 or seg->s_szc. This could be due 2434 * to page relocations on behalf of DR or 2435 * more likely large page creation. For this
2436 * case simply re-size to existing page's szc 2437 * if returned by anon_map_getpages(). 2438 */ 2439 if (ppa_szc == (uint_t)-1) { 2440 szc = (ierr == -1) ?
0 : sptseg->s_szc; 2441 } else { 2442 ASSERT(ppa_szc <= sptseg->s_szc); 2443 ASSERT(ierr == -2 || ppa_szc < szc); 2444 ASSERT(ierr == -1 || ppa_szc > szc); 2445 szc = ppa_szc; 2446 } 2447 }
2448 pg_sz = page_get_pagesize(szc); 2449 lp_npgs = btop(pg_sz); 2450 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz)); 2451 }
2452 if (anon_locked) { 2453 anon_array_exit(&cookie); 2454 } 2455 ANON_LOCK_EXIT(&amp->a_rwlock); 2456 return (0); 2457
2458 lpgs_err: 2459 if (anon_locked) { 2460 anon_array_exit(&cookie); 2461 } 2462 ANON_LOCK_EXIT(&amp->a_rwlock); 2463 for (j = 0; j < ppa_idx; j++) 2464 page_unlock(ppa[j]); 2465 return (err); 2466 } 2467
2468 /* 2469 * count the number of bytes in a set of spt pages that are currently not 2470 * locked 2471 */ 2472 static rctl_qty_t 2473 spt_unlockedbytes(pgcnt_t npages, page_t **ppa) 2474 {
2475 ulong_t i; 2476 rctl_qty_t unlocked = 0; 2477
2478 for (i = 0; i < npages; i++) { 2479 if (ppa[i]->p_lckcnt == 0) 2480 unlocked += PAGESIZE; 2481 } 2482 return (unlocked); 2483 } 2484
2485 int 2486 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages, 2487 page_t **ppa, ulong_t *lockmap, size_t pos, 2488 rctl_qty_t *locked) 2489 {
2490 struct shm_data *shmd = seg->s_data; 2491 struct spt_data *sptd = shmd->shm_sptseg->s_data; 2492 ulong_t i; 2493 int kernel; 2494
2495 /* return the number of bytes actually locked */ 2496 *locked = 0; 2497 for (i = 0; i < npages; anon_index++, pos++, i++) {
2498 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) { 2499 if (sptd->spt_ppa_lckcnt[anon_index] < 2500 (ushort_t)DISM_LOCK_MAX) {
2501 if (++sptd->spt_ppa_lckcnt[anon_index] == 2502 (ushort_t)DISM_LOCK_MAX) { 2503 cmn_err(CE_WARN, 2504 "DISM page lock limit "
2505 "reached on DISM offset 0x%lx\n", 2506 anon_index << PAGESHIFT); 2507 }
2508 kernel = (sptd->spt_ppa && 2509 sptd->spt_ppa[anon_index]) ? 1 : 0; 2510 if (!page_pp_lock(ppa[i], 0, kernel)) { 2511 sptd->spt_ppa_lckcnt[anon_index]--; 2512 return (EAGAIN); 2513 }
2514 /* if this is a newly locked page, count it */ 2515 if (ppa[i]->p_lckcnt == 1) { 2516 *locked += PAGESIZE; 2517 } 2518 shmd->shm_lckpgs++;
2519 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED; 2520 if (lockmap != NULL) 2521 BT_SET(lockmap, pos); 2522 } 2523 } 2524 } 2525 return (0); 2526 } 2527
2528 /*ARGSUSED*/ 2529 static int 2530 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len, 2531 int attr, int op, ulong_t *lockmap, size_t pos) 2532 {
2533 struct shm_data *shmd = seg->s_data; 2534 struct seg *sptseg = shmd->shm_sptseg; 2535 struct spt_data *sptd = sptseg->s_data; 2536 struct kshmid *sp = sptd->spt_amp->a_sp;
2537 pgcnt_t npages, a_npages; 2538 page_t **ppa; 2539 pgcnt_t an_idx, a_an_idx, ppa_idx; 2540 caddr_t spt_addr, a_addr; /* spt and aligned address */ 2541 size_t a_len; /* aligned len */
2542 size_t share_sz; 2543 ulong_t i; 2544 int sts = 0; 2545 rctl_qty_t unlocked = 0; 2546 rctl_qty_t locked = 0; 2547 struct proc *p = curproc; 2548 kproject_t *proj; 2549
2550 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2551 ASSERT(sp != NULL); 2552
2553 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 2554 return (0); 2555 } 2556
2557 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK); 2558 an_idx = seg_page(seg, addr); 2559 npages = btopr(len); 2560
2561 if (an_idx + npages > btopr(shmd->shm_amp->size)) { 2562 return (ENOMEM); 2563 } 2564
2565 /* 2566 * A shm's project never changes, so no lock needed. 2567 * The shm has a hold on the project, so it will not go away.
2568 * Since we have a mapping to shm within this zone, we know 2569 * that the zone will not go away. 2570 */ 2571 proj = sp->shm_perm.ipc_proj; 2572
2573 if (op == MC_LOCK) { 2574
2575 /* 2576 * Need to align addr and size request if they are not 2577 * aligned so we can always allocate large page(s); however,
2578 * we only lock what was requested in the initial request. 2579 */
2580 share_sz = page_get_pagesize(sptseg->s_szc); 2581 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz); 2582 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)), 2583 share_sz);
2584 a_npages = btop(a_len); 2585 a_an_idx = seg_page(seg, a_addr); 2586 spt_addr = sptseg->s_base + ptob(a_an_idx); 2587 ppa_idx = an_idx - a_an_idx; 2588
2589 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages), 2590 KM_NOSLEEP)) == NULL) { 2591 return (ENOMEM); 2592 } 2593
2594 /* 2595 * Don't cache any new pages for IO and 2596 * flush any cached pages. 2597 */ 2598 mutex_enter(&sptd->spt_lock); 2599 if (sptd->spt_ppa != NULL) 2600 sptd->spt_flags |= DISM_PPA_CHANGED; 2601
2602 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa); 2603 if (sts != 0) { 2604 mutex_exit(&sptd->spt_lock); 2605 kmem_free(ppa, ((sizeof (page_t *)) * a_npages)); 2606 return (sts); 2607 } 2608
2609 mutex_enter(&sp->shm_mlock); 2610 /* enforce locked memory rctl */ 2611 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]); 2612
2613 mutex_enter(&p->p_lock); 2614 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) { 2615 mutex_exit(&p->p_lock); 2616 sts = EAGAIN; 2617 } else { 2618 mutex_exit(&p->p_lock);
2619 sts = spt_lockpages(seg, an_idx, npages, 2620 &ppa[ppa_idx], lockmap, pos, &locked); 2621
2622 /* 2623 * correct locked count if not all pages could be 2624 * locked 2625 */ 2626 if ((unlocked - locked) > 0) { 2627 rctl_decr_locked_mem(NULL, proj, 2628 (unlocked - locked), 0); 2629 } 2630 }
2631 /* 2632 * unlock pages 2633 */ 2634 for (i = 0; i < a_npages; i++) 2635 page_unlock(ppa[i]);
2636 if (sptd->spt_ppa != NULL) 2637 sptd->spt_flags |= DISM_PPA_CHANGED; 2638 mutex_exit(&sp->shm_mlock); 2639 mutex_exit(&sptd->spt_lock); 2640
2641 kmem_free(ppa, ((sizeof (page_t *)) * a_npages)); 2642
2643 } else if (op == MC_UNLOCK) { /* unlock */ 2644 struct anon_map *amp; 2645 struct anon *ap; 2646 struct vnode *vp; 2647 u_offset_t off; 2648 struct page *pp;
2649 int kernel; 2650 anon_sync_obj_t cookie; 2651 rctl_qty_t unlocked = 0; 2652 page_t **ppa; 2653
2654 amp = sptd->spt_amp; 2655 mutex_enter(&sptd->spt_lock); 2656 if (shmd->shm_lckpgs == 0) { 2657 mutex_exit(&sptd->spt_lock); 2658 return (0); 2659 }
2660 /* 2661 * Don't cache new IO pages. 2662 */ 2663 if (sptd->spt_ppa != NULL) 2664 sptd->spt_flags |= DISM_PPA_CHANGED; 2665
2666 mutex_enter(&sp->shm_mlock); 2667 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2668 for (i = 0; i < npages; i++, an_idx++) { 2669 if (shmd->shm_vpage[an_idx] & DISM_PG_LOCKED) { 2670 anon_array_enter(amp, an_idx, &cookie); 2671 ap = anon_get_ptr(amp->ahp, an_idx); 2672 ASSERT(ap); 2673
2674 swap_xlate(ap, &vp, &off); 2675 anon_array_exit(&cookie); 2676 pp = page_lookup(vp, off, SE_SHARED); 2677 ASSERT(pp);
2678 /* 2679 * the availrmem is decremented only for 2680 * pages which are not in seg pcache, 2681 * for pages in seg pcache availrmem was 2682 * decremented in _dismpagelock() (if
2683 * they were not locked here) 2684 */ 2685 kernel = (sptd->spt_ppa && 2686 sptd->spt_ppa[an_idx]) ?
1 : 0; 2687 ASSERT(pp->p_lckcnt > 0); 2688 page_pp_unlock(pp, 0, kernel); 2689 if (pp->p_lckcnt == 0) 2690 unlocked += PAGESIZE; 2691 page_unlock(pp);
2692 shmd->shm_vpage[an_idx] &= ~DISM_PG_LOCKED; 2693 sptd->spt_ppa_lckcnt[an_idx]--; 2694 shmd->shm_lckpgs--; 2695 } 2696 }
2697 ANON_LOCK_EXIT(&amp->a_rwlock); 2698 if ((ppa = sptd->spt_ppa) != NULL) 2699 sptd->spt_flags |= DISM_PPA_CHANGED; 2700 mutex_exit(&sptd->spt_lock); 2701
2702 rctl_decr_locked_mem(NULL, proj, unlocked, 0); 2703 mutex_exit(&sp->shm_mlock); 2704
2705 if (ppa != NULL) 2706 seg_ppurge_wiredpp(ppa); 2707 } 2708 return (sts); 2709 } 2710
2711 /*ARGSUSED*/ 2712 int 2713 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 2714 {
2715 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2716 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2717 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1; 2718
2719 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2720
2721 /* 2722 * ISM segment is always rw. 2723 */ 2724 while (--pgno >= 0) 2725 *protv++ = sptd->spt_prot; 2726 return (0); 2727 } 2728
2729 /*ARGSUSED*/ 2730 u_offset_t 2731 segspt_shmgetoffset(struct seg *seg, caddr_t addr) 2732 {
2733 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2734
2735 /* Offset does not matter in ISM memory */ 2736 2737 return ((u_offset_t)0); 2738 } 2739
2740 /* ARGSUSED */ 2741 int 2742 segspt_shmgettype(struct seg *seg, caddr_t addr) 2743 {
2744 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2745 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2746
2747 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2748
2749 /* 2750 * The shared memory mapping is always MAP_SHARED; swap is only 2751 * reserved for DISM. 2752 */
2753 return (MAP_SHARED | 2754 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE)); 2755 } 2756
2757 /*ARGSUSED*/ 2758 int 2759 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 2760 {
2761 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2762 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2763
2764 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2765
2766 *vpp = sptd->spt_vp; 2767 return (0); 2768 } 2769
2770 /* 2771 * We need to wait for pending IO to complete to a DISM segment in order for 2772 * pages to get kicked out of the seg_pcache. 120 seconds should be more 2773 * than enough time to wait.
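 * The value is in seconds; it is converted to clock ticks with hz at the
 * point of use in segspt_shmadvise() below.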
2774 */ 2775 static clock_t spt_pcache_wait = 120; 2776
2777 /*ARGSUSED*/ 2778 static int 2779 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 2780 {
2781 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2782 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2783 struct anon_map *amp; 2784 pgcnt_t pg_idx;
2785 ushort_t gen; 2786 clock_t end_lbolt; 2787 int writer; 2788 page_t **ppa; 2789
2790 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2791
2792 if (behav == MADV_FREE) { 2793 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) 2794 return (0); 2795
2796 amp = sptd->spt_amp; 2797 pg_idx = seg_page(seg, addr); 2798
2799 mutex_enter(&sptd->spt_lock); 2800 if ((ppa = sptd->spt_ppa) == NULL) { 2801 mutex_exit(&sptd->spt_lock);
2802 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 2803 anon_disclaim(amp, pg_idx, len); 2804 ANON_LOCK_EXIT(&amp->a_rwlock); 2805 return (0); 2806 } 2807
2808 sptd->spt_flags |= DISM_PPA_CHANGED; 2809 gen = sptd->spt_gen; 2810
2811 mutex_exit(&sptd->spt_lock); 2812
2813 /* 2814 * Purge all DISM cached pages 2815 */ 2816 seg_ppurge_wiredpp(ppa); 2817
2818 /* 2819 * Drop the AS_LOCK so that other threads can grab it 2820 * in the as_pageunlock path and hopefully get the segment 2821 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2822 * to keep this segment resident. 2823 */ 2824 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2825 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 2826 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock); 2827
2828 mutex_enter(&sptd->spt_lock); 2829
2830 end_lbolt = lbolt + (hz * spt_pcache_wait); 2831
2832 /* 2833 * Try to wait for pages to get kicked out of the seg_pcache. 2834 */
2835 while (sptd->spt_gen == gen && 2836 (sptd->spt_flags & DISM_PPA_CHANGED) && 2837 lbolt < end_lbolt) { 2838 if (!cv_timedwait_sig(&sptd->spt_cv, 2839 &sptd->spt_lock, end_lbolt)) { 2840 break; 2841 } 2842 } 2843
2844 mutex_exit(&sptd->spt_lock); 2845
2846 /* Regrab the AS_LOCK and release our hold on the segment */ 2847 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock, 2848 writer ? RW_WRITER : RW_READER);
2849 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1); 2850 if (shmd->shm_softlockcnt <= 0) { 2851 if (AS_ISUNMAPWAIT(seg->s_as)) { 2852 mutex_enter(&seg->s_as->a_contents);
2853 if (AS_ISUNMAPWAIT(seg->s_as)) { 2854 AS_CLRUNMAPWAIT(seg->s_as); 2855 cv_broadcast(&seg->s_as->a_cv); 2856 } 2857 mutex_exit(&seg->s_as->a_contents); 2858 } 2859 } 2860
2861 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 2862 anon_disclaim(amp, pg_idx, len); 2863 ANON_LOCK_EXIT(&amp->a_rwlock);
2864 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP || 2865 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) { 2866 int already_set; 2867 ulong_t anon_index;
2868 lgrp_mem_policy_t policy; 2869 caddr_t shm_addr; 2870 size_t share_size; 2871 size_t size; 2872 struct seg *sptseg = shmd->shm_sptseg; 2873 caddr_t sptseg_addr; 2874
2875 /* 2876 * Align address and length to page size of underlying segment 2877 */ 2878 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2879 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size); 2880 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), 2881 share_size); 2882
2883 amp = shmd->shm_amp; 2884 anon_index = seg_page(seg, shm_addr); 2885
2886 /* 2887 * And now we may have to adjust size downward if we have 2888 * exceeded the realsize of the segment or initial anon 2889 * allocations.
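 * The clipped size, rather than the caller's len, is what is later passed
 * to page_mark_migrate() below.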
2890 */ 2891 sptseg_addr = sptseg->s_base + ptob(anon_index); 2892 if ((sptseg_addr + size) > 2893 (sptseg->s_base + sptd->spt_realsize)) 2894 size = (sptseg->s_base + sptd->spt_realsize) - 2895 sptseg_addr; 2896
2897 /* 2898 * Set memory allocation policy for this segment 2899 */ 2900 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2901 already_set = lgrp_shm_policy_set(policy, amp, anon_index, 2902 NULL, 0, len); 2903
2904 /* 2905 * If random memory allocation policy set already, 2906 * don't bother reapplying it. 2907 */ 2908 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 2909 return (0); 2910
2911 /* 2912 * Mark any existing pages in the given range for 2913 * migration, flushing the I/O page cache and using the 2914 * underlying segment to calculate the anon index and to get
2915 * the anonmap and vnode pointer. 2916 */ 2917 if (shmd->shm_softlockcnt > 0) 2918 segspt_purge(seg); 2919
2920 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0); 2921 } 2922
2923 return (0); 2924 } 2925
2926 /*ARGSUSED*/ 2927 void 2928 segspt_shmdump(struct seg *seg) 2929 { 2930 /* no-op for ISM segment */ 2931 } 2932
2933 /*ARGSUSED*/ 2934 static faultcode_t 2935 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 2936 { 2937 return (ENOTSUP); 2938 } 2939
2940 /* 2941 * get a memory ID for an addr in a given segment 2942 */ 2943 static int 2944 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp) 2945 {
2946 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2947 struct anon *ap; 2948 size_t anon_index; 2949 struct anon_map *amp = shmd->shm_amp;
2950 struct spt_data *sptd = shmd->shm_sptseg->s_data; 2951 struct seg *sptseg = shmd->shm_sptseg; 2952 anon_sync_obj_t cookie; 2953
2954 anon_index = seg_page(seg, addr); 2955
2956 if (addr > (seg->s_base + sptd->spt_realsize)) { 2957 return (EFAULT); 2958 } 2959
2960 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 2961 anon_array_enter(amp, anon_index, &cookie); 2962 ap = anon_get_ptr(amp->ahp, anon_index);
2963 if (ap == NULL) { 2964 struct page *pp; 2965 caddr_t spt_addr = sptseg->s_base + ptob(anon_index); 2966
2967 pp = anon_zero(sptseg, spt_addr, &ap, kcred); 2968 if (pp == NULL) { 2969 anon_array_exit(&cookie); 2970 ANON_LOCK_EXIT(&amp->a_rwlock); 2971 return (ENOMEM); 2972 }
2973 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 2974 page_unlock(pp); 2975 } 2976 anon_array_exit(&cookie); 2977 ANON_LOCK_EXIT(&amp->a_rwlock);
2978 memidp->val[0] = (uintptr_t)ap; 2979 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET; 2980 return (0); 2981 } 2982
2983 /* 2984 * Get memory allocation policy info for specified address in given segment 2985 */ 2986 static lgrp_mem_policy_info_t * 2987 segspt_shmgetpolicy(struct seg *seg, caddr_t addr) 2988 {
2989 struct anon_map *amp; 2990 ulong_t anon_index; 2991 lgrp_mem_policy_info_t *policy_info; 2992 struct shm_data *shm_data; 2993
2994 ASSERT(seg != NULL); 2995
2996 /* 2997 * Get anon_map from segshm 2998 * 2999 * Assume that no lock needs to be held on anon_map, since 3000 * it should be protected by its reference count which must be
3001 * nonzero for an existing segment 3002 * Need to grab readers lock on policy tree though 3003 */
3004 shm_data = (struct shm_data *)seg->s_data; 3005 if (shm_data == NULL) 3006 return (NULL); 3007 amp = shm_data->shm_amp; 3008 ASSERT(amp->refcnt != 0); 3009
3010 /* 3011 * Get policy info 3012 * 3013 * Assume starting anon index of 0 3014 */ 3015 anon_index = seg_page(seg, addr); 3016 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL,
0); 3017 3018 return (policy_info); 3019 } 3020 3021 /*ARGSUSED*/ 3022 static int 3023 segspt_shmcapable(struct seg *seg, segcapability_t capability) 3024 { 3025 return (0); 3026 } 3027
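/*
 * A minimal user-level sketch (hypothetical, not part of this file's build)
 * of how the ISM/DISM segments handled by this driver are typically created:
 * SHM_SHARE_MMU requests ISM and SHM_PAGEABLE requests DISM.
 *
 *	int id = shmget(IPC_PRIVATE, 64UL << 20, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, SHM_SHARE_MMU);	(use SHM_PAGEABLE for DISM)
 */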