/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/mman.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/tuneable.h>
#include <vm/hat.h>
#include <vm/seg.h>
#include <vm/as.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <sys/buf.h>
#include <sys/swap.h>
#include <sys/atomic.h>
#include <vm/seg_spt.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/shm.h>
#include <sys/shm_impl.h>
#include <sys/lgrp.h>
#include <sys/vmsystm.h>
#include <sys/policy.h>
#include <sys/project.h>
#include <sys/tnf_probe.h>
#include <sys/zone.h>

#define	SEGSPTADDR	(caddr_t)0x0

/*
 * # pages used for spt
 */
size_t	spt_used;

/*
 * segspt_minfree is the memory left for the system after ISM
 * has locked its pages; it is set to 5% of availrmem in
 * sptcreate when ISM is created. ISM should not use more
 * than ~90% of availrmem; if it does, the performance
 * of the system may decrease. Machines with large memories may
 * be able to use up more memory for ISM, so we set the default
 * segspt_minfree to 5% (which gives ISM at most 95% of availrmem).
 * If somebody wants even more memory for ISM (risking hanging
 * the system) they can patch segspt_minfree to a smaller number.
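 * For example, with availrmem at 1,000,000 pages the default works out
 * to availrmem / 20 = 50,000 pages held back for the rest of the system,
 * allowing ISM to lock up to the remaining 950,000 pages (95%).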
71 */ 72 pgcnt_t segspt_minfree = 0; 73 74 static int segspt_create(struct seg *seg, caddr_t argsp); 75 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize); 76 static void segspt_free(struct seg *seg); 77 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len); 78 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr); 79 80 static void 81 segspt_badop() 82 { 83 panic("segspt_badop called"); 84 /*NOTREACHED*/ 85 } 86 87 #define SEGSPT_BADOP(t) (t(*)())segspt_badop 88 89 struct seg_ops segspt_ops = { 90 SEGSPT_BADOP(int), /* dup */ 91 segspt_unmap, 92 segspt_free, 93 SEGSPT_BADOP(int), /* fault */ 94 SEGSPT_BADOP(faultcode_t), /* faulta */ 95 SEGSPT_BADOP(int), /* setprot */ 96 SEGSPT_BADOP(int), /* checkprot */ 97 SEGSPT_BADOP(int), /* kluster */ 98 SEGSPT_BADOP(size_t), /* swapout */ 99 SEGSPT_BADOP(int), /* sync */ 100 SEGSPT_BADOP(size_t), /* incore */ 101 SEGSPT_BADOP(int), /* lockop */ 102 SEGSPT_BADOP(int), /* getprot */ 103 SEGSPT_BADOP(u_offset_t), /* getoffset */ 104 SEGSPT_BADOP(int), /* gettype */ 105 SEGSPT_BADOP(int), /* getvp */ 106 SEGSPT_BADOP(int), /* advise */ 107 SEGSPT_BADOP(void), /* dump */ 108 SEGSPT_BADOP(int), /* pagelock */ 109 SEGSPT_BADOP(int), /* setpgsz */ 110 SEGSPT_BADOP(int), /* getmemid */ 111 segspt_getpolicy, /* getpolicy */ 112 SEGSPT_BADOP(int), /* capable */ 113 }; 114 115 static int segspt_shmdup(struct seg *seg, struct seg *newseg); 116 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize); 117 static void segspt_shmfree(struct seg *seg); 118 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg, 119 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw); 120 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr); 121 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr, 122 register size_t len, register uint_t prot); 123 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, 124 uint_t prot); 125 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta); 126 static size_t segspt_shmswapout(struct seg *seg); 127 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, 128 register char *vec); 129 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len, 130 int attr, uint_t flags); 131 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len, 132 int attr, int op, ulong_t *lockmap, size_t pos); 133 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, 134 uint_t *protv); 135 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr); 136 static int segspt_shmgettype(struct seg *seg, caddr_t addr); 137 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp); 138 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, 139 uint_t behav); 140 static void segspt_shmdump(struct seg *seg); 141 static int segspt_shmpagelock(struct seg *, caddr_t, size_t, 142 struct page ***, enum lock_type, enum seg_rw); 143 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t); 144 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *); 145 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t); 146 static int segspt_shmcapable(struct seg *, segcapability_t); 147 148 struct seg_ops segspt_shmops = { 149 segspt_shmdup, 150 segspt_shmunmap, 151 segspt_shmfree, 152 segspt_shmfault, 153 segspt_shmfaulta, 154 segspt_shmsetprot, 155 segspt_shmcheckprot, 
156 segspt_shmkluster, 157 segspt_shmswapout, 158 segspt_shmsync, 159 segspt_shmincore, 160 segspt_shmlockop, 161 segspt_shmgetprot, 162 segspt_shmgetoffset, 163 segspt_shmgettype, 164 segspt_shmgetvp, 165 segspt_shmadvise, /* advise */ 166 segspt_shmdump, 167 segspt_shmpagelock, 168 segspt_shmsetpgsz, 169 segspt_shmgetmemid, 170 segspt_shmgetpolicy, 171 segspt_shmcapable, 172 }; 173 174 static void segspt_purge(struct seg *seg); 175 static int segspt_reclaim(void *, caddr_t, size_t, struct page **, 176 enum seg_rw, int); 177 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len, 178 page_t **ppa); 179 180 181 182 /*ARGSUSED*/ 183 int 184 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp, 185 uint_t prot, uint_t flags, uint_t share_szc) 186 { 187 int err; 188 struct as *newas; 189 struct segspt_crargs sptcargs; 190 191 #ifdef DEBUG 192 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */, 193 tnf_ulong, size, size ); 194 #endif 195 if (segspt_minfree == 0) /* leave min 5% of availrmem for */ 196 segspt_minfree = availrmem/20; /* for the system */ 197 198 if (!hat_supported(HAT_SHARED_PT, (void *)0)) 199 return (EINVAL); 200 201 /* 202 * get a new as for this shared memory segment 203 */ 204 newas = as_alloc(); 205 newas->a_proc = NULL; 206 sptcargs.amp = amp; 207 sptcargs.prot = prot; 208 sptcargs.flags = flags; 209 sptcargs.szc = share_szc; 210 /* 211 * create a shared page table (spt) segment 212 */ 213 214 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) { 215 as_free(newas); 216 return (err); 217 } 218 *sptseg = sptcargs.seg_spt; 219 return (0); 220 } 221 222 void 223 sptdestroy(struct as *as, struct anon_map *amp) 224 { 225 226 #ifdef DEBUG 227 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */); 228 #endif 229 (void) as_unmap(as, SEGSPTADDR, amp->size); 230 as_free(as); 231 } 232 233 /* 234 * called from seg_free(). 235 * free (i.e., unlock, unmap, return to free list) 236 * all the pages in the given seg. 237 */ 238 void 239 segspt_free(struct seg *seg) 240 { 241 struct spt_data *sptd = (struct spt_data *)seg->s_data; 242 243 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 244 245 if (sptd != NULL) { 246 if (sptd->spt_realsize) 247 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize); 248 249 if (sptd->spt_ppa_lckcnt) 250 kmem_free(sptd->spt_ppa_lckcnt, 251 sizeof (*sptd->spt_ppa_lckcnt) 252 * btopr(sptd->spt_amp->size)); 253 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp)); 254 cv_destroy(&sptd->spt_cv); 255 mutex_destroy(&sptd->spt_lock); 256 kmem_free(sptd, sizeof (*sptd)); 257 } 258 } 259 260 /*ARGSUSED*/ 261 static int 262 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr, 263 uint_t flags) 264 { 265 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 266 267 return (0); 268 } 269 270 /*ARGSUSED*/ 271 static size_t 272 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec) 273 { 274 caddr_t eo_seg; 275 pgcnt_t npages; 276 struct shm_data *shmd = (struct shm_data *)seg->s_data; 277 struct seg *sptseg; 278 struct spt_data *sptd; 279 280 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 281 #ifdef lint 282 seg = seg; 283 #endif 284 sptseg = shmd->shm_sptseg; 285 sptd = sptseg->s_data; 286 287 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 288 eo_seg = addr + len; 289 while (addr < eo_seg) { 290 /* page exists, and it's locked. 
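			 * For non-pageable ISM every page was created and
			 * locked up front in segspt_create(), so the whole
			 * range can be reported as resident and locked
			 * without looking anything up; the DISM branch below
			 * instead has to consult the anon map and the
			 * per-page DISM_PG_LOCKED bits.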
			 */
			*vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
			    SEG_PAGE_ANON;
			addr += PAGESIZE;
		}
		return (len);
	} else {
		struct anon_map *amp = shmd->shm_amp;
		struct anon *ap;
		page_t *pp;
		pgcnt_t anon_index;
		struct vnode *vp;
		u_offset_t off;
		ulong_t i;
		int ret;
		anon_sync_obj_t cookie;

		addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
		anon_index = seg_page(seg, addr);
		npages = btopr(len);
		if (anon_index + npages > btopr(shmd->shm_amp->size)) {
			return (EINVAL);
		}
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, anon_index++) {
			ret = 0;
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
			if (ap != NULL) {
				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup_nowait(vp, off, SE_SHARED);
				if (pp != NULL) {
					ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
					page_unlock(pp);
				}
			} else {
				anon_array_exit(&cookie);
			}
			if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
				ret |= SEG_PAGE_LOCKED;
			}
			*vec++ = (char)ret;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		return (len);
	}
}

static int
segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
{
	size_t share_size;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * seg.s_size may have been rounded up to the largest page size
	 * in shmat().
	 * XXX This should be cleaned up. sptdestroy should take a length
	 * argument, the same one passed to sptcreate; then this rounding
	 * would not be needed (or would be done in shm.c) and only the
	 * check for the full segment would be needed.
	 *
	 * XXX -- shouldn't raddr == 0 always? These tests don't seem
	 * to be useful at all.
	 */
	share_size = page_get_pagesize(seg->s_szc);
	ssize = P2ROUNDUP(ssize, share_size);

	if (raddr == seg->s_base && ssize == seg->s_size) {
		seg_free(seg);
		return (0);
	} else
		return (EINVAL);
}

int
segspt_create(struct seg *seg, caddr_t argsp)
{
	int err;
	caddr_t addr = seg->s_base;
	struct spt_data *sptd;
	struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
	struct anon_map *amp = sptcargs->amp;
	struct kshmid *sp = amp->a_sp;
	struct cred *cred = CRED();
	ulong_t i, j, anon_index = 0;
	pgcnt_t npages = btopr(amp->size);
	struct vnode *vp;
	page_t **ppa;
	uint_t hat_flags;
	size_t pgsz;
	pgcnt_t pgcnt;
	caddr_t a;
	pgcnt_t pidx;
	size_t sz;
	proc_t *procp = curproc;
	rctl_qty_t lockedbytes = 0;
	kproject_t *proj;

	/*
	 * We are holding the a_lock on the underlying dummy as,
	 * so we can make calls to the HAT layer.
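	 *
	 * In outline, the non-pageable (ISM) path below reserves swap with
	 * anon_swap_adjust(), allocates the spt_data/vnode/ppa bookkeeping,
	 * creates and locks every page via anon_map_createpages() and
	 * page_pp_lock() (charging the shm project's locked-memory rctl),
	 * and finally preloads translations with hat_memload_array() one
	 * s_szc-sized large page at a time. The SHM_PAGEABLE (DISM) path
	 * returns after the bookkeeping; its pages are created and locked
	 * lazily at fault and lock time.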
394 */ 395 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 396 ASSERT(sp != NULL); 397 398 #ifdef DEBUG 399 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */, 400 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size); 401 #endif 402 if ((sptcargs->flags & SHM_PAGEABLE) == 0) { 403 if (err = anon_swap_adjust(npages)) 404 return (err); 405 } 406 err = ENOMEM; 407 408 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL) 409 goto out1; 410 411 if ((sptcargs->flags & SHM_PAGEABLE) == 0) { 412 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages), 413 KM_NOSLEEP)) == NULL) 414 goto out2; 415 } 416 417 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL); 418 419 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL) 420 goto out3; 421 422 seg->s_ops = &segspt_ops; 423 sptd->spt_vp = vp; 424 sptd->spt_amp = amp; 425 sptd->spt_prot = sptcargs->prot; 426 sptd->spt_flags = sptcargs->flags; 427 seg->s_data = (caddr_t)sptd; 428 sptd->spt_ppa = NULL; 429 sptd->spt_ppa_lckcnt = NULL; 430 seg->s_szc = sptcargs->szc; 431 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL); 432 sptd->spt_gen = 0; 433 434 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 435 if (seg->s_szc > amp->a_szc) { 436 amp->a_szc = seg->s_szc; 437 } 438 ANON_LOCK_EXIT(&->a_rwlock); 439 440 /* 441 * Set policy to affect initial allocation of pages in 442 * anon_map_createpages() 443 */ 444 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index, 445 NULL, 0, ptob(npages)); 446 447 if (sptcargs->flags & SHM_PAGEABLE) { 448 size_t share_sz; 449 pgcnt_t new_npgs, more_pgs; 450 struct anon_hdr *nahp; 451 zone_t *zone; 452 453 share_sz = page_get_pagesize(seg->s_szc); 454 if (!IS_P2ALIGNED(amp->size, share_sz)) { 455 /* 456 * We are rounding up the size of the anon array 457 * on 4 M boundary because we always create 4 M 458 * of page(s) when locking, faulting pages and we 459 * don't have to check for all corner cases e.g. 460 * if there is enough space to allocate 4 M 461 * page. 462 */ 463 new_npgs = btop(P2ROUNDUP(amp->size, share_sz)); 464 more_pgs = new_npgs - npages; 465 466 /* 467 * The zone will never be NULL, as a fully created 468 * shm always has an owning zone. 
469 */ 470 zone = sp->shm_perm.ipc_zone; 471 ASSERT(zone != NULL); 472 if (anon_resv_zone(ptob(more_pgs), zone) == 0) { 473 err = ENOMEM; 474 goto out4; 475 } 476 477 nahp = anon_create(new_npgs, ANON_SLEEP); 478 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 479 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages, 480 ANON_SLEEP); 481 anon_release(amp->ahp, npages); 482 amp->ahp = nahp; 483 ASSERT(amp->swresv == ptob(npages)); 484 amp->swresv = amp->size = ptob(new_npgs); 485 ANON_LOCK_EXIT(&->a_rwlock); 486 npages = new_npgs; 487 } 488 489 sptd->spt_ppa_lckcnt = kmem_zalloc(npages * 490 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP); 491 sptd->spt_pcachecnt = 0; 492 sptd->spt_realsize = ptob(npages); 493 sptcargs->seg_spt = seg; 494 return (0); 495 } 496 497 /* 498 * get array of pages for each anon slot in amp 499 */ 500 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa, 501 seg, addr, S_CREATE, cred)) != 0) 502 goto out4; 503 504 mutex_enter(&sp->shm_mlock); 505 506 /* May be partially locked, so, count bytes to charge for locking */ 507 for (i = 0; i < npages; i++) 508 if (ppa[i]->p_lckcnt == 0) 509 lockedbytes += PAGESIZE; 510 511 proj = sp->shm_perm.ipc_proj; 512 513 if (lockedbytes > 0) { 514 mutex_enter(&procp->p_lock); 515 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) { 516 mutex_exit(&procp->p_lock); 517 mutex_exit(&sp->shm_mlock); 518 for (i = 0; i < npages; i++) 519 page_unlock(ppa[i]); 520 err = ENOMEM; 521 goto out4; 522 } 523 mutex_exit(&procp->p_lock); 524 } 525 526 /* 527 * addr is initial address corresponding to the first page on ppa list 528 */ 529 for (i = 0; i < npages; i++) { 530 /* attempt to lock all pages */ 531 if (page_pp_lock(ppa[i], 0, 1) == 0) { 532 /* 533 * if unable to lock any page, unlock all 534 * of them and return error 535 */ 536 for (j = 0; j < i; j++) 537 page_pp_unlock(ppa[j], 0, 1); 538 for (i = 0; i < npages; i++) 539 page_unlock(ppa[i]); 540 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0); 541 mutex_exit(&sp->shm_mlock); 542 err = ENOMEM; 543 goto out4; 544 } 545 } 546 mutex_exit(&sp->shm_mlock); 547 548 /* 549 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 550 * for the entire life of the segment. For example platforms 551 * that do not support Dynamic Reconfiguration. 552 */ 553 hat_flags = HAT_LOAD_SHARE; 554 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) 555 hat_flags |= HAT_LOAD_LOCK; 556 557 /* 558 * Load translations one lare page at a time 559 * to make sure we don't create mappings bigger than 560 * segment's size code in case underlying pages 561 * are shared with segvn's segment that uses bigger 562 * size code than we do. 563 */ 564 pgsz = page_get_pagesize(seg->s_szc); 565 pgcnt = page_get_pagecnt(seg->s_szc); 566 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) { 567 sz = MIN(pgsz, ptob(npages - pidx)); 568 hat_memload_array(seg->s_as->a_hat, a, sz, 569 &ppa[pidx], sptd->spt_prot, hat_flags); 570 } 571 572 /* 573 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 574 * we will leave the pages locked SE_SHARED for the life 575 * of the ISM segment. This will prevent any calls to 576 * hat_pageunload() on this ISM segment for those platforms. 577 */ 578 if (!(hat_flags & HAT_LOAD_LOCK)) { 579 /* 580 * On platforms that support HAT_DYNAMIC_ISM_UNMAP, 581 * we no longer need to hold the SE_SHARED lock on the pages, 582 * since L_PAGELOCK and F_SOFTLOCK calls will grab the 583 * SE_SHARED lock on the pages as necessary. 
584 */ 585 for (i = 0; i < npages; i++) 586 page_unlock(ppa[i]); 587 } 588 sptd->spt_pcachecnt = 0; 589 kmem_free(ppa, ((sizeof (page_t *)) * npages)); 590 sptd->spt_realsize = ptob(npages); 591 atomic_add_long(&spt_used, npages); 592 sptcargs->seg_spt = seg; 593 return (0); 594 595 out4: 596 seg->s_data = NULL; 597 kmem_free(vp, sizeof (*vp)); 598 cv_destroy(&sptd->spt_cv); 599 out3: 600 mutex_destroy(&sptd->spt_lock); 601 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 602 kmem_free(ppa, (sizeof (*ppa) * npages)); 603 out2: 604 kmem_free(sptd, sizeof (*sptd)); 605 out1: 606 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 607 anon_swap_restore(npages); 608 return (err); 609 } 610 611 /*ARGSUSED*/ 612 void 613 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len) 614 { 615 struct page *pp; 616 struct spt_data *sptd = (struct spt_data *)seg->s_data; 617 pgcnt_t npages; 618 ulong_t anon_idx; 619 struct anon_map *amp; 620 struct anon *ap; 621 struct vnode *vp; 622 u_offset_t off; 623 uint_t hat_flags; 624 int root = 0; 625 pgcnt_t pgs, curnpgs = 0; 626 page_t *rootpp; 627 rctl_qty_t unlocked_bytes = 0; 628 kproject_t *proj; 629 kshmid_t *sp; 630 631 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 632 633 len = P2ROUNDUP(len, PAGESIZE); 634 635 npages = btop(len); 636 637 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP; 638 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) || 639 (sptd->spt_flags & SHM_PAGEABLE)) { 640 hat_flags = HAT_UNLOAD_UNMAP; 641 } 642 643 hat_unload(seg->s_as->a_hat, addr, len, hat_flags); 644 645 amp = sptd->spt_amp; 646 if (sptd->spt_flags & SHM_PAGEABLE) 647 npages = btop(amp->size); 648 649 ASSERT(amp != NULL); 650 651 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 652 sp = amp->a_sp; 653 proj = sp->shm_perm.ipc_proj; 654 mutex_enter(&sp->shm_mlock); 655 } 656 for (anon_idx = 0; anon_idx < npages; anon_idx++) { 657 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 658 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) { 659 panic("segspt_free_pages: null app"); 660 /*NOTREACHED*/ 661 } 662 } else { 663 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx)) 664 == NULL) 665 continue; 666 } 667 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0); 668 swap_xlate(ap, &vp, &off); 669 670 /* 671 * If this platform supports HAT_DYNAMIC_ISM_UNMAP, 672 * the pages won't be having SE_SHARED lock at this 673 * point. 674 * 675 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 676 * the pages are still held SE_SHARED locked from the 677 * original segspt_create() 678 * 679 * Our goal is to get SE_EXCL lock on each page, remove 680 * permanent lock on it and invalidate the page. 681 */ 682 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 683 if (hat_flags == HAT_UNLOAD_UNMAP) 684 pp = page_lookup(vp, off, SE_EXCL); 685 else { 686 if ((pp = page_find(vp, off)) == NULL) { 687 panic("segspt_free_pages: " 688 "page not locked"); 689 /*NOTREACHED*/ 690 } 691 if (!page_tryupgrade(pp)) { 692 page_unlock(pp); 693 pp = page_lookup(vp, off, SE_EXCL); 694 } 695 } 696 if (pp == NULL) { 697 panic("segspt_free_pages: " 698 "page not in the system"); 699 /*NOTREACHED*/ 700 } 701 ASSERT(pp->p_lckcnt > 0); 702 page_pp_unlock(pp, 0, 1); 703 if (pp->p_lckcnt == 0) 704 unlocked_bytes += PAGESIZE; 705 } else { 706 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL) 707 continue; 708 } 709 /* 710 * It's logical to invalidate the pages here as in most cases 711 * these were created by segspt. 
712 */ 713 if (pp->p_szc != 0) { 714 if (root == 0) { 715 ASSERT(curnpgs == 0); 716 root = 1; 717 rootpp = pp; 718 pgs = curnpgs = page_get_pagecnt(pp->p_szc); 719 ASSERT(pgs > 1); 720 ASSERT(IS_P2ALIGNED(pgs, pgs)); 721 ASSERT(!(page_pptonum(pp) & (pgs - 1))); 722 curnpgs--; 723 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) { 724 ASSERT(curnpgs == 1); 725 ASSERT(page_pptonum(pp) == 726 page_pptonum(rootpp) + (pgs - 1)); 727 page_destroy_pages(rootpp); 728 root = 0; 729 curnpgs = 0; 730 } else { 731 ASSERT(curnpgs > 1); 732 ASSERT(page_pptonum(pp) == 733 page_pptonum(rootpp) + (pgs - curnpgs)); 734 curnpgs--; 735 } 736 } else { 737 if (root != 0 || curnpgs != 0) { 738 panic("segspt_free_pages: bad large page"); 739 /*NOTREACHED*/ 740 } 741 /* 742 * Before destroying the pages, we need to take care 743 * of the rctl locked memory accounting. For that 744 * we need to calculte the unlocked_bytes. 745 */ 746 if (pp->p_lckcnt > 0) 747 unlocked_bytes += PAGESIZE; 748 /*LINTED: constant in conditional context */ 749 VN_DISPOSE(pp, B_INVAL, 0, kcred); 750 } 751 } 752 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 753 if (unlocked_bytes > 0) 754 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0); 755 mutex_exit(&sp->shm_mlock); 756 } 757 if (root != 0 || curnpgs != 0) { 758 panic("segspt_free_pages: bad large page"); 759 /*NOTREACHED*/ 760 } 761 762 /* 763 * mark that pages have been released 764 */ 765 sptd->spt_realsize = 0; 766 767 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 768 atomic_add_long(&spt_used, -npages); 769 anon_swap_restore(npages); 770 } 771 } 772 773 /* 774 * Get memory allocation policy info for specified address in given segment 775 */ 776 static lgrp_mem_policy_info_t * 777 segspt_getpolicy(struct seg *seg, caddr_t addr) 778 { 779 struct anon_map *amp; 780 ulong_t anon_index; 781 lgrp_mem_policy_info_t *policy_info; 782 struct spt_data *spt_data; 783 784 ASSERT(seg != NULL); 785 786 /* 787 * Get anon_map from segspt 788 * 789 * Assume that no lock needs to be held on anon_map, since 790 * it should be protected by its reference count which must be 791 * nonzero for an existing segment 792 * Need to grab readers lock on policy tree though 793 */ 794 spt_data = (struct spt_data *)seg->s_data; 795 if (spt_data == NULL) 796 return (NULL); 797 amp = spt_data->spt_amp; 798 ASSERT(amp->refcnt != 0); 799 800 /* 801 * Get policy info 802 * 803 * Assume starting anon index of 0 804 */ 805 anon_index = seg_page(seg, addr); 806 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0); 807 808 return (policy_info); 809 } 810 811 /* 812 * DISM only. 813 * Return locked pages over a given range. 814 * 815 * We will cache all DISM locked pages and save the pplist for the 816 * entire segment in the ppa field of the underlying DISM segment structure. 817 * Later, during a call to segspt_reclaim() we will use this ppa array 818 * to page_unlock() all of the pages and then we will free this ppa list. 
819 */ 820 /*ARGSUSED*/ 821 static int 822 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len, 823 struct page ***ppp, enum lock_type type, enum seg_rw rw) 824 { 825 struct shm_data *shmd = (struct shm_data *)seg->s_data; 826 struct seg *sptseg = shmd->shm_sptseg; 827 struct spt_data *sptd = sptseg->s_data; 828 pgcnt_t pg_idx, npages, tot_npages, npgs; 829 struct page **pplist, **pl, **ppa, *pp; 830 struct anon_map *amp; 831 spgcnt_t an_idx; 832 int ret = ENOTSUP; 833 uint_t pl_built = 0; 834 struct anon *ap; 835 struct vnode *vp; 836 u_offset_t off; 837 pgcnt_t claim_availrmem = 0; 838 uint_t szc; 839 840 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 841 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); 842 843 /* 844 * We want to lock/unlock the entire ISM segment. Therefore, 845 * we will be using the underlying sptseg and it's base address 846 * and length for the caching arguments. 847 */ 848 ASSERT(sptseg); 849 ASSERT(sptd); 850 851 pg_idx = seg_page(seg, addr); 852 npages = btopr(len); 853 854 /* 855 * check if the request is larger than number of pages covered 856 * by amp 857 */ 858 if (pg_idx + npages > btopr(sptd->spt_amp->size)) { 859 *ppp = NULL; 860 return (ENOTSUP); 861 } 862 863 if (type == L_PAGEUNLOCK) { 864 ASSERT(sptd->spt_ppa != NULL); 865 866 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size, 867 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim); 868 869 /* 870 * If someone is blocked while unmapping, we purge 871 * segment page cache and thus reclaim pplist synchronously 872 * without waiting for seg_pasync_thread. This speeds up 873 * unmapping in cases where munmap(2) is called, while 874 * raw async i/o is still in progress or where a thread 875 * exits on data fault in a multithreaded application. 876 */ 877 if ((sptd->spt_flags & DISM_PPA_CHANGED) || 878 (AS_ISUNMAPWAIT(seg->s_as) && 879 shmd->shm_softlockcnt > 0)) { 880 segspt_purge(seg); 881 } 882 return (0); 883 } 884 885 /* The L_PAGELOCK case ... */ 886 887 if (sptd->spt_flags & DISM_PPA_CHANGED) { 888 segspt_purge(seg); 889 /* 890 * for DISM ppa needs to be rebuild since 891 * number of locked pages could be changed 892 */ 893 *ppp = NULL; 894 return (ENOTSUP); 895 } 896 897 /* 898 * First try to find pages in segment page cache, without 899 * holding the segment lock. 900 */ 901 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size, 902 S_WRITE, SEGP_FORCE_WIRED); 903 if (pplist != NULL) { 904 ASSERT(sptd->spt_ppa != NULL); 905 ASSERT(sptd->spt_ppa == pplist); 906 ppa = sptd->spt_ppa; 907 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 908 if (ppa[an_idx] == NULL) { 909 seg_pinactive(seg, NULL, seg->s_base, 910 sptd->spt_amp->size, ppa, 911 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim); 912 *ppp = NULL; 913 return (ENOTSUP); 914 } 915 if ((szc = ppa[an_idx]->p_szc) != 0) { 916 npgs = page_get_pagecnt(szc); 917 an_idx = P2ROUNDUP(an_idx + 1, npgs); 918 } else { 919 an_idx++; 920 } 921 } 922 /* 923 * Since we cache the entire DISM segment, we want to 924 * set ppp to point to the first slot that corresponds 925 * to the requested addr, i.e. pg_idx. 
926 */ 927 *ppp = &(sptd->spt_ppa[pg_idx]); 928 return (0); 929 } 930 931 mutex_enter(&sptd->spt_lock); 932 /* 933 * try to find pages in segment page cache with mutex 934 */ 935 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size, 936 S_WRITE, SEGP_FORCE_WIRED); 937 if (pplist != NULL) { 938 ASSERT(sptd->spt_ppa != NULL); 939 ASSERT(sptd->spt_ppa == pplist); 940 ppa = sptd->spt_ppa; 941 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 942 if (ppa[an_idx] == NULL) { 943 mutex_exit(&sptd->spt_lock); 944 seg_pinactive(seg, NULL, seg->s_base, 945 sptd->spt_amp->size, ppa, 946 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim); 947 *ppp = NULL; 948 return (ENOTSUP); 949 } 950 if ((szc = ppa[an_idx]->p_szc) != 0) { 951 npgs = page_get_pagecnt(szc); 952 an_idx = P2ROUNDUP(an_idx + 1, npgs); 953 } else { 954 an_idx++; 955 } 956 } 957 /* 958 * Since we cache the entire DISM segment, we want to 959 * set ppp to point to the first slot that corresponds 960 * to the requested addr, i.e. pg_idx. 961 */ 962 mutex_exit(&sptd->spt_lock); 963 *ppp = &(sptd->spt_ppa[pg_idx]); 964 return (0); 965 } 966 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size, 967 SEGP_FORCE_WIRED) == SEGP_FAIL) { 968 mutex_exit(&sptd->spt_lock); 969 *ppp = NULL; 970 return (ENOTSUP); 971 } 972 973 /* 974 * No need to worry about protections because DISM pages are always rw. 975 */ 976 pl = pplist = NULL; 977 amp = sptd->spt_amp; 978 979 /* 980 * Do we need to build the ppa array? 981 */ 982 if (sptd->spt_ppa == NULL) { 983 pgcnt_t lpg_cnt = 0; 984 985 pl_built = 1; 986 tot_npages = btopr(sptd->spt_amp->size); 987 988 ASSERT(sptd->spt_pcachecnt == 0); 989 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP); 990 pl = pplist; 991 992 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 993 for (an_idx = 0; an_idx < tot_npages; ) { 994 ap = anon_get_ptr(amp->ahp, an_idx); 995 /* 996 * Cache only mlocked pages. For large pages 997 * if one (constituent) page is mlocked 998 * all pages for that large page 999 * are cached also. This is for quick 1000 * lookups of ppa array; 1001 */ 1002 if ((ap != NULL) && (lpg_cnt != 0 || 1003 (sptd->spt_ppa_lckcnt[an_idx] != 0))) { 1004 1005 swap_xlate(ap, &vp, &off); 1006 pp = page_lookup(vp, off, SE_SHARED); 1007 ASSERT(pp != NULL); 1008 if (lpg_cnt == 0) { 1009 lpg_cnt++; 1010 /* 1011 * For a small page, we are done -- 1012 * lpg_count is reset to 0 below. 1013 * 1014 * For a large page, we are guaranteed 1015 * to find the anon structures of all 1016 * constituent pages and a non-zero 1017 * lpg_cnt ensures that we don't test 1018 * for mlock for these. We are done 1019 * when lpg_count reaches (npgs + 1). 1020 * If we are not the first constituent 1021 * page, restart at the first one. 
1022 */ 1023 npgs = page_get_pagecnt(pp->p_szc); 1024 if (!IS_P2ALIGNED(an_idx, npgs)) { 1025 an_idx = P2ALIGN(an_idx, npgs); 1026 page_unlock(pp); 1027 continue; 1028 } 1029 } 1030 if (++lpg_cnt > npgs) 1031 lpg_cnt = 0; 1032 1033 /* 1034 * availrmem is decremented only 1035 * for unlocked pages 1036 */ 1037 if (sptd->spt_ppa_lckcnt[an_idx] == 0) 1038 claim_availrmem++; 1039 pplist[an_idx] = pp; 1040 } 1041 an_idx++; 1042 } 1043 ANON_LOCK_EXIT(&->a_rwlock); 1044 1045 if (claim_availrmem) { 1046 mutex_enter(&freemem_lock); 1047 if (availrmem < tune.t_minarmem + claim_availrmem) { 1048 mutex_exit(&freemem_lock); 1049 ret = ENOTSUP; 1050 claim_availrmem = 0; 1051 goto insert_fail; 1052 } else { 1053 availrmem -= claim_availrmem; 1054 } 1055 mutex_exit(&freemem_lock); 1056 } 1057 1058 sptd->spt_ppa = pl; 1059 } else { 1060 /* 1061 * We already have a valid ppa[]. 1062 */ 1063 pl = sptd->spt_ppa; 1064 } 1065 1066 ASSERT(pl != NULL); 1067 1068 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size, 1069 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED, 1070 segspt_reclaim); 1071 if (ret == SEGP_FAIL) { 1072 /* 1073 * seg_pinsert failed. We return 1074 * ENOTSUP, so that the as_pagelock() code will 1075 * then try the slower F_SOFTLOCK path. 1076 */ 1077 if (pl_built) { 1078 /* 1079 * No one else has referenced the ppa[]. 1080 * We created it and we need to destroy it. 1081 */ 1082 sptd->spt_ppa = NULL; 1083 } 1084 ret = ENOTSUP; 1085 goto insert_fail; 1086 } 1087 1088 /* 1089 * In either case, we increment softlockcnt on the 'real' segment. 1090 */ 1091 sptd->spt_pcachecnt++; 1092 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1093 1094 ppa = sptd->spt_ppa; 1095 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 1096 if (ppa[an_idx] == NULL) { 1097 mutex_exit(&sptd->spt_lock); 1098 seg_pinactive(seg, NULL, seg->s_base, 1099 sptd->spt_amp->size, 1100 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim); 1101 *ppp = NULL; 1102 return (ENOTSUP); 1103 } 1104 if ((szc = ppa[an_idx]->p_szc) != 0) { 1105 npgs = page_get_pagecnt(szc); 1106 an_idx = P2ROUNDUP(an_idx + 1, npgs); 1107 } else { 1108 an_idx++; 1109 } 1110 } 1111 /* 1112 * We can now drop the sptd->spt_lock since the ppa[] 1113 * exists and he have incremented pacachecnt. 1114 */ 1115 mutex_exit(&sptd->spt_lock); 1116 1117 /* 1118 * Since we cache the entire segment, we want to 1119 * set ppp to point to the first slot that corresponds 1120 * to the requested addr, i.e. pg_idx. 1121 */ 1122 *ppp = &(sptd->spt_ppa[pg_idx]); 1123 return (0); 1124 1125 insert_fail: 1126 /* 1127 * We will only reach this code if we tried and failed. 1128 * 1129 * And we can drop the lock on the dummy seg, once we've failed 1130 * to set up a new ppa[]. 1131 */ 1132 mutex_exit(&sptd->spt_lock); 1133 1134 if (pl_built) { 1135 if (claim_availrmem) { 1136 mutex_enter(&freemem_lock); 1137 availrmem += claim_availrmem; 1138 mutex_exit(&freemem_lock); 1139 } 1140 1141 /* 1142 * We created pl and we need to destroy it. 
1143 */ 1144 pplist = pl; 1145 for (an_idx = 0; an_idx < tot_npages; an_idx++) { 1146 if (pplist[an_idx] != NULL) 1147 page_unlock(pplist[an_idx]); 1148 } 1149 kmem_free(pl, sizeof (page_t *) * tot_npages); 1150 } 1151 1152 if (shmd->shm_softlockcnt <= 0) { 1153 if (AS_ISUNMAPWAIT(seg->s_as)) { 1154 mutex_enter(&seg->s_as->a_contents); 1155 if (AS_ISUNMAPWAIT(seg->s_as)) { 1156 AS_CLRUNMAPWAIT(seg->s_as); 1157 cv_broadcast(&seg->s_as->a_cv); 1158 } 1159 mutex_exit(&seg->s_as->a_contents); 1160 } 1161 } 1162 *ppp = NULL; 1163 return (ret); 1164 } 1165 1166 1167 1168 /* 1169 * return locked pages over a given range. 1170 * 1171 * We will cache the entire ISM segment and save the pplist for the 1172 * entire segment in the ppa field of the underlying ISM segment structure. 1173 * Later, during a call to segspt_reclaim() we will use this ppa array 1174 * to page_unlock() all of the pages and then we will free this ppa list. 1175 */ 1176 /*ARGSUSED*/ 1177 static int 1178 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len, 1179 struct page ***ppp, enum lock_type type, enum seg_rw rw) 1180 { 1181 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1182 struct seg *sptseg = shmd->shm_sptseg; 1183 struct spt_data *sptd = sptseg->s_data; 1184 pgcnt_t np, page_index, npages; 1185 caddr_t a, spt_base; 1186 struct page **pplist, **pl, *pp; 1187 struct anon_map *amp; 1188 ulong_t anon_index; 1189 int ret = ENOTSUP; 1190 uint_t pl_built = 0; 1191 struct anon *ap; 1192 struct vnode *vp; 1193 u_offset_t off; 1194 1195 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1196 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); 1197 1198 1199 /* 1200 * We want to lock/unlock the entire ISM segment. Therefore, 1201 * we will be using the underlying sptseg and it's base address 1202 * and length for the caching arguments. 1203 */ 1204 ASSERT(sptseg); 1205 ASSERT(sptd); 1206 1207 if (sptd->spt_flags & SHM_PAGEABLE) { 1208 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw)); 1209 } 1210 1211 page_index = seg_page(seg, addr); 1212 npages = btopr(len); 1213 1214 /* 1215 * check if the request is larger than number of pages covered 1216 * by amp 1217 */ 1218 if (page_index + npages > btopr(sptd->spt_amp->size)) { 1219 *ppp = NULL; 1220 return (ENOTSUP); 1221 } 1222 1223 if (type == L_PAGEUNLOCK) { 1224 1225 ASSERT(sptd->spt_ppa != NULL); 1226 1227 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size, 1228 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim); 1229 1230 /* 1231 * If someone is blocked while unmapping, we purge 1232 * segment page cache and thus reclaim pplist synchronously 1233 * without waiting for seg_pasync_thread. This speeds up 1234 * unmapping in cases where munmap(2) is called, while 1235 * raw async i/o is still in progress or where a thread 1236 * exits on data fault in a multithreaded application. 1237 */ 1238 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 1239 segspt_purge(seg); 1240 } 1241 return (0); 1242 } 1243 1244 /* The L_PAGELOCK case... */ 1245 1246 /* 1247 * First try to find pages in segment page cache, without 1248 * holding the segment lock. 1249 */ 1250 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size, 1251 S_WRITE, SEGP_FORCE_WIRED); 1252 if (pplist != NULL) { 1253 ASSERT(sptd->spt_ppa == pplist); 1254 ASSERT(sptd->spt_ppa[page_index]); 1255 /* 1256 * Since we cache the entire ISM segment, we want to 1257 * set ppp to point to the first slot that corresponds 1258 * to the requested addr, i.e. 
page_index. 1259 */ 1260 *ppp = &(sptd->spt_ppa[page_index]); 1261 return (0); 1262 } 1263 1264 mutex_enter(&sptd->spt_lock); 1265 1266 /* 1267 * try to find pages in segment page cache 1268 */ 1269 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size, 1270 S_WRITE, SEGP_FORCE_WIRED); 1271 if (pplist != NULL) { 1272 ASSERT(sptd->spt_ppa == pplist); 1273 /* 1274 * Since we cache the entire segment, we want to 1275 * set ppp to point to the first slot that corresponds 1276 * to the requested addr, i.e. page_index. 1277 */ 1278 mutex_exit(&sptd->spt_lock); 1279 *ppp = &(sptd->spt_ppa[page_index]); 1280 return (0); 1281 } 1282 1283 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size, 1284 SEGP_FORCE_WIRED) == SEGP_FAIL) { 1285 mutex_exit(&sptd->spt_lock); 1286 *ppp = NULL; 1287 return (ENOTSUP); 1288 } 1289 1290 /* 1291 * No need to worry about protections because ISM pages 1292 * are always rw. 1293 */ 1294 pl = pplist = NULL; 1295 1296 /* 1297 * Do we need to build the ppa array? 1298 */ 1299 if (sptd->spt_ppa == NULL) { 1300 ASSERT(sptd->spt_ppa == pplist); 1301 1302 spt_base = sptseg->s_base; 1303 pl_built = 1; 1304 1305 /* 1306 * availrmem is decremented once during anon_swap_adjust() 1307 * and is incremented during the anon_unresv(), which is 1308 * called from shm_rm_amp() when the segment is destroyed. 1309 */ 1310 amp = sptd->spt_amp; 1311 ASSERT(amp != NULL); 1312 1313 /* pcachecnt is protected by sptd->spt_lock */ 1314 ASSERT(sptd->spt_pcachecnt == 0); 1315 pplist = kmem_zalloc(sizeof (page_t *) 1316 * btopr(sptd->spt_amp->size), KM_SLEEP); 1317 pl = pplist; 1318 1319 anon_index = seg_page(sptseg, spt_base); 1320 1321 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1322 for (a = spt_base; a < (spt_base + sptd->spt_amp->size); 1323 a += PAGESIZE, anon_index++, pplist++) { 1324 ap = anon_get_ptr(amp->ahp, anon_index); 1325 ASSERT(ap != NULL); 1326 swap_xlate(ap, &vp, &off); 1327 pp = page_lookup(vp, off, SE_SHARED); 1328 ASSERT(pp != NULL); 1329 *pplist = pp; 1330 } 1331 ANON_LOCK_EXIT(&->a_rwlock); 1332 1333 if (a < (spt_base + sptd->spt_amp->size)) { 1334 ret = ENOTSUP; 1335 goto insert_fail; 1336 } 1337 sptd->spt_ppa = pl; 1338 } else { 1339 /* 1340 * We already have a valid ppa[]. 1341 */ 1342 pl = sptd->spt_ppa; 1343 } 1344 1345 ASSERT(pl != NULL); 1346 1347 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size, 1348 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED, 1349 segspt_reclaim); 1350 if (ret == SEGP_FAIL) { 1351 /* 1352 * seg_pinsert failed. We return 1353 * ENOTSUP, so that the as_pagelock() code will 1354 * then try the slower F_SOFTLOCK path. 1355 */ 1356 if (pl_built) { 1357 /* 1358 * No one else has referenced the ppa[]. 1359 * We created it and we need to destroy it. 1360 */ 1361 sptd->spt_ppa = NULL; 1362 } 1363 ret = ENOTSUP; 1364 goto insert_fail; 1365 } 1366 1367 /* 1368 * In either case, we increment softlockcnt on the 'real' segment. 1369 */ 1370 sptd->spt_pcachecnt++; 1371 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1372 1373 /* 1374 * We can now drop the sptd->spt_lock since the ppa[] 1375 * exists and he have incremented pacachecnt. 1376 */ 1377 mutex_exit(&sptd->spt_lock); 1378 1379 /* 1380 * Since we cache the entire segment, we want to 1381 * set ppp to point to the first slot that corresponds 1382 * to the requested addr, i.e. page_index. 1383 */ 1384 *ppp = &(sptd->spt_ppa[page_index]); 1385 return (0); 1386 1387 insert_fail: 1388 /* 1389 * We will only reach this code if we tried and failed. 
1390 * 1391 * And we can drop the lock on the dummy seg, once we've failed 1392 * to set up a new ppa[]. 1393 */ 1394 mutex_exit(&sptd->spt_lock); 1395 1396 if (pl_built) { 1397 /* 1398 * We created pl and we need to destroy it. 1399 */ 1400 pplist = pl; 1401 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT); 1402 while (np) { 1403 page_unlock(*pplist); 1404 np--; 1405 pplist++; 1406 } 1407 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size)); 1408 } 1409 if (shmd->shm_softlockcnt <= 0) { 1410 if (AS_ISUNMAPWAIT(seg->s_as)) { 1411 mutex_enter(&seg->s_as->a_contents); 1412 if (AS_ISUNMAPWAIT(seg->s_as)) { 1413 AS_CLRUNMAPWAIT(seg->s_as); 1414 cv_broadcast(&seg->s_as->a_cv); 1415 } 1416 mutex_exit(&seg->s_as->a_contents); 1417 } 1418 } 1419 *ppp = NULL; 1420 return (ret); 1421 } 1422 1423 /* 1424 * purge any cached pages in the I/O page cache 1425 */ 1426 static void 1427 segspt_purge(struct seg *seg) 1428 { 1429 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED); 1430 } 1431 1432 static int 1433 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 1434 enum seg_rw rw, int async) 1435 { 1436 struct seg *seg = (struct seg *)ptag; 1437 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1438 struct seg *sptseg; 1439 struct spt_data *sptd; 1440 pgcnt_t npages, i, free_availrmem = 0; 1441 int done = 0; 1442 1443 #ifdef lint 1444 addr = addr; 1445 #endif 1446 sptseg = shmd->shm_sptseg; 1447 sptd = sptseg->s_data; 1448 npages = (len >> PAGESHIFT); 1449 ASSERT(npages); 1450 ASSERT(sptd->spt_pcachecnt != 0); 1451 ASSERT(sptd->spt_ppa == pplist); 1452 ASSERT(npages == btopr(sptd->spt_amp->size)); 1453 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1454 1455 /* 1456 * Acquire the lock on the dummy seg and destroy the 1457 * ppa array IF this is the last pcachecnt. 1458 */ 1459 mutex_enter(&sptd->spt_lock); 1460 if (--sptd->spt_pcachecnt == 0) { 1461 for (i = 0; i < npages; i++) { 1462 if (pplist[i] == NULL) { 1463 continue; 1464 } 1465 if (rw == S_WRITE) { 1466 hat_setrefmod(pplist[i]); 1467 } else { 1468 hat_setref(pplist[i]); 1469 } 1470 if ((sptd->spt_flags & SHM_PAGEABLE) && 1471 (sptd->spt_ppa_lckcnt[i] == 0)) 1472 free_availrmem++; 1473 page_unlock(pplist[i]); 1474 } 1475 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) { 1476 mutex_enter(&freemem_lock); 1477 availrmem += free_availrmem; 1478 mutex_exit(&freemem_lock); 1479 } 1480 /* 1481 * Since we want to cach/uncache the entire ISM segment, 1482 * we will track the pplist in a segspt specific field 1483 * ppa, that is initialized at the time we add an entry to 1484 * the cache. 1485 */ 1486 ASSERT(sptd->spt_pcachecnt == 0); 1487 kmem_free(pplist, sizeof (page_t *) * npages); 1488 sptd->spt_ppa = NULL; 1489 sptd->spt_flags &= ~DISM_PPA_CHANGED; 1490 sptd->spt_gen++; 1491 cv_broadcast(&sptd->spt_cv); 1492 done = 1; 1493 } 1494 mutex_exit(&sptd->spt_lock); 1495 1496 /* 1497 * If we are pcache async thread or called via seg_ppurge_wiredpp() we 1498 * may not hold AS lock (in this case async argument is not 0). This 1499 * means if softlockcnt drops to 0 after the decrement below address 1500 * space may get freed. We can't allow it since after softlock 1501 * derement to 0 we still need to access as structure for possible 1502 * wakeup of unmap waiters. To prevent the disappearance of as we take 1503 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes 1504 * this mutex as a barrier to make sure this routine completes before 1505 * segment is freed. 
1506 * 1507 * The second complication we have to deal with in async case is a 1508 * possibility of missed wake up of unmap wait thread. When we don't 1509 * hold as lock here we may take a_contents lock before unmap wait 1510 * thread that was first to see softlockcnt was still not 0. As a 1511 * result we'll fail to wake up an unmap wait thread. To avoid this 1512 * race we set nounmapwait flag in as structure if we drop softlockcnt 1513 * to 0 if async is not 0. unmapwait thread 1514 * will not block if this flag is set. 1515 */ 1516 if (async) 1517 mutex_enter(&shmd->shm_segfree_syncmtx); 1518 1519 /* 1520 * Now decrement softlockcnt. 1521 */ 1522 ASSERT(shmd->shm_softlockcnt > 0); 1523 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1); 1524 1525 if (shmd->shm_softlockcnt <= 0) { 1526 if (async || AS_ISUNMAPWAIT(seg->s_as)) { 1527 mutex_enter(&seg->s_as->a_contents); 1528 if (async) 1529 AS_SETNOUNMAPWAIT(seg->s_as); 1530 if (AS_ISUNMAPWAIT(seg->s_as)) { 1531 AS_CLRUNMAPWAIT(seg->s_as); 1532 cv_broadcast(&seg->s_as->a_cv); 1533 } 1534 mutex_exit(&seg->s_as->a_contents); 1535 } 1536 } 1537 1538 if (async) 1539 mutex_exit(&shmd->shm_segfree_syncmtx); 1540 1541 return (done); 1542 } 1543 1544 /* 1545 * Do a F_SOFTUNLOCK call over the range requested. 1546 * The range must have already been F_SOFTLOCK'ed. 1547 * 1548 * The calls to acquire and release the anon map lock mutex were 1549 * removed in order to avoid a deadly embrace during a DR 1550 * memory delete operation. (Eg. DR blocks while waiting for a 1551 * exclusive lock on a page that is being used for kaio; the 1552 * thread that will complete the kaio and call segspt_softunlock 1553 * blocks on the anon map lock; another thread holding the anon 1554 * map lock blocks on another page lock via the segspt_shmfault 1555 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.) 1556 * 1557 * The appropriateness of the removal is based upon the following: 1558 * 1. If we are holding a segment's reader lock and the page is held 1559 * shared, then the corresponding element in anonmap which points to 1560 * anon struct cannot change and there is no need to acquire the 1561 * anonymous map lock. 1562 * 2. Threads in segspt_softunlock have a reader lock on the segment 1563 * and already have the shared page lock, so we are guaranteed that 1564 * the anon map slot cannot change and therefore can call anon_get_ptr() 1565 * without grabbing the anonymous map lock. 1566 * 3. Threads that softlock a shared page break copy-on-write, even if 1567 * its a read. Thus cow faults can be ignored with respect to soft 1568 * unlocking, since the breaking of cow means that the anon slot(s) will 1569 * not be shared. 1570 */ 1571 static void 1572 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr, 1573 size_t len, enum seg_rw rw) 1574 { 1575 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1576 struct seg *sptseg; 1577 struct spt_data *sptd; 1578 page_t *pp; 1579 caddr_t adr; 1580 struct vnode *vp; 1581 u_offset_t offset; 1582 ulong_t anon_index; 1583 struct anon_map *amp; /* XXX - for locknest */ 1584 struct anon *ap = NULL; 1585 pgcnt_t npages; 1586 1587 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1588 1589 sptseg = shmd->shm_sptseg; 1590 sptd = sptseg->s_data; 1591 1592 /* 1593 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 1594 * and therefore their pages are SE_SHARED locked 1595 * for the entire life of the segment. 
1596 */ 1597 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) && 1598 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) { 1599 goto softlock_decrement; 1600 } 1601 1602 /* 1603 * Any thread is free to do a page_find and 1604 * page_unlock() on the pages within this seg. 1605 * 1606 * We are already holding the as->a_lock on the user's 1607 * real segment, but we need to hold the a_lock on the 1608 * underlying dummy as. This is mostly to satisfy the 1609 * underlying HAT layer. 1610 */ 1611 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1612 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len); 1613 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1614 1615 amp = sptd->spt_amp; 1616 ASSERT(amp != NULL); 1617 anon_index = seg_page(sptseg, sptseg_addr); 1618 1619 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) { 1620 ap = anon_get_ptr(amp->ahp, anon_index++); 1621 ASSERT(ap != NULL); 1622 swap_xlate(ap, &vp, &offset); 1623 1624 /* 1625 * Use page_find() instead of page_lookup() to 1626 * find the page since we know that it has a 1627 * "shared" lock. 1628 */ 1629 pp = page_find(vp, offset); 1630 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1)); 1631 if (pp == NULL) { 1632 panic("segspt_softunlock: " 1633 "addr %p, ap %p, vp %p, off %llx", 1634 (void *)adr, (void *)ap, (void *)vp, offset); 1635 /*NOTREACHED*/ 1636 } 1637 1638 if (rw == S_WRITE) { 1639 hat_setrefmod(pp); 1640 } else if (rw != S_OTHER) { 1641 hat_setref(pp); 1642 } 1643 page_unlock(pp); 1644 } 1645 1646 softlock_decrement: 1647 npages = btopr(len); 1648 ASSERT(shmd->shm_softlockcnt >= npages); 1649 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages); 1650 if (shmd->shm_softlockcnt == 0) { 1651 /* 1652 * All SOFTLOCKS are gone. Wakeup any waiting 1653 * unmappers so they can try again to unmap. 1654 * Check for waiters first without the mutex 1655 * held so we don't always grab the mutex on 1656 * softunlocks. 
1657 */ 1658 if (AS_ISUNMAPWAIT(seg->s_as)) { 1659 mutex_enter(&seg->s_as->a_contents); 1660 if (AS_ISUNMAPWAIT(seg->s_as)) { 1661 AS_CLRUNMAPWAIT(seg->s_as); 1662 cv_broadcast(&seg->s_as->a_cv); 1663 } 1664 mutex_exit(&seg->s_as->a_contents); 1665 } 1666 } 1667 } 1668 1669 int 1670 segspt_shmattach(struct seg *seg, caddr_t *argsp) 1671 { 1672 struct shm_data *shmd_arg = (struct shm_data *)argsp; 1673 struct shm_data *shmd; 1674 struct anon_map *shm_amp = shmd_arg->shm_amp; 1675 struct spt_data *sptd; 1676 int error = 0; 1677 1678 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1679 1680 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); 1681 if (shmd == NULL) 1682 return (ENOMEM); 1683 1684 shmd->shm_sptas = shmd_arg->shm_sptas; 1685 shmd->shm_amp = shm_amp; 1686 shmd->shm_sptseg = shmd_arg->shm_sptseg; 1687 1688 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0, 1689 NULL, 0, seg->s_size); 1690 1691 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL); 1692 1693 seg->s_data = (void *)shmd; 1694 seg->s_ops = &segspt_shmops; 1695 seg->s_szc = shmd->shm_sptseg->s_szc; 1696 sptd = shmd->shm_sptseg->s_data; 1697 1698 if (sptd->spt_flags & SHM_PAGEABLE) { 1699 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size), 1700 KM_NOSLEEP)) == NULL) { 1701 seg->s_data = (void *)NULL; 1702 kmem_free(shmd, (sizeof (*shmd))); 1703 return (ENOMEM); 1704 } 1705 shmd->shm_lckpgs = 0; 1706 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 1707 if ((error = hat_share(seg->s_as->a_hat, seg->s_base, 1708 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1709 seg->s_size, seg->s_szc)) != 0) { 1710 kmem_free(shmd->shm_vpage, 1711 btopr(shm_amp->size)); 1712 } 1713 } 1714 } else { 1715 error = hat_share(seg->s_as->a_hat, seg->s_base, 1716 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1717 seg->s_size, seg->s_szc); 1718 } 1719 if (error) { 1720 seg->s_szc = 0; 1721 seg->s_data = (void *)NULL; 1722 kmem_free(shmd, (sizeof (*shmd))); 1723 } else { 1724 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1725 shm_amp->refcnt++; 1726 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1727 } 1728 return (error); 1729 } 1730 1731 int 1732 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) 1733 { 1734 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1735 int reclaim = 1; 1736 1737 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1738 retry: 1739 if (shmd->shm_softlockcnt > 0) { 1740 if (reclaim == 1) { 1741 segspt_purge(seg); 1742 reclaim = 0; 1743 goto retry; 1744 } 1745 return (EAGAIN); 1746 } 1747 1748 if (ssize != seg->s_size) { 1749 #ifdef DEBUG 1750 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n", 1751 ssize, seg->s_size); 1752 #endif 1753 return (EINVAL); 1754 } 1755 1756 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK, 1757 NULL, 0); 1758 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc); 1759 1760 seg_free(seg); 1761 1762 return (0); 1763 } 1764 1765 void 1766 segspt_shmfree(struct seg *seg) 1767 { 1768 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1769 struct anon_map *shm_amp = shmd->shm_amp; 1770 1771 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1772 1773 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0, 1774 MC_UNLOCK, NULL, 0); 1775 1776 /* 1777 * Need to increment refcnt when attaching 1778 * and decrement when detaching because of dup(). 
1779 */ 1780 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1781 shm_amp->refcnt--; 1782 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1783 1784 if (shmd->shm_vpage) { /* only for DISM */ 1785 kmem_free(shmd->shm_vpage, btopr(shm_amp->size)); 1786 shmd->shm_vpage = NULL; 1787 } 1788 1789 /* 1790 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's 1791 * still working with this segment without holding as lock. 1792 */ 1793 ASSERT(shmd->shm_softlockcnt == 0); 1794 mutex_enter(&shmd->shm_segfree_syncmtx); 1795 mutex_destroy(&shmd->shm_segfree_syncmtx); 1796 1797 kmem_free(shmd, sizeof (*shmd)); 1798 } 1799 1800 /*ARGSUSED*/ 1801 int 1802 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 1803 { 1804 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1805 1806 /* 1807 * Shared page table is more than shared mapping. 1808 * Individual process sharing page tables can't change prot 1809 * because there is only one set of page tables. 1810 * This will be allowed after private page table is 1811 * supported. 1812 */ 1813 /* need to return correct status error? */ 1814 return (0); 1815 } 1816 1817 1818 faultcode_t 1819 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr, 1820 size_t len, enum fault_type type, enum seg_rw rw) 1821 { 1822 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1823 struct seg *sptseg = shmd->shm_sptseg; 1824 struct as *curspt = shmd->shm_sptas; 1825 struct spt_data *sptd = sptseg->s_data; 1826 pgcnt_t npages; 1827 size_t size; 1828 caddr_t segspt_addr, shm_addr; 1829 page_t **ppa; 1830 int i; 1831 ulong_t an_idx = 0; 1832 int err = 0; 1833 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0); 1834 size_t pgsz; 1835 pgcnt_t pgcnt; 1836 caddr_t a; 1837 pgcnt_t pidx; 1838 1839 #ifdef lint 1840 hat = hat; 1841 #endif 1842 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1843 1844 /* 1845 * Because of the way spt is implemented 1846 * the realsize of the segment does not have to be 1847 * equal to the segment size itself. The segment size is 1848 * often in multiples of a page size larger than PAGESIZE. 1849 * The realsize is rounded up to the nearest PAGESIZE 1850 * based on what the user requested. This is a bit of 1851 * ungliness that is historical but not easily fixed 1852 * without re-designing the higher levels of ISM. 1853 */ 1854 ASSERT(addr >= seg->s_base); 1855 if (((addr + len) - seg->s_base) > sptd->spt_realsize) 1856 return (FC_NOMAP); 1857 /* 1858 * For all of the following cases except F_PROT, we need to 1859 * make any necessary adjustments to addr and len 1860 * and get all of the necessary page_t's into an array called ppa[]. 1861 * 1862 * The code in shmat() forces base addr and len of ISM segment 1863 * to be aligned to largest page size supported. Therefore, 1864 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large 1865 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 1866 * in large pagesize chunks, or else we will screw up the HAT 1867 * layer by calling hat_memload_array() with differing page sizes 1868 * over a given virtual range. 1869 */ 1870 pgsz = page_get_pagesize(sptseg->s_szc); 1871 pgcnt = page_get_pagecnt(sptseg->s_szc); 1872 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); 1873 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz); 1874 npages = btopr(size); 1875 1876 /* 1877 * Now we need to convert from addr in segshm to addr in segspt. 
1878 */ 1879 an_idx = seg_page(seg, shm_addr); 1880 segspt_addr = sptseg->s_base + ptob(an_idx); 1881 1882 ASSERT((segspt_addr + ptob(npages)) <= 1883 (sptseg->s_base + sptd->spt_realsize)); 1884 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size)); 1885 1886 switch (type) { 1887 1888 case F_SOFTLOCK: 1889 1890 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages); 1891 /* 1892 * Fall through to the F_INVAL case to load up the hat layer 1893 * entries with the HAT_LOAD_LOCK flag. 1894 */ 1895 /* FALLTHRU */ 1896 case F_INVAL: 1897 1898 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 1899 return (FC_NOMAP); 1900 1901 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP); 1902 1903 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa); 1904 if (err != 0) { 1905 if (type == F_SOFTLOCK) { 1906 atomic_add_long((ulong_t *)( 1907 &(shmd->shm_softlockcnt)), -npages); 1908 } 1909 goto dism_err; 1910 } 1911 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1912 a = segspt_addr; 1913 pidx = 0; 1914 if (type == F_SOFTLOCK) { 1915 1916 /* 1917 * Load up the translation keeping it 1918 * locked and don't unlock the page. 1919 */ 1920 for (; pidx < npages; a += pgsz, pidx += pgcnt) { 1921 hat_memload_array(sptseg->s_as->a_hat, 1922 a, pgsz, &ppa[pidx], sptd->spt_prot, 1923 HAT_LOAD_LOCK | HAT_LOAD_SHARE); 1924 } 1925 } else { 1926 if (hat == seg->s_as->a_hat) { 1927 1928 /* 1929 * Migrate pages marked for migration 1930 */ 1931 if (lgrp_optimizations()) 1932 page_migrate(seg, shm_addr, ppa, 1933 npages); 1934 1935 /* CPU HAT */ 1936 for (; pidx < npages; 1937 a += pgsz, pidx += pgcnt) { 1938 hat_memload_array(sptseg->s_as->a_hat, 1939 a, pgsz, &ppa[pidx], 1940 sptd->spt_prot, 1941 HAT_LOAD_SHARE); 1942 } 1943 } else { 1944 /* XHAT. Pass real address */ 1945 hat_memload_array(hat, shm_addr, 1946 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE); 1947 } 1948 1949 /* 1950 * And now drop the SE_SHARED lock(s). 1951 */ 1952 if (dyn_ism_unmap) { 1953 for (i = 0; i < npages; i++) { 1954 page_unlock(ppa[i]); 1955 } 1956 } 1957 } 1958 1959 if (!dyn_ism_unmap) { 1960 if (hat_share(seg->s_as->a_hat, shm_addr, 1961 curspt->a_hat, segspt_addr, ptob(npages), 1962 seg->s_szc) != 0) { 1963 panic("hat_share err in DISM fault"); 1964 /* NOTREACHED */ 1965 } 1966 if (type == F_INVAL) { 1967 for (i = 0; i < npages; i++) { 1968 page_unlock(ppa[i]); 1969 } 1970 } 1971 } 1972 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1973 dism_err: 1974 kmem_free(ppa, npages * sizeof (page_t *)); 1975 return (err); 1976 1977 case F_SOFTUNLOCK: 1978 1979 /* 1980 * This is a bit ugly, we pass in the real seg pointer, 1981 * but the segspt_addr is the virtual address within the 1982 * dummy seg. 1983 */ 1984 segspt_softunlock(seg, segspt_addr, size, rw); 1985 return (0); 1986 1987 case F_PROT: 1988 1989 /* 1990 * This takes care of the unusual case where a user 1991 * allocates a stack in shared memory and a register 1992 * window overflow is written to that stack page before 1993 * it is otherwise modified. 1994 * 1995 * We can get away with this because ISM segments are 1996 * always rw. Other than this unusual case, there 1997 * should be no instances of protection violations. 
		 */
		return (0);

	default:
#ifdef DEBUG
		panic("segspt_dismfault default type?");
#else
		return (FC_NOMAP);
#endif
	}
}


faultcode_t
segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
    size_t len, enum fault_type type, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct as *curspt = shmd->shm_sptas;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t npages;
	size_t size;
	caddr_t sptseg_addr, shm_addr;
	page_t *pp, **ppa;
	int i;
	u_offset_t offset;
	ulong_t anon_index = 0;
	struct vnode *vp;
	struct anon_map *amp;		/* XXX - for locknest */
	struct anon *ap = NULL;
	size_t pgsz;
	pgcnt_t pgcnt;
	caddr_t a;
	pgcnt_t pidx;
	size_t sz;

#ifdef lint
	hat = hat;
#endif

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	if (sptd->spt_flags & SHM_PAGEABLE) {
		return (segspt_dismfault(hat, seg, addr, len, type, rw));
	}

	/*
	 * Because of the way spt is implemented
	 * the realsize of the segment does not have to be
	 * equal to the segment size itself. The segment size is
	 * often in multiples of a page size larger than PAGESIZE.
	 * The realsize is rounded up to the nearest PAGESIZE
	 * based on what the user requested. This is a bit of
	 * ugliness that is historical but not easily fixed
	 * without re-designing the higher levels of ISM.
	 */
	ASSERT(addr >= seg->s_base);
	if (((addr + len) - seg->s_base) > sptd->spt_realsize)
		return (FC_NOMAP);
	/*
	 * For all of the following cases except F_PROT, we need to
	 * make any necessary adjustments to addr and len
	 * and get all of the necessary page_t's into an array called ppa[].
	 *
	 * The code in shmat() forces base addr and len of ISM segment
	 * to be aligned to largest page size supported. Therefore,
	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
	 * in large pagesize chunks, or else we will screw up the HAT
	 * layer by calling hat_memload_array() with differing page sizes
	 * over a given virtual range.
	 */
	pgsz = page_get_pagesize(sptseg->s_szc);
	pgcnt = page_get_pagecnt(sptseg->s_szc);
	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
	npages = btopr(size);

	/*
	 * Now we need to convert from addr in segshm to addr in segspt.
	 */
	anon_index = seg_page(seg, shm_addr);
	sptseg_addr = sptseg->s_base + ptob(anon_index);

	/*
	 * And now we may have to adjust npages downward if we have
	 * exceeded the realsize of the segment or initial anon
	 * allocations.
	 */
	if ((sptseg_addr + ptob(npages)) >
	    (sptseg->s_base + sptd->spt_realsize))
		size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;

	npages = btopr(size);

	ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
	ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);

	switch (type) {

	case F_SOFTLOCK:

		/*
		 * availrmem is decremented once during anon_swap_adjust()
		 * and is incremented during the anon_unresv(), which is
		 * called from shm_rm_amp() when the segment is destroyed.
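		 *
		 * For example, a driver soft-locking user buffers for I/O
		 * (the as_pagelock()/as_fault(F_SOFTLOCK) path) lands here
		 * and raises shm_softlockcnt by npages; the matching
		 * F_SOFTUNLOCK below drops it by the same amount, so no
		 * further availrmem accounting is needed on this path.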
		 */
		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
		/*
		 * Some platforms assume that ISM pages are SE_SHARED
		 * locked for the entire life of the segment.
		 */
		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
			return (0);
		/*
		 * Fall through to the F_INVAL case to load up the hat layer
		 * entries with the HAT_LOAD_LOCK flag.
		 */

		/* FALLTHRU */
	case F_INVAL:

		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
			return (FC_NOMAP);

		/*
		 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
		 * may still rely on this call to hat_share(). That
		 * would imply that those hat's can fault on a
		 * HAT_LOAD_LOCK translation, which would seem
		 * contradictory.
		 */
		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
			if (hat_share(seg->s_as->a_hat, seg->s_base,
			    curspt->a_hat, sptseg->s_base,
			    sptseg->s_size, sptseg->s_szc) != 0) {
				panic("hat_share error in ISM fault");
				/*NOTREACHED*/
			}
			return (0);
		}
		ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);

		/*
		 * I see no need to lock the real seg here, because all
		 * of our work will be on the underlying dummy seg.
		 *
		 * sptseg_addr and npages now account for large pages.
		 */
		amp = sptd->spt_amp;
		ASSERT(amp != NULL);
		anon_index = seg_page(sptseg, sptseg_addr);

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++) {
			ap = anon_get_ptr(amp->ahp, anon_index++);
			ASSERT(ap != NULL);
			swap_xlate(ap, &vp, &offset);
			pp = page_lookup(vp, offset, SE_SHARED);
			ASSERT(pp != NULL);
			ppa[i] = pp;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		ASSERT(i == npages);

		/*
		 * We are already holding the as->a_lock on the user's
		 * real segment, but we need to hold the a_lock on the
		 * underlying dummy as. This is mostly to satisfy the
		 * underlying HAT layer.
		 */
		AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
		a = sptseg_addr;
		pidx = 0;
		if (type == F_SOFTLOCK) {
			/*
			 * Load up the translation keeping it
			 * locked and don't unlock the page.
			 */
			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
				sz = MIN(pgsz, ptob(npages - pidx));
				hat_memload_array(sptseg->s_as->a_hat, a,
				    sz, &ppa[pidx], sptd->spt_prot,
				    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
			}
		} else {
			if (hat == seg->s_as->a_hat) {

				/*
				 * Migrate pages marked for migration.
				 */
				if (lgrp_optimizations())
					page_migrate(seg, shm_addr, ppa,
					    npages);

				/* CPU HAT */
				for (; pidx < npages;
				    a += pgsz, pidx += pgcnt) {
					sz = MIN(pgsz, ptob(npages - pidx));
					hat_memload_array(sptseg->s_as->a_hat,
					    a, sz, &ppa[pidx],
					    sptd->spt_prot, HAT_LOAD_SHARE);
				}
			} else {
				/* XHAT. Pass real address */
				hat_memload_array(hat, shm_addr,
				    ptob(npages), ppa, sptd->spt_prot,
				    HAT_LOAD_SHARE);
			}

			/*
			 * And now drop the SE_SHARED lock(s).
			 */
			for (i = 0; i < npages; i++)
				page_unlock(ppa[i]);
		}
		AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);

		kmem_free(ppa, sizeof (page_t *) * npages);
		return (0);
	case F_SOFTUNLOCK:

		/*
		 * This is a bit ugly, we pass in the real seg pointer,
		 * but the sptseg_addr is the virtual address within the
		 * dummy seg.
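		 *
		 * For example, the npages pages locked by the F_SOFTLOCK
		 * case above are unlocked here with the same (possibly
		 * realsize-clamped) ptob(npages) length, and
		 * segspt_softunlock() takes care of decrementing
		 * shm_softlockcnt and waking any as_unmap() waiters.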
		 */
		segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
		return (0);

	case F_PROT:

		/*
		 * This takes care of the unusual case where a user
		 * allocates a stack in shared memory and a register
		 * window overflow is written to that stack page before
		 * it is otherwise modified.
		 *
		 * We can get away with this because ISM segments are
		 * always rw. Other than this unusual case, there
		 * should be no instances of protection violations.
		 */
		return (0);

	default:
#ifdef DEBUG
		cmn_err(CE_WARN, "segspt_shmfault default type?");
#endif
		return (FC_NOMAP);
	}
}

/*ARGSUSED*/
static faultcode_t
segspt_shmfaulta(struct seg *seg, caddr_t addr)
{
	return (0);
}

/*ARGSUSED*/
static int
segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

/*ARGSUSED*/
static size_t
segspt_shmswapout(struct seg *seg)
{
	return (0);
}

/*
 * duplicate the shared page tables
 */
int
segspt_shmdup(struct seg *seg, struct seg *newseg)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon_map *amp = shmd->shm_amp;
	struct shm_data *shmd_new;
	struct seg *spt_seg = shmd->shm_sptseg;
	struct spt_data *sptd = spt_seg->s_data;
	int error = 0;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
	newseg->s_data = (void *)shmd_new;
	shmd_new->shm_sptas = shmd->shm_sptas;
	shmd_new->shm_amp = amp;
	shmd_new->shm_sptseg = shmd->shm_sptseg;
	newseg->s_ops = &segspt_shmops;
	newseg->s_szc = seg->s_szc;
	ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	amp->refcnt++;
	ANON_LOCK_EXIT(&amp->a_rwlock);

	if (sptd->spt_flags & SHM_PAGEABLE) {
		shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
		shmd_new->shm_lckpgs = 0;
		if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
			if ((error = hat_share(newseg->s_as->a_hat,
			    newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
			    seg->s_size, seg->s_szc)) != 0) {
				kmem_free(shmd_new->shm_vpage,
				    btopr(amp->size));
			}
		}
		return (error);
	} else {
		return (hat_share(newseg->s_as->a_hat, newseg->s_base,
		    shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
		    seg->s_szc));

	}
}

/*ARGSUSED*/
int
segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * ISM segment is always rw.
	 */
	return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
}

/*
 * Return an array of locked large pages, for empty slots allocate
 * private zero-filled anon pages.
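 *
 * For example, segspt_dismfault() above calls
 *	err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
 * with size a multiple of the dummy segment's large page size; on success
 * every slot of ppa[] holds a page locked SE_SHARED, and on failure any
 * pages already obtained are unlocked before the error is returned.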
 */
static int
spt_anon_getpages(
	struct seg *sptseg,
	caddr_t sptaddr,
	size_t len,
	page_t *ppa[])
{
	struct spt_data *sptd = sptseg->s_data;
	struct anon_map *amp = sptd->spt_amp;
	enum seg_rw rw = sptd->spt_prot;
	uint_t szc = sptseg->s_szc;
	size_t pg_sz, share_sz = page_get_pagesize(szc);
	pgcnt_t lp_npgs;
	caddr_t lp_addr, e_sptaddr;
	uint_t vpprot, ppa_szc = 0;
	struct vpage *vpage = NULL;
	ulong_t j, ppa_idx;
	int err, ierr = 0;
	pgcnt_t an_idx;
	anon_sync_obj_t cookie;
	int anon_locked = 0;
	pgcnt_t amp_pgs;

	ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
	ASSERT(len != 0);

	pg_sz = share_sz;
	lp_npgs = btop(pg_sz);
	lp_addr = sptaddr;
	e_sptaddr = sptaddr + len;
	an_idx = seg_page(sptseg, sptaddr);
	ppa_idx = 0;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);

	amp_pgs = page_get_pagecnt(amp->a_szc);

	/*CONSTCOND*/
	while (1) {
		for (; lp_addr < e_sptaddr;
		    an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {

			/*
			 * If we're currently locked, and we get to a new
			 * page, unlock our current anon chunk.
			 */
			if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
				anon_array_exit(&cookie);
				anon_locked = 0;
			}
			if (!anon_locked) {
				anon_array_enter(amp, an_idx, &cookie);
				anon_locked = 1;
			}
			ppa_szc = (uint_t)-1;
			ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
			    lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
			    &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);

			if (ierr != 0) {
				if (ierr > 0) {
					err = FC_MAKE_ERR(ierr);
					goto lpgs_err;
				}
				break;
			}
		}
		if (lp_addr == e_sptaddr) {
			break;
		}
		ASSERT(lp_addr < e_sptaddr);

		/*
		 * ierr == -1 means we failed to allocate a large page,
		 * so do a size down operation.
		 *
		 * ierr == -2 means some other process that privately shares
		 * pages with this process has allocated a larger page and we
		 * need to retry with larger pages. So do a size up
		 * operation. This relies on the fact that large pages are
		 * never partially shared i.e. if we share any constituent
		 * page of a large page with another process we must share the
		 * entire large page. Note this cannot happen for SOFTLOCK
		 * case, unless current address (lpaddr) is at the beginning
		 * of the next page size boundary because the other process
		 * couldn't have relocated locked pages.
		 */
		ASSERT(ierr == -1 || ierr == -2);
		if (segvn_anypgsz) {
			ASSERT(ierr == -2 || szc != 0);
			ASSERT(ierr == -1 || szc < sptseg->s_szc);
			szc = (ierr == -1) ? szc - 1 : szc + 1;
		} else {
			/*
			 * For faults and segvn_anypgsz == 0
			 * we need to be careful not to loop forever
			 * if existing page is found with szc other
			 * than 0 or seg->s_szc. This could be due
			 * to page relocations on behalf of DR or
			 * more likely large page creation. For this
			 * case simply re-size to existing page's szc
			 * if returned by anon_map_getpages().
			 */
			if (ppa_szc == (uint_t)-1) {
				szc = (ierr == -1) ?
				    0 : sptseg->s_szc;
			} else {
				ASSERT(ppa_szc <= sptseg->s_szc);
				ASSERT(ierr == -2 || ppa_szc < szc);
				ASSERT(ierr == -1 || ppa_szc > szc);
				szc = ppa_szc;
			}
		}
		pg_sz = page_get_pagesize(szc);
		lp_npgs = btop(pg_sz);
		ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
	}
	if (anon_locked) {
		anon_array_exit(&cookie);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
	return (0);

lpgs_err:
	if (anon_locked) {
		anon_array_exit(&cookie);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
	for (j = 0; j < ppa_idx; j++)
		page_unlock(ppa[j]);
	return (err);
}

/*
 * count the number of bytes in a set of spt pages that are currently not
 * locked
 */
static rctl_qty_t
spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
{
	ulong_t	i;
	rctl_qty_t unlocked = 0;

	for (i = 0; i < npages; i++) {
		if (ppa[i]->p_lckcnt == 0)
			unlocked += PAGESIZE;
	}
	return (unlocked);
}

int
spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
    page_t **ppa, ulong_t *lockmap, size_t pos,
    rctl_qty_t *locked)
{
	struct shm_data *shmd = seg->s_data;
	struct spt_data *sptd = shmd->shm_sptseg->s_data;
	ulong_t	i;
	int	kernel;

	/* return the number of bytes actually locked */
	*locked = 0;
	for (i = 0; i < npages; anon_index++, pos++, i++) {
		if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
			if (sptd->spt_ppa_lckcnt[anon_index] <
			    (ushort_t)DISM_LOCK_MAX) {
				if (++sptd->spt_ppa_lckcnt[anon_index] ==
				    (ushort_t)DISM_LOCK_MAX) {
					cmn_err(CE_WARN,
					    "DISM page lock limit "
					    "reached on DISM offset 0x%lx\n",
					    anon_index << PAGESHIFT);
				}
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[anon_index]) ? 1 : 0;
				if (!page_pp_lock(ppa[i], 0, kernel)) {
					sptd->spt_ppa_lckcnt[anon_index]--;
					return (EAGAIN);
				}
				/* if this is a newly locked page, count it */
				if (ppa[i]->p_lckcnt == 1) {
					*locked += PAGESIZE;
				}
				shmd->shm_lckpgs++;
				shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
				if (lockmap != NULL)
					BT_SET(lockmap, pos);
			}
		}
	}
	return (0);
}

/*ARGSUSED*/
static int
segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos)
{
	struct shm_data *shmd = seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	struct kshmid *sp = sptd->spt_amp->a_sp;
	pgcnt_t npages, a_npages;
	page_t **ppa;
	pgcnt_t an_idx, a_an_idx, ppa_idx;
	caddr_t	spt_addr, a_addr;	/* spt and aligned address */
	size_t	a_len;			/* aligned len */
	size_t	share_sz;
	ulong_t	i;
	int sts = 0;
	rctl_qty_t unlocked = 0;
	rctl_qty_t locked = 0;
	struct proc *p = curproc;
	kproject_t *proj;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(sp != NULL);

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		return (0);
	}

	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	an_idx = seg_page(seg, addr);
	npages = btopr(len);

	if (an_idx + npages > btopr(shmd->shm_amp->size)) {
		return (ENOMEM);
	}

	/*
	 * A shm's project never changes, so no lock needed.
	 * The shm has a hold on the project, so it will not go away.
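	 *
	 * As a worked example of the accounting below: locking 100 8K pages
	 * of which 40 are already locked charges only the
	 * spt_unlockedbytes() result, 60 * PAGESIZE, against the project's
	 * locked-memory rctl; if spt_lockpages() then manages to lock fewer
	 * bytes than that, the difference is handed back through
	 * rctl_decr_locked_mem().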
	 * Since we have a mapping to shm within this zone, we know
	 * that the zone will not go away.
	 */
	proj = sp->shm_perm.ipc_proj;

	if (op == MC_LOCK) {

		/*
		 * Need to align addr and size request if they are not
		 * aligned, so we can always allocate large page(s);
		 * however, we only lock what was requested in the
		 * initial request.
		 */
		share_sz = page_get_pagesize(sptseg->s_szc);
		a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
		a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
		    share_sz);
		a_npages = btop(a_len);
		a_an_idx = seg_page(seg, a_addr);
		spt_addr = sptseg->s_base + ptob(a_an_idx);
		ppa_idx = an_idx - a_an_idx;

		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
		    KM_NOSLEEP)) == NULL) {
			return (ENOMEM);
		}

		/*
		 * Don't cache any new pages for IO and
		 * flush any cached pages.
		 */
		mutex_enter(&sptd->spt_lock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
		if (sts != 0) {
			mutex_exit(&sptd->spt_lock);
			kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
			return (sts);
		}

		mutex_enter(&sp->shm_mlock);
		/* enforce locked memory rctl */
		unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);

		mutex_enter(&p->p_lock);
		if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
			mutex_exit(&p->p_lock);
			sts = EAGAIN;
		} else {
			mutex_exit(&p->p_lock);
			sts = spt_lockpages(seg, an_idx, npages,
			    &ppa[ppa_idx], lockmap, pos, &locked);

			/*
			 * correct locked count if not all pages could be
			 * locked
			 */
			if ((unlocked - locked) > 0) {
				rctl_decr_locked_mem(NULL, proj,
				    (unlocked - locked), 0);
			}
		}
		/*
		 * unlock pages
		 */
		for (i = 0; i < a_npages; i++)
			page_unlock(ppa[i]);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sp->shm_mlock);
		mutex_exit(&sptd->spt_lock);

		kmem_free(ppa, ((sizeof (page_t *)) * a_npages));

	} else if (op == MC_UNLOCK) { /* unlock */
		struct anon_map *amp;
		struct anon	*ap;
		struct vnode	*vp;
		u_offset_t	off;
		struct page	*pp;
		int		kernel;
		anon_sync_obj_t	cookie;
		rctl_qty_t	unlocked = 0;
		page_t		**ppa;

		amp = sptd->spt_amp;
		mutex_enter(&sptd->spt_lock);
		if (shmd->shm_lckpgs == 0) {
			mutex_exit(&sptd->spt_lock);
			return (0);
		}
		/*
		 * Don't cache new IO pages.
		 */
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		mutex_enter(&sp->shm_mlock);
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, an_idx++) {
			if (shmd->shm_vpage[an_idx] & DISM_PG_LOCKED) {
				anon_array_enter(amp, an_idx, &cookie);
				ap = anon_get_ptr(amp->ahp, an_idx);
				ASSERT(ap);

				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup(vp, off, SE_SHARED);
				ASSERT(pp);
				/*
				 * the availrmem is decremented only for
				 * pages which are not in seg pcache,
				 * for pages in seg pcache availrmem was
				 * decremented in _dismpagelock() (if
				 * they were not locked here)
				 */
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[an_idx]) ?
				    1 : 0;
				ASSERT(pp->p_lckcnt > 0);
				page_pp_unlock(pp, 0, kernel);
				if (pp->p_lckcnt == 0)
					unlocked += PAGESIZE;
				page_unlock(pp);
				shmd->shm_vpage[an_idx] &= ~DISM_PG_LOCKED;
				sptd->spt_ppa_lckcnt[an_idx]--;
				shmd->shm_lckpgs--;
			}
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		if ((ppa = sptd->spt_ppa) != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);

		rctl_decr_locked_mem(NULL, proj, unlocked, 0);
		mutex_exit(&sp->shm_mlock);

		if (ppa != NULL)
			seg_ppurge_wiredpp(ppa);
	}
	return (sts);
}

/*ARGSUSED*/
int
segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * ISM segment is always rw.
	 */
	while (--pgno >= 0)
		*protv++ = sptd->spt_prot;
	return (0);
}

/*ARGSUSED*/
u_offset_t
segspt_shmgetoffset(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/* Offset does not matter in ISM memory */

	return ((u_offset_t)0);
}

/* ARGSUSED */
int
segspt_shmgettype(struct seg *seg, caddr_t addr)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * The shared memory mapping is always MAP_SHARED, SWAP is only
	 * reserved for DISM
	 */
	return (MAP_SHARED |
	    ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
}

/*ARGSUSED*/
int
segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	*vpp = sptd->spt_vp;
	return (0);
}

/*
 * We need to wait for pending IO to complete to a DISM segment in order for
 * pages to get kicked out of the seg_pcache. 120 seconds should be more
 * than enough time to wait.
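 *
 * spt_pcache_wait is expressed in seconds; segspt_shmadvise() below converts
 * it to ticks with (hz * spt_pcache_wait), so with the common hz value of 100
 * the MADV_FREE path waits at most 12,000 ticks for DISM_PPA_CHANGED to clear
 * before disclaiming the anon pages anyway.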
 */
static clock_t spt_pcache_wait = 120;

/*ARGSUSED*/
static int
segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	struct shm_data	*shmd = (struct shm_data *)seg->s_data;
	struct spt_data	*sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	struct anon_map	*amp;
	pgcnt_t pg_idx;
	ushort_t gen;
	clock_t	end_lbolt;
	int writer;
	page_t **ppa;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	if (behav == MADV_FREE) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
			return (0);

		amp = sptd->spt_amp;
		pg_idx = seg_page(seg, addr);

		mutex_enter(&sptd->spt_lock);
		if ((ppa = sptd->spt_ppa) == NULL) {
			mutex_exit(&sptd->spt_lock);
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			anon_disclaim(amp, pg_idx, len);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			return (0);
		}

		sptd->spt_flags |= DISM_PPA_CHANGED;
		gen = sptd->spt_gen;

		mutex_exit(&sptd->spt_lock);

		/*
		 * Purge all DISM cached pages
		 */
		seg_ppurge_wiredpp(ppa);

		/*
		 * Drop the AS_LOCK so that other threads can grab it
		 * in the as_pageunlock path and hopefully get the segment
		 * kicked out of the seg_pcache.  We bump the shm_softlockcnt
		 * to keep this segment resident.
		 */
		writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
		AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);

		mutex_enter(&sptd->spt_lock);

		end_lbolt = lbolt + (hz * spt_pcache_wait);

		/*
		 * Try to wait for pages to get kicked out of the seg_pcache.
		 */
		while (sptd->spt_gen == gen &&
		    (sptd->spt_flags & DISM_PPA_CHANGED) &&
		    lbolt < end_lbolt) {
			if (!cv_timedwait_sig(&sptd->spt_cv,
			    &sptd->spt_lock, end_lbolt)) {
				break;
			}
		}

		mutex_exit(&sptd->spt_lock);

		/* Regrab the AS_LOCK and release our hold on the segment */
		AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
		    writer ? RW_WRITER : RW_READER);
		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1);
		if (shmd->shm_softlockcnt <= 0) {
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				mutex_enter(&seg->s_as->a_contents);
				if (AS_ISUNMAPWAIT(seg->s_as)) {
					AS_CLRUNMAPWAIT(seg->s_as);
					cv_broadcast(&seg->s_as->a_cv);
				}
				mutex_exit(&seg->s_as->a_contents);
			}
		}

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		anon_disclaim(amp, pg_idx, len);
		ANON_LOCK_EXIT(&amp->a_rwlock);
	} else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
	    behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
		int			already_set;
		ulong_t			anon_index;
		lgrp_mem_policy_t	policy;
		caddr_t			shm_addr;
		size_t			share_size;
		size_t			size;
		struct seg		*sptseg = shmd->shm_sptseg;
		caddr_t			sptseg_addr;

		/*
		 * Align address and length to page size of underlying segment
		 */
		share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
		shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
		size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
		    share_size);

		amp = shmd->shm_amp;
		anon_index = seg_page(seg, shm_addr);

		/*
		 * And now we may have to adjust size downward if we have
		 * exceeded the realsize of the segment or initial anon
		 * allocations.
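		 *
		 * For example (illustrative numbers): with a 4M share page
		 * size and spt_realsize of 6M, advice on the last megabyte
		 * of the segment is first rounded out to the 4M..8M range
		 * above, and then clamped back to 6M - 4M = 2M here so we
		 * never reach past the pages that really exist.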
		 */
		sptseg_addr = sptseg->s_base + ptob(anon_index);
		if ((sptseg_addr + size) >
		    (sptseg->s_base + sptd->spt_realsize))
			size = (sptseg->s_base + sptd->spt_realsize) -
			    sptseg_addr;

		/*
		 * Set memory allocation policy for this segment
		 */
		policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
		already_set = lgrp_shm_policy_set(policy, amp, anon_index,
		    NULL, 0, len);

		/*
		 * If random memory allocation policy set already,
		 * don't bother reapplying it.
		 */
		if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
			return (0);

		/*
		 * Mark any existing pages in the given range for migration,
		 * flush the I/O page cache, and use the underlying segment
		 * to calculate the anon index and to get the anonmap and
		 * vnode pointer from.
		 */
		if (shmd->shm_softlockcnt > 0)
			segspt_purge(seg);

		page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
	}

	return (0);
}

/*ARGSUSED*/
void
segspt_shmdump(struct seg *seg)
{
	/* no-op for ISM segment */
}

/*ARGSUSED*/
static faultcode_t
segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	return (ENOTSUP);
}

/*
 * get a memory ID for an addr in a given segment
 */
static int
segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon	*ap;
	size_t		anon_index;
	struct anon_map	*amp = shmd->shm_amp;
	struct spt_data	*sptd = shmd->shm_sptseg->s_data;
	struct seg	*sptseg = shmd->shm_sptseg;
	anon_sync_obj_t	cookie;

	anon_index = seg_page(seg, addr);

	if (addr > (seg->s_base + sptd->spt_realsize)) {
		return (EFAULT);
	}

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	anon_array_enter(amp, anon_index, &cookie);
	ap = anon_get_ptr(amp->ahp, anon_index);
	if (ap == NULL) {
		struct page *pp;
		caddr_t spt_addr = sptseg->s_base + ptob(anon_index);

		pp = anon_zero(sptseg, spt_addr, &ap, kcred);
		if (pp == NULL) {
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			return (ENOMEM);
		}
		(void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
		page_unlock(pp);
	}
	anon_array_exit(&cookie);
	ANON_LOCK_EXIT(&amp->a_rwlock);
	memidp->val[0] = (uintptr_t)ap;
	memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
	return (0);
}

/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map		*amp;
	ulong_t			anon_index;
	lgrp_mem_policy_info_t	*policy_info;
	struct shm_data		*shm_data;

	ASSERT(seg != NULL);

	/*
	 * Get anon_map from segshm
	 *
	 * Assume that no lock needs to be held on anon_map, since
	 * it should be protected by its reference count which must be
	 * nonzero for an existing segment
	 * Need to grab readers lock on policy tree though
	 */
	shm_data = (struct shm_data *)seg->s_data;
	if (shm_data == NULL)
		return (NULL);
	amp = shm_data->shm_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Get policy info
	 *
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL,
	    0);

	return (policy_info);
}

/*ARGSUSED*/
static int
segspt_shmcapable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
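
/*
 * The fault, lockop, and advise entry points above all normalize a user
 * (addr, len) request to the page size of the underlying spt segment before
 * touching the dummy as.  A minimal sketch of that recurring computation is
 * kept here for reference; the helper name and the SEGSPT_EXAMPLE guard are
 * illustrative only, nothing in the driver defines or calls them, so the
 * code below is normally compiled out.
 */
#ifdef SEGSPT_EXAMPLE
static void
segspt_align_request(struct seg *sptseg, caddr_t addr, size_t len,
    caddr_t *alignedaddr, size_t *alignedsize, pgcnt_t *alignednpages)
{
	/* Page size of the underlying ISM/DISM segment, e.g. 4M. */
	size_t pgsz = page_get_pagesize(sptseg->s_szc);

	/* Round the start down and the end up to that page size. */
	*alignedaddr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
	*alignedsize = P2ROUNDUP((uintptr_t)((addr + len) - *alignedaddr),
	    pgsz);

	/* Number of PAGESIZE pages covered by the aligned request. */
	*alignednpages = btopr(*alignedsize);
}
#endif	/* SEGSPT_EXAMPLE */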