1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/param.h> 29 #include <sys/user.h> 30 #include <sys/mman.h> 31 #include <sys/kmem.h> 32 #include <sys/sysmacros.h> 33 #include <sys/cmn_err.h> 34 #include <sys/systm.h> 35 #include <sys/tuneable.h> 36 #include <vm/hat.h> 37 #include <vm/seg.h> 38 #include <vm/as.h> 39 #include <vm/anon.h> 40 #include <vm/page.h> 41 #include <sys/buf.h> 42 #include <sys/swap.h> 43 #include <sys/atomic.h> 44 #include <vm/seg_spt.h> 45 #include <sys/debug.h> 46 #include <sys/vtrace.h> 47 #include <sys/shm.h> 48 #include <sys/shm_impl.h> 49 #include <sys/lgrp.h> 50 #include <sys/vmsystm.h> 51 #include <sys/policy.h> 52 #include <sys/project.h> 53 #include <sys/tnf_probe.h> 54 #include <sys/zone.h> 55 56 #define SEGSPTADDR (caddr_t)0x0 57 58 /* 59 * # pages used for spt 60 */ 61 size_t spt_used; 62 63 /* 64 * segspt_minfree is the memory left for system after ISM 65 * locked its pages; it is set up to 5% of availrmem in 66 * sptcreate when ISM is created. ISM should not use more 67 * than ~90% of availrmem; if it does, then the performance 68 * of the system may decrease. Machines with large memories may 69 * be able to use up more memory for ISM so we set the default 70 * segspt_minfree to 5% (which gives ISM max 95% of availrmem. 71 * If somebody wants even more memory for ISM (risking hanging 72 * the system) they can patch the segspt_minfree to smaller number. 
73 */ 74 pgcnt_t segspt_minfree = 0; 75 76 static int segspt_create(struct seg *seg, caddr_t argsp); 77 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize); 78 static void segspt_free(struct seg *seg); 79 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len); 80 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr); 81 82 static void 83 segspt_badop() 84 { 85 panic("segspt_badop called"); 86 /*NOTREACHED*/ 87 } 88 89 #define SEGSPT_BADOP(t) (t(*)())segspt_badop 90 91 struct seg_ops segspt_ops = { 92 SEGSPT_BADOP(int), /* dup */ 93 segspt_unmap, 94 segspt_free, 95 SEGSPT_BADOP(int), /* fault */ 96 SEGSPT_BADOP(faultcode_t), /* faulta */ 97 SEGSPT_BADOP(int), /* setprot */ 98 SEGSPT_BADOP(int), /* checkprot */ 99 SEGSPT_BADOP(int), /* kluster */ 100 SEGSPT_BADOP(size_t), /* swapout */ 101 SEGSPT_BADOP(int), /* sync */ 102 SEGSPT_BADOP(size_t), /* incore */ 103 SEGSPT_BADOP(int), /* lockop */ 104 SEGSPT_BADOP(int), /* getprot */ 105 SEGSPT_BADOP(u_offset_t), /* getoffset */ 106 SEGSPT_BADOP(int), /* gettype */ 107 SEGSPT_BADOP(int), /* getvp */ 108 SEGSPT_BADOP(int), /* advise */ 109 SEGSPT_BADOP(void), /* dump */ 110 SEGSPT_BADOP(int), /* pagelock */ 111 SEGSPT_BADOP(int), /* setpgsz */ 112 SEGSPT_BADOP(int), /* getmemid */ 113 segspt_getpolicy, /* getpolicy */ 114 SEGSPT_BADOP(int), /* capable */ 115 }; 116 117 static int segspt_shmdup(struct seg *seg, struct seg *newseg); 118 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize); 119 static void segspt_shmfree(struct seg *seg); 120 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg, 121 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw); 122 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr); 123 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr, 124 register size_t len, register uint_t prot); 125 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, 126 uint_t prot); 127 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta); 128 static size_t segspt_shmswapout(struct seg *seg); 129 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, 130 register char *vec); 131 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len, 132 int attr, uint_t flags); 133 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len, 134 int attr, int op, ulong_t *lockmap, size_t pos); 135 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, 136 uint_t *protv); 137 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr); 138 static int segspt_shmgettype(struct seg *seg, caddr_t addr); 139 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp); 140 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, 141 uint_t behav); 142 static void segspt_shmdump(struct seg *seg); 143 static int segspt_shmpagelock(struct seg *, caddr_t, size_t, 144 struct page ***, enum lock_type, enum seg_rw); 145 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t); 146 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *); 147 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t); 148 static int segspt_shmcapable(struct seg *, segcapability_t); 149 150 struct seg_ops segspt_shmops = { 151 segspt_shmdup, 152 segspt_shmunmap, 153 segspt_shmfree, 154 segspt_shmfault, 155 segspt_shmfaulta, 156 segspt_shmsetprot, 157 
segspt_shmcheckprot, 158 segspt_shmkluster, 159 segspt_shmswapout, 160 segspt_shmsync, 161 segspt_shmincore, 162 segspt_shmlockop, 163 segspt_shmgetprot, 164 segspt_shmgetoffset, 165 segspt_shmgettype, 166 segspt_shmgetvp, 167 segspt_shmadvise, /* advise */ 168 segspt_shmdump, 169 segspt_shmpagelock, 170 segspt_shmsetpgsz, 171 segspt_shmgetmemid, 172 segspt_shmgetpolicy, 173 segspt_shmcapable, 174 }; 175 176 static void segspt_purge(struct seg *seg); 177 static int segspt_reclaim(struct seg *, caddr_t, size_t, struct page **, 178 enum seg_rw); 179 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len, 180 page_t **ppa); 181 182 183 184 /*ARGSUSED*/ 185 int 186 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp, 187 uint_t prot, uint_t flags, uint_t share_szc) 188 { 189 int err; 190 struct as *newas; 191 struct segspt_crargs sptcargs; 192 193 #ifdef DEBUG 194 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */, 195 tnf_ulong, size, size ); 196 #endif 197 if (segspt_minfree == 0) /* leave min 5% of availrmem for */ 198 segspt_minfree = availrmem/20; /* for the system */ 199 200 if (!hat_supported(HAT_SHARED_PT, (void *)0)) 201 return (EINVAL); 202 203 /* 204 * get a new as for this shared memory segment 205 */ 206 newas = as_alloc(); 207 newas->a_proc = NULL; 208 sptcargs.amp = amp; 209 sptcargs.prot = prot; 210 sptcargs.flags = flags; 211 sptcargs.szc = share_szc; 212 /* 213 * create a shared page table (spt) segment 214 */ 215 216 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) { 217 as_free(newas); 218 return (err); 219 } 220 *sptseg = sptcargs.seg_spt; 221 return (0); 222 } 223 224 void 225 sptdestroy(struct as *as, struct anon_map *amp) 226 { 227 228 #ifdef DEBUG 229 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */); 230 #endif 231 (void) as_unmap(as, SEGSPTADDR, amp->size); 232 as_free(as); 233 } 234 235 /* 236 * called from seg_free(). 237 * free (i.e., unlock, unmap, return to free list) 238 * all the pages in the given seg. 239 */ 240 void 241 segspt_free(struct seg *seg) 242 { 243 struct spt_data *sptd = (struct spt_data *)seg->s_data; 244 245 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 246 247 if (sptd != NULL) { 248 if (sptd->spt_realsize) 249 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize); 250 251 if (sptd->spt_ppa_lckcnt) 252 kmem_free(sptd->spt_ppa_lckcnt, 253 sizeof (*sptd->spt_ppa_lckcnt) 254 * btopr(sptd->spt_amp->size)); 255 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp)); 256 mutex_destroy(&sptd->spt_lock); 257 kmem_free(sptd, sizeof (*sptd)); 258 } 259 } 260 261 /*ARGSUSED*/ 262 static int 263 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr, 264 uint_t flags) 265 { 266 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 267 268 return (0); 269 } 270 271 /*ARGSUSED*/ 272 static size_t 273 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec) 274 { 275 caddr_t eo_seg; 276 pgcnt_t npages; 277 struct shm_data *shmd = (struct shm_data *)seg->s_data; 278 struct seg *sptseg; 279 struct spt_data *sptd; 280 281 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 282 #ifdef lint 283 seg = seg; 284 #endif 285 sptseg = shmd->shm_sptseg; 286 sptd = sptseg->s_data; 287 288 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 289 eo_seg = addr + len; 290 while (addr < eo_seg) { 291 /* page exists, and it's locked. 
*/ 292 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED | 293 SEG_PAGE_ANON; 294 addr += PAGESIZE; 295 } 296 return (len); 297 } else { 298 struct anon_map *amp = shmd->shm_amp; 299 struct anon *ap; 300 page_t *pp; 301 pgcnt_t anon_index; 302 struct vnode *vp; 303 u_offset_t off; 304 ulong_t i; 305 int ret; 306 anon_sync_obj_t cookie; 307 308 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK); 309 anon_index = seg_page(seg, addr); 310 npages = btopr(len); 311 if (anon_index + npages > btopr(shmd->shm_amp->size)) { 312 return (EINVAL); 313 } 314 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 315 for (i = 0; i < npages; i++, anon_index++) { 316 ret = 0; 317 anon_array_enter(amp, anon_index, &cookie); 318 ap = anon_get_ptr(amp->ahp, anon_index); 319 if (ap != NULL) { 320 swap_xlate(ap, &vp, &off); 321 anon_array_exit(&cookie); 322 pp = page_lookup_nowait(vp, off, SE_SHARED); 323 if (pp != NULL) { 324 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON; 325 page_unlock(pp); 326 } 327 } else { 328 anon_array_exit(&cookie); 329 } 330 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) { 331 ret |= SEG_PAGE_LOCKED; 332 } 333 *vec++ = (char)ret; 334 } 335 ANON_LOCK_EXIT(&->a_rwlock); 336 return (len); 337 } 338 } 339 340 static int 341 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize) 342 { 343 size_t share_size; 344 345 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 346 347 /* 348 * seg.s_size may have been rounded up to the largest page size 349 * in shmat(). 350 * XXX This should be cleanedup. sptdestroy should take a length 351 * argument which should be the same as sptcreate. Then 352 * this rounding would not be needed (or is done in shm.c) 353 * Only the check for full segment will be needed. 354 * 355 * XXX -- shouldn't raddr == 0 always? These tests don't seem 356 * to be useful at all. 357 */ 358 share_size = page_get_pagesize(seg->s_szc); 359 ssize = P2ROUNDUP(ssize, share_size); 360 361 if (raddr == seg->s_base && ssize == seg->s_size) { 362 seg_free(seg); 363 return (0); 364 } else 365 return (EINVAL); 366 } 367 368 int 369 segspt_create(struct seg *seg, caddr_t argsp) 370 { 371 int err; 372 caddr_t addr = seg->s_base; 373 struct spt_data *sptd; 374 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp; 375 struct anon_map *amp = sptcargs->amp; 376 struct kshmid *sp = amp->a_sp; 377 struct cred *cred = CRED(); 378 ulong_t i, j, anon_index = 0; 379 pgcnt_t npages = btopr(amp->size); 380 struct vnode *vp; 381 page_t **ppa; 382 uint_t hat_flags; 383 size_t pgsz; 384 pgcnt_t pgcnt; 385 caddr_t a; 386 pgcnt_t pidx; 387 size_t sz; 388 proc_t *procp = curproc; 389 rctl_qty_t lockedbytes = 0; 390 kproject_t *proj; 391 392 /* 393 * We are holding the a_lock on the underlying dummy as, 394 * so we can make calls to the HAT layer. 
395 */ 396 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 397 ASSERT(sp != NULL); 398 399 #ifdef DEBUG 400 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */, 401 tnf_opaque, addr, addr, 402 tnf_ulong, len, seg->s_size); 403 #endif 404 if ((sptcargs->flags & SHM_PAGEABLE) == 0) { 405 if (err = anon_swap_adjust(npages)) 406 return (err); 407 } 408 err = ENOMEM; 409 410 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL) 411 goto out1; 412 413 if ((sptcargs->flags & SHM_PAGEABLE) == 0) { 414 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages), 415 KM_NOSLEEP)) == NULL) 416 goto out2; 417 } 418 419 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL); 420 421 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL) 422 goto out3; 423 424 seg->s_ops = &segspt_ops; 425 sptd->spt_vp = vp; 426 sptd->spt_amp = amp; 427 sptd->spt_prot = sptcargs->prot; 428 sptd->spt_flags = sptcargs->flags; 429 seg->s_data = (caddr_t)sptd; 430 sptd->spt_ppa = NULL; 431 sptd->spt_ppa_lckcnt = NULL; 432 seg->s_szc = sptcargs->szc; 433 434 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 435 if (seg->s_szc > amp->a_szc) { 436 amp->a_szc = seg->s_szc; 437 } 438 ANON_LOCK_EXIT(&->a_rwlock); 439 440 /* 441 * Set policy to affect initial allocation of pages in 442 * anon_map_createpages() 443 */ 444 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index, 445 NULL, 0, ptob(npages)); 446 447 if (sptcargs->flags & SHM_PAGEABLE) { 448 size_t share_sz; 449 pgcnt_t new_npgs, more_pgs; 450 struct anon_hdr *nahp; 451 zone_t *zone; 452 453 share_sz = page_get_pagesize(seg->s_szc); 454 if (!IS_P2ALIGNED(amp->size, share_sz)) { 455 /* 456 * We are rounding up the size of the anon array 457 * on 4 M boundary because we always create 4 M 458 * of page(s) when locking, faulting pages and we 459 * don't have to check for all corner cases e.g. 460 * if there is enough space to allocate 4 M 461 * page. 462 */ 463 new_npgs = btop(P2ROUNDUP(amp->size, share_sz)); 464 more_pgs = new_npgs - npages; 465 466 /* 467 * The zone will never be NULL, as a fully created 468 * shm always has an owning zone. 
469 */ 470 zone = sp->shm_perm.ipc_zone; 471 ASSERT(zone != NULL); 472 if (anon_resv_zone(ptob(more_pgs), zone) == 0) { 473 err = ENOMEM; 474 goto out4; 475 } 476 477 nahp = anon_create(new_npgs, ANON_SLEEP); 478 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 479 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages, 480 ANON_SLEEP); 481 anon_release(amp->ahp, npages); 482 amp->ahp = nahp; 483 ASSERT(amp->swresv == ptob(npages)); 484 amp->swresv = amp->size = ptob(new_npgs); 485 ANON_LOCK_EXIT(&->a_rwlock); 486 npages = new_npgs; 487 } 488 489 sptd->spt_ppa_lckcnt = kmem_zalloc(npages * 490 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP); 491 sptd->spt_pcachecnt = 0; 492 sptd->spt_realsize = ptob(npages); 493 sptcargs->seg_spt = seg; 494 return (0); 495 } 496 497 /* 498 * get array of pages for each anon slot in amp 499 */ 500 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa, 501 seg, addr, S_CREATE, cred)) != 0) 502 goto out4; 503 504 mutex_enter(&sp->shm_mlock); 505 506 /* May be partially locked, so, count bytes to charge for locking */ 507 for (i = 0; i < npages; i++) 508 if (ppa[i]->p_lckcnt == 0) 509 lockedbytes += PAGESIZE; 510 511 proj = sp->shm_perm.ipc_proj; 512 513 if (lockedbytes > 0) { 514 mutex_enter(&procp->p_lock); 515 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) { 516 mutex_exit(&procp->p_lock); 517 mutex_exit(&sp->shm_mlock); 518 for (i = 0; i < npages; i++) 519 page_unlock(ppa[i]); 520 err = ENOMEM; 521 goto out4; 522 } 523 mutex_exit(&procp->p_lock); 524 } 525 526 /* 527 * addr is initial address corresponding to the first page on ppa list 528 */ 529 for (i = 0; i < npages; i++) { 530 /* attempt to lock all pages */ 531 if (page_pp_lock(ppa[i], 0, 1) == 0) { 532 /* 533 * if unable to lock any page, unlock all 534 * of them and return error 535 */ 536 for (j = 0; j < i; j++) 537 page_pp_unlock(ppa[j], 0, 1); 538 for (i = 0; i < npages; i++) 539 page_unlock(ppa[i]); 540 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0); 541 mutex_exit(&sp->shm_mlock); 542 err = ENOMEM; 543 goto out4; 544 } 545 } 546 mutex_exit(&sp->shm_mlock); 547 548 /* 549 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 550 * for the entire life of the segment. For example platforms 551 * that do not support Dynamic Reconfiguration. 552 */ 553 hat_flags = HAT_LOAD_SHARE; 554 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) 555 hat_flags |= HAT_LOAD_LOCK; 556 557 /* 558 * Load translations one lare page at a time 559 * to make sure we don't create mappings bigger than 560 * segment's size code in case underlying pages 561 * are shared with segvn's segment that uses bigger 562 * size code than we do. 563 */ 564 pgsz = page_get_pagesize(seg->s_szc); 565 pgcnt = page_get_pagecnt(seg->s_szc); 566 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) { 567 sz = MIN(pgsz, ptob(npages - pidx)); 568 hat_memload_array(seg->s_as->a_hat, a, sz, 569 &ppa[pidx], sptd->spt_prot, hat_flags); 570 } 571 572 /* 573 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 574 * we will leave the pages locked SE_SHARED for the life 575 * of the ISM segment. This will prevent any calls to 576 * hat_pageunload() on this ISM segment for those platforms. 577 */ 578 if (!(hat_flags & HAT_LOAD_LOCK)) { 579 /* 580 * On platforms that support HAT_DYNAMIC_ISM_UNMAP, 581 * we no longer need to hold the SE_SHARED lock on the pages, 582 * since L_PAGELOCK and F_SOFTLOCK calls will grab the 583 * SE_SHARED lock on the pages as necessary. 
584 */ 585 for (i = 0; i < npages; i++) 586 page_unlock(ppa[i]); 587 } 588 sptd->spt_pcachecnt = 0; 589 kmem_free(ppa, ((sizeof (page_t *)) * npages)); 590 sptd->spt_realsize = ptob(npages); 591 atomic_add_long(&spt_used, npages); 592 sptcargs->seg_spt = seg; 593 return (0); 594 595 out4: 596 seg->s_data = NULL; 597 kmem_free(vp, sizeof (*vp)); 598 out3: 599 mutex_destroy(&sptd->spt_lock); 600 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 601 kmem_free(ppa, (sizeof (*ppa) * npages)); 602 out2: 603 kmem_free(sptd, sizeof (*sptd)); 604 out1: 605 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 606 anon_swap_restore(npages); 607 return (err); 608 } 609 610 /*ARGSUSED*/ 611 void 612 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len) 613 { 614 struct page *pp; 615 struct spt_data *sptd = (struct spt_data *)seg->s_data; 616 pgcnt_t npages; 617 ulong_t anon_idx; 618 struct anon_map *amp; 619 struct anon *ap; 620 struct vnode *vp; 621 u_offset_t off; 622 uint_t hat_flags; 623 int root = 0; 624 pgcnt_t pgs, curnpgs = 0; 625 page_t *rootpp; 626 rctl_qty_t unlocked_bytes = 0; 627 kproject_t *proj; 628 kshmid_t *sp; 629 630 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 631 632 len = P2ROUNDUP(len, PAGESIZE); 633 634 npages = btop(len); 635 636 hat_flags = HAT_UNLOAD_UNLOCK; 637 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) || 638 (sptd->spt_flags & SHM_PAGEABLE)) { 639 hat_flags = HAT_UNLOAD; 640 } 641 642 hat_unload(seg->s_as->a_hat, addr, len, hat_flags); 643 644 amp = sptd->spt_amp; 645 if (sptd->spt_flags & SHM_PAGEABLE) 646 npages = btop(amp->size); 647 648 ASSERT(amp != NULL); 649 650 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 651 sp = amp->a_sp; 652 proj = sp->shm_perm.ipc_proj; 653 mutex_enter(&sp->shm_mlock); 654 } 655 for (anon_idx = 0; anon_idx < npages; anon_idx++) { 656 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 657 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) { 658 panic("segspt_free_pages: null app"); 659 /*NOTREACHED*/ 660 } 661 } else { 662 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx)) 663 == NULL) 664 continue; 665 } 666 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0); 667 swap_xlate(ap, &vp, &off); 668 669 /* 670 * If this platform supports HAT_DYNAMIC_ISM_UNMAP, 671 * the pages won't be having SE_SHARED lock at this 672 * point. 673 * 674 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 675 * the pages are still held SE_SHARED locked from the 676 * original segspt_create() 677 * 678 * Our goal is to get SE_EXCL lock on each page, remove 679 * permanent lock on it and invalidate the page. 680 */ 681 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 682 if (hat_flags == HAT_UNLOAD) 683 pp = page_lookup(vp, off, SE_EXCL); 684 else { 685 if ((pp = page_find(vp, off)) == NULL) { 686 panic("segspt_free_pages: " 687 "page not locked"); 688 /*NOTREACHED*/ 689 } 690 if (!page_tryupgrade(pp)) { 691 page_unlock(pp); 692 pp = page_lookup(vp, off, SE_EXCL); 693 } 694 } 695 if (pp == NULL) { 696 panic("segspt_free_pages: " 697 "page not in the system"); 698 /*NOTREACHED*/ 699 } 700 ASSERT(pp->p_lckcnt > 0); 701 page_pp_unlock(pp, 0, 1); 702 if (pp->p_lckcnt == 0) 703 unlocked_bytes += PAGESIZE; 704 } else { 705 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL) 706 continue; 707 } 708 /* 709 * It's logical to invalidate the pages here as in most cases 710 * these were created by segspt. 711 */ 712 if (pp->p_szc != 0) { 713 /* 714 * For DISM swap is released in shm_rm_amp. 
715 */ 716 if ((sptd->spt_flags & SHM_PAGEABLE) == 0 && 717 ap->an_pvp != NULL) { 718 panic("segspt_free_pages: pvp non NULL"); 719 /*NOTREACHED*/ 720 } 721 if (root == 0) { 722 ASSERT(curnpgs == 0); 723 root = 1; 724 rootpp = pp; 725 pgs = curnpgs = page_get_pagecnt(pp->p_szc); 726 ASSERT(pgs > 1); 727 ASSERT(IS_P2ALIGNED(pgs, pgs)); 728 ASSERT(!(page_pptonum(pp) & (pgs - 1))); 729 curnpgs--; 730 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) { 731 ASSERT(curnpgs == 1); 732 ASSERT(page_pptonum(pp) == 733 page_pptonum(rootpp) + (pgs - 1)); 734 page_destroy_pages(rootpp); 735 root = 0; 736 curnpgs = 0; 737 } else { 738 ASSERT(curnpgs > 1); 739 ASSERT(page_pptonum(pp) == 740 page_pptonum(rootpp) + (pgs - curnpgs)); 741 curnpgs--; 742 } 743 } else { 744 if (root != 0 || curnpgs != 0) { 745 panic("segspt_free_pages: bad large page"); 746 /*NOTREACHED*/ 747 } 748 /*LINTED: constant in conditional context */ 749 VN_DISPOSE(pp, B_INVAL, 0, kcred); 750 } 751 } 752 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 753 if (unlocked_bytes > 0) 754 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0); 755 mutex_exit(&sp->shm_mlock); 756 } 757 if (root != 0 || curnpgs != 0) { 758 panic("segspt_free_pages: bad large page"); 759 /*NOTREACHED*/ 760 } 761 762 /* 763 * mark that pages have been released 764 */ 765 sptd->spt_realsize = 0; 766 767 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 768 atomic_add_long(&spt_used, -npages); 769 anon_swap_restore(npages); 770 } 771 } 772 773 /* 774 * Get memory allocation policy info for specified address in given segment 775 */ 776 static lgrp_mem_policy_info_t * 777 segspt_getpolicy(struct seg *seg, caddr_t addr) 778 { 779 struct anon_map *amp; 780 ulong_t anon_index; 781 lgrp_mem_policy_info_t *policy_info; 782 struct spt_data *spt_data; 783 784 ASSERT(seg != NULL); 785 786 /* 787 * Get anon_map from segspt 788 * 789 * Assume that no lock needs to be held on anon_map, since 790 * it should be protected by its reference count which must be 791 * nonzero for an existing segment 792 * Need to grab readers lock on policy tree though 793 */ 794 spt_data = (struct spt_data *)seg->s_data; 795 if (spt_data == NULL) 796 return (NULL); 797 amp = spt_data->spt_amp; 798 ASSERT(amp->refcnt != 0); 799 800 /* 801 * Get policy info 802 * 803 * Assume starting anon index of 0 804 */ 805 anon_index = seg_page(seg, addr); 806 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0); 807 808 return (policy_info); 809 } 810 811 /* 812 * DISM only. 813 * Return locked pages over a given range. 814 * 815 * We will cache all DISM locked pages and save the pplist for the 816 * entire segment in the ppa field of the underlying DISM segment structure. 817 * Later, during a call to segspt_reclaim() we will use this ppa array 818 * to page_unlock() all of the pages and then we will free this ppa list. 
819 */ 820 /*ARGSUSED*/ 821 static int 822 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len, 823 struct page ***ppp, enum lock_type type, enum seg_rw rw) 824 { 825 struct shm_data *shmd = (struct shm_data *)seg->s_data; 826 struct seg *sptseg = shmd->shm_sptseg; 827 struct spt_data *sptd = sptseg->s_data; 828 pgcnt_t pg_idx, npages, tot_npages, npgs; 829 struct page **pplist, **pl, **ppa, *pp; 830 struct anon_map *amp; 831 spgcnt_t an_idx; 832 int ret = ENOTSUP; 833 uint_t pl_built = 0; 834 struct anon *ap; 835 struct vnode *vp; 836 u_offset_t off; 837 pgcnt_t claim_availrmem = 0; 838 uint_t szc; 839 840 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 841 842 /* 843 * We want to lock/unlock the entire ISM segment. Therefore, 844 * we will be using the underlying sptseg and it's base address 845 * and length for the caching arguments. 846 */ 847 ASSERT(sptseg); 848 ASSERT(sptd); 849 850 pg_idx = seg_page(seg, addr); 851 npages = btopr(len); 852 853 /* 854 * check if the request is larger than number of pages covered 855 * by amp 856 */ 857 if (pg_idx + npages > btopr(sptd->spt_amp->size)) { 858 *ppp = NULL; 859 return (ENOTSUP); 860 } 861 862 if (type == L_PAGEUNLOCK) { 863 ASSERT(sptd->spt_ppa != NULL); 864 865 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 866 sptd->spt_ppa, sptd->spt_prot, segspt_reclaim); 867 868 /* 869 * If someone is blocked while unmapping, we purge 870 * segment page cache and thus reclaim pplist synchronously 871 * without waiting for seg_pasync_thread. This speeds up 872 * unmapping in cases where munmap(2) is called, while 873 * raw async i/o is still in progress or where a thread 874 * exits on data fault in a multithreaded application. 875 */ 876 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 877 segspt_purge(seg); 878 } 879 return (0); 880 } else if (type == L_PAGERECLAIM) { 881 ASSERT(sptd->spt_ppa != NULL); 882 (void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size, 883 sptd->spt_ppa, sptd->spt_prot); 884 return (0); 885 } 886 887 if (sptd->spt_flags & DISM_PPA_CHANGED) { 888 segspt_purge(seg); 889 /* 890 * for DISM ppa needs to be rebuild since 891 * number of locked pages could be changed 892 */ 893 *ppp = NULL; 894 return (ENOTSUP); 895 } 896 897 /* 898 * First try to find pages in segment page cache, without 899 * holding the segment lock. 900 */ 901 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 902 sptd->spt_prot); 903 if (pplist != NULL) { 904 ASSERT(sptd->spt_ppa != NULL); 905 ASSERT(sptd->spt_ppa == pplist); 906 ppa = sptd->spt_ppa; 907 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 908 if (ppa[an_idx] == NULL) { 909 seg_pinactive(seg, seg->s_base, 910 sptd->spt_amp->size, ppa, 911 sptd->spt_prot, segspt_reclaim); 912 *ppp = NULL; 913 return (ENOTSUP); 914 } 915 if ((szc = ppa[an_idx]->p_szc) != 0) { 916 npgs = page_get_pagecnt(szc); 917 an_idx = P2ROUNDUP(an_idx + 1, npgs); 918 } else { 919 an_idx++; 920 } 921 } 922 /* 923 * Since we cache the entire DISM segment, we want to 924 * set ppp to point to the first slot that corresponds 925 * to the requested addr, i.e. pg_idx. 926 */ 927 *ppp = &(sptd->spt_ppa[pg_idx]); 928 return (0); 929 } 930 931 /* The L_PAGELOCK case... 
*/ 932 mutex_enter(&sptd->spt_lock); 933 /* 934 * try to find pages in segment page cache with mutex 935 */ 936 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 937 sptd->spt_prot); 938 if (pplist != NULL) { 939 ASSERT(sptd->spt_ppa != NULL); 940 ASSERT(sptd->spt_ppa == pplist); 941 ppa = sptd->spt_ppa; 942 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 943 if (ppa[an_idx] == NULL) { 944 mutex_exit(&sptd->spt_lock); 945 seg_pinactive(seg, seg->s_base, 946 sptd->spt_amp->size, ppa, 947 sptd->spt_prot, segspt_reclaim); 948 *ppp = NULL; 949 return (ENOTSUP); 950 } 951 if ((szc = ppa[an_idx]->p_szc) != 0) { 952 npgs = page_get_pagecnt(szc); 953 an_idx = P2ROUNDUP(an_idx + 1, npgs); 954 } else { 955 an_idx++; 956 } 957 } 958 /* 959 * Since we cache the entire DISM segment, we want to 960 * set ppp to point to the first slot that corresponds 961 * to the requested addr, i.e. pg_idx. 962 */ 963 mutex_exit(&sptd->spt_lock); 964 *ppp = &(sptd->spt_ppa[pg_idx]); 965 return (0); 966 } 967 if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) == 968 SEGP_FAIL) { 969 mutex_exit(&sptd->spt_lock); 970 *ppp = NULL; 971 return (ENOTSUP); 972 } 973 974 /* 975 * No need to worry about protections because DISM pages are always rw. 976 */ 977 pl = pplist = NULL; 978 amp = sptd->spt_amp; 979 980 /* 981 * Do we need to build the ppa array? 982 */ 983 if (sptd->spt_ppa == NULL) { 984 pgcnt_t lpg_cnt = 0; 985 986 pl_built = 1; 987 tot_npages = btopr(sptd->spt_amp->size); 988 989 ASSERT(sptd->spt_pcachecnt == 0); 990 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP); 991 pl = pplist; 992 993 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 994 for (an_idx = 0; an_idx < tot_npages; ) { 995 ap = anon_get_ptr(amp->ahp, an_idx); 996 /* 997 * Cache only mlocked pages. For large pages 998 * if one (constituent) page is mlocked 999 * all pages for that large page 1000 * are cached also. This is for quick 1001 * lookups of ppa array; 1002 */ 1003 if ((ap != NULL) && (lpg_cnt != 0 || 1004 (sptd->spt_ppa_lckcnt[an_idx] != 0))) { 1005 1006 swap_xlate(ap, &vp, &off); 1007 pp = page_lookup(vp, off, SE_SHARED); 1008 ASSERT(pp != NULL); 1009 if (lpg_cnt == 0) { 1010 lpg_cnt++; 1011 /* 1012 * For a small page, we are done -- 1013 * lpg_count is reset to 0 below. 1014 * 1015 * For a large page, we are guaranteed 1016 * to find the anon structures of all 1017 * constituent pages and a non-zero 1018 * lpg_cnt ensures that we don't test 1019 * for mlock for these. We are done 1020 * when lpg_count reaches (npgs + 1). 1021 * If we are not the first constituent 1022 * page, restart at the first one. 1023 */ 1024 npgs = page_get_pagecnt(pp->p_szc); 1025 if (!IS_P2ALIGNED(an_idx, npgs)) { 1026 an_idx = P2ALIGN(an_idx, npgs); 1027 page_unlock(pp); 1028 continue; 1029 } 1030 } 1031 if (++lpg_cnt > npgs) 1032 lpg_cnt = 0; 1033 1034 /* 1035 * availrmem is decremented only 1036 * for unlocked pages 1037 */ 1038 if (sptd->spt_ppa_lckcnt[an_idx] == 0) 1039 claim_availrmem++; 1040 pplist[an_idx] = pp; 1041 } 1042 an_idx++; 1043 } 1044 ANON_LOCK_EXIT(&->a_rwlock); 1045 1046 mutex_enter(&freemem_lock); 1047 if (availrmem < tune.t_minarmem + claim_availrmem) { 1048 mutex_exit(&freemem_lock); 1049 ret = FC_MAKE_ERR(ENOMEM); 1050 claim_availrmem = 0; 1051 goto insert_fail; 1052 } else { 1053 availrmem -= claim_availrmem; 1054 } 1055 mutex_exit(&freemem_lock); 1056 1057 sptd->spt_ppa = pl; 1058 } else { 1059 /* 1060 * We already have a valid ppa[]. 
1061 */ 1062 pl = sptd->spt_ppa; 1063 } 1064 1065 ASSERT(pl != NULL); 1066 1067 ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size, 1068 pl, sptd->spt_prot, SEGP_FORCE_WIRED | SEGP_ASYNC_FLUSH, 1069 segspt_reclaim); 1070 if (ret == SEGP_FAIL) { 1071 /* 1072 * seg_pinsert failed. We return 1073 * ENOTSUP, so that the as_pagelock() code will 1074 * then try the slower F_SOFTLOCK path. 1075 */ 1076 if (pl_built) { 1077 /* 1078 * No one else has referenced the ppa[]. 1079 * We created it and we need to destroy it. 1080 */ 1081 sptd->spt_ppa = NULL; 1082 } 1083 ret = ENOTSUP; 1084 goto insert_fail; 1085 } 1086 1087 /* 1088 * In either case, we increment softlockcnt on the 'real' segment. 1089 */ 1090 sptd->spt_pcachecnt++; 1091 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1092 1093 ppa = sptd->spt_ppa; 1094 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 1095 if (ppa[an_idx] == NULL) { 1096 mutex_exit(&sptd->spt_lock); 1097 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 1098 pl, sptd->spt_prot, segspt_reclaim); 1099 *ppp = NULL; 1100 return (ENOTSUP); 1101 } 1102 if ((szc = ppa[an_idx]->p_szc) != 0) { 1103 npgs = page_get_pagecnt(szc); 1104 an_idx = P2ROUNDUP(an_idx + 1, npgs); 1105 } else { 1106 an_idx++; 1107 } 1108 } 1109 /* 1110 * We can now drop the sptd->spt_lock since the ppa[] 1111 * exists and he have incremented pacachecnt. 1112 */ 1113 mutex_exit(&sptd->spt_lock); 1114 1115 /* 1116 * Since we cache the entire segment, we want to 1117 * set ppp to point to the first slot that corresponds 1118 * to the requested addr, i.e. pg_idx. 1119 */ 1120 *ppp = &(sptd->spt_ppa[pg_idx]); 1121 return (ret); 1122 1123 insert_fail: 1124 /* 1125 * We will only reach this code if we tried and failed. 1126 * 1127 * And we can drop the lock on the dummy seg, once we've failed 1128 * to set up a new ppa[]. 1129 */ 1130 mutex_exit(&sptd->spt_lock); 1131 1132 if (pl_built) { 1133 mutex_enter(&freemem_lock); 1134 availrmem += claim_availrmem; 1135 mutex_exit(&freemem_lock); 1136 1137 /* 1138 * We created pl and we need to destroy it. 1139 */ 1140 pplist = pl; 1141 for (an_idx = 0; an_idx < tot_npages; an_idx++) { 1142 if (pplist[an_idx] != NULL) 1143 page_unlock(pplist[an_idx]); 1144 } 1145 kmem_free(pl, sizeof (page_t *) * tot_npages); 1146 } 1147 1148 if (shmd->shm_softlockcnt <= 0) { 1149 if (AS_ISUNMAPWAIT(seg->s_as)) { 1150 mutex_enter(&seg->s_as->a_contents); 1151 if (AS_ISUNMAPWAIT(seg->s_as)) { 1152 AS_CLRUNMAPWAIT(seg->s_as); 1153 cv_broadcast(&seg->s_as->a_cv); 1154 } 1155 mutex_exit(&seg->s_as->a_contents); 1156 } 1157 } 1158 *ppp = NULL; 1159 return (ret); 1160 } 1161 1162 1163 1164 /* 1165 * return locked pages over a given range. 1166 * 1167 * We will cache the entire ISM segment and save the pplist for the 1168 * entire segment in the ppa field of the underlying ISM segment structure. 1169 * Later, during a call to segspt_reclaim() we will use this ppa array 1170 * to page_unlock() all of the pages and then we will free this ppa list. 
1171 */ 1172 /*ARGSUSED*/ 1173 static int 1174 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len, 1175 struct page ***ppp, enum lock_type type, enum seg_rw rw) 1176 { 1177 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1178 struct seg *sptseg = shmd->shm_sptseg; 1179 struct spt_data *sptd = sptseg->s_data; 1180 pgcnt_t np, page_index, npages; 1181 caddr_t a, spt_base; 1182 struct page **pplist, **pl, *pp; 1183 struct anon_map *amp; 1184 ulong_t anon_index; 1185 int ret = ENOTSUP; 1186 uint_t pl_built = 0; 1187 struct anon *ap; 1188 struct vnode *vp; 1189 u_offset_t off; 1190 1191 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1192 1193 /* 1194 * We want to lock/unlock the entire ISM segment. Therefore, 1195 * we will be using the underlying sptseg and it's base address 1196 * and length for the caching arguments. 1197 */ 1198 ASSERT(sptseg); 1199 ASSERT(sptd); 1200 1201 if (sptd->spt_flags & SHM_PAGEABLE) { 1202 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw)); 1203 } 1204 1205 page_index = seg_page(seg, addr); 1206 npages = btopr(len); 1207 1208 /* 1209 * check if the request is larger than number of pages covered 1210 * by amp 1211 */ 1212 if (page_index + npages > btopr(sptd->spt_amp->size)) { 1213 *ppp = NULL; 1214 return (ENOTSUP); 1215 } 1216 1217 if (type == L_PAGEUNLOCK) { 1218 1219 ASSERT(sptd->spt_ppa != NULL); 1220 1221 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 1222 sptd->spt_ppa, sptd->spt_prot, segspt_reclaim); 1223 1224 /* 1225 * If someone is blocked while unmapping, we purge 1226 * segment page cache and thus reclaim pplist synchronously 1227 * without waiting for seg_pasync_thread. This speeds up 1228 * unmapping in cases where munmap(2) is called, while 1229 * raw async i/o is still in progress or where a thread 1230 * exits on data fault in a multithreaded application. 1231 */ 1232 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 1233 segspt_purge(seg); 1234 } 1235 return (0); 1236 } else if (type == L_PAGERECLAIM) { 1237 ASSERT(sptd->spt_ppa != NULL); 1238 1239 (void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size, 1240 sptd->spt_ppa, sptd->spt_prot); 1241 return (0); 1242 } 1243 1244 /* 1245 * First try to find pages in segment page cache, without 1246 * holding the segment lock. 1247 */ 1248 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 1249 sptd->spt_prot); 1250 if (pplist != NULL) { 1251 ASSERT(sptd->spt_ppa == pplist); 1252 ASSERT(sptd->spt_ppa[page_index]); 1253 /* 1254 * Since we cache the entire ISM segment, we want to 1255 * set ppp to point to the first slot that corresponds 1256 * to the requested addr, i.e. page_index. 1257 */ 1258 *ppp = &(sptd->spt_ppa[page_index]); 1259 return (0); 1260 } 1261 1262 /* The L_PAGELOCK case... */ 1263 mutex_enter(&sptd->spt_lock); 1264 1265 /* 1266 * try to find pages in segment page cache 1267 */ 1268 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 1269 sptd->spt_prot); 1270 if (pplist != NULL) { 1271 ASSERT(sptd->spt_ppa == pplist); 1272 /* 1273 * Since we cache the entire segment, we want to 1274 * set ppp to point to the first slot that corresponds 1275 * to the requested addr, i.e. page_index. 
1276 */ 1277 mutex_exit(&sptd->spt_lock); 1278 *ppp = &(sptd->spt_ppa[page_index]); 1279 return (0); 1280 } 1281 1282 if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) == 1283 SEGP_FAIL) { 1284 mutex_exit(&sptd->spt_lock); 1285 *ppp = NULL; 1286 return (ENOTSUP); 1287 } 1288 1289 /* 1290 * No need to worry about protections because ISM pages 1291 * are always rw. 1292 */ 1293 pl = pplist = NULL; 1294 1295 /* 1296 * Do we need to build the ppa array? 1297 */ 1298 if (sptd->spt_ppa == NULL) { 1299 ASSERT(sptd->spt_ppa == pplist); 1300 1301 spt_base = sptseg->s_base; 1302 pl_built = 1; 1303 1304 /* 1305 * availrmem is decremented once during anon_swap_adjust() 1306 * and is incremented during the anon_unresv(), which is 1307 * called from shm_rm_amp() when the segment is destroyed. 1308 */ 1309 amp = sptd->spt_amp; 1310 ASSERT(amp != NULL); 1311 1312 /* pcachecnt is protected by sptd->spt_lock */ 1313 ASSERT(sptd->spt_pcachecnt == 0); 1314 pplist = kmem_zalloc(sizeof (page_t *) 1315 * btopr(sptd->spt_amp->size), KM_SLEEP); 1316 pl = pplist; 1317 1318 anon_index = seg_page(sptseg, spt_base); 1319 1320 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1321 for (a = spt_base; a < (spt_base + sptd->spt_amp->size); 1322 a += PAGESIZE, anon_index++, pplist++) { 1323 ap = anon_get_ptr(amp->ahp, anon_index); 1324 ASSERT(ap != NULL); 1325 swap_xlate(ap, &vp, &off); 1326 pp = page_lookup(vp, off, SE_SHARED); 1327 ASSERT(pp != NULL); 1328 *pplist = pp; 1329 } 1330 ANON_LOCK_EXIT(&->a_rwlock); 1331 1332 if (a < (spt_base + sptd->spt_amp->size)) { 1333 ret = ENOTSUP; 1334 goto insert_fail; 1335 } 1336 sptd->spt_ppa = pl; 1337 } else { 1338 /* 1339 * We already have a valid ppa[]. 1340 */ 1341 pl = sptd->spt_ppa; 1342 } 1343 1344 ASSERT(pl != NULL); 1345 1346 ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size, 1347 pl, sptd->spt_prot, SEGP_FORCE_WIRED, segspt_reclaim); 1348 if (ret == SEGP_FAIL) { 1349 /* 1350 * seg_pinsert failed. We return 1351 * ENOTSUP, so that the as_pagelock() code will 1352 * then try the slower F_SOFTLOCK path. 1353 */ 1354 if (pl_built) { 1355 /* 1356 * No one else has referenced the ppa[]. 1357 * We created it and we need to destroy it. 1358 */ 1359 sptd->spt_ppa = NULL; 1360 } 1361 ret = ENOTSUP; 1362 goto insert_fail; 1363 } 1364 1365 /* 1366 * In either case, we increment softlockcnt on the 'real' segment. 1367 */ 1368 sptd->spt_pcachecnt++; 1369 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1370 1371 /* 1372 * We can now drop the sptd->spt_lock since the ppa[] 1373 * exists and he have incremented pacachecnt. 1374 */ 1375 mutex_exit(&sptd->spt_lock); 1376 1377 /* 1378 * Since we cache the entire segment, we want to 1379 * set ppp to point to the first slot that corresponds 1380 * to the requested addr, i.e. page_index. 1381 */ 1382 *ppp = &(sptd->spt_ppa[page_index]); 1383 return (ret); 1384 1385 insert_fail: 1386 /* 1387 * We will only reach this code if we tried and failed. 1388 * 1389 * And we can drop the lock on the dummy seg, once we've failed 1390 * to set up a new ppa[]. 1391 */ 1392 mutex_exit(&sptd->spt_lock); 1393 1394 if (pl_built) { 1395 /* 1396 * We created pl and we need to destroy it. 
1397 */ 1398 pplist = pl; 1399 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT); 1400 while (np) { 1401 page_unlock(*pplist); 1402 np--; 1403 pplist++; 1404 } 1405 kmem_free(pl, sizeof (page_t *) * 1406 btopr(sptd->spt_amp->size)); 1407 } 1408 if (shmd->shm_softlockcnt <= 0) { 1409 if (AS_ISUNMAPWAIT(seg->s_as)) { 1410 mutex_enter(&seg->s_as->a_contents); 1411 if (AS_ISUNMAPWAIT(seg->s_as)) { 1412 AS_CLRUNMAPWAIT(seg->s_as); 1413 cv_broadcast(&seg->s_as->a_cv); 1414 } 1415 mutex_exit(&seg->s_as->a_contents); 1416 } 1417 } 1418 *ppp = NULL; 1419 return (ret); 1420 } 1421 1422 /* 1423 * purge any cached pages in the I/O page cache 1424 */ 1425 static void 1426 segspt_purge(struct seg *seg) 1427 { 1428 seg_ppurge(seg); 1429 } 1430 1431 static int 1432 segspt_reclaim(struct seg *seg, caddr_t addr, size_t len, struct page **pplist, 1433 enum seg_rw rw) 1434 { 1435 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1436 struct seg *sptseg; 1437 struct spt_data *sptd; 1438 pgcnt_t npages, i, free_availrmem = 0; 1439 int done = 0; 1440 1441 #ifdef lint 1442 addr = addr; 1443 #endif 1444 sptseg = shmd->shm_sptseg; 1445 sptd = sptseg->s_data; 1446 npages = (len >> PAGESHIFT); 1447 ASSERT(npages); 1448 ASSERT(sptd->spt_pcachecnt != 0); 1449 ASSERT(sptd->spt_ppa == pplist); 1450 ASSERT(npages == btopr(sptd->spt_amp->size)); 1451 /* 1452 * Acquire the lock on the dummy seg and destroy the 1453 * ppa array IF this is the last pcachecnt. 1454 */ 1455 mutex_enter(&sptd->spt_lock); 1456 if (--sptd->spt_pcachecnt == 0) { 1457 for (i = 0; i < npages; i++) { 1458 if (pplist[i] == NULL) { 1459 continue; 1460 } 1461 if (rw == S_WRITE) { 1462 hat_setrefmod(pplist[i]); 1463 } else { 1464 hat_setref(pplist[i]); 1465 } 1466 if ((sptd->spt_flags & SHM_PAGEABLE) && 1467 (sptd->spt_ppa_lckcnt[i] == 0)) 1468 free_availrmem++; 1469 page_unlock(pplist[i]); 1470 } 1471 if (sptd->spt_flags & SHM_PAGEABLE) { 1472 mutex_enter(&freemem_lock); 1473 availrmem += free_availrmem; 1474 mutex_exit(&freemem_lock); 1475 } 1476 /* 1477 * Since we want to cach/uncache the entire ISM segment, 1478 * we will track the pplist in a segspt specific field 1479 * ppa, that is initialized at the time we add an entry to 1480 * the cache. 1481 */ 1482 ASSERT(sptd->spt_pcachecnt == 0); 1483 kmem_free(pplist, sizeof (page_t *) * npages); 1484 sptd->spt_ppa = NULL; 1485 sptd->spt_flags &= ~DISM_PPA_CHANGED; 1486 done = 1; 1487 } 1488 mutex_exit(&sptd->spt_lock); 1489 /* 1490 * Now decrement softlockcnt. 1491 */ 1492 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1); 1493 1494 if (shmd->shm_softlockcnt <= 0) { 1495 if (AS_ISUNMAPWAIT(seg->s_as)) { 1496 mutex_enter(&seg->s_as->a_contents); 1497 if (AS_ISUNMAPWAIT(seg->s_as)) { 1498 AS_CLRUNMAPWAIT(seg->s_as); 1499 cv_broadcast(&seg->s_as->a_cv); 1500 } 1501 mutex_exit(&seg->s_as->a_contents); 1502 } 1503 } 1504 return (done); 1505 } 1506 1507 /* 1508 * Do a F_SOFTUNLOCK call over the range requested. 1509 * The range must have already been F_SOFTLOCK'ed. 1510 * 1511 * The calls to acquire and release the anon map lock mutex were 1512 * removed in order to avoid a deadly embrace during a DR 1513 * memory delete operation. (Eg. 
DR blocks while waiting for a 1514 * exclusive lock on a page that is being used for kaio; the 1515 * thread that will complete the kaio and call segspt_softunlock 1516 * blocks on the anon map lock; another thread holding the anon 1517 * map lock blocks on another page lock via the segspt_shmfault 1518 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.) 1519 * 1520 * The appropriateness of the removal is based upon the following: 1521 * 1. If we are holding a segment's reader lock and the page is held 1522 * shared, then the corresponding element in anonmap which points to 1523 * anon struct cannot change and there is no need to acquire the 1524 * anonymous map lock. 1525 * 2. Threads in segspt_softunlock have a reader lock on the segment 1526 * and already have the shared page lock, so we are guaranteed that 1527 * the anon map slot cannot change and therefore can call anon_get_ptr() 1528 * without grabbing the anonymous map lock. 1529 * 3. Threads that softlock a shared page break copy-on-write, even if 1530 * its a read. Thus cow faults can be ignored with respect to soft 1531 * unlocking, since the breaking of cow means that the anon slot(s) will 1532 * not be shared. 1533 */ 1534 static void 1535 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr, 1536 size_t len, enum seg_rw rw) 1537 { 1538 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1539 struct seg *sptseg; 1540 struct spt_data *sptd; 1541 page_t *pp; 1542 caddr_t adr; 1543 struct vnode *vp; 1544 u_offset_t offset; 1545 ulong_t anon_index; 1546 struct anon_map *amp; /* XXX - for locknest */ 1547 struct anon *ap = NULL; 1548 pgcnt_t npages; 1549 1550 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1551 1552 sptseg = shmd->shm_sptseg; 1553 sptd = sptseg->s_data; 1554 1555 /* 1556 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 1557 * and therefore their pages are SE_SHARED locked 1558 * for the entire life of the segment. 1559 */ 1560 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) && 1561 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) { 1562 goto softlock_decrement; 1563 } 1564 1565 /* 1566 * Any thread is free to do a page_find and 1567 * page_unlock() on the pages within this seg. 1568 * 1569 * We are already holding the as->a_lock on the user's 1570 * real segment, but we need to hold the a_lock on the 1571 * underlying dummy as. This is mostly to satisfy the 1572 * underlying HAT layer. 1573 */ 1574 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1575 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len); 1576 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1577 1578 amp = sptd->spt_amp; 1579 ASSERT(amp != NULL); 1580 anon_index = seg_page(sptseg, sptseg_addr); 1581 1582 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) { 1583 ap = anon_get_ptr(amp->ahp, anon_index++); 1584 ASSERT(ap != NULL); 1585 swap_xlate(ap, &vp, &offset); 1586 1587 /* 1588 * Use page_find() instead of page_lookup() to 1589 * find the page since we know that it has a 1590 * "shared" lock. 
1591 */ 1592 pp = page_find(vp, offset); 1593 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1)); 1594 if (pp == NULL) { 1595 panic("segspt_softunlock: " 1596 "addr %p, ap %p, vp %p, off %llx", 1597 (void *)adr, (void *)ap, (void *)vp, offset); 1598 /*NOTREACHED*/ 1599 } 1600 1601 if (rw == S_WRITE) { 1602 hat_setrefmod(pp); 1603 } else if (rw != S_OTHER) { 1604 hat_setref(pp); 1605 } 1606 page_unlock(pp); 1607 } 1608 1609 softlock_decrement: 1610 npages = btopr(len); 1611 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages); 1612 if (shmd->shm_softlockcnt == 0) { 1613 /* 1614 * All SOFTLOCKS are gone. Wakeup any waiting 1615 * unmappers so they can try again to unmap. 1616 * Check for waiters first without the mutex 1617 * held so we don't always grab the mutex on 1618 * softunlocks. 1619 */ 1620 if (AS_ISUNMAPWAIT(seg->s_as)) { 1621 mutex_enter(&seg->s_as->a_contents); 1622 if (AS_ISUNMAPWAIT(seg->s_as)) { 1623 AS_CLRUNMAPWAIT(seg->s_as); 1624 cv_broadcast(&seg->s_as->a_cv); 1625 } 1626 mutex_exit(&seg->s_as->a_contents); 1627 } 1628 } 1629 } 1630 1631 int 1632 segspt_shmattach(struct seg *seg, caddr_t *argsp) 1633 { 1634 struct shm_data *shmd_arg = (struct shm_data *)argsp; 1635 struct shm_data *shmd; 1636 struct anon_map *shm_amp = shmd_arg->shm_amp; 1637 struct spt_data *sptd; 1638 int error = 0; 1639 1640 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1641 1642 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); 1643 if (shmd == NULL) 1644 return (ENOMEM); 1645 1646 shmd->shm_sptas = shmd_arg->shm_sptas; 1647 shmd->shm_amp = shm_amp; 1648 shmd->shm_sptseg = shmd_arg->shm_sptseg; 1649 1650 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0, 1651 NULL, 0, seg->s_size); 1652 1653 seg->s_data = (void *)shmd; 1654 seg->s_ops = &segspt_shmops; 1655 seg->s_szc = shmd->shm_sptseg->s_szc; 1656 sptd = shmd->shm_sptseg->s_data; 1657 1658 if (sptd->spt_flags & SHM_PAGEABLE) { 1659 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size), 1660 KM_NOSLEEP)) == NULL) { 1661 seg->s_data = (void *)NULL; 1662 kmem_free(shmd, (sizeof (*shmd))); 1663 return (ENOMEM); 1664 } 1665 shmd->shm_lckpgs = 0; 1666 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 1667 if ((error = hat_share(seg->s_as->a_hat, seg->s_base, 1668 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1669 seg->s_size, seg->s_szc)) != 0) { 1670 kmem_free(shmd->shm_vpage, 1671 btopr(shm_amp->size)); 1672 } 1673 } 1674 } else { 1675 error = hat_share(seg->s_as->a_hat, seg->s_base, 1676 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1677 seg->s_size, seg->s_szc); 1678 } 1679 if (error) { 1680 seg->s_szc = 0; 1681 seg->s_data = (void *)NULL; 1682 kmem_free(shmd, (sizeof (*shmd))); 1683 } else { 1684 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1685 shm_amp->refcnt++; 1686 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1687 } 1688 return (error); 1689 } 1690 1691 int 1692 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) 1693 { 1694 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1695 int reclaim = 1; 1696 1697 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1698 retry: 1699 if (shmd->shm_softlockcnt > 0) { 1700 if (reclaim == 1) { 1701 segspt_purge(seg); 1702 reclaim = 0; 1703 goto retry; 1704 } 1705 return (EAGAIN); 1706 } 1707 1708 if (ssize != seg->s_size) { 1709 #ifdef DEBUG 1710 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n", 1711 ssize, seg->s_size); 1712 #endif 1713 return (EINVAL); 1714 } 1715 1716 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, 
MC_UNLOCK, 1717 NULL, 0); 1718 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc); 1719 1720 seg_free(seg); 1721 1722 return (0); 1723 } 1724 1725 void 1726 segspt_shmfree(struct seg *seg) 1727 { 1728 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1729 struct anon_map *shm_amp = shmd->shm_amp; 1730 1731 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1732 1733 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0, 1734 MC_UNLOCK, NULL, 0); 1735 1736 /* 1737 * Need to increment refcnt when attaching 1738 * and decrement when detaching because of dup(). 1739 */ 1740 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1741 shm_amp->refcnt--; 1742 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1743 1744 if (shmd->shm_vpage) { /* only for DISM */ 1745 kmem_free(shmd->shm_vpage, btopr(shm_amp->size)); 1746 shmd->shm_vpage = NULL; 1747 } 1748 kmem_free(shmd, sizeof (*shmd)); 1749 } 1750 1751 /*ARGSUSED*/ 1752 int 1753 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 1754 { 1755 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1756 1757 /* 1758 * Shared page table is more than shared mapping. 1759 * Individual process sharing page tables can't change prot 1760 * because there is only one set of page tables. 1761 * This will be allowed after private page table is 1762 * supported. 1763 */ 1764 /* need to return correct status error? */ 1765 return (0); 1766 } 1767 1768 1769 faultcode_t 1770 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr, 1771 size_t len, enum fault_type type, enum seg_rw rw) 1772 { 1773 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1774 struct seg *sptseg = shmd->shm_sptseg; 1775 struct as *curspt = shmd->shm_sptas; 1776 struct spt_data *sptd = sptseg->s_data; 1777 pgcnt_t npages; 1778 size_t size; 1779 caddr_t segspt_addr, shm_addr; 1780 page_t **ppa; 1781 int i; 1782 ulong_t an_idx = 0; 1783 int err = 0; 1784 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0); 1785 size_t pgsz; 1786 pgcnt_t pgcnt; 1787 caddr_t a; 1788 pgcnt_t pidx; 1789 1790 #ifdef lint 1791 hat = hat; 1792 #endif 1793 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1794 1795 /* 1796 * Because of the way spt is implemented 1797 * the realsize of the segment does not have to be 1798 * equal to the segment size itself. The segment size is 1799 * often in multiples of a page size larger than PAGESIZE. 1800 * The realsize is rounded up to the nearest PAGESIZE 1801 * based on what the user requested. This is a bit of 1802 * ungliness that is historical but not easily fixed 1803 * without re-designing the higher levels of ISM. 1804 */ 1805 ASSERT(addr >= seg->s_base); 1806 if (((addr + len) - seg->s_base) > sptd->spt_realsize) 1807 return (FC_NOMAP); 1808 /* 1809 * For all of the following cases except F_PROT, we need to 1810 * make any necessary adjustments to addr and len 1811 * and get all of the necessary page_t's into an array called ppa[]. 1812 * 1813 * The code in shmat() forces base addr and len of ISM segment 1814 * to be aligned to largest page size supported. Therefore, 1815 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large 1816 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 1817 * in large pagesize chunks, or else we will screw up the HAT 1818 * layer by calling hat_memload_array() with differing page sizes 1819 * over a given virtual range. 
1820 */ 1821 pgsz = page_get_pagesize(sptseg->s_szc); 1822 pgcnt = page_get_pagecnt(sptseg->s_szc); 1823 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); 1824 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz); 1825 npages = btopr(size); 1826 1827 /* 1828 * Now we need to convert from addr in segshm to addr in segspt. 1829 */ 1830 an_idx = seg_page(seg, shm_addr); 1831 segspt_addr = sptseg->s_base + ptob(an_idx); 1832 1833 ASSERT((segspt_addr + ptob(npages)) <= 1834 (sptseg->s_base + sptd->spt_realsize)); 1835 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size)); 1836 1837 switch (type) { 1838 1839 case F_SOFTLOCK: 1840 1841 mutex_enter(&freemem_lock); 1842 if (availrmem < tune.t_minarmem + npages) { 1843 mutex_exit(&freemem_lock); 1844 return (FC_MAKE_ERR(ENOMEM)); 1845 } else { 1846 availrmem -= npages; 1847 } 1848 mutex_exit(&freemem_lock); 1849 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages); 1850 /* 1851 * Fall through to the F_INVAL case to load up the hat layer 1852 * entries with the HAT_LOAD_LOCK flag. 1853 */ 1854 /* FALLTHRU */ 1855 case F_INVAL: 1856 1857 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 1858 return (FC_NOMAP); 1859 1860 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP); 1861 1862 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa); 1863 if (err != 0) { 1864 if (type == F_SOFTLOCK) { 1865 mutex_enter(&freemem_lock); 1866 availrmem += npages; 1867 mutex_exit(&freemem_lock); 1868 atomic_add_long((ulong_t *)( 1869 &(shmd->shm_softlockcnt)), -npages); 1870 } 1871 goto dism_err; 1872 } 1873 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1874 a = segspt_addr; 1875 pidx = 0; 1876 if (type == F_SOFTLOCK) { 1877 1878 /* 1879 * Load up the translation keeping it 1880 * locked and don't unlock the page. 1881 */ 1882 for (; pidx < npages; a += pgsz, pidx += pgcnt) { 1883 hat_memload_array(sptseg->s_as->a_hat, 1884 a, pgsz, &ppa[pidx], sptd->spt_prot, 1885 HAT_LOAD_LOCK | HAT_LOAD_SHARE); 1886 } 1887 } else { 1888 if (hat == seg->s_as->a_hat) { 1889 1890 /* 1891 * Migrate pages marked for migration 1892 */ 1893 if (lgrp_optimizations()) 1894 page_migrate(seg, shm_addr, ppa, 1895 npages); 1896 1897 /* CPU HAT */ 1898 for (; pidx < npages; 1899 a += pgsz, pidx += pgcnt) { 1900 hat_memload_array(sptseg->s_as->a_hat, 1901 a, pgsz, &ppa[pidx], 1902 sptd->spt_prot, 1903 HAT_LOAD_SHARE); 1904 } 1905 } else { 1906 /* XHAT. Pass real address */ 1907 hat_memload_array(hat, shm_addr, 1908 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE); 1909 } 1910 1911 /* 1912 * And now drop the SE_SHARED lock(s). 1913 */ 1914 if (dyn_ism_unmap) { 1915 for (i = 0; i < npages; i++) { 1916 page_unlock(ppa[i]); 1917 } 1918 } 1919 } 1920 1921 if (!dyn_ism_unmap) { 1922 if (hat_share(seg->s_as->a_hat, shm_addr, 1923 curspt->a_hat, segspt_addr, ptob(npages), 1924 seg->s_szc) != 0) { 1925 panic("hat_share err in DISM fault"); 1926 /* NOTREACHED */ 1927 } 1928 if (type == F_INVAL) { 1929 for (i = 0; i < npages; i++) { 1930 page_unlock(ppa[i]); 1931 } 1932 } 1933 } 1934 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1935 dism_err: 1936 kmem_free(ppa, npages * sizeof (page_t *)); 1937 return (err); 1938 1939 case F_SOFTUNLOCK: 1940 1941 mutex_enter(&freemem_lock); 1942 availrmem += npages; 1943 mutex_exit(&freemem_lock); 1944 1945 /* 1946 * This is a bit ugly, we pass in the real seg pointer, 1947 * but the segspt_addr is the virtual address within the 1948 * dummy seg. 
	case F_SOFTUNLOCK:

		mutex_enter(&freemem_lock);
		availrmem += npages;
		mutex_exit(&freemem_lock);

		/*
		 * This is a bit ugly, we pass in the real seg pointer,
		 * but the segspt_addr is the virtual address within the
		 * dummy seg.
		 */
		segspt_softunlock(seg, segspt_addr, size, rw);
		return (0);

	case F_PROT:

		/*
		 * This takes care of the unusual case where a user
		 * allocates a stack in shared memory and a register
		 * window overflow is written to that stack page before
		 * it is otherwise modified.
		 *
		 * We can get away with this because ISM segments are
		 * always rw. Other than this unusual case, there
		 * should be no instances of protection violations.
		 */
		return (0);

	default:
#ifdef DEBUG
		panic("segspt_dismfault default type?");
#else
		return (FC_NOMAP);
#endif
	}
}

faultcode_t
segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
    size_t len, enum fault_type type, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct as *curspt = shmd->shm_sptas;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t npages;
	size_t size;
	caddr_t sptseg_addr, shm_addr;
	page_t *pp, **ppa;
	int i;
	u_offset_t offset;
	ulong_t anon_index = 0;
	struct vnode *vp;
	struct anon_map *amp;		/* XXX - for locknest */
	struct anon *ap = NULL;
	size_t pgsz;
	pgcnt_t pgcnt;
	caddr_t a;
	pgcnt_t pidx;
	size_t sz;

#ifdef lint
	hat = hat;
#endif

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	if (sptd->spt_flags & SHM_PAGEABLE) {
		return (segspt_dismfault(hat, seg, addr, len, type, rw));
	}

	/*
	 * Because of the way spt is implemented
	 * the realsize of the segment does not have to be
	 * equal to the segment size itself. The segment size is
	 * often in multiples of a page size larger than PAGESIZE.
	 * The realsize is rounded up to the nearest PAGESIZE
	 * based on what the user requested. This is a bit of
	 * ugliness that is historical but not easily fixed
	 * without re-designing the higher levels of ISM.
	 */
	ASSERT(addr >= seg->s_base);
	if (((addr + len) - seg->s_base) > sptd->spt_realsize)
		return (FC_NOMAP);
	/*
	 * For all of the following cases except F_PROT, we need to
	 * make any necessary adjustments to addr and len
	 * and get all of the necessary page_t's into an array called ppa[].
	 *
	 * The code in shmat() forces base addr and len of ISM segment
	 * to be aligned to largest page size supported. Therefore,
	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
	 * in large pagesize chunks, or else we will screw up the HAT
	 * layer by calling hat_memload_array() with differing page sizes
	 * over a given virtual range.
	 */
	pgsz = page_get_pagesize(sptseg->s_szc);
	pgcnt = page_get_pagecnt(sptseg->s_szc);
	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
	npages = btopr(size);

	/*
	 * Now we need to convert from addr in segshm to addr in segspt.
	 */
	anon_index = seg_page(seg, shm_addr);
	sptseg_addr = sptseg->s_base + ptob(anon_index);

	/*
	 * And now we may have to adjust npages downward if we have
	 * exceeded the realsize of the segment or initial anon
	 * allocations.
	 */
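	/*
	 * For example, with 4M large pages and a segment whose
	 * spt_realsize is 5M, a fault in the second large page is
	 * rounded to a 4M range above, but only 1M of it lies below
	 * spt_realsize; the check below trims size (and npages) so we
	 * never look up anon slots beyond realsize.
	 */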
	if ((sptseg_addr + ptob(npages)) >
	    (sptseg->s_base + sptd->spt_realsize))
		size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;

	npages = btopr(size);

	ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
	ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);

	switch (type) {

	case F_SOFTLOCK:

		/*
		 * availrmem is decremented once during anon_swap_adjust()
		 * and is incremented during the anon_unresv(), which is
		 * called from shm_rm_amp() when the segment is destroyed.
		 */
		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
		/*
		 * Some platforms assume that ISM pages are SE_SHARED
		 * locked for the entire life of the segment.
		 */
		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
			return (0);
		/*
		 * Fall through to the F_INVAL case to load up the hat layer
		 * entries with the HAT_LOAD_LOCK flag.
		 */

		/* FALLTHRU */
	case F_INVAL:

		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
			return (FC_NOMAP);

		/*
		 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
		 * may still rely on this call to hat_share(). That
		 * would imply that those hats can fault on a
		 * HAT_LOAD_LOCK translation, which would seem
		 * contradictory.
		 */
		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
			if (hat_share(seg->s_as->a_hat, seg->s_base,
			    curspt->a_hat, sptseg->s_base,
			    sptseg->s_size, sptseg->s_szc) != 0) {
				panic("hat_share error in ISM fault");
				/*NOTREACHED*/
			}
			return (0);
		}
		ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);

		/*
		 * I see no need to lock the real seg here, because all
		 * of our work will be on the underlying dummy seg.
		 *
		 * sptseg_addr and npages now account for large pages.
		 */
		amp = sptd->spt_amp;
		ASSERT(amp != NULL);
		anon_index = seg_page(sptseg, sptseg_addr);

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++) {
			ap = anon_get_ptr(amp->ahp, anon_index++);
			ASSERT(ap != NULL);
			swap_xlate(ap, &vp, &offset);
			pp = page_lookup(vp, offset, SE_SHARED);
			ASSERT(pp != NULL);
			ppa[i] = pp;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		ASSERT(i == npages);

		/*
		 * We are already holding the as->a_lock on the user's
		 * real segment, but we need to hold the a_lock on the
		 * underlying dummy as. This is mostly to satisfy the
		 * underlying HAT layer.
		 */
		AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
		a = sptseg_addr;
		pidx = 0;
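		/*
		 * Unlike the DISM path, npages may have been trimmed to
		 * spt_realsize above and so need not be a multiple of
		 * pgcnt; the loads below therefore clamp each chunk to
		 * MIN(pgsz, ptob(npages - pidx)) so the final chunk never
		 * reaches past the end of ppa[].
		 */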
		if (type == F_SOFTLOCK) {
			/*
			 * Load up the translation keeping it
			 * locked and don't unlock the page.
			 */
			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
				sz = MIN(pgsz, ptob(npages - pidx));
				hat_memload_array(sptseg->s_as->a_hat, a,
				    sz, &ppa[pidx], sptd->spt_prot,
				    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
			}
		} else {
			if (hat == seg->s_as->a_hat) {

				/*
				 * Migrate pages marked for migration.
				 */
				if (lgrp_optimizations())
					page_migrate(seg, shm_addr, ppa,
					    npages);

				/* CPU HAT */
				for (; pidx < npages;
				    a += pgsz, pidx += pgcnt) {
					sz = MIN(pgsz, ptob(npages - pidx));
					hat_memload_array(sptseg->s_as->a_hat,
					    a, sz, &ppa[pidx],
					    sptd->spt_prot, HAT_LOAD_SHARE);
				}
			} else {
				/* XHAT. Pass real address */
				hat_memload_array(hat, shm_addr,
				    ptob(npages), ppa, sptd->spt_prot,
				    HAT_LOAD_SHARE);
			}

			/*
			 * And now drop the SE_SHARED lock(s).
			 */
			for (i = 0; i < npages; i++)
				page_unlock(ppa[i]);
		}
		AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);

		kmem_free(ppa, sizeof (page_t *) * npages);
		return (0);

	case F_SOFTUNLOCK:

		/*
		 * This is a bit ugly, we pass in the real seg pointer,
		 * but the sptseg_addr is the virtual address within the
		 * dummy seg.
		 */
		segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
		return (0);

	case F_PROT:

		/*
		 * This takes care of the unusual case where a user
		 * allocates a stack in shared memory and a register
		 * window overflow is written to that stack page before
		 * it is otherwise modified.
		 *
		 * We can get away with this because ISM segments are
		 * always rw. Other than this unusual case, there
		 * should be no instances of protection violations.
		 */
		return (0);

	default:
#ifdef DEBUG
		cmn_err(CE_WARN, "segspt_shmfault default type?");
#endif
		return (FC_NOMAP);
	}
}

/*ARGSUSED*/
static faultcode_t
segspt_shmfaulta(struct seg *seg, caddr_t addr)
{
	return (0);
}

/*ARGSUSED*/
static int
segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

/*ARGSUSED*/
static size_t
segspt_shmswapout(struct seg *seg)
{
	return (0);
}

/*
 * duplicate the shared page tables
 */
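/*
 * The duplicated segment shares the underlying spt dummy segment and
 * anon_map with the original; the amp reference taken here is the one
 * dropped later in segspt_shmfree().  For DISM (SHM_PAGEABLE), each
 * attaching segment gets its own shm_vpage[] bitmap so that page-lock
 * state can be tracked per attach.
 */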
int
segspt_shmdup(struct seg *seg, struct seg *newseg)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon_map *amp = shmd->shm_amp;
	struct shm_data *shmd_new;
	struct seg *spt_seg = shmd->shm_sptseg;
	struct spt_data *sptd = spt_seg->s_data;
	int error = 0;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
	newseg->s_data = (void *)shmd_new;
	shmd_new->shm_sptas = shmd->shm_sptas;
	shmd_new->shm_amp = amp;
	shmd_new->shm_sptseg = shmd->shm_sptseg;
	newseg->s_ops = &segspt_shmops;
	newseg->s_szc = seg->s_szc;
	ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	amp->refcnt++;
	ANON_LOCK_EXIT(&amp->a_rwlock);

	if (sptd->spt_flags & SHM_PAGEABLE) {
		shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
		shmd_new->shm_lckpgs = 0;
		if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
			if ((error = hat_share(newseg->s_as->a_hat,
			    newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
			    seg->s_size, seg->s_szc)) != 0) {
				kmem_free(shmd_new->shm_vpage,
				    btopr(amp->size));
			}
		}
		return (error);
	} else {
		return (hat_share(newseg->s_as->a_hat, newseg->s_base,
		    shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
		    seg->s_szc));
	}
}

/*ARGSUSED*/
int
segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * ISM segment is always rw.
	 */
	return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
}

/*
 * Return an array of locked large pages, for empty slots allocate
 * private zero-filled anon pages.
 */
static int
spt_anon_getpages(
	struct seg *sptseg,
	caddr_t sptaddr,
	size_t len,
	page_t *ppa[])
{
	struct spt_data *sptd = sptseg->s_data;
	struct anon_map *amp = sptd->spt_amp;
	enum seg_rw rw = sptd->spt_prot;
	uint_t szc = sptseg->s_szc;
	size_t pg_sz, share_sz = page_get_pagesize(szc);
	pgcnt_t lp_npgs;
	caddr_t lp_addr, e_sptaddr;
	uint_t vpprot, ppa_szc = 0;
	struct vpage *vpage = NULL;
	ulong_t j, ppa_idx;
	int err, ierr = 0;
	pgcnt_t an_idx;
	anon_sync_obj_t cookie;

	ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
	ASSERT(len != 0);

	pg_sz = share_sz;
	lp_npgs = btop(pg_sz);
	lp_addr = sptaddr;
	e_sptaddr = sptaddr + len;
	an_idx = seg_page(sptseg, sptaddr);
	ppa_idx = 0;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	/*CONSTCOND*/
	while (1) {
		for (; lp_addr < e_sptaddr;
		    an_idx += lp_npgs, lp_addr += pg_sz,
		    ppa_idx += lp_npgs) {

			anon_array_enter(amp, an_idx, &cookie);
			ppa_szc = (uint_t)-1;
			ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
			    lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
			    &ppa_szc, vpage, rw, 0, segvn_anypgsz, kcred);
			anon_array_exit(&cookie);

			if (ierr != 0) {
				if (ierr > 0) {
					err = FC_MAKE_ERR(ierr);
					goto lpgs_err;
				}
				break;
			}
		}
		if (lp_addr == e_sptaddr) {
			break;
		}
		ASSERT(lp_addr < e_sptaddr);

		/*
		 * ierr == -1 means we failed to allocate a large page,
		 * so do a size down operation.
		 *
		 * ierr == -2 means some other process that privately shares
		 * pages with this process has allocated a larger page and we
		 * need to retry with larger pages. So do a size up
		 * operation. This relies on the fact that large pages are
		 * never partially shared, i.e. if we share any constituent
		 * page of a large page with another process we must share the
		 * entire large page. Note this cannot happen for SOFTLOCK
		 * case, unless current address (lp_addr) is at the beginning
		 * of the next page size boundary because the other process
		 * couldn't have relocated locked pages.
		 */
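		/*
		 * For example (page sizes are illustrative): if a 4M
		 * allocation fails with ierr == -1 we drop to the next
		 * smaller size and retry the same range; if we then hit
		 * a constituent page that another process already maps
		 * as part of a larger page (ierr == -2), we size back up
		 * before retrying.
		 */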
		ASSERT(ierr == -1 || ierr == -2);
		if (segvn_anypgsz) {
			ASSERT(ierr == -2 || szc != 0);
			ASSERT(ierr == -1 || szc < sptseg->s_szc);
			szc = (ierr == -1) ? szc - 1 : szc + 1;
		} else {
			/*
			 * For faults and segvn_anypgsz == 0
			 * we need to be careful not to loop forever
			 * if existing page is found with szc other
			 * than 0 or seg->s_szc. This could be due
			 * to page relocations on behalf of DR or
			 * more likely large page creation. For this
			 * case simply re-size to existing page's szc
			 * if returned by anon_map_getpages().
			 */
			if (ppa_szc == (uint_t)-1) {
				szc = (ierr == -1) ? 0 : sptseg->s_szc;
			} else {
				ASSERT(ppa_szc <= sptseg->s_szc);
				ASSERT(ierr == -2 || ppa_szc < szc);
				ASSERT(ierr == -1 || ppa_szc > szc);
				szc = ppa_szc;
			}
		}
		pg_sz = page_get_pagesize(szc);
		lp_npgs = btop(pg_sz);
		ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
	return (0);

lpgs_err:
	ANON_LOCK_EXIT(&amp->a_rwlock);
	for (j = 0; j < ppa_idx; j++)
		page_unlock(ppa[j]);
	return (err);
}

/*
 * count the number of bytes in a set of spt pages that are currently not
 * locked
 */
static rctl_qty_t
spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
{
	ulong_t i;
	rctl_qty_t unlocked = 0;

	for (i = 0; i < npages; i++) {
		if (ppa[i]->p_lckcnt == 0)
			unlocked += PAGESIZE;
	}
	return (unlocked);
}

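/*
 * Lock down the npages DISM pages described by ppa[] starting at
 * anon_index.  Pages newly locked by this call are flagged in
 * shm_vpage[] and, when a lockmap is supplied, in the lockmap bit at
 * the corresponding pos.  *locked returns the number of bytes that
 * went from unlocked to locked so the caller can settle the
 * locked-memory rctl accounting; EAGAIN is returned if a page cannot
 * be locked.
 */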
int
spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
    page_t **ppa, ulong_t *lockmap, size_t pos,
    rctl_qty_t *locked)
{
	struct shm_data *shmd = seg->s_data;
	struct spt_data *sptd = shmd->shm_sptseg->s_data;
	ulong_t i;
	int kernel;

	/* return the number of bytes actually locked */
	*locked = 0;
	for (i = 0; i < npages; anon_index++, pos++, i++) {
		if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
			if (sptd->spt_ppa_lckcnt[anon_index] <
			    (ushort_t)DISM_LOCK_MAX) {
				if (++sptd->spt_ppa_lckcnt[anon_index] ==
				    (ushort_t)DISM_LOCK_MAX) {
					cmn_err(CE_WARN,
					    "DISM page lock limit "
					    "reached on DISM offset 0x%lx\n",
					    anon_index << PAGESHIFT);
				}
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[anon_index]) ? 1 : 0;
				if (!page_pp_lock(ppa[i], 0, kernel)) {
					sptd->spt_ppa_lckcnt[anon_index]--;
					return (EAGAIN);
				}
				/* if this is a newly locked page, count it */
				if (ppa[i]->p_lckcnt == 1) {
					*locked += PAGESIZE;
				}
				shmd->shm_lckpgs++;
				shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
				if (lockmap != NULL)
					BT_SET(lockmap, pos);
			}
		}
	}
	return (0);
}

/*ARGSUSED*/
static int
segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos)
{
	struct shm_data *shmd = seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	struct kshmid *sp = sptd->spt_amp->a_sp;
	pgcnt_t npages, a_npages;
	page_t **ppa;
	pgcnt_t an_idx, a_an_idx, ppa_idx;
	caddr_t spt_addr, a_addr;	/* spt and aligned address */
	size_t a_len;			/* aligned len */
	size_t share_sz;
	ulong_t i;
	int sts = 0;
	rctl_qty_t unlocked = 0;
	rctl_qty_t locked = 0;
	struct proc *p = curproc;
	kproject_t *proj;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(sp != NULL);

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		return (0);
	}

	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	an_idx = seg_page(seg, addr);
	npages = btopr(len);

	if (an_idx + npages > btopr(shmd->shm_amp->size)) {
		return (ENOMEM);
	}

	/*
	 * A shm's project never changes, so no lock needed.
	 * The shm has a hold on the project, so it will not go away.
	 * Since we have a mapping to shm within this zone, we know
	 * that the zone will not go away.
	 */
	proj = sp->shm_perm.ipc_proj;

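	/*
	 * MC_LOCK: bring in (and SE_SHARED lock) the pages with
	 * spt_anon_getpages(), charge the project's locked-memory rctl
	 * for the bytes that are not already locked, then lock the
	 * originally requested range with spt_lockpages().  Any bytes
	 * that could not be locked are refunded to the rctl below.
	 */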
	if (op == MC_LOCK) {

		/*
		 * Need to align addr and size request if they are not
		 * aligned so we can always allocate large page(s);
		 * however, we only lock what was requested in the
		 * initial request.
		 */
		share_sz = page_get_pagesize(sptseg->s_szc);
		a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
		a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
		    share_sz);
		a_npages = btop(a_len);
		a_an_idx = seg_page(seg, a_addr);
		spt_addr = sptseg->s_base + ptob(a_an_idx);
		ppa_idx = an_idx - a_an_idx;

		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
		    KM_NOSLEEP)) == NULL) {
			return (ENOMEM);
		}

		/*
		 * Don't cache any new pages for IO and
		 * flush any cached pages.
		 */
		mutex_enter(&sptd->spt_lock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
		if (sts != 0) {
			mutex_exit(&sptd->spt_lock);
			kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
			return (sts);
		}

		mutex_enter(&sp->shm_mlock);
		/* enforce locked memory rctl */
		unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);

		mutex_enter(&p->p_lock);
		if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
			mutex_exit(&p->p_lock);
			sts = EAGAIN;
		} else {
			mutex_exit(&p->p_lock);
			sts = spt_lockpages(seg, an_idx, npages,
			    &ppa[ppa_idx], lockmap, pos, &locked);

			/*
			 * correct locked count if not all pages could be
			 * locked
			 */
			if ((unlocked - locked) > 0) {
				rctl_decr_locked_mem(NULL, proj,
				    (unlocked - locked), 0);
			}
		}
		/*
		 * unlock pages
		 */
		for (i = 0; i < a_npages; i++)
			page_unlock(ppa[i]);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sp->shm_mlock);
		mutex_exit(&sptd->spt_lock);

		kmem_free(ppa, ((sizeof (page_t *)) * a_npages));

	} else if (op == MC_UNLOCK) {	/* unlock */
		struct anon_map *amp;
		struct anon *ap;
		struct vnode *vp;
		u_offset_t off;
		struct page *pp;
		int kernel;
		anon_sync_obj_t cookie;
		rctl_qty_t unlocked = 0;

		amp = sptd->spt_amp;
		mutex_enter(&sptd->spt_lock);
		if (shmd->shm_lckpgs == 0) {
			mutex_exit(&sptd->spt_lock);
			return (0);
		}
		/*
		 * Don't cache new IO pages.
		 */
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		mutex_enter(&sp->shm_mlock);
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, an_idx++) {
			if (shmd->shm_vpage[an_idx] & DISM_PG_LOCKED) {
				anon_array_enter(amp, an_idx, &cookie);
				ap = anon_get_ptr(amp->ahp, an_idx);
				ASSERT(ap);

				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup(vp, off, SE_SHARED);
				ASSERT(pp);
				/*
				 * the availrmem is decremented only for
				 * pages which are not in seg pcache,
				 * for pages in seg pcache availrmem was
				 * decremented in _dismpagelock() (if
				 * they were not locked here)
				 */
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[an_idx]) ? 1 : 0;
				ASSERT(pp->p_lckcnt > 0);
				page_pp_unlock(pp, 0, kernel);
				if (pp->p_lckcnt == 0)
					unlocked += PAGESIZE;
				page_unlock(pp);
				shmd->shm_vpage[an_idx] &= ~DISM_PG_LOCKED;
				sptd->spt_ppa_lckcnt[an_idx]--;
				shmd->shm_lckpgs--;
			}
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);

		rctl_decr_locked_mem(NULL, proj, unlocked, 0);
		mutex_exit(&sp->shm_mlock);
	}
	return (sts);
}

/*ARGSUSED*/
int
segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	spgcnt_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * ISM segment is always rw.
	 */
	while (--pgno >= 0)
		*protv++ = sptd->spt_prot;
	return (0);
}

/*ARGSUSED*/
u_offset_t
segspt_shmgetoffset(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/* Offset does not matter in ISM memory */

	return ((u_offset_t)0);
}

/* ARGSUSED */
int
segspt_shmgettype(struct seg *seg, caddr_t addr)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * The shared memory mapping is always MAP_SHARED; swap is
	 * reserved only for DISM.
	 */
	return (MAP_SHARED |
	    ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
}

/*ARGSUSED*/
int
segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	*vpp = sptd->spt_vp;
	return (0);
}

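/*
 * MADV_FREE on a DISM segment invalidates any cached page array
 * (DISM_PPA_CHANGED), purges the seg pcache and then lets
 * anon_disclaim() release the backing anon pages for the range.
 * The MADV_ACCESS_* hints set the lgroup memory allocation policy
 * on the underlying anon_map and mark existing pages for migration.
 */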
/*ARGSUSED*/
static int
segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	struct anon_map *amp;
	pgcnt_t pg_idx;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	if (behav == MADV_FREE) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
			return (0);

		amp = sptd->spt_amp;
		pg_idx = seg_page(seg, addr);

		mutex_enter(&sptd->spt_lock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);

		/*
		 * Purge all DISM cached pages
		 */
		seg_ppurge_seg(segspt_reclaim);

		mutex_enter(&sptd->spt_lock);
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		anon_disclaim(amp, pg_idx, len, ANON_PGLOOKUP_BLK);
		ANON_LOCK_EXIT(&amp->a_rwlock);
		mutex_exit(&sptd->spt_lock);
	} else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
	    behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
		int already_set;
		ulong_t anon_index;
		lgrp_mem_policy_t policy;
		caddr_t shm_addr;
		size_t share_size;
		size_t size;
		struct seg *sptseg = shmd->shm_sptseg;
		caddr_t sptseg_addr;

		/*
		 * Align address and length to page size of underlying segment
		 */
		share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
		shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
		size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
		    share_size);

		amp = shmd->shm_amp;
		anon_index = seg_page(seg, shm_addr);

		/*
		 * And now we may have to adjust size downward if we have
		 * exceeded the realsize of the segment or initial anon
		 * allocations.
		 */
		sptseg_addr = sptseg->s_base + ptob(anon_index);
		if ((sptseg_addr + size) >
		    (sptseg->s_base + sptd->spt_realsize))
			size = (sptseg->s_base + sptd->spt_realsize) -
			    sptseg_addr;

		/*
		 * Set memory allocation policy for this segment
		 */
		policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
		already_set = lgrp_shm_policy_set(policy, amp, anon_index,
		    NULL, 0, len);

		/*
		 * If random memory allocation policy set already,
		 * don't bother reapplying it.
		 */
		if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
			return (0);

		/*
		 * Mark any existing pages in the given range for
		 * migration, flushing the I/O page cache, and using
		 * underlying segment to calculate anon index and get
		 * anonmap and vnode pointer from
		 */
		if (shmd->shm_softlockcnt > 0)
			segspt_purge(seg);

		page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
	}

	return (0);
}

/*ARGSUSED*/
void
segspt_shmdump(struct seg *seg)
{
	/* no-op for ISM segment */
}

/*ARGSUSED*/
static faultcode_t
segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	return (ENOTSUP);
}

/*
 * get a memory ID for an addr in a given segment
 */
static int
segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon *ap;
	size_t anon_index;
	struct anon_map *amp = shmd->shm_amp;
	struct spt_data *sptd = shmd->shm_sptseg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	anon_sync_obj_t cookie;

	anon_index = seg_page(seg, addr);

	if (addr > (seg->s_base + sptd->spt_realsize)) {
		return (EFAULT);
	}

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	anon_array_enter(amp, anon_index, &cookie);
	ap = anon_get_ptr(amp->ahp, anon_index);
	if (ap == NULL) {
		struct page *pp;
		caddr_t spt_addr = sptseg->s_base + ptob(anon_index);

		pp = anon_zero(sptseg, spt_addr, &ap, kcred);
		if (pp == NULL) {
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			return (ENOMEM);
		}
		(void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
		page_unlock(pp);
	}
	anon_array_exit(&cookie);
	ANON_LOCK_EXIT(&amp->a_rwlock);
	memidp->val[0] = (uintptr_t)ap;
	memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
	return (0);
}

/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map *amp;
	ulong_t anon_index;
	lgrp_mem_policy_info_t *policy_info;
	struct shm_data *shm_data;

	ASSERT(seg != NULL);

	/*
	 * Get anon_map from segshm
	 *
	 * Assume that no lock needs to be held on anon_map, since
	 * it should be protected by its reference count which must be
	 * nonzero for an existing segment.
	 * Need to grab readers lock on policy tree though
	 */
	shm_data = (struct shm_data *)seg->s_data;
	if (shm_data == NULL)
		return (NULL);
	amp = shm_data->shm_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Get policy info
	 *
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

	return (policy_info);
}

/*ARGSUSED*/
static int
segspt_shmcapable(struct seg *seg, segcapability_t capability)
{
	return (0);
}