/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/user.h>
#include <sys/mman.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/tuneable.h>
#include <vm/hat.h>
#include <vm/seg.h>
#include <vm/as.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <sys/buf.h>
#include <sys/swap.h>
#include <sys/atomic.h>
#include <vm/seg_spt.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/shm.h>
#include <sys/shm_impl.h>
#include <sys/lgrp.h>
#include <sys/vmsystm.h>
#include <sys/policy.h>
#include <sys/project.h>
#include <sys/tnf_probe.h>
#include <sys/zone.h>

#define	SEGSPTADDR	(caddr_t)0x0

/*
 * # pages used for spt
 */
size_t	spt_used;

/*
 * segspt_minfree is the memory left for system after ISM
 * locked its pages; it is set up to 5% of availrmem in
 * sptcreate when ISM is created. ISM should not use more
 * than ~90% of availrmem; if it does, then the performance
 * of the system may decrease. Machines with large memories may
 * be able to use up more memory for ISM so we set the default
 * segspt_minfree to 5% (which gives ISM max 95% of availrmem).
 * If somebody wants even more memory for ISM (risking hanging
 * the system) they can patch segspt_minfree to a smaller number.
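 * For example, on a machine with 20 Gbytes of availrmem the default of
 * availrmem/20 (set in sptcreate()) leaves at least about 1 Gbyte
 * unlocked for the rest of the system.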
 */
pgcnt_t segspt_minfree = 0;

static int segspt_create(struct seg *seg, caddr_t argsp);
static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_free(struct seg *seg);
static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);

static void
segspt_badop()
{
	panic("segspt_badop called");
	/*NOTREACHED*/
}

#define	SEGSPT_BADOP(t)	(t(*)())segspt_badop

struct seg_ops segspt_ops = {
	SEGSPT_BADOP(int),		/* dup */
	segspt_unmap,
	segspt_free,
	SEGSPT_BADOP(int),		/* fault */
	SEGSPT_BADOP(faultcode_t),	/* faulta */
	SEGSPT_BADOP(int),		/* setprot */
	SEGSPT_BADOP(int),		/* checkprot */
	SEGSPT_BADOP(int),		/* kluster */
	SEGSPT_BADOP(size_t),		/* swapout */
	SEGSPT_BADOP(int),		/* sync */
	SEGSPT_BADOP(size_t),		/* incore */
	SEGSPT_BADOP(int),		/* lockop */
	SEGSPT_BADOP(int),		/* getprot */
	SEGSPT_BADOP(u_offset_t),	/* getoffset */
	SEGSPT_BADOP(int),		/* gettype */
	SEGSPT_BADOP(int),		/* getvp */
	SEGSPT_BADOP(int),		/* advise */
	SEGSPT_BADOP(void),		/* dump */
	SEGSPT_BADOP(int),		/* pagelock */
	SEGSPT_BADOP(int),		/* setpgsz */
	SEGSPT_BADOP(int),		/* getmemid */
	segspt_getpolicy,		/* getpolicy */
	SEGSPT_BADOP(int),		/* capable */
};

static int segspt_shmdup(struct seg *seg, struct seg *newseg);
static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_shmfree(struct seg *seg);
static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
    caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
    register size_t len, register uint_t prot);
static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
    uint_t prot);
static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t segspt_shmswapout(struct seg *seg);
static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
    register char *vec);
static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
    int attr, uint_t flags);
static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos);
static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
    uint_t *protv);
static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
static int segspt_shmgettype(struct seg *seg, caddr_t addr);
static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
    uint_t behav);
static void segspt_shmdump(struct seg *seg);
static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
    struct page ***, enum lock_type, enum seg_rw);
static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
static int segspt_shmcapable(struct seg *, segcapability_t);

struct seg_ops segspt_shmops = {
	segspt_shmdup,
	segspt_shmunmap,
	segspt_shmfree,
	segspt_shmfault,
	segspt_shmfaulta,
	segspt_shmsetprot,
	segspt_shmcheckprot,
	segspt_shmkluster,
	segspt_shmswapout,
	segspt_shmsync,
	segspt_shmincore,
	segspt_shmlockop,
	segspt_shmgetprot,
	segspt_shmgetoffset,
	segspt_shmgettype,
	segspt_shmgetvp,
	segspt_shmadvise,	/* advise */
	segspt_shmdump,
	segspt_shmpagelock,
	segspt_shmsetpgsz,
	segspt_shmgetmemid,
	segspt_shmgetpolicy,
	segspt_shmcapable,
};

static void segspt_purge(struct seg *seg);
static int segspt_reclaim(struct seg *, caddr_t, size_t, struct page **,
    enum seg_rw);
static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
    page_t **ppa);



/*ARGSUSED*/
int
sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
    uint_t prot, uint_t flags, uint_t share_szc)
{
	int	err;
	struct as	*newas;
	struct segspt_crargs sptcargs;

#ifdef DEBUG
	TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
	    tnf_ulong, size, size );
#endif
	if (segspt_minfree == 0)	/* leave min 5% of availrmem for */
		segspt_minfree = availrmem/20;	/* for the system */

	if (!hat_supported(HAT_SHARED_PT, (void *)0))
		return (EINVAL);

	/*
	 * get a new as for this shared memory segment
	 */
	newas = as_alloc();
	newas->a_proc = NULL;
	sptcargs.amp = amp;
	sptcargs.prot = prot;
	sptcargs.flags = flags;
	sptcargs.szc = share_szc;
	/*
	 * create a shared page table (spt) segment
	 */

	if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
		as_free(newas);
		return (err);
	}
	*sptseg = sptcargs.seg_spt;
	return (0);
}

void
sptdestroy(struct as *as, struct anon_map *amp)
{

#ifdef DEBUG
	TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
#endif
	(void) as_unmap(as, SEGSPTADDR, amp->size);
	as_free(as);
}

/*
 * called from seg_free().
 * free (i.e., unlock, unmap, return to free list)
 * all the pages in the given seg.
 */
void
segspt_free(struct seg *seg)
{
	struct spt_data *sptd = (struct spt_data *)seg->s_data;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	if (sptd != NULL) {
		if (sptd->spt_realsize)
			segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);

		if (sptd->spt_ppa_lckcnt)
			kmem_free(sptd->spt_ppa_lckcnt,
			    sizeof (*sptd->spt_ppa_lckcnt)
			    * btopr(sptd->spt_amp->size));
		kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
		cv_destroy(&sptd->spt_cv);
		mutex_destroy(&sptd->spt_lock);
		kmem_free(sptd, sizeof (*sptd));
	}
}

/*ARGSUSED*/
static int
segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
    uint_t flags)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	return (0);
}

/*ARGSUSED*/
static size_t
segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	caddr_t	eo_seg;
	pgcnt_t	npages;
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg	*sptseg;
	struct spt_data *sptd;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
#ifdef lint
	seg = seg;
#endif
	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		eo_seg = addr + len;
		while (addr < eo_seg) {
			/* page exists, and it's locked. */
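			/*
			 * For ISM (non-pageable) segments every page is
			 * resident and locked for the life of the segment,
			 * so the whole range is reported as in core.
			 */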
			*vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
			    SEG_PAGE_ANON;
			addr += PAGESIZE;
		}
		return (len);
	} else {
		struct anon_map *amp = shmd->shm_amp;
		struct anon	*ap;
		page_t		*pp;
		pgcnt_t		anon_index;
		struct vnode	*vp;
		u_offset_t	off;
		ulong_t		i;
		int		ret;
		anon_sync_obj_t	cookie;

		addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
		anon_index = seg_page(seg, addr);
		npages = btopr(len);
		if (anon_index + npages > btopr(shmd->shm_amp->size)) {
			return (EINVAL);
		}
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, anon_index++) {
			ret = 0;
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
			if (ap != NULL) {
				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup_nowait(vp, off, SE_SHARED);
				if (pp != NULL) {
					ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
					page_unlock(pp);
				}
			} else {
				anon_array_exit(&cookie);
			}
			if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
				ret |= SEG_PAGE_LOCKED;
			}
			*vec++ = (char)ret;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		return (len);
	}
}

static int
segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
{
	size_t share_size;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * seg.s_size may have been rounded up to the largest page size
	 * in shmat().
	 * XXX This should be cleaned up. sptdestroy should take a length
	 * argument which should be the same as sptcreate. Then
	 * this rounding would not be needed (or is done in shm.c)
	 * Only the check for full segment will be needed.
	 *
	 * XXX -- shouldn't raddr == 0 always? These tests don't seem
	 * to be useful at all.
	 */
	share_size = page_get_pagesize(seg->s_szc);
	ssize = P2ROUNDUP(ssize, share_size);

	if (raddr == seg->s_base && ssize == seg->s_size) {
		seg_free(seg);
		return (0);
	} else
		return (EINVAL);
}

int
segspt_create(struct seg *seg, caddr_t argsp)
{
	int		err;
	caddr_t		addr = seg->s_base;
	struct spt_data *sptd;
	struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
	struct anon_map *amp = sptcargs->amp;
	struct kshmid	*sp = amp->a_sp;
	struct cred	*cred = CRED();
	ulong_t		i, j, anon_index = 0;
	pgcnt_t		npages = btopr(amp->size);
	struct vnode	*vp;
	page_t		**ppa;
	uint_t		hat_flags;
	size_t		pgsz;
	pgcnt_t		pgcnt;
	caddr_t		a;
	pgcnt_t		pidx;
	size_t		sz;
	proc_t		*procp = curproc;
	rctl_qty_t	lockedbytes = 0;
	kproject_t	*proj;

	/*
	 * We are holding the a_lock on the underlying dummy as,
	 * so we can make calls to the HAT layer.
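	 * The dummy as was set up by sptcreate() above and is not attached
	 * to any process (a_proc == NULL).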
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(sp != NULL);

#ifdef DEBUG
	TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
	    tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
#endif
	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
		if (err = anon_swap_adjust(npages))
			return (err);
	}
	err = ENOMEM;

	if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
		goto out1;

	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
		    KM_NOSLEEP)) == NULL)
			goto out2;
	}

	mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);

	if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
		goto out3;

	seg->s_ops = &segspt_ops;
	sptd->spt_vp = vp;
	sptd->spt_amp = amp;
	sptd->spt_prot = sptcargs->prot;
	sptd->spt_flags = sptcargs->flags;
	seg->s_data = (caddr_t)sptd;
	sptd->spt_ppa = NULL;
	sptd->spt_ppa_lckcnt = NULL;
	seg->s_szc = sptcargs->szc;
	cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
	sptd->spt_gen = 0;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	if (seg->s_szc > amp->a_szc) {
		amp->a_szc = seg->s_szc;
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);

	/*
	 * Set policy to affect initial allocation of pages in
	 * anon_map_createpages()
	 */
	(void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
	    NULL, 0, ptob(npages));

	if (sptcargs->flags & SHM_PAGEABLE) {
		size_t  share_sz;
		pgcnt_t new_npgs, more_pgs;
		struct anon_hdr *nahp;
		zone_t *zone;

		share_sz = page_get_pagesize(seg->s_szc);
		if (!IS_P2ALIGNED(amp->size, share_sz)) {
			/*
			 * We are rounding up the size of the anon array
			 * on a 4 M boundary because we always create 4 M
			 * of page(s) when locking, faulting pages and we
			 * don't have to check for all corner cases e.g.
			 * if there is enough space to allocate a 4 M
			 * page.
			 */
			new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
			more_pgs = new_npgs - npages;

			/*
			 * The zone will never be NULL, as a fully created
			 * shm always has an owning zone.
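			 * The swap reservation charged to the zone here
			 * covers only the pages added by this round-up
			 * (more_pgs), not the whole segment.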
			 */
			zone = sp->shm_perm.ipc_zone;
			ASSERT(zone != NULL);
			if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
				err = ENOMEM;
				goto out4;
			}

			nahp = anon_create(new_npgs, ANON_SLEEP);
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
			    ANON_SLEEP);
			anon_release(amp->ahp, npages);
			amp->ahp = nahp;
			ASSERT(amp->swresv == ptob(npages));
			amp->swresv = amp->size = ptob(new_npgs);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			npages = new_npgs;
		}

		sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
		    sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
		sptd->spt_pcachecnt = 0;
		sptd->spt_realsize = ptob(npages);
		sptcargs->seg_spt = seg;
		return (0);
	}

	/*
	 * get array of pages for each anon slot in amp
	 */
	if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
	    seg, addr, S_CREATE, cred)) != 0)
		goto out4;

	mutex_enter(&sp->shm_mlock);

	/* May be partially locked, so, count bytes to charge for locking */
	for (i = 0; i < npages; i++)
		if (ppa[i]->p_lckcnt == 0)
			lockedbytes += PAGESIZE;

	proj = sp->shm_perm.ipc_proj;

	if (lockedbytes > 0) {
		mutex_enter(&procp->p_lock);
		if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
			mutex_exit(&procp->p_lock);
			mutex_exit(&sp->shm_mlock);
			for (i = 0; i < npages; i++)
				page_unlock(ppa[i]);
			err = ENOMEM;
			goto out4;
		}
		mutex_exit(&procp->p_lock);
	}

	/*
	 * addr is initial address corresponding to the first page on ppa list
	 */
	for (i = 0; i < npages; i++) {
		/* attempt to lock all pages */
		if (page_pp_lock(ppa[i], 0, 1) == 0) {
			/*
			 * if unable to lock any page, unlock all
			 * of them and return error
			 */
			for (j = 0; j < i; j++)
				page_pp_unlock(ppa[j], 0, 1);
			for (i = 0; i < npages; i++)
				page_unlock(ppa[i]);
			rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
			mutex_exit(&sp->shm_mlock);
			err = ENOMEM;
			goto out4;
		}
	}
	mutex_exit(&sp->shm_mlock);

	/*
	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
	 * for the entire life of the segment. For example platforms
	 * that do not support Dynamic Reconfiguration.
	 */
	hat_flags = HAT_LOAD_SHARE;
	if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
		hat_flags |= HAT_LOAD_LOCK;

	/*
	 * Load translations one large page at a time
	 * to make sure we don't create mappings bigger than
	 * segment's size code in case underlying pages
	 * are shared with segvn's segment that uses bigger
	 * size code than we do.
	 */
	pgsz = page_get_pagesize(seg->s_szc);
	pgcnt = page_get_pagecnt(seg->s_szc);
	for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
		sz = MIN(pgsz, ptob(npages - pidx));
		hat_memload_array(seg->s_as->a_hat, a, sz,
		    &ppa[pidx], sptd->spt_prot, hat_flags);
	}

	/*
	 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
	 * we will leave the pages locked SE_SHARED for the life
	 * of the ISM segment. This will prevent any calls to
	 * hat_pageunload() on this ISM segment for those platforms.
	 */
	if (!(hat_flags & HAT_LOAD_LOCK)) {
		/*
		 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
		 * we no longer need to hold the SE_SHARED lock on the pages,
		 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
		 * SE_SHARED lock on the pages as necessary.
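		 * (segspt_shmpagelock() and segspt_shmfault() below
		 * re-acquire the SE_SHARED lock on demand.)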
		 */
		for (i = 0; i < npages; i++)
			page_unlock(ppa[i]);
	}
	sptd->spt_pcachecnt = 0;
	kmem_free(ppa, ((sizeof (page_t *)) * npages));
	sptd->spt_realsize = ptob(npages);
	atomic_add_long(&spt_used, npages);
	sptcargs->seg_spt = seg;
	return (0);

out4:
	seg->s_data = NULL;
	kmem_free(vp, sizeof (*vp));
	cv_destroy(&sptd->spt_cv);
out3:
	mutex_destroy(&sptd->spt_lock);
	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
		kmem_free(ppa, (sizeof (*ppa) * npages));
out2:
	kmem_free(sptd, sizeof (*sptd));
out1:
	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
		anon_swap_restore(npages);
	return (err);
}

/*ARGSUSED*/
void
segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
{
	struct page	*pp;
	struct spt_data *sptd = (struct spt_data *)seg->s_data;
	pgcnt_t		npages;
	ulong_t		anon_idx;
	struct anon_map *amp;
	struct anon	*ap;
	struct vnode	*vp;
	u_offset_t	off;
	uint_t		hat_flags;
	int		root = 0;
	pgcnt_t		pgs, curnpgs = 0;
	page_t		*rootpp;
	rctl_qty_t	unlocked_bytes = 0;
	kproject_t	*proj;
	kshmid_t	*sp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	npages = btop(len);

	hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
	if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
	    (sptd->spt_flags & SHM_PAGEABLE)) {
		hat_flags = HAT_UNLOAD_UNMAP;
	}

	hat_unload(seg->s_as->a_hat, addr, len, hat_flags);

	amp = sptd->spt_amp;
	if (sptd->spt_flags & SHM_PAGEABLE)
		npages = btop(amp->size);

	ASSERT(amp != NULL);

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		sp = amp->a_sp;
		proj = sp->shm_perm.ipc_proj;
		mutex_enter(&sp->shm_mlock);
	}
	for (anon_idx = 0; anon_idx < npages; anon_idx++) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
			if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
				panic("segspt_free_pages: null app");
				/*NOTREACHED*/
			}
		} else {
			if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
			    == NULL)
				continue;
		}
		ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
		swap_xlate(ap, &vp, &off);

		/*
		 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
		 * the pages won't be held SE_SHARED locked at this
		 * point.
		 *
		 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
		 * the pages are still held SE_SHARED locked from the
		 * original segspt_create()
		 *
		 * Our goal is to get SE_EXCL lock on each page, remove
		 * permanent lock on it and invalidate the page.
		 */
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
			if (hat_flags == HAT_UNLOAD_UNMAP)
				pp = page_lookup(vp, off, SE_EXCL);
			else {
				if ((pp = page_find(vp, off)) == NULL) {
					panic("segspt_free_pages: "
					    "page not locked");
					/*NOTREACHED*/
				}
				if (!page_tryupgrade(pp)) {
					page_unlock(pp);
					pp = page_lookup(vp, off, SE_EXCL);
				}
			}
			if (pp == NULL) {
				panic("segspt_free_pages: "
				    "page not in the system");
				/*NOTREACHED*/
			}
			ASSERT(pp->p_lckcnt > 0);
			page_pp_unlock(pp, 0, 1);
			if (pp->p_lckcnt == 0)
				unlocked_bytes += PAGESIZE;
		} else {
			if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
				continue;
		}
		/*
		 * It's logical to invalidate the pages here as in most cases
		 * these were created by segspt.
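		 * Large pages are torn down one constituent page at a time;
		 * page_destroy_pages() is called on the root page once the
		 * last constituent of each large page has been reached.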
		 */
		if (pp->p_szc != 0) {
			if (root == 0) {
				ASSERT(curnpgs == 0);
				root = 1;
				rootpp = pp;
				pgs = curnpgs = page_get_pagecnt(pp->p_szc);
				ASSERT(pgs > 1);
				ASSERT(IS_P2ALIGNED(pgs, pgs));
				ASSERT(!(page_pptonum(pp) & (pgs - 1)));
				curnpgs--;
			} else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
				ASSERT(curnpgs == 1);
				ASSERT(page_pptonum(pp) ==
				    page_pptonum(rootpp) + (pgs - 1));
				page_destroy_pages(rootpp);
				root = 0;
				curnpgs = 0;
			} else {
				ASSERT(curnpgs > 1);
				ASSERT(page_pptonum(pp) ==
				    page_pptonum(rootpp) + (pgs - curnpgs));
				curnpgs--;
			}
		} else {
			if (root != 0 || curnpgs != 0) {
				panic("segspt_free_pages: bad large page");
				/*NOTREACHED*/
			}
			/*LINTED: constant in conditional context */
			VN_DISPOSE(pp, B_INVAL, 0, kcred);
		}
	}
	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		if (unlocked_bytes > 0)
			rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
		mutex_exit(&sp->shm_mlock);
	}
	if (root != 0 || curnpgs != 0) {
		panic("segspt_free_pages: bad large page");
		/*NOTREACHED*/
	}

	/*
	 * mark that pages have been released
	 */
	sptd->spt_realsize = 0;

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		atomic_add_long(&spt_used, -npages);
		anon_swap_restore(npages);
	}
}

/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_getpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map		*amp;
	ulong_t			anon_index;
	lgrp_mem_policy_info_t	*policy_info;
	struct spt_data		*spt_data;

	ASSERT(seg != NULL);

	/*
	 * Get anon_map from segspt
	 *
	 * Assume that no lock needs to be held on anon_map, since
	 * it should be protected by its reference count which must be
	 * nonzero for an existing segment
	 * Need to grab readers lock on policy tree though
	 */
	spt_data = (struct spt_data *)seg->s_data;
	if (spt_data == NULL)
		return (NULL);
	amp = spt_data->spt_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Get policy info
	 *
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

	return (policy_info);
}

/*
 * DISM only.
 * Return locked pages over a given range.
 *
 * We will cache all DISM locked pages and save the pplist for the
 * entire segment in the ppa field of the underlying DISM segment structure.
 * Later, during a call to segspt_reclaim() we will use this ppa array
 * to page_unlock() all of the pages and then we will free this ppa list.
 */
/*ARGSUSED*/
static int
segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg	*sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t pg_idx, npages, tot_npages, npgs;
	struct page **pplist, **pl, **ppa, *pp;
	struct anon_map *amp;
	spgcnt_t	an_idx;
	int	ret = ENOTSUP;
	uint_t	pl_built = 0;
	struct anon *ap;
	struct vnode *vp;
	u_offset_t off;
	pgcnt_t claim_availrmem = 0;
	uint_t	szc;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * We want to lock/unlock the entire ISM segment.
	 * Therefore, we will be using the underlying sptseg and its base
	 * address and length for the caching arguments.
	 */
	ASSERT(sptseg);
	ASSERT(sptd);

	pg_idx = seg_page(seg, addr);
	npages = btopr(len);

	/*
	 * check if the request is larger than number of pages covered
	 * by amp
	 */
	if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
		*ppp = NULL;
		return (ENOTSUP);
	}

	if (type == L_PAGEUNLOCK) {
		ASSERT(sptd->spt_ppa != NULL);

		seg_pinactive(seg, seg->s_base, sptd->spt_amp->size,
		    sptd->spt_ppa, sptd->spt_prot, segspt_reclaim);

		/*
		 * If someone is blocked while unmapping, we purge
		 * segment page cache and thus reclaim pplist synchronously
		 * without waiting for seg_pasync_thread. This speeds up
		 * unmapping in cases where munmap(2) is called, while
		 * raw async i/o is still in progress or where a thread
		 * exits on data fault in a multithreaded application.
		 */
		if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
			segspt_purge(seg);
		}
		return (0);
	} else if (type == L_PAGERECLAIM) {
		ASSERT(sptd->spt_ppa != NULL);
		(void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size,
		    sptd->spt_ppa, sptd->spt_prot);
		return (0);
	}

	if (sptd->spt_flags & DISM_PPA_CHANGED) {
		segspt_purge(seg);
		/*
		 * for DISM ppa needs to be rebuilt since
		 * number of locked pages could be changed
		 */
		*ppp = NULL;
		return (ENOTSUP);
	}

	/*
	 * First try to find pages in segment page cache, without
	 * holding the segment lock.
	 */
	pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size,
	    sptd->spt_prot);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa != NULL);
		ASSERT(sptd->spt_ppa == pplist);
		ppa = sptd->spt_ppa;
		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
			if (ppa[an_idx] == NULL) {
				seg_pinactive(seg, seg->s_base,
				    sptd->spt_amp->size, ppa,
				    sptd->spt_prot, segspt_reclaim);
				*ppp = NULL;
				return (ENOTSUP);
			}
			if ((szc = ppa[an_idx]->p_szc) != 0) {
				npgs = page_get_pagecnt(szc);
				an_idx = P2ROUNDUP(an_idx + 1, npgs);
			} else {
				an_idx++;
			}
		}
		/*
		 * Since we cache the entire DISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. pg_idx.
		 */
		*ppp = &(sptd->spt_ppa[pg_idx]);
		return (0);
	}

	/* The L_PAGELOCK case... */
	mutex_enter(&sptd->spt_lock);
	/*
	 * try to find pages in segment page cache with mutex
	 */
	pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size,
	    sptd->spt_prot);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa != NULL);
		ASSERT(sptd->spt_ppa == pplist);
		ppa = sptd->spt_ppa;
		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
			if (ppa[an_idx] == NULL) {
				mutex_exit(&sptd->spt_lock);
				seg_pinactive(seg, seg->s_base,
				    sptd->spt_amp->size, ppa,
				    sptd->spt_prot, segspt_reclaim);
				*ppp = NULL;
				return (ENOTSUP);
			}
			if ((szc = ppa[an_idx]->p_szc) != 0) {
				npgs = page_get_pagecnt(szc);
				an_idx = P2ROUNDUP(an_idx + 1, npgs);
			} else {
				an_idx++;
			}
		}
		/*
		 * Since we cache the entire DISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. pg_idx.
957 */ 958 mutex_exit(&sptd->spt_lock); 959 *ppp = &(sptd->spt_ppa[pg_idx]); 960 return (0); 961 } 962 if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) == 963 SEGP_FAIL) { 964 mutex_exit(&sptd->spt_lock); 965 *ppp = NULL; 966 return (ENOTSUP); 967 } 968 969 /* 970 * No need to worry about protections because DISM pages are always rw. 971 */ 972 pl = pplist = NULL; 973 amp = sptd->spt_amp; 974 975 /* 976 * Do we need to build the ppa array? 977 */ 978 if (sptd->spt_ppa == NULL) { 979 pgcnt_t lpg_cnt = 0; 980 981 pl_built = 1; 982 tot_npages = btopr(sptd->spt_amp->size); 983 984 ASSERT(sptd->spt_pcachecnt == 0); 985 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP); 986 pl = pplist; 987 988 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 989 for (an_idx = 0; an_idx < tot_npages; ) { 990 ap = anon_get_ptr(amp->ahp, an_idx); 991 /* 992 * Cache only mlocked pages. For large pages 993 * if one (constituent) page is mlocked 994 * all pages for that large page 995 * are cached also. This is for quick 996 * lookups of ppa array; 997 */ 998 if ((ap != NULL) && (lpg_cnt != 0 || 999 (sptd->spt_ppa_lckcnt[an_idx] != 0))) { 1000 1001 swap_xlate(ap, &vp, &off); 1002 pp = page_lookup(vp, off, SE_SHARED); 1003 ASSERT(pp != NULL); 1004 if (lpg_cnt == 0) { 1005 lpg_cnt++; 1006 /* 1007 * For a small page, we are done -- 1008 * lpg_count is reset to 0 below. 1009 * 1010 * For a large page, we are guaranteed 1011 * to find the anon structures of all 1012 * constituent pages and a non-zero 1013 * lpg_cnt ensures that we don't test 1014 * for mlock for these. We are done 1015 * when lpg_count reaches (npgs + 1). 1016 * If we are not the first constituent 1017 * page, restart at the first one. 1018 */ 1019 npgs = page_get_pagecnt(pp->p_szc); 1020 if (!IS_P2ALIGNED(an_idx, npgs)) { 1021 an_idx = P2ALIGN(an_idx, npgs); 1022 page_unlock(pp); 1023 continue; 1024 } 1025 } 1026 if (++lpg_cnt > npgs) 1027 lpg_cnt = 0; 1028 1029 /* 1030 * availrmem is decremented only 1031 * for unlocked pages 1032 */ 1033 if (sptd->spt_ppa_lckcnt[an_idx] == 0) 1034 claim_availrmem++; 1035 pplist[an_idx] = pp; 1036 } 1037 an_idx++; 1038 } 1039 ANON_LOCK_EXIT(&->a_rwlock); 1040 1041 mutex_enter(&freemem_lock); 1042 if (availrmem < tune.t_minarmem + claim_availrmem) { 1043 mutex_exit(&freemem_lock); 1044 ret = FC_MAKE_ERR(ENOMEM); 1045 claim_availrmem = 0; 1046 goto insert_fail; 1047 } else { 1048 availrmem -= claim_availrmem; 1049 } 1050 mutex_exit(&freemem_lock); 1051 1052 sptd->spt_ppa = pl; 1053 } else { 1054 /* 1055 * We already have a valid ppa[]. 1056 */ 1057 pl = sptd->spt_ppa; 1058 } 1059 1060 ASSERT(pl != NULL); 1061 1062 ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size, 1063 pl, sptd->spt_prot, SEGP_FORCE_WIRED | SEGP_ASYNC_FLUSH, 1064 segspt_reclaim); 1065 if (ret == SEGP_FAIL) { 1066 /* 1067 * seg_pinsert failed. We return 1068 * ENOTSUP, so that the as_pagelock() code will 1069 * then try the slower F_SOFTLOCK path. 1070 */ 1071 if (pl_built) { 1072 /* 1073 * No one else has referenced the ppa[]. 1074 * We created it and we need to destroy it. 1075 */ 1076 sptd->spt_ppa = NULL; 1077 } 1078 ret = ENOTSUP; 1079 goto insert_fail; 1080 } 1081 1082 /* 1083 * In either case, we increment softlockcnt on the 'real' segment. 
1084 */ 1085 sptd->spt_pcachecnt++; 1086 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1087 1088 ppa = sptd->spt_ppa; 1089 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 1090 if (ppa[an_idx] == NULL) { 1091 mutex_exit(&sptd->spt_lock); 1092 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 1093 pl, sptd->spt_prot, segspt_reclaim); 1094 *ppp = NULL; 1095 return (ENOTSUP); 1096 } 1097 if ((szc = ppa[an_idx]->p_szc) != 0) { 1098 npgs = page_get_pagecnt(szc); 1099 an_idx = P2ROUNDUP(an_idx + 1, npgs); 1100 } else { 1101 an_idx++; 1102 } 1103 } 1104 /* 1105 * We can now drop the sptd->spt_lock since the ppa[] 1106 * exists and he have incremented pacachecnt. 1107 */ 1108 mutex_exit(&sptd->spt_lock); 1109 1110 /* 1111 * Since we cache the entire segment, we want to 1112 * set ppp to point to the first slot that corresponds 1113 * to the requested addr, i.e. pg_idx. 1114 */ 1115 *ppp = &(sptd->spt_ppa[pg_idx]); 1116 return (ret); 1117 1118 insert_fail: 1119 /* 1120 * We will only reach this code if we tried and failed. 1121 * 1122 * And we can drop the lock on the dummy seg, once we've failed 1123 * to set up a new ppa[]. 1124 */ 1125 mutex_exit(&sptd->spt_lock); 1126 1127 if (pl_built) { 1128 mutex_enter(&freemem_lock); 1129 availrmem += claim_availrmem; 1130 mutex_exit(&freemem_lock); 1131 1132 /* 1133 * We created pl and we need to destroy it. 1134 */ 1135 pplist = pl; 1136 for (an_idx = 0; an_idx < tot_npages; an_idx++) { 1137 if (pplist[an_idx] != NULL) 1138 page_unlock(pplist[an_idx]); 1139 } 1140 kmem_free(pl, sizeof (page_t *) * tot_npages); 1141 } 1142 1143 if (shmd->shm_softlockcnt <= 0) { 1144 if (AS_ISUNMAPWAIT(seg->s_as)) { 1145 mutex_enter(&seg->s_as->a_contents); 1146 if (AS_ISUNMAPWAIT(seg->s_as)) { 1147 AS_CLRUNMAPWAIT(seg->s_as); 1148 cv_broadcast(&seg->s_as->a_cv); 1149 } 1150 mutex_exit(&seg->s_as->a_contents); 1151 } 1152 } 1153 *ppp = NULL; 1154 return (ret); 1155 } 1156 1157 1158 1159 /* 1160 * return locked pages over a given range. 1161 * 1162 * We will cache the entire ISM segment and save the pplist for the 1163 * entire segment in the ppa field of the underlying ISM segment structure. 1164 * Later, during a call to segspt_reclaim() we will use this ppa array 1165 * to page_unlock() all of the pages and then we will free this ppa list. 1166 */ 1167 /*ARGSUSED*/ 1168 static int 1169 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len, 1170 struct page ***ppp, enum lock_type type, enum seg_rw rw) 1171 { 1172 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1173 struct seg *sptseg = shmd->shm_sptseg; 1174 struct spt_data *sptd = sptseg->s_data; 1175 pgcnt_t np, page_index, npages; 1176 caddr_t a, spt_base; 1177 struct page **pplist, **pl, *pp; 1178 struct anon_map *amp; 1179 ulong_t anon_index; 1180 int ret = ENOTSUP; 1181 uint_t pl_built = 0; 1182 struct anon *ap; 1183 struct vnode *vp; 1184 u_offset_t off; 1185 1186 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1187 1188 /* 1189 * We want to lock/unlock the entire ISM segment. Therefore, 1190 * we will be using the underlying sptseg and it's base address 1191 * and length for the caching arguments. 
1192 */ 1193 ASSERT(sptseg); 1194 ASSERT(sptd); 1195 1196 if (sptd->spt_flags & SHM_PAGEABLE) { 1197 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw)); 1198 } 1199 1200 page_index = seg_page(seg, addr); 1201 npages = btopr(len); 1202 1203 /* 1204 * check if the request is larger than number of pages covered 1205 * by amp 1206 */ 1207 if (page_index + npages > btopr(sptd->spt_amp->size)) { 1208 *ppp = NULL; 1209 return (ENOTSUP); 1210 } 1211 1212 if (type == L_PAGEUNLOCK) { 1213 1214 ASSERT(sptd->spt_ppa != NULL); 1215 1216 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 1217 sptd->spt_ppa, sptd->spt_prot, segspt_reclaim); 1218 1219 /* 1220 * If someone is blocked while unmapping, we purge 1221 * segment page cache and thus reclaim pplist synchronously 1222 * without waiting for seg_pasync_thread. This speeds up 1223 * unmapping in cases where munmap(2) is called, while 1224 * raw async i/o is still in progress or where a thread 1225 * exits on data fault in a multithreaded application. 1226 */ 1227 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 1228 segspt_purge(seg); 1229 } 1230 return (0); 1231 } else if (type == L_PAGERECLAIM) { 1232 ASSERT(sptd->spt_ppa != NULL); 1233 1234 (void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size, 1235 sptd->spt_ppa, sptd->spt_prot); 1236 return (0); 1237 } 1238 1239 /* 1240 * First try to find pages in segment page cache, without 1241 * holding the segment lock. 1242 */ 1243 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 1244 sptd->spt_prot); 1245 if (pplist != NULL) { 1246 ASSERT(sptd->spt_ppa == pplist); 1247 ASSERT(sptd->spt_ppa[page_index]); 1248 /* 1249 * Since we cache the entire ISM segment, we want to 1250 * set ppp to point to the first slot that corresponds 1251 * to the requested addr, i.e. page_index. 1252 */ 1253 *ppp = &(sptd->spt_ppa[page_index]); 1254 return (0); 1255 } 1256 1257 /* The L_PAGELOCK case... */ 1258 mutex_enter(&sptd->spt_lock); 1259 1260 /* 1261 * try to find pages in segment page cache 1262 */ 1263 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 1264 sptd->spt_prot); 1265 if (pplist != NULL) { 1266 ASSERT(sptd->spt_ppa == pplist); 1267 /* 1268 * Since we cache the entire segment, we want to 1269 * set ppp to point to the first slot that corresponds 1270 * to the requested addr, i.e. page_index. 1271 */ 1272 mutex_exit(&sptd->spt_lock); 1273 *ppp = &(sptd->spt_ppa[page_index]); 1274 return (0); 1275 } 1276 1277 if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) == 1278 SEGP_FAIL) { 1279 mutex_exit(&sptd->spt_lock); 1280 *ppp = NULL; 1281 return (ENOTSUP); 1282 } 1283 1284 /* 1285 * No need to worry about protections because ISM pages 1286 * are always rw. 1287 */ 1288 pl = pplist = NULL; 1289 1290 /* 1291 * Do we need to build the ppa array? 1292 */ 1293 if (sptd->spt_ppa == NULL) { 1294 ASSERT(sptd->spt_ppa == pplist); 1295 1296 spt_base = sptseg->s_base; 1297 pl_built = 1; 1298 1299 /* 1300 * availrmem is decremented once during anon_swap_adjust() 1301 * and is incremented during the anon_unresv(), which is 1302 * called from shm_rm_amp() when the segment is destroyed. 
1303 */ 1304 amp = sptd->spt_amp; 1305 ASSERT(amp != NULL); 1306 1307 /* pcachecnt is protected by sptd->spt_lock */ 1308 ASSERT(sptd->spt_pcachecnt == 0); 1309 pplist = kmem_zalloc(sizeof (page_t *) 1310 * btopr(sptd->spt_amp->size), KM_SLEEP); 1311 pl = pplist; 1312 1313 anon_index = seg_page(sptseg, spt_base); 1314 1315 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1316 for (a = spt_base; a < (spt_base + sptd->spt_amp->size); 1317 a += PAGESIZE, anon_index++, pplist++) { 1318 ap = anon_get_ptr(amp->ahp, anon_index); 1319 ASSERT(ap != NULL); 1320 swap_xlate(ap, &vp, &off); 1321 pp = page_lookup(vp, off, SE_SHARED); 1322 ASSERT(pp != NULL); 1323 *pplist = pp; 1324 } 1325 ANON_LOCK_EXIT(&->a_rwlock); 1326 1327 if (a < (spt_base + sptd->spt_amp->size)) { 1328 ret = ENOTSUP; 1329 goto insert_fail; 1330 } 1331 sptd->spt_ppa = pl; 1332 } else { 1333 /* 1334 * We already have a valid ppa[]. 1335 */ 1336 pl = sptd->spt_ppa; 1337 } 1338 1339 ASSERT(pl != NULL); 1340 1341 ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size, 1342 pl, sptd->spt_prot, SEGP_FORCE_WIRED, segspt_reclaim); 1343 if (ret == SEGP_FAIL) { 1344 /* 1345 * seg_pinsert failed. We return 1346 * ENOTSUP, so that the as_pagelock() code will 1347 * then try the slower F_SOFTLOCK path. 1348 */ 1349 if (pl_built) { 1350 /* 1351 * No one else has referenced the ppa[]. 1352 * We created it and we need to destroy it. 1353 */ 1354 sptd->spt_ppa = NULL; 1355 } 1356 ret = ENOTSUP; 1357 goto insert_fail; 1358 } 1359 1360 /* 1361 * In either case, we increment softlockcnt on the 'real' segment. 1362 */ 1363 sptd->spt_pcachecnt++; 1364 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1365 1366 /* 1367 * We can now drop the sptd->spt_lock since the ppa[] 1368 * exists and he have incremented pacachecnt. 1369 */ 1370 mutex_exit(&sptd->spt_lock); 1371 1372 /* 1373 * Since we cache the entire segment, we want to 1374 * set ppp to point to the first slot that corresponds 1375 * to the requested addr, i.e. page_index. 1376 */ 1377 *ppp = &(sptd->spt_ppa[page_index]); 1378 return (ret); 1379 1380 insert_fail: 1381 /* 1382 * We will only reach this code if we tried and failed. 1383 * 1384 * And we can drop the lock on the dummy seg, once we've failed 1385 * to set up a new ppa[]. 1386 */ 1387 mutex_exit(&sptd->spt_lock); 1388 1389 if (pl_built) { 1390 /* 1391 * We created pl and we need to destroy it. 
1392 */ 1393 pplist = pl; 1394 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT); 1395 while (np) { 1396 page_unlock(*pplist); 1397 np--; 1398 pplist++; 1399 } 1400 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size)); 1401 } 1402 if (shmd->shm_softlockcnt <= 0) { 1403 if (AS_ISUNMAPWAIT(seg->s_as)) { 1404 mutex_enter(&seg->s_as->a_contents); 1405 if (AS_ISUNMAPWAIT(seg->s_as)) { 1406 AS_CLRUNMAPWAIT(seg->s_as); 1407 cv_broadcast(&seg->s_as->a_cv); 1408 } 1409 mutex_exit(&seg->s_as->a_contents); 1410 } 1411 } 1412 *ppp = NULL; 1413 return (ret); 1414 } 1415 1416 /* 1417 * purge any cached pages in the I/O page cache 1418 */ 1419 static void 1420 segspt_purge(struct seg *seg) 1421 { 1422 seg_ppurge(seg); 1423 } 1424 1425 static int 1426 segspt_reclaim(struct seg *seg, caddr_t addr, size_t len, struct page **pplist, 1427 enum seg_rw rw) 1428 { 1429 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1430 struct seg *sptseg; 1431 struct spt_data *sptd; 1432 pgcnt_t npages, i, free_availrmem = 0; 1433 int done = 0; 1434 1435 #ifdef lint 1436 addr = addr; 1437 #endif 1438 sptseg = shmd->shm_sptseg; 1439 sptd = sptseg->s_data; 1440 npages = (len >> PAGESHIFT); 1441 ASSERT(npages); 1442 ASSERT(sptd->spt_pcachecnt != 0); 1443 ASSERT(sptd->spt_ppa == pplist); 1444 ASSERT(npages == btopr(sptd->spt_amp->size)); 1445 /* 1446 * Acquire the lock on the dummy seg and destroy the 1447 * ppa array IF this is the last pcachecnt. 1448 */ 1449 mutex_enter(&sptd->spt_lock); 1450 if (--sptd->spt_pcachecnt == 0) { 1451 for (i = 0; i < npages; i++) { 1452 if (pplist[i] == NULL) { 1453 continue; 1454 } 1455 if (rw == S_WRITE) { 1456 hat_setrefmod(pplist[i]); 1457 } else { 1458 hat_setref(pplist[i]); 1459 } 1460 if ((sptd->spt_flags & SHM_PAGEABLE) && 1461 (sptd->spt_ppa_lckcnt[i] == 0)) 1462 free_availrmem++; 1463 page_unlock(pplist[i]); 1464 } 1465 if (sptd->spt_flags & SHM_PAGEABLE) { 1466 mutex_enter(&freemem_lock); 1467 availrmem += free_availrmem; 1468 mutex_exit(&freemem_lock); 1469 } 1470 /* 1471 * Since we want to cach/uncache the entire ISM segment, 1472 * we will track the pplist in a segspt specific field 1473 * ppa, that is initialized at the time we add an entry to 1474 * the cache. 1475 */ 1476 ASSERT(sptd->spt_pcachecnt == 0); 1477 kmem_free(pplist, sizeof (page_t *) * npages); 1478 sptd->spt_ppa = NULL; 1479 sptd->spt_flags &= ~DISM_PPA_CHANGED; 1480 sptd->spt_gen++; 1481 cv_broadcast(&sptd->spt_cv); 1482 done = 1; 1483 } 1484 mutex_exit(&sptd->spt_lock); 1485 /* 1486 * Now decrement softlockcnt. 1487 */ 1488 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1); 1489 1490 if (shmd->shm_softlockcnt <= 0) { 1491 if (AS_ISUNMAPWAIT(seg->s_as)) { 1492 mutex_enter(&seg->s_as->a_contents); 1493 if (AS_ISUNMAPWAIT(seg->s_as)) { 1494 AS_CLRUNMAPWAIT(seg->s_as); 1495 cv_broadcast(&seg->s_as->a_cv); 1496 } 1497 mutex_exit(&seg->s_as->a_contents); 1498 } 1499 } 1500 return (done); 1501 } 1502 1503 /* 1504 * Do a F_SOFTUNLOCK call over the range requested. 1505 * The range must have already been F_SOFTLOCK'ed. 1506 * 1507 * The calls to acquire and release the anon map lock mutex were 1508 * removed in order to avoid a deadly embrace during a DR 1509 * memory delete operation. (Eg. 
 * memory delete operation. (Eg. DR blocks while waiting for an
 * exclusive lock on a page that is being used for kaio; the
 * thread that will complete the kaio and call segspt_softunlock
 * blocks on the anon map lock; another thread holding the anon
 * map lock blocks on another page lock via the segspt_shmfault
 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
 *
 * The appropriateness of the removal is based upon the following:
 * 1. If we are holding a segment's reader lock and the page is held
 * shared, then the corresponding element in anonmap which points to
 * anon struct cannot change and there is no need to acquire the
 * anonymous map lock.
 * 2. Threads in segspt_softunlock have a reader lock on the segment
 * and already have the shared page lock, so we are guaranteed that
 * the anon map slot cannot change and therefore can call anon_get_ptr()
 * without grabbing the anonymous map lock.
 * 3. Threads that softlock a shared page break copy-on-write, even if
 * it's a read. Thus cow faults can be ignored with respect to soft
 * unlocking, since the breaking of cow means that the anon slot(s) will
 * not be shared.
 */
static void
segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
    size_t len, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg	*sptseg;
	struct spt_data *sptd;
	page_t *pp;
	caddr_t adr;
	struct vnode *vp;
	u_offset_t offset;
	ulong_t anon_index;
	struct anon_map *amp;		/* XXX - for locknest */
	struct anon *ap = NULL;
	pgcnt_t npages;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;

	/*
	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
	 * and therefore their pages are SE_SHARED locked
	 * for the entire life of the segment.
	 */
	if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
	    ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
		goto softlock_decrement;
	}

	/*
	 * Any thread is free to do a page_find and
	 * page_unlock() on the pages within this seg.
	 *
	 * We are already holding the as->a_lock on the user's
	 * real segment, but we need to hold the a_lock on the
	 * underlying dummy as. This is mostly to satisfy the
	 * underlying HAT layer.
	 */
	AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
	hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
	AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);

	amp = sptd->spt_amp;
	ASSERT(amp != NULL);
	anon_index = seg_page(sptseg, sptseg_addr);

	for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
		ap = anon_get_ptr(amp->ahp, anon_index++);
		ASSERT(ap != NULL);
		swap_xlate(ap, &vp, &offset);

		/*
		 * Use page_find() instead of page_lookup() to
		 * find the page since we know that it has a
		 * "shared" lock.
1587 */ 1588 pp = page_find(vp, offset); 1589 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1)); 1590 if (pp == NULL) { 1591 panic("segspt_softunlock: " 1592 "addr %p, ap %p, vp %p, off %llx", 1593 (void *)adr, (void *)ap, (void *)vp, offset); 1594 /*NOTREACHED*/ 1595 } 1596 1597 if (rw == S_WRITE) { 1598 hat_setrefmod(pp); 1599 } else if (rw != S_OTHER) { 1600 hat_setref(pp); 1601 } 1602 page_unlock(pp); 1603 } 1604 1605 softlock_decrement: 1606 npages = btopr(len); 1607 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages); 1608 if (shmd->shm_softlockcnt == 0) { 1609 /* 1610 * All SOFTLOCKS are gone. Wakeup any waiting 1611 * unmappers so they can try again to unmap. 1612 * Check for waiters first without the mutex 1613 * held so we don't always grab the mutex on 1614 * softunlocks. 1615 */ 1616 if (AS_ISUNMAPWAIT(seg->s_as)) { 1617 mutex_enter(&seg->s_as->a_contents); 1618 if (AS_ISUNMAPWAIT(seg->s_as)) { 1619 AS_CLRUNMAPWAIT(seg->s_as); 1620 cv_broadcast(&seg->s_as->a_cv); 1621 } 1622 mutex_exit(&seg->s_as->a_contents); 1623 } 1624 } 1625 } 1626 1627 int 1628 segspt_shmattach(struct seg *seg, caddr_t *argsp) 1629 { 1630 struct shm_data *shmd_arg = (struct shm_data *)argsp; 1631 struct shm_data *shmd; 1632 struct anon_map *shm_amp = shmd_arg->shm_amp; 1633 struct spt_data *sptd; 1634 int error = 0; 1635 1636 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1637 1638 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); 1639 if (shmd == NULL) 1640 return (ENOMEM); 1641 1642 shmd->shm_sptas = shmd_arg->shm_sptas; 1643 shmd->shm_amp = shm_amp; 1644 shmd->shm_sptseg = shmd_arg->shm_sptseg; 1645 1646 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0, 1647 NULL, 0, seg->s_size); 1648 1649 seg->s_data = (void *)shmd; 1650 seg->s_ops = &segspt_shmops; 1651 seg->s_szc = shmd->shm_sptseg->s_szc; 1652 sptd = shmd->shm_sptseg->s_data; 1653 1654 if (sptd->spt_flags & SHM_PAGEABLE) { 1655 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size), 1656 KM_NOSLEEP)) == NULL) { 1657 seg->s_data = (void *)NULL; 1658 kmem_free(shmd, (sizeof (*shmd))); 1659 return (ENOMEM); 1660 } 1661 shmd->shm_lckpgs = 0; 1662 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 1663 if ((error = hat_share(seg->s_as->a_hat, seg->s_base, 1664 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1665 seg->s_size, seg->s_szc)) != 0) { 1666 kmem_free(shmd->shm_vpage, 1667 btopr(shm_amp->size)); 1668 } 1669 } 1670 } else { 1671 error = hat_share(seg->s_as->a_hat, seg->s_base, 1672 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1673 seg->s_size, seg->s_szc); 1674 } 1675 if (error) { 1676 seg->s_szc = 0; 1677 seg->s_data = (void *)NULL; 1678 kmem_free(shmd, (sizeof (*shmd))); 1679 } else { 1680 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1681 shm_amp->refcnt++; 1682 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1683 } 1684 return (error); 1685 } 1686 1687 int 1688 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) 1689 { 1690 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1691 int reclaim = 1; 1692 1693 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1694 retry: 1695 if (shmd->shm_softlockcnt > 0) { 1696 if (reclaim == 1) { 1697 segspt_purge(seg); 1698 reclaim = 0; 1699 goto retry; 1700 } 1701 return (EAGAIN); 1702 } 1703 1704 if (ssize != seg->s_size) { 1705 #ifdef DEBUG 1706 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n", 1707 ssize, seg->s_size); 1708 #endif 1709 return (EINVAL); 1710 } 1711 1712 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, 
	    MC_UNLOCK, NULL, 0);
	hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);

	seg_free(seg);

	return (0);
}

void
segspt_shmfree(struct seg *seg)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon_map *shm_amp = shmd->shm_amp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	(void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
	    MC_UNLOCK, NULL, 0);

	/*
	 * Need to increment refcnt when attaching
	 * and decrement when detaching because of dup().
	 */
	ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
	shm_amp->refcnt--;
	ANON_LOCK_EXIT(&shm_amp->a_rwlock);

	if (shmd->shm_vpage) {	/* only for DISM */
		kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
		shmd->shm_vpage = NULL;
	}
	kmem_free(shmd, sizeof (*shmd));
}

/*ARGSUSED*/
int
segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * Shared page table is more than shared mapping.
	 * Individual process sharing page tables can't change prot
	 * because there is only one set of page tables.
	 * This will be allowed after private page table is
	 * supported.
	 */
	/* need to return correct status error? */
	return (0);
}


faultcode_t
segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
    size_t len, enum fault_type type, enum seg_rw rw)
{
	struct shm_data	*shmd = (struct shm_data *)seg->s_data;
	struct seg	*sptseg = shmd->shm_sptseg;
	struct as	*curspt = shmd->shm_sptas;
	struct spt_data	*sptd = sptseg->s_data;
	pgcnt_t npages;
	size_t  size;
	caddr_t segspt_addr, shm_addr;
	page_t  **ppa;
	int	i;
	ulong_t an_idx = 0;
	int	err = 0;
	int	dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
	size_t	pgsz;
	pgcnt_t	pgcnt;
	caddr_t	a;
	pgcnt_t	pidx;

#ifdef lint
	hat = hat;
#endif
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * Because of the way spt is implemented
	 * the realsize of the segment does not have to be
	 * equal to the segment size itself. The segment size is
	 * often in multiples of a page size larger than PAGESIZE.
	 * The realsize is rounded up to the nearest PAGESIZE
	 * based on what the user requested. This is a bit of
	 * ugliness that is historical but not easily fixed
	 * without re-designing the higher levels of ISM.
	 */
	ASSERT(addr >= seg->s_base);
	if (((addr + len) - seg->s_base) > sptd->spt_realsize)
		return (FC_NOMAP);
	/*
	 * For all of the following cases except F_PROT, we need to
	 * make any necessary adjustments to addr and len
	 * and get all of the necessary page_t's into an array called ppa[].
	 *
	 * The code in shmat() forces base addr and len of ISM segment
	 * to be aligned to largest page size supported. Therefore,
	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
	 * in large pagesize chunks, or else we will screw up the HAT
	 * layer by calling hat_memload_array() with differing page sizes
	 * over a given virtual range.
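	 * The loops below therefore step through ppa[] one large page
	 * (pgcnt constituent pages) at a time.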
1816 */ 1817 pgsz = page_get_pagesize(sptseg->s_szc); 1818 pgcnt = page_get_pagecnt(sptseg->s_szc); 1819 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); 1820 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz); 1821 npages = btopr(size); 1822 1823 /* 1824 * Now we need to convert from addr in segshm to addr in segspt. 1825 */ 1826 an_idx = seg_page(seg, shm_addr); 1827 segspt_addr = sptseg->s_base + ptob(an_idx); 1828 1829 ASSERT((segspt_addr + ptob(npages)) <= 1830 (sptseg->s_base + sptd->spt_realsize)); 1831 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size)); 1832 1833 switch (type) { 1834 1835 case F_SOFTLOCK: 1836 1837 mutex_enter(&freemem_lock); 1838 if (availrmem < tune.t_minarmem + npages) { 1839 mutex_exit(&freemem_lock); 1840 return (FC_MAKE_ERR(ENOMEM)); 1841 } else { 1842 availrmem -= npages; 1843 } 1844 mutex_exit(&freemem_lock); 1845 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages); 1846 /* 1847 * Fall through to the F_INVAL case to load up the hat layer 1848 * entries with the HAT_LOAD_LOCK flag. 1849 */ 1850 /* FALLTHRU */ 1851 case F_INVAL: 1852 1853 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 1854 return (FC_NOMAP); 1855 1856 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP); 1857 1858 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa); 1859 if (err != 0) { 1860 if (type == F_SOFTLOCK) { 1861 mutex_enter(&freemem_lock); 1862 availrmem += npages; 1863 mutex_exit(&freemem_lock); 1864 atomic_add_long((ulong_t *)( 1865 &(shmd->shm_softlockcnt)), -npages); 1866 } 1867 goto dism_err; 1868 } 1869 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1870 a = segspt_addr; 1871 pidx = 0; 1872 if (type == F_SOFTLOCK) { 1873 1874 /* 1875 * Load up the translation keeping it 1876 * locked and don't unlock the page. 1877 */ 1878 for (; pidx < npages; a += pgsz, pidx += pgcnt) { 1879 hat_memload_array(sptseg->s_as->a_hat, 1880 a, pgsz, &ppa[pidx], sptd->spt_prot, 1881 HAT_LOAD_LOCK | HAT_LOAD_SHARE); 1882 } 1883 } else { 1884 if (hat == seg->s_as->a_hat) { 1885 1886 /* 1887 * Migrate pages marked for migration 1888 */ 1889 if (lgrp_optimizations()) 1890 page_migrate(seg, shm_addr, ppa, 1891 npages); 1892 1893 /* CPU HAT */ 1894 for (; pidx < npages; 1895 a += pgsz, pidx += pgcnt) { 1896 hat_memload_array(sptseg->s_as->a_hat, 1897 a, pgsz, &ppa[pidx], 1898 sptd->spt_prot, 1899 HAT_LOAD_SHARE); 1900 } 1901 } else { 1902 /* XHAT. Pass real address */ 1903 hat_memload_array(hat, shm_addr, 1904 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE); 1905 } 1906 1907 /* 1908 * And now drop the SE_SHARED lock(s). 1909 */ 1910 if (dyn_ism_unmap) { 1911 for (i = 0; i < npages; i++) { 1912 page_unlock(ppa[i]); 1913 } 1914 } 1915 } 1916 1917 if (!dyn_ism_unmap) { 1918 if (hat_share(seg->s_as->a_hat, shm_addr, 1919 curspt->a_hat, segspt_addr, ptob(npages), 1920 seg->s_szc) != 0) { 1921 panic("hat_share err in DISM fault"); 1922 /* NOTREACHED */ 1923 } 1924 if (type == F_INVAL) { 1925 for (i = 0; i < npages; i++) { 1926 page_unlock(ppa[i]); 1927 } 1928 } 1929 } 1930 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1931 dism_err: 1932 kmem_free(ppa, npages * sizeof (page_t *)); 1933 return (err); 1934 1935 case F_SOFTUNLOCK: 1936 1937 mutex_enter(&freemem_lock); 1938 availrmem += npages; 1939 mutex_exit(&freemem_lock); 1940 1941 /* 1942 * This is a bit ugly, we pass in the real seg pointer, 1943 * but the segspt_addr is the virtual address within the 1944 * dummy seg. 
1945 */ 1946 segspt_softunlock(seg, segspt_addr, size, rw); 1947 return (0); 1948 1949 case F_PROT: 1950 1951 /* 1952 * This takes care of the unusual case where a user 1953 * allocates a stack in shared memory and a register 1954 * window overflow is written to that stack page before 1955 * it is otherwise modified. 1956 * 1957 * We can get away with this because ISM segments are 1958 * always rw. Other than this unusual case, there 1959 * should be no instances of protection violations. 1960 */ 1961 return (0); 1962 1963 default: 1964 #ifdef DEBUG 1965 panic("segspt_dismfault default type?"); 1966 #else 1967 return (FC_NOMAP); 1968 #endif 1969 } 1970 } 1971 1972 1973 faultcode_t 1974 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr, 1975 size_t len, enum fault_type type, enum seg_rw rw) 1976 { 1977 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1978 struct seg *sptseg = shmd->shm_sptseg; 1979 struct as *curspt = shmd->shm_sptas; 1980 struct spt_data *sptd = sptseg->s_data; 1981 pgcnt_t npages; 1982 size_t size; 1983 caddr_t sptseg_addr, shm_addr; 1984 page_t *pp, **ppa; 1985 int i; 1986 u_offset_t offset; 1987 ulong_t anon_index = 0; 1988 struct vnode *vp; 1989 struct anon_map *amp; /* XXX - for locknest */ 1990 struct anon *ap = NULL; 1991 size_t pgsz; 1992 pgcnt_t pgcnt; 1993 caddr_t a; 1994 pgcnt_t pidx; 1995 size_t sz; 1996 1997 #ifdef lint 1998 hat = hat; 1999 #endif 2000 2001 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2002 2003 if (sptd->spt_flags & SHM_PAGEABLE) { 2004 return (segspt_dismfault(hat, seg, addr, len, type, rw)); 2005 } 2006 2007 /* 2008 * Because of the way spt is implemented 2009 * the realsize of the segment does not have to be 2010 * equal to the segment size itself. The segment size is 2011 * often in multiples of a page size larger than PAGESIZE. 2012 * The realsize is rounded up to the nearest PAGESIZE 2013 * based on what the user requested. This is a bit of 2014 * ungliness that is historical but not easily fixed 2015 * without re-designing the higher levels of ISM. 2016 */ 2017 ASSERT(addr >= seg->s_base); 2018 if (((addr + len) - seg->s_base) > sptd->spt_realsize) 2019 return (FC_NOMAP); 2020 /* 2021 * For all of the following cases except F_PROT, we need to 2022 * make any necessary adjustments to addr and len 2023 * and get all of the necessary page_t's into an array called ppa[]. 2024 * 2025 * The code in shmat() forces base addr and len of ISM segment 2026 * to be aligned to largest page size supported. Therefore, 2027 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large 2028 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 2029 * in large pagesize chunks, or else we will screw up the HAT 2030 * layer by calling hat_memload_array() with differing page sizes 2031 * over a given virtual range. 2032 */ 2033 pgsz = page_get_pagesize(sptseg->s_szc); 2034 pgcnt = page_get_pagecnt(sptseg->s_szc); 2035 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); 2036 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz); 2037 npages = btopr(size); 2038 2039 /* 2040 * Now we need to convert from addr in segshm to addr in segspt. 2041 */ 2042 anon_index = seg_page(seg, shm_addr); 2043 sptseg_addr = sptseg->s_base + ptob(anon_index); 2044 2045 /* 2046 * And now we may have to adjust npages downward if we have 2047 * exceeded the realsize of the segment or initial anon 2048 * allocations. 
2049 */ 2050 if ((sptseg_addr + ptob(npages)) > 2051 (sptseg->s_base + sptd->spt_realsize)) 2052 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr; 2053 2054 npages = btopr(size); 2055 2056 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size)); 2057 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0); 2058 2059 switch (type) { 2060 2061 case F_SOFTLOCK: 2062 2063 /* 2064 * availrmem is decremented once during anon_swap_adjust() 2065 * and is incremented during the anon_unresv(), which is 2066 * called from shm_rm_amp() when the segment is destroyed. 2067 */ 2068 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages); 2069 /* 2070 * Some platforms assume that ISM pages are SE_SHARED 2071 * locked for the entire life of the segment. 2072 */ 2073 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) 2074 return (0); 2075 /* 2076 * Fall through to the F_INVAL case to load up the hat layer 2077 * entries with the HAT_LOAD_LOCK flag. 2078 */ 2079 2080 /* FALLTHRU */ 2081 case F_INVAL: 2082 2083 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 2084 return (FC_NOMAP); 2085 2086 /* 2087 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP 2088 * may still rely on this call to hat_share(). That 2089 * would imply that those hat's can fault on a 2090 * HAT_LOAD_LOCK translation, which would seem 2091 * contradictory. 2092 */ 2093 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 2094 if (hat_share(seg->s_as->a_hat, seg->s_base, 2095 curspt->a_hat, sptseg->s_base, 2096 sptseg->s_size, sptseg->s_szc) != 0) { 2097 panic("hat_share error in ISM fault"); 2098 /*NOTREACHED*/ 2099 } 2100 return (0); 2101 } 2102 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP); 2103 2104 /* 2105 * I see no need to lock the real seg, 2106 * here, because all of our work will be on the underlying 2107 * dummy seg. 2108 * 2109 * sptseg_addr and npages now account for large pages. 2110 */ 2111 amp = sptd->spt_amp; 2112 ASSERT(amp != NULL); 2113 anon_index = seg_page(sptseg, sptseg_addr); 2114 2115 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2116 for (i = 0; i < npages; i++) { 2117 ap = anon_get_ptr(amp->ahp, anon_index++); 2118 ASSERT(ap != NULL); 2119 swap_xlate(ap, &vp, &offset); 2120 pp = page_lookup(vp, offset, SE_SHARED); 2121 ASSERT(pp != NULL); 2122 ppa[i] = pp; 2123 } 2124 ANON_LOCK_EXIT(&->a_rwlock); 2125 ASSERT(i == npages); 2126 2127 /* 2128 * We are already holding the as->a_lock on the user's 2129 * real segment, but we need to hold the a_lock on the 2130 * underlying dummy as. This is mostly to satisfy the 2131 * underlying HAT layer. 2132 */ 2133 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 2134 a = sptseg_addr; 2135 pidx = 0; 2136 if (type == F_SOFTLOCK) { 2137 /* 2138 * Load up the translation keeping it 2139 * locked and don't unlock the page. 2140 */ 2141 for (; pidx < npages; a += pgsz, pidx += pgcnt) { 2142 sz = MIN(pgsz, ptob(npages - pidx)); 2143 hat_memload_array(sptseg->s_as->a_hat, a, 2144 sz, &ppa[pidx], sptd->spt_prot, 2145 HAT_LOAD_LOCK | HAT_LOAD_SHARE); 2146 } 2147 } else { 2148 if (hat == seg->s_as->a_hat) { 2149 2150 /* 2151 * Migrate pages marked for migration. 2152 */ 2153 if (lgrp_optimizations()) 2154 page_migrate(seg, shm_addr, ppa, 2155 npages); 2156 2157 /* CPU HAT */ 2158 for (; pidx < npages; 2159 a += pgsz, pidx += pgcnt) { 2160 sz = MIN(pgsz, ptob(npages - pidx)); 2161 hat_memload_array(sptseg->s_as->a_hat, 2162 a, sz, &ppa[pidx], 2163 sptd->spt_prot, HAT_LOAD_SHARE); 2164 } 2165 } else { 2166 /* XHAT. 
Pass real address */ 2167 hat_memload_array(hat, shm_addr, 2168 ptob(npages), ppa, sptd->spt_prot, 2169 HAT_LOAD_SHARE); 2170 } 2171 2172 /* 2173 * And now drop the SE_SHARED lock(s). 2174 */ 2175 for (i = 0; i < npages; i++) 2176 page_unlock(ppa[i]); 2177 } 2178 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 2179 2180 kmem_free(ppa, sizeof (page_t *) * npages); 2181 return (0); 2182 case F_SOFTUNLOCK: 2183 2184 /* 2185 * This is a bit ugly, we pass in the real seg pointer, 2186 * but the sptseg_addr is the virtual address within the 2187 * dummy seg. 2188 */ 2189 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw); 2190 return (0); 2191 2192 case F_PROT: 2193 2194 /* 2195 * This takes care of the unusual case where a user 2196 * allocates a stack in shared memory and a register 2197 * window overflow is written to that stack page before 2198 * it is otherwise modified. 2199 * 2200 * We can get away with this because ISM segments are 2201 * always rw. Other than this unusual case, there 2202 * should be no instances of protection violations. 2203 */ 2204 return (0); 2205 2206 default: 2207 #ifdef DEBUG 2208 cmn_err(CE_WARN, "segspt_shmfault default type?"); 2209 #endif 2210 return (FC_NOMAP); 2211 } 2212 } 2213 2214 /*ARGSUSED*/ 2215 static faultcode_t 2216 segspt_shmfaulta(struct seg *seg, caddr_t addr) 2217 { 2218 return (0); 2219 } 2220 2221 /*ARGSUSED*/ 2222 static int 2223 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta) 2224 { 2225 return (0); 2226 } 2227 2228 /*ARGSUSED*/ 2229 static size_t 2230 segspt_shmswapout(struct seg *seg) 2231 { 2232 return (0); 2233 } 2234 2235 /* 2236 * duplicate the shared page tables 2237 */ 2238 int 2239 segspt_shmdup(struct seg *seg, struct seg *newseg) 2240 { 2241 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2242 struct anon_map *amp = shmd->shm_amp; 2243 struct shm_data *shmd_new; 2244 struct seg *spt_seg = shmd->shm_sptseg; 2245 struct spt_data *sptd = spt_seg->s_data; 2246 int error = 0; 2247 2248 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 2249 2250 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP); 2251 newseg->s_data = (void *)shmd_new; 2252 shmd_new->shm_sptas = shmd->shm_sptas; 2253 shmd_new->shm_amp = amp; 2254 shmd_new->shm_sptseg = shmd->shm_sptseg; 2255 newseg->s_ops = &segspt_shmops; 2256 newseg->s_szc = seg->s_szc; 2257 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc); 2258 2259 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2260 amp->refcnt++; 2261 ANON_LOCK_EXIT(&->a_rwlock); 2262 2263 if (sptd->spt_flags & SHM_PAGEABLE) { 2264 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP); 2265 shmd_new->shm_lckpgs = 0; 2266 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 2267 if ((error = hat_share(newseg->s_as->a_hat, 2268 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR, 2269 seg->s_size, seg->s_szc)) != 0) { 2270 kmem_free(shmd_new->shm_vpage, 2271 btopr(amp->size)); 2272 } 2273 } 2274 return (error); 2275 } else { 2276 return (hat_share(newseg->s_as->a_hat, newseg->s_base, 2277 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size, 2278 seg->s_szc)); 2279 2280 } 2281 } 2282 2283 /*ARGSUSED*/ 2284 int 2285 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot) 2286 { 2287 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2288 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2289 2290 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2291 2292 /* 2293 * ISM segment is always rw. 
2294 */ 2295 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0); 2296 } 2297 2298 /* 2299 * Return an array of locked large pages, for empty slots allocate 2300 * private zero-filled anon pages. 2301 */ 2302 static int 2303 spt_anon_getpages( 2304 struct seg *sptseg, 2305 caddr_t sptaddr, 2306 size_t len, 2307 page_t *ppa[]) 2308 { 2309 struct spt_data *sptd = sptseg->s_data; 2310 struct anon_map *amp = sptd->spt_amp; 2311 enum seg_rw rw = sptd->spt_prot; 2312 uint_t szc = sptseg->s_szc; 2313 size_t pg_sz, share_sz = page_get_pagesize(szc); 2314 pgcnt_t lp_npgs; 2315 caddr_t lp_addr, e_sptaddr; 2316 uint_t vpprot, ppa_szc = 0; 2317 struct vpage *vpage = NULL; 2318 ulong_t j, ppa_idx; 2319 int err, ierr = 0; 2320 pgcnt_t an_idx; 2321 anon_sync_obj_t cookie; 2322 int anon_locked = 0; 2323 pgcnt_t amp_pgs; 2324 2325 2326 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz)); 2327 ASSERT(len != 0); 2328 2329 pg_sz = share_sz; 2330 lp_npgs = btop(pg_sz); 2331 lp_addr = sptaddr; 2332 e_sptaddr = sptaddr + len; 2333 an_idx = seg_page(sptseg, sptaddr); 2334 ppa_idx = 0; 2335 2336 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2337 2338 amp_pgs = page_get_pagecnt(amp->a_szc); 2339 2340 /*CONSTCOND*/ 2341 while (1) { 2342 for (; lp_addr < e_sptaddr; 2343 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) { 2344 2345 /* 2346 * If we're currently locked, and we get to a new 2347 * page, unlock our current anon chunk. 2348 */ 2349 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) { 2350 anon_array_exit(&cookie); 2351 anon_locked = 0; 2352 } 2353 if (!anon_locked) { 2354 anon_array_enter(amp, an_idx, &cookie); 2355 anon_locked = 1; 2356 } 2357 ppa_szc = (uint_t)-1; 2358 ierr = anon_map_getpages(amp, an_idx, szc, sptseg, 2359 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx], 2360 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred); 2361 2362 if (ierr != 0) { 2363 if (ierr > 0) { 2364 err = FC_MAKE_ERR(ierr); 2365 goto lpgs_err; 2366 } 2367 break; 2368 } 2369 } 2370 if (lp_addr == e_sptaddr) { 2371 break; 2372 } 2373 ASSERT(lp_addr < e_sptaddr); 2374 2375 /* 2376 * ierr == -1 means we failed to allocate a large page. 2377 * so do a size down operation. 2378 * 2379 * ierr == -2 means some other process that privately shares 2380 * pages with this process has allocated a larger page and we 2381 * need to retry with larger pages. So do a size up 2382 * operation. This relies on the fact that large pages are 2383 * never partially shared i.e. if we share any constituent 2384 * page of a large page with another process we must share the 2385 * entire large page. Note this cannot happen for SOFTLOCK 2386 * case, unless current address (lpaddr) is at the beginning 2387 * of the next page size boundary because the other process 2388 * couldn't have relocated locked pages. 2389 */ 2390 ASSERT(ierr == -1 || ierr == -2); 2391 if (segvn_anypgsz) { 2392 ASSERT(ierr == -2 || szc != 0); 2393 ASSERT(ierr == -1 || szc < sptseg->s_szc); 2394 szc = (ierr == -1) ? szc - 1 : szc + 1; 2395 } else { 2396 /* 2397 * For faults and segvn_anypgsz == 0 2398 * we need to be careful not to loop forever 2399 * if existing page is found with szc other 2400 * than 0 or seg->s_szc. This could be due 2401 * to page relocations on behalf of DR or 2402 * more likely large page creation. For this 2403 * case simply re-size to existing page's szc 2404 * if returned by anon_map_getpages(). 2405 */ 2406 if (ppa_szc == (uint_t)-1) { 2407 szc = (ierr == -1) ? 
0 : sptseg->s_szc; 2408 } else { 2409 ASSERT(ppa_szc <= sptseg->s_szc); 2410 ASSERT(ierr == -2 || ppa_szc < szc); 2411 ASSERT(ierr == -1 || ppa_szc > szc); 2412 szc = ppa_szc; 2413 } 2414 } 2415 pg_sz = page_get_pagesize(szc); 2416 lp_npgs = btop(pg_sz); 2417 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz)); 2418 } 2419 if (anon_locked) { 2420 anon_array_exit(&cookie); 2421 } 2422 ANON_LOCK_EXIT(&->a_rwlock); 2423 return (0); 2424 2425 lpgs_err: 2426 if (anon_locked) { 2427 anon_array_exit(&cookie); 2428 } 2429 ANON_LOCK_EXIT(&->a_rwlock); 2430 for (j = 0; j < ppa_idx; j++) 2431 page_unlock(ppa[j]); 2432 return (err); 2433 } 2434 2435 /* 2436 * count the number of bytes in a set of spt pages that are currently not 2437 * locked 2438 */ 2439 static rctl_qty_t 2440 spt_unlockedbytes(pgcnt_t npages, page_t **ppa) 2441 { 2442 ulong_t i; 2443 rctl_qty_t unlocked = 0; 2444 2445 for (i = 0; i < npages; i++) { 2446 if (ppa[i]->p_lckcnt == 0) 2447 unlocked += PAGESIZE; 2448 } 2449 return (unlocked); 2450 } 2451 2452 int 2453 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages, 2454 page_t **ppa, ulong_t *lockmap, size_t pos, 2455 rctl_qty_t *locked) 2456 { 2457 struct shm_data *shmd = seg->s_data; 2458 struct spt_data *sptd = shmd->shm_sptseg->s_data; 2459 ulong_t i; 2460 int kernel; 2461 2462 /* return the number of bytes actually locked */ 2463 *locked = 0; 2464 for (i = 0; i < npages; anon_index++, pos++, i++) { 2465 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) { 2466 if (sptd->spt_ppa_lckcnt[anon_index] < 2467 (ushort_t)DISM_LOCK_MAX) { 2468 if (++sptd->spt_ppa_lckcnt[anon_index] == 2469 (ushort_t)DISM_LOCK_MAX) { 2470 cmn_err(CE_WARN, 2471 "DISM page lock limit " 2472 "reached on DISM offset 0x%lx\n", 2473 anon_index << PAGESHIFT); 2474 } 2475 kernel = (sptd->spt_ppa && 2476 sptd->spt_ppa[anon_index]) ? 1 : 0; 2477 if (!page_pp_lock(ppa[i], 0, kernel)) { 2478 sptd->spt_ppa_lckcnt[anon_index]--; 2479 return (EAGAIN); 2480 } 2481 /* if this is a newly locked page, count it */ 2482 if (ppa[i]->p_lckcnt == 1) { 2483 *locked += PAGESIZE; 2484 } 2485 shmd->shm_lckpgs++; 2486 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED; 2487 if (lockmap != NULL) 2488 BT_SET(lockmap, pos); 2489 } 2490 } 2491 } 2492 return (0); 2493 } 2494 2495 /*ARGSUSED*/ 2496 static int 2497 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len, 2498 int attr, int op, ulong_t *lockmap, size_t pos) 2499 { 2500 struct shm_data *shmd = seg->s_data; 2501 struct seg *sptseg = shmd->shm_sptseg; 2502 struct spt_data *sptd = sptseg->s_data; 2503 struct kshmid *sp = sptd->spt_amp->a_sp; 2504 pgcnt_t npages, a_npages; 2505 page_t **ppa; 2506 pgcnt_t an_idx, a_an_idx, ppa_idx; 2507 caddr_t spt_addr, a_addr; /* spt and aligned address */ 2508 size_t a_len; /* aligned len */ 2509 size_t share_sz; 2510 ulong_t i; 2511 int sts = 0; 2512 rctl_qty_t unlocked = 0; 2513 rctl_qty_t locked = 0; 2514 struct proc *p = curproc; 2515 kproject_t *proj; 2516 2517 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2518 ASSERT(sp != NULL); 2519 2520 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 2521 return (0); 2522 } 2523 2524 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK); 2525 an_idx = seg_page(seg, addr); 2526 npages = btopr(len); 2527 2528 if (an_idx + npages > btopr(shmd->shm_amp->size)) { 2529 return (ENOMEM); 2530 } 2531 2532 /* 2533 * A shm's project never changes, so no lock needed. 2534 * The shm has a hold on the project, so it will not go away. 
2535 * Since we have a mapping to shm within this zone, we know 2536 * that the zone will not go away. 2537 */ 2538 proj = sp->shm_perm.ipc_proj; 2539 2540 if (op == MC_LOCK) { 2541 2542 /* 2543 * Need to align addr and size request if they are not 2544 * aligned so we can always allocate large page(s) however 2545 * we only lock what was requested in initial request. 2546 */ 2547 share_sz = page_get_pagesize(sptseg->s_szc); 2548 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz); 2549 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)), 2550 share_sz); 2551 a_npages = btop(a_len); 2552 a_an_idx = seg_page(seg, a_addr); 2553 spt_addr = sptseg->s_base + ptob(a_an_idx); 2554 ppa_idx = an_idx - a_an_idx; 2555 2556 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages), 2557 KM_NOSLEEP)) == NULL) { 2558 return (ENOMEM); 2559 } 2560 2561 /* 2562 * Don't cache any new pages for IO and 2563 * flush any cached pages. 2564 */ 2565 mutex_enter(&sptd->spt_lock); 2566 if (sptd->spt_ppa != NULL) 2567 sptd->spt_flags |= DISM_PPA_CHANGED; 2568 2569 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa); 2570 if (sts != 0) { 2571 mutex_exit(&sptd->spt_lock); 2572 kmem_free(ppa, ((sizeof (page_t *)) * a_npages)); 2573 return (sts); 2574 } 2575 2576 mutex_enter(&sp->shm_mlock); 2577 /* enforce locked memory rctl */ 2578 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]); 2579 2580 mutex_enter(&p->p_lock); 2581 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) { 2582 mutex_exit(&p->p_lock); 2583 sts = EAGAIN; 2584 } else { 2585 mutex_exit(&p->p_lock); 2586 sts = spt_lockpages(seg, an_idx, npages, 2587 &ppa[ppa_idx], lockmap, pos, &locked); 2588 2589 /* 2590 * correct locked count if not all pages could be 2591 * locked 2592 */ 2593 if ((unlocked - locked) > 0) { 2594 rctl_decr_locked_mem(NULL, proj, 2595 (unlocked - locked), 0); 2596 } 2597 } 2598 /* 2599 * unlock pages 2600 */ 2601 for (i = 0; i < a_npages; i++) 2602 page_unlock(ppa[i]); 2603 if (sptd->spt_ppa != NULL) 2604 sptd->spt_flags |= DISM_PPA_CHANGED; 2605 mutex_exit(&sp->shm_mlock); 2606 mutex_exit(&sptd->spt_lock); 2607 2608 kmem_free(ppa, ((sizeof (page_t *)) * a_npages)); 2609 2610 } else if (op == MC_UNLOCK) { /* unlock */ 2611 struct anon_map *amp; 2612 struct anon *ap; 2613 struct vnode *vp; 2614 u_offset_t off; 2615 struct page *pp; 2616 int kernel; 2617 anon_sync_obj_t cookie; 2618 rctl_qty_t unlocked = 0; 2619 2620 amp = sptd->spt_amp; 2621 mutex_enter(&sptd->spt_lock); 2622 if (shmd->shm_lckpgs == 0) { 2623 mutex_exit(&sptd->spt_lock); 2624 return (0); 2625 } 2626 /* 2627 * Don't cache new IO pages. 2628 */ 2629 if (sptd->spt_ppa != NULL) 2630 sptd->spt_flags |= DISM_PPA_CHANGED; 2631 2632 mutex_enter(&sp->shm_mlock); 2633 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2634 for (i = 0; i < npages; i++, an_idx++) { 2635 if (shmd->shm_vpage[an_idx] & DISM_PG_LOCKED) { 2636 anon_array_enter(amp, an_idx, &cookie); 2637 ap = anon_get_ptr(amp->ahp, an_idx); 2638 ASSERT(ap); 2639 2640 swap_xlate(ap, &vp, &off); 2641 anon_array_exit(&cookie); 2642 pp = page_lookup(vp, off, SE_SHARED); 2643 ASSERT(pp); 2644 /* 2645 * the availrmem is decremented only for 2646 * pages which are not in seg pcache, 2647 * for pages in seg pcache availrmem was 2648 * decremented in _dismpagelock() (if 2649 * they were not locked here) 2650 */ 2651 kernel = (sptd->spt_ppa && 2652 sptd->spt_ppa[an_idx]) ? 
1 : 0; 2653 ASSERT(pp->p_lckcnt > 0); 2654 page_pp_unlock(pp, 0, kernel); 2655 if (pp->p_lckcnt == 0) 2656 unlocked += PAGESIZE; 2657 page_unlock(pp); 2658 shmd->shm_vpage[an_idx] &= ~DISM_PG_LOCKED; 2659 sptd->spt_ppa_lckcnt[an_idx]--; 2660 shmd->shm_lckpgs--; 2661 } 2662 } 2663 ANON_LOCK_EXIT(&amp->a_rwlock); 2664 if (sptd->spt_ppa != NULL) 2665 sptd->spt_flags |= DISM_PPA_CHANGED; 2666 mutex_exit(&sptd->spt_lock); 2667 2668 rctl_decr_locked_mem(NULL, proj, unlocked, 0); 2669 mutex_exit(&sp->shm_mlock); 2670 } 2671 return (sts); 2672 } 2673 2674 /*ARGSUSED*/ 2675 int 2676 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 2677 { 2678 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2679 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2680 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1; 2681 2682 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2683 2684 /* 2685 * ISM segment is always rw. 2686 */ 2687 while (--pgno >= 0) 2688 *protv++ = sptd->spt_prot; 2689 return (0); 2690 } 2691 2692 /*ARGSUSED*/ 2693 u_offset_t 2694 segspt_shmgetoffset(struct seg *seg, caddr_t addr) 2695 { 2696 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2697 2698 /* Offset does not matter in ISM memory */ 2699 2700 return ((u_offset_t)0); 2701 } 2702 2703 /* ARGSUSED */ 2704 int 2705 segspt_shmgettype(struct seg *seg, caddr_t addr) 2706 { 2707 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2708 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2709 2710 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2711 2712 /* 2713 * The shared memory mapping is always MAP_SHARED; swap is only 2714 * reserved for DISM. 2715 */ 2716 return (MAP_SHARED | 2717 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE)); 2718 } 2719 2720 /*ARGSUSED*/ 2721 int 2722 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 2723 { 2724 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2725 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2726 2727 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2728 2729 *vpp = sptd->spt_vp; 2730 return (0); 2731 } 2732 2733 /* 2734 * We need to wait for pending IO to complete to a DISM segment in order for 2735 * pages to get kicked out of the seg_pcache. 120 seconds should be more 2736 * than enough time to wait.
2737 */ 2738 static clock_t spt_pcache_wait = 120; 2739 2740 /*ARGSUSED*/ 2741 static int 2742 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 2743 { 2744 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2745 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2746 struct anon_map *amp; 2747 pgcnt_t pg_idx; 2748 ushort_t gen; 2749 clock_t end_lbolt; 2750 int writer; 2751 2752 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2753 2754 if (behav == MADV_FREE) { 2755 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) 2756 return (0); 2757 2758 amp = sptd->spt_amp; 2759 pg_idx = seg_page(seg, addr); 2760 2761 mutex_enter(&sptd->spt_lock); 2762 if (sptd->spt_ppa == NULL) { 2763 mutex_exit(&sptd->spt_lock); 2764 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2765 anon_disclaim(amp, pg_idx, len); 2766 ANON_LOCK_EXIT(&->a_rwlock); 2767 return (0); 2768 } 2769 2770 sptd->spt_flags |= DISM_PPA_CHANGED; 2771 gen = sptd->spt_gen; 2772 2773 mutex_exit(&sptd->spt_lock); 2774 2775 /* 2776 * Purge all DISM cached pages 2777 */ 2778 seg_ppurge_seg(segspt_reclaim); 2779 2780 /* 2781 * Drop the AS_LOCK so that other threads can grab it 2782 * in the as_pageunlock path and hopefully get the segment 2783 * kicked out of the seg_pcache. We bump the shm_softlockcnt 2784 * to keep this segment resident. 2785 */ 2786 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock); 2787 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 2788 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock); 2789 2790 mutex_enter(&sptd->spt_lock); 2791 2792 end_lbolt = lbolt + (hz * spt_pcache_wait); 2793 2794 /* 2795 * Try to wait for pages to get kicked out of the seg_pcache. 2796 */ 2797 while (sptd->spt_gen == gen && 2798 (sptd->spt_flags & DISM_PPA_CHANGED) && 2799 lbolt < end_lbolt) { 2800 if (!cv_timedwait_sig(&sptd->spt_cv, 2801 &sptd->spt_lock, end_lbolt)) { 2802 break; 2803 } 2804 } 2805 2806 mutex_exit(&sptd->spt_lock); 2807 2808 /* Regrab the AS_LOCK and release our hold on the segment */ 2809 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock, 2810 writer ? RW_WRITER : RW_READER); 2811 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1); 2812 if (shmd->shm_softlockcnt <= 0) { 2813 if (AS_ISUNMAPWAIT(seg->s_as)) { 2814 mutex_enter(&seg->s_as->a_contents); 2815 if (AS_ISUNMAPWAIT(seg->s_as)) { 2816 AS_CLRUNMAPWAIT(seg->s_as); 2817 cv_broadcast(&seg->s_as->a_cv); 2818 } 2819 mutex_exit(&seg->s_as->a_contents); 2820 } 2821 } 2822 2823 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2824 anon_disclaim(amp, pg_idx, len); 2825 ANON_LOCK_EXIT(&->a_rwlock); 2826 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP || 2827 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) { 2828 int already_set; 2829 ulong_t anon_index; 2830 lgrp_mem_policy_t policy; 2831 caddr_t shm_addr; 2832 size_t share_size; 2833 size_t size; 2834 struct seg *sptseg = shmd->shm_sptseg; 2835 caddr_t sptseg_addr; 2836 2837 /* 2838 * Align address and length to page size of underlying segment 2839 */ 2840 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc); 2841 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size); 2842 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), 2843 share_size); 2844 2845 amp = shmd->shm_amp; 2846 anon_index = seg_page(seg, shm_addr); 2847 2848 /* 2849 * And now we may have to adjust size downward if we have 2850 * exceeded the realsize of the segment or initial anon 2851 * allocations. 
2852 */ 2853 sptseg_addr = sptseg->s_base + ptob(anon_index); 2854 if ((sptseg_addr + size) > 2855 (sptseg->s_base + sptd->spt_realsize)) 2856 size = (sptseg->s_base + sptd->spt_realsize) - 2857 sptseg_addr; 2858 2859 /* 2860 * Set memory allocation policy for this segment 2861 */ 2862 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED); 2863 already_set = lgrp_shm_policy_set(policy, amp, anon_index, 2864 NULL, 0, len); 2865 2866 /* 2867 * If random memory allocation policy set already, 2868 * don't bother reapplying it. 2869 */ 2870 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 2871 return (0); 2872 2873 /* 2874 * Mark any existing pages in the given range for 2875 * migration, flushing the I/O page cache, and using 2876 * underlying segment to calculate anon index and get 2877 * anonmap and vnode pointer from 2878 */ 2879 if (shmd->shm_softlockcnt > 0) 2880 segspt_purge(seg); 2881 2882 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0); 2883 } 2884 2885 return (0); 2886 } 2887 2888 /*ARGSUSED*/ 2889 void 2890 segspt_shmdump(struct seg *seg) 2891 { 2892 /* no-op for ISM segment */ 2893 } 2894 2895 /*ARGSUSED*/ 2896 static faultcode_t 2897 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 2898 { 2899 return (ENOTSUP); 2900 } 2901 2902 /* 2903 * get a memory ID for an addr in a given segment 2904 */ 2905 static int 2906 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp) 2907 { 2908 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2909 struct anon *ap; 2910 size_t anon_index; 2911 struct anon_map *amp = shmd->shm_amp; 2912 struct spt_data *sptd = shmd->shm_sptseg->s_data; 2913 struct seg *sptseg = shmd->shm_sptseg; 2914 anon_sync_obj_t cookie; 2915 2916 anon_index = seg_page(seg, addr); 2917 2918 if (addr > (seg->s_base + sptd->spt_realsize)) { 2919 return (EFAULT); 2920 } 2921 2922 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2923 anon_array_enter(amp, anon_index, &cookie); 2924 ap = anon_get_ptr(amp->ahp, anon_index); 2925 if (ap == NULL) { 2926 struct page *pp; 2927 caddr_t spt_addr = sptseg->s_base + ptob(anon_index); 2928 2929 pp = anon_zero(sptseg, spt_addr, &ap, kcred); 2930 if (pp == NULL) { 2931 anon_array_exit(&cookie); 2932 ANON_LOCK_EXIT(&->a_rwlock); 2933 return (ENOMEM); 2934 } 2935 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 2936 page_unlock(pp); 2937 } 2938 anon_array_exit(&cookie); 2939 ANON_LOCK_EXIT(&->a_rwlock); 2940 memidp->val[0] = (uintptr_t)ap; 2941 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET; 2942 return (0); 2943 } 2944 2945 /* 2946 * Get memory allocation policy info for specified address in given segment 2947 */ 2948 static lgrp_mem_policy_info_t * 2949 segspt_shmgetpolicy(struct seg *seg, caddr_t addr) 2950 { 2951 struct anon_map *amp; 2952 ulong_t anon_index; 2953 lgrp_mem_policy_info_t *policy_info; 2954 struct shm_data *shm_data; 2955 2956 ASSERT(seg != NULL); 2957 2958 /* 2959 * Get anon_map from segshm 2960 * 2961 * Assume that no lock needs to be held on anon_map, since 2962 * it should be protected by its reference count which must be 2963 * nonzero for an existing segment 2964 * Need to grab readers lock on policy tree though 2965 */ 2966 shm_data = (struct shm_data *)seg->s_data; 2967 if (shm_data == NULL) 2968 return (NULL); 2969 amp = shm_data->shm_amp; 2970 ASSERT(amp->refcnt != 0); 2971 2972 /* 2973 * Get policy info 2974 * 2975 * Assume starting anon index of 0 2976 */ 2977 anon_index = seg_page(seg, addr); 2978 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 
0); 2979 2980 return (policy_info); 2981 } 2982 2983 /*ARGSUSED*/ 2984 static int 2985 segspt_shmcapable(struct seg *seg, segcapability_t capability) 2986 { 2987 return (0); 2988 } 2989
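
/*
 * The illustrative sketches below are NOT part of the original driver; they
 * are appended here only to spell out a few of the techniques used above.
 * The segspt_example_* helpers and the SEGSPT_EXAMPLES guard are
 * hypothetical names, and each sketch is built only from macros, functions
 * and fields this file already uses.
 *
 * Sketch 1: how the fault prologs of segspt_dismfault() and
 * segspt_shmfault() widen a fault to preferred-pagesize boundaries and
 * translate a shared-memory address into the underlying dummy spt segment.
 */
#ifdef SEGSPT_EXAMPLES
static void
segspt_example_align(struct seg *seg, struct seg *sptseg, caddr_t addr,
    size_t len, caddr_t *shm_addrp, caddr_t *spt_addrp, pgcnt_t *npagesp)
{
	size_t pgsz = page_get_pagesize(sptseg->s_szc);
	caddr_t shm_addr;
	size_t size;
	ulong_t an_idx;

	/* Round the faulting range out to the ISM/DISM preferred pagesize. */
	shm_addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
	size = P2ROUNDUP((uintptr_t)((addr + len) - shm_addr), pgsz);

	/* A page index into the shm segment is the same index into sptseg. */
	an_idx = seg_page(seg, shm_addr);

	*shm_addrp = shm_addr;
	*spt_addrp = sptseg->s_base + ptob(an_idx);
	*npagesp = btopr(size);
}
#endif	/* SEGSPT_EXAMPLES */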
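
/*
 * Sketch 2 (hypothetical helpers, same SEGSPT_EXAMPLES caveat as above):
 * the availrmem reservation that the F_SOFTLOCK case of segspt_dismfault()
 * performs before loading locked translations, plus the rollback used when
 * spt_anon_getpages() fails.  Locked pages may not push availrmem below
 * tune.t_minarmem.
 */
#ifdef SEGSPT_EXAMPLES
static int
segspt_example_softlock_resv(struct shm_data *shmd, pgcnt_t npages)
{
	mutex_enter(&freemem_lock);
	if (availrmem < tune.t_minarmem + npages) {
		mutex_exit(&freemem_lock);
		return (FC_MAKE_ERR(ENOMEM));
	}
	availrmem -= npages;
	mutex_exit(&freemem_lock);

	/* Remember how many pages this attach has softlocked. */
	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
	return (0);
}

static void
segspt_example_softlock_unresv(struct shm_data *shmd, pgcnt_t npages)
{
	/* Undo the reservation above after a failed page fetch. */
	mutex_enter(&freemem_lock);
	availrmem += npages;
	mutex_exit(&freemem_lock);
	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
}
#endif	/* SEGSPT_EXAMPLES */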
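
/*
 * Sketch 3 (hypothetical helper, same caveat): the chunked
 * hat_memload_array() loop used by both fault handlers above.  Translations
 * are loaded in whole preferred-pagesize chunks so the HAT never sees
 * differing page sizes across one virtual range; F_SOFTLOCK additionally
 * passes HAT_LOAD_LOCK so the mappings stay locked.
 */
#ifdef SEGSPT_EXAMPLES
static void
segspt_example_load(struct seg *sptseg, caddr_t spt_addr, page_t **ppa,
    pgcnt_t npages, uint_t prot, int softlock)
{
	size_t pgsz = page_get_pagesize(sptseg->s_szc);
	pgcnt_t pgcnt = page_get_pagecnt(sptseg->s_szc);
	uint_t flags = HAT_LOAD_SHARE | (softlock ? HAT_LOAD_LOCK : 0);
	caddr_t a = spt_addr;
	pgcnt_t pidx;

	for (pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
		/* The last chunk may be shorter than a full large page. */
		size_t sz = MIN(pgsz, ptob(npages - pidx));

		hat_memload_array(sptseg->s_as->a_hat, a, sz, &ppa[pidx],
		    prot, flags);
	}
}
#endif	/* SEGSPT_EXAMPLES */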
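
/*
 * Sketch 4 (hypothetical helper, same caveat): the page-size retry policy
 * of spt_anon_getpages().  anon_map_getpages() reports -1 when a large page
 * of the current size cannot be constructed (size down) and -2 when another
 * sharer already has a larger page (size up); when segvn_anypgsz is 0 the
 * code resizes directly to the szc reported back in ppa_szc, if any.
 */
#ifdef SEGSPT_EXAMPLES
static uint_t
segspt_example_next_szc(int ierr, uint_t szc, uint_t seg_szc, uint_t ppa_szc)
{
	ASSERT(ierr == -1 || ierr == -2);

	if (segvn_anypgsz)
		return ((ierr == -1) ? szc - 1 : szc + 1);

	/* No existing page size reported: fall back to 0 or the seg's szc. */
	if (ppa_szc == (uint_t)-1)
		return ((ierr == -1) ? 0 : seg_szc);

	/* Otherwise match whatever page size already exists. */
	return (ppa_szc);
}
#endif	/* SEGSPT_EXAMPLES */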
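
/*
 * Sketch 5 (hypothetical helper, same caveat): the locked-memory
 * resource-control accounting done by the MC_LOCK path of
 * segspt_shmlockop().  The project is charged up front for every byte that
 * is not yet locked, the pages are then locked, and any shortfall is
 * credited back.  The real path does all of this while holding
 * sp->shm_mlock; that locking is omitted here for brevity.
 */
#ifdef SEGSPT_EXAMPLES
static int
segspt_example_mc_lock(struct seg *seg, kproject_t *proj, pgcnt_t an_idx,
    pgcnt_t npages, page_t **ppa, ulong_t *lockmap, size_t pos)
{
	rctl_qty_t unlocked, locked = 0;
	struct proc *p = curproc;
	int sts;

	/* Bytes in this range that are not yet page_pp_lock()ed. */
	unlocked = spt_unlockedbytes(npages, ppa);

	mutex_enter(&p->p_lock);
	if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
		mutex_exit(&p->p_lock);
		return (EAGAIN);
	}
	mutex_exit(&p->p_lock);

	sts = spt_lockpages(seg, an_idx, npages, ppa, lockmap, pos, &locked);

	/* Give back the part of the charge that was never locked. */
	if ((unlocked - locked) > 0)
		rctl_decr_locked_mem(NULL, proj, (unlocked - locked), 0);

	return (sts);
}
#endif	/* SEGSPT_EXAMPLES */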
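
/*
 * Sketch 6 (hypothetical helper, same caveat): the bounded wait that
 * segspt_shmadvise() uses after purging the DISM page cache for MADV_FREE.
 * It waits up to spt_pcache_wait seconds for the cached page list to be
 * reclaimed, keyed off spt_gen and DISM_PPA_CHANGED, and gives up early if
 * the wait is interrupted by a signal.
 */
#ifdef SEGSPT_EXAMPLES
static void
segspt_example_pcache_wait(struct spt_data *sptd, ushort_t gen)
{
	clock_t end_lbolt;

	mutex_enter(&sptd->spt_lock);
	end_lbolt = lbolt + (hz * spt_pcache_wait);

	while (sptd->spt_gen == gen &&
	    (sptd->spt_flags & DISM_PPA_CHANGED) &&
	    lbolt < end_lbolt) {
		if (!cv_timedwait_sig(&sptd->spt_cv, &sptd->spt_lock,
		    end_lbolt))
			break;
	}
	mutex_exit(&sptd->spt_lock);
}
#endif	/* SEGSPT_EXAMPLES */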
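
/*
 * Sketch 7 (hypothetical helper, same caveat): how the non-pageable ISM
 * path of segspt_shmfault() builds the SE_SHARED-locked page array handed
 * to hat_memload_array(): each anon slot is translated to its swap
 * <vp, off> pair and the page is looked up share-locked, under the anon
 * map's reader lock as in the code above.
 */
#ifdef SEGSPT_EXAMPLES
static void
segspt_example_fill_ppa(struct anon_map *amp, ulong_t anon_index,
    pgcnt_t npages, page_t **ppa)
{
	struct anon *ap;
	struct vnode *vp;
	u_offset_t off;
	pgcnt_t i;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	for (i = 0; i < npages; i++) {
		ap = anon_get_ptr(amp->ahp, anon_index + i);
		ASSERT(ap != NULL);
		swap_xlate(ap, &vp, &off);
		ppa[i] = page_lookup(vp, off, SE_SHARED);
		ASSERT(ppa[i] != NULL);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
}
#endif	/* SEGSPT_EXAMPLES */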