/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/user.h>
#include <sys/mman.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/tuneable.h>
#include <vm/hat.h>
#include <vm/seg.h>
#include <vm/as.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <sys/buf.h>
#include <sys/swap.h>
#include <sys/atomic.h>
#include <vm/seg_spt.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/shm.h>
#include <sys/shm_impl.h>
#include <sys/lgrp.h>
#include <sys/vmsystm.h>
#include <sys/policy.h>
#include <sys/project.h>
#include <sys/tnf_probe.h>
#include <sys/zone.h>

#define	SEGSPTADDR	(caddr_t)0x0

/*
 * # pages used for spt
 */
static size_t	spt_used;

/*
 * segspt_minfree is the memory left for system after ISM
 * locked its pages; it is set up to 5% of availrmem in
 * sptcreate when ISM is created.  ISM should not use more
 * than ~90% of availrmem; if it does, then the performance
 * of the system may decrease.  Machines with large memories may
 * be able to use up more memory for ISM, so we set the default
 * segspt_minfree to 5% (which gives ISM max 95% of availrmem).
 * If somebody wants even more memory for ISM (risking hanging
 * the system) they can patch segspt_minfree to a smaller number.
 */
pgcnt_t segspt_minfree = 0;
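/*
 * For example (hypothetical numbers): if availrmem is 1,000,000 pages when
 * sptcreate() first runs, segspt_minfree becomes availrmem/20 = 50,000 pages,
 * leaving ISM at most 950,000 pages (95% of availrmem) to lock down.
 */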

static int segspt_create(struct seg *seg, caddr_t argsp);
static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_free(struct seg *seg);
static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);

static void
segspt_badop()
{
	panic("segspt_badop called");
	/*NOTREACHED*/
}

#define	SEGSPT_BADOP(t)	(t(*)())segspt_badop

struct seg_ops segspt_ops = {
	SEGSPT_BADOP(int),		/* dup */
	segspt_unmap,
	segspt_free,
	SEGSPT_BADOP(int),		/* fault */
	SEGSPT_BADOP(faultcode_t),	/* faulta */
	SEGSPT_BADOP(int),		/* setprot */
	SEGSPT_BADOP(int),		/* checkprot */
	SEGSPT_BADOP(int),		/* kluster */
	SEGSPT_BADOP(size_t),		/* swapout */
	SEGSPT_BADOP(int),		/* sync */
	SEGSPT_BADOP(size_t),		/* incore */
	SEGSPT_BADOP(int),		/* lockop */
	SEGSPT_BADOP(int),		/* getprot */
	SEGSPT_BADOP(u_offset_t),	/* getoffset */
	SEGSPT_BADOP(int),		/* gettype */
	SEGSPT_BADOP(int),		/* getvp */
	SEGSPT_BADOP(int),		/* advise */
	SEGSPT_BADOP(void),		/* dump */
	SEGSPT_BADOP(int),		/* pagelock */
	SEGSPT_BADOP(int),		/* setpgsz */
	SEGSPT_BADOP(int),		/* getmemid */
	segspt_getpolicy,		/* getpolicy */
	SEGSPT_BADOP(int),		/* capable */
};

static int segspt_shmdup(struct seg *seg, struct seg *newseg);
static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_shmfree(struct seg *seg);
static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
		caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
		register size_t len, register uint_t prot);
static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
		uint_t prot);
static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t segspt_shmswapout(struct seg *seg);
static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
		register char *vec);
static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
		int attr, uint_t flags);
static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
		int attr, int op, ulong_t *lockmap, size_t pos);
static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
		uint_t *protv);
static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
static int segspt_shmgettype(struct seg *seg, caddr_t addr);
static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
		uint_t behav);
static void segspt_shmdump(struct seg *seg);
static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
		struct page ***, enum lock_type, enum seg_rw);
static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
static int segspt_shmcapable(struct seg *, segcapability_t);

struct seg_ops segspt_shmops = {
	segspt_shmdup,
	segspt_shmunmap,
	segspt_shmfree,
	segspt_shmfault,
	segspt_shmfaulta,
	segspt_shmsetprot,
	segspt_shmcheckprot,
	segspt_shmkluster,
	segspt_shmswapout,
	segspt_shmsync,
	segspt_shmincore,
	segspt_shmlockop,
	segspt_shmgetprot,
	segspt_shmgetoffset,
	segspt_shmgettype,
	segspt_shmgetvp,
	segspt_shmadvise,	/* advise */
	segspt_shmdump,
	segspt_shmpagelock,
	segspt_shmsetpgsz,
	segspt_shmgetmemid,
	segspt_shmgetpolicy,
	segspt_shmcapable,
};

static void segspt_purge(struct seg *seg);
static int segspt_reclaim(struct seg *, caddr_t, size_t, struct page **,
		enum seg_rw);
static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
		page_t **ppa);



/*ARGSUSED*/
int
sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
	uint_t prot, uint_t flags, uint_t share_szc)
{
	int	err;
	struct	as	*newas;
	struct	segspt_crargs sptcargs;

#ifdef DEBUG
	TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
	    tnf_ulong, size, size );
#endif
	if (segspt_minfree == 0)	/* leave min 5% of availrmem for */
		segspt_minfree = availrmem/20;	/* for the system */

	if (!hat_supported(HAT_SHARED_PT, (void *)0))
		return (EINVAL);

	/*
	 * get a new as for this shared memory segment
	 */
	newas = as_alloc();
	newas->a_proc = NULL;
	sptcargs.amp = amp;
	sptcargs.prot = prot;
	sptcargs.flags = flags;
	sptcargs.szc = share_szc;
	/*
	 * create a shared page table (spt) segment
	 */

	if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
		as_free(newas);
		return (err);
	}
	*sptseg = sptcargs.seg_spt;
	return (0);
}

void
sptdestroy(struct as *as, struct anon_map *amp)
{

#ifdef DEBUG
	TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
#endif
	(void) as_unmap(as, SEGSPTADDR, amp->size);
	as_free(as);
}

/*
 * called from seg_free().
 * free (i.e., unlock, unmap, return to free list)
 * all the pages in the given seg.
 */
void
segspt_free(struct seg	*seg)
{
	struct spt_data *sptd = (struct spt_data *)seg->s_data;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	if (sptd != NULL) {
		if (sptd->spt_realsize)
			segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);

		if (sptd->spt_ppa_lckcnt)
			kmem_free(sptd->spt_ppa_lckcnt,
			    sizeof (*sptd->spt_ppa_lckcnt)
			    * btopr(sptd->spt_amp->size));
		kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
		mutex_destroy(&sptd->spt_lock);
		kmem_free(sptd, sizeof (*sptd));
	}
}

/*ARGSUSED*/
static int
segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
	uint_t flags)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	return (0);
}

/*ARGSUSED*/
static size_t
segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	caddr_t	eo_seg;
	pgcnt_t	npages;
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg	*sptseg;
	struct spt_data *sptd;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
#ifdef lint
	seg = seg;
#endif
	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		eo_seg = addr + len;
		while (addr < eo_seg) {
			/* page exists, and it's locked. */
			*vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
			    SEG_PAGE_ANON;
			addr += PAGESIZE;
		}
		return (len);
	} else {
		struct	anon_map *amp = shmd->shm_amp;
		struct	anon	*ap;
		page_t		*pp;
		pgcnt_t		anon_index;
		struct vnode	*vp;
		u_offset_t	off;
		ulong_t		i;
		int		ret;
		anon_sync_obj_t	cookie;

		addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
		anon_index = seg_page(seg, addr);
		npages = btopr(len);
		if (anon_index + npages > btopr(shmd->shm_amp->size)) {
			return (EINVAL);
		}
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, anon_index++) {
			ret = 0;
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
			if (ap != NULL) {
				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup_nowait(vp, off, SE_SHARED);
				if (pp != NULL) {
					ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
					page_unlock(pp);
				}
			} else {
				anon_array_exit(&cookie);
			}
			if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
				ret |= SEG_PAGE_LOCKED;
			}
			*vec++ = (char)ret;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		return (len);
	}
}

static int
segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
{
	size_t share_size;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * seg.s_size may have been rounded up to the largest page size
	 * in shmat().
	 * XXX This should be cleaned up.  sptdestroy should take a length
	 * argument which should be the same as sptcreate.  Then
	 * this rounding would not be needed (or is done in shm.c)
	 * Only the check for full segment will be needed.
	 *
	 * XXX -- shouldn't raddr == 0 always?  These tests don't seem
	 * to be useful at all.
	 */
	share_size = page_get_pagesize(seg->s_szc);
	ssize = P2ROUNDUP(ssize, share_size);

	if (raddr == seg->s_base && ssize == seg->s_size) {
		seg_free(seg);
		return (0);
	} else
		return (EINVAL);
}

int
segspt_create(struct seg *seg, caddr_t argsp)
{
	int		err;
	caddr_t		addr = seg->s_base;
	struct spt_data *sptd;
	struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
	struct anon_map *amp = sptcargs->amp;
	struct kshmid	*sp = amp->a_sp;
	struct	cred	*cred = CRED();
	ulong_t		i, j, anon_index = 0;
	pgcnt_t		npages = btopr(amp->size);
	struct vnode	*vp;
	page_t		**ppa;
	uint_t		hat_flags;
	size_t		pgsz;
	pgcnt_t		pgcnt;
	caddr_t		a;
	pgcnt_t		pidx;
	size_t		sz;
	proc_t		*procp = curproc;
	rctl_qty_t	lockedbytes = 0;
	kproject_t	*proj;

	/*
	 * We are holding the a_lock on the underlying dummy as,
	 * so we can make calls to the HAT layer.
395 */ 396 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 397 ASSERT(sp != NULL); 398 399 #ifdef DEBUG 400 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */, 401 tnf_opaque, addr, addr, 402 tnf_ulong, len, seg->s_size); 403 #endif 404 if ((sptcargs->flags & SHM_PAGEABLE) == 0) { 405 if (err = anon_swap_adjust(npages)) 406 return (err); 407 } 408 err = ENOMEM; 409 410 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL) 411 goto out1; 412 413 if ((sptcargs->flags & SHM_PAGEABLE) == 0) { 414 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages), 415 KM_NOSLEEP)) == NULL) 416 goto out2; 417 } 418 419 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL); 420 421 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL) 422 goto out3; 423 424 seg->s_ops = &segspt_ops; 425 sptd->spt_vp = vp; 426 sptd->spt_amp = amp; 427 sptd->spt_prot = sptcargs->prot; 428 sptd->spt_flags = sptcargs->flags; 429 seg->s_data = (caddr_t)sptd; 430 sptd->spt_ppa = NULL; 431 sptd->spt_ppa_lckcnt = NULL; 432 seg->s_szc = sptcargs->szc; 433 434 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 435 if (seg->s_szc > amp->a_szc) { 436 amp->a_szc = seg->s_szc; 437 } 438 ANON_LOCK_EXIT(&->a_rwlock); 439 440 /* 441 * Set policy to affect initial allocation of pages in 442 * anon_map_createpages() 443 */ 444 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index, 445 NULL, 0, ptob(npages)); 446 447 if (sptcargs->flags & SHM_PAGEABLE) { 448 size_t share_sz; 449 pgcnt_t new_npgs, more_pgs; 450 struct anon_hdr *nahp; 451 452 share_sz = page_get_pagesize(seg->s_szc); 453 if (!IS_P2ALIGNED(amp->size, share_sz)) { 454 /* 455 * We are rounding up the size of the anon array 456 * on 4 M boundary because we always create 4 M 457 * of page(s) when locking, faulting pages and we 458 * don't have to check for all corner cases e.g. 459 * if there is enough space to allocate 4 M 460 * page. 
461 */ 462 new_npgs = btop(P2ROUNDUP(amp->size, share_sz)); 463 more_pgs = new_npgs - npages; 464 465 if (anon_resv(ptob(more_pgs)) == 0) { 466 err = ENOMEM; 467 goto out4; 468 } 469 nahp = anon_create(new_npgs, ANON_SLEEP); 470 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 471 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages, 472 ANON_SLEEP); 473 anon_release(amp->ahp, npages); 474 amp->ahp = nahp; 475 amp->swresv = amp->size = ptob(new_npgs); 476 ANON_LOCK_EXIT(&->a_rwlock); 477 npages = new_npgs; 478 } 479 480 sptd->spt_ppa_lckcnt = kmem_zalloc(npages * 481 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP); 482 sptd->spt_pcachecnt = 0; 483 sptd->spt_realsize = ptob(npages); 484 sptcargs->seg_spt = seg; 485 return (0); 486 } 487 488 /* 489 * get array of pages for each anon slot in amp 490 */ 491 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa, 492 seg, addr, S_CREATE, cred)) != 0) 493 goto out4; 494 495 mutex_enter(&sp->shm_mlock); 496 497 /* May be partially locked, so, count bytes to charge for locking */ 498 for (i = 0; i < npages; i++) 499 if (ppa[i]->p_lckcnt == 0) 500 lockedbytes += PAGESIZE; 501 502 proj = sp->shm_perm.ipc_proj; 503 504 if (lockedbytes > 0) { 505 mutex_enter(&procp->p_lock); 506 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) { 507 mutex_exit(&procp->p_lock); 508 mutex_exit(&sp->shm_mlock); 509 for (i = 0; i < npages; i++) 510 page_unlock(ppa[i]); 511 err = ENOMEM; 512 goto out4; 513 } 514 mutex_exit(&procp->p_lock); 515 } 516 517 /* 518 * addr is initial address corresponding to the first page on ppa list 519 */ 520 for (i = 0; i < npages; i++) { 521 /* attempt to lock all pages */ 522 if (page_pp_lock(ppa[i], 0, 1) == 0) { 523 /* 524 * if unable to lock any page, unlock all 525 * of them and return error 526 */ 527 for (j = 0; j < i; j++) 528 page_pp_unlock(ppa[j], 0, 1); 529 for (i = 0; i < npages; i++) 530 page_unlock(ppa[i]); 531 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0); 532 mutex_exit(&sp->shm_mlock); 533 err = ENOMEM; 534 goto out4; 535 } 536 } 537 mutex_exit(&sp->shm_mlock); 538 539 /* 540 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 541 * for the entire life of the segment. For example platforms 542 * that do not support Dynamic Reconfiguration. 543 */ 544 hat_flags = HAT_LOAD_SHARE; 545 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) 546 hat_flags |= HAT_LOAD_LOCK; 547 548 /* 549 * Load translations one lare page at a time 550 * to make sure we don't create mappings bigger than 551 * segment's size code in case underlying pages 552 * are shared with segvn's segment that uses bigger 553 * size code than we do. 554 */ 555 pgsz = page_get_pagesize(seg->s_szc); 556 pgcnt = page_get_pagecnt(seg->s_szc); 557 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) { 558 sz = MIN(pgsz, ptob(npages - pidx)); 559 hat_memload_array(seg->s_as->a_hat, a, sz, 560 &ppa[pidx], sptd->spt_prot, hat_flags); 561 } 562 563 /* 564 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 565 * we will leave the pages locked SE_SHARED for the life 566 * of the ISM segment. This will prevent any calls to 567 * hat_pageunload() on this ISM segment for those platforms. 568 */ 569 if (!(hat_flags & HAT_LOAD_LOCK)) { 570 /* 571 * On platforms that support HAT_DYNAMIC_ISM_UNMAP, 572 * we no longer need to hold the SE_SHARED lock on the pages, 573 * since L_PAGELOCK and F_SOFTLOCK calls will grab the 574 * SE_SHARED lock on the pages as necessary. 
575 */ 576 for (i = 0; i < npages; i++) 577 page_unlock(ppa[i]); 578 } 579 sptd->spt_pcachecnt = 0; 580 kmem_free(ppa, ((sizeof (page_t *)) * npages)); 581 sptd->spt_realsize = ptob(npages); 582 atomic_add_long(&spt_used, npages); 583 sptcargs->seg_spt = seg; 584 return (0); 585 586 out4: 587 seg->s_data = NULL; 588 kmem_free(vp, sizeof (*vp)); 589 out3: 590 mutex_destroy(&sptd->spt_lock); 591 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 592 kmem_free(ppa, (sizeof (*ppa) * npages)); 593 out2: 594 kmem_free(sptd, sizeof (*sptd)); 595 out1: 596 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 597 anon_swap_restore(npages); 598 return (err); 599 } 600 601 /*ARGSUSED*/ 602 void 603 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len) 604 { 605 struct page *pp; 606 struct spt_data *sptd = (struct spt_data *)seg->s_data; 607 pgcnt_t npages; 608 ulong_t anon_idx; 609 struct anon_map *amp; 610 struct anon *ap; 611 struct vnode *vp; 612 u_offset_t off; 613 uint_t hat_flags; 614 int root = 0; 615 pgcnt_t pgs, curnpgs = 0; 616 page_t *rootpp; 617 rctl_qty_t unlocked_bytes = 0; 618 kproject_t *proj; 619 kshmid_t *sp; 620 621 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 622 623 len = P2ROUNDUP(len, PAGESIZE); 624 625 npages = btop(len); 626 627 hat_flags = HAT_UNLOAD_UNLOCK; 628 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) || 629 (sptd->spt_flags & SHM_PAGEABLE)) { 630 hat_flags = HAT_UNLOAD; 631 } 632 633 hat_unload(seg->s_as->a_hat, addr, len, hat_flags); 634 635 amp = sptd->spt_amp; 636 if (sptd->spt_flags & SHM_PAGEABLE) 637 npages = btop(amp->size); 638 639 ASSERT(amp != NULL); 640 641 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 642 sp = amp->a_sp; 643 proj = sp->shm_perm.ipc_proj; 644 mutex_enter(&sp->shm_mlock); 645 } 646 for (anon_idx = 0; anon_idx < npages; anon_idx++) { 647 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 648 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) { 649 panic("segspt_free_pages: null app"); 650 /*NOTREACHED*/ 651 } 652 } else { 653 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx)) 654 == NULL) 655 continue; 656 } 657 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0); 658 swap_xlate(ap, &vp, &off); 659 660 /* 661 * If this platform supports HAT_DYNAMIC_ISM_UNMAP, 662 * the pages won't be having SE_SHARED lock at this 663 * point. 664 * 665 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 666 * the pages are still held SE_SHARED locked from the 667 * original segspt_create() 668 * 669 * Our goal is to get SE_EXCL lock on each page, remove 670 * permanent lock on it and invalidate the page. 671 */ 672 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 673 if (hat_flags == HAT_UNLOAD) 674 pp = page_lookup(vp, off, SE_EXCL); 675 else { 676 if ((pp = page_find(vp, off)) == NULL) { 677 panic("segspt_free_pages: " 678 "page not locked"); 679 /*NOTREACHED*/ 680 } 681 if (!page_tryupgrade(pp)) { 682 page_unlock(pp); 683 pp = page_lookup(vp, off, SE_EXCL); 684 } 685 } 686 if (pp == NULL) { 687 panic("segspt_free_pages: " 688 "page not in the system"); 689 /*NOTREACHED*/ 690 } 691 ASSERT(pp->p_lckcnt > 0); 692 page_pp_unlock(pp, 0, 1); 693 if (pp->p_lckcnt == 0) 694 unlocked_bytes += PAGESIZE; 695 } else { 696 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL) 697 continue; 698 } 699 /* 700 * It's logical to invalidate the pages here as in most cases 701 * these were created by segspt. 702 */ 703 if (pp->p_szc != 0) { 704 /* 705 * For DISM swap is released in shm_rm_amp. 
706 */ 707 if ((sptd->spt_flags & SHM_PAGEABLE) == 0 && 708 ap->an_pvp != NULL) { 709 panic("segspt_free_pages: pvp non NULL"); 710 /*NOTREACHED*/ 711 } 712 if (root == 0) { 713 ASSERT(curnpgs == 0); 714 root = 1; 715 rootpp = pp; 716 pgs = curnpgs = page_get_pagecnt(pp->p_szc); 717 ASSERT(pgs > 1); 718 ASSERT(IS_P2ALIGNED(pgs, pgs)); 719 ASSERT(!(page_pptonum(pp) & (pgs - 1))); 720 curnpgs--; 721 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) { 722 ASSERT(curnpgs == 1); 723 ASSERT(page_pptonum(pp) == 724 page_pptonum(rootpp) + (pgs - 1)); 725 page_destroy_pages(rootpp); 726 root = 0; 727 curnpgs = 0; 728 } else { 729 ASSERT(curnpgs > 1); 730 ASSERT(page_pptonum(pp) == 731 page_pptonum(rootpp) + (pgs - curnpgs)); 732 curnpgs--; 733 } 734 } else { 735 if (root != 0 || curnpgs != 0) { 736 panic("segspt_free_pages: bad large page"); 737 /*NOTREACHED*/ 738 } 739 /*LINTED: constant in conditional context */ 740 VN_DISPOSE(pp, B_INVAL, 0, kcred); 741 } 742 } 743 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 744 if (unlocked_bytes > 0) 745 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0); 746 mutex_exit(&sp->shm_mlock); 747 } 748 if (root != 0 || curnpgs != 0) { 749 panic("segspt_free_pages: bad large page"); 750 /*NOTREACHED*/ 751 } 752 753 /* 754 * mark that pages have been released 755 */ 756 sptd->spt_realsize = 0; 757 758 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 759 atomic_add_long(&spt_used, -npages); 760 anon_swap_restore(npages); 761 } 762 } 763 764 /* 765 * Get memory allocation policy info for specified address in given segment 766 */ 767 static lgrp_mem_policy_info_t * 768 segspt_getpolicy(struct seg *seg, caddr_t addr) 769 { 770 struct anon_map *amp; 771 ulong_t anon_index; 772 lgrp_mem_policy_info_t *policy_info; 773 struct spt_data *spt_data; 774 775 ASSERT(seg != NULL); 776 777 /* 778 * Get anon_map from segspt 779 * 780 * Assume that no lock needs to be held on anon_map, since 781 * it should be protected by its reference count which must be 782 * nonzero for an existing segment 783 * Need to grab readers lock on policy tree though 784 */ 785 spt_data = (struct spt_data *)seg->s_data; 786 if (spt_data == NULL) 787 return (NULL); 788 amp = spt_data->spt_amp; 789 ASSERT(amp->refcnt != 0); 790 791 /* 792 * Get policy info 793 * 794 * Assume starting anon index of 0 795 */ 796 anon_index = seg_page(seg, addr); 797 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0); 798 799 return (policy_info); 800 } 801 802 /* 803 * DISM only. 804 * Return locked pages over a given range. 805 * 806 * We will cache all DISM locked pages and save the pplist for the 807 * entire segment in the ppa field of the underlying DISM segment structure. 808 * Later, during a call to segspt_reclaim() we will use this ppa array 809 * to page_unlock() all of the pages and then we will free this ppa list. 
810 */ 811 /*ARGSUSED*/ 812 static int 813 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len, 814 struct page ***ppp, enum lock_type type, enum seg_rw rw) 815 { 816 struct shm_data *shmd = (struct shm_data *)seg->s_data; 817 struct seg *sptseg = shmd->shm_sptseg; 818 struct spt_data *sptd = sptseg->s_data; 819 pgcnt_t pg_idx, npages, tot_npages, npgs; 820 struct page **pplist, **pl, **ppa, *pp; 821 struct anon_map *amp; 822 spgcnt_t an_idx; 823 int ret = ENOTSUP; 824 uint_t pl_built = 0; 825 struct anon *ap; 826 struct vnode *vp; 827 u_offset_t off; 828 pgcnt_t claim_availrmem = 0; 829 uint_t szc; 830 831 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 832 833 /* 834 * We want to lock/unlock the entire ISM segment. Therefore, 835 * we will be using the underlying sptseg and it's base address 836 * and length for the caching arguments. 837 */ 838 ASSERT(sptseg); 839 ASSERT(sptd); 840 841 pg_idx = seg_page(seg, addr); 842 npages = btopr(len); 843 844 /* 845 * check if the request is larger than number of pages covered 846 * by amp 847 */ 848 if (pg_idx + npages > btopr(sptd->spt_amp->size)) { 849 *ppp = NULL; 850 return (ENOTSUP); 851 } 852 853 if (type == L_PAGEUNLOCK) { 854 ASSERT(sptd->spt_ppa != NULL); 855 856 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 857 sptd->spt_ppa, sptd->spt_prot, segspt_reclaim); 858 859 /* 860 * If someone is blocked while unmapping, we purge 861 * segment page cache and thus reclaim pplist synchronously 862 * without waiting for seg_pasync_thread. This speeds up 863 * unmapping in cases where munmap(2) is called, while 864 * raw async i/o is still in progress or where a thread 865 * exits on data fault in a multithreaded application. 866 */ 867 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 868 segspt_purge(seg); 869 } 870 return (0); 871 } else if (type == L_PAGERECLAIM) { 872 ASSERT(sptd->spt_ppa != NULL); 873 (void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size, 874 sptd->spt_ppa, sptd->spt_prot); 875 return (0); 876 } 877 878 if (sptd->spt_flags & DISM_PPA_CHANGED) { 879 segspt_purge(seg); 880 /* 881 * for DISM ppa needs to be rebuild since 882 * number of locked pages could be changed 883 */ 884 *ppp = NULL; 885 return (ENOTSUP); 886 } 887 888 /* 889 * First try to find pages in segment page cache, without 890 * holding the segment lock. 891 */ 892 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 893 sptd->spt_prot); 894 if (pplist != NULL) { 895 ASSERT(sptd->spt_ppa != NULL); 896 ASSERT(sptd->spt_ppa == pplist); 897 ppa = sptd->spt_ppa; 898 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 899 if (ppa[an_idx] == NULL) { 900 seg_pinactive(seg, seg->s_base, 901 sptd->spt_amp->size, ppa, 902 sptd->spt_prot, segspt_reclaim); 903 *ppp = NULL; 904 return (ENOTSUP); 905 } 906 if ((szc = ppa[an_idx]->p_szc) != 0) { 907 npgs = page_get_pagecnt(szc); 908 an_idx = P2ROUNDUP(an_idx + 1, npgs); 909 } else { 910 an_idx++; 911 } 912 } 913 /* 914 * Since we cache the entire DISM segment, we want to 915 * set ppp to point to the first slot that corresponds 916 * to the requested addr, i.e. pg_idx. 917 */ 918 *ppp = &(sptd->spt_ppa[pg_idx]); 919 return (0); 920 } 921 922 /* The L_PAGELOCK case... 
	mutex_enter(&sptd->spt_lock);
	/*
	 * try to find pages in segment page cache with mutex
	 */
	pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size,
	    sptd->spt_prot);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa != NULL);
		ASSERT(sptd->spt_ppa == pplist);
		ppa = sptd->spt_ppa;
		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
			if (ppa[an_idx] == NULL) {
				mutex_exit(&sptd->spt_lock);
				seg_pinactive(seg, seg->s_base,
				    sptd->spt_amp->size, ppa,
				    sptd->spt_prot, segspt_reclaim);
				*ppp = NULL;
				return (ENOTSUP);
			}
			if ((szc = ppa[an_idx]->p_szc) != 0) {
				npgs = page_get_pagecnt(szc);
				an_idx = P2ROUNDUP(an_idx + 1, npgs);
			} else {
				an_idx++;
			}
		}
		/*
		 * Since we cache the entire DISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. pg_idx.
		 */
		mutex_exit(&sptd->spt_lock);
		*ppp = &(sptd->spt_ppa[pg_idx]);
		return (0);
	}
	if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) ==
	    SEGP_FAIL) {
		mutex_exit(&sptd->spt_lock);
		*ppp = NULL;
		return (ENOTSUP);
	}

	/*
	 * No need to worry about protections because DISM pages are always rw.
	 */
	pl = pplist = NULL;
	amp = sptd->spt_amp;

	/*
	 * Do we need to build the ppa array?
	 */
	if (sptd->spt_ppa == NULL) {
		pgcnt_t lpg_cnt = 0;

		pl_built = 1;
		tot_npages = btopr(sptd->spt_amp->size);

		ASSERT(sptd->spt_pcachecnt == 0);
		pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
		pl = pplist;

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		for (an_idx = 0; an_idx < tot_npages; ) {
			ap = anon_get_ptr(amp->ahp, an_idx);
			/*
			 * Cache only mlocked pages.  For large pages
			 * if one (constituent) page is mlocked
			 * all pages for that large page
			 * are cached also.  This is for quick
			 * lookups of ppa array.
			 */
			if ((ap != NULL) && (lpg_cnt != 0 ||
			    (sptd->spt_ppa_lckcnt[an_idx] != 0))) {

				swap_xlate(ap, &vp, &off);
				pp = page_lookup(vp, off, SE_SHARED);
				ASSERT(pp != NULL);
				if (lpg_cnt == 0) {
					lpg_cnt++;
					/*
					 * For a small page, we are done --
					 * lpg_count is reset to 0 below.
					 *
					 * For a large page, we are guaranteed
					 * to find the anon structures of all
					 * constituent pages and a non-zero
					 * lpg_cnt ensures that we don't test
					 * for mlock for these.  We are done
					 * when lpg_count reaches (npgs + 1).
					 * If we are not the first constituent
					 * page, restart at the first one.
					 */
					npgs = page_get_pagecnt(pp->p_szc);
					if (!IS_P2ALIGNED(an_idx, npgs)) {
						an_idx = P2ALIGN(an_idx, npgs);
						page_unlock(pp);
						continue;
					}
				}
				if (++lpg_cnt > npgs)
					lpg_cnt = 0;

				/*
				 * availrmem is decremented only
				 * for unlocked pages
				 */
				if (sptd->spt_ppa_lckcnt[an_idx] == 0)
					claim_availrmem++;
				pplist[an_idx] = pp;
			}
			an_idx++;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);

		mutex_enter(&freemem_lock);
		if (availrmem < tune.t_minarmem + claim_availrmem) {
			mutex_exit(&freemem_lock);
			ret = FC_MAKE_ERR(ENOMEM);
			claim_availrmem = 0;
			goto insert_fail;
		} else {
			availrmem -= claim_availrmem;
		}
		mutex_exit(&freemem_lock);

		sptd->spt_ppa = pl;
	} else {
		/*
		 * We already have a valid ppa[].
1052 */ 1053 pl = sptd->spt_ppa; 1054 } 1055 1056 ASSERT(pl != NULL); 1057 1058 ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size, 1059 pl, sptd->spt_prot, SEGP_FORCE_WIRED | SEGP_ASYNC_FLUSH, 1060 segspt_reclaim); 1061 if (ret == SEGP_FAIL) { 1062 /* 1063 * seg_pinsert failed. We return 1064 * ENOTSUP, so that the as_pagelock() code will 1065 * then try the slower F_SOFTLOCK path. 1066 */ 1067 if (pl_built) { 1068 /* 1069 * No one else has referenced the ppa[]. 1070 * We created it and we need to destroy it. 1071 */ 1072 sptd->spt_ppa = NULL; 1073 } 1074 ret = ENOTSUP; 1075 goto insert_fail; 1076 } 1077 1078 /* 1079 * In either case, we increment softlockcnt on the 'real' segment. 1080 */ 1081 sptd->spt_pcachecnt++; 1082 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1083 1084 ppa = sptd->spt_ppa; 1085 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 1086 if (ppa[an_idx] == NULL) { 1087 mutex_exit(&sptd->spt_lock); 1088 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 1089 pl, sptd->spt_prot, segspt_reclaim); 1090 *ppp = NULL; 1091 return (ENOTSUP); 1092 } 1093 if ((szc = ppa[an_idx]->p_szc) != 0) { 1094 npgs = page_get_pagecnt(szc); 1095 an_idx = P2ROUNDUP(an_idx + 1, npgs); 1096 } else { 1097 an_idx++; 1098 } 1099 } 1100 /* 1101 * We can now drop the sptd->spt_lock since the ppa[] 1102 * exists and he have incremented pacachecnt. 1103 */ 1104 mutex_exit(&sptd->spt_lock); 1105 1106 /* 1107 * Since we cache the entire segment, we want to 1108 * set ppp to point to the first slot that corresponds 1109 * to the requested addr, i.e. pg_idx. 1110 */ 1111 *ppp = &(sptd->spt_ppa[pg_idx]); 1112 return (ret); 1113 1114 insert_fail: 1115 /* 1116 * We will only reach this code if we tried and failed. 1117 * 1118 * And we can drop the lock on the dummy seg, once we've failed 1119 * to set up a new ppa[]. 1120 */ 1121 mutex_exit(&sptd->spt_lock); 1122 1123 if (pl_built) { 1124 mutex_enter(&freemem_lock); 1125 availrmem += claim_availrmem; 1126 mutex_exit(&freemem_lock); 1127 1128 /* 1129 * We created pl and we need to destroy it. 1130 */ 1131 pplist = pl; 1132 for (an_idx = 0; an_idx < tot_npages; an_idx++) { 1133 if (pplist[an_idx] != NULL) 1134 page_unlock(pplist[an_idx]); 1135 } 1136 kmem_free(pl, sizeof (page_t *) * tot_npages); 1137 } 1138 1139 if (shmd->shm_softlockcnt <= 0) { 1140 if (AS_ISUNMAPWAIT(seg->s_as)) { 1141 mutex_enter(&seg->s_as->a_contents); 1142 if (AS_ISUNMAPWAIT(seg->s_as)) { 1143 AS_CLRUNMAPWAIT(seg->s_as); 1144 cv_broadcast(&seg->s_as->a_cv); 1145 } 1146 mutex_exit(&seg->s_as->a_contents); 1147 } 1148 } 1149 *ppp = NULL; 1150 return (ret); 1151 } 1152 1153 1154 1155 /* 1156 * return locked pages over a given range. 1157 * 1158 * We will cache the entire ISM segment and save the pplist for the 1159 * entire segment in the ppa field of the underlying ISM segment structure. 1160 * Later, during a call to segspt_reclaim() we will use this ppa array 1161 * to page_unlock() all of the pages and then we will free this ppa list. 
1162 */ 1163 /*ARGSUSED*/ 1164 static int 1165 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len, 1166 struct page ***ppp, enum lock_type type, enum seg_rw rw) 1167 { 1168 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1169 struct seg *sptseg = shmd->shm_sptseg; 1170 struct spt_data *sptd = sptseg->s_data; 1171 pgcnt_t np, page_index, npages; 1172 caddr_t a, spt_base; 1173 struct page **pplist, **pl, *pp; 1174 struct anon_map *amp; 1175 ulong_t anon_index; 1176 int ret = ENOTSUP; 1177 uint_t pl_built = 0; 1178 struct anon *ap; 1179 struct vnode *vp; 1180 u_offset_t off; 1181 1182 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1183 1184 /* 1185 * We want to lock/unlock the entire ISM segment. Therefore, 1186 * we will be using the underlying sptseg and it's base address 1187 * and length for the caching arguments. 1188 */ 1189 ASSERT(sptseg); 1190 ASSERT(sptd); 1191 1192 if (sptd->spt_flags & SHM_PAGEABLE) { 1193 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw)); 1194 } 1195 1196 page_index = seg_page(seg, addr); 1197 npages = btopr(len); 1198 1199 /* 1200 * check if the request is larger than number of pages covered 1201 * by amp 1202 */ 1203 if (page_index + npages > btopr(sptd->spt_amp->size)) { 1204 *ppp = NULL; 1205 return (ENOTSUP); 1206 } 1207 1208 if (type == L_PAGEUNLOCK) { 1209 1210 ASSERT(sptd->spt_ppa != NULL); 1211 1212 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 1213 sptd->spt_ppa, sptd->spt_prot, segspt_reclaim); 1214 1215 /* 1216 * If someone is blocked while unmapping, we purge 1217 * segment page cache and thus reclaim pplist synchronously 1218 * without waiting for seg_pasync_thread. This speeds up 1219 * unmapping in cases where munmap(2) is called, while 1220 * raw async i/o is still in progress or where a thread 1221 * exits on data fault in a multithreaded application. 1222 */ 1223 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 1224 segspt_purge(seg); 1225 } 1226 return (0); 1227 } else if (type == L_PAGERECLAIM) { 1228 ASSERT(sptd->spt_ppa != NULL); 1229 1230 (void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size, 1231 sptd->spt_ppa, sptd->spt_prot); 1232 return (0); 1233 } 1234 1235 /* 1236 * First try to find pages in segment page cache, without 1237 * holding the segment lock. 1238 */ 1239 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 1240 sptd->spt_prot); 1241 if (pplist != NULL) { 1242 ASSERT(sptd->spt_ppa == pplist); 1243 ASSERT(sptd->spt_ppa[page_index]); 1244 /* 1245 * Since we cache the entire ISM segment, we want to 1246 * set ppp to point to the first slot that corresponds 1247 * to the requested addr, i.e. page_index. 1248 */ 1249 *ppp = &(sptd->spt_ppa[page_index]); 1250 return (0); 1251 } 1252 1253 /* The L_PAGELOCK case... */ 1254 mutex_enter(&sptd->spt_lock); 1255 1256 /* 1257 * try to find pages in segment page cache 1258 */ 1259 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 1260 sptd->spt_prot); 1261 if (pplist != NULL) { 1262 ASSERT(sptd->spt_ppa == pplist); 1263 /* 1264 * Since we cache the entire segment, we want to 1265 * set ppp to point to the first slot that corresponds 1266 * to the requested addr, i.e. page_index. 
1267 */ 1268 mutex_exit(&sptd->spt_lock); 1269 *ppp = &(sptd->spt_ppa[page_index]); 1270 return (0); 1271 } 1272 1273 if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) == 1274 SEGP_FAIL) { 1275 mutex_exit(&sptd->spt_lock); 1276 *ppp = NULL; 1277 return (ENOTSUP); 1278 } 1279 1280 /* 1281 * No need to worry about protections because ISM pages 1282 * are always rw. 1283 */ 1284 pl = pplist = NULL; 1285 1286 /* 1287 * Do we need to build the ppa array? 1288 */ 1289 if (sptd->spt_ppa == NULL) { 1290 ASSERT(sptd->spt_ppa == pplist); 1291 1292 spt_base = sptseg->s_base; 1293 pl_built = 1; 1294 1295 /* 1296 * availrmem is decremented once during anon_swap_adjust() 1297 * and is incremented during the anon_unresv(), which is 1298 * called from shm_rm_amp() when the segment is destroyed. 1299 */ 1300 amp = sptd->spt_amp; 1301 ASSERT(amp != NULL); 1302 1303 /* pcachecnt is protected by sptd->spt_lock */ 1304 ASSERT(sptd->spt_pcachecnt == 0); 1305 pplist = kmem_zalloc(sizeof (page_t *) 1306 * btopr(sptd->spt_amp->size), KM_SLEEP); 1307 pl = pplist; 1308 1309 anon_index = seg_page(sptseg, spt_base); 1310 1311 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1312 for (a = spt_base; a < (spt_base + sptd->spt_amp->size); 1313 a += PAGESIZE, anon_index++, pplist++) { 1314 ap = anon_get_ptr(amp->ahp, anon_index); 1315 ASSERT(ap != NULL); 1316 swap_xlate(ap, &vp, &off); 1317 pp = page_lookup(vp, off, SE_SHARED); 1318 ASSERT(pp != NULL); 1319 *pplist = pp; 1320 } 1321 ANON_LOCK_EXIT(&->a_rwlock); 1322 1323 if (a < (spt_base + sptd->spt_amp->size)) { 1324 ret = ENOTSUP; 1325 goto insert_fail; 1326 } 1327 sptd->spt_ppa = pl; 1328 } else { 1329 /* 1330 * We already have a valid ppa[]. 1331 */ 1332 pl = sptd->spt_ppa; 1333 } 1334 1335 ASSERT(pl != NULL); 1336 1337 ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size, 1338 pl, sptd->spt_prot, SEGP_FORCE_WIRED, segspt_reclaim); 1339 if (ret == SEGP_FAIL) { 1340 /* 1341 * seg_pinsert failed. We return 1342 * ENOTSUP, so that the as_pagelock() code will 1343 * then try the slower F_SOFTLOCK path. 1344 */ 1345 if (pl_built) { 1346 /* 1347 * No one else has referenced the ppa[]. 1348 * We created it and we need to destroy it. 1349 */ 1350 sptd->spt_ppa = NULL; 1351 } 1352 ret = ENOTSUP; 1353 goto insert_fail; 1354 } 1355 1356 /* 1357 * In either case, we increment softlockcnt on the 'real' segment. 1358 */ 1359 sptd->spt_pcachecnt++; 1360 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1361 1362 /* 1363 * We can now drop the sptd->spt_lock since the ppa[] 1364 * exists and he have incremented pacachecnt. 1365 */ 1366 mutex_exit(&sptd->spt_lock); 1367 1368 /* 1369 * Since we cache the entire segment, we want to 1370 * set ppp to point to the first slot that corresponds 1371 * to the requested addr, i.e. page_index. 1372 */ 1373 *ppp = &(sptd->spt_ppa[page_index]); 1374 return (ret); 1375 1376 insert_fail: 1377 /* 1378 * We will only reach this code if we tried and failed. 1379 * 1380 * And we can drop the lock on the dummy seg, once we've failed 1381 * to set up a new ppa[]. 1382 */ 1383 mutex_exit(&sptd->spt_lock); 1384 1385 if (pl_built) { 1386 /* 1387 * We created pl and we need to destroy it. 
1388 */ 1389 pplist = pl; 1390 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT); 1391 while (np) { 1392 page_unlock(*pplist); 1393 np--; 1394 pplist++; 1395 } 1396 kmem_free(pl, sizeof (page_t *) * 1397 btopr(sptd->spt_amp->size)); 1398 } 1399 if (shmd->shm_softlockcnt <= 0) { 1400 if (AS_ISUNMAPWAIT(seg->s_as)) { 1401 mutex_enter(&seg->s_as->a_contents); 1402 if (AS_ISUNMAPWAIT(seg->s_as)) { 1403 AS_CLRUNMAPWAIT(seg->s_as); 1404 cv_broadcast(&seg->s_as->a_cv); 1405 } 1406 mutex_exit(&seg->s_as->a_contents); 1407 } 1408 } 1409 *ppp = NULL; 1410 return (ret); 1411 } 1412 1413 /* 1414 * purge any cached pages in the I/O page cache 1415 */ 1416 static void 1417 segspt_purge(struct seg *seg) 1418 { 1419 seg_ppurge(seg); 1420 } 1421 1422 static int 1423 segspt_reclaim(struct seg *seg, caddr_t addr, size_t len, struct page **pplist, 1424 enum seg_rw rw) 1425 { 1426 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1427 struct seg *sptseg; 1428 struct spt_data *sptd; 1429 pgcnt_t npages, i, free_availrmem = 0; 1430 int done = 0; 1431 1432 #ifdef lint 1433 addr = addr; 1434 #endif 1435 sptseg = shmd->shm_sptseg; 1436 sptd = sptseg->s_data; 1437 npages = (len >> PAGESHIFT); 1438 ASSERT(npages); 1439 ASSERT(sptd->spt_pcachecnt != 0); 1440 ASSERT(sptd->spt_ppa == pplist); 1441 ASSERT(npages == btopr(sptd->spt_amp->size)); 1442 /* 1443 * Acquire the lock on the dummy seg and destroy the 1444 * ppa array IF this is the last pcachecnt. 1445 */ 1446 mutex_enter(&sptd->spt_lock); 1447 if (--sptd->spt_pcachecnt == 0) { 1448 for (i = 0; i < npages; i++) { 1449 if (pplist[i] == NULL) { 1450 continue; 1451 } 1452 if (rw == S_WRITE) { 1453 hat_setrefmod(pplist[i]); 1454 } else { 1455 hat_setref(pplist[i]); 1456 } 1457 if ((sptd->spt_flags & SHM_PAGEABLE) && 1458 (sptd->spt_ppa_lckcnt[i] == 0)) 1459 free_availrmem++; 1460 page_unlock(pplist[i]); 1461 } 1462 if (sptd->spt_flags & SHM_PAGEABLE) { 1463 mutex_enter(&freemem_lock); 1464 availrmem += free_availrmem; 1465 mutex_exit(&freemem_lock); 1466 } 1467 /* 1468 * Since we want to cach/uncache the entire ISM segment, 1469 * we will track the pplist in a segspt specific field 1470 * ppa, that is initialized at the time we add an entry to 1471 * the cache. 1472 */ 1473 ASSERT(sptd->spt_pcachecnt == 0); 1474 kmem_free(pplist, sizeof (page_t *) * npages); 1475 sptd->spt_ppa = NULL; 1476 sptd->spt_flags &= ~DISM_PPA_CHANGED; 1477 done = 1; 1478 } 1479 mutex_exit(&sptd->spt_lock); 1480 /* 1481 * Now decrement softlockcnt. 1482 */ 1483 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1); 1484 1485 if (shmd->shm_softlockcnt <= 0) { 1486 if (AS_ISUNMAPWAIT(seg->s_as)) { 1487 mutex_enter(&seg->s_as->a_contents); 1488 if (AS_ISUNMAPWAIT(seg->s_as)) { 1489 AS_CLRUNMAPWAIT(seg->s_as); 1490 cv_broadcast(&seg->s_as->a_cv); 1491 } 1492 mutex_exit(&seg->s_as->a_contents); 1493 } 1494 } 1495 return (done); 1496 } 1497 1498 /* 1499 * Do a F_SOFTUNLOCK call over the range requested. 1500 * The range must have already been F_SOFTLOCK'ed. 1501 * 1502 * The calls to acquire and release the anon map lock mutex were 1503 * removed in order to avoid a deadly embrace during a DR 1504 * memory delete operation. (Eg. 
 * DR blocks while waiting for an
 * exclusive lock on a page that is being used for kaio; the
 * thread that will complete the kaio and call segspt_softunlock
 * blocks on the anon map lock; another thread holding the anon
 * map lock blocks on another page lock via the segspt_shmfault
 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
 *
 * The appropriateness of the removal is based upon the following:
 * 1. If we are holding a segment's reader lock and the page is held
 * shared, then the corresponding element in anonmap which points to
 * anon struct cannot change and there is no need to acquire the
 * anonymous map lock.
 * 2. Threads in segspt_softunlock have a reader lock on the segment
 * and already have the shared page lock, so we are guaranteed that
 * the anon map slot cannot change and therefore can call anon_get_ptr()
 * without grabbing the anonymous map lock.
 * 3. Threads that softlock a shared page break copy-on-write, even if
 * it's a read.  Thus cow faults can be ignored with respect to soft
 * unlocking, since the breaking of cow means that the anon slot(s) will
 * not be shared.
 */
static void
segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
	size_t len, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg	*sptseg;
	struct spt_data *sptd;
	page_t *pp;
	caddr_t adr;
	struct vnode *vp;
	u_offset_t offset;
	ulong_t anon_index;
	struct anon_map *amp;		/* XXX - for locknest */
	struct anon *ap = NULL;
	pgcnt_t npages;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;

	/*
	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
	 * and therefore their pages are SE_SHARED locked
	 * for the entire life of the segment.
	 */
	if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
	    ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
		goto softlock_decrement;
	}

	/*
	 * Any thread is free to do a page_find and
	 * page_unlock() on the pages within this seg.
	 *
	 * We are already holding the as->a_lock on the user's
	 * real segment, but we need to hold the a_lock on the
	 * underlying dummy as.  This is mostly to satisfy the
	 * underlying HAT layer.
	 */
	AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
	hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
	AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);

	amp = sptd->spt_amp;
	ASSERT(amp != NULL);
	anon_index = seg_page(sptseg, sptseg_addr);

	for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
		ap = anon_get_ptr(amp->ahp, anon_index++);
		ASSERT(ap != NULL);
		swap_xlate(ap, &vp, &offset);

		/*
		 * Use page_find() instead of page_lookup() to
		 * find the page since we know that it has a
		 * "shared" lock.
1582 */ 1583 pp = page_find(vp, offset); 1584 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1)); 1585 if (pp == NULL) { 1586 panic("segspt_softunlock: " 1587 "addr %p, ap %p, vp %p, off %llx", 1588 (void *)adr, (void *)ap, (void *)vp, offset); 1589 /*NOTREACHED*/ 1590 } 1591 1592 if (rw == S_WRITE) { 1593 hat_setrefmod(pp); 1594 } else if (rw != S_OTHER) { 1595 hat_setref(pp); 1596 } 1597 page_unlock(pp); 1598 } 1599 1600 softlock_decrement: 1601 npages = btopr(len); 1602 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages); 1603 if (shmd->shm_softlockcnt == 0) { 1604 /* 1605 * All SOFTLOCKS are gone. Wakeup any waiting 1606 * unmappers so they can try again to unmap. 1607 * Check for waiters first without the mutex 1608 * held so we don't always grab the mutex on 1609 * softunlocks. 1610 */ 1611 if (AS_ISUNMAPWAIT(seg->s_as)) { 1612 mutex_enter(&seg->s_as->a_contents); 1613 if (AS_ISUNMAPWAIT(seg->s_as)) { 1614 AS_CLRUNMAPWAIT(seg->s_as); 1615 cv_broadcast(&seg->s_as->a_cv); 1616 } 1617 mutex_exit(&seg->s_as->a_contents); 1618 } 1619 } 1620 } 1621 1622 int 1623 segspt_shmattach(struct seg *seg, caddr_t *argsp) 1624 { 1625 struct shm_data *shmd_arg = (struct shm_data *)argsp; 1626 struct shm_data *shmd; 1627 struct anon_map *shm_amp = shmd_arg->shm_amp; 1628 struct spt_data *sptd; 1629 int error = 0; 1630 1631 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1632 1633 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); 1634 if (shmd == NULL) 1635 return (ENOMEM); 1636 1637 shmd->shm_sptas = shmd_arg->shm_sptas; 1638 shmd->shm_amp = shm_amp; 1639 shmd->shm_sptseg = shmd_arg->shm_sptseg; 1640 1641 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0, 1642 NULL, 0, seg->s_size); 1643 1644 seg->s_data = (void *)shmd; 1645 seg->s_ops = &segspt_shmops; 1646 seg->s_szc = shmd->shm_sptseg->s_szc; 1647 sptd = shmd->shm_sptseg->s_data; 1648 1649 if (sptd->spt_flags & SHM_PAGEABLE) { 1650 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size), 1651 KM_NOSLEEP)) == NULL) { 1652 seg->s_data = (void *)NULL; 1653 kmem_free(shmd, (sizeof (*shmd))); 1654 return (ENOMEM); 1655 } 1656 shmd->shm_lckpgs = 0; 1657 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 1658 if ((error = hat_share(seg->s_as->a_hat, seg->s_base, 1659 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1660 seg->s_size, seg->s_szc)) != 0) { 1661 kmem_free(shmd->shm_vpage, 1662 btopr(shm_amp->size)); 1663 } 1664 } 1665 } else { 1666 error = hat_share(seg->s_as->a_hat, seg->s_base, 1667 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1668 seg->s_size, seg->s_szc); 1669 } 1670 if (error) { 1671 seg->s_szc = 0; 1672 seg->s_data = (void *)NULL; 1673 kmem_free(shmd, (sizeof (*shmd))); 1674 } else { 1675 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1676 shm_amp->refcnt++; 1677 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1678 } 1679 return (error); 1680 } 1681 1682 int 1683 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) 1684 { 1685 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1686 int reclaim = 1; 1687 1688 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1689 retry: 1690 if (shmd->shm_softlockcnt > 0) { 1691 if (reclaim == 1) { 1692 segspt_purge(seg); 1693 reclaim = 0; 1694 goto retry; 1695 } 1696 return (EAGAIN); 1697 } 1698 1699 if (ssize != seg->s_size) { 1700 #ifdef DEBUG 1701 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n", 1702 ssize, seg->s_size); 1703 #endif 1704 return (EINVAL); 1705 } 1706 1707 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, 
	    MC_UNLOCK, NULL, 0);
	hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);

	seg_free(seg);

	return (0);
}

void
segspt_shmfree(struct seg *seg)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon_map *shm_amp = shmd->shm_amp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	(void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
	    MC_UNLOCK, NULL, 0);

	/*
	 * Need to increment refcnt when attaching
	 * and decrement when detaching because of dup().
	 */
	ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
	shm_amp->refcnt--;
	ANON_LOCK_EXIT(&shm_amp->a_rwlock);

	if (shmd->shm_vpage) {	/* only for DISM */
		kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
		shmd->shm_vpage = NULL;
	}
	kmem_free(shmd, sizeof (*shmd));
}

/*ARGSUSED*/
int
segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * Shared page table is more than shared mapping.
	 *  Individual process sharing page tables can't change prot
	 *  because there is only one set of page tables.
	 *  This will be allowed after private page table is
	 *  supported.
	 */
	/* need to return correct status error? */
	return (0);
}


faultcode_t
segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
    size_t len, enum fault_type type, enum seg_rw rw)
{
	struct shm_data		*shmd = (struct shm_data *)seg->s_data;
	struct seg		*sptseg = shmd->shm_sptseg;
	struct as		*curspt = shmd->shm_sptas;
	struct spt_data		*sptd = sptseg->s_data;
	pgcnt_t npages;
	size_t size;
	caddr_t segspt_addr, shm_addr;
	page_t **ppa;
	int	i;
	ulong_t an_idx = 0;
	int	err = 0;
	int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
	size_t	pgsz;
	pgcnt_t	pgcnt;
	caddr_t	a;
	pgcnt_t	pidx;

#ifdef lint
	hat = hat;
#endif
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * Because of the way spt is implemented
	 * the realsize of the segment does not have to be
	 * equal to the segment size itself.  The segment size is
	 * often in multiples of a page size larger than PAGESIZE.
	 * The realsize is rounded up to the nearest PAGESIZE
	 * based on what the user requested.  This is a bit of
	 * ugliness that is historical but not easily fixed
	 * without re-designing the higher levels of ISM.
	 */
	ASSERT(addr >= seg->s_base);
	if (((addr + len) - seg->s_base) > sptd->spt_realsize)
		return (FC_NOMAP);
	/*
	 * For all of the following cases except F_PROT, we need to
	 * make any necessary adjustments to addr and len
	 * and get all of the necessary page_t's into an array called ppa[].
	 *
	 * The code in shmat() forces base addr and len of ISM segment
	 * to be aligned to largest page size supported.  Therefore,
	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
	 * pagesize" chunks.  We want to make sure that we HAT_LOAD_LOCK
	 * in large pagesize chunks, or else we will screw up the HAT
	 * layer by calling hat_memload_array() with differing page sizes
	 * over a given virtual range.
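	 *
	 * Worked example (hypothetical numbers): with a 4 MB share pagesize,
	 * a fault at seg->s_base + 0x401000 for len 0x2000 is widened so that
	 * shm_addr rounds down to seg->s_base + 0x400000 and size rounds up
	 * to 0x400000; hat_memload_array() is then always handed whole 4 MB
	 * chunks of page_t pointers.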
1811 */ 1812 pgsz = page_get_pagesize(sptseg->s_szc); 1813 pgcnt = page_get_pagecnt(sptseg->s_szc); 1814 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); 1815 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz); 1816 npages = btopr(size); 1817 1818 /* 1819 * Now we need to convert from addr in segshm to addr in segspt. 1820 */ 1821 an_idx = seg_page(seg, shm_addr); 1822 segspt_addr = sptseg->s_base + ptob(an_idx); 1823 1824 ASSERT((segspt_addr + ptob(npages)) <= 1825 (sptseg->s_base + sptd->spt_realsize)); 1826 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size)); 1827 1828 switch (type) { 1829 1830 case F_SOFTLOCK: 1831 1832 mutex_enter(&freemem_lock); 1833 if (availrmem < tune.t_minarmem + npages) { 1834 mutex_exit(&freemem_lock); 1835 return (FC_MAKE_ERR(ENOMEM)); 1836 } else { 1837 availrmem -= npages; 1838 } 1839 mutex_exit(&freemem_lock); 1840 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages); 1841 /* 1842 * Fall through to the F_INVAL case to load up the hat layer 1843 * entries with the HAT_LOAD_LOCK flag. 1844 */ 1845 /* FALLTHRU */ 1846 case F_INVAL: 1847 1848 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 1849 return (FC_NOMAP); 1850 1851 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP); 1852 1853 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa); 1854 if (err != 0) { 1855 if (type == F_SOFTLOCK) { 1856 mutex_enter(&freemem_lock); 1857 availrmem += npages; 1858 mutex_exit(&freemem_lock); 1859 atomic_add_long((ulong_t *)( 1860 &(shmd->shm_softlockcnt)), -npages); 1861 } 1862 goto dism_err; 1863 } 1864 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1865 a = segspt_addr; 1866 pidx = 0; 1867 if (type == F_SOFTLOCK) { 1868 1869 /* 1870 * Load up the translation keeping it 1871 * locked and don't unlock the page. 1872 */ 1873 for (; pidx < npages; a += pgsz, pidx += pgcnt) { 1874 hat_memload_array(sptseg->s_as->a_hat, 1875 a, pgsz, &ppa[pidx], sptd->spt_prot, 1876 HAT_LOAD_LOCK | HAT_LOAD_SHARE); 1877 } 1878 } else { 1879 if (hat == seg->s_as->a_hat) { 1880 1881 /* 1882 * Migrate pages marked for migration 1883 */ 1884 if (lgrp_optimizations()) 1885 page_migrate(seg, shm_addr, ppa, 1886 npages); 1887 1888 /* CPU HAT */ 1889 for (; pidx < npages; 1890 a += pgsz, pidx += pgcnt) { 1891 hat_memload_array(sptseg->s_as->a_hat, 1892 a, pgsz, &ppa[pidx], 1893 sptd->spt_prot, 1894 HAT_LOAD_SHARE); 1895 } 1896 } else { 1897 /* XHAT. Pass real address */ 1898 hat_memload_array(hat, shm_addr, 1899 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE); 1900 } 1901 1902 /* 1903 * And now drop the SE_SHARED lock(s). 1904 */ 1905 if (dyn_ism_unmap) { 1906 for (i = 0; i < npages; i++) { 1907 page_unlock(ppa[i]); 1908 } 1909 } 1910 } 1911 1912 if (!dyn_ism_unmap) { 1913 if (hat_share(seg->s_as->a_hat, shm_addr, 1914 curspt->a_hat, segspt_addr, ptob(npages), 1915 seg->s_szc) != 0) { 1916 panic("hat_share err in DISM fault"); 1917 /* NOTREACHED */ 1918 } 1919 if (type == F_INVAL) { 1920 for (i = 0; i < npages; i++) { 1921 page_unlock(ppa[i]); 1922 } 1923 } 1924 } 1925 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1926 dism_err: 1927 kmem_free(ppa, npages * sizeof (page_t *)); 1928 return (err); 1929 1930 case F_SOFTUNLOCK: 1931 1932 mutex_enter(&freemem_lock); 1933 availrmem += npages; 1934 mutex_exit(&freemem_lock); 1935 1936 /* 1937 * This is a bit ugly, we pass in the real seg pointer, 1938 * but the segspt_addr is the virtual address within the 1939 * dummy seg. 
1940 */ 1941 segspt_softunlock(seg, segspt_addr, size, rw); 1942 return (0); 1943 1944 case F_PROT: 1945 1946 /* 1947 * This takes care of the unusual case where a user 1948 * allocates a stack in shared memory and a register 1949 * window overflow is written to that stack page before 1950 * it is otherwise modified. 1951 * 1952 * We can get away with this because ISM segments are 1953 * always rw. Other than this unusual case, there 1954 * should be no instances of protection violations. 1955 */ 1956 return (0); 1957 1958 default: 1959 #ifdef DEBUG 1960 panic("segspt_dismfault default type?"); 1961 #else 1962 return (FC_NOMAP); 1963 #endif 1964 } 1965 } 1966 1967 1968 faultcode_t 1969 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr, 1970 size_t len, enum fault_type type, enum seg_rw rw) 1971 { 1972 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1973 struct seg *sptseg = shmd->shm_sptseg; 1974 struct as *curspt = shmd->shm_sptas; 1975 struct spt_data *sptd = sptseg->s_data; 1976 pgcnt_t npages; 1977 size_t size; 1978 caddr_t sptseg_addr, shm_addr; 1979 page_t *pp, **ppa; 1980 int i; 1981 u_offset_t offset; 1982 ulong_t anon_index = 0; 1983 struct vnode *vp; 1984 struct anon_map *amp; /* XXX - for locknest */ 1985 struct anon *ap = NULL; 1986 anon_sync_obj_t cookie; 1987 size_t pgsz; 1988 pgcnt_t pgcnt; 1989 caddr_t a; 1990 pgcnt_t pidx; 1991 size_t sz; 1992 1993 #ifdef lint 1994 hat = hat; 1995 #endif 1996 1997 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1998 1999 if (sptd->spt_flags & SHM_PAGEABLE) { 2000 return (segspt_dismfault(hat, seg, addr, len, type, rw)); 2001 } 2002 2003 /* 2004 * Because of the way spt is implemented 2005 * the realsize of the segment does not have to be 2006 * equal to the segment size itself. The segment size is 2007 * often in multiples of a page size larger than PAGESIZE. 2008 * The realsize is rounded up to the nearest PAGESIZE 2009 * based on what the user requested. This is a bit of 2010 * ungliness that is historical but not easily fixed 2011 * without re-designing the higher levels of ISM. 2012 */ 2013 ASSERT(addr >= seg->s_base); 2014 if (((addr + len) - seg->s_base) > sptd->spt_realsize) 2015 return (FC_NOMAP); 2016 /* 2017 * For all of the following cases except F_PROT, we need to 2018 * make any necessary adjustments to addr and len 2019 * and get all of the necessary page_t's into an array called ppa[]. 2020 * 2021 * The code in shmat() forces base addr and len of ISM segment 2022 * to be aligned to largest page size supported. Therefore, 2023 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large 2024 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 2025 * in large pagesize chunks, or else we will screw up the HAT 2026 * layer by calling hat_memload_array() with differing page sizes 2027 * over a given virtual range. 2028 */ 2029 pgsz = page_get_pagesize(sptseg->s_szc); 2030 pgcnt = page_get_pagecnt(sptseg->s_szc); 2031 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); 2032 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz); 2033 npages = btopr(size); 2034 2035 /* 2036 * Now we need to convert from addr in segshm to addr in segspt. 2037 */ 2038 anon_index = seg_page(seg, shm_addr); 2039 sptseg_addr = sptseg->s_base + ptob(anon_index); 2040 2041 /* 2042 * And now we may have to adjust npages downward if we have 2043 * exceeded the realsize of the segment or initial anon 2044 * allocations. 
2045 */
2046 if ((sptseg_addr + ptob(npages)) >
2047 (sptseg->s_base + sptd->spt_realsize))
2048 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2049
2050 npages = btopr(size);
2051
2052 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2053 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2054
2055 switch (type) {
2056
2057 case F_SOFTLOCK:
2058
2059 /*
2060 * availrmem is decremented once during anon_swap_adjust()
2061 * and is incremented during the anon_unresv(), which is
2062 * called from shm_rm_amp() when the segment is destroyed.
2063 */
2064 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2065 /*
2066 * Some platforms assume that ISM pages are SE_SHARED
2067 * locked for the entire life of the segment.
2068 */
2069 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2070 return (0);
2071 /*
2072 * Fall through to the F_INVAL case to load up the hat layer
2073 * entries with the HAT_LOAD_LOCK flag.
2074 */
2075
2076 /* FALLTHRU */
2077 case F_INVAL:
2078
2079 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2080 return (FC_NOMAP);
2081
2082 /*
2083 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2084 * may still rely on this call to hat_share(). That
2085 * would imply that those hats can fault on a
2086 * HAT_LOAD_LOCK translation, which would seem
2087 * contradictory.
2088 */
2089 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2090 if (hat_share(seg->s_as->a_hat, seg->s_base,
2091 curspt->a_hat, sptseg->s_base,
2092 sptseg->s_size, sptseg->s_szc) != 0) {
2093 panic("hat_share error in ISM fault");
2094 /*NOTREACHED*/
2095 }
2096 return (0);
2097 }
2098 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2099
2100 /*
2101 * I see no need to lock the real seg
2102 * here, because all of our work will be on the underlying
2103 * dummy seg.
2104 *
2105 * sptseg_addr and npages now account for large pages.
2106 */
2107 amp = sptd->spt_amp;
2108 ASSERT(amp != NULL);
2109 anon_index = seg_page(sptseg, sptseg_addr);
2110
2111 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2112 for (i = 0; i < npages; i++) {
2113 anon_array_enter(amp, anon_index, &cookie);
2114 ap = anon_get_ptr(amp->ahp, anon_index++);
2115 ASSERT(ap != NULL);
2116 swap_xlate(ap, &vp, &offset);
2117 anon_array_exit(&cookie);
2118 pp = page_lookup(vp, offset, SE_SHARED);
2119 ASSERT(pp != NULL);
2120 ppa[i] = pp;
2121 }
2122 ANON_LOCK_EXIT(&amp->a_rwlock);
2123 ASSERT(i == npages);
2124
2125 /*
2126 * We are already holding the as->a_lock on the user's
2127 * real segment, but we need to hold the a_lock on the
2128 * underlying dummy as. This is mostly to satisfy the
2129 * underlying HAT layer.
2130 */
2131 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2132 a = sptseg_addr;
2133 pidx = 0;
2134 if (type == F_SOFTLOCK) {
2135 /*
2136 * Load up the translation keeping it
2137 * locked and don't unlock the page.
2138 */
2139 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2140 sz = MIN(pgsz, ptob(npages - pidx));
2141 hat_memload_array(sptseg->s_as->a_hat, a,
2142 sz, &ppa[pidx], sptd->spt_prot,
2143 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2144 }
2145 } else {
2146 if (hat == seg->s_as->a_hat) {
2147
2148 /*
2149 * Migrate pages marked for migration.
2150 */
2151 if (lgrp_optimizations())
2152 page_migrate(seg, shm_addr, ppa,
2153 npages);
2154
2155 /* CPU HAT */
2156 for (; pidx < npages;
2157 a += pgsz, pidx += pgcnt) {
2158 sz = MIN(pgsz, ptob(npages - pidx));
2159 hat_memload_array(sptseg->s_as->a_hat,
2160 a, sz, &ppa[pidx],
2161 sptd->spt_prot, HAT_LOAD_SHARE);
2162 }
2163 } else {
2164 /* XHAT. Pass real address */
2165 hat_memload_array(hat, shm_addr,
2166 ptob(npages), ppa, sptd->spt_prot,
2167 HAT_LOAD_SHARE);
2168 }
2169
2170 /*
2171 * And now drop the SE_SHARED lock(s).
2172 */
2173 for (i = 0; i < npages; i++)
2174 page_unlock(ppa[i]);
2175 }
2176 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2177
2178 kmem_free(ppa, sizeof (page_t *) * npages);
2179 return (0);
2180 case F_SOFTUNLOCK:
2181
2182 /*
2183 * This is a bit ugly, we pass in the real seg pointer,
2184 * but the sptseg_addr is the virtual address within the
2185 * dummy seg.
2186 */
2187 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2188 return (0);
2189
2190 case F_PROT:
2191
2192 /*
2193 * This takes care of the unusual case where a user
2194 * allocates a stack in shared memory and a register
2195 * window overflow is written to that stack page before
2196 * it is otherwise modified.
2197 *
2198 * We can get away with this because ISM segments are
2199 * always rw. Other than this unusual case, there
2200 * should be no instances of protection violations.
2201 */
2202 return (0);
2203
2204 default:
2205 #ifdef DEBUG
2206 cmn_err(CE_WARN, "segspt_shmfault default type?");
2207 #endif
2208 return (FC_NOMAP);
2209 }
2210 }
2211
2212 /*ARGSUSED*/
2213 static faultcode_t
2214 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2215 {
2216 return (0);
2217 }
2218
2219 /*ARGSUSED*/
2220 static int
2221 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2222 {
2223 return (0);
2224 }
2225
2226 /*ARGSUSED*/
2227 static size_t
2228 segspt_shmswapout(struct seg *seg)
2229 {
2230 return (0);
2231 }
2232
2233 /*
2234 * duplicate the shared page tables
2235 */
2236 int
2237 segspt_shmdup(struct seg *seg, struct seg *newseg)
2238 {
2239 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2240 struct anon_map *amp = shmd->shm_amp;
2241 struct shm_data *shmd_new;
2242 struct seg *spt_seg = shmd->shm_sptseg;
2243 struct spt_data *sptd = spt_seg->s_data;
2244 int error = 0;
2245
2246 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2247
2248 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2249 newseg->s_data = (void *)shmd_new;
2250 shmd_new->shm_sptas = shmd->shm_sptas;
2251 shmd_new->shm_amp = amp;
2252 shmd_new->shm_sptseg = shmd->shm_sptseg;
2253 newseg->s_ops = &segspt_shmops;
2254 newseg->s_szc = seg->s_szc;
2255 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2256
2257 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2258 amp->refcnt++;
2259 ANON_LOCK_EXIT(&amp->a_rwlock);
2260
2261 if (sptd->spt_flags & SHM_PAGEABLE) {
2262 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2263 shmd_new->shm_lckpgs = 0;
2264 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2265 if ((error = hat_share(newseg->s_as->a_hat,
2266 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2267 seg->s_size, seg->s_szc)) != 0) {
2268 kmem_free(shmd_new->shm_vpage,
2269 btopr(amp->size));
2270 }
2271 }
2272 return (error);
2273 } else {
2274 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2275 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2276 seg->s_szc));
2277
2278 }
2279 }
2280
2281 /*ARGSUSED*/
2282 int
2283 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2284 {
2285 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2286 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2287
2288 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2289
2290 /*
2291 * ISM segment is always rw.
2292 */
2293 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2294 }
2295
2296 /*
2297 * Return an array of locked large pages; for empty slots, allocate
2298 * private zero-filled anon pages.
2299 */
2300 static int
2301 spt_anon_getpages(
2302 struct seg *sptseg,
2303 caddr_t sptaddr,
2304 size_t len,
2305 page_t *ppa[])
2306 {
2307 struct spt_data *sptd = sptseg->s_data;
2308 struct anon_map *amp = sptd->spt_amp;
2309 enum seg_rw rw = sptd->spt_prot;
2310 uint_t szc = sptseg->s_szc;
2311 size_t pg_sz, share_sz = page_get_pagesize(szc);
2312 pgcnt_t lp_npgs;
2313 caddr_t lp_addr, e_sptaddr;
2314 uint_t vpprot, ppa_szc = 0;
2315 struct vpage *vpage = NULL;
2316 ulong_t j, ppa_idx;
2317 int err, ierr = 0;
2318 pgcnt_t an_idx;
2319 anon_sync_obj_t cookie;
2320
2321 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2322 ASSERT(len != 0);
2323
2324 pg_sz = share_sz;
2325 lp_npgs = btop(pg_sz);
2326 lp_addr = sptaddr;
2327 e_sptaddr = sptaddr + len;
2328 an_idx = seg_page(sptseg, sptaddr);
2329 ppa_idx = 0;
2330
2331 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2332 /*CONSTCOND*/
2333 while (1) {
2334 for (; lp_addr < e_sptaddr;
2335 an_idx += lp_npgs, lp_addr += pg_sz,
2336 ppa_idx += lp_npgs) {
2337
2338 anon_array_enter(amp, an_idx, &cookie);
2339 ppa_szc = (uint_t)-1;
2340 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2341 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2342 &ppa_szc, vpage, rw, 0, segvn_anypgsz, kcred);
2343 anon_array_exit(&cookie);
2344
2345 if (ierr != 0) {
2346 if (ierr > 0) {
2347 err = FC_MAKE_ERR(ierr);
2348 goto lpgs_err;
2349 }
2350 break;
2351 }
2352 }
2353 if (lp_addr == e_sptaddr) {
2354 break;
2355 }
2356 ASSERT(lp_addr < e_sptaddr);
2357
2358 /*
2359 * ierr == -1 means we failed to allocate a large page.
2360 * So do a size down operation.
2361 *
2362 * ierr == -2 means some other process that privately shares
2363 * pages with this process has allocated a larger page and we
2364 * need to retry with larger pages. So do a size up
2365 * operation. This relies on the fact that large pages are
2366 * never partially shared i.e. if we share any constituent
2367 * page of a large page with another process we must share the
2368 * entire large page. Note this cannot happen for SOFTLOCK
2369 * case, unless current address (lp_addr) is at the beginning
2370 * of the next page size boundary because the other process
2371 * couldn't have relocated locked pages.
2372 */
2373 ASSERT(ierr == -1 || ierr == -2);
2374 if (segvn_anypgsz) {
2375 ASSERT(ierr == -2 || szc != 0);
2376 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2377 szc = (ierr == -1) ? szc - 1 : szc + 1;
2378 } else {
2379 /*
2380 * For faults and segvn_anypgsz == 0
2381 * we need to be careful not to loop forever
2382 * if existing page is found with szc other
2383 * than 0 or seg->s_szc. This could be due
2384 * to page relocations on behalf of DR or
2385 * more likely large page creation. For this
2386 * case simply re-size to existing page's szc
2387 * if returned by anon_map_getpages().
2388 */
2389 if (ppa_szc == (uint_t)-1) {
2390 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2391 } else {
2392 ASSERT(ppa_szc <= sptseg->s_szc);
2393 ASSERT(ierr == -2 || ppa_szc < szc);
2394 ASSERT(ierr == -1 || ppa_szc > szc);
2395 szc = ppa_szc;
2396 }
2397 }
2398 pg_sz = page_get_pagesize(szc);
2399 lp_npgs = btop(pg_sz);
2400 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2401 }
2402 ANON_LOCK_EXIT(&amp->a_rwlock);
2403 return (0);
2404
2405 lpgs_err:
2406 ANON_LOCK_EXIT(&amp->a_rwlock);
2407 for (j = 0; j < ppa_idx; j++)
2408 page_unlock(ppa[j]);
2409 return (err);
2410 }
2411
2412 /*
2413 * count the number of bytes in a set of spt pages that are currently not
2414 * locked
2415 */
2416 static rctl_qty_t
2417 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2418 {
2419 ulong_t i;
2420 rctl_qty_t unlocked = 0;
2421
2422 for (i = 0; i < npages; i++) {
2423 if (ppa[i]->p_lckcnt == 0)
2424 unlocked += PAGESIZE;
2425 }
2426 return (unlocked);
2427 }
2428
2429 int
2430 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2431 page_t **ppa, ulong_t *lockmap, size_t pos,
2432 rctl_qty_t *locked)
2433 {
2434 struct shm_data *shmd = seg->s_data;
2435 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2436 ulong_t i;
2437 int kernel;
2438
2439 /* return the number of bytes actually locked */
2440 *locked = 0;
2441 for (i = 0; i < npages; anon_index++, pos++, i++) {
2442 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2443 if (sptd->spt_ppa_lckcnt[anon_index] <
2444 (ushort_t)DISM_LOCK_MAX) {
2445 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2446 (ushort_t)DISM_LOCK_MAX) {
2447 cmn_err(CE_WARN,
2448 "DISM page lock limit "
2449 "reached on DISM offset 0x%lx\n",
2450 anon_index << PAGESHIFT);
2451 }
2452 kernel = (sptd->spt_ppa &&
2453 sptd->spt_ppa[anon_index]) ? 1 : 0;
2454 if (!page_pp_lock(ppa[i], 0, kernel)) {
2455 sptd->spt_ppa_lckcnt[anon_index]--;
2456 return (EAGAIN);
2457 }
2458 /* if this is a newly locked page, count it */
2459 if (ppa[i]->p_lckcnt == 1) {
2460 *locked += PAGESIZE;
2461 }
2462 shmd->shm_lckpgs++;
2463 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2464 if (lockmap != NULL)
2465 BT_SET(lockmap, pos);
2466 }
2467 }
2468 }
2469 return (0);
2470 }
2471
2472 /*ARGSUSED*/
2473 static int
2474 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2475 int attr, int op, ulong_t *lockmap, size_t pos)
2476 {
2477 struct shm_data *shmd = seg->s_data;
2478 struct seg *sptseg = shmd->shm_sptseg;
2479 struct spt_data *sptd = sptseg->s_data;
2480 struct kshmid *sp = sptd->spt_amp->a_sp;
2481 pgcnt_t npages, a_npages;
2482 page_t **ppa;
2483 pgcnt_t an_idx, a_an_idx, ppa_idx;
2484 caddr_t spt_addr, a_addr; /* spt and aligned address */
2485 size_t a_len; /* aligned len */
2486 size_t share_sz;
2487 ulong_t i;
2488 int sts = 0;
2489 rctl_qty_t unlocked = 0;
2490 rctl_qty_t locked = 0;
2491 struct proc *p = curproc;
2492 kproject_t *proj;
2493
2494 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2495 ASSERT(sp != NULL);
2496
2497 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2498 return (0);
2499 }
2500
2501 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2502 an_idx = seg_page(seg, addr);
2503 npages = btopr(len);
2504
2505 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2506 return (ENOMEM);
2507 }
2508
2509 /*
2510 * A shm's project never changes, so no lock needed.
2511 * The shm has a hold on the project, so it will not go away.
2512 * Since we have a mapping to shm within this zone, we know
2513 * that the zone will not go away.
2514 */
2515 proj = sp->shm_perm.ipc_proj;
2516
2517 if (op == MC_LOCK) {
2518
2519 /*
2520 * Need to align addr and size request if they are not
2521 * aligned so we can always allocate large page(s); however,
2522 * we only lock what was requested in the initial request.
2523 */
2524 share_sz = page_get_pagesize(sptseg->s_szc);
2525 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2526 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2527 share_sz);
2528 a_npages = btop(a_len);
2529 a_an_idx = seg_page(seg, a_addr);
2530 spt_addr = sptseg->s_base + ptob(a_an_idx);
2531 ppa_idx = an_idx - a_an_idx;
2532
2533 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2534 KM_NOSLEEP)) == NULL) {
2535 return (ENOMEM);
2536 }
2537
2538 /*
2539 * Don't cache any new pages for IO and
2540 * flush any cached pages.
2541 */
2542 mutex_enter(&sptd->spt_lock);
2543 if (sptd->spt_ppa != NULL)
2544 sptd->spt_flags |= DISM_PPA_CHANGED;
2545
2546 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2547 if (sts != 0) {
2548 mutex_exit(&sptd->spt_lock);
2549 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2550 return (sts);
2551 }
2552
2553 mutex_enter(&sp->shm_mlock);
2554 /* enforce locked memory rctl */
2555 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2556
2557 mutex_enter(&p->p_lock);
2558 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2559 mutex_exit(&p->p_lock);
2560 sts = EAGAIN;
2561 } else {
2562 mutex_exit(&p->p_lock);
2563 sts = spt_lockpages(seg, an_idx, npages,
2564 &ppa[ppa_idx], lockmap, pos, &locked);
2565
2566 /*
2567 * correct locked count if not all pages could be
2568 * locked
2569 */
2570 if ((unlocked - locked) > 0) {
2571 rctl_decr_locked_mem(NULL, proj,
2572 (unlocked - locked), 0);
2573 }
2574 }
2575 /*
2576 * unlock pages
2577 */
2578 for (i = 0; i < a_npages; i++)
2579 page_unlock(ppa[i]);
2580 if (sptd->spt_ppa != NULL)
2581 sptd->spt_flags |= DISM_PPA_CHANGED;
2582 mutex_exit(&sp->shm_mlock);
2583 mutex_exit(&sptd->spt_lock);
2584
2585 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2586
2587 } else if (op == MC_UNLOCK) { /* unlock */
2588 struct anon_map *amp;
2589 struct anon *ap;
2590 struct vnode *vp;
2591 u_offset_t off;
2592 struct page *pp;
2593 int kernel;
2594 anon_sync_obj_t cookie;
2595 rctl_qty_t unlocked = 0;
2596
2597 amp = sptd->spt_amp;
2598 mutex_enter(&sptd->spt_lock);
2599 if (shmd->shm_lckpgs == 0) {
2600 mutex_exit(&sptd->spt_lock);
2601 return (0);
2602 }
2603 /*
2604 * Don't cache new IO pages.
2605 */
2606 if (sptd->spt_ppa != NULL)
2607 sptd->spt_flags |= DISM_PPA_CHANGED;
2608
2609 mutex_enter(&sp->shm_mlock);
2610 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2611 for (i = 0; i < npages; i++, an_idx++) {
2612 if (shmd->shm_vpage[an_idx] & DISM_PG_LOCKED) {
2613 anon_array_enter(amp, an_idx, &cookie);
2614 ap = anon_get_ptr(amp->ahp, an_idx);
2615 ASSERT(ap);
2616
2617 swap_xlate(ap, &vp, &off);
2618 anon_array_exit(&cookie);
2619 pp = page_lookup(vp, off, SE_SHARED);
2620 ASSERT(pp);
2621 /*
2622 * The availrmem is decremented only for
2623 * pages which are not in seg pcache;
2624 * for pages in seg pcache availrmem was
2625 * decremented in _dismpagelock() (if
2626 * they were not locked here).
2627 */
2628 kernel = (sptd->spt_ppa &&
2629 sptd->spt_ppa[an_idx]) ? 1 : 0;
2630 ASSERT(pp->p_lckcnt > 0);
2631 page_pp_unlock(pp, 0, kernel);
2632 if (pp->p_lckcnt == 0)
2633 unlocked += PAGESIZE;
2634 page_unlock(pp);
2635 shmd->shm_vpage[an_idx] &= ~DISM_PG_LOCKED;
2636 sptd->spt_ppa_lckcnt[an_idx]--;
2637 shmd->shm_lckpgs--;
2638 }
2639 }
2640 ANON_LOCK_EXIT(&amp->a_rwlock);
2641 if (sptd->spt_ppa != NULL)
2642 sptd->spt_flags |= DISM_PPA_CHANGED;
2643 mutex_exit(&sptd->spt_lock);
2644
2645 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2646 mutex_exit(&sp->shm_mlock);
2647 }
2648 return (sts);
2649 }
2650
2651 /*ARGSUSED*/
2652 int
2653 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2654 {
2655 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2656 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2657 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2658
2659 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2660
2661 /*
2662 * ISM segment is always rw.
2663 */
2664 while (--pgno >= 0)
2665 *protv++ = sptd->spt_prot;
2666 return (0);
2667 }
2668
2669 /*ARGSUSED*/
2670 u_offset_t
2671 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2672 {
2673 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2674
2675 /* Offset does not matter in ISM memory */
2676
2677 return ((u_offset_t)0);
2678 }
2679
2680 /* ARGSUSED */
2681 int
2682 segspt_shmgettype(struct seg *seg, caddr_t addr)
2683 {
2684 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2685 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2686
2687 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2688
2689 /*
2690 * The shared memory mapping is always MAP_SHARED; swap is only
2691 * reserved for DISM.
2692 */
2693 return (MAP_SHARED |
2694 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2695 }
2696
2697 /*ARGSUSED*/
2698 int
2699 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2700 {
2701 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2702 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2703
2704 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2705
2706 *vpp = sptd->spt_vp;
2707 return (0);
2708 }
2709
2710 /*ARGSUSED*/
2711 static int
2712 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2713 {
2714 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2715 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2716 struct anon_map *amp;
2717 pgcnt_t pg_idx;
2718
2719 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2720
2721 if (behav == MADV_FREE) {
2722 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2723 return (0);
2724
2725 amp = sptd->spt_amp;
2726 pg_idx = seg_page(seg, addr);
2727
2728 mutex_enter(&sptd->spt_lock);
2729 if (sptd->spt_ppa != NULL)
2730 sptd->spt_flags |= DISM_PPA_CHANGED;
2731 mutex_exit(&sptd->spt_lock);
2732
2733 /*
2734 * Purge all DISM cached pages
2735 */
2736 seg_ppurge_seg(segspt_reclaim);
2737
2738 mutex_enter(&sptd->spt_lock);
2739 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2740 anon_disclaim(amp, pg_idx, len, ANON_PGLOOKUP_BLK);
2741 ANON_LOCK_EXIT(&amp->a_rwlock);
2742 mutex_exit(&sptd->spt_lock);
2743 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2744 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2745 int already_set;
2746 ulong_t anon_index;
2747 lgrp_mem_policy_t policy;
2748 caddr_t shm_addr;
2749 size_t share_size;
2750 size_t size;
2751 struct seg *sptseg = shmd->shm_sptseg;
2752 caddr_t sptseg_addr;
2753
2754 /*
2755 * Align address and length to page size of underlying segment
2756 */
2757 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2758 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2759 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2760 share_size);
2761
2762 amp = shmd->shm_amp;
2763 anon_index = seg_page(seg, shm_addr);
2764
2765 /*
2766 * And now we may have to adjust size downward if we have
2767 * exceeded the realsize of the segment or initial anon
2768 * allocations.
2769 */
2770 sptseg_addr = sptseg->s_base + ptob(anon_index);
2771 if ((sptseg_addr + size) >
2772 (sptseg->s_base + sptd->spt_realsize))
2773 size = (sptseg->s_base + sptd->spt_realsize) -
2774 sptseg_addr;
2775
2776 /*
2777 * Set memory allocation policy for this segment
2778 */
2779 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2780 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2781 NULL, 0, len);
2782
2783 /*
2784 * If random memory allocation policy set already,
2785 * don't bother reapplying it.
2786 */
2787 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2788 return (0);
2789
2790 /*
2791 * Mark any existing pages in the given range for
2792 * migration, flushing the I/O page cache and using the
2793 * underlying segment to calculate the anon index and to
2794 * get the anonmap and vnode pointer.
2795 */
2796 if (shmd->shm_softlockcnt > 0)
2797 segspt_purge(seg);
2798
2799 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2800 }
2801
2802 return (0);
2803 }
2804
2805 /*ARGSUSED*/
2806 void
2807 segspt_shmdump(struct seg *seg)
2808 {
2809 /* no-op for ISM segment */
2810 }
2811
2812 /*ARGSUSED*/
2813 static faultcode_t
2814 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
2815 {
2816 return (ENOTSUP);
2817 }
2818
2819 /*
2820 * get a memory ID for an addr in a given segment
2821 */
2822 static int
2823 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2824 {
2825 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2826 struct anon *ap;
2827 size_t anon_index;
2828 struct anon_map *amp = shmd->shm_amp;
2829 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2830 struct seg *sptseg = shmd->shm_sptseg;
2831 anon_sync_obj_t cookie;
2832
2833 anon_index = seg_page(seg, addr);
2834
2835 if (addr > (seg->s_base + sptd->spt_realsize)) {
2836 return (EFAULT);
2837 }
2838
2839 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2840 anon_array_enter(amp, anon_index, &cookie);
2841 ap = anon_get_ptr(amp->ahp, anon_index);
2842 if (ap == NULL) {
2843 struct page *pp;
2844 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
2845
2846 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
2847 if (pp == NULL) {
2848 anon_array_exit(&cookie);
2849 ANON_LOCK_EXIT(&amp->a_rwlock);
2850 return (ENOMEM);
2851 }
2852 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
2853 page_unlock(pp);
2854 }
2855 anon_array_exit(&cookie);
2856 ANON_LOCK_EXIT(&amp->a_rwlock);
2857 memidp->val[0] = (uintptr_t)ap;
2858 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
2859 return (0);
2860 }
2861
2862 /*
2863 * Get memory allocation policy info for specified address in given segment
2864 */
2865 static lgrp_mem_policy_info_t *
2866 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
2867 {
2868 struct anon_map *amp;
2869 ulong_t anon_index;
2870 lgrp_mem_policy_info_t *policy_info;
2871 struct shm_data *shm_data;
2872
2873 ASSERT(seg != NULL);
2874
2875 /*
2876 * Get anon_map from segshm
2877 *
2878 * Assume that no lock needs to be held on anon_map, since
2879 * it should be protected by its reference count, which must be
2880 * nonzero for an existing segment.
2881 * We do need to grab the readers lock on the policy tree, though.
2882 */
2883 shm_data = (struct shm_data *)seg->s_data;
2884 if (shm_data == NULL)
2885 return (NULL);
2886 amp = shm_data->shm_amp;
2887 ASSERT(amp->refcnt != 0);
2888
2889 /*
2890 * Get policy info
2891 *
2892 * Assume starting anon index of 0
2893 */
2894 anon_index = seg_page(seg, addr);
2895 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
2896
2897 return (policy_info);
2898 }
2899
2900 /*ARGSUSED*/
2901 static int
2902 segspt_shmcapable(struct seg *seg, segcapability_t capability)
2903 {
2904 return (0);
2905 }
2906
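/*
 * Usage sketch (illustrative only; not part of the original source):
 * the fault paths above are driven by shmat(2).  Passing SHM_SHARE_MMU
 * to shmat() requests an ISM attach, which is serviced by
 * segspt_shmfault(); passing SHM_PAGEABLE requests a DISM attach,
 * which is routed to segspt_dismfault().  A minimal user-level sketch,
 * assuming the standard Solaris <sys/shm.h> definitions, would look
 * roughly like:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 64UL << 20, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, SHM_SHARE_MMU);
 *		(use SHM_PAGEABLE instead of SHM_SHARE_MMU for DISM)
 *
 * For a DISM attach, mlock()/memcntl() requests on the mapping reach
 * segspt_shmlockop() above, where the locked-memory resource controls
 * are enforced via rctl_incr_locked_mem().
 */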