/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/param.h>
#include <sys/user.h>
#include <sys/mman.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/tuneable.h>
#include <vm/hat.h>
#include <vm/seg.h>
#include <vm/as.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <sys/buf.h>
#include <sys/swap.h>
#include <sys/atomic.h>
#include <vm/seg_spt.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/shm.h>
#include <sys/shm_impl.h>
#include <sys/lgrp.h>
#include <sys/vmsystm.h>
#include <sys/policy.h>
#include <sys/project.h>
#include <sys/tnf_probe.h>
#include <sys/zone.h>

#define	SEGSPTADDR	(caddr_t)0x0

/*
 * # pages used for spt
 */
size_t	spt_used;

/*
 * segspt_minfree is the memory left for the system after ISM
 * has locked its pages; it is set to 5% of availrmem in
 * sptcreate when ISM is created. ISM should not use more
 * than ~90% of availrmem; if it does, then the performance
 * of the system may decrease. Machines with large memories may
 * be able to use more memory for ISM, so we set the default
 * segspt_minfree to 5% (which gives ISM at most 95% of availrmem).
 * If somebody wants even more memory for ISM (risking hanging
 * the system) they can patch segspt_minfree to a smaller number.
73 */ 74 pgcnt_t segspt_minfree = 0; 75 76 static int segspt_create(struct seg *seg, caddr_t argsp); 77 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize); 78 static void segspt_free(struct seg *seg); 79 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len); 80 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr); 81 82 static void 83 segspt_badop() 84 { 85 panic("segspt_badop called"); 86 /*NOTREACHED*/ 87 } 88 89 #define SEGSPT_BADOP(t) (t(*)())segspt_badop 90 91 struct seg_ops segspt_ops = { 92 SEGSPT_BADOP(int), /* dup */ 93 segspt_unmap, 94 segspt_free, 95 SEGSPT_BADOP(int), /* fault */ 96 SEGSPT_BADOP(faultcode_t), /* faulta */ 97 SEGSPT_BADOP(int), /* setprot */ 98 SEGSPT_BADOP(int), /* checkprot */ 99 SEGSPT_BADOP(int), /* kluster */ 100 SEGSPT_BADOP(size_t), /* swapout */ 101 SEGSPT_BADOP(int), /* sync */ 102 SEGSPT_BADOP(size_t), /* incore */ 103 SEGSPT_BADOP(int), /* lockop */ 104 SEGSPT_BADOP(int), /* getprot */ 105 SEGSPT_BADOP(u_offset_t), /* getoffset */ 106 SEGSPT_BADOP(int), /* gettype */ 107 SEGSPT_BADOP(int), /* getvp */ 108 SEGSPT_BADOP(int), /* advise */ 109 SEGSPT_BADOP(void), /* dump */ 110 SEGSPT_BADOP(int), /* pagelock */ 111 SEGSPT_BADOP(int), /* setpgsz */ 112 SEGSPT_BADOP(int), /* getmemid */ 113 segspt_getpolicy, /* getpolicy */ 114 SEGSPT_BADOP(int), /* capable */ 115 }; 116 117 static int segspt_shmdup(struct seg *seg, struct seg *newseg); 118 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize); 119 static void segspt_shmfree(struct seg *seg); 120 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg, 121 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw); 122 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr); 123 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr, 124 register size_t len, register uint_t prot); 125 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, 126 uint_t prot); 127 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta); 128 static size_t segspt_shmswapout(struct seg *seg); 129 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, 130 register char *vec); 131 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len, 132 int attr, uint_t flags); 133 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len, 134 int attr, int op, ulong_t *lockmap, size_t pos); 135 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, 136 uint_t *protv); 137 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr); 138 static int segspt_shmgettype(struct seg *seg, caddr_t addr); 139 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp); 140 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, 141 uint_t behav); 142 static void segspt_shmdump(struct seg *seg); 143 static int segspt_shmpagelock(struct seg *, caddr_t, size_t, 144 struct page ***, enum lock_type, enum seg_rw); 145 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t); 146 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *); 147 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t); 148 static int segspt_shmcapable(struct seg *, segcapability_t); 149 150 struct seg_ops segspt_shmops = { 151 segspt_shmdup, 152 segspt_shmunmap, 153 segspt_shmfree, 154 segspt_shmfault, 155 segspt_shmfaulta, 156 segspt_shmsetprot, 157 
segspt_shmcheckprot, 158 segspt_shmkluster, 159 segspt_shmswapout, 160 segspt_shmsync, 161 segspt_shmincore, 162 segspt_shmlockop, 163 segspt_shmgetprot, 164 segspt_shmgetoffset, 165 segspt_shmgettype, 166 segspt_shmgetvp, 167 segspt_shmadvise, /* advise */ 168 segspt_shmdump, 169 segspt_shmpagelock, 170 segspt_shmsetpgsz, 171 segspt_shmgetmemid, 172 segspt_shmgetpolicy, 173 segspt_shmcapable, 174 }; 175 176 static void segspt_purge(struct seg *seg); 177 static int segspt_reclaim(struct seg *, caddr_t, size_t, struct page **, 178 enum seg_rw); 179 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len, 180 page_t **ppa); 181 182 183 184 /*ARGSUSED*/ 185 int 186 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp, 187 uint_t prot, uint_t flags, uint_t share_szc) 188 { 189 int err; 190 struct as *newas; 191 struct segspt_crargs sptcargs; 192 193 #ifdef DEBUG 194 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */, 195 tnf_ulong, size, size ); 196 #endif 197 if (segspt_minfree == 0) /* leave min 5% of availrmem for */ 198 segspt_minfree = availrmem/20; /* for the system */ 199 200 if (!hat_supported(HAT_SHARED_PT, (void *)0)) 201 return (EINVAL); 202 203 /* 204 * get a new as for this shared memory segment 205 */ 206 newas = as_alloc(); 207 newas->a_proc = NULL; 208 sptcargs.amp = amp; 209 sptcargs.prot = prot; 210 sptcargs.flags = flags; 211 sptcargs.szc = share_szc; 212 /* 213 * create a shared page table (spt) segment 214 */ 215 216 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) { 217 as_free(newas); 218 return (err); 219 } 220 *sptseg = sptcargs.seg_spt; 221 return (0); 222 } 223 224 void 225 sptdestroy(struct as *as, struct anon_map *amp) 226 { 227 228 #ifdef DEBUG 229 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */); 230 #endif 231 (void) as_unmap(as, SEGSPTADDR, amp->size); 232 as_free(as); 233 } 234 235 /* 236 * called from seg_free(). 237 * free (i.e., unlock, unmap, return to free list) 238 * all the pages in the given seg. 239 */ 240 void 241 segspt_free(struct seg *seg) 242 { 243 struct spt_data *sptd = (struct spt_data *)seg->s_data; 244 245 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 246 247 if (sptd != NULL) { 248 if (sptd->spt_realsize) 249 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize); 250 251 if (sptd->spt_ppa_lckcnt) 252 kmem_free(sptd->spt_ppa_lckcnt, 253 sizeof (*sptd->spt_ppa_lckcnt) 254 * btopr(sptd->spt_amp->size)); 255 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp)); 256 mutex_destroy(&sptd->spt_lock); 257 kmem_free(sptd, sizeof (*sptd)); 258 } 259 } 260 261 /*ARGSUSED*/ 262 static int 263 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr, 264 uint_t flags) 265 { 266 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 267 268 return (0); 269 } 270 271 /*ARGSUSED*/ 272 static size_t 273 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec) 274 { 275 caddr_t eo_seg; 276 pgcnt_t npages; 277 struct shm_data *shmd = (struct shm_data *)seg->s_data; 278 struct seg *sptseg; 279 struct spt_data *sptd; 280 281 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 282 #ifdef lint 283 seg = seg; 284 #endif 285 sptseg = shmd->shm_sptseg; 286 sptd = sptseg->s_data; 287 288 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 289 eo_seg = addr + len; 290 while (addr < eo_seg) { 291 /* page exists, and it's locked. 
			 */
			*vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
			    SEG_PAGE_ANON;
			addr += PAGESIZE;
		}
		return (len);
	} else {
		struct anon_map *amp = shmd->shm_amp;
		struct anon *ap;
		page_t *pp;
		pgcnt_t anon_index;
		struct vnode *vp;
		u_offset_t off;
		ulong_t i;
		int ret;
		anon_sync_obj_t cookie;

		addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
		anon_index = seg_page(seg, addr);
		npages = btopr(len);
		if (anon_index + npages > btopr(shmd->shm_amp->size)) {
			return (EINVAL);
		}
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, anon_index++) {
			ret = 0;
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
			if (ap != NULL) {
				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup_nowait(vp, off, SE_SHARED);
				if (pp != NULL) {
					ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
					page_unlock(pp);
				}
			} else {
				anon_array_exit(&cookie);
			}
			if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
				ret |= SEG_PAGE_LOCKED;
			}
			*vec++ = (char)ret;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		return (len);
	}
}

static int
segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
{
	size_t share_size;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * seg.s_size may have been rounded up to the largest page size
	 * in shmat().
	 * XXX This should be cleaned up. sptdestroy should take a length
	 * argument which should be the same as sptcreate. Then
	 * this rounding would not be needed (or is done in shm.c).
	 * Only the check for the full segment will be needed.
	 *
	 * XXX -- shouldn't raddr == 0 always? These tests don't seem
	 * to be useful at all.
	 */
	share_size = page_get_pagesize(seg->s_szc);
	ssize = P2ROUNDUP(ssize, share_size);

	if (raddr == seg->s_base && ssize == seg->s_size) {
		seg_free(seg);
		return (0);
	} else
		return (EINVAL);
}

int
segspt_create(struct seg *seg, caddr_t argsp)
{
	int err;
	caddr_t addr = seg->s_base;
	struct spt_data *sptd;
	struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
	struct anon_map *amp = sptcargs->amp;
	struct kshmid *sp = amp->a_sp;
	struct cred *cred = CRED();
	ulong_t i, j, anon_index = 0;
	pgcnt_t npages = btopr(amp->size);
	struct vnode *vp;
	page_t **ppa;
	uint_t hat_flags;
	size_t pgsz;
	pgcnt_t pgcnt;
	caddr_t a;
	pgcnt_t pidx;
	size_t sz;
	proc_t *procp = curproc;
	rctl_qty_t lockedbytes = 0;
	kproject_t *proj;

	/*
	 * We are holding the a_lock on the underlying dummy as,
	 * so we can make calls to the HAT layer.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(sp != NULL);

#ifdef DEBUG
	TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
	    tnf_opaque, addr, addr,
	    tnf_ulong, len, seg->s_size);
#endif
	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
		if (err = anon_swap_adjust(npages))
			return (err);
	}
	err = ENOMEM;

	if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
		goto out1;

	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
		    KM_NOSLEEP)) == NULL)
			goto out2;
	}

	mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);

	if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
		goto out3;

	seg->s_ops = &segspt_ops;
	sptd->spt_vp = vp;
	sptd->spt_amp = amp;
	sptd->spt_prot = sptcargs->prot;
	sptd->spt_flags = sptcargs->flags;
	seg->s_data = (caddr_t)sptd;
	sptd->spt_ppa = NULL;
	sptd->spt_ppa_lckcnt = NULL;
	seg->s_szc = sptcargs->szc;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	if (seg->s_szc > amp->a_szc) {
		amp->a_szc = seg->s_szc;
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);

	/*
	 * Set policy to affect initial allocation of pages in
	 * anon_map_createpages()
	 */
	(void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
	    NULL, 0, ptob(npages));

	if (sptcargs->flags & SHM_PAGEABLE) {
		size_t share_sz;
		pgcnt_t new_npgs, more_pgs;
		struct anon_hdr *nahp;
		zone_t *zone;

		share_sz = page_get_pagesize(seg->s_szc);
		if (!IS_P2ALIGNED(amp->size, share_sz)) {
			/*
			 * We are rounding up the size of the anon array
			 * to a 4 M boundary because we always create 4 M
			 * of pages when locking and faulting pages; that
			 * way we don't have to check for all the corner
			 * cases, e.g. whether there is enough space to
			 * allocate a 4 M page.
			 */
			new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
			more_pgs = new_npgs - npages;

			/*
			 * The zone will never be NULL, as a fully created
			 * shm always has an owning zone.
			 */
			zone = sp->shm_perm.ipc_zone;
			ASSERT(zone != NULL);
			if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
				err = ENOMEM;
				goto out4;
			}

			nahp = anon_create(new_npgs, ANON_SLEEP);
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
			    ANON_SLEEP);
			anon_release(amp->ahp, npages);
			amp->ahp = nahp;
			ASSERT(amp->swresv == ptob(npages));
			amp->swresv = amp->size = ptob(new_npgs);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			npages = new_npgs;
		}

		sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
		    sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
		sptd->spt_pcachecnt = 0;
		sptd->spt_realsize = ptob(npages);
		sptcargs->seg_spt = seg;
		return (0);
	}

	/*
	 * get array of pages for each anon slot in amp
	 */
	if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
	    seg, addr, S_CREATE, cred)) != 0)
		goto out4;

	mutex_enter(&sp->shm_mlock);

	/* May be partially locked, so, count bytes to charge for locking */
	for (i = 0; i < npages; i++)
		if (ppa[i]->p_lckcnt == 0)
			lockedbytes += PAGESIZE;

	proj = sp->shm_perm.ipc_proj;

	if (lockedbytes > 0) {
		mutex_enter(&procp->p_lock);
		if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
			mutex_exit(&procp->p_lock);
			mutex_exit(&sp->shm_mlock);
			for (i = 0; i < npages; i++)
				page_unlock(ppa[i]);
			err = ENOMEM;
			goto out4;
		}
		mutex_exit(&procp->p_lock);
	}

	/*
	 * addr is initial address corresponding to the first page on ppa list
	 */
	for (i = 0; i < npages; i++) {
		/* attempt to lock all pages */
		if (page_pp_lock(ppa[i], 0, 1) == 0) {
			/*
			 * if unable to lock any page, unlock all
			 * of them and return error
			 */
			for (j = 0; j < i; j++)
				page_pp_unlock(ppa[j], 0, 1);
			for (i = 0; i < npages; i++)
				page_unlock(ppa[i]);
			rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
			mutex_exit(&sp->shm_mlock);
			err = ENOMEM;
			goto out4;
		}
	}
	mutex_exit(&sp->shm_mlock);

	/*
	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
	 * for the entire life of the segment. For example, platforms
	 * that do not support Dynamic Reconfiguration.
	 */
	hat_flags = HAT_LOAD_SHARE;
	if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
		hat_flags |= HAT_LOAD_LOCK;

	/*
	 * Load translations one large page at a time
	 * to make sure we don't create mappings bigger than
	 * the segment's size code, in case the underlying pages
	 * are shared with a segvn segment that uses a bigger
	 * size code than we do.
	 */
	pgsz = page_get_pagesize(seg->s_szc);
	pgcnt = page_get_pagecnt(seg->s_szc);
	for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
		sz = MIN(pgsz, ptob(npages - pidx));
		hat_memload_array(seg->s_as->a_hat, a, sz,
		    &ppa[pidx], sptd->spt_prot, hat_flags);
	}

	/*
	 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
	 * we will leave the pages locked SE_SHARED for the life
	 * of the ISM segment. This will prevent any calls to
	 * hat_pageunload() on this ISM segment for those platforms.
	 */
	if (!(hat_flags & HAT_LOAD_LOCK)) {
		/*
		 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
		 * we no longer need to hold the SE_SHARED lock on the pages,
		 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
		 * SE_SHARED lock on the pages as necessary.
584 */ 585 for (i = 0; i < npages; i++) 586 page_unlock(ppa[i]); 587 } 588 sptd->spt_pcachecnt = 0; 589 kmem_free(ppa, ((sizeof (page_t *)) * npages)); 590 sptd->spt_realsize = ptob(npages); 591 atomic_add_long(&spt_used, npages); 592 sptcargs->seg_spt = seg; 593 return (0); 594 595 out4: 596 seg->s_data = NULL; 597 kmem_free(vp, sizeof (*vp)); 598 out3: 599 mutex_destroy(&sptd->spt_lock); 600 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 601 kmem_free(ppa, (sizeof (*ppa) * npages)); 602 out2: 603 kmem_free(sptd, sizeof (*sptd)); 604 out1: 605 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 606 anon_swap_restore(npages); 607 return (err); 608 } 609 610 /*ARGSUSED*/ 611 void 612 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len) 613 { 614 struct page *pp; 615 struct spt_data *sptd = (struct spt_data *)seg->s_data; 616 pgcnt_t npages; 617 ulong_t anon_idx; 618 struct anon_map *amp; 619 struct anon *ap; 620 struct vnode *vp; 621 u_offset_t off; 622 uint_t hat_flags; 623 int root = 0; 624 pgcnt_t pgs, curnpgs = 0; 625 page_t *rootpp; 626 rctl_qty_t unlocked_bytes = 0; 627 kproject_t *proj; 628 kshmid_t *sp; 629 630 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 631 632 len = P2ROUNDUP(len, PAGESIZE); 633 634 npages = btop(len); 635 636 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP; 637 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) || 638 (sptd->spt_flags & SHM_PAGEABLE)) { 639 hat_flags = HAT_UNLOAD_UNMAP; 640 } 641 642 hat_unload(seg->s_as->a_hat, addr, len, hat_flags); 643 644 amp = sptd->spt_amp; 645 if (sptd->spt_flags & SHM_PAGEABLE) 646 npages = btop(amp->size); 647 648 ASSERT(amp != NULL); 649 650 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 651 sp = amp->a_sp; 652 proj = sp->shm_perm.ipc_proj; 653 mutex_enter(&sp->shm_mlock); 654 } 655 for (anon_idx = 0; anon_idx < npages; anon_idx++) { 656 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 657 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) { 658 panic("segspt_free_pages: null app"); 659 /*NOTREACHED*/ 660 } 661 } else { 662 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx)) 663 == NULL) 664 continue; 665 } 666 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0); 667 swap_xlate(ap, &vp, &off); 668 669 /* 670 * If this platform supports HAT_DYNAMIC_ISM_UNMAP, 671 * the pages won't be having SE_SHARED lock at this 672 * point. 673 * 674 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 675 * the pages are still held SE_SHARED locked from the 676 * original segspt_create() 677 * 678 * Our goal is to get SE_EXCL lock on each page, remove 679 * permanent lock on it and invalidate the page. 680 */ 681 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 682 if (hat_flags == HAT_UNLOAD_UNMAP) 683 pp = page_lookup(vp, off, SE_EXCL); 684 else { 685 if ((pp = page_find(vp, off)) == NULL) { 686 panic("segspt_free_pages: " 687 "page not locked"); 688 /*NOTREACHED*/ 689 } 690 if (!page_tryupgrade(pp)) { 691 page_unlock(pp); 692 pp = page_lookup(vp, off, SE_EXCL); 693 } 694 } 695 if (pp == NULL) { 696 panic("segspt_free_pages: " 697 "page not in the system"); 698 /*NOTREACHED*/ 699 } 700 ASSERT(pp->p_lckcnt > 0); 701 page_pp_unlock(pp, 0, 1); 702 if (pp->p_lckcnt == 0) 703 unlocked_bytes += PAGESIZE; 704 } else { 705 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL) 706 continue; 707 } 708 /* 709 * It's logical to invalidate the pages here as in most cases 710 * these were created by segspt. 
711 */ 712 if (pp->p_szc != 0) { 713 if (root == 0) { 714 ASSERT(curnpgs == 0); 715 root = 1; 716 rootpp = pp; 717 pgs = curnpgs = page_get_pagecnt(pp->p_szc); 718 ASSERT(pgs > 1); 719 ASSERT(IS_P2ALIGNED(pgs, pgs)); 720 ASSERT(!(page_pptonum(pp) & (pgs - 1))); 721 curnpgs--; 722 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) { 723 ASSERT(curnpgs == 1); 724 ASSERT(page_pptonum(pp) == 725 page_pptonum(rootpp) + (pgs - 1)); 726 page_destroy_pages(rootpp); 727 root = 0; 728 curnpgs = 0; 729 } else { 730 ASSERT(curnpgs > 1); 731 ASSERT(page_pptonum(pp) == 732 page_pptonum(rootpp) + (pgs - curnpgs)); 733 curnpgs--; 734 } 735 } else { 736 if (root != 0 || curnpgs != 0) { 737 panic("segspt_free_pages: bad large page"); 738 /*NOTREACHED*/ 739 } 740 /*LINTED: constant in conditional context */ 741 VN_DISPOSE(pp, B_INVAL, 0, kcred); 742 } 743 } 744 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 745 if (unlocked_bytes > 0) 746 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0); 747 mutex_exit(&sp->shm_mlock); 748 } 749 if (root != 0 || curnpgs != 0) { 750 panic("segspt_free_pages: bad large page"); 751 /*NOTREACHED*/ 752 } 753 754 /* 755 * mark that pages have been released 756 */ 757 sptd->spt_realsize = 0; 758 759 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 760 atomic_add_long(&spt_used, -npages); 761 anon_swap_restore(npages); 762 } 763 } 764 765 /* 766 * Get memory allocation policy info for specified address in given segment 767 */ 768 static lgrp_mem_policy_info_t * 769 segspt_getpolicy(struct seg *seg, caddr_t addr) 770 { 771 struct anon_map *amp; 772 ulong_t anon_index; 773 lgrp_mem_policy_info_t *policy_info; 774 struct spt_data *spt_data; 775 776 ASSERT(seg != NULL); 777 778 /* 779 * Get anon_map from segspt 780 * 781 * Assume that no lock needs to be held on anon_map, since 782 * it should be protected by its reference count which must be 783 * nonzero for an existing segment 784 * Need to grab readers lock on policy tree though 785 */ 786 spt_data = (struct spt_data *)seg->s_data; 787 if (spt_data == NULL) 788 return (NULL); 789 amp = spt_data->spt_amp; 790 ASSERT(amp->refcnt != 0); 791 792 /* 793 * Get policy info 794 * 795 * Assume starting anon index of 0 796 */ 797 anon_index = seg_page(seg, addr); 798 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0); 799 800 return (policy_info); 801 } 802 803 /* 804 * DISM only. 805 * Return locked pages over a given range. 806 * 807 * We will cache all DISM locked pages and save the pplist for the 808 * entire segment in the ppa field of the underlying DISM segment structure. 809 * Later, during a call to segspt_reclaim() we will use this ppa array 810 * to page_unlock() all of the pages and then we will free this ppa list. 811 */ 812 /*ARGSUSED*/ 813 static int 814 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len, 815 struct page ***ppp, enum lock_type type, enum seg_rw rw) 816 { 817 struct shm_data *shmd = (struct shm_data *)seg->s_data; 818 struct seg *sptseg = shmd->shm_sptseg; 819 struct spt_data *sptd = sptseg->s_data; 820 pgcnt_t pg_idx, npages, tot_npages, npgs; 821 struct page **pplist, **pl, **ppa, *pp; 822 struct anon_map *amp; 823 spgcnt_t an_idx; 824 int ret = ENOTSUP; 825 uint_t pl_built = 0; 826 struct anon *ap; 827 struct vnode *vp; 828 u_offset_t off; 829 pgcnt_t claim_availrmem = 0; 830 uint_t szc; 831 832 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 833 834 /* 835 * We want to lock/unlock the entire ISM segment. 
Therefore, 836 * we will be using the underlying sptseg and it's base address 837 * and length for the caching arguments. 838 */ 839 ASSERT(sptseg); 840 ASSERT(sptd); 841 842 pg_idx = seg_page(seg, addr); 843 npages = btopr(len); 844 845 /* 846 * check if the request is larger than number of pages covered 847 * by amp 848 */ 849 if (pg_idx + npages > btopr(sptd->spt_amp->size)) { 850 *ppp = NULL; 851 return (ENOTSUP); 852 } 853 854 if (type == L_PAGEUNLOCK) { 855 ASSERT(sptd->spt_ppa != NULL); 856 857 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 858 sptd->spt_ppa, sptd->spt_prot, segspt_reclaim); 859 860 /* 861 * If someone is blocked while unmapping, we purge 862 * segment page cache and thus reclaim pplist synchronously 863 * without waiting for seg_pasync_thread. This speeds up 864 * unmapping in cases where munmap(2) is called, while 865 * raw async i/o is still in progress or where a thread 866 * exits on data fault in a multithreaded application. 867 */ 868 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 869 segspt_purge(seg); 870 } 871 return (0); 872 } else if (type == L_PAGERECLAIM) { 873 ASSERT(sptd->spt_ppa != NULL); 874 (void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size, 875 sptd->spt_ppa, sptd->spt_prot); 876 return (0); 877 } 878 879 if (sptd->spt_flags & DISM_PPA_CHANGED) { 880 segspt_purge(seg); 881 /* 882 * for DISM ppa needs to be rebuild since 883 * number of locked pages could be changed 884 */ 885 *ppp = NULL; 886 return (ENOTSUP); 887 } 888 889 /* 890 * First try to find pages in segment page cache, without 891 * holding the segment lock. 892 */ 893 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 894 sptd->spt_prot); 895 if (pplist != NULL) { 896 ASSERT(sptd->spt_ppa != NULL); 897 ASSERT(sptd->spt_ppa == pplist); 898 ppa = sptd->spt_ppa; 899 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 900 if (ppa[an_idx] == NULL) { 901 seg_pinactive(seg, seg->s_base, 902 sptd->spt_amp->size, ppa, 903 sptd->spt_prot, segspt_reclaim); 904 *ppp = NULL; 905 return (ENOTSUP); 906 } 907 if ((szc = ppa[an_idx]->p_szc) != 0) { 908 npgs = page_get_pagecnt(szc); 909 an_idx = P2ROUNDUP(an_idx + 1, npgs); 910 } else { 911 an_idx++; 912 } 913 } 914 /* 915 * Since we cache the entire DISM segment, we want to 916 * set ppp to point to the first slot that corresponds 917 * to the requested addr, i.e. pg_idx. 918 */ 919 *ppp = &(sptd->spt_ppa[pg_idx]); 920 return (0); 921 } 922 923 /* The L_PAGELOCK case... */ 924 mutex_enter(&sptd->spt_lock); 925 /* 926 * try to find pages in segment page cache with mutex 927 */ 928 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 929 sptd->spt_prot); 930 if (pplist != NULL) { 931 ASSERT(sptd->spt_ppa != NULL); 932 ASSERT(sptd->spt_ppa == pplist); 933 ppa = sptd->spt_ppa; 934 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 935 if (ppa[an_idx] == NULL) { 936 mutex_exit(&sptd->spt_lock); 937 seg_pinactive(seg, seg->s_base, 938 sptd->spt_amp->size, ppa, 939 sptd->spt_prot, segspt_reclaim); 940 *ppp = NULL; 941 return (ENOTSUP); 942 } 943 if ((szc = ppa[an_idx]->p_szc) != 0) { 944 npgs = page_get_pagecnt(szc); 945 an_idx = P2ROUNDUP(an_idx + 1, npgs); 946 } else { 947 an_idx++; 948 } 949 } 950 /* 951 * Since we cache the entire DISM segment, we want to 952 * set ppp to point to the first slot that corresponds 953 * to the requested addr, i.e. pg_idx. 
		 */
		mutex_exit(&sptd->spt_lock);
		*ppp = &(sptd->spt_ppa[pg_idx]);
		return (0);
	}
	if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) ==
	    SEGP_FAIL) {
		mutex_exit(&sptd->spt_lock);
		*ppp = NULL;
		return (ENOTSUP);
	}

	/*
	 * No need to worry about protections because DISM pages are always rw.
	 */
	pl = pplist = NULL;
	amp = sptd->spt_amp;

	/*
	 * Do we need to build the ppa array?
	 */
	if (sptd->spt_ppa == NULL) {
		pgcnt_t lpg_cnt = 0;

		pl_built = 1;
		tot_npages = btopr(sptd->spt_amp->size);

		ASSERT(sptd->spt_pcachecnt == 0);
		pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
		pl = pplist;

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		for (an_idx = 0; an_idx < tot_npages; ) {
			ap = anon_get_ptr(amp->ahp, an_idx);
			/*
			 * Cache only mlocked pages. For large pages,
			 * if one (constituent) page is mlocked,
			 * all pages of that large page
			 * are cached as well. This allows for quick
			 * lookups in the ppa array.
			 */
			if ((ap != NULL) && (lpg_cnt != 0 ||
			    (sptd->spt_ppa_lckcnt[an_idx] != 0))) {

				swap_xlate(ap, &vp, &off);
				pp = page_lookup(vp, off, SE_SHARED);
				ASSERT(pp != NULL);
				if (lpg_cnt == 0) {
					lpg_cnt++;
					/*
					 * For a small page, we are done --
					 * lpg_cnt is reset to 0 below.
					 *
					 * For a large page, we are guaranteed
					 * to find the anon structures of all
					 * constituent pages and a non-zero
					 * lpg_cnt ensures that we don't test
					 * for mlock for these. We are done
					 * when lpg_cnt reaches (npgs + 1).
					 * If we are not the first constituent
					 * page, restart at the first one.
					 */
					npgs = page_get_pagecnt(pp->p_szc);
					if (!IS_P2ALIGNED(an_idx, npgs)) {
						an_idx = P2ALIGN(an_idx, npgs);
						page_unlock(pp);
						continue;
					}
				}
				if (++lpg_cnt > npgs)
					lpg_cnt = 0;

				/*
				 * availrmem is decremented only
				 * for unlocked pages
				 */
				if (sptd->spt_ppa_lckcnt[an_idx] == 0)
					claim_availrmem++;
				pplist[an_idx] = pp;
			}
			an_idx++;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);

		mutex_enter(&freemem_lock);
		if (availrmem < tune.t_minarmem + claim_availrmem) {
			mutex_exit(&freemem_lock);
			ret = FC_MAKE_ERR(ENOMEM);
			claim_availrmem = 0;
			goto insert_fail;
		} else {
			availrmem -= claim_availrmem;
		}
		mutex_exit(&freemem_lock);

		sptd->spt_ppa = pl;
	} else {
		/*
		 * We already have a valid ppa[].
		 */
		pl = sptd->spt_ppa;
	}

	ASSERT(pl != NULL);

	ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size,
	    pl, sptd->spt_prot, SEGP_FORCE_WIRED | SEGP_ASYNC_FLUSH,
	    segspt_reclaim);
	if (ret == SEGP_FAIL) {
		/*
		 * seg_pinsert failed. We return
		 * ENOTSUP, so that the as_pagelock() code will
		 * then try the slower F_SOFTLOCK path.
		 */
		if (pl_built) {
			/*
			 * No one else has referenced the ppa[].
			 * We created it and we need to destroy it.
			 */
			sptd->spt_ppa = NULL;
		}
		ret = ENOTSUP;
		goto insert_fail;
	}

	/*
	 * In either case, we increment softlockcnt on the 'real' segment.
1081 */ 1082 sptd->spt_pcachecnt++; 1083 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1084 1085 ppa = sptd->spt_ppa; 1086 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 1087 if (ppa[an_idx] == NULL) { 1088 mutex_exit(&sptd->spt_lock); 1089 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 1090 pl, sptd->spt_prot, segspt_reclaim); 1091 *ppp = NULL; 1092 return (ENOTSUP); 1093 } 1094 if ((szc = ppa[an_idx]->p_szc) != 0) { 1095 npgs = page_get_pagecnt(szc); 1096 an_idx = P2ROUNDUP(an_idx + 1, npgs); 1097 } else { 1098 an_idx++; 1099 } 1100 } 1101 /* 1102 * We can now drop the sptd->spt_lock since the ppa[] 1103 * exists and he have incremented pacachecnt. 1104 */ 1105 mutex_exit(&sptd->spt_lock); 1106 1107 /* 1108 * Since we cache the entire segment, we want to 1109 * set ppp to point to the first slot that corresponds 1110 * to the requested addr, i.e. pg_idx. 1111 */ 1112 *ppp = &(sptd->spt_ppa[pg_idx]); 1113 return (ret); 1114 1115 insert_fail: 1116 /* 1117 * We will only reach this code if we tried and failed. 1118 * 1119 * And we can drop the lock on the dummy seg, once we've failed 1120 * to set up a new ppa[]. 1121 */ 1122 mutex_exit(&sptd->spt_lock); 1123 1124 if (pl_built) { 1125 mutex_enter(&freemem_lock); 1126 availrmem += claim_availrmem; 1127 mutex_exit(&freemem_lock); 1128 1129 /* 1130 * We created pl and we need to destroy it. 1131 */ 1132 pplist = pl; 1133 for (an_idx = 0; an_idx < tot_npages; an_idx++) { 1134 if (pplist[an_idx] != NULL) 1135 page_unlock(pplist[an_idx]); 1136 } 1137 kmem_free(pl, sizeof (page_t *) * tot_npages); 1138 } 1139 1140 if (shmd->shm_softlockcnt <= 0) { 1141 if (AS_ISUNMAPWAIT(seg->s_as)) { 1142 mutex_enter(&seg->s_as->a_contents); 1143 if (AS_ISUNMAPWAIT(seg->s_as)) { 1144 AS_CLRUNMAPWAIT(seg->s_as); 1145 cv_broadcast(&seg->s_as->a_cv); 1146 } 1147 mutex_exit(&seg->s_as->a_contents); 1148 } 1149 } 1150 *ppp = NULL; 1151 return (ret); 1152 } 1153 1154 1155 1156 /* 1157 * return locked pages over a given range. 1158 * 1159 * We will cache the entire ISM segment and save the pplist for the 1160 * entire segment in the ppa field of the underlying ISM segment structure. 1161 * Later, during a call to segspt_reclaim() we will use this ppa array 1162 * to page_unlock() all of the pages and then we will free this ppa list. 1163 */ 1164 /*ARGSUSED*/ 1165 static int 1166 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len, 1167 struct page ***ppp, enum lock_type type, enum seg_rw rw) 1168 { 1169 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1170 struct seg *sptseg = shmd->shm_sptseg; 1171 struct spt_data *sptd = sptseg->s_data; 1172 pgcnt_t np, page_index, npages; 1173 caddr_t a, spt_base; 1174 struct page **pplist, **pl, *pp; 1175 struct anon_map *amp; 1176 ulong_t anon_index; 1177 int ret = ENOTSUP; 1178 uint_t pl_built = 0; 1179 struct anon *ap; 1180 struct vnode *vp; 1181 u_offset_t off; 1182 1183 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1184 1185 /* 1186 * We want to lock/unlock the entire ISM segment. Therefore, 1187 * we will be using the underlying sptseg and it's base address 1188 * and length for the caching arguments. 
1189 */ 1190 ASSERT(sptseg); 1191 ASSERT(sptd); 1192 1193 if (sptd->spt_flags & SHM_PAGEABLE) { 1194 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw)); 1195 } 1196 1197 page_index = seg_page(seg, addr); 1198 npages = btopr(len); 1199 1200 /* 1201 * check if the request is larger than number of pages covered 1202 * by amp 1203 */ 1204 if (page_index + npages > btopr(sptd->spt_amp->size)) { 1205 *ppp = NULL; 1206 return (ENOTSUP); 1207 } 1208 1209 if (type == L_PAGEUNLOCK) { 1210 1211 ASSERT(sptd->spt_ppa != NULL); 1212 1213 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 1214 sptd->spt_ppa, sptd->spt_prot, segspt_reclaim); 1215 1216 /* 1217 * If someone is blocked while unmapping, we purge 1218 * segment page cache and thus reclaim pplist synchronously 1219 * without waiting for seg_pasync_thread. This speeds up 1220 * unmapping in cases where munmap(2) is called, while 1221 * raw async i/o is still in progress or where a thread 1222 * exits on data fault in a multithreaded application. 1223 */ 1224 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 1225 segspt_purge(seg); 1226 } 1227 return (0); 1228 } else if (type == L_PAGERECLAIM) { 1229 ASSERT(sptd->spt_ppa != NULL); 1230 1231 (void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size, 1232 sptd->spt_ppa, sptd->spt_prot); 1233 return (0); 1234 } 1235 1236 /* 1237 * First try to find pages in segment page cache, without 1238 * holding the segment lock. 1239 */ 1240 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 1241 sptd->spt_prot); 1242 if (pplist != NULL) { 1243 ASSERT(sptd->spt_ppa == pplist); 1244 ASSERT(sptd->spt_ppa[page_index]); 1245 /* 1246 * Since we cache the entire ISM segment, we want to 1247 * set ppp to point to the first slot that corresponds 1248 * to the requested addr, i.e. page_index. 1249 */ 1250 *ppp = &(sptd->spt_ppa[page_index]); 1251 return (0); 1252 } 1253 1254 /* The L_PAGELOCK case... */ 1255 mutex_enter(&sptd->spt_lock); 1256 1257 /* 1258 * try to find pages in segment page cache 1259 */ 1260 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 1261 sptd->spt_prot); 1262 if (pplist != NULL) { 1263 ASSERT(sptd->spt_ppa == pplist); 1264 /* 1265 * Since we cache the entire segment, we want to 1266 * set ppp to point to the first slot that corresponds 1267 * to the requested addr, i.e. page_index. 1268 */ 1269 mutex_exit(&sptd->spt_lock); 1270 *ppp = &(sptd->spt_ppa[page_index]); 1271 return (0); 1272 } 1273 1274 if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) == 1275 SEGP_FAIL) { 1276 mutex_exit(&sptd->spt_lock); 1277 *ppp = NULL; 1278 return (ENOTSUP); 1279 } 1280 1281 /* 1282 * No need to worry about protections because ISM pages 1283 * are always rw. 1284 */ 1285 pl = pplist = NULL; 1286 1287 /* 1288 * Do we need to build the ppa array? 1289 */ 1290 if (sptd->spt_ppa == NULL) { 1291 ASSERT(sptd->spt_ppa == pplist); 1292 1293 spt_base = sptseg->s_base; 1294 pl_built = 1; 1295 1296 /* 1297 * availrmem is decremented once during anon_swap_adjust() 1298 * and is incremented during the anon_unresv(), which is 1299 * called from shm_rm_amp() when the segment is destroyed. 
		 */
		amp = sptd->spt_amp;
		ASSERT(amp != NULL);

		/* pcachecnt is protected by sptd->spt_lock */
		ASSERT(sptd->spt_pcachecnt == 0);
		pplist = kmem_zalloc(sizeof (page_t *)
		    * btopr(sptd->spt_amp->size), KM_SLEEP);
		pl = pplist;

		anon_index = seg_page(sptseg, spt_base);

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
		    a += PAGESIZE, anon_index++, pplist++) {
			ap = anon_get_ptr(amp->ahp, anon_index);
			ASSERT(ap != NULL);
			swap_xlate(ap, &vp, &off);
			pp = page_lookup(vp, off, SE_SHARED);
			ASSERT(pp != NULL);
			*pplist = pp;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);

		if (a < (spt_base + sptd->spt_amp->size)) {
			ret = ENOTSUP;
			goto insert_fail;
		}
		sptd->spt_ppa = pl;
	} else {
		/*
		 * We already have a valid ppa[].
		 */
		pl = sptd->spt_ppa;
	}

	ASSERT(pl != NULL);

	ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size,
	    pl, sptd->spt_prot, SEGP_FORCE_WIRED, segspt_reclaim);
	if (ret == SEGP_FAIL) {
		/*
		 * seg_pinsert failed. We return
		 * ENOTSUP, so that the as_pagelock() code will
		 * then try the slower F_SOFTLOCK path.
		 */
		if (pl_built) {
			/*
			 * No one else has referenced the ppa[].
			 * We created it and we need to destroy it.
			 */
			sptd->spt_ppa = NULL;
		}
		ret = ENOTSUP;
		goto insert_fail;
	}

	/*
	 * In either case, we increment softlockcnt on the 'real' segment.
	 */
	sptd->spt_pcachecnt++;
	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);

	/*
	 * We can now drop the sptd->spt_lock since the ppa[]
	 * exists and we have incremented pcachecnt.
	 */
	mutex_exit(&sptd->spt_lock);

	/*
	 * Since we cache the entire segment, we want to
	 * set ppp to point to the first slot that corresponds
	 * to the requested addr, i.e. page_index.
	 */
	*ppp = &(sptd->spt_ppa[page_index]);
	return (ret);

insert_fail:
	/*
	 * We will only reach this code if we tried and failed.
	 *
	 * And we can drop the lock on the dummy seg, once we've failed
	 * to set up a new ppa[].
	 */
	mutex_exit(&sptd->spt_lock);

	if (pl_built) {
		/*
		 * We created pl and we need to destroy it.
1389 */ 1390 pplist = pl; 1391 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT); 1392 while (np) { 1393 page_unlock(*pplist); 1394 np--; 1395 pplist++; 1396 } 1397 kmem_free(pl, sizeof (page_t *) * 1398 btopr(sptd->spt_amp->size)); 1399 } 1400 if (shmd->shm_softlockcnt <= 0) { 1401 if (AS_ISUNMAPWAIT(seg->s_as)) { 1402 mutex_enter(&seg->s_as->a_contents); 1403 if (AS_ISUNMAPWAIT(seg->s_as)) { 1404 AS_CLRUNMAPWAIT(seg->s_as); 1405 cv_broadcast(&seg->s_as->a_cv); 1406 } 1407 mutex_exit(&seg->s_as->a_contents); 1408 } 1409 } 1410 *ppp = NULL; 1411 return (ret); 1412 } 1413 1414 /* 1415 * purge any cached pages in the I/O page cache 1416 */ 1417 static void 1418 segspt_purge(struct seg *seg) 1419 { 1420 seg_ppurge(seg); 1421 } 1422 1423 static int 1424 segspt_reclaim(struct seg *seg, caddr_t addr, size_t len, struct page **pplist, 1425 enum seg_rw rw) 1426 { 1427 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1428 struct seg *sptseg; 1429 struct spt_data *sptd; 1430 pgcnt_t npages, i, free_availrmem = 0; 1431 int done = 0; 1432 1433 #ifdef lint 1434 addr = addr; 1435 #endif 1436 sptseg = shmd->shm_sptseg; 1437 sptd = sptseg->s_data; 1438 npages = (len >> PAGESHIFT); 1439 ASSERT(npages); 1440 ASSERT(sptd->spt_pcachecnt != 0); 1441 ASSERT(sptd->spt_ppa == pplist); 1442 ASSERT(npages == btopr(sptd->spt_amp->size)); 1443 /* 1444 * Acquire the lock on the dummy seg and destroy the 1445 * ppa array IF this is the last pcachecnt. 1446 */ 1447 mutex_enter(&sptd->spt_lock); 1448 if (--sptd->spt_pcachecnt == 0) { 1449 for (i = 0; i < npages; i++) { 1450 if (pplist[i] == NULL) { 1451 continue; 1452 } 1453 if (rw == S_WRITE) { 1454 hat_setrefmod(pplist[i]); 1455 } else { 1456 hat_setref(pplist[i]); 1457 } 1458 if ((sptd->spt_flags & SHM_PAGEABLE) && 1459 (sptd->spt_ppa_lckcnt[i] == 0)) 1460 free_availrmem++; 1461 page_unlock(pplist[i]); 1462 } 1463 if (sptd->spt_flags & SHM_PAGEABLE) { 1464 mutex_enter(&freemem_lock); 1465 availrmem += free_availrmem; 1466 mutex_exit(&freemem_lock); 1467 } 1468 /* 1469 * Since we want to cach/uncache the entire ISM segment, 1470 * we will track the pplist in a segspt specific field 1471 * ppa, that is initialized at the time we add an entry to 1472 * the cache. 1473 */ 1474 ASSERT(sptd->spt_pcachecnt == 0); 1475 kmem_free(pplist, sizeof (page_t *) * npages); 1476 sptd->spt_ppa = NULL; 1477 sptd->spt_flags &= ~DISM_PPA_CHANGED; 1478 done = 1; 1479 } 1480 mutex_exit(&sptd->spt_lock); 1481 /* 1482 * Now decrement softlockcnt. 1483 */ 1484 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1); 1485 1486 if (shmd->shm_softlockcnt <= 0) { 1487 if (AS_ISUNMAPWAIT(seg->s_as)) { 1488 mutex_enter(&seg->s_as->a_contents); 1489 if (AS_ISUNMAPWAIT(seg->s_as)) { 1490 AS_CLRUNMAPWAIT(seg->s_as); 1491 cv_broadcast(&seg->s_as->a_cv); 1492 } 1493 mutex_exit(&seg->s_as->a_contents); 1494 } 1495 } 1496 return (done); 1497 } 1498 1499 /* 1500 * Do a F_SOFTUNLOCK call over the range requested. 1501 * The range must have already been F_SOFTLOCK'ed. 1502 * 1503 * The calls to acquire and release the anon map lock mutex were 1504 * removed in order to avoid a deadly embrace during a DR 1505 * memory delete operation. (Eg. 
DR blocks while waiting for a 1506 * exclusive lock on a page that is being used for kaio; the 1507 * thread that will complete the kaio and call segspt_softunlock 1508 * blocks on the anon map lock; another thread holding the anon 1509 * map lock blocks on another page lock via the segspt_shmfault 1510 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.) 1511 * 1512 * The appropriateness of the removal is based upon the following: 1513 * 1. If we are holding a segment's reader lock and the page is held 1514 * shared, then the corresponding element in anonmap which points to 1515 * anon struct cannot change and there is no need to acquire the 1516 * anonymous map lock. 1517 * 2. Threads in segspt_softunlock have a reader lock on the segment 1518 * and already have the shared page lock, so we are guaranteed that 1519 * the anon map slot cannot change and therefore can call anon_get_ptr() 1520 * without grabbing the anonymous map lock. 1521 * 3. Threads that softlock a shared page break copy-on-write, even if 1522 * its a read. Thus cow faults can be ignored with respect to soft 1523 * unlocking, since the breaking of cow means that the anon slot(s) will 1524 * not be shared. 1525 */ 1526 static void 1527 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr, 1528 size_t len, enum seg_rw rw) 1529 { 1530 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1531 struct seg *sptseg; 1532 struct spt_data *sptd; 1533 page_t *pp; 1534 caddr_t adr; 1535 struct vnode *vp; 1536 u_offset_t offset; 1537 ulong_t anon_index; 1538 struct anon_map *amp; /* XXX - for locknest */ 1539 struct anon *ap = NULL; 1540 pgcnt_t npages; 1541 1542 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1543 1544 sptseg = shmd->shm_sptseg; 1545 sptd = sptseg->s_data; 1546 1547 /* 1548 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 1549 * and therefore their pages are SE_SHARED locked 1550 * for the entire life of the segment. 1551 */ 1552 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) && 1553 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) { 1554 goto softlock_decrement; 1555 } 1556 1557 /* 1558 * Any thread is free to do a page_find and 1559 * page_unlock() on the pages within this seg. 1560 * 1561 * We are already holding the as->a_lock on the user's 1562 * real segment, but we need to hold the a_lock on the 1563 * underlying dummy as. This is mostly to satisfy the 1564 * underlying HAT layer. 1565 */ 1566 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1567 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len); 1568 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1569 1570 amp = sptd->spt_amp; 1571 ASSERT(amp != NULL); 1572 anon_index = seg_page(sptseg, sptseg_addr); 1573 1574 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) { 1575 ap = anon_get_ptr(amp->ahp, anon_index++); 1576 ASSERT(ap != NULL); 1577 swap_xlate(ap, &vp, &offset); 1578 1579 /* 1580 * Use page_find() instead of page_lookup() to 1581 * find the page since we know that it has a 1582 * "shared" lock. 
1583 */ 1584 pp = page_find(vp, offset); 1585 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1)); 1586 if (pp == NULL) { 1587 panic("segspt_softunlock: " 1588 "addr %p, ap %p, vp %p, off %llx", 1589 (void *)adr, (void *)ap, (void *)vp, offset); 1590 /*NOTREACHED*/ 1591 } 1592 1593 if (rw == S_WRITE) { 1594 hat_setrefmod(pp); 1595 } else if (rw != S_OTHER) { 1596 hat_setref(pp); 1597 } 1598 page_unlock(pp); 1599 } 1600 1601 softlock_decrement: 1602 npages = btopr(len); 1603 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages); 1604 if (shmd->shm_softlockcnt == 0) { 1605 /* 1606 * All SOFTLOCKS are gone. Wakeup any waiting 1607 * unmappers so they can try again to unmap. 1608 * Check for waiters first without the mutex 1609 * held so we don't always grab the mutex on 1610 * softunlocks. 1611 */ 1612 if (AS_ISUNMAPWAIT(seg->s_as)) { 1613 mutex_enter(&seg->s_as->a_contents); 1614 if (AS_ISUNMAPWAIT(seg->s_as)) { 1615 AS_CLRUNMAPWAIT(seg->s_as); 1616 cv_broadcast(&seg->s_as->a_cv); 1617 } 1618 mutex_exit(&seg->s_as->a_contents); 1619 } 1620 } 1621 } 1622 1623 int 1624 segspt_shmattach(struct seg *seg, caddr_t *argsp) 1625 { 1626 struct shm_data *shmd_arg = (struct shm_data *)argsp; 1627 struct shm_data *shmd; 1628 struct anon_map *shm_amp = shmd_arg->shm_amp; 1629 struct spt_data *sptd; 1630 int error = 0; 1631 1632 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1633 1634 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); 1635 if (shmd == NULL) 1636 return (ENOMEM); 1637 1638 shmd->shm_sptas = shmd_arg->shm_sptas; 1639 shmd->shm_amp = shm_amp; 1640 shmd->shm_sptseg = shmd_arg->shm_sptseg; 1641 1642 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0, 1643 NULL, 0, seg->s_size); 1644 1645 seg->s_data = (void *)shmd; 1646 seg->s_ops = &segspt_shmops; 1647 seg->s_szc = shmd->shm_sptseg->s_szc; 1648 sptd = shmd->shm_sptseg->s_data; 1649 1650 if (sptd->spt_flags & SHM_PAGEABLE) { 1651 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size), 1652 KM_NOSLEEP)) == NULL) { 1653 seg->s_data = (void *)NULL; 1654 kmem_free(shmd, (sizeof (*shmd))); 1655 return (ENOMEM); 1656 } 1657 shmd->shm_lckpgs = 0; 1658 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 1659 if ((error = hat_share(seg->s_as->a_hat, seg->s_base, 1660 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1661 seg->s_size, seg->s_szc)) != 0) { 1662 kmem_free(shmd->shm_vpage, 1663 btopr(shm_amp->size)); 1664 } 1665 } 1666 } else { 1667 error = hat_share(seg->s_as->a_hat, seg->s_base, 1668 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1669 seg->s_size, seg->s_szc); 1670 } 1671 if (error) { 1672 seg->s_szc = 0; 1673 seg->s_data = (void *)NULL; 1674 kmem_free(shmd, (sizeof (*shmd))); 1675 } else { 1676 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1677 shm_amp->refcnt++; 1678 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1679 } 1680 return (error); 1681 } 1682 1683 int 1684 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) 1685 { 1686 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1687 int reclaim = 1; 1688 1689 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1690 retry: 1691 if (shmd->shm_softlockcnt > 0) { 1692 if (reclaim == 1) { 1693 segspt_purge(seg); 1694 reclaim = 0; 1695 goto retry; 1696 } 1697 return (EAGAIN); 1698 } 1699 1700 if (ssize != seg->s_size) { 1701 #ifdef DEBUG 1702 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n", 1703 ssize, seg->s_size); 1704 #endif 1705 return (EINVAL); 1706 } 1707 1708 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, 
MC_UNLOCK, 1709 NULL, 0); 1710 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc); 1711 1712 seg_free(seg); 1713 1714 return (0); 1715 } 1716 1717 void 1718 segspt_shmfree(struct seg *seg) 1719 { 1720 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1721 struct anon_map *shm_amp = shmd->shm_amp; 1722 1723 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1724 1725 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0, 1726 MC_UNLOCK, NULL, 0); 1727 1728 /* 1729 * Need to increment refcnt when attaching 1730 * and decrement when detaching because of dup(). 1731 */ 1732 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1733 shm_amp->refcnt--; 1734 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1735 1736 if (shmd->shm_vpage) { /* only for DISM */ 1737 kmem_free(shmd->shm_vpage, btopr(shm_amp->size)); 1738 shmd->shm_vpage = NULL; 1739 } 1740 kmem_free(shmd, sizeof (*shmd)); 1741 } 1742 1743 /*ARGSUSED*/ 1744 int 1745 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 1746 { 1747 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1748 1749 /* 1750 * Shared page table is more than shared mapping. 1751 * Individual process sharing page tables can't change prot 1752 * because there is only one set of page tables. 1753 * This will be allowed after private page table is 1754 * supported. 1755 */ 1756 /* need to return correct status error? */ 1757 return (0); 1758 } 1759 1760 1761 faultcode_t 1762 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr, 1763 size_t len, enum fault_type type, enum seg_rw rw) 1764 { 1765 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1766 struct seg *sptseg = shmd->shm_sptseg; 1767 struct as *curspt = shmd->shm_sptas; 1768 struct spt_data *sptd = sptseg->s_data; 1769 pgcnt_t npages; 1770 size_t size; 1771 caddr_t segspt_addr, shm_addr; 1772 page_t **ppa; 1773 int i; 1774 ulong_t an_idx = 0; 1775 int err = 0; 1776 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0); 1777 size_t pgsz; 1778 pgcnt_t pgcnt; 1779 caddr_t a; 1780 pgcnt_t pidx; 1781 1782 #ifdef lint 1783 hat = hat; 1784 #endif 1785 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1786 1787 /* 1788 * Because of the way spt is implemented 1789 * the realsize of the segment does not have to be 1790 * equal to the segment size itself. The segment size is 1791 * often in multiples of a page size larger than PAGESIZE. 1792 * The realsize is rounded up to the nearest PAGESIZE 1793 * based on what the user requested. This is a bit of 1794 * ungliness that is historical but not easily fixed 1795 * without re-designing the higher levels of ISM. 1796 */ 1797 ASSERT(addr >= seg->s_base); 1798 if (((addr + len) - seg->s_base) > sptd->spt_realsize) 1799 return (FC_NOMAP); 1800 /* 1801 * For all of the following cases except F_PROT, we need to 1802 * make any necessary adjustments to addr and len 1803 * and get all of the necessary page_t's into an array called ppa[]. 1804 * 1805 * The code in shmat() forces base addr and len of ISM segment 1806 * to be aligned to largest page size supported. Therefore, 1807 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large 1808 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 1809 * in large pagesize chunks, or else we will screw up the HAT 1810 * layer by calling hat_memload_array() with differing page sizes 1811 * over a given virtual range. 
1812 */ 1813 pgsz = page_get_pagesize(sptseg->s_szc); 1814 pgcnt = page_get_pagecnt(sptseg->s_szc); 1815 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); 1816 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz); 1817 npages = btopr(size); 1818 1819 /* 1820 * Now we need to convert from addr in segshm to addr in segspt. 1821 */ 1822 an_idx = seg_page(seg, shm_addr); 1823 segspt_addr = sptseg->s_base + ptob(an_idx); 1824 1825 ASSERT((segspt_addr + ptob(npages)) <= 1826 (sptseg->s_base + sptd->spt_realsize)); 1827 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size)); 1828 1829 switch (type) { 1830 1831 case F_SOFTLOCK: 1832 1833 mutex_enter(&freemem_lock); 1834 if (availrmem < tune.t_minarmem + npages) { 1835 mutex_exit(&freemem_lock); 1836 return (FC_MAKE_ERR(ENOMEM)); 1837 } else { 1838 availrmem -= npages; 1839 } 1840 mutex_exit(&freemem_lock); 1841 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages); 1842 /* 1843 * Fall through to the F_INVAL case to load up the hat layer 1844 * entries with the HAT_LOAD_LOCK flag. 1845 */ 1846 /* FALLTHRU */ 1847 case F_INVAL: 1848 1849 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 1850 return (FC_NOMAP); 1851 1852 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP); 1853 1854 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa); 1855 if (err != 0) { 1856 if (type == F_SOFTLOCK) { 1857 mutex_enter(&freemem_lock); 1858 availrmem += npages; 1859 mutex_exit(&freemem_lock); 1860 atomic_add_long((ulong_t *)( 1861 &(shmd->shm_softlockcnt)), -npages); 1862 } 1863 goto dism_err; 1864 } 1865 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1866 a = segspt_addr; 1867 pidx = 0; 1868 if (type == F_SOFTLOCK) { 1869 1870 /* 1871 * Load up the translation keeping it 1872 * locked and don't unlock the page. 1873 */ 1874 for (; pidx < npages; a += pgsz, pidx += pgcnt) { 1875 hat_memload_array(sptseg->s_as->a_hat, 1876 a, pgsz, &ppa[pidx], sptd->spt_prot, 1877 HAT_LOAD_LOCK | HAT_LOAD_SHARE); 1878 } 1879 } else { 1880 if (hat == seg->s_as->a_hat) { 1881 1882 /* 1883 * Migrate pages marked for migration 1884 */ 1885 if (lgrp_optimizations()) 1886 page_migrate(seg, shm_addr, ppa, 1887 npages); 1888 1889 /* CPU HAT */ 1890 for (; pidx < npages; 1891 a += pgsz, pidx += pgcnt) { 1892 hat_memload_array(sptseg->s_as->a_hat, 1893 a, pgsz, &ppa[pidx], 1894 sptd->spt_prot, 1895 HAT_LOAD_SHARE); 1896 } 1897 } else { 1898 /* XHAT. Pass real address */ 1899 hat_memload_array(hat, shm_addr, 1900 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE); 1901 } 1902 1903 /* 1904 * And now drop the SE_SHARED lock(s). 1905 */ 1906 if (dyn_ism_unmap) { 1907 for (i = 0; i < npages; i++) { 1908 page_unlock(ppa[i]); 1909 } 1910 } 1911 } 1912 1913 if (!dyn_ism_unmap) { 1914 if (hat_share(seg->s_as->a_hat, shm_addr, 1915 curspt->a_hat, segspt_addr, ptob(npages), 1916 seg->s_szc) != 0) { 1917 panic("hat_share err in DISM fault"); 1918 /* NOTREACHED */ 1919 } 1920 if (type == F_INVAL) { 1921 for (i = 0; i < npages; i++) { 1922 page_unlock(ppa[i]); 1923 } 1924 } 1925 } 1926 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1927 dism_err: 1928 kmem_free(ppa, npages * sizeof (page_t *)); 1929 return (err); 1930 1931 case F_SOFTUNLOCK: 1932 1933 mutex_enter(&freemem_lock); 1934 availrmem += npages; 1935 mutex_exit(&freemem_lock); 1936 1937 /* 1938 * This is a bit ugly, we pass in the real seg pointer, 1939 * but the segspt_addr is the virtual address within the 1940 * dummy seg. 
1941 */ 1942 segspt_softunlock(seg, segspt_addr, size, rw); 1943 return (0); 1944 1945 case F_PROT: 1946 1947 /* 1948 * This takes care of the unusual case where a user 1949 * allocates a stack in shared memory and a register 1950 * window overflow is written to that stack page before 1951 * it is otherwise modified. 1952 * 1953 * We can get away with this because ISM segments are 1954 * always rw. Other than this unusual case, there 1955 * should be no instances of protection violations. 1956 */ 1957 return (0); 1958 1959 default: 1960 #ifdef DEBUG 1961 panic("segspt_dismfault default type?"); 1962 #else 1963 return (FC_NOMAP); 1964 #endif 1965 } 1966 } 1967 1968 1969 faultcode_t 1970 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr, 1971 size_t len, enum fault_type type, enum seg_rw rw) 1972 { 1973 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1974 struct seg *sptseg = shmd->shm_sptseg; 1975 struct as *curspt = shmd->shm_sptas; 1976 struct spt_data *sptd = sptseg->s_data; 1977 pgcnt_t npages; 1978 size_t size; 1979 caddr_t sptseg_addr, shm_addr; 1980 page_t *pp, **ppa; 1981 int i; 1982 u_offset_t offset; 1983 ulong_t anon_index = 0; 1984 struct vnode *vp; 1985 struct anon_map *amp; /* XXX - for locknest */ 1986 struct anon *ap = NULL; 1987 size_t pgsz; 1988 pgcnt_t pgcnt; 1989 caddr_t a; 1990 pgcnt_t pidx; 1991 size_t sz; 1992 1993 #ifdef lint 1994 hat = hat; 1995 #endif 1996 1997 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1998 1999 if (sptd->spt_flags & SHM_PAGEABLE) { 2000 return (segspt_dismfault(hat, seg, addr, len, type, rw)); 2001 } 2002 2003 /* 2004 * Because of the way spt is implemented 2005 * the realsize of the segment does not have to be 2006 * equal to the segment size itself. The segment size is 2007 * often in multiples of a page size larger than PAGESIZE. 2008 * The realsize is rounded up to the nearest PAGESIZE 2009 * based on what the user requested. This is a bit of 2010 * ungliness that is historical but not easily fixed 2011 * without re-designing the higher levels of ISM. 2012 */ 2013 ASSERT(addr >= seg->s_base); 2014 if (((addr + len) - seg->s_base) > sptd->spt_realsize) 2015 return (FC_NOMAP); 2016 /* 2017 * For all of the following cases except F_PROT, we need to 2018 * make any necessary adjustments to addr and len 2019 * and get all of the necessary page_t's into an array called ppa[]. 2020 * 2021 * The code in shmat() forces base addr and len of ISM segment 2022 * to be aligned to largest page size supported. Therefore, 2023 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large 2024 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 2025 * in large pagesize chunks, or else we will screw up the HAT 2026 * layer by calling hat_memload_array() with differing page sizes 2027 * over a given virtual range. 2028 */ 2029 pgsz = page_get_pagesize(sptseg->s_szc); 2030 pgcnt = page_get_pagecnt(sptseg->s_szc); 2031 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); 2032 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz); 2033 npages = btopr(size); 2034 2035 /* 2036 * Now we need to convert from addr in segshm to addr in segspt. 2037 */ 2038 anon_index = seg_page(seg, shm_addr); 2039 sptseg_addr = sptseg->s_base + ptob(anon_index); 2040 2041 /* 2042 * And now we may have to adjust npages downward if we have 2043 * exceeded the realsize of the segment or initial anon 2044 * allocations. 
2045 */ 2046 if ((sptseg_addr + ptob(npages)) > 2047 (sptseg->s_base + sptd->spt_realsize)) 2048 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr; 2049 2050 npages = btopr(size); 2051 2052 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size)); 2053 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0); 2054 2055 switch (type) { 2056 2057 case F_SOFTLOCK: 2058 2059 /* 2060 * availrmem is decremented once during anon_swap_adjust() 2061 * and is incremented during the anon_unresv(), which is 2062 * called from shm_rm_amp() when the segment is destroyed. 2063 */ 2064 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages); 2065 /* 2066 * Some platforms assume that ISM pages are SE_SHARED 2067 * locked for the entire life of the segment. 2068 */ 2069 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) 2070 return (0); 2071 /* 2072 * Fall through to the F_INVAL case to load up the hat layer 2073 * entries with the HAT_LOAD_LOCK flag. 2074 */ 2075 2076 /* FALLTHRU */ 2077 case F_INVAL: 2078 2079 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 2080 return (FC_NOMAP); 2081 2082 /* 2083 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP 2084 * may still rely on this call to hat_share(). That 2085 * would imply that those hat's can fault on a 2086 * HAT_LOAD_LOCK translation, which would seem 2087 * contradictory. 2088 */ 2089 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 2090 if (hat_share(seg->s_as->a_hat, seg->s_base, 2091 curspt->a_hat, sptseg->s_base, 2092 sptseg->s_size, sptseg->s_szc) != 0) { 2093 panic("hat_share error in ISM fault"); 2094 /*NOTREACHED*/ 2095 } 2096 return (0); 2097 } 2098 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP); 2099 2100 /* 2101 * I see no need to lock the real seg, 2102 * here, because all of our work will be on the underlying 2103 * dummy seg. 2104 * 2105 * sptseg_addr and npages now account for large pages. 2106 */ 2107 amp = sptd->spt_amp; 2108 ASSERT(amp != NULL); 2109 anon_index = seg_page(sptseg, sptseg_addr); 2110 2111 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2112 for (i = 0; i < npages; i++) { 2113 ap = anon_get_ptr(amp->ahp, anon_index++); 2114 ASSERT(ap != NULL); 2115 swap_xlate(ap, &vp, &offset); 2116 pp = page_lookup(vp, offset, SE_SHARED); 2117 ASSERT(pp != NULL); 2118 ppa[i] = pp; 2119 } 2120 ANON_LOCK_EXIT(&->a_rwlock); 2121 ASSERT(i == npages); 2122 2123 /* 2124 * We are already holding the as->a_lock on the user's 2125 * real segment, but we need to hold the a_lock on the 2126 * underlying dummy as. This is mostly to satisfy the 2127 * underlying HAT layer. 2128 */ 2129 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 2130 a = sptseg_addr; 2131 pidx = 0; 2132 if (type == F_SOFTLOCK) { 2133 /* 2134 * Load up the translation keeping it 2135 * locked and don't unlock the page. 2136 */ 2137 for (; pidx < npages; a += pgsz, pidx += pgcnt) { 2138 sz = MIN(pgsz, ptob(npages - pidx)); 2139 hat_memload_array(sptseg->s_as->a_hat, a, 2140 sz, &ppa[pidx], sptd->spt_prot, 2141 HAT_LOAD_LOCK | HAT_LOAD_SHARE); 2142 } 2143 } else { 2144 if (hat == seg->s_as->a_hat) { 2145 2146 /* 2147 * Migrate pages marked for migration. 2148 */ 2149 if (lgrp_optimizations()) 2150 page_migrate(seg, shm_addr, ppa, 2151 npages); 2152 2153 /* CPU HAT */ 2154 for (; pidx < npages; 2155 a += pgsz, pidx += pgcnt) { 2156 sz = MIN(pgsz, ptob(npages - pidx)); 2157 hat_memload_array(sptseg->s_as->a_hat, 2158 a, sz, &ppa[pidx], 2159 sptd->spt_prot, HAT_LOAD_SHARE); 2160 } 2161 } else { 2162 /* XHAT. 
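 *
 * The branch below handles a hat other than the CPU hat for this
 * address space (an external hat, e.g. one registered by a device
 * driver through the XHAT framework).  Such a hat maps the process's
 * shm range rather than the spt dummy segment, so the translation is
 * loaded against the real shm address in a single call instead of the
 * per-large-page loop used for the CPU hat above.  A toy model of that
 * dispatch (load_range_t and load_translations() are hypothetical):
 *
 *	#include <stddef.h>
 *
 *	typedef void (*load_range_t)(void *hat, char *va, size_t len);
 *
 *	static void
 *	load_translations(void *hat, void *cpu_hat, char *shm_addr,
 *	    char *spt_addr, size_t size, size_t pgsz, load_range_t load)
 *	{
 *		if (hat == cpu_hat) {
 *			// CPU hat: walk the spt dummy segment one large
 *			// page at a time so every call is pgsz-sized.
 *			for (size_t off = 0; off < size; off += pgsz)
 *				load(hat, spt_addr + off, pgsz);
 *		} else {
 *			// External hat: hand it the real shm range whole.
 *			load(hat, shm_addr, size);
 *		}
 *	}
 *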
Pass real address */ 2163 hat_memload_array(hat, shm_addr, 2164 ptob(npages), ppa, sptd->spt_prot, 2165 HAT_LOAD_SHARE); 2166 } 2167 2168 /* 2169 * And now drop the SE_SHARED lock(s). 2170 */ 2171 for (i = 0; i < npages; i++) 2172 page_unlock(ppa[i]); 2173 } 2174 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 2175 2176 kmem_free(ppa, sizeof (page_t *) * npages); 2177 return (0); 2178 case F_SOFTUNLOCK: 2179 2180 /* 2181 * This is a bit ugly, we pass in the real seg pointer, 2182 * but the sptseg_addr is the virtual address within the 2183 * dummy seg. 2184 */ 2185 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw); 2186 return (0); 2187 2188 case F_PROT: 2189 2190 /* 2191 * This takes care of the unusual case where a user 2192 * allocates a stack in shared memory and a register 2193 * window overflow is written to that stack page before 2194 * it is otherwise modified. 2195 * 2196 * We can get away with this because ISM segments are 2197 * always rw. Other than this unusual case, there 2198 * should be no instances of protection violations. 2199 */ 2200 return (0); 2201 2202 default: 2203 #ifdef DEBUG 2204 cmn_err(CE_WARN, "segspt_shmfault default type?"); 2205 #endif 2206 return (FC_NOMAP); 2207 } 2208 } 2209 2210 /*ARGSUSED*/ 2211 static faultcode_t 2212 segspt_shmfaulta(struct seg *seg, caddr_t addr) 2213 { 2214 return (0); 2215 } 2216 2217 /*ARGSUSED*/ 2218 static int 2219 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta) 2220 { 2221 return (0); 2222 } 2223 2224 /*ARGSUSED*/ 2225 static size_t 2226 segspt_shmswapout(struct seg *seg) 2227 { 2228 return (0); 2229 } 2230 2231 /* 2232 * duplicate the shared page tables 2233 */ 2234 int 2235 segspt_shmdup(struct seg *seg, struct seg *newseg) 2236 { 2237 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2238 struct anon_map *amp = shmd->shm_amp; 2239 struct shm_data *shmd_new; 2240 struct seg *spt_seg = shmd->shm_sptseg; 2241 struct spt_data *sptd = spt_seg->s_data; 2242 int error = 0; 2243 2244 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 2245 2246 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP); 2247 newseg->s_data = (void *)shmd_new; 2248 shmd_new->shm_sptas = shmd->shm_sptas; 2249 shmd_new->shm_amp = amp; 2250 shmd_new->shm_sptseg = shmd->shm_sptseg; 2251 newseg->s_ops = &segspt_shmops; 2252 newseg->s_szc = seg->s_szc; 2253 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc); 2254 2255 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2256 amp->refcnt++; 2257 ANON_LOCK_EXIT(&->a_rwlock); 2258 2259 if (sptd->spt_flags & SHM_PAGEABLE) { 2260 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP); 2261 shmd_new->shm_lckpgs = 0; 2262 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 2263 if ((error = hat_share(newseg->s_as->a_hat, 2264 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR, 2265 seg->s_size, seg->s_szc)) != 0) { 2266 kmem_free(shmd_new->shm_vpage, 2267 btopr(amp->size)); 2268 } 2269 } 2270 return (error); 2271 } else { 2272 return (hat_share(newseg->s_as->a_hat, newseg->s_base, 2273 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size, 2274 seg->s_szc)); 2275 2276 } 2277 } 2278 2279 /*ARGSUSED*/ 2280 int 2281 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot) 2282 { 2283 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2284 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2285 2286 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2287 2288 /* 2289 * ISM segment is always rw. 
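 *
 * The check below only fails when the caller asks for a permission bit
 * that is not in spt_prot, i.e. the requested protection must be a
 * subset of what the segment was created with.  A standalone sketch of
 * the same subset test (prot_subset() is an illustrative name):
 *
 *	#include <errno.h>
 *	#include <sys/mman.h>	// PROT_READ, PROT_WRITE, PROT_EXEC
 *
 *	// Returns 0 when 'req' asks for nothing beyond 'allowed'.
 *	static int
 *	prot_subset(unsigned int allowed, unsigned int req)
 *	{
 *		return (((allowed & req) == req) ? 0 : EACCES);
 *	}
 *
 *	// prot_subset(PROT_READ | PROT_WRITE, PROT_READ) == 0
 *	// prot_subset(PROT_READ | PROT_WRITE, PROT_EXEC) == EACCES
 *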
2290 */ 2291 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0); 2292 } 2293 2294 /* 2295 * Return an array of locked large pages, for empty slots allocate 2296 * private zero-filled anon pages. 2297 */ 2298 static int 2299 spt_anon_getpages( 2300 struct seg *sptseg, 2301 caddr_t sptaddr, 2302 size_t len, 2303 page_t *ppa[]) 2304 { 2305 struct spt_data *sptd = sptseg->s_data; 2306 struct anon_map *amp = sptd->spt_amp; 2307 enum seg_rw rw = sptd->spt_prot; 2308 uint_t szc = sptseg->s_szc; 2309 size_t pg_sz, share_sz = page_get_pagesize(szc); 2310 pgcnt_t lp_npgs; 2311 caddr_t lp_addr, e_sptaddr; 2312 uint_t vpprot, ppa_szc = 0; 2313 struct vpage *vpage = NULL; 2314 ulong_t j, ppa_idx; 2315 int err, ierr = 0; 2316 pgcnt_t an_idx; 2317 anon_sync_obj_t cookie; 2318 2319 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz)); 2320 ASSERT(len != 0); 2321 2322 pg_sz = share_sz; 2323 lp_npgs = btop(pg_sz); 2324 lp_addr = sptaddr; 2325 e_sptaddr = sptaddr + len; 2326 an_idx = seg_page(sptseg, sptaddr); 2327 ppa_idx = 0; 2328 2329 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2330 /*CONSTCOND*/ 2331 while (1) { 2332 for (; lp_addr < e_sptaddr; 2333 an_idx += lp_npgs, lp_addr += pg_sz, 2334 ppa_idx += lp_npgs) { 2335 2336 anon_array_enter(amp, an_idx, &cookie); 2337 ppa_szc = (uint_t)-1; 2338 ierr = anon_map_getpages(amp, an_idx, szc, sptseg, 2339 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx], 2340 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred); 2341 anon_array_exit(&cookie); 2342 2343 if (ierr != 0) { 2344 if (ierr > 0) { 2345 err = FC_MAKE_ERR(ierr); 2346 goto lpgs_err; 2347 } 2348 break; 2349 } 2350 } 2351 if (lp_addr == e_sptaddr) { 2352 break; 2353 } 2354 ASSERT(lp_addr < e_sptaddr); 2355 2356 /* 2357 * ierr == -1 means we failed to allocate a large page. 2358 * so do a size down operation. 2359 * 2360 * ierr == -2 means some other process that privately shares 2361 * pages with this process has allocated a larger page and we 2362 * need to retry with larger pages. So do a size up 2363 * operation. This relies on the fact that large pages are 2364 * never partially shared i.e. if we share any constituent 2365 * page of a large page with another process we must share the 2366 * entire large page. Note this cannot happen for SOFTLOCK 2367 * case, unless current address (lpaddr) is at the beginning 2368 * of the next page size boundary because the other process 2369 * couldn't have relocated locked pages. 2370 */ 2371 ASSERT(ierr == -1 || ierr == -2); 2372 if (segvn_anypgsz) { 2373 ASSERT(ierr == -2 || szc != 0); 2374 ASSERT(ierr == -1 || szc < sptseg->s_szc); 2375 szc = (ierr == -1) ? szc - 1 : szc + 1; 2376 } else { 2377 /* 2378 * For faults and segvn_anypgsz == 0 2379 * we need to be careful not to loop forever 2380 * if existing page is found with szc other 2381 * than 0 or seg->s_szc. This could be due 2382 * to page relocations on behalf of DR or 2383 * more likely large page creation. For this 2384 * case simply re-size to existing page's szc 2385 * if returned by anon_map_getpages(). 2386 */ 2387 if (ppa_szc == (uint_t)-1) { 2388 szc = (ierr == -1) ? 
0 : sptseg->s_szc; 2389 } else { 2390 ASSERT(ppa_szc <= sptseg->s_szc); 2391 ASSERT(ierr == -2 || ppa_szc < szc); 2392 ASSERT(ierr == -1 || ppa_szc > szc); 2393 szc = ppa_szc; 2394 } 2395 } 2396 pg_sz = page_get_pagesize(szc); 2397 lp_npgs = btop(pg_sz); 2398 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz)); 2399 } 2400 ANON_LOCK_EXIT(&->a_rwlock); 2401 return (0); 2402 2403 lpgs_err: 2404 ANON_LOCK_EXIT(&->a_rwlock); 2405 for (j = 0; j < ppa_idx; j++) 2406 page_unlock(ppa[j]); 2407 return (err); 2408 } 2409 2410 /* 2411 * count the number of bytes in a set of spt pages that are currently not 2412 * locked 2413 */ 2414 static rctl_qty_t 2415 spt_unlockedbytes(pgcnt_t npages, page_t **ppa) 2416 { 2417 ulong_t i; 2418 rctl_qty_t unlocked = 0; 2419 2420 for (i = 0; i < npages; i++) { 2421 if (ppa[i]->p_lckcnt == 0) 2422 unlocked += PAGESIZE; 2423 } 2424 return (unlocked); 2425 } 2426 2427 int 2428 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages, 2429 page_t **ppa, ulong_t *lockmap, size_t pos, 2430 rctl_qty_t *locked) 2431 { 2432 struct shm_data *shmd = seg->s_data; 2433 struct spt_data *sptd = shmd->shm_sptseg->s_data; 2434 ulong_t i; 2435 int kernel; 2436 2437 /* return the number of bytes actually locked */ 2438 *locked = 0; 2439 for (i = 0; i < npages; anon_index++, pos++, i++) { 2440 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) { 2441 if (sptd->spt_ppa_lckcnt[anon_index] < 2442 (ushort_t)DISM_LOCK_MAX) { 2443 if (++sptd->spt_ppa_lckcnt[anon_index] == 2444 (ushort_t)DISM_LOCK_MAX) { 2445 cmn_err(CE_WARN, 2446 "DISM page lock limit " 2447 "reached on DISM offset 0x%lx\n", 2448 anon_index << PAGESHIFT); 2449 } 2450 kernel = (sptd->spt_ppa && 2451 sptd->spt_ppa[anon_index]) ? 1 : 0; 2452 if (!page_pp_lock(ppa[i], 0, kernel)) { 2453 sptd->spt_ppa_lckcnt[anon_index]--; 2454 return (EAGAIN); 2455 } 2456 /* if this is a newly locked page, count it */ 2457 if (ppa[i]->p_lckcnt == 1) { 2458 *locked += PAGESIZE; 2459 } 2460 shmd->shm_lckpgs++; 2461 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED; 2462 if (lockmap != NULL) 2463 BT_SET(lockmap, pos); 2464 } 2465 } 2466 } 2467 return (0); 2468 } 2469 2470 /*ARGSUSED*/ 2471 static int 2472 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len, 2473 int attr, int op, ulong_t *lockmap, size_t pos) 2474 { 2475 struct shm_data *shmd = seg->s_data; 2476 struct seg *sptseg = shmd->shm_sptseg; 2477 struct spt_data *sptd = sptseg->s_data; 2478 struct kshmid *sp = sptd->spt_amp->a_sp; 2479 pgcnt_t npages, a_npages; 2480 page_t **ppa; 2481 pgcnt_t an_idx, a_an_idx, ppa_idx; 2482 caddr_t spt_addr, a_addr; /* spt and aligned address */ 2483 size_t a_len; /* aligned len */ 2484 size_t share_sz; 2485 ulong_t i; 2486 int sts = 0; 2487 rctl_qty_t unlocked = 0; 2488 rctl_qty_t locked = 0; 2489 struct proc *p = curproc; 2490 kproject_t *proj; 2491 2492 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2493 ASSERT(sp != NULL); 2494 2495 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 2496 return (0); 2497 } 2498 2499 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK); 2500 an_idx = seg_page(seg, addr); 2501 npages = btopr(len); 2502 2503 if (an_idx + npages > btopr(shmd->shm_amp->size)) { 2504 return (ENOMEM); 2505 } 2506 2507 /* 2508 * A shm's project never changes, so no lock needed. 2509 * The shm has a hold on the project, so it will not go away. 2510 * Since we have a mapping to shm within this zone, we know 2511 * that the zone will not go away. 
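 *
 * The MC_LOCK path below charges the project's locked-memory resource
 * control up front for every byte that spt_unlockedbytes() reports as
 * not yet locked, then calls spt_lockpages(), and finally refunds
 * whatever could not be locked.  A userland model of that accounting
 * (rctl_charge()/rctl_refund() are stand-ins for
 * rctl_incr_locked_mem()/rctl_decr_locked_mem(), and the 8M limit is
 * made up):
 *
 *	#include <stddef.h>
 *	#include <errno.h>
 *
 *	static size_t proj_locked_mem;			// bytes charged so far
 *	static const size_t proj_locked_max = 8 << 20;	// project limit
 *
 *	static int
 *	rctl_charge(size_t bytes)
 *	{
 *		if (proj_locked_mem + bytes > proj_locked_max)
 *			return (EAGAIN);	// would exceed the limit
 *		proj_locked_mem += bytes;
 *		return (0);
 *	}
 *
 *	static void
 *	rctl_refund(size_t bytes)
 *	{
 *		proj_locked_mem -= bytes;
 *	}
 *
 *	// Caller pattern, mirroring the code below:
 *	//	if (rctl_charge(unlocked) != 0) return EAGAIN;
 *	//	locked = lock_the_pages();	// may lock fewer bytes
 *	//	if (unlocked > locked)
 *	//		rctl_refund(unlocked - locked);
 *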
2512 */ 2513 proj = sp->shm_perm.ipc_proj; 2514 2515 if (op == MC_LOCK) { 2516 2517 /* 2518 * Need to align addr and size request if they are not 2519 * aligned so we can always allocate large page(s) however 2520 * we only lock what was requested in initial request. 2521 */ 2522 share_sz = page_get_pagesize(sptseg->s_szc); 2523 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz); 2524 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)), 2525 share_sz); 2526 a_npages = btop(a_len); 2527 a_an_idx = seg_page(seg, a_addr); 2528 spt_addr = sptseg->s_base + ptob(a_an_idx); 2529 ppa_idx = an_idx - a_an_idx; 2530 2531 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages), 2532 KM_NOSLEEP)) == NULL) { 2533 return (ENOMEM); 2534 } 2535 2536 /* 2537 * Don't cache any new pages for IO and 2538 * flush any cached pages. 2539 */ 2540 mutex_enter(&sptd->spt_lock); 2541 if (sptd->spt_ppa != NULL) 2542 sptd->spt_flags |= DISM_PPA_CHANGED; 2543 2544 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa); 2545 if (sts != 0) { 2546 mutex_exit(&sptd->spt_lock); 2547 kmem_free(ppa, ((sizeof (page_t *)) * a_npages)); 2548 return (sts); 2549 } 2550 2551 mutex_enter(&sp->shm_mlock); 2552 /* enforce locked memory rctl */ 2553 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]); 2554 2555 mutex_enter(&p->p_lock); 2556 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) { 2557 mutex_exit(&p->p_lock); 2558 sts = EAGAIN; 2559 } else { 2560 mutex_exit(&p->p_lock); 2561 sts = spt_lockpages(seg, an_idx, npages, 2562 &ppa[ppa_idx], lockmap, pos, &locked); 2563 2564 /* 2565 * correct locked count if not all pages could be 2566 * locked 2567 */ 2568 if ((unlocked - locked) > 0) { 2569 rctl_decr_locked_mem(NULL, proj, 2570 (unlocked - locked), 0); 2571 } 2572 } 2573 /* 2574 * unlock pages 2575 */ 2576 for (i = 0; i < a_npages; i++) 2577 page_unlock(ppa[i]); 2578 if (sptd->spt_ppa != NULL) 2579 sptd->spt_flags |= DISM_PPA_CHANGED; 2580 mutex_exit(&sp->shm_mlock); 2581 mutex_exit(&sptd->spt_lock); 2582 2583 kmem_free(ppa, ((sizeof (page_t *)) * a_npages)); 2584 2585 } else if (op == MC_UNLOCK) { /* unlock */ 2586 struct anon_map *amp; 2587 struct anon *ap; 2588 struct vnode *vp; 2589 u_offset_t off; 2590 struct page *pp; 2591 int kernel; 2592 anon_sync_obj_t cookie; 2593 rctl_qty_t unlocked = 0; 2594 2595 amp = sptd->spt_amp; 2596 mutex_enter(&sptd->spt_lock); 2597 if (shmd->shm_lckpgs == 0) { 2598 mutex_exit(&sptd->spt_lock); 2599 return (0); 2600 } 2601 /* 2602 * Don't cache new IO pages. 2603 */ 2604 if (sptd->spt_ppa != NULL) 2605 sptd->spt_flags |= DISM_PPA_CHANGED; 2606 2607 mutex_enter(&sp->shm_mlock); 2608 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2609 for (i = 0; i < npages; i++, an_idx++) { 2610 if (shmd->shm_vpage[an_idx] & DISM_PG_LOCKED) { 2611 anon_array_enter(amp, an_idx, &cookie); 2612 ap = anon_get_ptr(amp->ahp, an_idx); 2613 ASSERT(ap); 2614 2615 swap_xlate(ap, &vp, &off); 2616 anon_array_exit(&cookie); 2617 pp = page_lookup(vp, off, SE_SHARED); 2618 ASSERT(pp); 2619 /* 2620 * the availrmem is decremented only for 2621 * pages which are not in seg pcache, 2622 * for pages in seg pcache availrmem was 2623 * decremented in _dismpagelock() (if 2624 * they were not locked here) 2625 */ 2626 kernel = (sptd->spt_ppa && 2627 sptd->spt_ppa[an_idx]) ? 
1 : 0; 2628 ASSERT(pp->p_lckcnt > 0); 2629 page_pp_unlock(pp, 0, kernel); 2630 if (pp->p_lckcnt == 0) 2631 unlocked += PAGESIZE; 2632 page_unlock(pp); 2633 shmd->shm_vpage[an_idx] &= ~DISM_PG_LOCKED; 2634 sptd->spt_ppa_lckcnt[an_idx]--; 2635 shmd->shm_lckpgs--; 2636 } 2637 } 2638 ANON_LOCK_EXIT(&amp->a_rwlock); 2639 if (sptd->spt_ppa != NULL) 2640 sptd->spt_flags |= DISM_PPA_CHANGED; 2641 mutex_exit(&sptd->spt_lock); 2642 2643 rctl_decr_locked_mem(NULL, proj, unlocked, 0); 2644 mutex_exit(&sp->shm_mlock); 2645 } 2646 return (sts); 2647 } 2648 2649 /*ARGSUSED*/ 2650 int 2651 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 2652 { 2653 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2654 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2655 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1; 2656 2657 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2658 2659 /* 2660 * ISM segment is always rw. 2661 */ 2662 while (--pgno >= 0) 2663 *protv++ = sptd->spt_prot; 2664 return (0); 2665 } 2666 2667 /*ARGSUSED*/ 2668 u_offset_t 2669 segspt_shmgetoffset(struct seg *seg, caddr_t addr) 2670 { 2671 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2672 2673 /* Offset does not matter in ISM memory */ 2674 2675 return ((u_offset_t)0); 2676 } 2677 2678 /* ARGSUSED */ 2679 int 2680 segspt_shmgettype(struct seg *seg, caddr_t addr) 2681 { 2682 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2683 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2684 2685 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2686 2687 /* 2688 * The shared memory mapping is always MAP_SHARED, SWAP is only 2689 * reserved for DISM 2690 */ 2691 return (MAP_SHARED | 2692 ((sptd->spt_flags & SHM_PAGEABLE) ?
0 : MAP_NORESERVE)); 2693 } 2694 2695 /*ARGSUSED*/ 2696 int 2697 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 2698 { 2699 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2700 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2701 2702 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2703 2704 *vpp = sptd->spt_vp; 2705 return (0); 2706 } 2707 2708 /*ARGSUSED*/ 2709 static int 2710 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 2711 { 2712 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2713 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2714 struct anon_map *amp; 2715 pgcnt_t pg_idx; 2716 2717 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2718 2719 if (behav == MADV_FREE) { 2720 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) 2721 return (0); 2722 2723 amp = sptd->spt_amp; 2724 pg_idx = seg_page(seg, addr); 2725 2726 mutex_enter(&sptd->spt_lock); 2727 if (sptd->spt_ppa != NULL) 2728 sptd->spt_flags |= DISM_PPA_CHANGED; 2729 mutex_exit(&sptd->spt_lock); 2730 2731 /* 2732 * Purge all DISM cached pages 2733 */ 2734 seg_ppurge_seg(segspt_reclaim); 2735 2736 mutex_enter(&sptd->spt_lock); 2737 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2738 anon_disclaim(amp, pg_idx, len, ANON_PGLOOKUP_BLK); 2739 ANON_LOCK_EXIT(&->a_rwlock); 2740 mutex_exit(&sptd->spt_lock); 2741 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP || 2742 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) { 2743 int already_set; 2744 ulong_t anon_index; 2745 lgrp_mem_policy_t policy; 2746 caddr_t shm_addr; 2747 size_t share_size; 2748 size_t size; 2749 struct seg *sptseg = shmd->shm_sptseg; 2750 caddr_t sptseg_addr; 2751 2752 /* 2753 * Align address and length to page size of underlying segment 2754 */ 2755 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc); 2756 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size); 2757 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), 2758 share_size); 2759 2760 amp = shmd->shm_amp; 2761 anon_index = seg_page(seg, shm_addr); 2762 2763 /* 2764 * And now we may have to adjust size downward if we have 2765 * exceeded the realsize of the segment or initial anon 2766 * allocations. 2767 */ 2768 sptseg_addr = sptseg->s_base + ptob(anon_index); 2769 if ((sptseg_addr + size) > 2770 (sptseg->s_base + sptd->spt_realsize)) 2771 size = (sptseg->s_base + sptd->spt_realsize) - 2772 sptseg_addr; 2773 2774 /* 2775 * Set memory allocation policy for this segment 2776 */ 2777 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED); 2778 already_set = lgrp_shm_policy_set(policy, amp, anon_index, 2779 NULL, 0, len); 2780 2781 /* 2782 * If random memory allocation policy set already, 2783 * don't bother reapplying it. 
2784 */ 2785 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 2786 return (0); 2787 2788 /* 2789 * Mark any existing pages in the given range for 2790 * migration, flushing the I/O page cache, and using 2791 * underlying segment to calculate anon index and get 2792 * anonmap and vnode pointer from 2793 */ 2794 if (shmd->shm_softlockcnt > 0) 2795 segspt_purge(seg); 2796 2797 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0); 2798 } 2799 2800 return (0); 2801 } 2802 2803 /*ARGSUSED*/ 2804 void 2805 segspt_shmdump(struct seg *seg) 2806 { 2807 /* no-op for ISM segment */ 2808 } 2809 2810 /*ARGSUSED*/ 2811 static faultcode_t 2812 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 2813 { 2814 return (ENOTSUP); 2815 } 2816 2817 /* 2818 * get a memory ID for an addr in a given segment 2819 */ 2820 static int 2821 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp) 2822 { 2823 struct shm_data *shmd = (struct shm_data *)seg->s_data; 2824 struct anon *ap; 2825 size_t anon_index; 2826 struct anon_map *amp = shmd->shm_amp; 2827 struct spt_data *sptd = shmd->shm_sptseg->s_data; 2828 struct seg *sptseg = shmd->shm_sptseg; 2829 anon_sync_obj_t cookie; 2830 2831 anon_index = seg_page(seg, addr); 2832 2833 if (addr > (seg->s_base + sptd->spt_realsize)) { 2834 return (EFAULT); 2835 } 2836 2837 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2838 anon_array_enter(amp, anon_index, &cookie); 2839 ap = anon_get_ptr(amp->ahp, anon_index); 2840 if (ap == NULL) { 2841 struct page *pp; 2842 caddr_t spt_addr = sptseg->s_base + ptob(anon_index); 2843 2844 pp = anon_zero(sptseg, spt_addr, &ap, kcred); 2845 if (pp == NULL) { 2846 anon_array_exit(&cookie); 2847 ANON_LOCK_EXIT(&->a_rwlock); 2848 return (ENOMEM); 2849 } 2850 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 2851 page_unlock(pp); 2852 } 2853 anon_array_exit(&cookie); 2854 ANON_LOCK_EXIT(&->a_rwlock); 2855 memidp->val[0] = (uintptr_t)ap; 2856 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET; 2857 return (0); 2858 } 2859 2860 /* 2861 * Get memory allocation policy info for specified address in given segment 2862 */ 2863 static lgrp_mem_policy_info_t * 2864 segspt_shmgetpolicy(struct seg *seg, caddr_t addr) 2865 { 2866 struct anon_map *amp; 2867 ulong_t anon_index; 2868 lgrp_mem_policy_info_t *policy_info; 2869 struct shm_data *shm_data; 2870 2871 ASSERT(seg != NULL); 2872 2873 /* 2874 * Get anon_map from segshm 2875 * 2876 * Assume that no lock needs to be held on anon_map, since 2877 * it should be protected by its reference count which must be 2878 * nonzero for an existing segment 2879 * Need to grab readers lock on policy tree though 2880 */ 2881 shm_data = (struct shm_data *)seg->s_data; 2882 if (shm_data == NULL) 2883 return (NULL); 2884 amp = shm_data->shm_amp; 2885 ASSERT(amp->refcnt != 0); 2886 2887 /* 2888 * Get policy info 2889 * 2890 * Assume starting anon index of 0 2891 */ 2892 anon_index = seg_page(seg, addr); 2893 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0); 2894 2895 return (policy_info); 2896 } 2897 2898 /*ARGSUSED*/ 2899 static int 2900 segspt_shmcapable(struct seg *seg, segcapability_t capability) 2901 { 2902 return (0); 2903 } 2904
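/*
 * The memid returned by segspt_shmgetmemid() above names a byte of
 * shared memory by the anon structure backing its page (found through
 * seg_page() and the anon_map) plus the byte offset within that page,
 * so the identity is independent of where a particular process mapped
 * the segment.  A standalone sketch of the address split (page size
 * and addresses are made up; the kernel stores the anon pointer
 * itself, not the index):
 *
 *	#include <stdio.h>
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	#define	EX_PAGESIZE	4096UL
 *	#define	EX_PAGEOFFSET	(EX_PAGESIZE - 1)
 *
 *	int
 *	main(void)
 *	{
 *		uintptr_t s_base = 0x80000000;		// segment base
 *		uintptr_t addr = s_base + 0x12345;	// address of interest
 *
 *		size_t anon_index = (addr - s_base) / EX_PAGESIZE;
 *		size_t pgoff = addr & EX_PAGEOFFSET;
 *
 *		// Prints anon_index=12 pgoff=345: together they identify
 *		// one byte of one backing page.
 *		printf("anon_index=%zx pgoff=%zx\n", anon_index, pgoff);
 *		return (0);
 *	}
 */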