1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include <sys/param.h> 30 #include <sys/user.h> 31 #include <sys/mman.h> 32 #include <sys/kmem.h> 33 #include <sys/sysmacros.h> 34 #include <sys/cmn_err.h> 35 #include <sys/systm.h> 36 #include <sys/tuneable.h> 37 #include <vm/hat.h> 38 #include <vm/seg.h> 39 #include <vm/as.h> 40 #include <vm/anon.h> 41 #include <vm/page.h> 42 #include <sys/buf.h> 43 #include <sys/swap.h> 44 #include <sys/atomic.h> 45 #include <vm/seg_spt.h> 46 #include <sys/debug.h> 47 #include <sys/vtrace.h> 48 #include <sys/shm.h> 49 #include <sys/lgrp.h> 50 #include <sys/vmsystm.h> 51 52 #include <sys/tnf_probe.h> 53 54 #define SEGSPTADDR (caddr_t)0x0 55 56 /* 57 * # pages used for spt 58 */ 59 static size_t spt_used; 60 61 /* 62 * segspt_minfree is the memory left for system after ISM 63 * locked its pages; it is set up to 5% of availrmem in 64 * sptcreate when ISM is created. ISM should not use more 65 * than ~90% of availrmem; if it does, then the performance 66 * of the system may decrease. Machines with large memories may 67 * be able to use up more memory for ISM so we set the default 68 * segspt_minfree to 5% (which gives ISM max 95% of availrmem. 69 * If somebody wants even more memory for ISM (risking hanging 70 * the system) they can patch the segspt_minfree to smaller number. 
71 */ 72 pgcnt_t segspt_minfree = 0; 73 74 static int segspt_create(struct seg *seg, caddr_t argsp); 75 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize); 76 static void segspt_free(struct seg *seg); 77 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len); 78 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr); 79 80 static void 81 segspt_badop() 82 { 83 panic("segspt_badop called"); 84 /*NOTREACHED*/ 85 } 86 87 #define SEGSPT_BADOP(t) (t(*)())segspt_badop 88 89 struct seg_ops segspt_ops = { 90 SEGSPT_BADOP(int), /* dup */ 91 segspt_unmap, 92 segspt_free, 93 SEGSPT_BADOP(int), /* fault */ 94 SEGSPT_BADOP(faultcode_t), /* faulta */ 95 SEGSPT_BADOP(int), /* setprot */ 96 SEGSPT_BADOP(int), /* checkprot */ 97 SEGSPT_BADOP(int), /* kluster */ 98 SEGSPT_BADOP(size_t), /* swapout */ 99 SEGSPT_BADOP(int), /* sync */ 100 SEGSPT_BADOP(size_t), /* incore */ 101 SEGSPT_BADOP(int), /* lockop */ 102 SEGSPT_BADOP(int), /* getprot */ 103 SEGSPT_BADOP(u_offset_t), /* getoffset */ 104 SEGSPT_BADOP(int), /* gettype */ 105 SEGSPT_BADOP(int), /* getvp */ 106 SEGSPT_BADOP(int), /* advise */ 107 SEGSPT_BADOP(void), /* dump */ 108 SEGSPT_BADOP(int), /* pagelock */ 109 SEGSPT_BADOP(int), /* setpgsz */ 110 SEGSPT_BADOP(int), /* getmemid */ 111 segspt_getpolicy, /* getpolicy */ 112 SEGSPT_BADOP(int), /* capable */ 113 }; 114 115 static int segspt_shmdup(struct seg *seg, struct seg *newseg); 116 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize); 117 static void segspt_shmfree(struct seg *seg); 118 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg, 119 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw); 120 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr); 121 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr, 122 register size_t len, register uint_t prot); 123 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, 124 uint_t prot); 125 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta); 126 static size_t segspt_shmswapout(struct seg *seg); 127 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, 128 register char *vec); 129 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len, 130 int attr, uint_t flags); 131 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len, 132 int attr, int op, ulong_t *lockmap, size_t pos); 133 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, 134 uint_t *protv); 135 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr); 136 static int segspt_shmgettype(struct seg *seg, caddr_t addr); 137 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp); 138 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, 139 uint_t behav); 140 static void segspt_shmdump(struct seg *seg); 141 static int segspt_shmpagelock(struct seg *, caddr_t, size_t, 142 struct page ***, enum lock_type, enum seg_rw); 143 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t); 144 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *); 145 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t); 146 static int segspt_shmcapable(struct seg *, segcapability_t); 147 148 struct seg_ops segspt_shmops = { 149 segspt_shmdup, 150 segspt_shmunmap, 151 segspt_shmfree, 152 segspt_shmfault, 153 segspt_shmfaulta, 154 segspt_shmsetprot, 155 segspt_shmcheckprot, 
156 segspt_shmkluster, 157 segspt_shmswapout, 158 segspt_shmsync, 159 segspt_shmincore, 160 segspt_shmlockop, 161 segspt_shmgetprot, 162 segspt_shmgetoffset, 163 segspt_shmgettype, 164 segspt_shmgetvp, 165 segspt_shmadvise, /* advise */ 166 segspt_shmdump, 167 segspt_shmpagelock, 168 segspt_shmsetpgsz, 169 segspt_shmgetmemid, 170 segspt_shmgetpolicy, 171 segspt_shmcapable, 172 }; 173 174 static void segspt_purge(struct seg *seg); 175 static int segspt_reclaim(struct seg *, caddr_t, size_t, struct page **, 176 enum seg_rw); 177 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len, 178 page_t **ppa); 179 180 181 182 /*ARGSUSED*/ 183 int 184 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp, 185 uint_t prot, uint_t flags, uint_t share_szc) 186 { 187 int err; 188 struct as *newas; 189 struct segspt_crargs sptcargs; 190 191 #ifdef DEBUG 192 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */, 193 tnf_ulong, size, size ); 194 #endif 195 if (segspt_minfree == 0) /* leave min 5% of availrmem for */ 196 segspt_minfree = availrmem/20; /* for the system */ 197 198 if (!hat_supported(HAT_SHARED_PT, (void *)0)) 199 return (EINVAL); 200 201 /* 202 * get a new as for this shared memory segment 203 */ 204 newas = as_alloc(); 205 sptcargs.amp = amp; 206 sptcargs.prot = prot; 207 sptcargs.flags = flags; 208 sptcargs.szc = share_szc; 209 210 /* 211 * create a shared page table (spt) segment 212 */ 213 214 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) { 215 as_free(newas); 216 return (err); 217 } 218 *sptseg = sptcargs.seg_spt; 219 return (0); 220 } 221 222 void 223 sptdestroy(struct as *as, struct anon_map *amp) 224 { 225 226 #ifdef DEBUG 227 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */); 228 #endif 229 (void) as_unmap(as, SEGSPTADDR, amp->size); 230 as_free(as); 231 } 232 233 /* 234 * called from seg_free(). 235 * free (i.e., unlock, unmap, return to free list) 236 * all the pages in the given seg. 237 */ 238 void 239 segspt_free(struct seg *seg) 240 { 241 struct spt_data *sptd = (struct spt_data *)seg->s_data; 242 243 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 244 245 if (sptd != NULL) { 246 if (sptd->spt_realsize) 247 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize); 248 249 if (sptd->spt_ppa_lckcnt) 250 kmem_free(sptd->spt_ppa_lckcnt, 251 sizeof (*sptd->spt_ppa_lckcnt) 252 * btopr(sptd->spt_amp->size)); 253 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp)); 254 mutex_destroy(&sptd->spt_lock); 255 kmem_free(sptd, sizeof (*sptd)); 256 } 257 } 258 259 /*ARGSUSED*/ 260 static int 261 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr, 262 uint_t flags) 263 { 264 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 265 266 return (0); 267 } 268 269 /*ARGSUSED*/ 270 static size_t 271 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec) 272 { 273 caddr_t eo_seg; 274 pgcnt_t npages; 275 struct shm_data *shmd = (struct shm_data *)seg->s_data; 276 struct seg *sptseg; 277 struct spt_data *sptd; 278 279 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 280 #ifdef lint 281 seg = seg; 282 #endif 283 sptseg = shmd->shm_sptseg; 284 sptd = sptseg->s_data; 285 286 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 287 eo_seg = addr + len; 288 while (addr < eo_seg) { 289 /* page exists, and it's locked. 
*/ 290 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED | 291 SEG_PAGE_ANON; 292 addr += PAGESIZE; 293 } 294 return (len); 295 } else { 296 struct anon_map *amp = shmd->shm_amp; 297 struct anon *ap; 298 page_t *pp; 299 pgcnt_t anon_index; 300 struct vnode *vp; 301 u_offset_t off; 302 ulong_t i; 303 int ret; 304 anon_sync_obj_t cookie; 305 306 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK); 307 anon_index = seg_page(seg, addr); 308 npages = btopr(len); 309 if (anon_index + npages > btopr(shmd->shm_amp->size)) { 310 return (EINVAL); 311 } 312 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 313 for (i = 0; i < npages; i++, anon_index++) { 314 ret = 0; 315 anon_array_enter(amp, anon_index, &cookie); 316 ap = anon_get_ptr(amp->ahp, anon_index); 317 if (ap != NULL) { 318 swap_xlate(ap, &vp, &off); 319 anon_array_exit(&cookie); 320 pp = page_lookup_nowait(vp, off, SE_SHARED); 321 if (pp != NULL) { 322 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON; 323 page_unlock(pp); 324 } 325 } else { 326 anon_array_exit(&cookie); 327 } 328 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) { 329 ret |= SEG_PAGE_LOCKED; 330 } 331 *vec++ = (char)ret; 332 } 333 ANON_LOCK_EXIT(&->a_rwlock); 334 return (len); 335 } 336 } 337 338 static int 339 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize) 340 { 341 size_t share_size; 342 343 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 344 345 /* 346 * seg.s_size may have been rounded up to the largest page size 347 * in shmat(). 348 * XXX This should be cleanedup. sptdestroy should take a length 349 * argument which should be the same as sptcreate. Then 350 * this rounding would not be needed (or is done in shm.c) 351 * Only the check for full segment will be needed. 352 * 353 * XXX -- shouldn't raddr == 0 always? These tests don't seem 354 * to be useful at all. 355 */ 356 share_size = page_get_pagesize(seg->s_szc); 357 ssize = P2ROUNDUP(ssize, share_size); 358 359 if (raddr == seg->s_base && ssize == seg->s_size) { 360 seg_free(seg); 361 return (0); 362 } else 363 return (EINVAL); 364 } 365 366 int 367 segspt_create(struct seg *seg, caddr_t argsp) 368 { 369 int err; 370 caddr_t addr = seg->s_base; 371 struct spt_data *sptd; 372 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp; 373 struct anon_map *amp = sptcargs->amp; 374 struct cred *cred = CRED(); 375 ulong_t i, j, anon_index = 0; 376 pgcnt_t npages = btopr(amp->size); 377 struct vnode *vp; 378 page_t **ppa; 379 uint_t hat_flags; 380 381 /* 382 * We are holding the a_lock on the underlying dummy as, 383 * so we can make calls to the HAT layer. 
384 */ 385 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 386 387 #ifdef DEBUG 388 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */, 389 tnf_opaque, addr, addr, 390 tnf_ulong, len, seg->s_size); 391 #endif 392 if ((sptcargs->flags & SHM_PAGEABLE) == 0) { 393 if (err = anon_swap_adjust(npages)) 394 return (err); 395 } 396 err = ENOMEM; 397 398 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL) 399 goto out1; 400 401 if ((sptcargs->flags & SHM_PAGEABLE) == 0) { 402 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages), 403 KM_NOSLEEP)) == NULL) 404 goto out2; 405 } 406 407 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL); 408 409 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL) 410 goto out3; 411 412 seg->s_ops = &segspt_ops; 413 sptd->spt_vp = vp; 414 sptd->spt_amp = amp; 415 sptd->spt_prot = sptcargs->prot; 416 sptd->spt_flags = sptcargs->flags; 417 seg->s_data = (caddr_t)sptd; 418 sptd->spt_ppa = NULL; 419 sptd->spt_ppa_lckcnt = NULL; 420 seg->s_szc = sptcargs->szc; 421 422 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 423 amp->a_szc = seg->s_szc; 424 ANON_LOCK_EXIT(&->a_rwlock); 425 426 /* 427 * Set policy to affect initial allocation of pages in 428 * anon_map_createpages() 429 */ 430 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index, 431 NULL, 0, ptob(npages)); 432 433 if (sptcargs->flags & SHM_PAGEABLE) { 434 size_t share_sz; 435 pgcnt_t new_npgs, more_pgs; 436 struct anon_hdr *nahp; 437 438 share_sz = page_get_pagesize(seg->s_szc); 439 if (!IS_P2ALIGNED(amp->size, share_sz)) { 440 /* 441 * We are rounding up the size of the anon array 442 * on 4 M boundary because we always create 4 M 443 * of page(s) when locking, faulting pages and we 444 * don't have to check for all corner cases e.g. 445 * if there is enough space to allocate 4 M 446 * page. 447 */ 448 new_npgs = btop(P2ROUNDUP(amp->size, share_sz)); 449 more_pgs = new_npgs - npages; 450 451 if (anon_resv(ptob(more_pgs)) == 0) { 452 err = ENOMEM; 453 goto out4; 454 } 455 nahp = anon_create(new_npgs, ANON_SLEEP); 456 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 457 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages, 458 ANON_SLEEP); 459 anon_release(amp->ahp, npages); 460 amp->ahp = nahp; 461 amp->swresv = amp->size = ptob(new_npgs); 462 ANON_LOCK_EXIT(&->a_rwlock); 463 npages = new_npgs; 464 } 465 466 sptd->spt_ppa_lckcnt = kmem_zalloc(npages * 467 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP); 468 sptd->spt_pcachecnt = 0; 469 sptd->spt_realsize = ptob(npages); 470 sptcargs->seg_spt = seg; 471 return (0); 472 } 473 474 /* 475 * get array of pages for each anon slot in amp 476 */ 477 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa, 478 seg, addr, S_CREATE, cred)) != 0) 479 goto out4; 480 481 /* 482 * addr is initial address corresponding to the first page on ppa list 483 */ 484 for (i = 0; i < npages; i++) { 485 /* attempt to lock all pages */ 486 if (!page_pp_lock(ppa[i], 0, 1)) { 487 /* 488 * if unable to lock any page, unlock all 489 * of them and return error 490 */ 491 for (j = 0; j < i; j++) 492 page_pp_unlock(ppa[j], 0, 1); 493 for (i = 0; i < npages; i++) { 494 page_unlock(ppa[i]); 495 } 496 err = ENOMEM; 497 goto out4; 498 } 499 } 500 501 /* 502 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 503 * for the entire life of the segment. For example platforms 504 * that do not support Dynamic Reconfiguration. 
505 */ 506 hat_flags = HAT_LOAD_SHARE; 507 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) 508 hat_flags |= HAT_LOAD_LOCK; 509 510 hat_memload_array(seg->s_as->a_hat, addr, ptob(npages), 511 ppa, sptd->spt_prot, hat_flags); 512 513 /* 514 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 515 * we will leave the pages locked SE_SHARED for the life 516 * of the ISM segment. This will prevent any calls to 517 * hat_pageunload() on this ISM segment for those platforms. 518 */ 519 if (!(hat_flags & HAT_LOAD_LOCK)) { 520 /* 521 * On platforms that support HAT_DYNAMIC_ISM_UNMAP, 522 * we no longer need to hold the SE_SHARED lock on the pages, 523 * since L_PAGELOCK and F_SOFTLOCK calls will grab the 524 * SE_SHARED lock on the pages as necessary. 525 */ 526 for (i = 0; i < npages; i++) 527 page_unlock(ppa[i]); 528 } 529 sptd->spt_pcachecnt = 0; 530 kmem_free(ppa, ((sizeof (page_t *)) * npages)); 531 sptd->spt_realsize = ptob(npages); 532 atomic_add_long(&spt_used, npages); 533 sptcargs->seg_spt = seg; 534 return (0); 535 536 out4: 537 seg->s_data = NULL; 538 kmem_free(vp, sizeof (*vp)); 539 out3: 540 mutex_destroy(&sptd->spt_lock); 541 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 542 kmem_free(ppa, (sizeof (*ppa) * npages)); 543 out2: 544 kmem_free(sptd, sizeof (*sptd)); 545 out1: 546 if ((sptcargs->flags & SHM_PAGEABLE) == 0) 547 anon_swap_restore(npages); 548 return (err); 549 } 550 551 /*ARGSUSED*/ 552 void 553 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len) 554 { 555 struct page *pp; 556 struct spt_data *sptd = (struct spt_data *)seg->s_data; 557 pgcnt_t npages; 558 ulong_t anon_idx; 559 struct anon_map *amp; 560 struct anon *ap; 561 struct vnode *vp; 562 u_offset_t off; 563 uint_t hat_flags; 564 int root = 0; 565 pgcnt_t pgs, curnpgs = 0; 566 page_t *rootpp; 567 568 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 569 570 len = P2ROUNDUP(len, PAGESIZE); 571 572 npages = btop(len); 573 574 hat_flags = HAT_UNLOAD_UNLOCK; 575 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) || 576 (sptd->spt_flags & SHM_PAGEABLE)) { 577 hat_flags = HAT_UNLOAD; 578 } 579 580 hat_unload(seg->s_as->a_hat, addr, len, hat_flags); 581 582 amp = sptd->spt_amp; 583 if (sptd->spt_flags & SHM_PAGEABLE) 584 npages = btop(amp->size); 585 586 ASSERT(amp); 587 for (anon_idx = 0; anon_idx < npages; anon_idx++) { 588 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 589 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) { 590 panic("segspt_free_pages: null app"); 591 /*NOTREACHED*/ 592 } 593 } else { 594 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx)) 595 == NULL) 596 continue; 597 } 598 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0); 599 swap_xlate(ap, &vp, &off); 600 601 /* 602 * If this platform supports HAT_DYNAMIC_ISM_UNMAP, 603 * the pages won't be having SE_SHARED lock at this 604 * point. 605 * 606 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP, 607 * the pages are still held SE_SHARED locked from the 608 * original segspt_create() 609 * 610 * Our goal is to get SE_EXCL lock on each page, remove 611 * permanent lock on it and invalidate the page. 
612 */ 613 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 614 if (hat_flags == HAT_UNLOAD) 615 pp = page_lookup(vp, off, SE_EXCL); 616 else { 617 if ((pp = page_find(vp, off)) == NULL) { 618 panic("segspt_free_pages: " 619 "page not locked"); 620 /*NOTREACHED*/ 621 } 622 if (!page_tryupgrade(pp)) { 623 page_unlock(pp); 624 pp = page_lookup(vp, off, SE_EXCL); 625 } 626 } 627 if (pp == NULL) { 628 panic("segspt_free_pages: " 629 "page not in the system"); 630 /*NOTREACHED*/ 631 } 632 page_pp_unlock(pp, 0, 1); 633 } else { 634 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL) 635 continue; 636 page_pp_unlock(pp, 0, 0); 637 } 638 /* 639 * It's logical to invalidate the pages here as in most cases 640 * these were created by segspt. 641 */ 642 if (pp->p_szc != 0) { 643 /* 644 * For DISM swap is released in shm_rm_amp. 645 */ 646 if ((sptd->spt_flags & SHM_PAGEABLE) == 0 && 647 ap->an_pvp != NULL) { 648 panic("segspt_free_pages: pvp non NULL"); 649 /*NOTREACHED*/ 650 } 651 if (root == 0) { 652 ASSERT(curnpgs == 0); 653 root = 1; 654 rootpp = pp; 655 pgs = curnpgs = page_get_pagecnt(pp->p_szc); 656 ASSERT(pgs > 1); 657 ASSERT(IS_P2ALIGNED(pgs, pgs)); 658 ASSERT(!(page_pptonum(pp) & (pgs - 1))); 659 curnpgs--; 660 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) { 661 ASSERT(curnpgs == 1); 662 ASSERT(page_pptonum(pp) == 663 page_pptonum(rootpp) + (pgs - 1)); 664 page_destroy_pages(rootpp); 665 root = 0; 666 curnpgs = 0; 667 } else { 668 ASSERT(curnpgs > 1); 669 ASSERT(page_pptonum(pp) == 670 page_pptonum(rootpp) + (pgs - curnpgs)); 671 curnpgs--; 672 } 673 } else { 674 if (root != 0 || curnpgs != 0) { 675 panic("segspt_free_pages: bad large page"); 676 /*NOTREACHED*/ 677 } 678 /*LINTED: constant in conditional context */ 679 VN_DISPOSE(pp, B_INVAL, 0, kcred); 680 } 681 } 682 683 if (root != 0 || curnpgs != 0) { 684 panic("segspt_free_pages: bad large page"); 685 /*NOTREACHED*/ 686 } 687 688 /* 689 * mark that pages have been released 690 */ 691 sptd->spt_realsize = 0; 692 693 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { 694 atomic_add_long(&spt_used, -npages); 695 anon_swap_restore(npages); 696 } 697 } 698 699 /* 700 * Get memory allocation policy info for specified address in given segment 701 */ 702 static lgrp_mem_policy_info_t * 703 segspt_getpolicy(struct seg *seg, caddr_t addr) 704 { 705 struct anon_map *amp; 706 ulong_t anon_index; 707 lgrp_mem_policy_info_t *policy_info; 708 struct spt_data *spt_data; 709 710 ASSERT(seg != NULL); 711 712 /* 713 * Get anon_map from segspt 714 * 715 * Assume that no lock needs to be held on anon_map, since 716 * it should be protected by its reference count which must be 717 * nonzero for an existing segment 718 * Need to grab readers lock on policy tree though 719 */ 720 spt_data = (struct spt_data *)seg->s_data; 721 if (spt_data == NULL) 722 return (NULL); 723 amp = spt_data->spt_amp; 724 ASSERT(amp->refcnt != 0); 725 726 /* 727 * Get policy info 728 * 729 * Assume starting anon index of 0 730 */ 731 anon_index = seg_page(seg, addr); 732 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0); 733 734 return (policy_info); 735 } 736 737 /* 738 * DISM only. 739 * Return locked pages over a given range. 740 * 741 * We will cache all DISM locked pages and save the pplist for the 742 * entire segment in the ppa field of the underlying DISM segment structure. 743 * Later, during a call to segspt_reclaim() we will use this ppa array 744 * to page_unlock() all of the pages and then we will free this ppa list. 
745 */ 746 /*ARGSUSED*/ 747 static int 748 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len, 749 struct page ***ppp, enum lock_type type, enum seg_rw rw) 750 { 751 struct shm_data *shmd = (struct shm_data *)seg->s_data; 752 struct seg *sptseg = shmd->shm_sptseg; 753 struct spt_data *sptd = sptseg->s_data; 754 pgcnt_t pg_idx, npages, tot_npages, npgs; 755 struct page **pplist, **pl, **ppa, *pp; 756 struct anon_map *amp; 757 spgcnt_t an_idx; 758 int ret = ENOTSUP; 759 uint_t pl_built = 0; 760 struct anon *ap; 761 struct vnode *vp; 762 u_offset_t off; 763 pgcnt_t claim_availrmem = 0; 764 uint_t szc; 765 766 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 767 768 /* 769 * We want to lock/unlock the entire ISM segment. Therefore, 770 * we will be using the underlying sptseg and it's base address 771 * and length for the caching arguments. 772 */ 773 ASSERT(sptseg); 774 ASSERT(sptd); 775 776 pg_idx = seg_page(seg, addr); 777 npages = btopr(len); 778 779 /* 780 * check if the request is larger than number of pages covered 781 * by amp 782 */ 783 if (pg_idx + npages > btopr(sptd->spt_amp->size)) { 784 *ppp = NULL; 785 return (ENOTSUP); 786 } 787 788 if (type == L_PAGEUNLOCK) { 789 ASSERT(sptd->spt_ppa != NULL); 790 791 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 792 sptd->spt_ppa, sptd->spt_prot, segspt_reclaim); 793 794 /* 795 * If someone is blocked while unmapping, we purge 796 * segment page cache and thus reclaim pplist synchronously 797 * without waiting for seg_pasync_thread. This speeds up 798 * unmapping in cases where munmap(2) is called, while 799 * raw async i/o is still in progress or where a thread 800 * exits on data fault in a multithreaded application. 801 */ 802 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 803 segspt_purge(seg); 804 } 805 return (0); 806 } else if (type == L_PAGERECLAIM) { 807 ASSERT(sptd->spt_ppa != NULL); 808 (void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size, 809 sptd->spt_ppa, sptd->spt_prot); 810 return (0); 811 } 812 813 if (sptd->spt_flags & DISM_PPA_CHANGED) { 814 segspt_purge(seg); 815 /* 816 * for DISM ppa needs to be rebuild since 817 * number of locked pages could be changed 818 */ 819 *ppp = NULL; 820 return (ENOTSUP); 821 } 822 823 /* 824 * First try to find pages in segment page cache, without 825 * holding the segment lock. 826 */ 827 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 828 sptd->spt_prot); 829 if (pplist != NULL) { 830 ASSERT(sptd->spt_ppa != NULL); 831 ASSERT(sptd->spt_ppa == pplist); 832 ppa = sptd->spt_ppa; 833 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 834 if (ppa[an_idx] == NULL) { 835 seg_pinactive(seg, seg->s_base, 836 sptd->spt_amp->size, ppa, 837 sptd->spt_prot, segspt_reclaim); 838 *ppp = NULL; 839 return (ENOTSUP); 840 } 841 if ((szc = ppa[an_idx]->p_szc) != 0) { 842 npgs = page_get_pagecnt(szc); 843 an_idx = P2ROUNDUP(an_idx + 1, npgs); 844 } else { 845 an_idx++; 846 } 847 } 848 /* 849 * Since we cache the entire DISM segment, we want to 850 * set ppp to point to the first slot that corresponds 851 * to the requested addr, i.e. pg_idx. 852 */ 853 *ppp = &(sptd->spt_ppa[pg_idx]); 854 return (0); 855 } 856 857 /* The L_PAGELOCK case... 
*/ 858 mutex_enter(&sptd->spt_lock); 859 /* 860 * try to find pages in segment page cache with mutex 861 */ 862 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 863 sptd->spt_prot); 864 if (pplist != NULL) { 865 ASSERT(sptd->spt_ppa != NULL); 866 ASSERT(sptd->spt_ppa == pplist); 867 ppa = sptd->spt_ppa; 868 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 869 if (ppa[an_idx] == NULL) { 870 mutex_exit(&sptd->spt_lock); 871 seg_pinactive(seg, seg->s_base, 872 sptd->spt_amp->size, ppa, 873 sptd->spt_prot, segspt_reclaim); 874 *ppp = NULL; 875 return (ENOTSUP); 876 } 877 if ((szc = ppa[an_idx]->p_szc) != 0) { 878 npgs = page_get_pagecnt(szc); 879 an_idx = P2ROUNDUP(an_idx + 1, npgs); 880 } else { 881 an_idx++; 882 } 883 } 884 /* 885 * Since we cache the entire DISM segment, we want to 886 * set ppp to point to the first slot that corresponds 887 * to the requested addr, i.e. pg_idx. 888 */ 889 mutex_exit(&sptd->spt_lock); 890 *ppp = &(sptd->spt_ppa[pg_idx]); 891 return (0); 892 } 893 if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) == 894 SEGP_FAIL) { 895 mutex_exit(&sptd->spt_lock); 896 *ppp = NULL; 897 return (ENOTSUP); 898 } 899 900 /* 901 * No need to worry about protections because DISM pages are always rw. 902 */ 903 pl = pplist = NULL; 904 amp = sptd->spt_amp; 905 906 /* 907 * Do we need to build the ppa array? 908 */ 909 if (sptd->spt_ppa == NULL) { 910 pgcnt_t lpg_cnt = 0; 911 912 pl_built = 1; 913 tot_npages = btopr(sptd->spt_amp->size); 914 915 ASSERT(sptd->spt_pcachecnt == 0); 916 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP); 917 pl = pplist; 918 919 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 920 for (an_idx = 0; an_idx < tot_npages; ) { 921 ap = anon_get_ptr(amp->ahp, an_idx); 922 /* 923 * Cache only mlocked pages. For large pages 924 * if one (constituent) page is mlocked 925 * all pages for that large page 926 * are cached also. This is for quick 927 * lookups of ppa array; 928 */ 929 if ((ap != NULL) && (lpg_cnt != 0 || 930 (sptd->spt_ppa_lckcnt[an_idx] != 0))) { 931 932 swap_xlate(ap, &vp, &off); 933 pp = page_lookup(vp, off, SE_SHARED); 934 ASSERT(pp != NULL); 935 if (lpg_cnt == 0) { 936 npgs = page_get_pagecnt(pp->p_szc); 937 if (!IS_P2ALIGNED(an_idx, npgs)) { 938 an_idx = P2ALIGN(an_idx, npgs); 939 page_unlock(pp); 940 continue; 941 } 942 } 943 if (++lpg_cnt == npgs) 944 lpg_cnt = 0; 945 946 /* 947 * availrmem is decremented only 948 * for unlocked pages 949 */ 950 if (sptd->spt_ppa_lckcnt[an_idx] == 0) 951 claim_availrmem++; 952 pplist[an_idx] = pp; 953 } 954 an_idx++; 955 } 956 ANON_LOCK_EXIT(&->a_rwlock); 957 958 mutex_enter(&freemem_lock); 959 if (availrmem < tune.t_minarmem + claim_availrmem) { 960 mutex_exit(&freemem_lock); 961 ret = FC_MAKE_ERR(ENOMEM); 962 claim_availrmem = 0; 963 goto insert_fail; 964 } else { 965 availrmem -= claim_availrmem; 966 } 967 mutex_exit(&freemem_lock); 968 969 sptd->spt_ppa = pl; 970 } else { 971 /* 972 * We already have a valid ppa[]. 973 */ 974 pl = sptd->spt_ppa; 975 } 976 977 ASSERT(pl != NULL); 978 979 ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size, 980 pl, sptd->spt_prot, SEGP_FORCE_WIRED | SEGP_ASYNC_FLUSH, 981 segspt_reclaim); 982 if (ret == SEGP_FAIL) { 983 /* 984 * seg_pinsert failed. We return 985 * ENOTSUP, so that the as_pagelock() code will 986 * then try the slower F_SOFTLOCK path. 987 */ 988 sptd->spt_ppa = NULL; 989 ret = ENOTSUP; 990 goto insert_fail; 991 } 992 993 /* 994 * In either case, we increment softlockcnt on the 'real' segment. 
995 */ 996 sptd->spt_pcachecnt++; 997 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 998 999 ppa = sptd->spt_ppa; 1000 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) { 1001 if (ppa[an_idx] == NULL) { 1002 mutex_exit(&sptd->spt_lock); 1003 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 1004 pl, sptd->spt_prot, segspt_reclaim); 1005 *ppp = NULL; 1006 return (ENOTSUP); 1007 } 1008 if ((szc = ppa[an_idx]->p_szc) != 0) { 1009 npgs = page_get_pagecnt(szc); 1010 an_idx = P2ROUNDUP(an_idx + 1, npgs); 1011 } else { 1012 an_idx++; 1013 } 1014 } 1015 /* 1016 * We can now drop the sptd->spt_lock since the ppa[] 1017 * exists and he have incremented pacachecnt. 1018 */ 1019 mutex_exit(&sptd->spt_lock); 1020 1021 /* 1022 * Since we cache the entire segment, we want to 1023 * set ppp to point to the first slot that corresponds 1024 * to the requested addr, i.e. pg_idx. 1025 */ 1026 *ppp = &(sptd->spt_ppa[pg_idx]); 1027 return (ret); 1028 1029 insert_fail: 1030 /* 1031 * We will only reach this code if we tried and failed. 1032 * 1033 * And we can drop the lock on the dummy seg, once we've failed 1034 * to set up a new ppa[]. 1035 */ 1036 mutex_exit(&sptd->spt_lock); 1037 1038 if (pl_built) { 1039 mutex_enter(&freemem_lock); 1040 availrmem += claim_availrmem; 1041 mutex_exit(&freemem_lock); 1042 1043 /* 1044 * We created pl and we need to destroy it. 1045 */ 1046 pplist = pl; 1047 for (an_idx = 0; an_idx < tot_npages; an_idx++) { 1048 if (pplist[an_idx] != NULL) 1049 page_unlock(pplist[an_idx]); 1050 } 1051 kmem_free(pl, sizeof (page_t *) * tot_npages); 1052 } 1053 1054 if (shmd->shm_softlockcnt <= 0) { 1055 if (AS_ISUNMAPWAIT(seg->s_as)) { 1056 mutex_enter(&seg->s_as->a_contents); 1057 if (AS_ISUNMAPWAIT(seg->s_as)) { 1058 AS_CLRUNMAPWAIT(seg->s_as); 1059 cv_broadcast(&seg->s_as->a_cv); 1060 } 1061 mutex_exit(&seg->s_as->a_contents); 1062 } 1063 } 1064 *ppp = NULL; 1065 return (ret); 1066 } 1067 1068 1069 1070 /* 1071 * return locked pages over a given range. 1072 * 1073 * We will cache the entire ISM segment and save the pplist for the 1074 * entire segment in the ppa field of the underlying ISM segment structure. 1075 * Later, during a call to segspt_reclaim() we will use this ppa array 1076 * to page_unlock() all of the pages and then we will free this ppa list. 1077 */ 1078 /*ARGSUSED*/ 1079 static int 1080 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len, 1081 struct page ***ppp, enum lock_type type, enum seg_rw rw) 1082 { 1083 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1084 struct seg *sptseg = shmd->shm_sptseg; 1085 struct spt_data *sptd = sptseg->s_data; 1086 pgcnt_t np, page_index, npages; 1087 caddr_t a, spt_base; 1088 struct page **pplist, **pl, *pp; 1089 struct anon_map *amp; 1090 ulong_t anon_index; 1091 int ret = ENOTSUP; 1092 uint_t pl_built = 0; 1093 struct anon *ap; 1094 struct vnode *vp; 1095 u_offset_t off; 1096 1097 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1098 1099 /* 1100 * We want to lock/unlock the entire ISM segment. Therefore, 1101 * we will be using the underlying sptseg and it's base address 1102 * and length for the caching arguments. 
1103 */ 1104 ASSERT(sptseg); 1105 ASSERT(sptd); 1106 1107 if (sptd->spt_flags & SHM_PAGEABLE) { 1108 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw)); 1109 } 1110 1111 page_index = seg_page(seg, addr); 1112 npages = btopr(len); 1113 1114 /* 1115 * check if the request is larger than number of pages covered 1116 * by amp 1117 */ 1118 if (page_index + npages > btopr(sptd->spt_amp->size)) { 1119 *ppp = NULL; 1120 return (ENOTSUP); 1121 } 1122 1123 if (type == L_PAGEUNLOCK) { 1124 1125 ASSERT(sptd->spt_ppa != NULL); 1126 1127 seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 1128 sptd->spt_ppa, sptd->spt_prot, segspt_reclaim); 1129 1130 /* 1131 * If someone is blocked while unmapping, we purge 1132 * segment page cache and thus reclaim pplist synchronously 1133 * without waiting for seg_pasync_thread. This speeds up 1134 * unmapping in cases where munmap(2) is called, while 1135 * raw async i/o is still in progress or where a thread 1136 * exits on data fault in a multithreaded application. 1137 */ 1138 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 1139 segspt_purge(seg); 1140 } 1141 return (0); 1142 } else if (type == L_PAGERECLAIM) { 1143 ASSERT(sptd->spt_ppa != NULL); 1144 1145 (void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size, 1146 sptd->spt_ppa, sptd->spt_prot); 1147 return (0); 1148 } 1149 1150 /* 1151 * First try to find pages in segment page cache, without 1152 * holding the segment lock. 1153 */ 1154 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 1155 sptd->spt_prot); 1156 if (pplist != NULL) { 1157 ASSERT(sptd->spt_ppa == pplist); 1158 ASSERT(sptd->spt_ppa[page_index]); 1159 /* 1160 * Since we cache the entire ISM segment, we want to 1161 * set ppp to point to the first slot that corresponds 1162 * to the requested addr, i.e. page_index. 1163 */ 1164 *ppp = &(sptd->spt_ppa[page_index]); 1165 return (0); 1166 } 1167 1168 /* The L_PAGELOCK case... */ 1169 mutex_enter(&sptd->spt_lock); 1170 1171 /* 1172 * try to find pages in segment page cache 1173 */ 1174 pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 1175 sptd->spt_prot); 1176 if (pplist != NULL) { 1177 ASSERT(sptd->spt_ppa == pplist); 1178 /* 1179 * Since we cache the entire segment, we want to 1180 * set ppp to point to the first slot that corresponds 1181 * to the requested addr, i.e. page_index. 1182 */ 1183 mutex_exit(&sptd->spt_lock); 1184 *ppp = &(sptd->spt_ppa[page_index]); 1185 return (0); 1186 } 1187 1188 if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) == 1189 SEGP_FAIL) { 1190 mutex_exit(&sptd->spt_lock); 1191 *ppp = NULL; 1192 return (ENOTSUP); 1193 } 1194 1195 /* 1196 * No need to worry about protections because ISM pages 1197 * are always rw. 1198 */ 1199 pl = pplist = NULL; 1200 1201 /* 1202 * Do we need to build the ppa array? 1203 */ 1204 if (sptd->spt_ppa == NULL) { 1205 ASSERT(sptd->spt_ppa == pplist); 1206 1207 spt_base = sptseg->s_base; 1208 pl_built = 1; 1209 1210 /* 1211 * availrmem is decremented once during anon_swap_adjust() 1212 * and is incremented during the anon_unresv(), which is 1213 * called from shm_rm_amp() when the segment is destroyed. 
1214 */ 1215 amp = sptd->spt_amp; 1216 ASSERT(amp != NULL); 1217 1218 /* pcachecnt is protected by sptd->spt_lock */ 1219 ASSERT(sptd->spt_pcachecnt == 0); 1220 pplist = kmem_zalloc(sizeof (page_t *) 1221 * btopr(sptd->spt_amp->size), KM_SLEEP); 1222 pl = pplist; 1223 1224 anon_index = seg_page(sptseg, spt_base); 1225 1226 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1227 for (a = spt_base; a < (spt_base + sptd->spt_amp->size); 1228 a += PAGESIZE, anon_index++, pplist++) { 1229 ap = anon_get_ptr(amp->ahp, anon_index); 1230 ASSERT(ap != NULL); 1231 swap_xlate(ap, &vp, &off); 1232 pp = page_lookup(vp, off, SE_SHARED); 1233 ASSERT(pp != NULL); 1234 *pplist = pp; 1235 } 1236 ANON_LOCK_EXIT(&->a_rwlock); 1237 1238 if (a < (spt_base + sptd->spt_amp->size)) { 1239 ret = ENOTSUP; 1240 goto insert_fail; 1241 } 1242 sptd->spt_ppa = pl; 1243 } else { 1244 /* 1245 * We already have a valid ppa[]. 1246 */ 1247 pl = sptd->spt_ppa; 1248 } 1249 1250 ASSERT(pl != NULL); 1251 1252 ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size, 1253 pl, sptd->spt_prot, SEGP_FORCE_WIRED, segspt_reclaim); 1254 if (ret == SEGP_FAIL) { 1255 /* 1256 * seg_pinsert failed. We return 1257 * ENOTSUP, so that the as_pagelock() code will 1258 * then try the slower F_SOFTLOCK path. 1259 */ 1260 if (pl_built) { 1261 /* 1262 * No one else has referenced the ppa[]. 1263 * We created it and we need to destroy it. 1264 */ 1265 sptd->spt_ppa = NULL; 1266 } 1267 ret = ENOTSUP; 1268 goto insert_fail; 1269 } 1270 1271 /* 1272 * In either case, we increment softlockcnt on the 'real' segment. 1273 */ 1274 sptd->spt_pcachecnt++; 1275 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1); 1276 1277 /* 1278 * We can now drop the sptd->spt_lock since the ppa[] 1279 * exists and he have incremented pacachecnt. 1280 */ 1281 mutex_exit(&sptd->spt_lock); 1282 1283 /* 1284 * Since we cache the entire segment, we want to 1285 * set ppp to point to the first slot that corresponds 1286 * to the requested addr, i.e. page_index. 1287 */ 1288 *ppp = &(sptd->spt_ppa[page_index]); 1289 return (ret); 1290 1291 insert_fail: 1292 /* 1293 * We will only reach this code if we tried and failed. 1294 * 1295 * And we can drop the lock on the dummy seg, once we've failed 1296 * to set up a new ppa[]. 1297 */ 1298 mutex_exit(&sptd->spt_lock); 1299 1300 if (pl_built) { 1301 /* 1302 * We created pl and we need to destroy it. 
1303 */ 1304 pplist = pl; 1305 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT); 1306 while (np) { 1307 page_unlock(*pplist); 1308 np--; 1309 pplist++; 1310 } 1311 kmem_free(pl, sizeof (page_t *) * 1312 btopr(sptd->spt_amp->size)); 1313 } 1314 if (shmd->shm_softlockcnt <= 0) { 1315 if (AS_ISUNMAPWAIT(seg->s_as)) { 1316 mutex_enter(&seg->s_as->a_contents); 1317 if (AS_ISUNMAPWAIT(seg->s_as)) { 1318 AS_CLRUNMAPWAIT(seg->s_as); 1319 cv_broadcast(&seg->s_as->a_cv); 1320 } 1321 mutex_exit(&seg->s_as->a_contents); 1322 } 1323 } 1324 *ppp = NULL; 1325 return (ret); 1326 } 1327 1328 /* 1329 * purge any cached pages in the I/O page cache 1330 */ 1331 static void 1332 segspt_purge(struct seg *seg) 1333 { 1334 seg_ppurge(seg); 1335 } 1336 1337 static int 1338 segspt_reclaim(struct seg *seg, caddr_t addr, size_t len, struct page **pplist, 1339 enum seg_rw rw) 1340 { 1341 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1342 struct seg *sptseg; 1343 struct spt_data *sptd; 1344 pgcnt_t npages, i, free_availrmem = 0; 1345 int done = 0; 1346 1347 #ifdef lint 1348 addr = addr; 1349 #endif 1350 sptseg = shmd->shm_sptseg; 1351 sptd = sptseg->s_data; 1352 npages = (len >> PAGESHIFT); 1353 ASSERT(npages); 1354 ASSERT(sptd->spt_pcachecnt != 0); 1355 ASSERT(sptd->spt_ppa == pplist); 1356 ASSERT(npages == btopr(sptd->spt_amp->size)); 1357 1358 /* 1359 * Acquire the lock on the dummy seg and destroy the 1360 * ppa array IF this is the last pcachecnt. 1361 */ 1362 mutex_enter(&sptd->spt_lock); 1363 if (--sptd->spt_pcachecnt == 0) { 1364 for (i = 0; i < npages; i++) { 1365 if (pplist[i] == NULL) { 1366 continue; 1367 } 1368 if (rw == S_WRITE) { 1369 hat_setrefmod(pplist[i]); 1370 } else { 1371 hat_setref(pplist[i]); 1372 } 1373 if ((sptd->spt_flags & SHM_PAGEABLE) && 1374 (sptd->spt_ppa_lckcnt[i] == 0)) 1375 free_availrmem++; 1376 page_unlock(pplist[i]); 1377 } 1378 if (sptd->spt_flags & SHM_PAGEABLE) { 1379 mutex_enter(&freemem_lock); 1380 availrmem += free_availrmem; 1381 mutex_exit(&freemem_lock); 1382 } 1383 /* 1384 * Since we want to cach/uncache the entire ISM segment, 1385 * we will track the pplist in a segspt specific field 1386 * ppa, that is initialized at the time we add an entry to 1387 * the cache. 1388 */ 1389 ASSERT(sptd->spt_pcachecnt == 0); 1390 kmem_free(pplist, sizeof (page_t *) * npages); 1391 sptd->spt_ppa = NULL; 1392 sptd->spt_flags &= ~DISM_PPA_CHANGED; 1393 done = 1; 1394 } 1395 mutex_exit(&sptd->spt_lock); 1396 /* 1397 * Now decrement softlockcnt. 1398 */ 1399 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1); 1400 1401 if (shmd->shm_softlockcnt <= 0) { 1402 if (AS_ISUNMAPWAIT(seg->s_as)) { 1403 mutex_enter(&seg->s_as->a_contents); 1404 if (AS_ISUNMAPWAIT(seg->s_as)) { 1405 AS_CLRUNMAPWAIT(seg->s_as); 1406 cv_broadcast(&seg->s_as->a_cv); 1407 } 1408 mutex_exit(&seg->s_as->a_contents); 1409 } 1410 } 1411 return (done); 1412 } 1413 1414 /* 1415 * Do a F_SOFTUNLOCK call over the range requested. 1416 * The range must have already been F_SOFTLOCK'ed. 1417 * 1418 * The calls to acquire and release the anon map lock mutex were 1419 * removed in order to avoid a deadly embrace during a DR 1420 * memory delete operation. (Eg. 
DR blocks while waiting for a 1421 * exclusive lock on a page that is being used for kaio; the 1422 * thread that will complete the kaio and call segspt_softunlock 1423 * blocks on the anon map lock; another thread holding the anon 1424 * map lock blocks on another page lock via the segspt_shmfault 1425 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.) 1426 * 1427 * The appropriateness of the removal is based upon the following: 1428 * 1. If we are holding a segment's reader lock and the page is held 1429 * shared, then the corresponding element in anonmap which points to 1430 * anon struct cannot change and there is no need to acquire the 1431 * anonymous map lock. 1432 * 2. Threads in segspt_softunlock have a reader lock on the segment 1433 * and already have the shared page lock, so we are guaranteed that 1434 * the anon map slot cannot change and therefore can call anon_get_ptr() 1435 * without grabbing the anonymous map lock. 1436 * 3. Threads that softlock a shared page break copy-on-write, even if 1437 * its a read. Thus cow faults can be ignored with respect to soft 1438 * unlocking, since the breaking of cow means that the anon slot(s) will 1439 * not be shared. 1440 */ 1441 static void 1442 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr, 1443 size_t len, enum seg_rw rw) 1444 { 1445 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1446 struct seg *sptseg; 1447 struct spt_data *sptd; 1448 page_t *pp; 1449 caddr_t adr; 1450 struct vnode *vp; 1451 u_offset_t offset; 1452 ulong_t anon_index; 1453 struct anon_map *amp; /* XXX - for locknest */ 1454 struct anon *ap = NULL; 1455 pgcnt_t npages; 1456 1457 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1458 1459 sptseg = shmd->shm_sptseg; 1460 sptd = sptseg->s_data; 1461 1462 /* 1463 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK 1464 * and therefore their pages are SE_SHARED locked 1465 * for the entire life of the segment. 1466 */ 1467 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) && 1468 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) { 1469 goto softlock_decrement; 1470 } 1471 1472 /* 1473 * Any thread is free to do a page_find and 1474 * page_unlock() on the pages within this seg. 1475 * 1476 * We are already holding the as->a_lock on the user's 1477 * real segment, but we need to hold the a_lock on the 1478 * underlying dummy as. This is mostly to satisfy the 1479 * underlying HAT layer. 1480 */ 1481 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1482 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len); 1483 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1484 1485 amp = sptd->spt_amp; 1486 ASSERT(amp != NULL); 1487 anon_index = seg_page(sptseg, sptseg_addr); 1488 1489 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) { 1490 ap = anon_get_ptr(amp->ahp, anon_index++); 1491 ASSERT(ap != NULL); 1492 swap_xlate(ap, &vp, &offset); 1493 1494 /* 1495 * Use page_find() instead of page_lookup() to 1496 * find the page since we know that it has a 1497 * "shared" lock. 
1498 */ 1499 pp = page_find(vp, offset); 1500 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1)); 1501 if (pp == NULL) { 1502 panic("segspt_softunlock: " 1503 "addr %p, ap %p, vp %p, off %llx", 1504 (void *)adr, (void *)ap, (void *)vp, offset); 1505 /*NOTREACHED*/ 1506 } 1507 1508 if (rw == S_WRITE) { 1509 hat_setrefmod(pp); 1510 } else if (rw != S_OTHER) { 1511 hat_setref(pp); 1512 } 1513 page_unlock(pp); 1514 } 1515 1516 softlock_decrement: 1517 npages = btopr(len); 1518 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages); 1519 if (shmd->shm_softlockcnt == 0) { 1520 /* 1521 * All SOFTLOCKS are gone. Wakeup any waiting 1522 * unmappers so they can try again to unmap. 1523 * Check for waiters first without the mutex 1524 * held so we don't always grab the mutex on 1525 * softunlocks. 1526 */ 1527 if (AS_ISUNMAPWAIT(seg->s_as)) { 1528 mutex_enter(&seg->s_as->a_contents); 1529 if (AS_ISUNMAPWAIT(seg->s_as)) { 1530 AS_CLRUNMAPWAIT(seg->s_as); 1531 cv_broadcast(&seg->s_as->a_cv); 1532 } 1533 mutex_exit(&seg->s_as->a_contents); 1534 } 1535 } 1536 } 1537 1538 int 1539 segspt_shmattach(struct seg *seg, caddr_t *argsp) 1540 { 1541 struct shm_data *shmd_arg = (struct shm_data *)argsp; 1542 struct shm_data *shmd; 1543 struct anon_map *shm_amp = shmd_arg->shm_amp; 1544 struct spt_data *sptd; 1545 int error = 0; 1546 1547 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1548 1549 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); 1550 if (shmd == NULL) 1551 return (ENOMEM); 1552 1553 shmd->shm_sptas = shmd_arg->shm_sptas; 1554 shmd->shm_amp = shm_amp; 1555 shmd->shm_sptseg = shmd_arg->shm_sptseg; 1556 1557 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0, 1558 NULL, 0, seg->s_size); 1559 1560 seg->s_data = (void *)shmd; 1561 seg->s_ops = &segspt_shmops; 1562 seg->s_szc = shmd->shm_sptseg->s_szc; 1563 sptd = shmd->shm_sptseg->s_data; 1564 1565 if (sptd->spt_flags & SHM_PAGEABLE) { 1566 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size), 1567 KM_NOSLEEP)) == NULL) { 1568 seg->s_data = (void *)NULL; 1569 kmem_free(shmd, (sizeof (*shmd))); 1570 return (ENOMEM); 1571 } 1572 shmd->shm_lckpgs = 0; 1573 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 1574 if ((error = hat_share(seg->s_as->a_hat, seg->s_base, 1575 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1576 seg->s_size, seg->s_szc)) != 0) { 1577 kmem_free(shmd->shm_vpage, 1578 btopr(shm_amp->size)); 1579 } 1580 } 1581 } else { 1582 error = hat_share(seg->s_as->a_hat, seg->s_base, 1583 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1584 seg->s_size, seg->s_szc); 1585 } 1586 if (error) { 1587 seg->s_szc = 0; 1588 seg->s_data = (void *)NULL; 1589 kmem_free(shmd, (sizeof (*shmd))); 1590 } else { 1591 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1592 shm_amp->refcnt++; 1593 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1594 } 1595 return (error); 1596 } 1597 1598 int 1599 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) 1600 { 1601 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1602 int reclaim = 1; 1603 1604 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1605 retry: 1606 if (shmd->shm_softlockcnt > 0) { 1607 if (reclaim == 1) { 1608 segspt_purge(seg); 1609 reclaim = 0; 1610 goto retry; 1611 } 1612 return (EAGAIN); 1613 } 1614 1615 if (ssize != seg->s_size) { 1616 #ifdef DEBUG 1617 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n", 1618 ssize, seg->s_size); 1619 #endif 1620 return (EINVAL); 1621 } 1622 1623 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, 
MC_UNLOCK, 1624 NULL, 0); 1625 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc); 1626 1627 seg_free(seg); 1628 1629 return (0); 1630 } 1631 1632 void 1633 segspt_shmfree(struct seg *seg) 1634 { 1635 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1636 struct anon_map *shm_amp = shmd->shm_amp; 1637 1638 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1639 1640 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0, 1641 MC_UNLOCK, NULL, 0); 1642 1643 /* 1644 * Need to increment refcnt when attaching 1645 * and decrement when detaching because of dup(). 1646 */ 1647 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1648 shm_amp->refcnt--; 1649 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1650 1651 if (shmd->shm_vpage) { /* only for DISM */ 1652 kmem_free(shmd->shm_vpage, btopr(shm_amp->size)); 1653 shmd->shm_vpage = NULL; 1654 } 1655 kmem_free(shmd, sizeof (*shmd)); 1656 } 1657 1658 /*ARGSUSED*/ 1659 int 1660 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 1661 { 1662 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1663 1664 /* 1665 * Shared page table is more than shared mapping. 1666 * Individual process sharing page tables can't change prot 1667 * because there is only one set of page tables. 1668 * This will be allowed after private page table is 1669 * supported. 1670 */ 1671 /* need to return correct status error? */ 1672 return (0); 1673 } 1674 1675 1676 faultcode_t 1677 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr, 1678 size_t len, enum fault_type type, enum seg_rw rw) 1679 { 1680 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1681 struct seg *sptseg = shmd->shm_sptseg; 1682 struct as *curspt = shmd->shm_sptas; 1683 struct spt_data *sptd = sptseg->s_data; 1684 pgcnt_t npages; 1685 size_t share_sz, size; 1686 caddr_t segspt_addr, shm_addr; 1687 page_t **ppa; 1688 int i; 1689 ulong_t an_idx = 0; 1690 int err = 0; 1691 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0); 1692 1693 #ifdef lint 1694 hat = hat; 1695 #endif 1696 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1697 1698 /* 1699 * Because of the way spt is implemented 1700 * the realsize of the segment does not have to be 1701 * equal to the segment size itself. The segment size is 1702 * often in multiples of a page size larger than PAGESIZE. 1703 * The realsize is rounded up to the nearest PAGESIZE 1704 * based on what the user requested. This is a bit of 1705 * ungliness that is historical but not easily fixed 1706 * without re-designing the higher levels of ISM. 1707 */ 1708 ASSERT(addr >= seg->s_base); 1709 if (((addr + len) - seg->s_base) > sptd->spt_realsize) 1710 return (FC_NOMAP); 1711 /* 1712 * For all of the following cases except F_PROT, we need to 1713 * make any necessary adjustments to addr and len 1714 * and get all of the necessary page_t's into an array called ppa[]. 1715 * 1716 * The code in shmat() forces base addr and len of ISM segment 1717 * to be aligned to largest page size supported. Therefore, 1718 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large 1719 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 1720 * in large pagesize chunks, or else we will screw up the HAT 1721 * layer by calling hat_memload_array() with differing page sizes 1722 * over a given virtual range. 
1723 */ 1724 share_sz = page_get_pagesize(sptseg->s_szc); 1725 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz); 1726 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), share_sz); 1727 npages = btopr(size); 1728 1729 /* 1730 * Now we need to convert from addr in segshm to addr in segspt. 1731 */ 1732 an_idx = seg_page(seg, shm_addr); 1733 segspt_addr = sptseg->s_base + ptob(an_idx); 1734 1735 ASSERT((segspt_addr + ptob(npages)) <= 1736 (sptseg->s_base + sptd->spt_realsize)); 1737 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size)); 1738 1739 switch (type) { 1740 1741 case F_SOFTLOCK: 1742 1743 mutex_enter(&freemem_lock); 1744 if (availrmem < tune.t_minarmem + npages) { 1745 mutex_exit(&freemem_lock); 1746 return (FC_MAKE_ERR(ENOMEM)); 1747 } else { 1748 availrmem -= npages; 1749 } 1750 mutex_exit(&freemem_lock); 1751 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages); 1752 /* 1753 * Fall through to the F_INVAL case to load up the hat layer 1754 * entries with the HAT_LOAD_LOCK flag. 1755 */ 1756 /* FALLTHRU */ 1757 case F_INVAL: 1758 1759 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 1760 return (FC_NOMAP); 1761 1762 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP); 1763 1764 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa); 1765 if (err != 0) { 1766 if (type == F_SOFTLOCK) { 1767 mutex_enter(&freemem_lock); 1768 availrmem += npages; 1769 mutex_exit(&freemem_lock); 1770 atomic_add_long((ulong_t *)( 1771 &(shmd->shm_softlockcnt)), -npages); 1772 } 1773 goto dism_err; 1774 } 1775 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 1776 if (type == F_SOFTLOCK) { 1777 1778 /* 1779 * Load up the translation keeping it 1780 * locked and don't unlock the page. 1781 */ 1782 hat_memload_array(sptseg->s_as->a_hat, segspt_addr, 1783 size, ppa, sptd->spt_prot, 1784 HAT_LOAD_LOCK | HAT_LOAD_SHARE); 1785 } else { 1786 if (hat == seg->s_as->a_hat) { 1787 1788 /* 1789 * Migrate pages marked for migration 1790 */ 1791 if (lgrp_optimizations()) 1792 page_migrate(seg, shm_addr, ppa, 1793 npages); 1794 1795 /* CPU HAT */ 1796 hat_memload_array(sptseg->s_as->a_hat, 1797 segspt_addr, size, ppa, sptd->spt_prot, 1798 HAT_LOAD_SHARE); 1799 } else { 1800 /* XHAT. Pass real address */ 1801 hat_memload_array(hat, shm_addr, 1802 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE); 1803 } 1804 1805 /* 1806 * And now drop the SE_SHARED lock(s). 1807 */ 1808 if (dyn_ism_unmap) { 1809 for (i = 0; i < npages; i++) { 1810 page_unlock(ppa[i]); 1811 } 1812 } 1813 } 1814 1815 if (!dyn_ism_unmap) { 1816 if (hat_share(seg->s_as->a_hat, shm_addr, 1817 curspt->a_hat, segspt_addr, ptob(npages), 1818 seg->s_szc) != 0) { 1819 panic("hat_share err in DISM fault"); 1820 /* NOTREACHED */ 1821 } 1822 if (type == F_INVAL) { 1823 for (i = 0; i < npages; i++) { 1824 page_unlock(ppa[i]); 1825 } 1826 } 1827 } 1828 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 1829 dism_err: 1830 kmem_free(ppa, npages * sizeof (page_t *)); 1831 return (err); 1832 1833 case F_SOFTUNLOCK: 1834 1835 mutex_enter(&freemem_lock); 1836 availrmem += npages; 1837 mutex_exit(&freemem_lock); 1838 1839 /* 1840 * This is a bit ugly, we pass in the real seg pointer, 1841 * but the segspt_addr is the virtual address within the 1842 * dummy seg. 
1843 */ 1844 segspt_softunlock(seg, segspt_addr, size, rw); 1845 return (0); 1846 1847 case F_PROT: 1848 1849 /* 1850 * This takes care of the unusual case where a user 1851 * allocates a stack in shared memory and a register 1852 * window overflow is written to that stack page before 1853 * it is otherwise modified. 1854 * 1855 * We can get away with this because ISM segments are 1856 * always rw. Other than this unusual case, there 1857 * should be no instances of protection violations. 1858 */ 1859 return (0); 1860 1861 default: 1862 #ifdef DEBUG 1863 panic("segspt_dismfault default type?"); 1864 #else 1865 return (FC_NOMAP); 1866 #endif 1867 } 1868 } 1869 1870 1871 faultcode_t 1872 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr, 1873 size_t len, enum fault_type type, enum seg_rw rw) 1874 { 1875 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1876 struct seg *sptseg = shmd->shm_sptseg; 1877 struct as *curspt = shmd->shm_sptas; 1878 struct spt_data *sptd = sptseg->s_data; 1879 pgcnt_t npages; 1880 size_t share_size, size; 1881 caddr_t sptseg_addr, shm_addr; 1882 page_t *pp, **ppa; 1883 int i; 1884 u_offset_t offset; 1885 ulong_t anon_index = 0; 1886 struct vnode *vp; 1887 struct anon_map *amp; /* XXX - for locknest */ 1888 struct anon *ap = NULL; 1889 anon_sync_obj_t cookie; 1890 1891 #ifdef lint 1892 hat = hat; 1893 #endif 1894 1895 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 1896 1897 if (sptd->spt_flags & SHM_PAGEABLE) { 1898 return (segspt_dismfault(hat, seg, addr, len, type, rw)); 1899 } 1900 1901 /* 1902 * Because of the way spt is implemented 1903 * the realsize of the segment does not have to be 1904 * equal to the segment size itself. The segment size is 1905 * often in multiples of a page size larger than PAGESIZE. 1906 * The realsize is rounded up to the nearest PAGESIZE 1907 * based on what the user requested. This is a bit of 1908 * ungliness that is historical but not easily fixed 1909 * without re-designing the higher levels of ISM. 1910 */ 1911 ASSERT(addr >= seg->s_base); 1912 if (((addr + len) - seg->s_base) > sptd->spt_realsize) 1913 return (FC_NOMAP); 1914 /* 1915 * For all of the following cases except F_PROT, we need to 1916 * make any necessary adjustments to addr and len 1917 * and get all of the necessary page_t's into an array called ppa[]. 1918 * 1919 * The code in shmat() forces base addr and len of ISM segment 1920 * to be aligned to largest page size supported. Therefore, 1921 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large 1922 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 1923 * in large pagesize chunks, or else we will screw up the HAT 1924 * layer by calling hat_memload_array() with differing page sizes 1925 * over a given virtual range. 1926 */ 1927 share_size = page_get_pagesize(sptseg->s_szc); 1928 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size); 1929 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), share_size); 1930 npages = btopr(size); 1931 1932 /* 1933 * Now we need to convert from addr in segshm to addr in segspt. 1934 */ 1935 anon_index = seg_page(seg, shm_addr); 1936 sptseg_addr = sptseg->s_base + ptob(anon_index); 1937 1938 /* 1939 * And now we may have to adjust npages downward if we have 1940 * exceeded the realsize of the segment or initial anon 1941 * allocations. 

    /*
     * And now we may have to adjust npages downward if we have
     * exceeded the realsize of the segment or initial anon
     * allocations.
     */
    if ((sptseg_addr + ptob(npages)) >
        (sptseg->s_base + sptd->spt_realsize))
        size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;

    npages = btopr(size);

    ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
    ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);

    switch (type) {

    case F_SOFTLOCK:

        /*
         * availrmem is decremented once during anon_swap_adjust()
         * and is incremented during the anon_unresv(), which is
         * called from shm_rm_amp() when the segment is destroyed.
         */
        atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
        /*
         * Some platforms assume that ISM pages are SE_SHARED
         * locked for the entire life of the segment.
         */
        if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
            return (0);
        /*
         * Fall through to the F_INVAL case to load up the hat layer
         * entries with the HAT_LOAD_LOCK flag.
         */

        /* FALLTHRU */
    case F_INVAL:

        if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
            return (FC_NOMAP);

        /*
         * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
         * may still rely on this call to hat_share(). That
         * would imply that those hat's can fault on a
         * HAT_LOAD_LOCK translation, which would seem
         * contradictory.
         */
        if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
            if (hat_share(seg->s_as->a_hat, seg->s_base,
                curspt->a_hat, sptseg->s_base,
                sptseg->s_size, sptseg->s_szc) != 0) {
                panic("hat_share error in ISM fault");
                /*NOTREACHED*/
            }
            return (0);
        }
        ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);

        /*
         * I see no need to lock the real seg here, because all of
         * our work will be on the underlying dummy seg.
         *
         * sptseg_addr and npages now account for large pages.
         */
        amp = sptd->spt_amp;
        ASSERT(amp != NULL);
        anon_index = seg_page(sptseg, sptseg_addr);

        ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
        for (i = 0; i < npages; i++) {
            anon_array_enter(amp, anon_index, &cookie);
            ap = anon_get_ptr(amp->ahp, anon_index++);
            ASSERT(ap != NULL);
            swap_xlate(ap, &vp, &offset);
            anon_array_exit(&cookie);
            pp = page_lookup(vp, offset, SE_SHARED);
            ASSERT(pp != NULL);
            ppa[i] = pp;
        }
        ANON_LOCK_EXIT(&amp->a_rwlock);
        ASSERT(i == npages);

        /*
         * We are already holding the as->a_lock on the user's
         * real segment, but we need to hold the a_lock on the
         * underlying dummy as. This is mostly to satisfy the
         * underlying HAT layer.
         */
        AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
        if (type == F_SOFTLOCK) {
            /*
             * Load up the translation keeping it
             * locked and don't unlock the page.
             */
            hat_memload_array(sptseg->s_as->a_hat, sptseg_addr,
                ptob(npages), ppa, sptd->spt_prot,
                HAT_LOAD_LOCK | HAT_LOAD_SHARE);
        } else {
            if (hat == seg->s_as->a_hat) {

                /*
                 * Migrate pages marked for migration.
                 */
                if (lgrp_optimizations())
                    page_migrate(seg, shm_addr, ppa,
                        npages);

                /* CPU HAT */
                hat_memload_array(sptseg->s_as->a_hat,
                    sptseg_addr, ptob(npages), ppa,
                    sptd->spt_prot, HAT_LOAD_SHARE);
            } else {
                /* XHAT. Pass real address */
                hat_memload_array(hat, shm_addr,
                    ptob(npages), ppa, sptd->spt_prot,
                    HAT_LOAD_SHARE);
            }

            /*
             * And now drop the SE_SHARED lock(s).
             */
            for (i = 0; i < npages; i++)
                page_unlock(ppa[i]);
        }
        AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);

        kmem_free(ppa, sizeof (page_t *) * npages);
        return (0);

    case F_SOFTUNLOCK:

        /*
         * This is a bit ugly: we pass in the real seg pointer,
         * but the sptseg_addr is the virtual address within the
         * dummy seg.
         */
        segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
        return (0);

    case F_PROT:

        /*
         * This takes care of the unusual case where a user
         * allocates a stack in shared memory and a register
         * window overflow is written to that stack page before
         * it is otherwise modified.
         *
         * We can get away with this because ISM segments are
         * always rw. Other than this unusual case, there
         * should be no instances of protection violations.
         */
        return (0);

    default:
#ifdef DEBUG
        cmn_err(CE_WARN, "segspt_shmfault default type?");
#endif
        return (FC_NOMAP);
    }
}

/*ARGSUSED*/
static faultcode_t
segspt_shmfaulta(struct seg *seg, caddr_t addr)
{
    return (0);
}

/*ARGSUSED*/
static int
segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
    return (0);
}

/*ARGSUSED*/
static size_t
segspt_shmswapout(struct seg *seg)
{
    return (0);
}

/*
 * duplicate the shared page tables
 */
int
segspt_shmdup(struct seg *seg, struct seg *newseg)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct anon_map *amp = shmd->shm_amp;
    struct shm_data *shmd_new;
    struct seg *spt_seg = shmd->shm_sptseg;
    struct spt_data *sptd = spt_seg->s_data;
    int error = 0;

    ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

    shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
    newseg->s_data = (void *)shmd_new;
    shmd_new->shm_sptas = shmd->shm_sptas;
    shmd_new->shm_amp = amp;
    shmd_new->shm_sptseg = shmd->shm_sptseg;
    newseg->s_ops = &segspt_shmops;
    newseg->s_szc = seg->s_szc;
    ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);

    ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
    amp->refcnt++;
    ANON_LOCK_EXIT(&amp->a_rwlock);

    if (sptd->spt_flags & SHM_PAGEABLE) {
        shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
        shmd_new->shm_lckpgs = 0;
        if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
            if ((error = hat_share(newseg->s_as->a_hat,
                newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
                seg->s_size, seg->s_szc)) != 0) {
                kmem_free(shmd_new->shm_vpage,
                    btopr(amp->size));
            }
        }
        return (error);
    } else {
        return (hat_share(newseg->s_as->a_hat, newseg->s_base,
            shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
            seg->s_szc));
    }
}

/*ARGSUSED*/
int
segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    /*
     * ISM segment is always rw.
     */
    return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
}

/*
 * Return an array of locked large pages, for empty slots allocate
 * private zero-filled anon pages.
 */
static int
spt_anon_getpages(
    struct seg *sptseg,
    caddr_t sptaddr,
    size_t len,
    page_t *ppa[])
{
    struct spt_data *sptd = sptseg->s_data;
    struct anon_map *amp = sptd->spt_amp;
    enum seg_rw rw = sptd->spt_prot;
    uint_t szc = sptseg->s_szc;
    size_t pg_sz, share_sz = page_get_pagesize(szc);
    pgcnt_t lp_npgs;
    caddr_t lp_addr, e_sptaddr;
    uint_t vpprot, ppa_szc = 0;
    struct vpage *vpage = NULL;
    ulong_t j, ppa_idx;
    int err, ierr = 0;
    pgcnt_t an_idx;
    anon_sync_obj_t cookie;

    ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
    ASSERT(len != 0);

    pg_sz = share_sz;
    lp_npgs = btop(pg_sz);
    lp_addr = sptaddr;
    e_sptaddr = sptaddr + len;
    an_idx = seg_page(sptseg, sptaddr);
    ppa_idx = 0;

    ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
    /*CONSTCOND*/
    while (1) {
        for (; lp_addr < e_sptaddr;
            an_idx += lp_npgs, lp_addr += pg_sz,
            ppa_idx += lp_npgs) {

            anon_array_enter(amp, an_idx, &cookie);
            ppa_szc = (uint_t)-1;
            ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
                lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
                &ppa_szc, vpage, rw, 0, segvn_anypgsz, kcred);
            anon_array_exit(&cookie);

            if (ierr != 0) {
                if (ierr > 0) {
                    err = FC_MAKE_ERR(ierr);
                    goto lpgs_err;
                }
                break;
            }
        }
        if (lp_addr == e_sptaddr) {
            break;
        }
        ASSERT(lp_addr < e_sptaddr);

        /*
         * ierr == -1 means we failed to allocate a large page,
         * so do a size-down operation.
         *
         * ierr == -2 means some other process that privately shares
         * pages with this process has allocated a larger page and we
         * need to retry with larger pages. So do a size-up
         * operation. This relies on the fact that large pages are
         * never partially shared, i.e. if we share any constituent
         * page of a large page with another process we must share the
         * entire large page. Note this cannot happen for SOFTLOCK
         * case, unless current address (lp_addr) is at the beginning
         * of the next page size boundary because the other process
         * couldn't have relocated locked pages.
         */
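        /*
         * Illustrative retry walk (hypothetical szc values, assuming
         * segvn_anypgsz != 0 and sptseg->s_szc == 3): a -1 return steps
         * szc down 3 -> 2 -> 1 -> 0 until an allocation succeeds, while
         * a -2 return steps szc back up toward sptseg->s_szc to match
         * the larger page already created by the sharing process.
         */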
        ASSERT(ierr == -1 || ierr == -2);
        if (segvn_anypgsz) {
            ASSERT(ierr == -2 || szc != 0);
            ASSERT(ierr == -1 || szc < sptseg->s_szc);
            szc = (ierr == -1) ? szc - 1 : szc + 1;
        } else {
            /*
             * For faults and segvn_anypgsz == 0
             * we need to be careful not to loop forever
             * if existing page is found with szc other
             * than 0 or seg->s_szc. This could be due
             * to page relocations on behalf of DR or
             * more likely large page creation. For this
             * case simply re-size to existing page's szc
             * if returned by anon_map_getpages().
             */
            if (ppa_szc == (uint_t)-1) {
                szc = (ierr == -1) ? 0 : sptseg->s_szc;
            } else {
                ASSERT(ppa_szc <= sptseg->s_szc);
                ASSERT(ierr == -2 || ppa_szc < szc);
                ASSERT(ierr == -1 || ppa_szc > szc);
                szc = ppa_szc;
            }
        }
        pg_sz = page_get_pagesize(szc);
        lp_npgs = btop(pg_sz);
        ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
    }
    ANON_LOCK_EXIT(&amp->a_rwlock);
    return (0);

lpgs_err:
    ANON_LOCK_EXIT(&amp->a_rwlock);
    for (j = 0; j < ppa_idx; j++)
        page_unlock(ppa[j]);
    return (err);
}

int
spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
    page_t **ppa, ulong_t *lockmap, size_t pos)
{
    struct shm_data *shmd = seg->s_data;
    struct spt_data *sptd = shmd->shm_sptseg->s_data;
    ulong_t i;
    int kernel;

    for (i = 0; i < npages; anon_index++, pos++, i++) {
        if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
            if (sptd->spt_ppa_lckcnt[anon_index] <
                (ushort_t)DISM_LOCK_MAX) {
                if (++sptd->spt_ppa_lckcnt[anon_index] ==
                    (ushort_t)DISM_LOCK_MAX) {
                    cmn_err(CE_WARN,
                        "DISM page lock limit "
                        "reached on DISM offset 0x%lx\n",
                        anon_index << PAGESHIFT);
                }
                kernel = (sptd->spt_ppa &&
                    sptd->spt_ppa[anon_index]) ? 1 : 0;
                if (!page_pp_lock(ppa[i], 0, kernel)) {
                    /* unlock rest of the pages */
                    for (; i < npages; i++)
                        page_unlock(ppa[i]);
                    sptd->spt_ppa_lckcnt[anon_index]--;
                    return (EAGAIN);
                }
                shmd->shm_lckpgs++;
                shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
                if (lockmap != NULL)
                    BT_SET(lockmap, pos);
            }
        }
        page_unlock(ppa[i]);
    }
    return (0);
}

/*ARGSUSED*/
static int
segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos)
{
    struct shm_data *shmd = seg->s_data;
    struct seg *sptseg = shmd->shm_sptseg;
    struct spt_data *sptd = sptseg->s_data;
    pgcnt_t npages, a_npages;
    page_t **ppa;
    pgcnt_t an_idx, a_an_idx, ppa_idx;
    caddr_t spt_addr, a_addr;	/* spt and aligned address */
    size_t a_len;		/* aligned len */
    size_t share_sz;
    ulong_t i;
    int sts = 0;

    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
        return (0);
    }

    addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
    an_idx = seg_page(seg, addr);
    npages = btopr(len);

    if (an_idx + npages > btopr(shmd->shm_amp->size)) {
        return (ENOMEM);
    }

    if (op == MC_LOCK) {
        /*
         * Need to align addr and size request if they are not
         * aligned so we can always allocate large page(s); however
         * we only lock what was requested in the initial request.
         */
        share_sz = page_get_pagesize(sptseg->s_szc);
        a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
        a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
            share_sz);
        a_npages = btop(a_len);
        a_an_idx = seg_page(seg, a_addr);
        spt_addr = sptseg->s_base + ptob(a_an_idx);
        ppa_idx = an_idx - a_an_idx;

        if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
            KM_NOSLEEP)) == NULL) {
            return (ENOMEM);
        }
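
        /*
         * Illustrative arithmetic (hypothetical values, assuming 8K
         * pages and a 4M share_sz): a lock request starting 0x4000
         * bytes past a 4M boundary with len = 0x6000 gives npages = 3,
         * ppa_idx = 2, a_len = 4M and a_npages = 512.  The calls below
         * then fill ppa[0..511], lock only ppa[2..4], and the two
         * cleanup loops unlock ppa[0..1] and ppa[5..511].
         */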

        /*
         * Don't cache any new pages for IO and
         * flush any cached pages.
         */
        mutex_enter(&sptd->spt_lock);
        if (sptd->spt_ppa != NULL)
            sptd->spt_flags |= DISM_PPA_CHANGED;

        sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
        if (sts != 0) {
            mutex_exit(&sptd->spt_lock);
            kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
            return (sts);
        }

        sts = spt_lockpages(seg, an_idx, npages,
            &ppa[ppa_idx], lockmap, pos);
        /*
         * unlock remaining pages for requests which are not
         * aligned or not in 4 M chunks
         */
        for (i = 0; i < ppa_idx; i++)
            page_unlock(ppa[i]);
        for (i = ppa_idx + npages; i < a_npages; i++)
            page_unlock(ppa[i]);
        if (sptd->spt_ppa != NULL)
            sptd->spt_flags |= DISM_PPA_CHANGED;
        mutex_exit(&sptd->spt_lock);

        kmem_free(ppa, ((sizeof (page_t *)) * a_npages));

    } else if (op == MC_UNLOCK) {	/* unlock */
        struct anon_map *amp;
        struct anon *ap;
        struct vnode *vp;
        u_offset_t off;
        struct page *pp;
        int kernel;
        anon_sync_obj_t cookie;

        amp = sptd->spt_amp;
        mutex_enter(&sptd->spt_lock);
        if (shmd->shm_lckpgs == 0) {
            mutex_exit(&sptd->spt_lock);
            return (0);
        }
        /*
         * Don't cache new IO pages.
         */
        if (sptd->spt_ppa != NULL)
            sptd->spt_flags |= DISM_PPA_CHANGED;

        ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
        for (i = 0; i < npages; i++, an_idx++) {
            if (shmd->shm_vpage[an_idx] & DISM_PG_LOCKED) {
                anon_array_enter(amp, an_idx, &cookie);
                ap = anon_get_ptr(amp->ahp, an_idx);
                ASSERT(ap);
                ASSERT(sptd->spt_ppa_lckcnt[an_idx] > 0);

                swap_xlate(ap, &vp, &off);
                anon_array_exit(&cookie);
                pp = page_lookup(vp, off, SE_SHARED);
                ASSERT(pp);
                /*
                 * the availrmem is decremented only for
                 * pages which are not in seg pcache;
                 * for pages in seg pcache availrmem was
                 * decremented in _dismpagelock() (if
                 * they were not locked here)
                 */
                kernel = (sptd->spt_ppa &&
                    sptd->spt_ppa[an_idx]) ? 1 : 0;
                page_pp_unlock(pp, 0, kernel);
                page_unlock(pp);
                shmd->shm_vpage[an_idx] &= ~DISM_PG_LOCKED;
                sptd->spt_ppa_lckcnt[an_idx]--;
                shmd->shm_lckpgs--;
            }
        }
        ANON_LOCK_EXIT(&amp->a_rwlock);
        if (sptd->spt_ppa != NULL)
            sptd->spt_flags |= DISM_PPA_CHANGED;
        mutex_exit(&sptd->spt_lock);
    }
    return (sts);
}

/*ARGSUSED*/
int
segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
    spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;

    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    /*
     * ISM segment is always rw.
     */
    while (--pgno >= 0)
        *protv++ = sptd->spt_prot;
    return (0);
}

/*ARGSUSED*/
u_offset_t
segspt_shmgetoffset(struct seg *seg, caddr_t addr)
{
    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    /* Offset does not matter in ISM memory */

    return ((u_offset_t)0);
}

/* ARGSUSED */
int
segspt_shmgettype(struct seg *seg, caddr_t addr)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    /*
     * The shared memory mapping is always MAP_SHARED; swap is only
     * reserved for DISM.
     */
    return (MAP_SHARED |
        ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
}

/*ARGSUSED*/
int
segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    *vpp = sptd->spt_vp;
    return (0);
}

/*ARGSUSED*/
static int
segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
    struct anon_map *amp;
    pgcnt_t pg_idx;

    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    if (behav == MADV_FREE) {
        if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
            return (0);

        amp = sptd->spt_amp;
        pg_idx = seg_page(seg, addr);

        mutex_enter(&sptd->spt_lock);
        if (sptd->spt_ppa != NULL)
            sptd->spt_flags |= DISM_PPA_CHANGED;
        mutex_exit(&sptd->spt_lock);

        /*
         * Purge all DISM cached pages
         */
        seg_ppurge_seg(segspt_reclaim);

        mutex_enter(&sptd->spt_lock);
        ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
        anon_disclaim(amp, pg_idx, len, ANON_PGLOOKUP_BLK);
        ANON_LOCK_EXIT(&amp->a_rwlock);
        mutex_exit(&sptd->spt_lock);
    } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
        behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
        int already_set;
        ulong_t anon_index;
        lgrp_mem_policy_t policy;
        caddr_t shm_addr;
        size_t share_size;
        size_t size;
        struct seg *sptseg = shmd->shm_sptseg;
        caddr_t sptseg_addr;

        /*
         * Align address and length to page size of underlying segment
         */
        share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
        shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
        size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
            share_size);

        amp = shmd->shm_amp;
        anon_index = seg_page(seg, shm_addr);
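
        /*
         * Illustrative clipping (hypothetical values, assuming a 4M
         * share_size): the adjustment below trims the aligned size to
         * what was really created; e.g. with sptseg_addr 4M into the
         * dummy segment and spt_realsize = 0x402000, size is clipped
         * from 0x400000 down to 0x2000.
         */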

        /*
         * And now we may have to adjust size downward if we have
         * exceeded the realsize of the segment or initial anon
         * allocations.
         */
        sptseg_addr = sptseg->s_base + ptob(anon_index);
        if ((sptseg_addr + size) >
            (sptseg->s_base + sptd->spt_realsize))
            size = (sptseg->s_base + sptd->spt_realsize) -
                sptseg_addr;

        /*
         * Set memory allocation policy for this segment
         */
        policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
        already_set = lgrp_shm_policy_set(policy, amp, anon_index,
            NULL, 0, len);

        /*
         * If random memory allocation policy set already,
         * don't bother reapplying it.
         */
        if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
            return (0);

        /*
         * Mark any existing pages in the given range for
         * migration, flush the I/O page cache, and use the
         * underlying segment to calculate the anon index and to
         * get the anonmap and vnode pointers from.
         */
        if (shmd->shm_softlockcnt > 0)
            segspt_purge(seg);

        page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
    }

    return (0);
}

/*ARGSUSED*/
void
segspt_shmdump(struct seg *seg)
{
    /* no-op for ISM segment */
}

/*ARGSUSED*/
static faultcode_t
segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
    return (ENOTSUP);
}

/*
 * get a memory ID for an addr in a given segment
 */
static int
segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
    struct shm_data *shmd = (struct shm_data *)seg->s_data;
    struct anon *ap;
    size_t anon_index;
    struct anon_map *amp = shmd->shm_amp;
    struct spt_data *sptd = shmd->shm_sptseg->s_data;
    struct seg *sptseg = shmd->shm_sptseg;
    anon_sync_obj_t cookie;

    anon_index = seg_page(seg, addr);

    if (addr > (seg->s_base + sptd->spt_realsize)) {
        return (EFAULT);
    }

    ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
    anon_array_enter(amp, anon_index, &cookie);
    ap = anon_get_ptr(amp->ahp, anon_index);
    if (ap == NULL) {
        struct page *pp;
        caddr_t spt_addr = sptseg->s_base + ptob(anon_index);

        pp = anon_zero(sptseg, spt_addr, &ap, kcred);
        if (pp == NULL) {
            anon_array_exit(&cookie);
            ANON_LOCK_EXIT(&amp->a_rwlock);
            return (ENOMEM);
        }
        (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
        page_unlock(pp);
    }
    anon_array_exit(&cookie);
    ANON_LOCK_EXIT(&amp->a_rwlock);
    memidp->val[0] = (uintptr_t)ap;
    memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
    return (0);
}
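
/*
 * Illustrative note (hypothetical address, assuming 8K pages): for an
 * addr 0x12345 bytes into the segment, the memid above carries the
 * backing anon slot for page index 9 in val[0] and the within-page
 * offset 0x345 in val[1].
 */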

/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
{
    struct anon_map *amp;
    ulong_t anon_index;
    lgrp_mem_policy_info_t *policy_info;
    struct shm_data *shm_data;

    ASSERT(seg != NULL);

    /*
     * Get anon_map from segshm
     *
     * Assume that no lock needs to be held on anon_map, since
     * it should be protected by its reference count, which must be
     * nonzero for an existing segment.
     * Need to grab readers lock on policy tree though.
     */
    shm_data = (struct shm_data *)seg->s_data;
    if (shm_data == NULL)
        return (NULL);
    amp = shm_data->shm_amp;
    ASSERT(amp->refcnt != 0);

    /*
     * Get policy info
     *
     * Assume starting anon index of 0
     */
    anon_index = seg_page(seg, addr);
    policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

    return (policy_info);
}

/*ARGSUSED*/
static int
segspt_shmcapable(struct seg *seg, segcapability_t capability)
{
    return (0);
}