/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/user.h>
#include <sys/mman.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/tuneable.h>
#include <vm/hat.h>
#include <vm/seg.h>
#include <vm/as.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <sys/buf.h>
#include <sys/swap.h>
#include <sys/atomic.h>
#include <vm/seg_spt.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/shm.h>
#include <sys/lgrp.h>
#include <sys/vmsystm.h>

#include <sys/tnf_probe.h>

#define	SEGSPTADDR	(caddr_t)0x0

/*
 * # pages used for spt
 */
static size_t	spt_used;

/*
 * segspt_minfree is the memory left for system after ISM
 * locked its pages; it is set up to 5% of availrmem in
 * sptcreate when ISM is created.  ISM should not use more
 * than ~90% of availrmem; if it does, then the performance
 * of the system may decrease. Machines with large memories may
 * be able to use up more memory for ISM so we set the default
 * segspt_minfree to 5% (which gives ISM max 95% of availrmem).
 * If somebody wants even more memory for ISM (risking hanging
 * the system) they can patch the segspt_minfree to a smaller number.
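 *
 * For example, with availrmem at 1,000,000 pages, sptcreate() below sets
 * segspt_minfree to 50,000 pages (availrmem / 20), so by default ISM can
 * lock at most roughly 95% of available memory.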
 */
pgcnt_t segspt_minfree = 0;

static int segspt_create(struct seg *seg, caddr_t argsp);
static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_free(struct seg *seg);
static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);

static void
segspt_badop()
{
	panic("segspt_badop called");
	/*NOTREACHED*/
}

#define	SEGSPT_BADOP(t)	(t(*)())segspt_badop

struct seg_ops segspt_ops = {
	SEGSPT_BADOP(int),		/* dup */
	segspt_unmap,
	segspt_free,
	SEGSPT_BADOP(int),		/* fault */
	SEGSPT_BADOP(faultcode_t),	/* faulta */
	SEGSPT_BADOP(int),		/* setprot */
	SEGSPT_BADOP(int),		/* checkprot */
	SEGSPT_BADOP(int),		/* kluster */
	SEGSPT_BADOP(size_t),		/* swapout */
	SEGSPT_BADOP(int),		/* sync */
	SEGSPT_BADOP(size_t),		/* incore */
	SEGSPT_BADOP(int),		/* lockop */
	SEGSPT_BADOP(int),		/* getprot */
	SEGSPT_BADOP(u_offset_t),	/* getoffset */
	SEGSPT_BADOP(int),		/* gettype */
	SEGSPT_BADOP(int),		/* getvp */
	SEGSPT_BADOP(int),		/* advise */
	SEGSPT_BADOP(void),		/* dump */
	SEGSPT_BADOP(int),		/* pagelock */
	SEGSPT_BADOP(int),		/* setpgsz */
	SEGSPT_BADOP(int),		/* getmemid */
	segspt_getpolicy,		/* getpolicy */
};

static int segspt_shmdup(struct seg *seg, struct seg *newseg);
static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_shmfree(struct seg *seg);
static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
		caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
			register size_t len, register uint_t prot);
static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
			uint_t prot);
static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t segspt_shmswapout(struct seg *seg);
static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
			register char *vec);
static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
			int attr, uint_t flags);
static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
			int attr, int op, ulong_t *lockmap, size_t pos);
static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t *protv);
static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
static int segspt_shmgettype(struct seg *seg, caddr_t addr);
static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
			uint_t behav);
static void segspt_shmdump(struct seg *seg);
static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
			struct page ***, enum lock_type, enum seg_rw);
static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);

struct seg_ops segspt_shmops = {
	segspt_shmdup,
	segspt_shmunmap,
	segspt_shmfree,
	segspt_shmfault,
	segspt_shmfaulta,
	segspt_shmsetprot,
	segspt_shmcheckprot,
	segspt_shmkluster,
	segspt_shmswapout,
	segspt_shmsync,
	segspt_shmincore,
	segspt_shmlockop,
	segspt_shmgetprot,
	segspt_shmgetoffset,
	segspt_shmgettype,
	segspt_shmgetvp,
	segspt_shmadvise,	/* advise */
	segspt_shmdump,
	segspt_shmpagelock,
	segspt_shmsetpgsz,
	segspt_shmgetmemid,
	segspt_shmgetpolicy,
};

static void segspt_purge(struct seg *seg);
static int segspt_reclaim(struct seg *, caddr_t, size_t, struct page **,
		enum seg_rw);
static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
		page_t **ppa);



/*ARGSUSED*/
int
sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
	uint_t prot, uint_t flags, uint_t share_szc)
{
	int err;
	struct as *newas;
	struct segspt_crargs sptcargs;

#ifdef DEBUG
	TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
	    tnf_ulong, size, size );
#endif
	if (segspt_minfree == 0)	/* leave min 5% of availrmem for */
		segspt_minfree = availrmem/20;	/* for the system */

	if (!hat_supported(HAT_SHARED_PT, (void *)0))
		return (EINVAL);

	/*
	 * get a new as for this shared memory segment
	 */
	newas = as_alloc();
	sptcargs.amp = amp;
	sptcargs.prot = prot;
	sptcargs.flags = flags;
	sptcargs.szc = share_szc;

	/*
	 * create a shared page table (spt) segment
	 */

	if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
		as_free(newas);
		return (err);
	}
	*sptseg = sptcargs.seg_spt;
	return (0);
}

void
sptdestroy(struct as *as, struct anon_map *amp)
{

#ifdef DEBUG
	TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
#endif
	(void) as_unmap(as, SEGSPTADDR, amp->size);
	as_free(as);
}

/*
 * called from seg_free().
 * free (i.e., unlock, unmap, return to free list)
 * all the pages in the given seg.
 */
void
segspt_free(struct seg *seg)
{
	struct spt_data *sptd = (struct spt_data *)seg->s_data;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	if (sptd != NULL) {
		if (sptd->spt_realsize)
			segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);

		if (sptd->spt_ppa_lckcnt)
			kmem_free(sptd->spt_ppa_lckcnt,
			    sizeof (*sptd->spt_ppa_lckcnt)
			    * btopr(sptd->spt_amp->size));
		kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
		mutex_destroy(&sptd->spt_lock);
		kmem_free(sptd, sizeof (*sptd));
	}
}

/*ARGSUSED*/
static int
segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
	uint_t flags)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	return (0);
}

/*ARGSUSED*/
static size_t
segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	caddr_t eo_seg;
	pgcnt_t npages;
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg;
	struct spt_data *sptd;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
#ifdef lint
	seg = seg;
#endif
	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		eo_seg = addr + len;
		while (addr < eo_seg) {
			/* page exists, and it's locked. */
			*vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
			    SEG_PAGE_ANON;
			addr += PAGESIZE;
		}
		return (len);
	} else {
		struct anon_map *amp = shmd->shm_amp;
		struct anon *ap;
		page_t *pp;
		pgcnt_t anon_index;
		struct vnode *vp;
		u_offset_t off;
		ulong_t i;
		int ret;
		anon_sync_obj_t cookie;

		addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
		anon_index = seg_page(seg, addr);
		npages = btopr(len);
		if (anon_index + npages > btopr(shmd->shm_amp->size)) {
			return (EINVAL);
		}
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, anon_index++) {
			ret = 0;
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
			if (ap != NULL) {
				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup_nowait(vp, off, SE_SHARED);
				if (pp != NULL) {
					ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
					page_unlock(pp);
				}
			} else {
				anon_array_exit(&cookie);
			}
			if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
				ret |= SEG_PAGE_LOCKED;
			}
			*vec++ = (char)ret;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		return (len);
	}
}

static int
segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
{
	size_t share_size;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * seg.s_size may have been rounded up to the largest page size
	 * in shmat().
	 * XXX This should be cleaned up. sptdestroy should take a length
	 * argument which should be the same as sptcreate. Then
	 * this rounding would not be needed (or is done in shm.c)
	 * Only the check for full segment will be needed.
	 *
	 * XXX -- shouldn't raddr == 0 always? These tests don't seem
	 * to be useful at all.
	 */
	share_size = page_get_pagesize(seg->s_szc);
	ssize = P2ROUNDUP(ssize, share_size);

	if (raddr == seg->s_base && ssize == seg->s_size) {
		seg_free(seg);
		return (0);
	} else
		return (EINVAL);
}

int
segspt_create(struct seg *seg, caddr_t argsp)
{
	int err;
	caddr_t addr = seg->s_base;
	struct spt_data *sptd;
	struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
	struct anon_map *amp = sptcargs->amp;
	struct cred *cred = CRED();
	ulong_t i, j, anon_index = 0;
	pgcnt_t npages = btopr(amp->size);
	struct vnode *vp;
	page_t **ppa;
	uint_t hat_flags;

	/*
	 * We are holding the a_lock on the underlying dummy as,
	 * so we can make calls to the HAT layer.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

#ifdef DEBUG
	TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
	    tnf_opaque, addr, addr,
	    tnf_ulong, len, seg->s_size);
#endif
	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
		if (err = anon_swap_adjust(npages))
			return (err);
	}
	err = ENOMEM;

	if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
		goto out1;

	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
		    KM_NOSLEEP)) == NULL)
			goto out2;
	}

	mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);

	if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
		goto out3;

	seg->s_ops = &segspt_ops;
	sptd->spt_vp = vp;
	sptd->spt_amp = amp;
	sptd->spt_prot = sptcargs->prot;
	sptd->spt_flags = sptcargs->flags;
	seg->s_data = (caddr_t)sptd;
	sptd->spt_ppa = NULL;
	sptd->spt_ppa_lckcnt = NULL;
	seg->s_szc = sptcargs->szc;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	amp->a_szc = seg->s_szc;
	ANON_LOCK_EXIT(&amp->a_rwlock);

	/*
	 * Set policy to affect initial allocation of pages in
	 * anon_map_createpages()
	 */
	(void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
	    NULL, 0, ptob(npages));

	if (sptcargs->flags & SHM_PAGEABLE) {
		size_t share_sz;
		pgcnt_t new_npgs, more_pgs;
		struct anon_hdr *nahp;

		share_sz = page_get_pagesize(seg->s_szc);
		if (!IS_P2ALIGNED(amp->size, share_sz)) {
			/*
			 * We are rounding up the size of the anon array
			 * on 4 M boundary because we always create 4 M
			 * of page(s) when locking, faulting pages and we
			 * don't have to check for all corner cases e.g.
			 * if there is enough space to allocate 4 M
			 * page.
			 */
			new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
			more_pgs = new_npgs - npages;

			if (anon_resv(ptob(more_pgs)) == 0) {
				err = ENOMEM;
				goto out4;
			}
			nahp = anon_create(new_npgs, ANON_SLEEP);
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
			    ANON_SLEEP);
			anon_release(amp->ahp, npages);
			amp->ahp = nahp;
			amp->swresv = amp->size = ptob(new_npgs);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			npages = new_npgs;
		}

		sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
		    sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
		sptd->spt_pcachecnt = 0;
		sptd->spt_realsize = ptob(npages);
		sptcargs->seg_spt = seg;
		return (0);
	}

	/*
	 * get array of pages for each anon slot in amp
	 */
	if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
	    seg, addr, S_CREATE, cred)) != 0)
		goto out4;

	/*
	 * addr is initial address corresponding to the first page on ppa list
	 */
	for (i = 0; i < npages; i++) {
		/* attempt to lock all pages */
		if (!page_pp_lock(ppa[i], 0, 1)) {
			/*
			 * if unable to lock any page, unlock all
			 * of them and return error
			 */
			for (j = 0; j < i; j++)
				page_pp_unlock(ppa[j], 0, 1);
			for (i = 0; i < npages; i++) {
				page_unlock(ppa[i]);
			}
			err = ENOMEM;
			goto out4;
		}
	}

	/*
	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
	 * for the entire life of the segment. For example platforms
	 * that do not support Dynamic Reconfiguration.
	 */
	hat_flags = HAT_LOAD_SHARE;
	if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
		hat_flags |= HAT_LOAD_LOCK;

	hat_memload_array(seg->s_as->a_hat, addr, ptob(npages),
	    ppa, sptd->spt_prot, hat_flags);

	/*
	 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
	 * we will leave the pages locked SE_SHARED for the life
	 * of the ISM segment. This will prevent any calls to
	 * hat_pageunload() on this ISM segment for those platforms.
	 */
	if (!(hat_flags & HAT_LOAD_LOCK)) {
		/*
		 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
		 * we no longer need to hold the SE_SHARED lock on the pages,
		 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
		 * SE_SHARED lock on the pages as necessary.
		 */
		for (i = 0; i < npages; i++)
			page_unlock(ppa[i]);
	}
	sptd->spt_pcachecnt = 0;
	kmem_free(ppa, ((sizeof (page_t *)) * npages));
	sptd->spt_realsize = ptob(npages);
	atomic_add_long(&spt_used, npages);
	sptcargs->seg_spt = seg;
	return (0);

out4:
	seg->s_data = NULL;
	kmem_free(vp, sizeof (*vp));
out3:
	mutex_destroy(&sptd->spt_lock);
	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
		kmem_free(ppa, (sizeof (*ppa) * npages));
out2:
	kmem_free(sptd, sizeof (*sptd));
out1:
	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
		anon_swap_restore(npages);
	return (err);
}

/*ARGSUSED*/
void
segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
{
	struct page *pp;
	struct spt_data *sptd = (struct spt_data *)seg->s_data;
	pgcnt_t npages;
	ulong_t anon_idx;
	struct anon_map *amp;
	struct anon *ap;
	struct vnode *vp;
	u_offset_t off;
	uint_t hat_flags;
	int root = 0;
	pgcnt_t pgs, curnpgs = 0;
	page_t *rootpp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	npages = btop(len);

	hat_flags = HAT_UNLOAD_UNLOCK;
	if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
	    (sptd->spt_flags & SHM_PAGEABLE)) {
		hat_flags = HAT_UNLOAD;
	}

	hat_unload(seg->s_as->a_hat, addr, len, hat_flags);

	amp = sptd->spt_amp;
	if (sptd->spt_flags & SHM_PAGEABLE)
		npages = btop(amp->size);

	ASSERT(amp);
	for (anon_idx = 0; anon_idx < npages; anon_idx++) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
			if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
				panic("segspt_free_pages: null app");
				/*NOTREACHED*/
			}
		} else {
			if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
			    == NULL)
				continue;
		}
		ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
		swap_xlate(ap, &vp, &off);

		/*
		 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
		 * the pages won't be having SE_SHARED lock at this
		 * point.
		 *
		 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
		 * the pages are still held SE_SHARED locked from the
		 * original segspt_create()
		 *
		 * Our goal is to get SE_EXCL lock on each page, remove
		 * permanent lock on it and invalidate the page.
		 */
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
			if (hat_flags == HAT_UNLOAD)
				pp = page_lookup(vp, off, SE_EXCL);
			else {
				if ((pp = page_find(vp, off)) == NULL) {
					panic("segspt_free_pages: "
					    "page not locked");
					/*NOTREACHED*/
				}
				if (!page_tryupgrade(pp)) {
					page_unlock(pp);
					pp = page_lookup(vp, off, SE_EXCL);
				}
			}
			if (pp == NULL) {
				panic("segspt_free_pages: "
				    "page not in the system");
				/*NOTREACHED*/
			}
			page_pp_unlock(pp, 0, 1);
		} else {
			if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
				continue;
			page_pp_unlock(pp, 0, 0);
		}
		/*
		 * It's logical to invalidate the pages here as in most cases
		 * these were created by segspt.
		 */
		if (pp->p_szc != 0) {
			/*
			 * For DISM swap is released in shm_rm_amp.
			 */
			if ((sptd->spt_flags & SHM_PAGEABLE) == 0 &&
			    ap->an_pvp != NULL) {
				panic("segspt_free_pages: pvp non NULL");
				/*NOTREACHED*/
			}
			if (root == 0) {
				ASSERT(curnpgs == 0);
				root = 1;
				rootpp = pp;
				pgs = curnpgs = page_get_pagecnt(pp->p_szc);
				ASSERT(pgs > 1);
				ASSERT(IS_P2ALIGNED(pgs, pgs));
				ASSERT(!(page_pptonum(pp) & (pgs - 1)));
				curnpgs--;
			} else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
				ASSERT(curnpgs == 1);
				ASSERT(page_pptonum(pp) ==
				    page_pptonum(rootpp) + (pgs - 1));
				page_destroy_pages(rootpp);
				root = 0;
				curnpgs = 0;
			} else {
				ASSERT(curnpgs > 1);
				ASSERT(page_pptonum(pp) ==
				    page_pptonum(rootpp) + (pgs - curnpgs));
				curnpgs--;
			}
		} else {
			if (root != 0 || curnpgs != 0) {
				panic("segspt_free_pages: bad large page");
				/*NOTREACHED*/
			}
			/*LINTED: constant in conditional context */
			VN_DISPOSE(pp, B_INVAL, 0, kcred);
		}
	}

	if (root != 0 || curnpgs != 0) {
		panic("segspt_free_pages: bad large page");
		/*NOTREACHED*/
	}

	/*
	 * mark that pages have been released
	 */
	sptd->spt_realsize = 0;

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		atomic_add_long(&spt_used, -npages);
		anon_swap_restore(npages);
	}
}

/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_getpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map *amp;
	ulong_t anon_index;
	lgrp_mem_policy_info_t *policy_info;
	struct spt_data *spt_data;

	ASSERT(seg != NULL);

	/*
	 * Get anon_map from segspt
	 *
	 * Assume that no lock needs to be held on anon_map, since
	 * it should be protected by its reference count which must be
	 * nonzero for an existing segment
	 * Need to grab readers lock on policy tree though
	 */
	spt_data = (struct spt_data *)seg->s_data;
	if (spt_data == NULL)
		return (NULL);
	amp = spt_data->spt_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Get policy info
	 *
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

	return (policy_info);
}

/*
 * DISM only.
 * Return locked pages over a given range.
 *
 * We will cache all DISM locked pages and save the pplist for the
 * entire segment in the ppa field of the underlying DISM segment structure.
 * Later, during a call to segspt_reclaim() we will use this ppa array
 * to page_unlock() all of the pages and then we will free this ppa list.
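 *
 * The ppa[] is built lazily on the first L_PAGELOCK call and is destroyed
 * in segspt_reclaim() once spt_pcachecnt drops back to zero.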
 */
/*ARGSUSED*/
static int
segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t pg_idx, npages, tot_npages, npgs;
	struct page **pplist, **pl, **ppa, *pp;
	struct anon_map *amp;
	spgcnt_t an_idx;
	int ret = ENOTSUP;
	uint_t pl_built = 0;
	struct anon *ap;
	struct vnode *vp;
	u_offset_t off;
	pgcnt_t claim_availrmem = 0;
	uint_t szc;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * We want to lock/unlock the entire ISM segment. Therefore,
	 * we will be using the underlying sptseg and its base address
	 * and length for the caching arguments.
	 */
	ASSERT(sptseg);
	ASSERT(sptd);

	pg_idx = seg_page(seg, addr);
	npages = btopr(len);

	/*
	 * check if the request is larger than number of pages covered
	 * by amp
	 */
	if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
		*ppp = NULL;
		return (ENOTSUP);
	}

	if (type == L_PAGEUNLOCK) {
		ASSERT(sptd->spt_ppa != NULL);

		seg_pinactive(seg, seg->s_base, sptd->spt_amp->size,
		    sptd->spt_ppa, sptd->spt_prot, segspt_reclaim);

		/*
		 * If someone is blocked while unmapping, we purge
		 * segment page cache and thus reclaim pplist synchronously
		 * without waiting for seg_pasync_thread. This speeds up
		 * unmapping in cases where munmap(2) is called, while
		 * raw async i/o is still in progress or where a thread
		 * exits on data fault in a multithreaded application.
		 */
		if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
			segspt_purge(seg);
		}
		return (0);
	} else if (type == L_PAGERECLAIM) {
		ASSERT(sptd->spt_ppa != NULL);
		(void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size,
		    sptd->spt_ppa, sptd->spt_prot);
		return (0);
	}

	if (sptd->spt_flags & DISM_PPA_CHANGED) {
		segspt_purge(seg);
		/*
		 * for DISM ppa needs to be rebuilt since
		 * number of locked pages could be changed
		 */
		*ppp = NULL;
		return (ENOTSUP);
	}

	/*
	 * First try to find pages in segment page cache, without
	 * holding the segment lock.
	 */
	pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size,
	    sptd->spt_prot);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa != NULL);
		ASSERT(sptd->spt_ppa == pplist);
		ppa = sptd->spt_ppa;
		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
			if (ppa[an_idx] == NULL) {
				seg_pinactive(seg, seg->s_base,
				    sptd->spt_amp->size, ppa,
				    sptd->spt_prot, segspt_reclaim);
				*ppp = NULL;
				return (ENOTSUP);
			}
			if ((szc = ppa[an_idx]->p_szc) != 0) {
				npgs = page_get_pagecnt(szc);
				an_idx = P2ROUNDUP(an_idx + 1, npgs);
			} else {
				an_idx++;
			}
		}
		/*
		 * Since we cache the entire DISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. pg_idx.
		 */
		*ppp = &(sptd->spt_ppa[pg_idx]);
		return (0);
	}

	/* The L_PAGELOCK case... */
	mutex_enter(&sptd->spt_lock);
	/*
	 * try to find pages in segment page cache with mutex
	 */
	pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size,
	    sptd->spt_prot);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa != NULL);
		ASSERT(sptd->spt_ppa == pplist);
		ppa = sptd->spt_ppa;
		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
			if (ppa[an_idx] == NULL) {
				mutex_exit(&sptd->spt_lock);
				seg_pinactive(seg, seg->s_base,
				    sptd->spt_amp->size, ppa,
				    sptd->spt_prot, segspt_reclaim);
				*ppp = NULL;
				return (ENOTSUP);
			}
			if ((szc = ppa[an_idx]->p_szc) != 0) {
				npgs = page_get_pagecnt(szc);
				an_idx = P2ROUNDUP(an_idx + 1, npgs);
			} else {
				an_idx++;
			}
		}
		/*
		 * Since we cache the entire DISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. pg_idx.
		 */
		mutex_exit(&sptd->spt_lock);
		*ppp = &(sptd->spt_ppa[pg_idx]);
		return (0);
	}
	if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) ==
	    SEGP_FAIL) {
		mutex_exit(&sptd->spt_lock);
		*ppp = NULL;
		return (ENOTSUP);
	}

	/*
	 * No need to worry about protections because DISM pages are always rw.
	 */
	pl = pplist = NULL;
	amp = sptd->spt_amp;

	/*
	 * Do we need to build the ppa array?
	 */
	if (sptd->spt_ppa == NULL) {
		pgcnt_t lpg_cnt = 0;

		pl_built = 1;
		tot_npages = btopr(sptd->spt_amp->size);

		ASSERT(sptd->spt_pcachecnt == 0);
		pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
		pl = pplist;

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		for (an_idx = 0; an_idx < tot_npages; ) {
			ap = anon_get_ptr(amp->ahp, an_idx);
			/*
			 * Cache only mlocked pages. For large pages
			 * if one (constituent) page is mlocked
			 * all pages for that large page
			 * are cached also. This is for quick
			 * lookups of ppa array;
			 */
			if ((ap != NULL) && (lpg_cnt != 0 ||
			    (sptd->spt_ppa_lckcnt[an_idx] != 0))) {

				swap_xlate(ap, &vp, &off);
				pp = page_lookup(vp, off, SE_SHARED);
				ASSERT(pp != NULL);
				if (lpg_cnt == 0) {
					npgs = page_get_pagecnt(pp->p_szc);
					if (!IS_P2ALIGNED(an_idx, npgs)) {
						an_idx = P2ALIGN(an_idx, npgs);
						page_unlock(pp);
						continue;
					}
				}
				if (++lpg_cnt == npgs)
					lpg_cnt = 0;

				/*
				 * availrmem is decremented only
				 * for unlocked pages
				 */
				if (sptd->spt_ppa_lckcnt[an_idx] == 0)
					claim_availrmem++;
				pplist[an_idx] = pp;
			}
			an_idx++;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);

		mutex_enter(&freemem_lock);
		if (availrmem < tune.t_minarmem + claim_availrmem) {
			mutex_exit(&freemem_lock);
			ret = FC_MAKE_ERR(ENOMEM);
			claim_availrmem = 0;
			goto insert_fail;
		} else {
			availrmem -= claim_availrmem;
		}
		mutex_exit(&freemem_lock);

		sptd->spt_ppa = pl;
	} else {
		/*
		 * We already have a valid ppa[].
		 */
		pl = sptd->spt_ppa;
	}

	ASSERT(pl != NULL);

	ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size,
	    pl, sptd->spt_prot, SEGP_FORCE_WIRED | SEGP_ASYNC_FLUSH,
	    segspt_reclaim);
	if (ret == SEGP_FAIL) {
		/*
		 * seg_pinsert failed. We return
		 * ENOTSUP, so that the as_pagelock() code will
		 * then try the slower F_SOFTLOCK path.
		 */
		sptd->spt_ppa = NULL;
		ret = ENOTSUP;
		goto insert_fail;
	}

	/*
	 * In either case, we increment softlockcnt on the 'real' segment.
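	 * This keeps segspt_shmunmap() from freeing the segment while the
	 * cached pages are still outstanding; it purges the cache and/or
	 * returns EAGAIN until shm_softlockcnt drops back to zero.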
	 */
	sptd->spt_pcachecnt++;
	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);

	ppa = sptd->spt_ppa;
	for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
		if (ppa[an_idx] == NULL) {
			mutex_exit(&sptd->spt_lock);
			seg_pinactive(seg, seg->s_base, sptd->spt_amp->size,
			    pl, sptd->spt_prot, segspt_reclaim);
			*ppp = NULL;
			return (ENOTSUP);
		}
		if ((szc = ppa[an_idx]->p_szc) != 0) {
			npgs = page_get_pagecnt(szc);
			an_idx = P2ROUNDUP(an_idx + 1, npgs);
		} else {
			an_idx++;
		}
	}
	/*
	 * We can now drop the sptd->spt_lock since the ppa[]
	 * exists and we have incremented pcachecnt.
	 */
	mutex_exit(&sptd->spt_lock);

	/*
	 * Since we cache the entire segment, we want to
	 * set ppp to point to the first slot that corresponds
	 * to the requested addr, i.e. pg_idx.
	 */
	*ppp = &(sptd->spt_ppa[pg_idx]);
	return (ret);

insert_fail:
	/*
	 * We will only reach this code if we tried and failed.
	 *
	 * And we can drop the lock on the dummy seg, once we've failed
	 * to set up a new ppa[].
	 */
	mutex_exit(&sptd->spt_lock);

	if (pl_built) {
		mutex_enter(&freemem_lock);
		availrmem += claim_availrmem;
		mutex_exit(&freemem_lock);

		/*
		 * We created pl and we need to destroy it.
		 */
		pplist = pl;
		for (an_idx = 0; an_idx < tot_npages; an_idx++) {
			if (pplist[an_idx] != NULL)
				page_unlock(pplist[an_idx]);
		}
		kmem_free(pl, sizeof (page_t *) * tot_npages);
	}

	if (shmd->shm_softlockcnt <= 0) {
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		}
	}
	*ppp = NULL;
	return (ret);
}



/*
 * return locked pages over a given range.
 *
 * We will cache the entire ISM segment and save the pplist for the
 * entire segment in the ppa field of the underlying ISM segment structure.
 * Later, during a call to segspt_reclaim() we will use this ppa array
 * to page_unlock() all of the pages and then we will free this ppa list.
 */
/*ARGSUSED*/
static int
segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t np, page_index, npages;
	caddr_t a, spt_base;
	struct page **pplist, **pl, *pp;
	struct anon_map *amp;
	ulong_t anon_index;
	int ret = ENOTSUP;
	uint_t pl_built = 0;
	struct anon *ap;
	struct vnode *vp;
	u_offset_t off;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * We want to lock/unlock the entire ISM segment. Therefore,
	 * we will be using the underlying sptseg and its base address
	 * and length for the caching arguments.
	 */
	ASSERT(sptseg);
	ASSERT(sptd);

	if (sptd->spt_flags & SHM_PAGEABLE) {
		return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
	}

	page_index = seg_page(seg, addr);
	npages = btopr(len);

	/*
	 * check if the request is larger than number of pages covered
	 * by amp
	 */
	if (page_index + npages > btopr(sptd->spt_amp->size)) {
		*ppp = NULL;
		return (ENOTSUP);
	}

	if (type == L_PAGEUNLOCK) {

		ASSERT(sptd->spt_ppa != NULL);

		seg_pinactive(seg, seg->s_base, sptd->spt_amp->size,
		    sptd->spt_ppa, sptd->spt_prot, segspt_reclaim);

		/*
		 * If someone is blocked while unmapping, we purge
		 * segment page cache and thus reclaim pplist synchronously
		 * without waiting for seg_pasync_thread. This speeds up
		 * unmapping in cases where munmap(2) is called, while
		 * raw async i/o is still in progress or where a thread
		 * exits on data fault in a multithreaded application.
		 */
		if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
			segspt_purge(seg);
		}
		return (0);
	} else if (type == L_PAGERECLAIM) {
		ASSERT(sptd->spt_ppa != NULL);

		(void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size,
		    sptd->spt_ppa, sptd->spt_prot);
		return (0);
	}

	/*
	 * First try to find pages in segment page cache, without
	 * holding the segment lock.
	 */
	pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size,
	    sptd->spt_prot);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa == pplist);
		ASSERT(sptd->spt_ppa[page_index]);
		/*
		 * Since we cache the entire ISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. page_index.
		 */
		*ppp = &(sptd->spt_ppa[page_index]);
		return (0);
	}

	/* The L_PAGELOCK case... */
	mutex_enter(&sptd->spt_lock);

	/*
	 * try to find pages in segment page cache
	 */
	pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size,
	    sptd->spt_prot);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa == pplist);
		/*
		 * Since we cache the entire segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. page_index.
		 */
		mutex_exit(&sptd->spt_lock);
		*ppp = &(sptd->spt_ppa[page_index]);
		return (0);
	}

	if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) ==
	    SEGP_FAIL) {
		mutex_exit(&sptd->spt_lock);
		*ppp = NULL;
		return (ENOTSUP);
	}

	/*
	 * No need to worry about protections because ISM pages
	 * are always rw.
	 */
	pl = pplist = NULL;

	/*
	 * Do we need to build the ppa array?
	 */
	if (sptd->spt_ppa == NULL) {
		ASSERT(sptd->spt_ppa == pplist);

		spt_base = sptseg->s_base;
		pl_built = 1;

		/*
		 * availrmem is decremented once during anon_swap_adjust()
		 * and is incremented during the anon_unresv(), which is
		 * called from shm_rm_amp() when the segment is destroyed.
		 */
		amp = sptd->spt_amp;
		ASSERT(amp != NULL);

		/* pcachecnt is protected by sptd->spt_lock */
		ASSERT(sptd->spt_pcachecnt == 0);
		pplist = kmem_zalloc(sizeof (page_t *)
		    * btopr(sptd->spt_amp->size), KM_SLEEP);
		pl = pplist;

		anon_index = seg_page(sptseg, spt_base);

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
		    a += PAGESIZE, anon_index++, pplist++) {
			ap = anon_get_ptr(amp->ahp, anon_index);
			ASSERT(ap != NULL);
			swap_xlate(ap, &vp, &off);
			pp = page_lookup(vp, off, SE_SHARED);
			ASSERT(pp != NULL);
			*pplist = pp;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);

		if (a < (spt_base + sptd->spt_amp->size)) {
			ret = ENOTSUP;
			goto insert_fail;
		}
		sptd->spt_ppa = pl;
	} else {
		/*
		 * We already have a valid ppa[].
		 */
		pl = sptd->spt_ppa;
	}

	ASSERT(pl != NULL);

	ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size,
	    pl, sptd->spt_prot, SEGP_FORCE_WIRED, segspt_reclaim);
	if (ret == SEGP_FAIL) {
		/*
		 * seg_pinsert failed. We return
		 * ENOTSUP, so that the as_pagelock() code will
		 * then try the slower F_SOFTLOCK path.
		 */
		if (pl_built) {
			/*
			 * No one else has referenced the ppa[].
			 * We created it and we need to destroy it.
			 */
			sptd->spt_ppa = NULL;
		}
		ret = ENOTSUP;
		goto insert_fail;
	}

	/*
	 * In either case, we increment softlockcnt on the 'real' segment.
	 */
	sptd->spt_pcachecnt++;
	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);

	/*
	 * We can now drop the sptd->spt_lock since the ppa[]
	 * exists and we have incremented pcachecnt.
	 */
	mutex_exit(&sptd->spt_lock);

	/*
	 * Since we cache the entire segment, we want to
	 * set ppp to point to the first slot that corresponds
	 * to the requested addr, i.e. page_index.
	 */
	*ppp = &(sptd->spt_ppa[page_index]);
	return (ret);

insert_fail:
	/*
	 * We will only reach this code if we tried and failed.
	 *
	 * And we can drop the lock on the dummy seg, once we've failed
	 * to set up a new ppa[].
	 */
	mutex_exit(&sptd->spt_lock);

	if (pl_built) {
		/*
		 * We created pl and we need to destroy it.
		 */
		pplist = pl;
		np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
		while (np) {
			page_unlock(*pplist);
			np--;
			pplist++;
		}
		kmem_free(pl, sizeof (page_t *) *
		    btopr(sptd->spt_amp->size));
	}
	if (shmd->shm_softlockcnt <= 0) {
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		}
	}
	*ppp = NULL;
	return (ret);
}

/*
 * purge any cached pages in the I/O page cache
 */
static void
segspt_purge(struct seg *seg)
{
	seg_ppurge(seg);
}

static int
segspt_reclaim(struct seg *seg, caddr_t addr, size_t len, struct page **pplist,
	enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg;
	struct spt_data *sptd;
	pgcnt_t npages, i, free_availrmem = 0;
	int done = 0;

#ifdef lint
	addr = addr;
#endif
	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;
	npages = (len >> PAGESHIFT);
	ASSERT(npages);
	ASSERT(sptd->spt_pcachecnt != 0);
	ASSERT(sptd->spt_ppa == pplist);
	ASSERT(npages == btopr(sptd->spt_amp->size));

	/*
	 * Acquire the lock on the dummy seg and destroy the
	 * ppa array IF this is the last pcachecnt.
	 */
	mutex_enter(&sptd->spt_lock);
	if (--sptd->spt_pcachecnt == 0) {
		for (i = 0; i < npages; i++) {
			if (pplist[i] == NULL) {
				continue;
			}
			if (rw == S_WRITE) {
				hat_setrefmod(pplist[i]);
			} else {
				hat_setref(pplist[i]);
			}
			if ((sptd->spt_flags & SHM_PAGEABLE) &&
			    (sptd->spt_ppa_lckcnt[i] == 0))
				free_availrmem++;
			page_unlock(pplist[i]);
		}
		if (sptd->spt_flags & SHM_PAGEABLE) {
			mutex_enter(&freemem_lock);
			availrmem += free_availrmem;
			mutex_exit(&freemem_lock);
		}
		/*
		 * Since we want to cache/uncache the entire ISM segment,
		 * we will track the pplist in a segspt specific field
		 * ppa, that is initialized at the time we add an entry to
		 * the cache.
		 */
		ASSERT(sptd->spt_pcachecnt == 0);
		kmem_free(pplist, sizeof (page_t *) * npages);
		sptd->spt_ppa = NULL;
		sptd->spt_flags &= ~DISM_PPA_CHANGED;
		done = 1;
	}
	mutex_exit(&sptd->spt_lock);
	/*
	 * Now decrement softlockcnt.
	 */
	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1);

	if (shmd->shm_softlockcnt <= 0) {
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		}
	}
	return (done);
}

/*
 * Do a F_SOFTUNLOCK call over the range requested.
 * The range must have already been F_SOFTLOCK'ed.
 *
 * The calls to acquire and release the anon map lock mutex were
 * removed in order to avoid a deadly embrace during a DR
 * memory delete operation. (Eg. DR blocks while waiting for an
 * exclusive lock on a page that is being used for kaio; the
 * thread that will complete the kaio and call segspt_softunlock
 * blocks on the anon map lock; another thread holding the anon
 * map lock blocks on another page lock via the segspt_shmfault
 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
 *
 * The appropriateness of the removal is based upon the following:
 * 1. If we are holding a segment's reader lock and the page is held
 * shared, then the corresponding element in anonmap which points to
 * anon struct cannot change and there is no need to acquire the
 * anonymous map lock.
 * 2. Threads in segspt_softunlock have a reader lock on the segment
 * and already have the shared page lock, so we are guaranteed that
 * the anon map slot cannot change and therefore can call anon_get_ptr()
 * without grabbing the anonymous map lock.
 * 3. Threads that softlock a shared page break copy-on-write, even if
 * it's a read. Thus cow faults can be ignored with respect to soft
 * unlocking, since the breaking of cow means that the anon slot(s) will
 * not be shared.
 */
static void
segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
	size_t len, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg;
	struct spt_data *sptd;
	page_t *pp;
	caddr_t adr;
	struct vnode *vp;
	u_offset_t offset;
	ulong_t anon_index;
	struct anon_map *amp;		/* XXX - for locknest */
	struct anon *ap = NULL;
	pgcnt_t npages;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;

	/*
	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
	 * and therefore their pages are SE_SHARED locked
	 * for the entire life of the segment.
	 */
	if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
	    ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
		goto softlock_decrement;
	}

	/*
	 * Any thread is free to do a page_find and
	 * page_unlock() on the pages within this seg.
	 *
	 * We are already holding the as->a_lock on the user's
	 * real segment, but we need to hold the a_lock on the
	 * underlying dummy as. This is mostly to satisfy the
	 * underlying HAT layer.
	 */
	AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
	hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
	AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);

	amp = sptd->spt_amp;
	ASSERT(amp != NULL);
	anon_index = seg_page(sptseg, sptseg_addr);

	for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
		ap = anon_get_ptr(amp->ahp, anon_index++);
		ASSERT(ap != NULL);
		swap_xlate(ap, &vp, &offset);

		/*
		 * Use page_find() instead of page_lookup() to
		 * find the page since we know that it has a
		 * "shared" lock.
1495 */ 1496 pp = page_find(vp, offset); 1497 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1)); 1498 if (pp == NULL) { 1499 panic("segspt_softunlock: " 1500 "addr %p, ap %p, vp %p, off %llx", 1501 (void *)adr, (void *)ap, (void *)vp, offset); 1502 /*NOTREACHED*/ 1503 } 1504 1505 if (rw == S_WRITE) { 1506 hat_setrefmod(pp); 1507 } else if (rw != S_OTHER) { 1508 hat_setref(pp); 1509 } 1510 page_unlock(pp); 1511 } 1512 1513 softlock_decrement: 1514 npages = btopr(len); 1515 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages); 1516 if (shmd->shm_softlockcnt == 0) { 1517 /* 1518 * All SOFTLOCKS are gone. Wakeup any waiting 1519 * unmappers so they can try again to unmap. 1520 * Check for waiters first without the mutex 1521 * held so we don't always grab the mutex on 1522 * softunlocks. 1523 */ 1524 if (AS_ISUNMAPWAIT(seg->s_as)) { 1525 mutex_enter(&seg->s_as->a_contents); 1526 if (AS_ISUNMAPWAIT(seg->s_as)) { 1527 AS_CLRUNMAPWAIT(seg->s_as); 1528 cv_broadcast(&seg->s_as->a_cv); 1529 } 1530 mutex_exit(&seg->s_as->a_contents); 1531 } 1532 } 1533 } 1534 1535 int 1536 segspt_shmattach(struct seg *seg, caddr_t *argsp) 1537 { 1538 struct shm_data *shmd_arg = (struct shm_data *)argsp; 1539 struct shm_data *shmd; 1540 struct anon_map *shm_amp = shmd_arg->shm_amp; 1541 struct spt_data *sptd; 1542 int error = 0; 1543 1544 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1545 1546 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); 1547 if (shmd == NULL) 1548 return (ENOMEM); 1549 1550 shmd->shm_sptas = shmd_arg->shm_sptas; 1551 shmd->shm_amp = shm_amp; 1552 shmd->shm_sptseg = shmd_arg->shm_sptseg; 1553 1554 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0, 1555 NULL, 0, seg->s_size); 1556 1557 seg->s_data = (void *)shmd; 1558 seg->s_ops = &segspt_shmops; 1559 seg->s_szc = shmd->shm_sptseg->s_szc; 1560 sptd = shmd->shm_sptseg->s_data; 1561 1562 if (sptd->spt_flags & SHM_PAGEABLE) { 1563 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size), 1564 KM_NOSLEEP)) == NULL) { 1565 seg->s_data = (void *)NULL; 1566 kmem_free(shmd, (sizeof (*shmd))); 1567 return (ENOMEM); 1568 } 1569 shmd->shm_lckpgs = 0; 1570 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 1571 if ((error = hat_share(seg->s_as->a_hat, seg->s_base, 1572 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1573 seg->s_size, seg->s_szc)) != 0) { 1574 kmem_free(shmd->shm_vpage, 1575 btopr(shm_amp->size)); 1576 } 1577 } 1578 } else { 1579 error = hat_share(seg->s_as->a_hat, seg->s_base, 1580 shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 1581 seg->s_size, seg->s_szc); 1582 } 1583 if (error) { 1584 seg->s_szc = 0; 1585 seg->s_data = (void *)NULL; 1586 kmem_free(shmd, (sizeof (*shmd))); 1587 } else { 1588 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 1589 shm_amp->refcnt++; 1590 ANON_LOCK_EXIT(&shm_amp->a_rwlock); 1591 } 1592 return (error); 1593 } 1594 1595 int 1596 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) 1597 { 1598 struct shm_data *shmd = (struct shm_data *)seg->s_data; 1599 int reclaim = 1; 1600 1601 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1602 retry: 1603 if (shmd->shm_softlockcnt > 0) { 1604 if (reclaim == 1) { 1605 segspt_purge(seg); 1606 reclaim = 0; 1607 goto retry; 1608 } 1609 return (EAGAIN); 1610 } 1611 1612 if (ssize != seg->s_size) { 1613 #ifdef DEBUG 1614 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n", 1615 ssize, seg->s_size); 1616 #endif 1617 return (EINVAL); 1618 } 1619 1620 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, 
	hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);

	seg_free(seg);

	return (0);
}

void
segspt_shmfree(struct seg *seg)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon_map *shm_amp = shmd->shm_amp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	(void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
	    MC_UNLOCK, NULL, 0);

	/*
	 * Need to increment refcnt when attaching
	 * and decrement when detaching because of dup().
	 */
	ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
	shm_amp->refcnt--;
	ANON_LOCK_EXIT(&shm_amp->a_rwlock);

	if (shmd->shm_vpage) {	/* only for DISM */
		kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
		shmd->shm_vpage = NULL;
	}
	kmem_free(shmd, sizeof (*shmd));
}

/*ARGSUSED*/
int
segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * Shared page table is more than shared mapping.
	 *  Individual process sharing page tables can't change prot
	 *  because there is only one set of page tables.
	 *  This will be allowed after private page table is
	 *  supported.
	 */
	/* need to return correct status error? */
	return (0);
}


faultcode_t
segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
    size_t len, enum fault_type type, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct as *curspt = shmd->shm_sptas;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t npages;
	size_t share_sz, size;
	caddr_t segspt_addr, shm_addr;
	page_t **ppa;
	int i;
	ulong_t an_idx = 0;
	int err = 0;

#ifdef lint
	hat = hat;
#endif
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * Because of the way spt is implemented
	 * the realsize of the segment does not have to be
	 * equal to the segment size itself. The segment size is
	 * often in multiples of a page size larger than PAGESIZE.
	 * The realsize is rounded up to the nearest PAGESIZE
	 * based on what the user requested. This is a bit of
	 * ugliness that is historical but not easily fixed
	 * without re-designing the higher levels of ISM.
	 */
	ASSERT(addr >= seg->s_base);
	if (((addr + len) - seg->s_base) > sptd->spt_realsize)
		return (FC_NOMAP);
	/*
	 * For all of the following cases except F_PROT, we need to
	 * make any necessary adjustments to addr and len
	 * and get all of the necessary page_t's into an array called ppa[].
	 *
	 * The code in shmat() forces base addr and len of ISM segment
	 * to be aligned to largest page size supported. Therefore,
	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
	 * in large pagesize chunks, or else we will screw up the HAT
	 * layer by calling hat_memload_array() with differing page sizes
	 * over a given virtual range.
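	 *
	 * For example, with a 4 M shared page size, a fault covering a
	 * single page is expanded below to the enclosing 4 M-aligned
	 * range (shm_addr/size) before hat_memload_array() is called.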
	 */
	share_sz = page_get_pagesize(sptseg->s_szc);
	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), share_sz);
	npages = btopr(size);

	/*
	 * Now we need to convert from addr in segshm to addr in segspt.
	 */
	an_idx = seg_page(seg, shm_addr);
	segspt_addr = sptseg->s_base + ptob(an_idx);

	ASSERT((segspt_addr + ptob(npages)) <=
	    (sptseg->s_base + sptd->spt_realsize));
	ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));

	switch (type) {

	case F_SOFTLOCK:

		mutex_enter(&freemem_lock);
		if (availrmem < tune.t_minarmem + npages) {
			mutex_exit(&freemem_lock);
			return (FC_MAKE_ERR(ENOMEM));
		} else {
			availrmem -= npages;
		}
		mutex_exit(&freemem_lock);
		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
		/*
		 * Fall through to the F_INVAL case to load up the hat layer
		 * entries with the HAT_LOAD_LOCK flag.
		 */
		/* FALLTHRU */
	case F_INVAL:

		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
			return (FC_NOMAP);

		ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);

		err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
		if (err != 0) {
			if (type == F_SOFTLOCK) {
				mutex_enter(&freemem_lock);
				availrmem += npages;
				mutex_exit(&freemem_lock);
				atomic_add_long((ulong_t *)(
				    &(shmd->shm_softlockcnt)), -npages);
			}
			goto dism_err;
		}
		AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
		if (type == F_SOFTLOCK) {

			/*
			 * Load up the translation keeping it
			 * locked and don't unlock the page.
			 */
			hat_memload_array(sptseg->s_as->a_hat, segspt_addr,
			    size, ppa, sptd->spt_prot,
			    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
		} else {
			if (hat == seg->s_as->a_hat) {

				/*
				 * Migrate pages marked for migration
				 */
				if (lgrp_optimizations())
					page_migrate(seg, shm_addr, ppa,
					    npages);

				/* CPU HAT */
				hat_memload_array(sptseg->s_as->a_hat,
				    segspt_addr, size, ppa, sptd->spt_prot,
				    HAT_LOAD_SHARE);
			} else {
				/* XHAT. Pass real address */
				hat_memload_array(hat, shm_addr,
				    size, ppa, sptd->spt_prot, HAT_LOAD_SHARE);
			}

			/*
			 * And now drop the SE_SHARED lock(s).
			 */
			for (i = 0; i < npages; i++)
				page_unlock(ppa[i]);
		}

		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
			if (hat_share(seg->s_as->a_hat, shm_addr,
			    curspt->a_hat, segspt_addr, ptob(npages),
			    seg->s_szc) != 0) {
				panic("hat_share err in DISM fault");
				/* NOTREACHED */
			}
		}
		AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
dism_err:
		kmem_free(ppa, npages * sizeof (page_t *));
		return (err);

	case F_SOFTUNLOCK:

		mutex_enter(&freemem_lock);
		availrmem += npages;
		mutex_exit(&freemem_lock);

		/*
		 * This is a bit ugly, we pass in the real seg pointer,
		 * but the segspt_addr is the virtual address within the
		 * dummy seg.
		 */
		segspt_softunlock(seg, segspt_addr, size, rw);
		return (0);

	case F_PROT:

		/*
		 * This takes care of the unusual case where a user
		 * allocates a stack in shared memory and a register
		 * window overflow is written to that stack page before
		 * it is otherwise modified.
		 *
		 * We can get away with this because ISM segments are
		 * always rw. Other than this unusual case, there
		 * should be no instances of protection violations.
		 */
		return (0);

	default:
#ifdef DEBUG
		panic("segspt_dismfault default type?");
#else
		return (FC_NOMAP);
#endif
	}
}


faultcode_t
segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
    size_t len, enum fault_type type, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct as *curspt = shmd->shm_sptas;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t npages;
	size_t share_size, size;
	caddr_t sptseg_addr, shm_addr;
	page_t *pp, **ppa;
	int i;
	u_offset_t offset;
	ulong_t anon_index = 0;
	struct vnode *vp;
	struct anon_map *amp;		/* XXX - for locknest */
	struct anon *ap = NULL;
	anon_sync_obj_t cookie;

#ifdef lint
	hat = hat;
#endif

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	if (sptd->spt_flags & SHM_PAGEABLE) {
		return (segspt_dismfault(hat, seg, addr, len, type, rw));
	}

	/*
	 * Because of the way spt is implemented
	 * the realsize of the segment does not have to be
	 * equal to the segment size itself. The segment size is
	 * often in multiples of a page size larger than PAGESIZE.
	 * The realsize is rounded up to the nearest PAGESIZE
	 * based on what the user requested. This is a bit of
	 * ugliness that is historical but not easily fixed
	 * without re-designing the higher levels of ISM.
	 */
	ASSERT(addr >= seg->s_base);
	if (((addr + len) - seg->s_base) > sptd->spt_realsize)
		return (FC_NOMAP);
	/*
	 * For all of the following cases except F_PROT, we need to
	 * make any necessary adjustments to addr and len
	 * and get all of the necessary page_t's into an array called ppa[].
	 *
	 * The code in shmat() forces base addr and len of ISM segment
	 * to be aligned to largest page size supported. Therefore,
	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
	 * in large pagesize chunks, or else we will screw up the HAT
	 * layer by calling hat_memload_array() with differing page sizes
	 * over a given virtual range.
	 */
	share_size = page_get_pagesize(sptseg->s_szc);
	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), share_size);
	npages = btopr(size);

	/*
	 * Now we need to convert from addr in segshm to addr in segspt.
	 */
	anon_index = seg_page(seg, shm_addr);
	sptseg_addr = sptseg->s_base + ptob(anon_index);

	/*
	 * And now we may have to adjust npages downward if we have
	 * exceeded the realsize of the segment or initial anon
	 * allocations.
	 */
	/*
	 * Now we need to convert from addr in segshm to addr in segspt.
	 */
	anon_index = seg_page(seg, shm_addr);
	sptseg_addr = sptseg->s_base + ptob(anon_index);

	/*
	 * And now we may have to adjust npages downward if we have
	 * exceeded the realsize of the segment or initial anon
	 * allocations.
	 */
	if ((sptseg_addr + ptob(npages)) >
	    (sptseg->s_base + sptd->spt_realsize))
		size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;

	npages = btopr(size);

	ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
	ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);

	switch (type) {

	case F_SOFTLOCK:

		/*
		 * availrmem is decremented once during anon_swap_adjust()
		 * and is incremented during the anon_unresv(), which is
		 * called from shm_rm_amp() when the segment is destroyed.
		 */
		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
		/*
		 * Some platforms assume that ISM pages are SE_SHARED
		 * locked for the entire life of the segment.
		 */
		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
			return (0);
		/*
		 * Fall through to the F_INVAL case to load up the hat layer
		 * entries with the HAT_LOAD_LOCK flag.
		 */

		/* FALLTHRU */
	case F_INVAL:

		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
			return (FC_NOMAP);

		/*
		 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
		 * may still rely on this call to hat_share(). That
		 * would imply that those hats can fault on a
		 * HAT_LOAD_LOCK translation, which would seem
		 * contradictory.
		 */
		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
			if (hat_share(seg->s_as->a_hat, seg->s_base,
			    curspt->a_hat, sptseg->s_base,
			    sptseg->s_size, sptseg->s_szc) != 0) {
				panic("hat_share error in ISM fault");
				/*NOTREACHED*/
			}
			return (0);
		}
		ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);

		/*
		 * I see no need to lock the real seg here, because all
		 * of our work will be on the underlying dummy seg.
		 *
		 * sptseg_addr and npages now account for large pages.
		 */
		amp = sptd->spt_amp;
		ASSERT(amp != NULL);
		anon_index = seg_page(sptseg, sptseg_addr);

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++) {
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index++);
			ASSERT(ap != NULL);
			swap_xlate(ap, &vp, &offset);
			anon_array_exit(&cookie);
			pp = page_lookup(vp, offset, SE_SHARED);
			ASSERT(pp != NULL);
			ppa[i] = pp;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		ASSERT(i == npages);

		/*
		 * We are already holding the as->a_lock on the user's
		 * real segment, but we need to hold the a_lock on the
		 * underlying dummy as. This is mostly to satisfy the
		 * underlying HAT layer.
		 */
		AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
		if (type == F_SOFTLOCK) {
			/*
			 * Load up the translation keeping it
			 * locked and don't unlock the page.
			 */
			hat_memload_array(sptseg->s_as->a_hat, sptseg_addr,
			    ptob(npages), ppa, sptd->spt_prot,
			    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
		} else {
			if (hat == seg->s_as->a_hat) {

				/*
				 * Migrate pages marked for migration.
				 */
				if (lgrp_optimizations())
					page_migrate(seg, shm_addr, ppa,
					    npages);

				/* CPU HAT */
				hat_memload_array(sptseg->s_as->a_hat,
				    sptseg_addr, ptob(npages), ppa,
				    sptd->spt_prot, HAT_LOAD_SHARE);
			} else {
				/* XHAT. Pass real address */
				hat_memload_array(hat, shm_addr,
				    ptob(npages), ppa, sptd->spt_prot,
				    HAT_LOAD_SHARE);
			}

			/*
			 * And now drop the SE_SHARED lock(s).
			 */
			for (i = 0; i < npages; i++)
				page_unlock(ppa[i]);
		}
		AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);

		kmem_free(ppa, sizeof (page_t *) * npages);
		return (0);
	case F_SOFTUNLOCK:

		/*
		 * This is a bit ugly, we pass in the real seg pointer,
		 * but the sptseg_addr is the virtual address within the
		 * dummy seg.
		 */
		segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
		return (0);

	case F_PROT:

		/*
		 * This takes care of the unusual case where a user
		 * allocates a stack in shared memory and a register
		 * window overflow is written to that stack page before
		 * it is otherwise modified.
		 *
		 * We can get away with this because ISM segments are
		 * always rw. Other than this unusual case, there
		 * should be no instances of protection violations.
		 */
		return (0);

	default:
#ifdef DEBUG
		cmn_err(CE_WARN, "segspt_shmfault default type?");
#endif
		return (FC_NOMAP);
	}
}

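/*
 * The next three seg_ops entry points are effectively no-ops for shared
 * page table segments; they exist only to satisfy the segspt_shmops vector.
 */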
/*ARGSUSED*/
static faultcode_t
segspt_shmfaulta(struct seg *seg, caddr_t addr)
{
	return (0);
}

/*ARGSUSED*/
static int
segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

/*ARGSUSED*/
static size_t
segspt_shmswapout(struct seg *seg)
{
	return (0);
}

/*
 * duplicate the shared page tables
 */
int
segspt_shmdup(struct seg *seg, struct seg *newseg)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon_map *amp = shmd->shm_amp;
	struct shm_data *shmd_new;
	struct seg *spt_seg = shmd->shm_sptseg;
	struct spt_data *sptd = spt_seg->s_data;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
	newseg->s_data = (void *)shmd_new;
	shmd_new->shm_sptas = shmd->shm_sptas;
	shmd_new->shm_amp = amp;
	shmd_new->shm_sptseg = shmd->shm_sptseg;
	newseg->s_ops = &segspt_shmops;
	newseg->s_szc = seg->s_szc;
	ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	amp->refcnt++;
	ANON_LOCK_EXIT(&amp->a_rwlock);

	if (sptd->spt_flags & SHM_PAGEABLE) {
		shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
		shmd_new->shm_lckpgs = 0;
	}
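	/*
	 * Finally, ask the HAT to share the underlying page tables with the
	 * new address space; the source mapping lives at SEGSPTADDR within
	 * the SPT address space.
	 */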
	return (hat_share(newseg->s_as->a_hat, newseg->s_base,
	    shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size, seg->s_szc));
}

/*ARGSUSED*/
int
segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * ISM segment is always rw.
	 */
	return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
}

/*
 * Return an array of locked large pages; for empty slots, allocate
 * private zero-filled anon pages.
 */
static int
spt_anon_getpages(
	struct seg *sptseg,
	caddr_t sptaddr,
	size_t len,
	page_t *ppa[])
{
	struct spt_data *sptd = sptseg->s_data;
	struct anon_map *amp = sptd->spt_amp;
	enum seg_rw rw = sptd->spt_prot;
	uint_t szc = sptseg->s_szc;
	size_t pg_sz, share_sz = page_get_pagesize(szc);
	pgcnt_t lp_npgs;
	caddr_t lp_addr, e_sptaddr;
	uint_t vpprot, ppa_szc = 0;
	struct vpage *vpage = NULL;
	ulong_t j, ppa_idx;
	int err, ierr = 0;
	pgcnt_t an_idx;
	anon_sync_obj_t cookie;

	ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
	ASSERT(len != 0);

	pg_sz = share_sz;
	lp_npgs = btop(pg_sz);
	lp_addr = sptaddr;
	e_sptaddr = sptaddr + len;
	an_idx = seg_page(sptseg, sptaddr);
	ppa_idx = 0;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
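	/*
	 * Walk the range [sptaddr, sptaddr + len) in pg_sz chunks and fill
	 * in ppa[].  When anon_map_getpages() cannot satisfy the current
	 * page size it returns -1 or -2; the code below then adjusts szc
	 * and retries the remaining range (see the detailed comments in
	 * the loop for the exact rules).
	 */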
	/*CONSTCOND*/
	while (1) {
		for (; lp_addr < e_sptaddr;
		    an_idx += lp_npgs, lp_addr += pg_sz,
		    ppa_idx += lp_npgs) {

			anon_array_enter(amp, an_idx, &cookie);
			ppa_szc = (uint_t)-1;
			ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
			    lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
			    &ppa_szc, vpage, rw, 0, segvn_anypgsz, kcred);
			anon_array_exit(&cookie);

			if (ierr != 0) {
				if (ierr > 0) {
					err = FC_MAKE_ERR(ierr);
					goto lpgs_err;
				}
				break;
			}
		}
		if (lp_addr == e_sptaddr) {
			break;
		}
		ASSERT(lp_addr < e_sptaddr);

		/*
		 * ierr == -1 means we failed to allocate a large page,
		 * so do a size down operation.
		 *
		 * ierr == -2 means some other process that privately shares
		 * pages with this process has allocated a larger page and we
		 * need to retry with larger pages. So do a size up
		 * operation. This relies on the fact that large pages are
		 * never partially shared, i.e. if we share any constituent
		 * page of a large page with another process we must share the
		 * entire large page. Note this cannot happen for SOFTLOCK
		 * case, unless current address (lp_addr) is at the beginning
		 * of the next page size boundary because the other process
		 * couldn't have relocated locked pages.
		 */
		ASSERT(ierr == -1 || ierr == -2);
		if (segvn_anypgsz) {
			ASSERT(ierr == -2 || szc != 0);
			ASSERT(ierr == -1 || szc < sptseg->s_szc);
			szc = (ierr == -1) ? szc - 1 : szc + 1;
		} else {
			/*
			 * For faults and segvn_anypgsz == 0
			 * we need to be careful not to loop forever
			 * if existing page is found with szc other
			 * than 0 or seg->s_szc. This could be due
			 * to page relocations on behalf of DR or
			 * more likely large page creation. For this
			 * case simply re-size to existing page's szc
			 * if returned by anon_map_getpages().
			 */
			if (ppa_szc == (uint_t)-1) {
				szc = (ierr == -1) ? 0 : sptseg->s_szc;
			} else {
				ASSERT(ppa_szc <= sptseg->s_szc);
				ASSERT(ierr == -2 || ppa_szc < szc);
				ASSERT(ierr == -1 || ppa_szc > szc);
				szc = ppa_szc;
			}
		}
		pg_sz = page_get_pagesize(szc);
		lp_npgs = btop(pg_sz);
		ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
	return (0);

lpgs_err:
	ANON_LOCK_EXIT(&amp->a_rwlock);
	for (j = 0; j < ppa_idx; j++)
		page_unlock(ppa[j]);
	return (err);
}

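/*
 * Lock down the pages of an MC_LOCK request.  For each page not already
 * locked in this segment, bump its DISM lock count (warning once
 * DISM_LOCK_MAX is reached), lock it with page_pp_lock(), mark it
 * DISM_PG_LOCKED in shm_vpage[] and record it in the caller's lockmap.
 * The SE_SHARED locks taken by spt_anon_getpages() are dropped before
 * returning; a page_pp_lock() failure unwinds and returns EAGAIN.
 */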
int
spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
    page_t **ppa, ulong_t *lockmap, size_t pos)
{
	struct shm_data *shmd = seg->s_data;
	struct spt_data *sptd = shmd->shm_sptseg->s_data;
	ulong_t i;
	int kernel;

	for (i = 0; i < npages; anon_index++, pos++, i++) {
		if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
			if (sptd->spt_ppa_lckcnt[anon_index] <
			    (ushort_t)DISM_LOCK_MAX) {
				if (++sptd->spt_ppa_lckcnt[anon_index] ==
				    (ushort_t)DISM_LOCK_MAX) {
					cmn_err(CE_WARN,
					    "DISM page lock limit "
					    "reached on DISM offset 0x%lx\n",
					    anon_index << PAGESHIFT);
				}
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[anon_index]) ? 1 : 0;
				if (!page_pp_lock(ppa[i], 0, kernel)) {
					/* unlock rest of the pages */
					for (; i < npages; i++)
						page_unlock(ppa[i]);
					sptd->spt_ppa_lckcnt[anon_index]--;
					return (EAGAIN);
				}
				shmd->shm_lckpgs++;
				shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
				if (lockmap != NULL)
					BT_SET(lockmap, pos);
			}
		}
		page_unlock(ppa[i]);
	}
	return (0);
}

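/*
 * memcntl(MC_LOCK/MC_UNLOCK) handler for DISM segments.  MC_LOCK brings the
 * pages in through spt_anon_getpages() and locks the requested range via
 * spt_lockpages(); MC_UNLOCK releases the per-page lock counts.  Plain ISM
 * segments are locked for their lifetime, so the request is a no-op there.
 */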
/*ARGSUSED*/
static int
segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos)
{
	struct shm_data *shmd = seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t npages, a_npages;
	page_t **ppa;
	pgcnt_t an_idx, a_an_idx, ppa_idx;
	caddr_t spt_addr, a_addr;	/* spt and aligned address */
	size_t a_len;			/* aligned len */
	size_t share_sz;
	ulong_t i;
	int sts = 0;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		return (0);
	}

	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	an_idx = seg_page(seg, addr);
	npages = btopr(len);

	if (an_idx + npages > btopr(shmd->shm_amp->size)) {
		return (ENOMEM);
	}

	if (op == MC_LOCK) {
		/*
		 * Need to align addr and size request if they are not
		 * aligned, so we can always allocate large page(s); however,
		 * we only lock what was requested in the initial request.
		 */
		share_sz = page_get_pagesize(sptseg->s_szc);
		a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
		a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
		    share_sz);
		a_npages = btop(a_len);
		a_an_idx = seg_page(seg, a_addr);
		spt_addr = sptseg->s_base + ptob(a_an_idx);
		ppa_idx = an_idx - a_an_idx;

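		/*
		 * Illustrative example (hypothetical numbers): with a 4 MB
		 * underlying page size and a request covering base pages
		 * 3..5 of the segment, a_addr/a_len describe the entire
		 * first large page (a_npages base pages starting at index
		 * 0), spt_addr points at that page in the SPT segment, and
		 * ppa_idx == 3, so only ppa[3..5] are locked below; the
		 * extra pages brought in for alignment are unlocked again
		 * after spt_lockpages() returns.
		 */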
		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
		    KM_NOSLEEP)) == NULL) {
			return (ENOMEM);
		}

		/*
		 * Don't cache any new pages for IO and
		 * flush any cached pages.
		 */
		mutex_enter(&sptd->spt_lock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
		if (sts != 0) {
			mutex_exit(&sptd->spt_lock);
			kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
			return (sts);
		}

		sts = spt_lockpages(seg, an_idx, npages,
		    &ppa[ppa_idx], lockmap, pos);
		/*
		 * Unlock the remaining pages, brought in only to satisfy
		 * large page alignment, that fall outside the range that
		 * was actually requested.
		 */
		for (i = 0; i < ppa_idx; i++)
			page_unlock(ppa[i]);
		for (i = ppa_idx + npages; i < a_npages; i++)
			page_unlock(ppa[i]);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);

		kmem_free(ppa, ((sizeof (page_t *)) * a_npages));

	} else if (op == MC_UNLOCK) {	/* unlock */
		struct anon_map *amp;
		struct anon *ap;
		struct vnode *vp;
		u_offset_t off;
		struct page *pp;
		int kernel;
		anon_sync_obj_t cookie;

		amp = sptd->spt_amp;
		mutex_enter(&sptd->spt_lock);
		if (shmd->shm_lckpgs == 0) {
			mutex_exit(&sptd->spt_lock);
			return (0);
		}
		/*
		 * Don't cache new IO pages.
		 */
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, an_idx++) {
			if (shmd->shm_vpage[an_idx] & DISM_PG_LOCKED) {
				anon_array_enter(amp, an_idx, &cookie);
				ap = anon_get_ptr(amp->ahp, an_idx);
				ASSERT(ap);
				ASSERT(sptd->spt_ppa_lckcnt[an_idx] > 0);

				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup(vp, off, SE_SHARED);
				ASSERT(pp);
				/*
				 * availrmem is decremented only for
				 * pages which are not in the seg pcache;
				 * for pages in the seg pcache, availrmem
				 * was decremented in _dismpagelock()
				 * (if they were not locked here).
				 */
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[an_idx]) ? 1 : 0;
				page_pp_unlock(pp, 0, kernel);
				page_unlock(pp);
				shmd->shm_vpage[an_idx] &= ~DISM_PG_LOCKED;
				sptd->spt_ppa_lckcnt[an_idx]--;
				shmd->shm_lckpgs--;
			}
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);
	}
	return (sts);
}

/*ARGSUSED*/
int
segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	spgcnt_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * ISM segment is always rw.
	 */
	while (--pgno >= 0)
		*protv++ = sptd->spt_prot;
	return (0);
}

/*ARGSUSED*/
u_offset_t
segspt_shmgetoffset(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/* Offset does not matter in ISM memory */

	return ((u_offset_t)0);
}

/* ARGSUSED */
int
segspt_shmgettype(struct seg *seg, caddr_t addr)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * The shared memory mapping is always MAP_SHARED; swap is only
	 * reserved for DISM.
	 */
	return (MAP_SHARED |
	    ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
}

/*ARGSUSED*/
int
segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	*vpp = sptd->spt_vp;
	return (0);
}

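/*
 * madvise() handler.  MADV_FREE discards DISM pages after invalidating the
 * cached DISM page array; the MADV_ACCESS_* hints set the lgroup memory
 * allocation policy for the range and mark existing pages for migration.
 */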
/*ARGSUSED*/
static int
segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	struct anon_map *amp;
	pgcnt_t pg_idx;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	if (behav == MADV_FREE) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
			return (0);

		amp = sptd->spt_amp;
		pg_idx = seg_page(seg, addr);

		mutex_enter(&sptd->spt_lock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);

		/*
		 * Purge all DISM cached pages
		 */
		seg_ppurge_seg(segspt_reclaim);

		mutex_enter(&sptd->spt_lock);
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		anon_disclaim(amp, pg_idx, len, ANON_PGLOOKUP_BLK);
		ANON_LOCK_EXIT(&amp->a_rwlock);
		mutex_exit(&sptd->spt_lock);
	} else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
	    behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
		int already_set;
		ulong_t anon_index;
		lgrp_mem_policy_t policy;
		caddr_t shm_addr;
		size_t share_size;
		size_t size;
		struct seg *sptseg = shmd->shm_sptseg;
		caddr_t sptseg_addr;

		/*
		 * Align address and length to page size of underlying segment
		 */
		share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
		shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
		size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
		    share_size);

		amp = shmd->shm_amp;
		anon_index = seg_page(seg, shm_addr);

		/*
		 * And now we may have to adjust size downward if we have
		 * exceeded the realsize of the segment or initial anon
		 * allocations.
		 */
		sptseg_addr = sptseg->s_base + ptob(anon_index);
		if ((sptseg_addr + size) >
		    (sptseg->s_base + sptd->spt_realsize))
			size = (sptseg->s_base + sptd->spt_realsize) -
			    sptseg_addr;

		/*
		 * Set memory allocation policy for this segment
		 */
		policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
		already_set = lgrp_shm_policy_set(policy, amp, anon_index,
		    NULL, 0, len);

		/*
		 * If a random memory allocation policy is already set,
		 * don't bother reapplying it.
		 */
		if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
			return (0);

		/*
		 * Mark any existing pages in the given range for
		 * migration, flushing the I/O page cache first and using
		 * the underlying segment to calculate the anon index and
		 * to get the anonmap and vnode pointers from.
		 */
		if (shmd->shm_softlockcnt > 0)
			segspt_purge(seg);

		page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
	}

	return (0);
}

/*ARGSUSED*/
void
segspt_shmdump(struct seg *seg)
{
	/* no-op for ISM segment */
}

/*ARGSUSED*/
static int
segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	return (ENOTSUP);
}

/*
 * get a memory ID for an addr in a given segment
 */
static int
segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon *ap;
	size_t anon_index;
	struct anon_map *amp = shmd->shm_amp;
	struct spt_data *sptd = shmd->shm_sptseg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	anon_sync_obj_t cookie;

	anon_index = seg_page(seg, addr);

	if (addr > (seg->s_base + sptd->spt_realsize)) {
		return (EFAULT);
	}

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	anon_array_enter(amp, anon_index, &cookie);
	ap = anon_get_ptr(amp->ahp, anon_index);
	if (ap == NULL) {
		struct page *pp;
		caddr_t spt_addr = sptseg->s_base + ptob(anon_index);

		pp = anon_zero(sptseg, spt_addr, &ap, kcred);
		if (pp == NULL) {
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			return (ENOMEM);
		}
		(void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
		page_unlock(pp);
	}
	anon_array_exit(&cookie);
	ANON_LOCK_EXIT(&amp->a_rwlock);
	memidp->val[0] = (uintptr_t)ap;
	memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
	return (0);
}

/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map *amp;
	ulong_t anon_index;
	lgrp_mem_policy_info_t *policy_info;
	struct shm_data *shm_data;

	ASSERT(seg != NULL);

	/*
	 * Get anon_map from segshm.
	 *
	 * Assume that no lock needs to be held on the anon_map, since
	 * it should be protected by its reference count, which must be
	 * nonzero for an existing segment.  The readers lock on the
	 * policy tree still needs to be grabbed, though.
	 */
	shm_data = (struct shm_data *)seg->s_data;
	if (shm_data == NULL)
		return (NULL);
	amp = shm_data->shm_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Get policy info
	 *
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

	return (policy_info);
}