/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/user.h>
#include <sys/mman.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/tuneable.h>
#include <vm/hat.h>
#include <vm/seg.h>
#include <vm/as.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <sys/buf.h>
#include <sys/swap.h>
#include <sys/atomic.h>
#include <vm/seg_spt.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/shm.h>
#include <sys/shm_impl.h>
#include <sys/lgrp.h>
#include <sys/vmsystm.h>
#include <sys/policy.h>
#include <sys/project.h>
#include <sys/tnf_probe.h>
#include <sys/zone.h>

#define	SEGSPTADDR	(caddr_t)0x0

/*
 * # pages used for spt
 */
static size_t	spt_used;

/*
 * segspt_minfree is the memory left for the system after ISM
 * locked its pages; it is set up to 5% of availrmem in
 * sptcreate when ISM is created.  ISM should not use more
 * than ~90% of availrmem; if it does, then the performance
 * of the system may decrease. Machines with large memories may
 * be able to use up more memory for ISM so we set the default
 * segspt_minfree to 5% (which gives ISM a maximum of 95% of availrmem).
 * If somebody wants even more memory for ISM (risking hanging
 * the system) they can patch segspt_minfree to a smaller number.
 */
pgcnt_t segspt_minfree = 0;

static int segspt_create(struct seg *seg, caddr_t argsp);
static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_free(struct seg *seg);
static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);

static void
segspt_badop()
{
	panic("segspt_badop called");
	/*NOTREACHED*/
}

#define	SEGSPT_BADOP(t)	(t(*)())segspt_badop

struct seg_ops segspt_ops = {
	SEGSPT_BADOP(int),		/* dup */
	segspt_unmap,
	segspt_free,
	SEGSPT_BADOP(int),		/* fault */
	SEGSPT_BADOP(faultcode_t),	/* faulta */
	SEGSPT_BADOP(int),		/* setprot */
	SEGSPT_BADOP(int),		/* checkprot */
	SEGSPT_BADOP(int),		/* kluster */
	SEGSPT_BADOP(size_t),		/* swapout */
	SEGSPT_BADOP(int),		/* sync */
	SEGSPT_BADOP(size_t),		/* incore */
	SEGSPT_BADOP(int),		/* lockop */
	SEGSPT_BADOP(int),		/* getprot */
	SEGSPT_BADOP(u_offset_t),	/* getoffset */
	SEGSPT_BADOP(int),		/* gettype */
	SEGSPT_BADOP(int),		/* getvp */
	SEGSPT_BADOP(int),		/* advise */
	SEGSPT_BADOP(void),		/* dump */
	SEGSPT_BADOP(int),		/* pagelock */
	SEGSPT_BADOP(int),		/* setpgsz */
	SEGSPT_BADOP(int),		/* getmemid */
	segspt_getpolicy,		/* getpolicy */
	SEGSPT_BADOP(int),		/* capable */
};

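/*
 * Processes never operate on the spt segment directly: it lives in its own
 * dummy address space created by sptcreate(), so most entries above are
 * segspt_badop().  Only unmap, free and getpolicy are meaningful for it;
 * the per-process view of the shared memory is provided by segspt_shmops
 * below.
 */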
static int segspt_shmdup(struct seg *seg, struct seg *newseg);
static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
static void segspt_shmfree(struct seg *seg);
static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
		caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
		register size_t len, register uint_t prot);
static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
		uint_t prot);
static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t segspt_shmswapout(struct seg *seg);
static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
		register char *vec);
static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
		int attr, uint_t flags);
static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
		int attr, int op, ulong_t *lockmap, size_t pos);
static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
		uint_t *protv);
static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
static int segspt_shmgettype(struct seg *seg, caddr_t addr);
static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
		uint_t behav);
static void segspt_shmdump(struct seg *seg);
static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
		struct page ***, enum lock_type, enum seg_rw);
static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
static int segspt_shmcapable(struct seg *, segcapability_t);

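/*
 * segspt_shmops is the seg driver used by each process that attaches to the
 * shared segment (e.g. via shmat(2)); most of its entry points operate on
 * the single underlying spt segment reached through shm_data.shm_sptseg.
 */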
struct seg_ops segspt_shmops = {
	segspt_shmdup,
	segspt_shmunmap,
	segspt_shmfree,
	segspt_shmfault,
	segspt_shmfaulta,
	segspt_shmsetprot,
	segspt_shmcheckprot,
	segspt_shmkluster,
	segspt_shmswapout,
	segspt_shmsync,
	segspt_shmincore,
	segspt_shmlockop,
	segspt_shmgetprot,
	segspt_shmgetoffset,
	segspt_shmgettype,
	segspt_shmgetvp,
	segspt_shmadvise,	/* advise */
	segspt_shmdump,
	segspt_shmpagelock,
	segspt_shmsetpgsz,
	segspt_shmgetmemid,
	segspt_shmgetpolicy,
	segspt_shmcapable,
};

static void segspt_purge(struct seg *seg);
static int segspt_reclaim(struct seg *, caddr_t, size_t, struct page **,
		enum seg_rw);
static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
		page_t **ppa);


/*ARGSUSED*/
int
sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
	uint_t prot, uint_t flags, uint_t share_szc)
{
	int	err;
	struct as	*newas;
	struct segspt_crargs sptcargs;

#ifdef DEBUG
	TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
			tnf_ulong, size, size);
#endif
	if (segspt_minfree == 0)	/* leave min 5% of availrmem */
		segspt_minfree = availrmem/20;	/* for the system */

	if (!hat_supported(HAT_SHARED_PT, (void *)0))
		return (EINVAL);

	/*
	 * get a new as for this shared memory segment
	 */
	newas = as_alloc();
	newas->a_proc = NULL;
	sptcargs.amp = amp;
	sptcargs.prot = prot;
	sptcargs.flags = flags;
	sptcargs.szc = share_szc;
	/*
	 * create a shared page table (spt) segment
	 */

	if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
		as_free(newas);
		return (err);
	}
	*sptseg = sptcargs.seg_spt;
	return (0);
}

void
sptdestroy(struct as *as, struct anon_map *amp)
{

#ifdef DEBUG
	TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
#endif
	(void) as_unmap(as, SEGSPTADDR, amp->size);
	as_free(as);
}

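/*
 * Rough usage sketch for the pair above (illustration only; the actual
 * caller is the System V shared memory code in shm.c, whose argument and
 * error handling details may differ):
 *
 *	struct seg *sptseg;
 *	int err;
 *
 *	err = sptcreate(size, &sptseg, amp, prot, flags, share_szc);
 *	if (err == 0) {
 *		... processes attach through segspt_shmops ...
 *		sptdestroy(sptseg->s_as, amp);
 *	}
 */
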
/*
 * called from seg_free().
 * free (i.e., unlock, unmap, return to free list)
 * all the pages in the given seg.
 */
void
segspt_free(struct seg *seg)
{
	struct spt_data *sptd = (struct spt_data *)seg->s_data;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	if (sptd != NULL) {
		if (sptd->spt_realsize)
			segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);

		if (sptd->spt_ppa_lckcnt)
			kmem_free(sptd->spt_ppa_lckcnt,
			    sizeof (*sptd->spt_ppa_lckcnt)
			    * btopr(sptd->spt_amp->size));
		kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
		mutex_destroy(&sptd->spt_lock);
		kmem_free(sptd, sizeof (*sptd));
	}
}

/*ARGSUSED*/
static int
segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
	uint_t flags)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	return (0);
}

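/*
 * For ISM (non-pageable) segments every page is resident and locked, so the
 * incore vector below is filled in without consulting the anon map.  For
 * DISM (SHM_PAGEABLE) segments each anon slot is inspected individually and
 * the DISM_PG_LOCKED bit in shm_vpage[] supplies SEG_PAGE_LOCKED.
 */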
/*ARGSUSED*/
static size_t
segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	caddr_t	eo_seg;
	pgcnt_t	npages;
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg	*sptseg;
	struct spt_data *sptd;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
#ifdef lint
	seg = seg;
#endif
	sptseg = shmd->shm_sptseg;
	sptd = sptseg->s_data;

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		eo_seg = addr + len;
		while (addr < eo_seg) {
			/* page exists, and it's locked. */
			*vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
			    SEG_PAGE_ANON;
			addr += PAGESIZE;
		}
		return (len);
	} else {
		struct anon_map *amp = shmd->shm_amp;
		struct anon	*ap;
		page_t		*pp;
		pgcnt_t		anon_index;
		struct vnode	*vp;
		u_offset_t	off;
		ulong_t		i;
		int		ret;
		anon_sync_obj_t	cookie;

		addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
		anon_index = seg_page(seg, addr);
		npages = btopr(len);
		if (anon_index + npages > btopr(shmd->shm_amp->size)) {
			return (EINVAL);
		}
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, anon_index++) {
			ret = 0;
			anon_array_enter(amp, anon_index, &cookie);
			ap = anon_get_ptr(amp->ahp, anon_index);
			if (ap != NULL) {
				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup_nowait(vp, off, SE_SHARED);
				if (pp != NULL) {
					ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
					page_unlock(pp);
				}
			} else {
				anon_array_exit(&cookie);
			}
			if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
				ret |= SEG_PAGE_LOCKED;
			}
			*vec++ = (char)ret;
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		return (len);
	}
}

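/*
 * Unmap the spt segment from its dummy address space.  Only a full-segment
 * unmap is supported; any other range returns EINVAL.
 */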
static int
segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
{
	size_t share_size;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * seg.s_size may have been rounded up to the largest page size
	 * in shmat().
	 * XXX This should be cleaned up. sptdestroy should take a length
	 * argument which should be the same as sptcreate. Then
	 * this rounding would not be needed (or is done in shm.c)
	 * Only the check for full segment will be needed.
	 *
	 * XXX -- shouldn't raddr == 0 always? These tests don't seem
	 * to be useful at all.
	 */
	share_size = page_get_pagesize(seg->s_szc);
	ssize = P2ROUNDUP(ssize, share_size);

	if (raddr == seg->s_base && ssize == seg->s_size) {
		seg_free(seg);
		return (0);
	} else
		return (EINVAL);
}

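/*
 * Create the shared page table segment.  For DISM (SHM_PAGEABLE) only the
 * anon array and the swap reservation are set up here and pages are faulted
 * in later on demand; for ISM every page is created, charged against the
 * project's locked-memory rctl, locked and loaded into the HAT right away.
 */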
int
segspt_create(struct seg *seg, caddr_t argsp)
{
	int		err;
	caddr_t		addr = seg->s_base;
	struct spt_data *sptd;
	struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
	struct anon_map *amp = sptcargs->amp;
	struct kshmid	*sp = amp->a_sp;
	struct cred	*cred = CRED();
	ulong_t		i, j, anon_index = 0;
	pgcnt_t		npages = btopr(amp->size);
	struct vnode	*vp;
	page_t		**ppa;
	uint_t		hat_flags;
	size_t		pgsz;
	pgcnt_t		pgcnt;
	caddr_t		a;
	pgcnt_t		pidx;
	size_t		sz;
	proc_t		*procp = curproc;
	rctl_qty_t	lockedbytes = 0;
	kproject_t	*proj;

	/*
	 * We are holding the a_lock on the underlying dummy as,
	 * so we can make calls to the HAT layer.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(sp != NULL);

#ifdef DEBUG
	TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
			tnf_opaque, addr, addr,
			tnf_ulong, len, seg->s_size);
#endif
	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
		if (err = anon_swap_adjust(npages))
			return (err);
	}
	err = ENOMEM;

	if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
		goto out1;

	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
		    KM_NOSLEEP)) == NULL)
			goto out2;
	}

	mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);

	if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
		goto out3;

	seg->s_ops = &segspt_ops;
	sptd->spt_vp = vp;
	sptd->spt_amp = amp;
	sptd->spt_prot = sptcargs->prot;
	sptd->spt_flags = sptcargs->flags;
	seg->s_data = (caddr_t)sptd;
	sptd->spt_ppa = NULL;
	sptd->spt_ppa_lckcnt = NULL;
	seg->s_szc = sptcargs->szc;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	if (seg->s_szc > amp->a_szc) {
		amp->a_szc = seg->s_szc;
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);

	/*
	 * Set policy to affect initial allocation of pages in
	 * anon_map_createpages()
	 */
	(void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
	    NULL, 0, ptob(npages));

	if (sptcargs->flags & SHM_PAGEABLE) {
		size_t  share_sz;
		pgcnt_t new_npgs, more_pgs;
		struct anon_hdr *nahp;
		zone_t *zone;

		share_sz = page_get_pagesize(seg->s_szc);
		if (!IS_P2ALIGNED(amp->size, share_sz)) {
			/*
			 * We are rounding up the size of the anon array
			 * on a 4M boundary because we always create 4M
			 * of pages when locking and faulting pages, and we
			 * don't have to check for all corner cases, e.g.
			 * whether there is enough space to allocate a 4M
			 * page.
			 */
			new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
			more_pgs = new_npgs - npages;

			/*
			 * This may return NULL if the global zone is removing
			 * a shm segment created by a non-global zone that has
			 * been destroyed.
			 */
			zone =
			    zone_find_by_id(sp->shm_perm.ipc_proj->kpj_zoneid);

			if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
				if (zone != NULL)
					zone_rele(zone);
				err = ENOMEM;
				goto out4;
			}
			if (zone != NULL)
				zone_rele(zone);

			nahp = anon_create(new_npgs, ANON_SLEEP);
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
			    ANON_SLEEP);
			anon_release(amp->ahp, npages);
			amp->ahp = nahp;
			ASSERT(amp->swresv == ptob(npages));
			amp->swresv = amp->size = ptob(new_npgs);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			npages = new_npgs;
		}

		sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
		    sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
		sptd->spt_pcachecnt = 0;
		sptd->spt_realsize = ptob(npages);
		sptcargs->seg_spt = seg;
		return (0);
	}

	/*
	 * get array of pages for each anon slot in amp
	 */
	if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
	    seg, addr, S_CREATE, cred)) != 0)
		goto out4;

	mutex_enter(&sp->shm_mlock);

	/* May be partially locked, so count bytes to charge for locking */
	for (i = 0; i < npages; i++)
		if (ppa[i]->p_lckcnt == 0)
			lockedbytes += PAGESIZE;

	proj = sp->shm_perm.ipc_proj;

	if (lockedbytes > 0) {
		mutex_enter(&procp->p_lock);
		if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
			mutex_exit(&procp->p_lock);
			mutex_exit(&sp->shm_mlock);
			for (i = 0; i < npages; i++)
				page_unlock(ppa[i]);
			err = ENOMEM;
			goto out4;
		}
		mutex_exit(&procp->p_lock);
	}

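	/*
	 * The pages now exist and the project has been charged for any that
	 * were not already locked; next, take a p_lckcnt hold on each page
	 * so it stays resident for the life of the ISM segment.
	 */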
	/*
	 * addr is the initial address corresponding to the first page
	 * in the ppa list.
	 */
	for (i = 0; i < npages; i++) {
		/* attempt to lock all pages */
		if (page_pp_lock(ppa[i], 0, 1) == 0) {
			/*
			 * if unable to lock any page, unlock all
			 * of them and return error
			 */
			for (j = 0; j < i; j++)
				page_pp_unlock(ppa[j], 0, 1);
			for (i = 0; i < npages; i++)
				page_unlock(ppa[i]);
			rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
			mutex_exit(&sp->shm_mlock);
			err = ENOMEM;
			goto out4;
		}
	}
	mutex_exit(&sp->shm_mlock);

	/*
	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
	 * for the entire life of the segment, for example platforms
	 * that do not support Dynamic Reconfiguration.
	 */
	hat_flags = HAT_LOAD_SHARE;
	if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
		hat_flags |= HAT_LOAD_LOCK;

	/*
	 * Load translations one large page at a time
	 * to make sure we don't create mappings bigger than
	 * segment's size code in case underlying pages
	 * are shared with segvn's segment that uses bigger
	 * size code than we do.
	 */
	pgsz = page_get_pagesize(seg->s_szc);
	pgcnt = page_get_pagecnt(seg->s_szc);
	for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
		sz = MIN(pgsz, ptob(npages - pidx));
		hat_memload_array(seg->s_as->a_hat, a, sz,
		    &ppa[pidx], sptd->spt_prot, hat_flags);
	}

	/*
	 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
	 * we will leave the pages locked SE_SHARED for the life
	 * of the ISM segment. This will prevent any calls to
	 * hat_pageunload() on this ISM segment for those platforms.
	 */
	if (!(hat_flags & HAT_LOAD_LOCK)) {
		/*
		 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
		 * we no longer need to hold the SE_SHARED lock on the pages,
		 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
		 * SE_SHARED lock on the pages as necessary.
		 */
		for (i = 0; i < npages; i++)
			page_unlock(ppa[i]);
	}
	sptd->spt_pcachecnt = 0;
	kmem_free(ppa, ((sizeof (page_t *)) * npages));
	sptd->spt_realsize = ptob(npages);
	atomic_add_long(&spt_used, npages);
	sptcargs->seg_spt = seg;
	return (0);

out4:
	seg->s_data = NULL;
	kmem_free(vp, sizeof (*vp));
out3:
	mutex_destroy(&sptd->spt_lock);
	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
		kmem_free(ppa, (sizeof (*ppa) * npages));
out2:
	kmem_free(sptd, sizeof (*sptd));
out1:
	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
		anon_swap_restore(npages);
	return (err);
}

/*ARGSUSED*/
void
segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
{
	struct page	*pp;
	struct spt_data *sptd = (struct spt_data *)seg->s_data;
	pgcnt_t		npages;
	ulong_t		anon_idx;
	struct anon_map *amp;
	struct anon	*ap;
	struct vnode	*vp;
	u_offset_t	off;
	uint_t		hat_flags;
	int		root = 0;
	pgcnt_t		pgs, curnpgs = 0;
	page_t		*rootpp;
	rctl_qty_t	unlocked_bytes = 0;
	kproject_t	*proj;
	kshmid_t	*sp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	npages = btop(len);

	hat_flags = HAT_UNLOAD_UNLOCK;
	if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
	    (sptd->spt_flags & SHM_PAGEABLE)) {
		hat_flags = HAT_UNLOAD;
	}

	hat_unload(seg->s_as->a_hat, addr, len, hat_flags);

	amp = sptd->spt_amp;
	if (sptd->spt_flags & SHM_PAGEABLE)
		npages = btop(amp->size);

	ASSERT(amp != NULL);

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		sp = amp->a_sp;
		proj = sp->shm_perm.ipc_proj;
		mutex_enter(&sp->shm_mlock);
	}
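	/*
	 * Walk every anon slot, take each page SE_EXCL, drop the permanent
	 * p_lckcnt hold taken in segspt_create() (ISM only) and destroy the
	 * page; a large page is destroyed once all of its constituent pages
	 * have been visited.
	 */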
	for (anon_idx = 0; anon_idx < npages; anon_idx++) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
			if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
				panic("segspt_free_pages: null app");
				/*NOTREACHED*/
			}
		} else {
			if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
			    == NULL)
				continue;
		}
		ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
		swap_xlate(ap, &vp, &off);

		/*
		 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
		 * the pages won't be holding the SE_SHARED lock at this
		 * point.
		 *
		 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
		 * the pages are still held SE_SHARED locked from the
		 * original segspt_create().
		 *
		 * Our goal is to get SE_EXCL lock on each page, remove
		 * the permanent lock on it and invalidate the page.
		 */
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
			if (hat_flags == HAT_UNLOAD)
				pp = page_lookup(vp, off, SE_EXCL);
			else {
				if ((pp = page_find(vp, off)) == NULL) {
					panic("segspt_free_pages: "
					    "page not locked");
					/*NOTREACHED*/
				}
				if (!page_tryupgrade(pp)) {
					page_unlock(pp);
					pp = page_lookup(vp, off, SE_EXCL);
				}
			}
			if (pp == NULL) {
				panic("segspt_free_pages: "
				    "page not in the system");
				/*NOTREACHED*/
			}
			ASSERT(pp->p_lckcnt > 0);
			page_pp_unlock(pp, 0, 1);
			if (pp->p_lckcnt == 0)
				unlocked_bytes += PAGESIZE;
		} else {
			if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
				continue;
		}
		/*
		 * It's logical to invalidate the pages here as in most cases
		 * these were created by segspt.
		 */
		if (pp->p_szc != 0) {
			/*
			 * For DISM swap is released in shm_rm_amp.
			 */
			if ((sptd->spt_flags & SHM_PAGEABLE) == 0 &&
			    ap->an_pvp != NULL) {
				panic("segspt_free_pages: pvp non NULL");
				/*NOTREACHED*/
			}
			if (root == 0) {
				ASSERT(curnpgs == 0);
				root = 1;
				rootpp = pp;
				pgs = curnpgs = page_get_pagecnt(pp->p_szc);
				ASSERT(pgs > 1);
				ASSERT(IS_P2ALIGNED(pgs, pgs));
				ASSERT(!(page_pptonum(pp) & (pgs - 1)));
				curnpgs--;
			} else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
				ASSERT(curnpgs == 1);
				ASSERT(page_pptonum(pp) ==
				    page_pptonum(rootpp) + (pgs - 1));
				page_destroy_pages(rootpp);
				root = 0;
				curnpgs = 0;
			} else {
				ASSERT(curnpgs > 1);
				ASSERT(page_pptonum(pp) ==
				    page_pptonum(rootpp) + (pgs - curnpgs));
				curnpgs--;
			}
		} else {
			if (root != 0 || curnpgs != 0) {
				panic("segspt_free_pages: bad large page");
				/*NOTREACHED*/
			}
			/*LINTED: constant in conditional context */
			VN_DISPOSE(pp, B_INVAL, 0, kcred);
		}
	}
	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		if (unlocked_bytes > 0)
			rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
		mutex_exit(&sp->shm_mlock);
	}
	if (root != 0 || curnpgs != 0) {
		panic("segspt_free_pages: bad large page");
		/*NOTREACHED*/
	}

	/*
	 * mark that pages have been released
	 */
	sptd->spt_realsize = 0;

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		atomic_add_long(&spt_used, -npages);
		anon_swap_restore(npages);
	}
}

/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_getpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map		*amp;
	ulong_t			anon_index;
	lgrp_mem_policy_info_t	*policy_info;
	struct spt_data		*spt_data;

	ASSERT(seg != NULL);

	/*
	 * Get anon_map from segspt
	 *
	 * Assume that no lock needs to be held on anon_map, since
	 * it should be protected by its reference count which must be
	 * nonzero for an existing segment.
	 * Need to grab the readers lock on the policy tree though.
	 */
	spt_data = (struct spt_data *)seg->s_data;
	if (spt_data == NULL)
		return (NULL);
	amp = spt_data->spt_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Get policy info
	 *
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

	return (policy_info);
}

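/*
 * The pagelock code below works with the generic segment p-cache
 * (seg_plookup()/seg_pinsert()/seg_pinactive()): a pplist covering the
 * whole segment is built once and cached, then handed back to later
 * as_pagelock() requests, and segspt_reclaim() is the callback that
 * eventually unlocks the pages when the cache entry is retired.
 */
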
/*
 * DISM only.
 * Return locked pages over a given range.
 *
 * We will cache all DISM locked pages and save the pplist for the
 * entire segment in the ppa field of the underlying DISM segment structure.
 * Later, during a call to segspt_reclaim() we will use this ppa array
 * to page_unlock() all of the pages and then we will free this ppa list.
 */
/*ARGSUSED*/
static int
segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct seg	*sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	pgcnt_t pg_idx, npages, tot_npages, npgs;
	struct page **pplist, **pl, **ppa, *pp;
	struct anon_map *amp;
	spgcnt_t	an_idx;
	int	ret = ENOTSUP;
	uint_t	pl_built = 0;
	struct anon *ap;
	struct vnode *vp;
	u_offset_t off;
	pgcnt_t claim_availrmem = 0;
	uint_t	szc;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * We want to lock/unlock the entire ISM segment. Therefore,
	 * we will be using the underlying sptseg and its base address
	 * and length for the caching arguments.
	 */
	ASSERT(sptseg);
	ASSERT(sptd);

	pg_idx = seg_page(seg, addr);
	npages = btopr(len);

	/*
	 * check if the request is larger than the number of pages covered
	 * by the amp
	 */
	if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
		*ppp = NULL;
		return (ENOTSUP);
	}

	if (type == L_PAGEUNLOCK) {
		ASSERT(sptd->spt_ppa != NULL);

		seg_pinactive(seg, seg->s_base, sptd->spt_amp->size,
		    sptd->spt_ppa, sptd->spt_prot, segspt_reclaim);

		/*
		 * If someone is blocked while unmapping, we purge
		 * segment page cache and thus reclaim pplist synchronously
		 * without waiting for seg_pasync_thread. This speeds up
		 * unmapping in cases where munmap(2) is called, while
		 * raw async i/o is still in progress or where a thread
		 * exits on data fault in a multithreaded application.
		 */
		if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
			segspt_purge(seg);
		}
		return (0);
	} else if (type == L_PAGERECLAIM) {
		ASSERT(sptd->spt_ppa != NULL);
		(void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size,
		    sptd->spt_ppa, sptd->spt_prot);
		return (0);
	}

	if (sptd->spt_flags & DISM_PPA_CHANGED) {
		segspt_purge(seg);
		/*
		 * for DISM the ppa needs to be rebuilt since the
		 * number of locked pages could have changed
		 */
		*ppp = NULL;
		return (ENOTSUP);
	}

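	/*
	 * From here on this is the L_PAGELOCK path: first a lock-free
	 * lookup in the p-cache, then the same lookup under spt_lock, and
	 * only if both miss do we build a fresh ppa[] for the segment.
	 */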
	/*
	 * First try to find pages in segment page cache, without
	 * holding the segment lock.
	 */
	pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size,
	    sptd->spt_prot);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa != NULL);
		ASSERT(sptd->spt_ppa == pplist);
		ppa = sptd->spt_ppa;
		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
			if (ppa[an_idx] == NULL) {
				seg_pinactive(seg, seg->s_base,
				    sptd->spt_amp->size, ppa,
				    sptd->spt_prot, segspt_reclaim);
				*ppp = NULL;
				return (ENOTSUP);
			}
			if ((szc = ppa[an_idx]->p_szc) != 0) {
				npgs = page_get_pagecnt(szc);
				an_idx = P2ROUNDUP(an_idx + 1, npgs);
			} else {
				an_idx++;
			}
		}
		/*
		 * Since we cache the entire DISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. pg_idx.
		 */
		*ppp = &(sptd->spt_ppa[pg_idx]);
		return (0);
	}

	/* The L_PAGELOCK case... */
	mutex_enter(&sptd->spt_lock);
	/*
	 * try to find pages in segment page cache with mutex
	 */
	pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size,
	    sptd->spt_prot);
	if (pplist != NULL) {
		ASSERT(sptd->spt_ppa != NULL);
		ASSERT(sptd->spt_ppa == pplist);
		ppa = sptd->spt_ppa;
		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
			if (ppa[an_idx] == NULL) {
				mutex_exit(&sptd->spt_lock);
				seg_pinactive(seg, seg->s_base,
				    sptd->spt_amp->size, ppa,
				    sptd->spt_prot, segspt_reclaim);
				*ppp = NULL;
				return (ENOTSUP);
			}
			if ((szc = ppa[an_idx]->p_szc) != 0) {
				npgs = page_get_pagecnt(szc);
				an_idx = P2ROUNDUP(an_idx + 1, npgs);
			} else {
				an_idx++;
			}
		}
		/*
		 * Since we cache the entire DISM segment, we want to
		 * set ppp to point to the first slot that corresponds
		 * to the requested addr, i.e. pg_idx.
		 */
		mutex_exit(&sptd->spt_lock);
		*ppp = &(sptd->spt_ppa[pg_idx]);
		return (0);
	}
	if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) ==
	    SEGP_FAIL) {
		mutex_exit(&sptd->spt_lock);
		*ppp = NULL;
		return (ENOTSUP);
	}

	/*
	 * No need to worry about protections because DISM pages are always rw.
	 */
	pl = pplist = NULL;
	amp = sptd->spt_amp;

	/*
	 * Do we need to build the ppa array?
	 */
	if (sptd->spt_ppa == NULL) {
		pgcnt_t lpg_cnt = 0;

		pl_built = 1;
		tot_npages = btopr(sptd->spt_amp->size);

		ASSERT(sptd->spt_pcachecnt == 0);
		pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
		pl = pplist;

		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		for (an_idx = 0; an_idx < tot_npages; ) {
			ap = anon_get_ptr(amp->ahp, an_idx);
			/*
			 * Cache only mlocked pages. For large pages
			 * if one (constituent) page is mlocked
			 * all pages for that large page
			 * are cached also. This is for quick
			 * lookups of the ppa array.
			 */
			if ((ap != NULL) && (lpg_cnt != 0 ||
			    (sptd->spt_ppa_lckcnt[an_idx] != 0))) {

				swap_xlate(ap, &vp, &off);
				pp = page_lookup(vp, off, SE_SHARED);
				ASSERT(pp != NULL);
				if (lpg_cnt == 0) {
					lpg_cnt++;
					/*
					 * For a small page, we are done --
					 * lpg_cnt is reset to 0 below.
					 *
					 * For a large page, we are guaranteed
					 * to find the anon structures of all
					 * constituent pages and a non-zero
					 * lpg_cnt ensures that we don't test
					 * for mlock for these. We are done
					 * when lpg_cnt reaches (npgs + 1).
					 * If we are not the first constituent
					 * page, restart at the first one.
					 */
1029e67882ffSbs21162 */ 10307c478bd9Sstevel@tonic-gate npgs = page_get_pagecnt(pp->p_szc); 10317c478bd9Sstevel@tonic-gate if (!IS_P2ALIGNED(an_idx, npgs)) { 10327c478bd9Sstevel@tonic-gate an_idx = P2ALIGN(an_idx, npgs); 10337c478bd9Sstevel@tonic-gate page_unlock(pp); 10347c478bd9Sstevel@tonic-gate continue; 10357c478bd9Sstevel@tonic-gate } 10367c478bd9Sstevel@tonic-gate } 1037e67882ffSbs21162 if (++lpg_cnt > npgs) 10387c478bd9Sstevel@tonic-gate lpg_cnt = 0; 10397c478bd9Sstevel@tonic-gate 10407c478bd9Sstevel@tonic-gate /* 10417c478bd9Sstevel@tonic-gate * availrmem is decremented only 10427c478bd9Sstevel@tonic-gate * for unlocked pages 10437c478bd9Sstevel@tonic-gate */ 10447c478bd9Sstevel@tonic-gate if (sptd->spt_ppa_lckcnt[an_idx] == 0) 10457c478bd9Sstevel@tonic-gate claim_availrmem++; 10467c478bd9Sstevel@tonic-gate pplist[an_idx] = pp; 10477c478bd9Sstevel@tonic-gate } 10487c478bd9Sstevel@tonic-gate an_idx++; 10497c478bd9Sstevel@tonic-gate } 10507c478bd9Sstevel@tonic-gate ANON_LOCK_EXIT(&->a_rwlock); 10517c478bd9Sstevel@tonic-gate 10527c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock); 10537c478bd9Sstevel@tonic-gate if (availrmem < tune.t_minarmem + claim_availrmem) { 10547c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock); 10557c478bd9Sstevel@tonic-gate ret = FC_MAKE_ERR(ENOMEM); 10567c478bd9Sstevel@tonic-gate claim_availrmem = 0; 10577c478bd9Sstevel@tonic-gate goto insert_fail; 10587c478bd9Sstevel@tonic-gate } else { 10597c478bd9Sstevel@tonic-gate availrmem -= claim_availrmem; 10607c478bd9Sstevel@tonic-gate } 10617c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock); 10627c478bd9Sstevel@tonic-gate 10637c478bd9Sstevel@tonic-gate sptd->spt_ppa = pl; 10647c478bd9Sstevel@tonic-gate } else { 10657c478bd9Sstevel@tonic-gate /* 10667c478bd9Sstevel@tonic-gate * We already have a valid ppa[]. 10677c478bd9Sstevel@tonic-gate */ 10687c478bd9Sstevel@tonic-gate pl = sptd->spt_ppa; 10697c478bd9Sstevel@tonic-gate } 10707c478bd9Sstevel@tonic-gate 10717c478bd9Sstevel@tonic-gate ASSERT(pl != NULL); 10727c478bd9Sstevel@tonic-gate 10737c478bd9Sstevel@tonic-gate ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size, 10747c478bd9Sstevel@tonic-gate pl, sptd->spt_prot, SEGP_FORCE_WIRED | SEGP_ASYNC_FLUSH, 10757c478bd9Sstevel@tonic-gate segspt_reclaim); 10767c478bd9Sstevel@tonic-gate if (ret == SEGP_FAIL) { 10777c478bd9Sstevel@tonic-gate /* 10787c478bd9Sstevel@tonic-gate * seg_pinsert failed. We return 10797c478bd9Sstevel@tonic-gate * ENOTSUP, so that the as_pagelock() code will 10807c478bd9Sstevel@tonic-gate * then try the slower F_SOFTLOCK path. 10817c478bd9Sstevel@tonic-gate */ 10820da3d2a8Srd117015 if (pl_built) { 10830da3d2a8Srd117015 /* 10840da3d2a8Srd117015 * No one else has referenced the ppa[]. 10850da3d2a8Srd117015 * We created it and we need to destroy it. 10860da3d2a8Srd117015 */ 10877c478bd9Sstevel@tonic-gate sptd->spt_ppa = NULL; 10880da3d2a8Srd117015 } 10897c478bd9Sstevel@tonic-gate ret = ENOTSUP; 10907c478bd9Sstevel@tonic-gate goto insert_fail; 10917c478bd9Sstevel@tonic-gate } 10927c478bd9Sstevel@tonic-gate 10937c478bd9Sstevel@tonic-gate /* 10947c478bd9Sstevel@tonic-gate * In either case, we increment softlockcnt on the 'real' segment. 
10957c478bd9Sstevel@tonic-gate 	 */
10967c478bd9Sstevel@tonic-gate 	sptd->spt_pcachecnt++;
10977c478bd9Sstevel@tonic-gate 	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
10987c478bd9Sstevel@tonic-gate 
10997c478bd9Sstevel@tonic-gate 	ppa = sptd->spt_ppa;
11007c478bd9Sstevel@tonic-gate 	for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
11017c478bd9Sstevel@tonic-gate 		if (ppa[an_idx] == NULL) {
11027c478bd9Sstevel@tonic-gate 			mutex_exit(&sptd->spt_lock);
11037c478bd9Sstevel@tonic-gate 			seg_pinactive(seg, seg->s_base, sptd->spt_amp->size,
11047c478bd9Sstevel@tonic-gate 			    pl, sptd->spt_prot, segspt_reclaim);
11057c478bd9Sstevel@tonic-gate 			*ppp = NULL;
11067c478bd9Sstevel@tonic-gate 			return (ENOTSUP);
11077c478bd9Sstevel@tonic-gate 		}
11087c478bd9Sstevel@tonic-gate 		if ((szc = ppa[an_idx]->p_szc) != 0) {
11097c478bd9Sstevel@tonic-gate 			npgs = page_get_pagecnt(szc);
11107c478bd9Sstevel@tonic-gate 			an_idx = P2ROUNDUP(an_idx + 1, npgs);
11117c478bd9Sstevel@tonic-gate 		} else {
11127c478bd9Sstevel@tonic-gate 			an_idx++;
11137c478bd9Sstevel@tonic-gate 		}
11147c478bd9Sstevel@tonic-gate 	}
11157c478bd9Sstevel@tonic-gate 	/*
11167c478bd9Sstevel@tonic-gate 	 * We can now drop the sptd->spt_lock since the ppa[]
11177c478bd9Sstevel@tonic-gate 	 * exists and we have incremented pcachecnt.
11187c478bd9Sstevel@tonic-gate 	 */
11197c478bd9Sstevel@tonic-gate 	mutex_exit(&sptd->spt_lock);
11207c478bd9Sstevel@tonic-gate 
11217c478bd9Sstevel@tonic-gate 	/*
11227c478bd9Sstevel@tonic-gate 	 * Since we cache the entire segment, we want to
11237c478bd9Sstevel@tonic-gate 	 * set ppp to point to the first slot that corresponds
11247c478bd9Sstevel@tonic-gate 	 * to the requested addr, i.e. pg_idx.
11257c478bd9Sstevel@tonic-gate 	 */
11267c478bd9Sstevel@tonic-gate 	*ppp = &(sptd->spt_ppa[pg_idx]);
11277c478bd9Sstevel@tonic-gate 	return (ret);
11287c478bd9Sstevel@tonic-gate 
11297c478bd9Sstevel@tonic-gate insert_fail:
11307c478bd9Sstevel@tonic-gate 	/*
11317c478bd9Sstevel@tonic-gate 	 * We will only reach this code if we tried and failed.
11327c478bd9Sstevel@tonic-gate 	 *
11337c478bd9Sstevel@tonic-gate 	 * And we can drop the lock on the dummy seg, once we've failed
11347c478bd9Sstevel@tonic-gate 	 * to set up a new ppa[].
11357c478bd9Sstevel@tonic-gate 	 */
11367c478bd9Sstevel@tonic-gate 	mutex_exit(&sptd->spt_lock);
11377c478bd9Sstevel@tonic-gate 
11387c478bd9Sstevel@tonic-gate 	if (pl_built) {
11397c478bd9Sstevel@tonic-gate 		mutex_enter(&freemem_lock);
11407c478bd9Sstevel@tonic-gate 		availrmem += claim_availrmem;
11417c478bd9Sstevel@tonic-gate 		mutex_exit(&freemem_lock);
11427c478bd9Sstevel@tonic-gate 
11437c478bd9Sstevel@tonic-gate 		/*
11447c478bd9Sstevel@tonic-gate 		 * We created pl and we need to destroy it.
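		 * Each non-NULL entry still holds the SE_SHARED lock taken
		 * by page_lookup() above, so drop those locks before the
		 * array itself is freed.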
11457c478bd9Sstevel@tonic-gate 		 */
11467c478bd9Sstevel@tonic-gate 		pplist = pl;
11477c478bd9Sstevel@tonic-gate 		for (an_idx = 0; an_idx < tot_npages; an_idx++) {
11487c478bd9Sstevel@tonic-gate 			if (pplist[an_idx] != NULL)
11497c478bd9Sstevel@tonic-gate 				page_unlock(pplist[an_idx]);
11507c478bd9Sstevel@tonic-gate 		}
11517c478bd9Sstevel@tonic-gate 		kmem_free(pl, sizeof (page_t *) * tot_npages);
11527c478bd9Sstevel@tonic-gate 	}
11537c478bd9Sstevel@tonic-gate 
11547c478bd9Sstevel@tonic-gate 	if (shmd->shm_softlockcnt <= 0) {
11557c478bd9Sstevel@tonic-gate 		if (AS_ISUNMAPWAIT(seg->s_as)) {
11567c478bd9Sstevel@tonic-gate 			mutex_enter(&seg->s_as->a_contents);
11577c478bd9Sstevel@tonic-gate 			if (AS_ISUNMAPWAIT(seg->s_as)) {
11587c478bd9Sstevel@tonic-gate 				AS_CLRUNMAPWAIT(seg->s_as);
11597c478bd9Sstevel@tonic-gate 				cv_broadcast(&seg->s_as->a_cv);
11607c478bd9Sstevel@tonic-gate 			}
11617c478bd9Sstevel@tonic-gate 			mutex_exit(&seg->s_as->a_contents);
11627c478bd9Sstevel@tonic-gate 		}
11637c478bd9Sstevel@tonic-gate 	}
11647c478bd9Sstevel@tonic-gate 	*ppp = NULL;
11657c478bd9Sstevel@tonic-gate 	return (ret);
11667c478bd9Sstevel@tonic-gate }
11677c478bd9Sstevel@tonic-gate 
11687c478bd9Sstevel@tonic-gate 
11697c478bd9Sstevel@tonic-gate 
11707c478bd9Sstevel@tonic-gate /*
11717c478bd9Sstevel@tonic-gate  * return locked pages over a given range.
11727c478bd9Sstevel@tonic-gate  *
11737c478bd9Sstevel@tonic-gate  * We will cache the entire ISM segment and save the pplist for the
11747c478bd9Sstevel@tonic-gate  * entire segment in the ppa field of the underlying ISM segment structure.
11757c478bd9Sstevel@tonic-gate  * Later, during a call to segspt_reclaim() we will use this ppa array
11767c478bd9Sstevel@tonic-gate  * to page_unlock() all of the pages and then we will free this ppa list.
11777c478bd9Sstevel@tonic-gate  */
11787c478bd9Sstevel@tonic-gate /*ARGSUSED*/
11797c478bd9Sstevel@tonic-gate static int
11807c478bd9Sstevel@tonic-gate segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
11817c478bd9Sstevel@tonic-gate     struct page ***ppp, enum lock_type type, enum seg_rw rw)
11827c478bd9Sstevel@tonic-gate {
11837c478bd9Sstevel@tonic-gate 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
11847c478bd9Sstevel@tonic-gate 	struct seg *sptseg = shmd->shm_sptseg;
11857c478bd9Sstevel@tonic-gate 	struct spt_data *sptd = sptseg->s_data;
11867c478bd9Sstevel@tonic-gate 	pgcnt_t np, page_index, npages;
11877c478bd9Sstevel@tonic-gate 	caddr_t a, spt_base;
11887c478bd9Sstevel@tonic-gate 	struct page **pplist, **pl, *pp;
11897c478bd9Sstevel@tonic-gate 	struct anon_map *amp;
11907c478bd9Sstevel@tonic-gate 	ulong_t anon_index;
11917c478bd9Sstevel@tonic-gate 	int ret = ENOTSUP;
11927c478bd9Sstevel@tonic-gate 	uint_t pl_built = 0;
11937c478bd9Sstevel@tonic-gate 	struct anon *ap;
11947c478bd9Sstevel@tonic-gate 	struct vnode *vp;
11957c478bd9Sstevel@tonic-gate 	u_offset_t off;
11967c478bd9Sstevel@tonic-gate 
11977c478bd9Sstevel@tonic-gate 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
11987c478bd9Sstevel@tonic-gate 
11997c478bd9Sstevel@tonic-gate 	/*
12007c478bd9Sstevel@tonic-gate 	 * We want to lock/unlock the entire ISM segment. Therefore,
12017c478bd9Sstevel@tonic-gate 	 * we will be using the underlying sptseg and its base address
12027c478bd9Sstevel@tonic-gate 	 * and length for the caching arguments.
12037c478bd9Sstevel@tonic-gate */ 12047c478bd9Sstevel@tonic-gate ASSERT(sptseg); 12057c478bd9Sstevel@tonic-gate ASSERT(sptd); 12067c478bd9Sstevel@tonic-gate 12077c478bd9Sstevel@tonic-gate if (sptd->spt_flags & SHM_PAGEABLE) { 12087c478bd9Sstevel@tonic-gate return (segspt_dismpagelock(seg, addr, len, ppp, type, rw)); 12097c478bd9Sstevel@tonic-gate } 12107c478bd9Sstevel@tonic-gate 12117c478bd9Sstevel@tonic-gate page_index = seg_page(seg, addr); 12127c478bd9Sstevel@tonic-gate npages = btopr(len); 12137c478bd9Sstevel@tonic-gate 12147c478bd9Sstevel@tonic-gate /* 12157c478bd9Sstevel@tonic-gate * check if the request is larger than number of pages covered 12167c478bd9Sstevel@tonic-gate * by amp 12177c478bd9Sstevel@tonic-gate */ 12187c478bd9Sstevel@tonic-gate if (page_index + npages > btopr(sptd->spt_amp->size)) { 12197c478bd9Sstevel@tonic-gate *ppp = NULL; 12207c478bd9Sstevel@tonic-gate return (ENOTSUP); 12217c478bd9Sstevel@tonic-gate } 12227c478bd9Sstevel@tonic-gate 12237c478bd9Sstevel@tonic-gate if (type == L_PAGEUNLOCK) { 12247c478bd9Sstevel@tonic-gate 12257c478bd9Sstevel@tonic-gate ASSERT(sptd->spt_ppa != NULL); 12267c478bd9Sstevel@tonic-gate 12277c478bd9Sstevel@tonic-gate seg_pinactive(seg, seg->s_base, sptd->spt_amp->size, 12287c478bd9Sstevel@tonic-gate sptd->spt_ppa, sptd->spt_prot, segspt_reclaim); 12297c478bd9Sstevel@tonic-gate 12307c478bd9Sstevel@tonic-gate /* 12317c478bd9Sstevel@tonic-gate * If someone is blocked while unmapping, we purge 12327c478bd9Sstevel@tonic-gate * segment page cache and thus reclaim pplist synchronously 12337c478bd9Sstevel@tonic-gate * without waiting for seg_pasync_thread. This speeds up 12347c478bd9Sstevel@tonic-gate * unmapping in cases where munmap(2) is called, while 12357c478bd9Sstevel@tonic-gate * raw async i/o is still in progress or where a thread 12367c478bd9Sstevel@tonic-gate * exits on data fault in a multithreaded application. 12377c478bd9Sstevel@tonic-gate */ 12387c478bd9Sstevel@tonic-gate if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) { 12397c478bd9Sstevel@tonic-gate segspt_purge(seg); 12407c478bd9Sstevel@tonic-gate } 12417c478bd9Sstevel@tonic-gate return (0); 12427c478bd9Sstevel@tonic-gate } else if (type == L_PAGERECLAIM) { 12437c478bd9Sstevel@tonic-gate ASSERT(sptd->spt_ppa != NULL); 12447c478bd9Sstevel@tonic-gate 12457c478bd9Sstevel@tonic-gate (void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size, 12467c478bd9Sstevel@tonic-gate sptd->spt_ppa, sptd->spt_prot); 12477c478bd9Sstevel@tonic-gate return (0); 12487c478bd9Sstevel@tonic-gate } 12497c478bd9Sstevel@tonic-gate 12507c478bd9Sstevel@tonic-gate /* 12517c478bd9Sstevel@tonic-gate * First try to find pages in segment page cache, without 12527c478bd9Sstevel@tonic-gate * holding the segment lock. 12537c478bd9Sstevel@tonic-gate */ 12547c478bd9Sstevel@tonic-gate pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 12557c478bd9Sstevel@tonic-gate sptd->spt_prot); 12567c478bd9Sstevel@tonic-gate if (pplist != NULL) { 12577c478bd9Sstevel@tonic-gate ASSERT(sptd->spt_ppa == pplist); 12587c478bd9Sstevel@tonic-gate ASSERT(sptd->spt_ppa[page_index]); 12597c478bd9Sstevel@tonic-gate /* 12607c478bd9Sstevel@tonic-gate * Since we cache the entire ISM segment, we want to 12617c478bd9Sstevel@tonic-gate * set ppp to point to the first slot that corresponds 12627c478bd9Sstevel@tonic-gate * to the requested addr, i.e. page_index. 
12637c478bd9Sstevel@tonic-gate */ 12647c478bd9Sstevel@tonic-gate *ppp = &(sptd->spt_ppa[page_index]); 12657c478bd9Sstevel@tonic-gate return (0); 12667c478bd9Sstevel@tonic-gate } 12677c478bd9Sstevel@tonic-gate 12687c478bd9Sstevel@tonic-gate /* The L_PAGELOCK case... */ 12697c478bd9Sstevel@tonic-gate mutex_enter(&sptd->spt_lock); 12707c478bd9Sstevel@tonic-gate 12717c478bd9Sstevel@tonic-gate /* 12727c478bd9Sstevel@tonic-gate * try to find pages in segment page cache 12737c478bd9Sstevel@tonic-gate */ 12747c478bd9Sstevel@tonic-gate pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size, 12757c478bd9Sstevel@tonic-gate sptd->spt_prot); 12767c478bd9Sstevel@tonic-gate if (pplist != NULL) { 12777c478bd9Sstevel@tonic-gate ASSERT(sptd->spt_ppa == pplist); 12787c478bd9Sstevel@tonic-gate /* 12797c478bd9Sstevel@tonic-gate * Since we cache the entire segment, we want to 12807c478bd9Sstevel@tonic-gate * set ppp to point to the first slot that corresponds 12817c478bd9Sstevel@tonic-gate * to the requested addr, i.e. page_index. 12827c478bd9Sstevel@tonic-gate */ 12837c478bd9Sstevel@tonic-gate mutex_exit(&sptd->spt_lock); 12847c478bd9Sstevel@tonic-gate *ppp = &(sptd->spt_ppa[page_index]); 12857c478bd9Sstevel@tonic-gate return (0); 12867c478bd9Sstevel@tonic-gate } 12877c478bd9Sstevel@tonic-gate 12887c478bd9Sstevel@tonic-gate if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) == 12897c478bd9Sstevel@tonic-gate SEGP_FAIL) { 12907c478bd9Sstevel@tonic-gate mutex_exit(&sptd->spt_lock); 12917c478bd9Sstevel@tonic-gate *ppp = NULL; 12927c478bd9Sstevel@tonic-gate return (ENOTSUP); 12937c478bd9Sstevel@tonic-gate } 12947c478bd9Sstevel@tonic-gate 12957c478bd9Sstevel@tonic-gate /* 12967c478bd9Sstevel@tonic-gate * No need to worry about protections because ISM pages 12977c478bd9Sstevel@tonic-gate * are always rw. 12987c478bd9Sstevel@tonic-gate */ 12997c478bd9Sstevel@tonic-gate pl = pplist = NULL; 13007c478bd9Sstevel@tonic-gate 13017c478bd9Sstevel@tonic-gate /* 13027c478bd9Sstevel@tonic-gate * Do we need to build the ppa array? 13037c478bd9Sstevel@tonic-gate */ 13047c478bd9Sstevel@tonic-gate if (sptd->spt_ppa == NULL) { 13057c478bd9Sstevel@tonic-gate ASSERT(sptd->spt_ppa == pplist); 13067c478bd9Sstevel@tonic-gate 13077c478bd9Sstevel@tonic-gate spt_base = sptseg->s_base; 13087c478bd9Sstevel@tonic-gate pl_built = 1; 13097c478bd9Sstevel@tonic-gate 13107c478bd9Sstevel@tonic-gate /* 13117c478bd9Sstevel@tonic-gate * availrmem is decremented once during anon_swap_adjust() 13127c478bd9Sstevel@tonic-gate * and is incremented during the anon_unresv(), which is 13137c478bd9Sstevel@tonic-gate * called from shm_rm_amp() when the segment is destroyed. 
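		 * So, unlike the pageable (DISM) case earlier in this file,
		 * no additional availrmem accounting is needed here while
		 * the ppa list for a locked ISM segment is built.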
13147c478bd9Sstevel@tonic-gate 	 */
13157c478bd9Sstevel@tonic-gate 		amp = sptd->spt_amp;
13167c478bd9Sstevel@tonic-gate 		ASSERT(amp != NULL);
13177c478bd9Sstevel@tonic-gate 
13187c478bd9Sstevel@tonic-gate 		/* pcachecnt is protected by sptd->spt_lock */
13197c478bd9Sstevel@tonic-gate 		ASSERT(sptd->spt_pcachecnt == 0);
13207c478bd9Sstevel@tonic-gate 		pplist = kmem_zalloc(sizeof (page_t *)
13217c478bd9Sstevel@tonic-gate 		    * btopr(sptd->spt_amp->size), KM_SLEEP);
13227c478bd9Sstevel@tonic-gate 		pl = pplist;
13237c478bd9Sstevel@tonic-gate 
13247c478bd9Sstevel@tonic-gate 		anon_index = seg_page(sptseg, spt_base);
13257c478bd9Sstevel@tonic-gate 
13267c478bd9Sstevel@tonic-gate 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
13277c478bd9Sstevel@tonic-gate 		for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
13287c478bd9Sstevel@tonic-gate 		    a += PAGESIZE, anon_index++, pplist++) {
13297c478bd9Sstevel@tonic-gate 			ap = anon_get_ptr(amp->ahp, anon_index);
13307c478bd9Sstevel@tonic-gate 			ASSERT(ap != NULL);
13317c478bd9Sstevel@tonic-gate 			swap_xlate(ap, &vp, &off);
13327c478bd9Sstevel@tonic-gate 			pp = page_lookup(vp, off, SE_SHARED);
13337c478bd9Sstevel@tonic-gate 			ASSERT(pp != NULL);
13347c478bd9Sstevel@tonic-gate 			*pplist = pp;
13357c478bd9Sstevel@tonic-gate 		}
13367c478bd9Sstevel@tonic-gate 		ANON_LOCK_EXIT(&amp->a_rwlock);
13377c478bd9Sstevel@tonic-gate 
13387c478bd9Sstevel@tonic-gate 		if (a < (spt_base + sptd->spt_amp->size)) {
13397c478bd9Sstevel@tonic-gate 			ret = ENOTSUP;
13407c478bd9Sstevel@tonic-gate 			goto insert_fail;
13417c478bd9Sstevel@tonic-gate 		}
13427c478bd9Sstevel@tonic-gate 		sptd->spt_ppa = pl;
13437c478bd9Sstevel@tonic-gate 	} else {
13447c478bd9Sstevel@tonic-gate 		/*
13457c478bd9Sstevel@tonic-gate 		 * We already have a valid ppa[].
13467c478bd9Sstevel@tonic-gate 		 */
13477c478bd9Sstevel@tonic-gate 		pl = sptd->spt_ppa;
13487c478bd9Sstevel@tonic-gate 	}
13497c478bd9Sstevel@tonic-gate 
13507c478bd9Sstevel@tonic-gate 	ASSERT(pl != NULL);
13517c478bd9Sstevel@tonic-gate 
13527c478bd9Sstevel@tonic-gate 	ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size,
13537c478bd9Sstevel@tonic-gate 	    pl, sptd->spt_prot, SEGP_FORCE_WIRED, segspt_reclaim);
13547c478bd9Sstevel@tonic-gate 	if (ret == SEGP_FAIL) {
13557c478bd9Sstevel@tonic-gate 		/*
13567c478bd9Sstevel@tonic-gate 		 * seg_pinsert failed. We return
13577c478bd9Sstevel@tonic-gate 		 * ENOTSUP, so that the as_pagelock() code will
13587c478bd9Sstevel@tonic-gate 		 * then try the slower F_SOFTLOCK path.
13597c478bd9Sstevel@tonic-gate 		 */
13607c478bd9Sstevel@tonic-gate 		if (pl_built) {
13617c478bd9Sstevel@tonic-gate 			/*
13627c478bd9Sstevel@tonic-gate 			 * No one else has referenced the ppa[].
13637c478bd9Sstevel@tonic-gate 			 * We created it and we need to destroy it.
13647c478bd9Sstevel@tonic-gate 			 */
13657c478bd9Sstevel@tonic-gate 			sptd->spt_ppa = NULL;
13667c478bd9Sstevel@tonic-gate 		}
13677c478bd9Sstevel@tonic-gate 		ret = ENOTSUP;
13687c478bd9Sstevel@tonic-gate 		goto insert_fail;
13697c478bd9Sstevel@tonic-gate 	}
13707c478bd9Sstevel@tonic-gate 
13717c478bd9Sstevel@tonic-gate 	/*
13727c478bd9Sstevel@tonic-gate 	 * In either case, we increment softlockcnt on the 'real' segment.
13737c478bd9Sstevel@tonic-gate 	 */
13747c478bd9Sstevel@tonic-gate 	sptd->spt_pcachecnt++;
13757c478bd9Sstevel@tonic-gate 	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
13767c478bd9Sstevel@tonic-gate 
13777c478bd9Sstevel@tonic-gate 	/*
13787c478bd9Sstevel@tonic-gate 	 * We can now drop the sptd->spt_lock since the ppa[]
13797c478bd9Sstevel@tonic-gate 	 * exists and we have incremented pcachecnt.
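	 * (segspt_reclaim() only tears the ppa[] down once spt_pcachecnt
	 * drops back to zero.)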
13807c478bd9Sstevel@tonic-gate */ 13817c478bd9Sstevel@tonic-gate mutex_exit(&sptd->spt_lock); 13827c478bd9Sstevel@tonic-gate 13837c478bd9Sstevel@tonic-gate /* 13847c478bd9Sstevel@tonic-gate * Since we cache the entire segment, we want to 13857c478bd9Sstevel@tonic-gate * set ppp to point to the first slot that corresponds 13867c478bd9Sstevel@tonic-gate * to the requested addr, i.e. page_index. 13877c478bd9Sstevel@tonic-gate */ 13887c478bd9Sstevel@tonic-gate *ppp = &(sptd->spt_ppa[page_index]); 13897c478bd9Sstevel@tonic-gate return (ret); 13907c478bd9Sstevel@tonic-gate 13917c478bd9Sstevel@tonic-gate insert_fail: 13927c478bd9Sstevel@tonic-gate /* 13937c478bd9Sstevel@tonic-gate * We will only reach this code if we tried and failed. 13947c478bd9Sstevel@tonic-gate * 13957c478bd9Sstevel@tonic-gate * And we can drop the lock on the dummy seg, once we've failed 13967c478bd9Sstevel@tonic-gate * to set up a new ppa[]. 13977c478bd9Sstevel@tonic-gate */ 13987c478bd9Sstevel@tonic-gate mutex_exit(&sptd->spt_lock); 13997c478bd9Sstevel@tonic-gate 14007c478bd9Sstevel@tonic-gate if (pl_built) { 14017c478bd9Sstevel@tonic-gate /* 14027c478bd9Sstevel@tonic-gate * We created pl and we need to destroy it. 14037c478bd9Sstevel@tonic-gate */ 14047c478bd9Sstevel@tonic-gate pplist = pl; 14057c478bd9Sstevel@tonic-gate np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT); 14067c478bd9Sstevel@tonic-gate while (np) { 14077c478bd9Sstevel@tonic-gate page_unlock(*pplist); 14087c478bd9Sstevel@tonic-gate np--; 14097c478bd9Sstevel@tonic-gate pplist++; 14107c478bd9Sstevel@tonic-gate } 14117c478bd9Sstevel@tonic-gate kmem_free(pl, sizeof (page_t *) * 14127c478bd9Sstevel@tonic-gate btopr(sptd->spt_amp->size)); 14137c478bd9Sstevel@tonic-gate } 14147c478bd9Sstevel@tonic-gate if (shmd->shm_softlockcnt <= 0) { 14157c478bd9Sstevel@tonic-gate if (AS_ISUNMAPWAIT(seg->s_as)) { 14167c478bd9Sstevel@tonic-gate mutex_enter(&seg->s_as->a_contents); 14177c478bd9Sstevel@tonic-gate if (AS_ISUNMAPWAIT(seg->s_as)) { 14187c478bd9Sstevel@tonic-gate AS_CLRUNMAPWAIT(seg->s_as); 14197c478bd9Sstevel@tonic-gate cv_broadcast(&seg->s_as->a_cv); 14207c478bd9Sstevel@tonic-gate } 14217c478bd9Sstevel@tonic-gate mutex_exit(&seg->s_as->a_contents); 14227c478bd9Sstevel@tonic-gate } 14237c478bd9Sstevel@tonic-gate } 14247c478bd9Sstevel@tonic-gate *ppp = NULL; 14257c478bd9Sstevel@tonic-gate return (ret); 14267c478bd9Sstevel@tonic-gate } 14277c478bd9Sstevel@tonic-gate 14287c478bd9Sstevel@tonic-gate /* 14297c478bd9Sstevel@tonic-gate * purge any cached pages in the I/O page cache 14307c478bd9Sstevel@tonic-gate */ 14317c478bd9Sstevel@tonic-gate static void 14327c478bd9Sstevel@tonic-gate segspt_purge(struct seg *seg) 14337c478bd9Sstevel@tonic-gate { 14347c478bd9Sstevel@tonic-gate seg_ppurge(seg); 14357c478bd9Sstevel@tonic-gate } 14367c478bd9Sstevel@tonic-gate 14377c478bd9Sstevel@tonic-gate static int 14387c478bd9Sstevel@tonic-gate segspt_reclaim(struct seg *seg, caddr_t addr, size_t len, struct page **pplist, 14397c478bd9Sstevel@tonic-gate enum seg_rw rw) 14407c478bd9Sstevel@tonic-gate { 14417c478bd9Sstevel@tonic-gate struct shm_data *shmd = (struct shm_data *)seg->s_data; 14427c478bd9Sstevel@tonic-gate struct seg *sptseg; 14437c478bd9Sstevel@tonic-gate struct spt_data *sptd; 14447c478bd9Sstevel@tonic-gate pgcnt_t npages, i, free_availrmem = 0; 14457c478bd9Sstevel@tonic-gate int done = 0; 14467c478bd9Sstevel@tonic-gate 14477c478bd9Sstevel@tonic-gate #ifdef lint 14487c478bd9Sstevel@tonic-gate addr = addr; 14497c478bd9Sstevel@tonic-gate #endif 14507c478bd9Sstevel@tonic-gate 
	sptseg = shmd->shm_sptseg;
14517c478bd9Sstevel@tonic-gate 	sptd = sptseg->s_data;
14527c478bd9Sstevel@tonic-gate 	npages = (len >> PAGESHIFT);
14537c478bd9Sstevel@tonic-gate 	ASSERT(npages);
14547c478bd9Sstevel@tonic-gate 	ASSERT(sptd->spt_pcachecnt != 0);
14557c478bd9Sstevel@tonic-gate 	ASSERT(sptd->spt_ppa == pplist);
14567c478bd9Sstevel@tonic-gate 	ASSERT(npages == btopr(sptd->spt_amp->size));
14577c478bd9Sstevel@tonic-gate 	/*
14587c478bd9Sstevel@tonic-gate 	 * Acquire the lock on the dummy seg and destroy the
14597c478bd9Sstevel@tonic-gate 	 * ppa array IF this is the last pcachecnt.
14607c478bd9Sstevel@tonic-gate 	 */
14617c478bd9Sstevel@tonic-gate 	mutex_enter(&sptd->spt_lock);
14627c478bd9Sstevel@tonic-gate 	if (--sptd->spt_pcachecnt == 0) {
14637c478bd9Sstevel@tonic-gate 		for (i = 0; i < npages; i++) {
14647c478bd9Sstevel@tonic-gate 			if (pplist[i] == NULL) {
14657c478bd9Sstevel@tonic-gate 				continue;
14667c478bd9Sstevel@tonic-gate 			}
14677c478bd9Sstevel@tonic-gate 			if (rw == S_WRITE) {
14687c478bd9Sstevel@tonic-gate 				hat_setrefmod(pplist[i]);
14697c478bd9Sstevel@tonic-gate 			} else {
14707c478bd9Sstevel@tonic-gate 				hat_setref(pplist[i]);
14717c478bd9Sstevel@tonic-gate 			}
14727c478bd9Sstevel@tonic-gate 			if ((sptd->spt_flags & SHM_PAGEABLE) &&
14737c478bd9Sstevel@tonic-gate 			    (sptd->spt_ppa_lckcnt[i] == 0))
14747c478bd9Sstevel@tonic-gate 				free_availrmem++;
14757c478bd9Sstevel@tonic-gate 			page_unlock(pplist[i]);
14767c478bd9Sstevel@tonic-gate 		}
14777c478bd9Sstevel@tonic-gate 		if (sptd->spt_flags & SHM_PAGEABLE) {
14787c478bd9Sstevel@tonic-gate 			mutex_enter(&freemem_lock);
14797c478bd9Sstevel@tonic-gate 			availrmem += free_availrmem;
14807c478bd9Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
14817c478bd9Sstevel@tonic-gate 		}
14827c478bd9Sstevel@tonic-gate 		/*
14837c478bd9Sstevel@tonic-gate 		 * Since we want to cache/uncache the entire ISM segment,
14847c478bd9Sstevel@tonic-gate 		 * we will track the pplist in a segspt specific field
14857c478bd9Sstevel@tonic-gate 		 * ppa, that is initialized at the time we add an entry to
14867c478bd9Sstevel@tonic-gate 		 * the cache.
14877c478bd9Sstevel@tonic-gate 		 */
14887c478bd9Sstevel@tonic-gate 		ASSERT(sptd->spt_pcachecnt == 0);
14897c478bd9Sstevel@tonic-gate 		kmem_free(pplist, sizeof (page_t *) * npages);
14907c478bd9Sstevel@tonic-gate 		sptd->spt_ppa = NULL;
14917c478bd9Sstevel@tonic-gate 		sptd->spt_flags &= ~DISM_PPA_CHANGED;
14927c478bd9Sstevel@tonic-gate 		done = 1;
14937c478bd9Sstevel@tonic-gate 	}
14947c478bd9Sstevel@tonic-gate 	mutex_exit(&sptd->spt_lock);
14957c478bd9Sstevel@tonic-gate 	/*
14967c478bd9Sstevel@tonic-gate 	 * Now decrement softlockcnt.
14977c478bd9Sstevel@tonic-gate 	 */
14987c478bd9Sstevel@tonic-gate 	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1);
14997c478bd9Sstevel@tonic-gate 
15007c478bd9Sstevel@tonic-gate 	if (shmd->shm_softlockcnt <= 0) {
15017c478bd9Sstevel@tonic-gate 		if (AS_ISUNMAPWAIT(seg->s_as)) {
15027c478bd9Sstevel@tonic-gate 			mutex_enter(&seg->s_as->a_contents);
15037c478bd9Sstevel@tonic-gate 			if (AS_ISUNMAPWAIT(seg->s_as)) {
15047c478bd9Sstevel@tonic-gate 				AS_CLRUNMAPWAIT(seg->s_as);
15057c478bd9Sstevel@tonic-gate 				cv_broadcast(&seg->s_as->a_cv);
15067c478bd9Sstevel@tonic-gate 			}
15077c478bd9Sstevel@tonic-gate 			mutex_exit(&seg->s_as->a_contents);
15087c478bd9Sstevel@tonic-gate 		}
15097c478bd9Sstevel@tonic-gate 	}
15107c478bd9Sstevel@tonic-gate 	return (done);
15117c478bd9Sstevel@tonic-gate }
15127c478bd9Sstevel@tonic-gate 
15137c478bd9Sstevel@tonic-gate /*
15147c478bd9Sstevel@tonic-gate  * Do a F_SOFTUNLOCK call over the range requested.
15157c478bd9Sstevel@tonic-gate  * The range must have already been F_SOFTLOCK'ed.
15167c478bd9Sstevel@tonic-gate  *
15177c478bd9Sstevel@tonic-gate  * The calls to acquire and release the anon map lock mutex were
15187c478bd9Sstevel@tonic-gate  * removed in order to avoid a deadly embrace during a DR
15197c478bd9Sstevel@tonic-gate  * memory delete operation. (Eg. DR blocks while waiting for an
15207c478bd9Sstevel@tonic-gate  * exclusive lock on a page that is being used for kaio; the
15217c478bd9Sstevel@tonic-gate  * thread that will complete the kaio and call segspt_softunlock
15227c478bd9Sstevel@tonic-gate  * blocks on the anon map lock; another thread holding the anon
15237c478bd9Sstevel@tonic-gate  * map lock blocks on another page lock via the segspt_shmfault
15247c478bd9Sstevel@tonic-gate  * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
15257c478bd9Sstevel@tonic-gate  *
15267c478bd9Sstevel@tonic-gate  * The appropriateness of the removal is based upon the following:
15277c478bd9Sstevel@tonic-gate  * 1. If we are holding a segment's reader lock and the page is held
15287c478bd9Sstevel@tonic-gate  * shared, then the corresponding element in anonmap which points to
15297c478bd9Sstevel@tonic-gate  * anon struct cannot change and there is no need to acquire the
15307c478bd9Sstevel@tonic-gate  * anonymous map lock.
15317c478bd9Sstevel@tonic-gate  * 2. Threads in segspt_softunlock have a reader lock on the segment
15327c478bd9Sstevel@tonic-gate  * and already have the shared page lock, so we are guaranteed that
15337c478bd9Sstevel@tonic-gate  * the anon map slot cannot change and therefore can call anon_get_ptr()
15347c478bd9Sstevel@tonic-gate  * without grabbing the anonymous map lock.
15357c478bd9Sstevel@tonic-gate  * 3. Threads that softlock a shared page break copy-on-write, even if
15367c478bd9Sstevel@tonic-gate  * it's a read. Thus cow faults can be ignored with respect to soft
15377c478bd9Sstevel@tonic-gate  * unlocking, since the breaking of cow means that the anon slot(s) will
15387c478bd9Sstevel@tonic-gate  * not be shared.
15397c478bd9Sstevel@tonic-gate  */
15407c478bd9Sstevel@tonic-gate static void
15417c478bd9Sstevel@tonic-gate segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
15427c478bd9Sstevel@tonic-gate     size_t len, enum seg_rw rw)
15437c478bd9Sstevel@tonic-gate {
15447c478bd9Sstevel@tonic-gate 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
15457c478bd9Sstevel@tonic-gate 	struct seg *sptseg;
15467c478bd9Sstevel@tonic-gate 	struct spt_data *sptd;
15477c478bd9Sstevel@tonic-gate 	page_t *pp;
15487c478bd9Sstevel@tonic-gate 	caddr_t adr;
15497c478bd9Sstevel@tonic-gate 	struct vnode *vp;
15507c478bd9Sstevel@tonic-gate 	u_offset_t offset;
15517c478bd9Sstevel@tonic-gate 	ulong_t anon_index;
15527c478bd9Sstevel@tonic-gate 	struct anon_map *amp;		/* XXX - for locknest */
15537c478bd9Sstevel@tonic-gate 	struct anon *ap = NULL;
15547c478bd9Sstevel@tonic-gate 	pgcnt_t npages;
15557c478bd9Sstevel@tonic-gate 
15567c478bd9Sstevel@tonic-gate 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
15577c478bd9Sstevel@tonic-gate 
15587c478bd9Sstevel@tonic-gate 	sptseg = shmd->shm_sptseg;
15597c478bd9Sstevel@tonic-gate 	sptd = sptseg->s_data;
15607c478bd9Sstevel@tonic-gate 
15617c478bd9Sstevel@tonic-gate 	/*
15627c478bd9Sstevel@tonic-gate 	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
15637c478bd9Sstevel@tonic-gate 	 * and therefore their pages are SE_SHARED locked
15647c478bd9Sstevel@tonic-gate 	 * for the entire life of the segment.
15657c478bd9Sstevel@tonic-gate */ 15667c478bd9Sstevel@tonic-gate if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) && 15677c478bd9Sstevel@tonic-gate ((sptd->spt_flags & SHM_PAGEABLE) == 0)) { 15687c478bd9Sstevel@tonic-gate goto softlock_decrement; 15697c478bd9Sstevel@tonic-gate } 15707c478bd9Sstevel@tonic-gate 15717c478bd9Sstevel@tonic-gate /* 15727c478bd9Sstevel@tonic-gate * Any thread is free to do a page_find and 15737c478bd9Sstevel@tonic-gate * page_unlock() on the pages within this seg. 15747c478bd9Sstevel@tonic-gate * 15757c478bd9Sstevel@tonic-gate * We are already holding the as->a_lock on the user's 15767c478bd9Sstevel@tonic-gate * real segment, but we need to hold the a_lock on the 15777c478bd9Sstevel@tonic-gate * underlying dummy as. This is mostly to satisfy the 15787c478bd9Sstevel@tonic-gate * underlying HAT layer. 15797c478bd9Sstevel@tonic-gate */ 15807c478bd9Sstevel@tonic-gate AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 15817c478bd9Sstevel@tonic-gate hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len); 15827c478bd9Sstevel@tonic-gate AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 15837c478bd9Sstevel@tonic-gate 15847c478bd9Sstevel@tonic-gate amp = sptd->spt_amp; 15857c478bd9Sstevel@tonic-gate ASSERT(amp != NULL); 15867c478bd9Sstevel@tonic-gate anon_index = seg_page(sptseg, sptseg_addr); 15877c478bd9Sstevel@tonic-gate 15887c478bd9Sstevel@tonic-gate for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) { 15897c478bd9Sstevel@tonic-gate ap = anon_get_ptr(amp->ahp, anon_index++); 15907c478bd9Sstevel@tonic-gate ASSERT(ap != NULL); 15917c478bd9Sstevel@tonic-gate swap_xlate(ap, &vp, &offset); 15927c478bd9Sstevel@tonic-gate 15937c478bd9Sstevel@tonic-gate /* 15947c478bd9Sstevel@tonic-gate * Use page_find() instead of page_lookup() to 15957c478bd9Sstevel@tonic-gate * find the page since we know that it has a 15967c478bd9Sstevel@tonic-gate * "shared" lock. 15977c478bd9Sstevel@tonic-gate */ 15987c478bd9Sstevel@tonic-gate pp = page_find(vp, offset); 15997c478bd9Sstevel@tonic-gate ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1)); 16007c478bd9Sstevel@tonic-gate if (pp == NULL) { 16017c478bd9Sstevel@tonic-gate panic("segspt_softunlock: " 16027c478bd9Sstevel@tonic-gate "addr %p, ap %p, vp %p, off %llx", 16037c478bd9Sstevel@tonic-gate (void *)adr, (void *)ap, (void *)vp, offset); 16047c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 16057c478bd9Sstevel@tonic-gate } 16067c478bd9Sstevel@tonic-gate 16077c478bd9Sstevel@tonic-gate if (rw == S_WRITE) { 16087c478bd9Sstevel@tonic-gate hat_setrefmod(pp); 16097c478bd9Sstevel@tonic-gate } else if (rw != S_OTHER) { 16107c478bd9Sstevel@tonic-gate hat_setref(pp); 16117c478bd9Sstevel@tonic-gate } 16127c478bd9Sstevel@tonic-gate page_unlock(pp); 16137c478bd9Sstevel@tonic-gate } 16147c478bd9Sstevel@tonic-gate 16157c478bd9Sstevel@tonic-gate softlock_decrement: 16167c478bd9Sstevel@tonic-gate npages = btopr(len); 16177c478bd9Sstevel@tonic-gate atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages); 16187c478bd9Sstevel@tonic-gate if (shmd->shm_softlockcnt == 0) { 16197c478bd9Sstevel@tonic-gate /* 16207c478bd9Sstevel@tonic-gate * All SOFTLOCKS are gone. Wakeup any waiting 16217c478bd9Sstevel@tonic-gate * unmappers so they can try again to unmap. 16227c478bd9Sstevel@tonic-gate * Check for waiters first without the mutex 16237c478bd9Sstevel@tonic-gate * held so we don't always grab the mutex on 16247c478bd9Sstevel@tonic-gate * softunlocks. 
16257c478bd9Sstevel@tonic-gate */ 16267c478bd9Sstevel@tonic-gate if (AS_ISUNMAPWAIT(seg->s_as)) { 16277c478bd9Sstevel@tonic-gate mutex_enter(&seg->s_as->a_contents); 16287c478bd9Sstevel@tonic-gate if (AS_ISUNMAPWAIT(seg->s_as)) { 16297c478bd9Sstevel@tonic-gate AS_CLRUNMAPWAIT(seg->s_as); 16307c478bd9Sstevel@tonic-gate cv_broadcast(&seg->s_as->a_cv); 16317c478bd9Sstevel@tonic-gate } 16327c478bd9Sstevel@tonic-gate mutex_exit(&seg->s_as->a_contents); 16337c478bd9Sstevel@tonic-gate } 16347c478bd9Sstevel@tonic-gate } 16357c478bd9Sstevel@tonic-gate } 16367c478bd9Sstevel@tonic-gate 16377c478bd9Sstevel@tonic-gate int 16387c478bd9Sstevel@tonic-gate segspt_shmattach(struct seg *seg, caddr_t *argsp) 16397c478bd9Sstevel@tonic-gate { 16407c478bd9Sstevel@tonic-gate struct shm_data *shmd_arg = (struct shm_data *)argsp; 16417c478bd9Sstevel@tonic-gate struct shm_data *shmd; 16427c478bd9Sstevel@tonic-gate struct anon_map *shm_amp = shmd_arg->shm_amp; 16437c478bd9Sstevel@tonic-gate struct spt_data *sptd; 16447c478bd9Sstevel@tonic-gate int error = 0; 16457c478bd9Sstevel@tonic-gate 16467c478bd9Sstevel@tonic-gate ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 16477c478bd9Sstevel@tonic-gate 16487c478bd9Sstevel@tonic-gate shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); 16497c478bd9Sstevel@tonic-gate if (shmd == NULL) 16507c478bd9Sstevel@tonic-gate return (ENOMEM); 16517c478bd9Sstevel@tonic-gate 16527c478bd9Sstevel@tonic-gate shmd->shm_sptas = shmd_arg->shm_sptas; 16537c478bd9Sstevel@tonic-gate shmd->shm_amp = shm_amp; 16547c478bd9Sstevel@tonic-gate shmd->shm_sptseg = shmd_arg->shm_sptseg; 16557c478bd9Sstevel@tonic-gate 16567c478bd9Sstevel@tonic-gate (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0, 16577c478bd9Sstevel@tonic-gate NULL, 0, seg->s_size); 16587c478bd9Sstevel@tonic-gate 16597c478bd9Sstevel@tonic-gate seg->s_data = (void *)shmd; 16607c478bd9Sstevel@tonic-gate seg->s_ops = &segspt_shmops; 16617c478bd9Sstevel@tonic-gate seg->s_szc = shmd->shm_sptseg->s_szc; 16627c478bd9Sstevel@tonic-gate sptd = shmd->shm_sptseg->s_data; 16637c478bd9Sstevel@tonic-gate 16647c478bd9Sstevel@tonic-gate if (sptd->spt_flags & SHM_PAGEABLE) { 16657c478bd9Sstevel@tonic-gate if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size), 16667c478bd9Sstevel@tonic-gate KM_NOSLEEP)) == NULL) { 16677c478bd9Sstevel@tonic-gate seg->s_data = (void *)NULL; 16687c478bd9Sstevel@tonic-gate kmem_free(shmd, (sizeof (*shmd))); 16697c478bd9Sstevel@tonic-gate return (ENOMEM); 16707c478bd9Sstevel@tonic-gate } 16717c478bd9Sstevel@tonic-gate shmd->shm_lckpgs = 0; 16727c478bd9Sstevel@tonic-gate if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) { 16737c478bd9Sstevel@tonic-gate if ((error = hat_share(seg->s_as->a_hat, seg->s_base, 16747c478bd9Sstevel@tonic-gate shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 16757c478bd9Sstevel@tonic-gate seg->s_size, seg->s_szc)) != 0) { 16767c478bd9Sstevel@tonic-gate kmem_free(shmd->shm_vpage, 16777c478bd9Sstevel@tonic-gate btopr(shm_amp->size)); 16787c478bd9Sstevel@tonic-gate } 16797c478bd9Sstevel@tonic-gate } 16807c478bd9Sstevel@tonic-gate } else { 16817c478bd9Sstevel@tonic-gate error = hat_share(seg->s_as->a_hat, seg->s_base, 16827c478bd9Sstevel@tonic-gate shmd_arg->shm_sptas->a_hat, SEGSPTADDR, 16837c478bd9Sstevel@tonic-gate seg->s_size, seg->s_szc); 16847c478bd9Sstevel@tonic-gate } 16857c478bd9Sstevel@tonic-gate if (error) { 16867c478bd9Sstevel@tonic-gate seg->s_szc = 0; 16877c478bd9Sstevel@tonic-gate seg->s_data = (void *)NULL; 16887c478bd9Sstevel@tonic-gate kmem_free(shmd, (sizeof 
(*shmd))); 16897c478bd9Sstevel@tonic-gate } else { 16907c478bd9Sstevel@tonic-gate ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 16917c478bd9Sstevel@tonic-gate shm_amp->refcnt++; 16927c478bd9Sstevel@tonic-gate ANON_LOCK_EXIT(&shm_amp->a_rwlock); 16937c478bd9Sstevel@tonic-gate } 16947c478bd9Sstevel@tonic-gate return (error); 16957c478bd9Sstevel@tonic-gate } 16967c478bd9Sstevel@tonic-gate 16977c478bd9Sstevel@tonic-gate int 16987c478bd9Sstevel@tonic-gate segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) 16997c478bd9Sstevel@tonic-gate { 17007c478bd9Sstevel@tonic-gate struct shm_data *shmd = (struct shm_data *)seg->s_data; 17017c478bd9Sstevel@tonic-gate int reclaim = 1; 17027c478bd9Sstevel@tonic-gate 17037c478bd9Sstevel@tonic-gate ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 17047c478bd9Sstevel@tonic-gate retry: 17057c478bd9Sstevel@tonic-gate if (shmd->shm_softlockcnt > 0) { 17067c478bd9Sstevel@tonic-gate if (reclaim == 1) { 17077c478bd9Sstevel@tonic-gate segspt_purge(seg); 17087c478bd9Sstevel@tonic-gate reclaim = 0; 17097c478bd9Sstevel@tonic-gate goto retry; 17107c478bd9Sstevel@tonic-gate } 17117c478bd9Sstevel@tonic-gate return (EAGAIN); 17127c478bd9Sstevel@tonic-gate } 17137c478bd9Sstevel@tonic-gate 17147c478bd9Sstevel@tonic-gate if (ssize != seg->s_size) { 17157c478bd9Sstevel@tonic-gate #ifdef DEBUG 17167c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n", 17177c478bd9Sstevel@tonic-gate ssize, seg->s_size); 17187c478bd9Sstevel@tonic-gate #endif 17197c478bd9Sstevel@tonic-gate return (EINVAL); 17207c478bd9Sstevel@tonic-gate } 17217c478bd9Sstevel@tonic-gate 17227c478bd9Sstevel@tonic-gate (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK, 17237c478bd9Sstevel@tonic-gate NULL, 0); 17247c478bd9Sstevel@tonic-gate hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc); 17257c478bd9Sstevel@tonic-gate 17267c478bd9Sstevel@tonic-gate seg_free(seg); 17277c478bd9Sstevel@tonic-gate 17287c478bd9Sstevel@tonic-gate return (0); 17297c478bd9Sstevel@tonic-gate } 17307c478bd9Sstevel@tonic-gate 17317c478bd9Sstevel@tonic-gate void 17327c478bd9Sstevel@tonic-gate segspt_shmfree(struct seg *seg) 17337c478bd9Sstevel@tonic-gate { 17347c478bd9Sstevel@tonic-gate struct shm_data *shmd = (struct shm_data *)seg->s_data; 17357c478bd9Sstevel@tonic-gate struct anon_map *shm_amp = shmd->shm_amp; 17367c478bd9Sstevel@tonic-gate 17377c478bd9Sstevel@tonic-gate ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 17387c478bd9Sstevel@tonic-gate 17397c478bd9Sstevel@tonic-gate (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0, 17407c478bd9Sstevel@tonic-gate MC_UNLOCK, NULL, 0); 17417c478bd9Sstevel@tonic-gate 17427c478bd9Sstevel@tonic-gate /* 17437c478bd9Sstevel@tonic-gate * Need to increment refcnt when attaching 17447c478bd9Sstevel@tonic-gate * and decrement when detaching because of dup(). 
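	 * (The matching refcnt++ is done at the end of segspt_shmattach().)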
17457c478bd9Sstevel@tonic-gate */ 17467c478bd9Sstevel@tonic-gate ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER); 17477c478bd9Sstevel@tonic-gate shm_amp->refcnt--; 17487c478bd9Sstevel@tonic-gate ANON_LOCK_EXIT(&shm_amp->a_rwlock); 17497c478bd9Sstevel@tonic-gate 17507c478bd9Sstevel@tonic-gate if (shmd->shm_vpage) { /* only for DISM */ 17517c478bd9Sstevel@tonic-gate kmem_free(shmd->shm_vpage, btopr(shm_amp->size)); 17527c478bd9Sstevel@tonic-gate shmd->shm_vpage = NULL; 17537c478bd9Sstevel@tonic-gate } 17547c478bd9Sstevel@tonic-gate kmem_free(shmd, sizeof (*shmd)); 17557c478bd9Sstevel@tonic-gate } 17567c478bd9Sstevel@tonic-gate 17577c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 17587c478bd9Sstevel@tonic-gate int 17597c478bd9Sstevel@tonic-gate segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 17607c478bd9Sstevel@tonic-gate { 17617c478bd9Sstevel@tonic-gate ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 17627c478bd9Sstevel@tonic-gate 17637c478bd9Sstevel@tonic-gate /* 17647c478bd9Sstevel@tonic-gate * Shared page table is more than shared mapping. 17657c478bd9Sstevel@tonic-gate * Individual process sharing page tables can't change prot 17667c478bd9Sstevel@tonic-gate * because there is only one set of page tables. 17677c478bd9Sstevel@tonic-gate * This will be allowed after private page table is 17687c478bd9Sstevel@tonic-gate * supported. 17697c478bd9Sstevel@tonic-gate */ 17707c478bd9Sstevel@tonic-gate /* need to return correct status error? */ 17717c478bd9Sstevel@tonic-gate return (0); 17727c478bd9Sstevel@tonic-gate } 17737c478bd9Sstevel@tonic-gate 17747c478bd9Sstevel@tonic-gate 17757c478bd9Sstevel@tonic-gate faultcode_t 17767c478bd9Sstevel@tonic-gate segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr, 17777c478bd9Sstevel@tonic-gate size_t len, enum fault_type type, enum seg_rw rw) 17787c478bd9Sstevel@tonic-gate { 17797c478bd9Sstevel@tonic-gate struct shm_data *shmd = (struct shm_data *)seg->s_data; 17807c478bd9Sstevel@tonic-gate struct seg *sptseg = shmd->shm_sptseg; 17817c478bd9Sstevel@tonic-gate struct as *curspt = shmd->shm_sptas; 17827c478bd9Sstevel@tonic-gate struct spt_data *sptd = sptseg->s_data; 17837c478bd9Sstevel@tonic-gate pgcnt_t npages; 178407b65a64Saguzovsk size_t size; 17857c478bd9Sstevel@tonic-gate caddr_t segspt_addr, shm_addr; 17867c478bd9Sstevel@tonic-gate page_t **ppa; 17877c478bd9Sstevel@tonic-gate int i; 17887c478bd9Sstevel@tonic-gate ulong_t an_idx = 0; 17897c478bd9Sstevel@tonic-gate int err = 0; 17901b42782eSmec int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0); 179107b65a64Saguzovsk size_t pgsz; 179207b65a64Saguzovsk pgcnt_t pgcnt; 179307b65a64Saguzovsk caddr_t a; 179407b65a64Saguzovsk pgcnt_t pidx; 17957c478bd9Sstevel@tonic-gate 17967c478bd9Sstevel@tonic-gate #ifdef lint 17977c478bd9Sstevel@tonic-gate hat = hat; 17987c478bd9Sstevel@tonic-gate #endif 17997c478bd9Sstevel@tonic-gate ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 18007c478bd9Sstevel@tonic-gate 18017c478bd9Sstevel@tonic-gate /* 18027c478bd9Sstevel@tonic-gate * Because of the way spt is implemented 18037c478bd9Sstevel@tonic-gate * the realsize of the segment does not have to be 18047c478bd9Sstevel@tonic-gate * equal to the segment size itself. The segment size is 18057c478bd9Sstevel@tonic-gate * often in multiples of a page size larger than PAGESIZE. 18067c478bd9Sstevel@tonic-gate * The realsize is rounded up to the nearest PAGESIZE 18077c478bd9Sstevel@tonic-gate * based on what the user requested. 
This is a bit of
18087c478bd9Sstevel@tonic-gate  * ugliness that is historical but not easily fixed
18097c478bd9Sstevel@tonic-gate  * without re-designing the higher levels of ISM.
18107c478bd9Sstevel@tonic-gate  */
18117c478bd9Sstevel@tonic-gate 	ASSERT(addr >= seg->s_base);
18127c478bd9Sstevel@tonic-gate 	if (((addr + len) - seg->s_base) > sptd->spt_realsize)
18137c478bd9Sstevel@tonic-gate 		return (FC_NOMAP);
18147c478bd9Sstevel@tonic-gate 	/*
18157c478bd9Sstevel@tonic-gate 	 * For all of the following cases except F_PROT, we need to
18167c478bd9Sstevel@tonic-gate 	 * make any necessary adjustments to addr and len
18177c478bd9Sstevel@tonic-gate 	 * and get all of the necessary page_t's into an array called ppa[].
18187c478bd9Sstevel@tonic-gate 	 *
18197c478bd9Sstevel@tonic-gate 	 * The code in shmat() forces base addr and len of ISM segment
18207c478bd9Sstevel@tonic-gate 	 * to be aligned to largest page size supported. Therefore,
18217c478bd9Sstevel@tonic-gate 	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
18227c478bd9Sstevel@tonic-gate 	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
18237c478bd9Sstevel@tonic-gate 	 * in large pagesize chunks, or else we will screw up the HAT
18247c478bd9Sstevel@tonic-gate 	 * layer by calling hat_memload_array() with differing page sizes
18257c478bd9Sstevel@tonic-gate 	 * over a given virtual range.
18267c478bd9Sstevel@tonic-gate 	 */
182707b65a64Saguzovsk 	pgsz = page_get_pagesize(sptseg->s_szc);
182807b65a64Saguzovsk 	pgcnt = page_get_pagecnt(sptseg->s_szc);
182907b65a64Saguzovsk 	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
183007b65a64Saguzovsk 	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
18317c478bd9Sstevel@tonic-gate 	npages = btopr(size);
18327c478bd9Sstevel@tonic-gate 
18337c478bd9Sstevel@tonic-gate 	/*
18347c478bd9Sstevel@tonic-gate 	 * Now we need to convert from addr in segshm to addr in segspt.
18357c478bd9Sstevel@tonic-gate 	 */
18367c478bd9Sstevel@tonic-gate 	an_idx = seg_page(seg, shm_addr);
18377c478bd9Sstevel@tonic-gate 	segspt_addr = sptseg->s_base + ptob(an_idx);
18387c478bd9Sstevel@tonic-gate 
18397c478bd9Sstevel@tonic-gate 	ASSERT((segspt_addr + ptob(npages)) <=
18407c478bd9Sstevel@tonic-gate 	    (sptseg->s_base + sptd->spt_realsize));
18417c478bd9Sstevel@tonic-gate 	ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
18427c478bd9Sstevel@tonic-gate 
18437c478bd9Sstevel@tonic-gate 	switch (type) {
18447c478bd9Sstevel@tonic-gate 
18457c478bd9Sstevel@tonic-gate 	case F_SOFTLOCK:
18467c478bd9Sstevel@tonic-gate 
18477c478bd9Sstevel@tonic-gate 		mutex_enter(&freemem_lock);
18487c478bd9Sstevel@tonic-gate 		if (availrmem < tune.t_minarmem + npages) {
18497c478bd9Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
18507c478bd9Sstevel@tonic-gate 			return (FC_MAKE_ERR(ENOMEM));
18517c478bd9Sstevel@tonic-gate 		} else {
18527c478bd9Sstevel@tonic-gate 			availrmem -= npages;
18537c478bd9Sstevel@tonic-gate 		}
18547c478bd9Sstevel@tonic-gate 		mutex_exit(&freemem_lock);
18557c478bd9Sstevel@tonic-gate 		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
18567c478bd9Sstevel@tonic-gate 		/*
18577c478bd9Sstevel@tonic-gate 		 * Fall through to the F_INVAL case to load up the hat layer
18587c478bd9Sstevel@tonic-gate 		 * entries with the HAT_LOAD_LOCK flag.
18597c478bd9Sstevel@tonic-gate */ 18607c478bd9Sstevel@tonic-gate /* FALLTHRU */ 18617c478bd9Sstevel@tonic-gate case F_INVAL: 18627c478bd9Sstevel@tonic-gate 18637c478bd9Sstevel@tonic-gate if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC)) 18647c478bd9Sstevel@tonic-gate return (FC_NOMAP); 18657c478bd9Sstevel@tonic-gate 18667c478bd9Sstevel@tonic-gate ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP); 18677c478bd9Sstevel@tonic-gate 18687c478bd9Sstevel@tonic-gate err = spt_anon_getpages(sptseg, segspt_addr, size, ppa); 18697c478bd9Sstevel@tonic-gate if (err != 0) { 18707c478bd9Sstevel@tonic-gate if (type == F_SOFTLOCK) { 18717c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock); 18727c478bd9Sstevel@tonic-gate availrmem += npages; 18737c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock); 18747c478bd9Sstevel@tonic-gate atomic_add_long((ulong_t *)( 18757c478bd9Sstevel@tonic-gate &(shmd->shm_softlockcnt)), -npages); 18767c478bd9Sstevel@tonic-gate } 18777c478bd9Sstevel@tonic-gate goto dism_err; 18787c478bd9Sstevel@tonic-gate } 18797c478bd9Sstevel@tonic-gate AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); 188007b65a64Saguzovsk a = segspt_addr; 188107b65a64Saguzovsk pidx = 0; 18827c478bd9Sstevel@tonic-gate if (type == F_SOFTLOCK) { 18837c478bd9Sstevel@tonic-gate 18847c478bd9Sstevel@tonic-gate /* 18857c478bd9Sstevel@tonic-gate * Load up the translation keeping it 18867c478bd9Sstevel@tonic-gate * locked and don't unlock the page. 18877c478bd9Sstevel@tonic-gate */ 188807b65a64Saguzovsk for (; pidx < npages; a += pgsz, pidx += pgcnt) { 188907b65a64Saguzovsk hat_memload_array(sptseg->s_as->a_hat, 189007b65a64Saguzovsk a, pgsz, &ppa[pidx], sptd->spt_prot, 18917c478bd9Sstevel@tonic-gate HAT_LOAD_LOCK | HAT_LOAD_SHARE); 189207b65a64Saguzovsk } 18937c478bd9Sstevel@tonic-gate } else { 18947c478bd9Sstevel@tonic-gate if (hat == seg->s_as->a_hat) { 18957c478bd9Sstevel@tonic-gate 18967c478bd9Sstevel@tonic-gate /* 18977c478bd9Sstevel@tonic-gate * Migrate pages marked for migration 18987c478bd9Sstevel@tonic-gate */ 18997c478bd9Sstevel@tonic-gate if (lgrp_optimizations()) 19007c478bd9Sstevel@tonic-gate page_migrate(seg, shm_addr, ppa, 19017c478bd9Sstevel@tonic-gate npages); 19027c478bd9Sstevel@tonic-gate 19037c478bd9Sstevel@tonic-gate /* CPU HAT */ 190407b65a64Saguzovsk for (; pidx < npages; 190507b65a64Saguzovsk a += pgsz, pidx += pgcnt) { 19067c478bd9Sstevel@tonic-gate hat_memload_array(sptseg->s_as->a_hat, 190707b65a64Saguzovsk a, pgsz, &ppa[pidx], 190807b65a64Saguzovsk sptd->spt_prot, 19097c478bd9Sstevel@tonic-gate HAT_LOAD_SHARE); 191007b65a64Saguzovsk } 19117c478bd9Sstevel@tonic-gate } else { 19127c478bd9Sstevel@tonic-gate /* XHAT. Pass real address */ 19137c478bd9Sstevel@tonic-gate hat_memload_array(hat, shm_addr, 19147c478bd9Sstevel@tonic-gate size, ppa, sptd->spt_prot, HAT_LOAD_SHARE); 19157c478bd9Sstevel@tonic-gate } 19167c478bd9Sstevel@tonic-gate 19177c478bd9Sstevel@tonic-gate /* 19187c478bd9Sstevel@tonic-gate * And now drop the SE_SHARED lock(s). 
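			 * With dynamic ISM unmap the locks can be released
			 * right away; otherwise they are held across the
			 * hat_share() call below and dropped only for
			 * F_INVAL faults.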
19197c478bd9Sstevel@tonic-gate */ 19201b42782eSmec if (dyn_ism_unmap) { 19211b42782eSmec for (i = 0; i < npages; i++) { 19227c478bd9Sstevel@tonic-gate page_unlock(ppa[i]); 19237c478bd9Sstevel@tonic-gate } 19241b42782eSmec } 19251b42782eSmec } 19267c478bd9Sstevel@tonic-gate 19271b42782eSmec if (!dyn_ism_unmap) { 19287c478bd9Sstevel@tonic-gate if (hat_share(seg->s_as->a_hat, shm_addr, 19297c478bd9Sstevel@tonic-gate curspt->a_hat, segspt_addr, ptob(npages), 19307c478bd9Sstevel@tonic-gate seg->s_szc) != 0) { 19317c478bd9Sstevel@tonic-gate panic("hat_share err in DISM fault"); 19327c478bd9Sstevel@tonic-gate /* NOTREACHED */ 19337c478bd9Sstevel@tonic-gate } 19341b42782eSmec if (type == F_INVAL) { 19351b42782eSmec for (i = 0; i < npages; i++) { 19361b42782eSmec page_unlock(ppa[i]); 19371b42782eSmec } 19381b42782eSmec } 19397c478bd9Sstevel@tonic-gate } 19407c478bd9Sstevel@tonic-gate AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); 19417c478bd9Sstevel@tonic-gate dism_err: 19427c478bd9Sstevel@tonic-gate kmem_free(ppa, npages * sizeof (page_t *)); 19437c478bd9Sstevel@tonic-gate return (err); 19447c478bd9Sstevel@tonic-gate 19457c478bd9Sstevel@tonic-gate case F_SOFTUNLOCK: 19467c478bd9Sstevel@tonic-gate 19477c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock); 19487c478bd9Sstevel@tonic-gate availrmem += npages; 19497c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock); 19507c478bd9Sstevel@tonic-gate 19517c478bd9Sstevel@tonic-gate /* 19527c478bd9Sstevel@tonic-gate * This is a bit ugly, we pass in the real seg pointer, 19537c478bd9Sstevel@tonic-gate * but the segspt_addr is the virtual address within the 19547c478bd9Sstevel@tonic-gate * dummy seg. 19557c478bd9Sstevel@tonic-gate */ 19567c478bd9Sstevel@tonic-gate segspt_softunlock(seg, segspt_addr, size, rw); 19577c478bd9Sstevel@tonic-gate return (0); 19587c478bd9Sstevel@tonic-gate 19597c478bd9Sstevel@tonic-gate case F_PROT: 19607c478bd9Sstevel@tonic-gate 19617c478bd9Sstevel@tonic-gate /* 19627c478bd9Sstevel@tonic-gate * This takes care of the unusual case where a user 19637c478bd9Sstevel@tonic-gate * allocates a stack in shared memory and a register 19647c478bd9Sstevel@tonic-gate * window overflow is written to that stack page before 19657c478bd9Sstevel@tonic-gate * it is otherwise modified. 19667c478bd9Sstevel@tonic-gate * 19677c478bd9Sstevel@tonic-gate * We can get away with this because ISM segments are 19687c478bd9Sstevel@tonic-gate * always rw. Other than this unusual case, there 19697c478bd9Sstevel@tonic-gate * should be no instances of protection violations. 
19707c478bd9Sstevel@tonic-gate 	 */
19717c478bd9Sstevel@tonic-gate 		return (0);
19727c478bd9Sstevel@tonic-gate 
19737c478bd9Sstevel@tonic-gate 	default:
19747c478bd9Sstevel@tonic-gate #ifdef DEBUG
19757c478bd9Sstevel@tonic-gate 		panic("segspt_dismfault default type?");
19767c478bd9Sstevel@tonic-gate #else
19777c478bd9Sstevel@tonic-gate 		return (FC_NOMAP);
19787c478bd9Sstevel@tonic-gate #endif
19797c478bd9Sstevel@tonic-gate 	}
19807c478bd9Sstevel@tonic-gate }
19817c478bd9Sstevel@tonic-gate 
19827c478bd9Sstevel@tonic-gate 
19837c478bd9Sstevel@tonic-gate faultcode_t
19847c478bd9Sstevel@tonic-gate segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
19857c478bd9Sstevel@tonic-gate     size_t len, enum fault_type type, enum seg_rw rw)
19867c478bd9Sstevel@tonic-gate {
19877c478bd9Sstevel@tonic-gate 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
19887c478bd9Sstevel@tonic-gate 	struct seg *sptseg = shmd->shm_sptseg;
19897c478bd9Sstevel@tonic-gate 	struct as *curspt = shmd->shm_sptas;
19907c478bd9Sstevel@tonic-gate 	struct spt_data *sptd = sptseg->s_data;
19917c478bd9Sstevel@tonic-gate 	pgcnt_t npages;
199207b65a64Saguzovsk 	size_t size;
19937c478bd9Sstevel@tonic-gate 	caddr_t sptseg_addr, shm_addr;
19947c478bd9Sstevel@tonic-gate 	page_t *pp, **ppa;
19957c478bd9Sstevel@tonic-gate 	int i;
19967c478bd9Sstevel@tonic-gate 	u_offset_t offset;
19977c478bd9Sstevel@tonic-gate 	ulong_t anon_index = 0;
19987c478bd9Sstevel@tonic-gate 	struct vnode *vp;
19997c478bd9Sstevel@tonic-gate 	struct anon_map *amp;		/* XXX - for locknest */
20007c478bd9Sstevel@tonic-gate 	struct anon *ap = NULL;
20017c478bd9Sstevel@tonic-gate 	anon_sync_obj_t cookie;
200207b65a64Saguzovsk 	size_t pgsz;
200307b65a64Saguzovsk 	pgcnt_t pgcnt;
200407b65a64Saguzovsk 	caddr_t a;
200507b65a64Saguzovsk 	pgcnt_t pidx;
200607b65a64Saguzovsk 	size_t sz;
20077c478bd9Sstevel@tonic-gate 
20087c478bd9Sstevel@tonic-gate #ifdef lint
20097c478bd9Sstevel@tonic-gate 	hat = hat;
20107c478bd9Sstevel@tonic-gate #endif
20117c478bd9Sstevel@tonic-gate 
20127c478bd9Sstevel@tonic-gate 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
20137c478bd9Sstevel@tonic-gate 
20147c478bd9Sstevel@tonic-gate 	if (sptd->spt_flags & SHM_PAGEABLE) {
20157c478bd9Sstevel@tonic-gate 		return (segspt_dismfault(hat, seg, addr, len, type, rw));
20167c478bd9Sstevel@tonic-gate 	}
20177c478bd9Sstevel@tonic-gate 
20187c478bd9Sstevel@tonic-gate 	/*
20197c478bd9Sstevel@tonic-gate 	 * Because of the way spt is implemented
20207c478bd9Sstevel@tonic-gate 	 * the realsize of the segment does not have to be
20217c478bd9Sstevel@tonic-gate 	 * equal to the segment size itself. The segment size is
20227c478bd9Sstevel@tonic-gate 	 * often in multiples of a page size larger than PAGESIZE.
20237c478bd9Sstevel@tonic-gate 	 * The realsize is rounded up to the nearest PAGESIZE
20247c478bd9Sstevel@tonic-gate 	 * based on what the user requested. This is a bit of
20257c478bd9Sstevel@tonic-gate 	 * ugliness that is historical but not easily fixed
20267c478bd9Sstevel@tonic-gate 	 * without re-designing the higher levels of ISM.
20277c478bd9Sstevel@tonic-gate */ 20287c478bd9Sstevel@tonic-gate ASSERT(addr >= seg->s_base); 20297c478bd9Sstevel@tonic-gate if (((addr + len) - seg->s_base) > sptd->spt_realsize) 20307c478bd9Sstevel@tonic-gate return (FC_NOMAP); 20317c478bd9Sstevel@tonic-gate /* 20327c478bd9Sstevel@tonic-gate * For all of the following cases except F_PROT, we need to 20337c478bd9Sstevel@tonic-gate * make any necessary adjustments to addr and len 20347c478bd9Sstevel@tonic-gate * and get all of the necessary page_t's into an array called ppa[]. 20357c478bd9Sstevel@tonic-gate * 20367c478bd9Sstevel@tonic-gate * The code in shmat() forces base addr and len of ISM segment 20377c478bd9Sstevel@tonic-gate * to be aligned to largest page size supported. Therefore, 20387c478bd9Sstevel@tonic-gate * we are able to handle F_SOFTLOCK and F_INVAL calls in "large 20397c478bd9Sstevel@tonic-gate * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK 20407c478bd9Sstevel@tonic-gate * in large pagesize chunks, or else we will screw up the HAT 20417c478bd9Sstevel@tonic-gate * layer by calling hat_memload_array() with differing page sizes 20427c478bd9Sstevel@tonic-gate * over a given virtual range. 20437c478bd9Sstevel@tonic-gate */ 204407b65a64Saguzovsk pgsz = page_get_pagesize(sptseg->s_szc); 204507b65a64Saguzovsk pgcnt = page_get_pagecnt(sptseg->s_szc); 204607b65a64Saguzovsk shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); 204707b65a64Saguzovsk size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz); 20487c478bd9Sstevel@tonic-gate npages = btopr(size); 20497c478bd9Sstevel@tonic-gate 20507c478bd9Sstevel@tonic-gate /* 20517c478bd9Sstevel@tonic-gate * Now we need to convert from addr in segshm to addr in segspt. 20527c478bd9Sstevel@tonic-gate */ 20537c478bd9Sstevel@tonic-gate anon_index = seg_page(seg, shm_addr); 20547c478bd9Sstevel@tonic-gate sptseg_addr = sptseg->s_base + ptob(anon_index); 20557c478bd9Sstevel@tonic-gate 20567c478bd9Sstevel@tonic-gate /* 20577c478bd9Sstevel@tonic-gate * And now we may have to adjust npages downward if we have 20587c478bd9Sstevel@tonic-gate * exceeded the realsize of the segment or initial anon 20597c478bd9Sstevel@tonic-gate * allocations. 20607c478bd9Sstevel@tonic-gate */ 20617c478bd9Sstevel@tonic-gate if ((sptseg_addr + ptob(npages)) > 20627c478bd9Sstevel@tonic-gate (sptseg->s_base + sptd->spt_realsize)) 20637c478bd9Sstevel@tonic-gate size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr; 20647c478bd9Sstevel@tonic-gate 20657c478bd9Sstevel@tonic-gate npages = btopr(size); 20667c478bd9Sstevel@tonic-gate 20677c478bd9Sstevel@tonic-gate ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size)); 20687c478bd9Sstevel@tonic-gate ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0); 20697c478bd9Sstevel@tonic-gate 20707c478bd9Sstevel@tonic-gate switch (type) { 20717c478bd9Sstevel@tonic-gate 20727c478bd9Sstevel@tonic-gate case F_SOFTLOCK: 20737c478bd9Sstevel@tonic-gate 20747c478bd9Sstevel@tonic-gate /* 20757c478bd9Sstevel@tonic-gate * availrmem is decremented once during anon_swap_adjust() 20767c478bd9Sstevel@tonic-gate * and is incremented during the anon_unresv(), which is 20777c478bd9Sstevel@tonic-gate * called from shm_rm_amp() when the segment is destroyed. 20787c478bd9Sstevel@tonic-gate */ 20797c478bd9Sstevel@tonic-gate atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages); 20807c478bd9Sstevel@tonic-gate /* 20817c478bd9Sstevel@tonic-gate * Some platforms assume that ISM pages are SE_SHARED 20827c478bd9Sstevel@tonic-gate * locked for the entire life of the segment. 
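		 * On those platforms there is nothing more to do for
		 * F_SOFTLOCK once softlockcnt has been bumped, hence the
		 * early return below.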
20837c478bd9Sstevel@tonic-gate 	 */
20847c478bd9Sstevel@tonic-gate 		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
20857c478bd9Sstevel@tonic-gate 			return (0);
20867c478bd9Sstevel@tonic-gate 		/*
20877c478bd9Sstevel@tonic-gate 		 * Fall through to the F_INVAL case to load up the hat layer
20887c478bd9Sstevel@tonic-gate 		 * entries with the HAT_LOAD_LOCK flag.
20897c478bd9Sstevel@tonic-gate 		 */
20907c478bd9Sstevel@tonic-gate 
20917c478bd9Sstevel@tonic-gate 		/* FALLTHRU */
20927c478bd9Sstevel@tonic-gate 	case F_INVAL:
20937c478bd9Sstevel@tonic-gate 
20947c478bd9Sstevel@tonic-gate 		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
20957c478bd9Sstevel@tonic-gate 			return (FC_NOMAP);
20967c478bd9Sstevel@tonic-gate 
20977c478bd9Sstevel@tonic-gate 		/*
20987c478bd9Sstevel@tonic-gate 		 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
20997c478bd9Sstevel@tonic-gate 		 * may still rely on this call to hat_share(). That
21007c478bd9Sstevel@tonic-gate 		 * would imply that those hat's can fault on a
21017c478bd9Sstevel@tonic-gate 		 * HAT_LOAD_LOCK translation, which would seem
21027c478bd9Sstevel@tonic-gate 		 * contradictory.
21037c478bd9Sstevel@tonic-gate 		 */
21047c478bd9Sstevel@tonic-gate 		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
21057c478bd9Sstevel@tonic-gate 			if (hat_share(seg->s_as->a_hat, seg->s_base,
21067c478bd9Sstevel@tonic-gate 			    curspt->a_hat, sptseg->s_base,
21077c478bd9Sstevel@tonic-gate 			    sptseg->s_size, sptseg->s_szc) != 0) {
21087c478bd9Sstevel@tonic-gate 				panic("hat_share error in ISM fault");
21097c478bd9Sstevel@tonic-gate 				/*NOTREACHED*/
21107c478bd9Sstevel@tonic-gate 			}
21117c478bd9Sstevel@tonic-gate 			return (0);
21127c478bd9Sstevel@tonic-gate 		}
21137c478bd9Sstevel@tonic-gate 		ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
21147c478bd9Sstevel@tonic-gate 
21157c478bd9Sstevel@tonic-gate 		/*
21167c478bd9Sstevel@tonic-gate 		 * I see no need to lock the real seg
21177c478bd9Sstevel@tonic-gate 		 * here, because all of our work will be on the underlying
21187c478bd9Sstevel@tonic-gate 		 * dummy seg.
21197c478bd9Sstevel@tonic-gate 		 *
21207c478bd9Sstevel@tonic-gate 		 * sptseg_addr and npages now account for large pages.
21217c478bd9Sstevel@tonic-gate 		 */
21227c478bd9Sstevel@tonic-gate 		amp = sptd->spt_amp;
21237c478bd9Sstevel@tonic-gate 		ASSERT(amp != NULL);
21247c478bd9Sstevel@tonic-gate 		anon_index = seg_page(sptseg, sptseg_addr);
21257c478bd9Sstevel@tonic-gate 
21267c478bd9Sstevel@tonic-gate 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
21277c478bd9Sstevel@tonic-gate 		for (i = 0; i < npages; i++) {
21287c478bd9Sstevel@tonic-gate 			anon_array_enter(amp, anon_index, &cookie);
21297c478bd9Sstevel@tonic-gate 			ap = anon_get_ptr(amp->ahp, anon_index++);
21307c478bd9Sstevel@tonic-gate 			ASSERT(ap != NULL);
21317c478bd9Sstevel@tonic-gate 			swap_xlate(ap, &vp, &offset);
21327c478bd9Sstevel@tonic-gate 			anon_array_exit(&cookie);
21337c478bd9Sstevel@tonic-gate 			pp = page_lookup(vp, offset, SE_SHARED);
21347c478bd9Sstevel@tonic-gate 			ASSERT(pp != NULL);
21357c478bd9Sstevel@tonic-gate 			ppa[i] = pp;
21367c478bd9Sstevel@tonic-gate 		}
21377c478bd9Sstevel@tonic-gate 		ANON_LOCK_EXIT(&amp->a_rwlock);
21387c478bd9Sstevel@tonic-gate 		ASSERT(i == npages);
21397c478bd9Sstevel@tonic-gate 
21407c478bd9Sstevel@tonic-gate 		/*
21417c478bd9Sstevel@tonic-gate 		 * We are already holding the as->a_lock on the user's
21427c478bd9Sstevel@tonic-gate 		 * real segment, but we need to hold the a_lock on the
21437c478bd9Sstevel@tonic-gate 		 * underlying dummy as. This is mostly to satisfy the
21447c478bd9Sstevel@tonic-gate 		 * underlying HAT layer.
		 */
		AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
		a = sptseg_addr;
		pidx = 0;
		if (type == F_SOFTLOCK) {
			/*
			 * Load up the translation keeping it
			 * locked and don't unlock the page.
			 */
			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
				sz = MIN(pgsz, ptob(npages - pidx));
				hat_memload_array(sptseg->s_as->a_hat, a,
				    sz, &ppa[pidx], sptd->spt_prot,
				    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
			}
		} else {
			if (hat == seg->s_as->a_hat) {

				/*
				 * Migrate pages marked for migration.
				 */
				if (lgrp_optimizations())
					page_migrate(seg, shm_addr, ppa,
					    npages);

				/* CPU HAT */
				for (; pidx < npages;
				    a += pgsz, pidx += pgcnt) {
					sz = MIN(pgsz, ptob(npages - pidx));
					hat_memload_array(sptseg->s_as->a_hat,
					    a, sz, &ppa[pidx],
					    sptd->spt_prot, HAT_LOAD_SHARE);
				}
			} else {
				/* XHAT. Pass real address */
				hat_memload_array(hat, shm_addr,
				    ptob(npages), ppa, sptd->spt_prot,
				    HAT_LOAD_SHARE);
			}

			/*
			 * And now drop the SE_SHARED lock(s).
			 */
			for (i = 0; i < npages; i++)
				page_unlock(ppa[i]);
		}
		AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);

		kmem_free(ppa, sizeof (page_t *) * npages);
		return (0);
	case F_SOFTUNLOCK:

		/*
		 * This is a bit ugly: we pass in the real seg pointer,
		 * but sptseg_addr is the virtual address within the
		 * dummy seg.
		 */
		segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
		return (0);

	case F_PROT:

		/*
		 * This takes care of the unusual case where a user
		 * allocates a stack in shared memory and a register
		 * window overflow is written to that stack page before
		 * it is otherwise modified.
		 *
		 * We can get away with this because ISM segments are
		 * always rw. Other than this unusual case, there
		 * should be no instances of protection violations.
		 */
		return (0);

	default:
#ifdef DEBUG
		cmn_err(CE_WARN, "segspt_shmfault default type?");
#endif
		return (FC_NOMAP);
	}
}

/*ARGSUSED*/
static faultcode_t
segspt_shmfaulta(struct seg *seg, caddr_t addr)
{
	return (0);
}

/*ARGSUSED*/
static int
segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

/*ARGSUSED*/
static size_t
segspt_shmswapout(struct seg *seg)
{
	return (0);
}

/*
 * duplicate the shared page tables
 */
int
segspt_shmdup(struct seg *seg, struct seg *newseg)
{
	struct shm_data		*shmd = (struct shm_data *)seg->s_data;
	struct anon_map		*amp = shmd->shm_amp;
	struct shm_data		*shmd_new;
	struct seg		*spt_seg = shmd->shm_sptseg;
	struct spt_data		*sptd = spt_seg->s_data;
	int			error = 0;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
	newseg->s_data = (void *)shmd_new;
	shmd_new->shm_sptas = shmd->shm_sptas;
	shmd_new->shm_amp = amp;
	shmd_new->shm_sptseg = shmd->shm_sptseg;
	newseg->s_ops = &segspt_shmops;
	newseg->s_szc = seg->s_szc;
	ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	amp->refcnt++;
	ANON_LOCK_EXIT(&amp->a_rwlock);

	if (sptd->spt_flags & SHM_PAGEABLE) {
		shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
		shmd_new->shm_lckpgs = 0;
		if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
			if ((error = hat_share(newseg->s_as->a_hat,
			    newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
			    seg->s_size, seg->s_szc)) != 0) {
				kmem_free(shmd_new->shm_vpage,
				    btopr(amp->size));
			}
		}
		return (error);
	} else {
		return (hat_share(newseg->s_as->a_hat, newseg->s_base,
		    shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
		    seg->s_szc));
	}
}

/*ARGSUSED*/
int
segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * ISM segment is always rw.
	 */
	return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
}

/*
 * Return an array of locked large pages, for empty slots allocate
 * private zero-filled anon pages.
 */
static int
spt_anon_getpages(
	struct seg *sptseg,
	caddr_t sptaddr,
	size_t len,
	page_t *ppa[])
{
	struct spt_data *sptd = sptseg->s_data;
	struct anon_map *amp = sptd->spt_amp;
	enum seg_rw rw = sptd->spt_prot;
	uint_t szc = sptseg->s_szc;
	size_t pg_sz, share_sz = page_get_pagesize(szc);
	pgcnt_t lp_npgs;
	caddr_t lp_addr, e_sptaddr;
	uint_t vpprot, ppa_szc = 0;
	struct vpage *vpage = NULL;
	ulong_t j, ppa_idx;
	int err, ierr = 0;
	pgcnt_t an_idx;
	anon_sync_obj_t cookie;

	ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
	ASSERT(len != 0);

	pg_sz = share_sz;
	lp_npgs = btop(pg_sz);
	lp_addr = sptaddr;
	e_sptaddr = sptaddr + len;
	an_idx = seg_page(sptseg, sptaddr);
	ppa_idx = 0;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
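	/*
	 * The loop below walks [sptaddr, sptaddr + len) in chunks of the
	 * current preferred page size.  anon_map_getpages() either fills
	 * ppa[] for a chunk or asks us to retry the remainder at a smaller
	 * (ierr == -1) or larger (ierr == -2) page size; see the comment
	 * inside the loop.  As an illustration (the concrete sizes are
	 * platform-dependent and only assumed here): if a 4 MB chunk cannot
	 * be allocated and segvn_anypgsz is set, the next pass retries the
	 * remaining range at the next smaller supported page size, and so
	 * on down to PAGESIZE.
	 */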
	/*CONSTCOND*/
	while (1) {
		for (; lp_addr < e_sptaddr;
		    an_idx += lp_npgs, lp_addr += pg_sz,
		    ppa_idx += lp_npgs) {

			anon_array_enter(amp, an_idx, &cookie);
			ppa_szc = (uint_t)-1;
			ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
			    lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
			    &ppa_szc, vpage, rw, 0, segvn_anypgsz, kcred);
			anon_array_exit(&cookie);

			if (ierr != 0) {
				if (ierr > 0) {
					err = FC_MAKE_ERR(ierr);
					goto lpgs_err;
				}
				break;
			}
		}
		if (lp_addr == e_sptaddr) {
			break;
		}
		ASSERT(lp_addr < e_sptaddr);

		/*
		 * ierr == -1 means we failed to allocate a large page,
		 * so do a size down operation.
		 *
		 * ierr == -2 means some other process that privately shares
		 * pages with this process has allocated a larger page and we
		 * need to retry with larger pages. So do a size up
		 * operation. This relies on the fact that large pages are
		 * never partially shared, i.e. if we share any constituent
		 * page of a large page with another process we must share the
		 * entire large page. Note this cannot happen for SOFTLOCK
		 * case, unless current address (lpaddr) is at the beginning
		 * of the next page size boundary, because the other process
		 * couldn't have relocated locked pages.
		 */
		ASSERT(ierr == -1 || ierr == -2);
		if (segvn_anypgsz) {
			ASSERT(ierr == -2 || szc != 0);
			ASSERT(ierr == -1 || szc < sptseg->s_szc);
			szc = (ierr == -1) ? szc - 1 : szc + 1;
		} else {
			/*
			 * For faults and segvn_anypgsz == 0
			 * we need to be careful not to loop forever
			 * if existing page is found with szc other
			 * than 0 or seg->s_szc. This could be due
			 * to page relocations on behalf of DR or
			 * more likely large page creation. For this
			 * case simply re-size to existing page's szc
			 * if returned by anon_map_getpages().
			 */
			if (ppa_szc == (uint_t)-1) {
				szc = (ierr == -1) ? 0 : sptseg->s_szc;
			} else {
				ASSERT(ppa_szc <= sptseg->s_szc);
				ASSERT(ierr == -2 || ppa_szc < szc);
				ASSERT(ierr == -1 || ppa_szc > szc);
				szc = ppa_szc;
			}
		}
		pg_sz = page_get_pagesize(szc);
		lp_npgs = btop(pg_sz);
		ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
	return (0);

lpgs_err:
	ANON_LOCK_EXIT(&amp->a_rwlock);
	for (j = 0; j < ppa_idx; j++)
		page_unlock(ppa[j]);
	return (err);
}

/*
 * count the number of bytes in a set of spt pages that are currently not
 * locked
 */
static rctl_qty_t
spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
{
	ulong_t	i;
	rctl_qty_t unlocked = 0;

	for (i = 0; i < npages; i++) {
		if (ppa[i]->p_lckcnt == 0)
			unlocked += PAGESIZE;
	}
	return (unlocked);
}

int
spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
    page_t **ppa, ulong_t *lockmap, size_t pos,
    rctl_qty_t *locked)
{
	struct shm_data *shmd = seg->s_data;
	struct spt_data *sptd = shmd->shm_sptseg->s_data;
	ulong_t	i;
	int	kernel;

	/* return the number of bytes actually locked */
	*locked = 0;
	for (i = 0; i < npages; anon_index++, pos++, i++) {
		if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
			if (sptd->spt_ppa_lckcnt[anon_index] <
			    (ushort_t)DISM_LOCK_MAX) {
				if (++sptd->spt_ppa_lckcnt[anon_index] ==
				    (ushort_t)DISM_LOCK_MAX) {
					cmn_err(CE_WARN,
					    "DISM page lock limit "
					    "reached on DISM offset 0x%lx\n",
					    anon_index << PAGESHIFT);
				}
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[anon_index]) ? 1 : 0;
				if (!page_pp_lock(ppa[i], 0, kernel)) {
					sptd->spt_ppa_lckcnt[anon_index]--;
					return (EAGAIN);
				}
				/* if this is a newly locked page, count it */
				if (ppa[i]->p_lckcnt == 1) {
					*locked += PAGESIZE;
				}
				shmd->shm_lckpgs++;
				shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
				if (lockmap != NULL)
					BT_SET(lockmap, pos);
			}
		}
	}
	return (0);
}

/*ARGSUSED*/
static int
segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
    int attr, int op, ulong_t *lockmap, size_t pos)
{
	struct shm_data *shmd = seg->s_data;
	struct seg *sptseg = shmd->shm_sptseg;
	struct spt_data *sptd = sptseg->s_data;
	struct kshmid *sp = sptd->spt_amp->a_sp;
	pgcnt_t npages, a_npages;
	page_t **ppa;
	pgcnt_t an_idx, a_an_idx, ppa_idx;
	caddr_t spt_addr, a_addr;	/* spt and aligned address */
	size_t a_len;			/* aligned len */
	size_t share_sz;
	ulong_t i;
	int sts = 0;
	rctl_qty_t unlocked = 0;
	rctl_qty_t locked = 0;
	struct proc *p = curproc;
	kproject_t *proj;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(sp != NULL);

	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
		return (0);
	}

	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	an_idx = seg_page(seg, addr);
	npages = btopr(len);

	if (an_idx + npages > btopr(shmd->shm_amp->size)) {
		return (ENOMEM);
	}

	/*
	 * A shm's project never changes, so no lock needed.
	 * The shm has a hold on the project, so it will not go away.
	 * Since we have a mapping to shm within this zone, we know
	 * that the zone will not go away.
	 */
	proj = sp->shm_perm.ipc_proj;

	if (op == MC_LOCK) {

		/*
		 * Need to align addr and size request if they are not
		 * aligned so we can always allocate large page(s);
		 * however, we only lock what was requested in the
		 * initial request.
		 */
		share_sz = page_get_pagesize(sptseg->s_szc);
		a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
		a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
		    share_sz);
		a_npages = btop(a_len);
		a_an_idx = seg_page(seg, a_addr);
		spt_addr = sptseg->s_base + ptob(a_an_idx);
		ppa_idx = an_idx - a_an_idx;

		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
		    KM_NOSLEEP)) == NULL) {
			return (ENOMEM);
		}

		/*
		 * Don't cache any new pages for IO and
		 * flush any cached pages.
		 */
		mutex_enter(&sptd->spt_lock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
		if (sts != 0) {
			mutex_exit(&sptd->spt_lock);
			kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
			return (sts);
		}

		mutex_enter(&sp->shm_mlock);
		/* enforce locked memory rctl */
		unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);

		mutex_enter(&p->p_lock);
		if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
			mutex_exit(&p->p_lock);
			sts = EAGAIN;
		} else {
			mutex_exit(&p->p_lock);
			sts = spt_lockpages(seg, an_idx, npages,
			    &ppa[ppa_idx], lockmap, pos, &locked);

			/*
			 * correct locked count if not all pages could be
			 * locked
			 */
			if ((unlocked - locked) > 0) {
				rctl_decr_locked_mem(NULL, proj,
				    (unlocked - locked), 0);
			}
		}
		/*
		 * unlock pages
		 */
		for (i = 0; i < a_npages; i++)
			page_unlock(ppa[i]);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sp->shm_mlock);
		mutex_exit(&sptd->spt_lock);

		kmem_free(ppa, ((sizeof (page_t *)) * a_npages));

	} else if (op == MC_UNLOCK) {	/* unlock */
		struct anon_map *amp;
		struct anon	*ap;
		struct vnode	*vp;
		u_offset_t	off;
		struct page	*pp;
		int		kernel;
		anon_sync_obj_t	cookie;
		rctl_qty_t	unlocked = 0;

		amp = sptd->spt_amp;
		mutex_enter(&sptd->spt_lock);
		if (shmd->shm_lckpgs == 0) {
			mutex_exit(&sptd->spt_lock);
			return (0);
		}
		/*
		 * Don't cache new IO pages.
		 */
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;

		mutex_enter(&sp->shm_mlock);
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		for (i = 0; i < npages; i++, an_idx++) {
			if (shmd->shm_vpage[an_idx] & DISM_PG_LOCKED) {
				anon_array_enter(amp, an_idx, &cookie);
				ap = anon_get_ptr(amp->ahp, an_idx);
				ASSERT(ap);

				swap_xlate(ap, &vp, &off);
				anon_array_exit(&cookie);
				pp = page_lookup(vp, off, SE_SHARED);
				ASSERT(pp);
				/*
				 * availrmem is decremented only for
				 * pages which are not in seg pcache;
				 * for pages in seg pcache, availrmem was
				 * decremented in _dismpagelock() (if
				 * they were not locked here)
				 */
				kernel = (sptd->spt_ppa &&
				    sptd->spt_ppa[an_idx]) ? 1 : 0;
				ASSERT(pp->p_lckcnt > 0);
				page_pp_unlock(pp, 0, kernel);
				if (pp->p_lckcnt == 0)
					unlocked += PAGESIZE;
				page_unlock(pp);
				shmd->shm_vpage[an_idx] &= ~DISM_PG_LOCKED;
				sptd->spt_ppa_lckcnt[an_idx]--;
				shmd->shm_lckpgs--;
			}
		}
		ANON_LOCK_EXIT(&amp->a_rwlock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);

		rctl_decr_locked_mem(NULL, proj, unlocked, 0);
		mutex_exit(&sp->shm_mlock);
	}
	return (sts);
}

/*ARGSUSED*/
int
segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * ISM segment is always rw.
	 */
	while (--pgno >= 0)
		*protv++ = sptd->spt_prot;
	return (0);
}

/*ARGSUSED*/
u_offset_t
segspt_shmgetoffset(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/* Offset does not matter in ISM memory */

	return ((u_offset_t)0);
}

/* ARGSUSED */
int
segspt_shmgettype(struct seg *seg, caddr_t addr)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * The shared memory mapping is always MAP_SHARED; SWAP is only
	 * reserved for DISM
	 */
	return (MAP_SHARED |
	    ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
}

/*ARGSUSED*/
int
segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	*vpp = sptd->spt_vp;
	return (0);
}

/*ARGSUSED*/
static int
segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
	struct anon_map *amp;
	pgcnt_t pg_idx;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	if (behav == MADV_FREE) {
		if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
			return (0);

		amp = sptd->spt_amp;
		pg_idx = seg_page(seg, addr);

		mutex_enter(&sptd->spt_lock);
		if (sptd->spt_ppa != NULL)
			sptd->spt_flags |= DISM_PPA_CHANGED;
		mutex_exit(&sptd->spt_lock);

		/*
		 * Purge all DISM cached pages
		 */
		seg_ppurge_seg(segspt_reclaim);

		mutex_enter(&sptd->spt_lock);
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
		anon_disclaim(amp, pg_idx, len, ANON_PGLOOKUP_BLK);
		ANON_LOCK_EXIT(&amp->a_rwlock);
		mutex_exit(&sptd->spt_lock);
	} else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
	    behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
		int			already_set;
		ulong_t			anon_index;
		lgrp_mem_policy_t	policy;
		caddr_t			shm_addr;
		size_t			share_size;
		size_t			size;
		struct seg		*sptseg = shmd->shm_sptseg;
		caddr_t			sptseg_addr;

		/*
		 * Align address and length to page size of underlying segment
		 */
		share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
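		/*
		 * For illustration only (the actual value comes from
		 * sptseg->s_szc and is platform-dependent): assuming a
		 * 4 MB underlying page size, an addr of 0x40123000 with
		 * len 0x2000 is widened by the P2ALIGN/P2ROUNDUP pair
		 * below to shm_addr = 0x40000000 and size = 0x400000,
		 * i.e. the single large page covering the requested range.
		 */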
		shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
		size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
		    share_size);

		amp = shmd->shm_amp;
		anon_index = seg_page(seg, shm_addr);

		/*
		 * And now we may have to adjust size downward if we have
		 * exceeded the realsize of the segment or initial anon
		 * allocations.
		 */
		sptseg_addr = sptseg->s_base + ptob(anon_index);
		if ((sptseg_addr + size) >
		    (sptseg->s_base + sptd->spt_realsize))
			size = (sptseg->s_base + sptd->spt_realsize) -
			    sptseg_addr;

		/*
		 * Set memory allocation policy for this segment
		 */
		policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
		already_set = lgrp_shm_policy_set(policy, amp, anon_index,
		    NULL, 0, len);

		/*
		 * If a random memory allocation policy is already set,
		 * don't bother reapplying it.
		 */
		if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
			return (0);

		/*
		 * Mark any existing pages in the given range for
		 * migration, flush the I/O page cache, and use the
		 * underlying segment to calculate the anon index and to
		 * get the anonmap and vnode pointer from it.
		 */
		if (shmd->shm_softlockcnt > 0)
			segspt_purge(seg);

		page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
	}

	return (0);
}

/*ARGSUSED*/
void
segspt_shmdump(struct seg *seg)
{
	/* no-op for ISM segment */
}

/*ARGSUSED*/
static faultcode_t
segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	return (ENOTSUP);
}

/*
 * get a memory ID for an addr in a given segment
 */
static int
segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct shm_data *shmd = (struct shm_data *)seg->s_data;
	struct anon	*ap;
	size_t		anon_index;
	struct anon_map	*amp = shmd->shm_amp;
	struct spt_data	*sptd = shmd->shm_sptseg->s_data;
	struct seg	*sptseg = shmd->shm_sptseg;
	anon_sync_obj_t	cookie;

	anon_index = seg_page(seg, addr);

	if (addr > (seg->s_base + sptd->spt_realsize)) {
		return (EFAULT);
	}

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	anon_array_enter(amp, anon_index, &cookie);
	ap = anon_get_ptr(amp->ahp, anon_index);
	if (ap == NULL) {
		struct page *pp;
		caddr_t spt_addr = sptseg->s_base + ptob(anon_index);

		pp = anon_zero(sptseg, spt_addr, &ap, kcred);
		if (pp == NULL) {
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
			return (ENOMEM);
		}
		(void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
		page_unlock(pp);
	}
	anon_array_exit(&cookie);
	ANON_LOCK_EXIT(&amp->a_rwlock);
	memidp->val[0] = (uintptr_t)ap;
	memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
	return (0);
}

/*
 * Get memory allocation policy info for specified address in given segment
 */
static lgrp_mem_policy_info_t *
segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
{
	struct anon_map		*amp;
	ulong_t			anon_index;
	lgrp_mem_policy_info_t	*policy_info;
	struct shm_data		*shm_data;

	ASSERT(seg != NULL);

	/*
	 * Get anon_map from segshm
	 *
	 * Assume that no lock needs to be held on anon_map, since
	 * it should be protected by its reference count which must be
	 * nonzero for an existing segment
	 * Need to grab readers lock on policy tree though
	 */
	shm_data = (struct shm_data *)seg->s_data;
	if (shm_data == NULL)
		return (NULL);
	amp = shm_data->shm_amp;
	ASSERT(amp->refcnt != 0);

	/*
	 * Get policy info
	 *
	 * Assume starting anon index of 0
	 */
	anon_index = seg_page(seg, addr);
	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);

	return (policy_info);
}

/*ARGSUSED*/
static int
segspt_shmcapable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
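/*
 * Illustrative sketch of the DISM locked-memory accounting performed by
 * segspt_shmlockop() above; the numbers are hypothetical.  For an MC_LOCK
 * request covering npages = 8 where 3 pages already have p_lckcnt > 0,
 * spt_unlockedbytes() reports 5 * PAGESIZE, which is charged against the
 * project with rctl_incr_locked_mem().  If spt_lockpages() then newly
 * locks only 4 of those pages (for instance because one page had already
 * reached DISM_LOCK_MAX), locked comes back as 4 * PAGESIZE and the
 * surplus 1 * PAGESIZE charge is returned via rctl_decr_locked_mem(), so
 * the rctl usage ends up matching exactly the pages this call pinned.
 */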