xref: /titanic_44/usr/src/uts/common/vm/seg_spt.c (revision 7752631cb397da3298ed0b30d6bfee48679c340f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/param.h>
29 #include <sys/user.h>
30 #include <sys/mman.h>
31 #include <sys/kmem.h>
32 #include <sys/sysmacros.h>
33 #include <sys/cmn_err.h>
34 #include <sys/systm.h>
35 #include <sys/tuneable.h>
36 #include <vm/hat.h>
37 #include <vm/seg.h>
38 #include <vm/as.h>
39 #include <vm/anon.h>
40 #include <vm/page.h>
41 #include <sys/buf.h>
42 #include <sys/swap.h>
43 #include <sys/atomic.h>
44 #include <vm/seg_spt.h>
45 #include <sys/debug.h>
46 #include <sys/vtrace.h>
47 #include <sys/shm.h>
48 #include <sys/shm_impl.h>
49 #include <sys/lgrp.h>
50 #include <sys/vmsystm.h>
51 #include <sys/policy.h>
52 #include <sys/project.h>
53 #include <sys/tnf_probe.h>
54 #include <sys/zone.h>
55 
56 #define	SEGSPTADDR	(caddr_t)0x0
57 
58 /*
59  * # pages used for spt
60  */
61 static size_t	spt_used;
62 
63 /*
64  * segspt_minfree is the memory left for the system after ISM
65  * has locked its pages; it is set to 5% of availrmem in
66  * sptcreate when ISM is created.  ISM should not use more
67  * than ~90% of availrmem; if it does, then the performance
68  * of the system may decrease.  Machines with large memories may
69  * be able to use more memory for ISM, so we set the default
70  * segspt_minfree to 5% (which gives ISM at most 95% of availrmem).
71  * If somebody wants even more memory for ISM (risking hanging
72  * the system) they can patch segspt_minfree to a smaller number.
73  */
74 pgcnt_t segspt_minfree = 0;
75 
76 static int segspt_create(struct seg *seg, caddr_t argsp);
77 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
78 static void segspt_free(struct seg *seg);
79 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
80 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
81 
82 static void
83 segspt_badop()
84 {
85 	panic("segspt_badop called");
86 	/*NOTREACHED*/
87 }
88 
89 #define	SEGSPT_BADOP(t)	(t(*)())segspt_badop
90 
91 struct seg_ops segspt_ops = {
92 	SEGSPT_BADOP(int),		/* dup */
93 	segspt_unmap,
94 	segspt_free,
95 	SEGSPT_BADOP(int),		/* fault */
96 	SEGSPT_BADOP(faultcode_t),	/* faulta */
97 	SEGSPT_BADOP(int),		/* setprot */
98 	SEGSPT_BADOP(int),		/* checkprot */
99 	SEGSPT_BADOP(int),		/* kluster */
100 	SEGSPT_BADOP(size_t),		/* swapout */
101 	SEGSPT_BADOP(int),		/* sync */
102 	SEGSPT_BADOP(size_t),		/* incore */
103 	SEGSPT_BADOP(int),		/* lockop */
104 	SEGSPT_BADOP(int),		/* getprot */
105 	SEGSPT_BADOP(u_offset_t), 	/* getoffset */
106 	SEGSPT_BADOP(int),		/* gettype */
107 	SEGSPT_BADOP(int),		/* getvp */
108 	SEGSPT_BADOP(int),		/* advise */
109 	SEGSPT_BADOP(void),		/* dump */
110 	SEGSPT_BADOP(int),		/* pagelock */
111 	SEGSPT_BADOP(int),		/* setpgsz */
112 	SEGSPT_BADOP(int),		/* getmemid */
113 	segspt_getpolicy,		/* getpolicy */
114 	SEGSPT_BADOP(int),		/* capable */
115 };
116 
117 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
118 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
119 static void segspt_shmfree(struct seg *seg);
120 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
121 		caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
122 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
123 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
124 			register size_t len, register uint_t prot);
125 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
126 			uint_t prot);
127 static int	segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
128 static size_t	segspt_shmswapout(struct seg *seg);
129 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
130 			register char *vec);
131 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
132 			int attr, uint_t flags);
133 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
134 			int attr, int op, ulong_t *lockmap, size_t pos);
135 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
136 			uint_t *protv);
137 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
138 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
139 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
140 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
141 			uint_t behav);
142 static void segspt_shmdump(struct seg *seg);
143 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
144 			struct page ***, enum lock_type, enum seg_rw);
145 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
146 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
147 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
148 static int segspt_shmcapable(struct seg *, segcapability_t);
149 
150 struct seg_ops segspt_shmops = {
151 	segspt_shmdup,
152 	segspt_shmunmap,
153 	segspt_shmfree,
154 	segspt_shmfault,
155 	segspt_shmfaulta,
156 	segspt_shmsetprot,
157 	segspt_shmcheckprot,
158 	segspt_shmkluster,
159 	segspt_shmswapout,
160 	segspt_shmsync,
161 	segspt_shmincore,
162 	segspt_shmlockop,
163 	segspt_shmgetprot,
164 	segspt_shmgetoffset,
165 	segspt_shmgettype,
166 	segspt_shmgetvp,
167 	segspt_shmadvise,	/* advise */
168 	segspt_shmdump,
169 	segspt_shmpagelock,
170 	segspt_shmsetpgsz,
171 	segspt_shmgetmemid,
172 	segspt_shmgetpolicy,
173 	segspt_shmcapable,
174 };
175 
176 static void segspt_purge(struct seg *seg);
177 static int segspt_reclaim(struct seg *, caddr_t, size_t, struct page **,
178 		enum seg_rw);
179 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
180 		page_t **ppa);
181 
182 
183 
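/*
 * Create the shared page table (spt) segment:  allocate a dummy address
 * space and map the entire requested size at SEGSPTADDR through
 * segspt_create().  On success the new spt segment is returned to the
 * caller through *sptseg.
 */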
184 /*ARGSUSED*/
185 int
186 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
187 	uint_t prot, uint_t flags, uint_t share_szc)
188 {
189 	int 	err;
190 	struct  as	*newas;
191 	struct	segspt_crargs sptcargs;
192 
193 #ifdef DEBUG
194 	TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
195 			tnf_ulong, size, size );
196 #endif
197 	if (segspt_minfree == 0)	/* leave min 5% of availrmem */
198 		segspt_minfree = availrmem/20;	/* for the system */
199 
200 	if (!hat_supported(HAT_SHARED_PT, (void *)0))
201 		return (EINVAL);
202 
203 	/*
204 	 * get a new as for this shared memory segment
205 	 */
206 	newas = as_alloc();
207 	newas->a_proc = NULL;
208 	sptcargs.amp = amp;
209 	sptcargs.prot = prot;
210 	sptcargs.flags = flags;
211 	sptcargs.szc = share_szc;
212 	/*
213 	 * create a shared page table (spt) segment
214 	 */
215 
216 	if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
217 		as_free(newas);
218 		return (err);
219 	}
220 	*sptseg = sptcargs.seg_spt;
221 	return (0);
222 }
223 
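/*
 * Tear down the address space created by sptcreate():  unmap the spt
 * segment and free the dummy as.
 */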
224 void
225 sptdestroy(struct as *as, struct anon_map *amp)
226 {
227 
228 #ifdef DEBUG
229 	TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
230 #endif
231 	(void) as_unmap(as, SEGSPTADDR, amp->size);
232 	as_free(as);
233 }
234 
235 /*
236  * called from seg_free().
237  * free (i.e., unlock, unmap, return to free list)
238  *  all the pages in the given seg.
239  */
240 void
241 segspt_free(struct seg	*seg)
242 {
243 	struct spt_data *sptd = (struct spt_data *)seg->s_data;
244 
245 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
246 
247 	if (sptd != NULL) {
248 		if (sptd->spt_realsize)
249 			segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
250 
251 		if (sptd->spt_ppa_lckcnt)
252 			kmem_free(sptd->spt_ppa_lckcnt,
253 			    sizeof (*sptd->spt_ppa_lckcnt)
254 			    * btopr(sptd->spt_amp->size));
255 		kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
256 		mutex_destroy(&sptd->spt_lock);
257 		kmem_free(sptd, sizeof (*sptd));
258 	}
259 }
260 
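/*
 * Sync is a no-op for spt segments; simply assert the address space
 * lock and return success.
 */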
261 /*ARGSUSED*/
262 static int
263 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
264 	uint_t flags)
265 {
266 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
267 
268 	return (0);
269 }
270 
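/*
 * Report residency for the given range.  For ISM (non-pageable) segments
 * every page is resident and locked.  For DISM walk the anon map and
 * report, per page, whether the page is in core and whether it has been
 * locked (DISM_PG_LOCKED in shm_vpage[]).
 */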
271 /*ARGSUSED*/
272 static size_t
273 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
274 {
275 	caddr_t	eo_seg;
276 	pgcnt_t	npages;
277 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
278 	struct seg	*sptseg;
279 	struct spt_data *sptd;
280 
281 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
282 #ifdef lint
283 	seg = seg;
284 #endif
285 	sptseg = shmd->shm_sptseg;
286 	sptd = sptseg->s_data;
287 
288 	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
289 		eo_seg = addr + len;
290 		while (addr < eo_seg) {
291 			/* page exists, and it's locked. */
292 			*vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
293 				SEG_PAGE_ANON;
294 			addr += PAGESIZE;
295 		}
296 		return (len);
297 	} else {
298 		struct  anon_map *amp = shmd->shm_amp;
299 		struct  anon	*ap;
300 		page_t		*pp;
301 		pgcnt_t 	anon_index;
302 		struct vnode 	*vp;
303 		u_offset_t 	off;
304 		ulong_t		i;
305 		int		ret;
306 		anon_sync_obj_t	cookie;
307 
308 		addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
309 		anon_index = seg_page(seg, addr);
310 		npages = btopr(len);
311 		if (anon_index + npages > btopr(shmd->shm_amp->size)) {
312 			return (EINVAL);
313 		}
314 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
315 		for (i = 0; i < npages; i++, anon_index++) {
316 			ret = 0;
317 			anon_array_enter(amp, anon_index, &cookie);
318 			ap = anon_get_ptr(amp->ahp, anon_index);
319 			if (ap != NULL) {
320 				swap_xlate(ap, &vp, &off);
321 				anon_array_exit(&cookie);
322 				pp = page_lookup_nowait(vp, off, SE_SHARED);
323 				if (pp != NULL) {
324 					ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
325 					page_unlock(pp);
326 				}
327 			} else {
328 				anon_array_exit(&cookie);
329 			}
330 			if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
331 				ret |= SEG_PAGE_LOCKED;
332 			}
333 			*vec++ = (char)ret;
334 		}
335 		ANON_LOCK_EXIT(&amp->a_rwlock);
336 		return (len);
337 	}
338 }
339 
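/*
 * Unmap the spt (dummy) segment.  Only unmapping of the entire segment
 * is supported; anything else fails with EINVAL.
 */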
340 static int
341 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
342 {
343 	size_t share_size;
344 
345 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
346 
347 	/*
348 	 * seg.s_size may have been rounded up to the largest page size
349 	 * in shmat().
350 	 * XXX This should be cleaned up. sptdestroy should take a length
351 	 * argument which should be the same as sptcreate. Then
352 	 * this rounding would not be needed (or is done in shm.c)
353 	 * Only the check for full segment will be needed.
354 	 *
355 	 * XXX -- shouldn't raddr == 0 always? These tests don't seem
356 	 * to be useful at all.
357 	 */
358 	share_size = page_get_pagesize(seg->s_szc);
359 	ssize = P2ROUNDUP(ssize, share_size);
360 
361 	if (raddr == seg->s_base && ssize == seg->s_size) {
362 		seg_free(seg);
363 		return (0);
364 	} else
365 		return (EINVAL);
366 }
367 
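/*
 * Create the segment that backs an ISM or DISM shared memory id inside
 * the dummy address space set up by sptcreate().
 *
 * For DISM (SHM_PAGEABLE) we only round the anon array up to a large-page
 * boundary and allocate the per-page lock-count array; pages are created,
 * locked and mapped lazily.  For ISM we create and lock every page up
 * front, charge the locked bytes to the project's locked-memory rctl and
 * load (possibly locked) shared translations for the whole segment.
 */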
368 int
369 segspt_create(struct seg *seg, caddr_t argsp)
370 {
371 	int		err;
372 	caddr_t		addr = seg->s_base;
373 	struct spt_data *sptd;
374 	struct 	segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
375 	struct anon_map *amp = sptcargs->amp;
376 	struct kshmid	*sp = amp->a_sp;
377 	struct	cred	*cred = CRED();
378 	ulong_t		i, j, anon_index = 0;
379 	pgcnt_t		npages = btopr(amp->size);
380 	struct vnode	*vp;
381 	page_t		**ppa;
382 	uint_t		hat_flags;
383 	size_t		pgsz;
384 	pgcnt_t		pgcnt;
385 	caddr_t		a;
386 	pgcnt_t		pidx;
387 	size_t		sz;
388 	proc_t		*procp = curproc;
389 	rctl_qty_t	lockedbytes = 0;
390 	kproject_t	*proj;
391 
392 	/*
393 	 * We are holding the a_lock on the underlying dummy as,
394 	 * so we can make calls to the HAT layer.
395 	 */
396 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
397 	ASSERT(sp != NULL);
398 
399 #ifdef DEBUG
400 	TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
401 				tnf_opaque, addr, addr,
402 				tnf_ulong, len, seg->s_size);
403 #endif
404 	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
405 		if (err = anon_swap_adjust(npages))
406 			return (err);
407 	}
408 	err = ENOMEM;
409 
410 	if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
411 		goto out1;
412 
413 	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
414 		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
415 		    KM_NOSLEEP)) == NULL)
416 			goto out2;
417 	}
418 
419 	mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
420 
421 	if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
422 		goto out3;
423 
424 	seg->s_ops = &segspt_ops;
425 	sptd->spt_vp = vp;
426 	sptd->spt_amp = amp;
427 	sptd->spt_prot = sptcargs->prot;
428 	sptd->spt_flags = sptcargs->flags;
429 	seg->s_data = (caddr_t)sptd;
430 	sptd->spt_ppa = NULL;
431 	sptd->spt_ppa_lckcnt = NULL;
432 	seg->s_szc = sptcargs->szc;
433 
434 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
435 	if (seg->s_szc > amp->a_szc) {
436 		amp->a_szc = seg->s_szc;
437 	}
438 	ANON_LOCK_EXIT(&amp->a_rwlock);
439 
440 	/*
441 	 * Set policy to affect initial allocation of pages in
442 	 * anon_map_createpages()
443 	 */
444 	(void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
445 	    NULL, 0, ptob(npages));
446 
447 	if (sptcargs->flags & SHM_PAGEABLE) {
448 		size_t  share_sz;
449 		pgcnt_t new_npgs, more_pgs;
450 		struct anon_hdr *nahp;
451 		zone_t *zone;
452 
453 		share_sz = page_get_pagesize(seg->s_szc);
454 		if (!IS_P2ALIGNED(amp->size, share_sz)) {
455 			/*
456 			 * We are rounding up the size of the anon array
457 			 * to a share_sz (e.g. 4M) boundary because we always
458 			 * create a full large page worth of pages when
459 			 * locking or faulting, and so we don't have to check
460 			 * all the corner cases, e.g. whether there is enough
461 			 * space to allocate a 4M page.
462 			 */
463 			new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
464 			more_pgs = new_npgs - npages;
465 
466 			/*
467 			 * This may return NULL if the global zone is removing a
468 			 * shm created by a non-global zone that has been
469 			 * destroyed.
470 			 */
471 			zone =
472 			    zone_find_by_id(sp->shm_perm.ipc_proj->kpj_zoneid);
473 
474 			if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
475 				if (zone != NULL)
476 					zone_rele(zone);
477 				err = ENOMEM;
478 				goto out4;
479 			}
480 			if (zone != NULL)
481 				zone_rele(zone);
482 
483 			nahp = anon_create(new_npgs, ANON_SLEEP);
484 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
485 			(void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
486 			    ANON_SLEEP);
487 			anon_release(amp->ahp, npages);
488 			amp->ahp = nahp;
489 			ASSERT(amp->swresv == ptob(npages));
490 			amp->swresv = amp->size = ptob(new_npgs);
491 			ANON_LOCK_EXIT(&amp->a_rwlock);
492 			npages = new_npgs;
493 		}
494 
495 		sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
496 		    sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
497 		sptd->spt_pcachecnt = 0;
498 		sptd->spt_realsize = ptob(npages);
499 		sptcargs->seg_spt = seg;
500 		return (0);
501 	}
502 
503 	/*
504 	 * get array of pages for each anon slot in amp
505 	 */
506 	if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
507 	    seg, addr, S_CREATE, cred)) != 0)
508 		goto out4;
509 
510 	mutex_enter(&sp->shm_mlock);
511 
512 	/* May be partially locked, so count bytes to charge for locking */
513 	for (i = 0; i < npages; i++)
514 		if (ppa[i]->p_lckcnt == 0)
515 			lockedbytes += PAGESIZE;
516 
517 	proj = sp->shm_perm.ipc_proj;
518 
519 	if (lockedbytes > 0) {
520 		mutex_enter(&procp->p_lock);
521 		if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
522 			mutex_exit(&procp->p_lock);
523 			mutex_exit(&sp->shm_mlock);
524 			for (i = 0; i < npages; i++)
525 				page_unlock(ppa[i]);
526 			err = ENOMEM;
527 			goto out4;
528 		}
529 		mutex_exit(&procp->p_lock);
530 	}
531 
532 	/*
533 	 * addr is the initial address corresponding to the first page in ppa[]
534 	 */
535 	for (i = 0; i < npages; i++) {
536 		/* attempt to lock all pages */
537 		if (page_pp_lock(ppa[i], 0, 1) == 0) {
538 			/*
539 			 * if unable to lock any page, unlock all
540 			 * of them and return error
541 			 */
542 			for (j = 0; j < i; j++)
543 				page_pp_unlock(ppa[j], 0, 1);
544 			for (i = 0; i < npages; i++)
545 				page_unlock(ppa[i]);
546 			rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
547 			mutex_exit(&sp->shm_mlock);
548 			err = ENOMEM;
549 			goto out4;
550 		}
551 	}
552 	mutex_exit(&sp->shm_mlock);
553 
554 	/*
555 	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
556 	 * for the entire life of the segment, for example platforms
557 	 * that do not support Dynamic Reconfiguration.
558 	 */
559 	hat_flags = HAT_LOAD_SHARE;
560 	if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
561 		hat_flags |= HAT_LOAD_LOCK;
562 
563 	/*
564 	 * Load translations one large page at a time
565 	 * to make sure we don't create mappings bigger than the
566 	 * segment's size code, in case the underlying pages
567 	 * are shared with a segvn segment that uses a bigger
568 	 * size code than we do.
569 	 */
570 	pgsz = page_get_pagesize(seg->s_szc);
571 	pgcnt = page_get_pagecnt(seg->s_szc);
572 	for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
573 		sz = MIN(pgsz, ptob(npages - pidx));
574 		hat_memload_array(seg->s_as->a_hat, a, sz,
575 		    &ppa[pidx], sptd->spt_prot, hat_flags);
576 	}
577 
578 	/*
579 	 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
580 	 * we will leave the pages locked SE_SHARED for the life
581 	 * of the ISM segment. This will prevent any calls to
582 	 * hat_pageunload() on this ISM segment for those platforms.
583 	 */
584 	if (!(hat_flags & HAT_LOAD_LOCK)) {
585 		/*
586 		 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
587 		 * we no longer need to hold the SE_SHARED lock on the pages,
588 		 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
589 		 * SE_SHARED lock on the pages as necessary.
590 		 */
591 		for (i = 0; i < npages; i++)
592 			page_unlock(ppa[i]);
593 	}
594 	sptd->spt_pcachecnt = 0;
595 	kmem_free(ppa, ((sizeof (page_t *)) * npages));
596 	sptd->spt_realsize = ptob(npages);
597 	atomic_add_long(&spt_used, npages);
598 	sptcargs->seg_spt = seg;
599 	return (0);
600 
601 out4:
602 	seg->s_data = NULL;
603 	kmem_free(vp, sizeof (*vp));
604 out3:
605 	mutex_destroy(&sptd->spt_lock);
606 	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
607 		kmem_free(ppa, (sizeof (*ppa) * npages));
608 out2:
609 	kmem_free(sptd, sizeof (*sptd));
610 out1:
611 	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
612 		anon_swap_restore(npages);
613 	return (err);
614 }
615 
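/*
 * Release all pages backing the spt segment:  unload the translations,
 * then walk the anon map getting an SE_EXCL lock on each page.  For ISM
 * the per-page "permanent" lock is dropped and the locked-memory rctl
 * charge is returned; large pages are destroyed as a unit once the last
 * constituent page is reached, small pages are invalidated individually.
 */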
616 /*ARGSUSED*/
617 void
618 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
619 {
620 	struct page 	*pp;
621 	struct spt_data *sptd = (struct spt_data *)seg->s_data;
622 	pgcnt_t		npages;
623 	ulong_t		anon_idx;
624 	struct anon_map *amp;
625 	struct anon 	*ap;
626 	struct vnode 	*vp;
627 	u_offset_t 	off;
628 	uint_t		hat_flags;
629 	int		root = 0;
630 	pgcnt_t		pgs, curnpgs = 0;
631 	page_t		*rootpp;
632 	rctl_qty_t	unlocked_bytes = 0;
633 	kproject_t	*proj;
634 	kshmid_t	*sp;
635 
636 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
637 
638 	len = P2ROUNDUP(len, PAGESIZE);
639 
640 	npages = btop(len);
641 
642 	hat_flags = HAT_UNLOAD_UNLOCK;
643 	if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
644 	    (sptd->spt_flags & SHM_PAGEABLE)) {
645 		hat_flags = HAT_UNLOAD;
646 	}
647 
648 	hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
649 
650 	amp = sptd->spt_amp;
651 	if (sptd->spt_flags & SHM_PAGEABLE)
652 		npages = btop(amp->size);
653 
654 	ASSERT(amp != NULL);
655 
656 	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
657 		sp = amp->a_sp;
658 		proj = sp->shm_perm.ipc_proj;
659 		mutex_enter(&sp->shm_mlock);
660 	}
661 	for (anon_idx = 0; anon_idx < npages; anon_idx++) {
662 		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
663 			if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
664 				panic("segspt_free_pages: null app");
665 				/*NOTREACHED*/
666 			}
667 		} else {
668 			if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
669 			    == NULL)
670 				continue;
671 		}
672 		ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
673 		swap_xlate(ap, &vp, &off);
674 
675 		/*
676 		 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
677 		 * the pages will not be holding an SE_SHARED lock at
678 		 * this point.
679 		 *
680 		 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
681 		 * the pages are still held SE_SHARED locked from the
682 		 * original segspt_create().
683 		 *
684 		 * Our goal is to get an SE_EXCL lock on each page, remove
685 		 * the permanent lock on it and invalidate the page.
686 		 */
687 		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
688 			if (hat_flags == HAT_UNLOAD)
689 				pp = page_lookup(vp, off, SE_EXCL);
690 			else {
691 				if ((pp = page_find(vp, off)) == NULL) {
692 					panic("segspt_free_pages: "
693 					    "page not locked");
694 					/*NOTREACHED*/
695 				}
696 				if (!page_tryupgrade(pp)) {
697 					page_unlock(pp);
698 					pp = page_lookup(vp, off, SE_EXCL);
699 				}
700 			}
701 			if (pp == NULL) {
702 				panic("segspt_free_pages: "
703 				    "page not in the system");
704 				/*NOTREACHED*/
705 			}
706 			ASSERT(pp->p_lckcnt > 0);
707 			page_pp_unlock(pp, 0, 1);
708 			if (pp->p_lckcnt == 0)
709 				unlocked_bytes += PAGESIZE;
710 		} else {
711 			if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
712 				continue;
713 		}
714 		/*
715 		 * It's logical to invalidate the pages here as in most cases
716 		 * these were created by segspt.
717 		 */
718 		if (pp->p_szc != 0) {
719 			/*
720 			 * For DISM swap is released in shm_rm_amp.
721 			 */
722 			if ((sptd->spt_flags & SHM_PAGEABLE) == 0 &&
723 			    ap->an_pvp != NULL) {
724 				panic("segspt_free_pages: pvp non NULL");
725 				/*NOTREACHED*/
726 			}
727 			if (root == 0) {
728 				ASSERT(curnpgs == 0);
729 				root = 1;
730 				rootpp = pp;
731 				pgs = curnpgs = page_get_pagecnt(pp->p_szc);
732 				ASSERT(pgs > 1);
733 				ASSERT(IS_P2ALIGNED(pgs, pgs));
734 				ASSERT(!(page_pptonum(pp) & (pgs - 1)));
735 				curnpgs--;
736 			} else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
737 				ASSERT(curnpgs == 1);
738 				ASSERT(page_pptonum(pp) ==
739 				    page_pptonum(rootpp) + (pgs - 1));
740 				page_destroy_pages(rootpp);
741 				root = 0;
742 				curnpgs = 0;
743 			} else {
744 				ASSERT(curnpgs > 1);
745 				ASSERT(page_pptonum(pp) ==
746 				    page_pptonum(rootpp) + (pgs - curnpgs));
747 				curnpgs--;
748 			}
749 		} else {
750 			if (root != 0 || curnpgs != 0) {
751 				panic("segspt_free_pages: bad large page");
752 				/*NOTREACHED*/
753 			}
754 			/*LINTED: constant in conditional context */
755 			VN_DISPOSE(pp, B_INVAL, 0, kcred);
756 		}
757 	}
758 	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
759 		if (unlocked_bytes > 0)
760 			rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
761 		mutex_exit(&sp->shm_mlock);
762 	}
763 	if (root != 0 || curnpgs != 0) {
764 		panic("segspt_free_pages: bad large page");
765 		/*NOTREACHED*/
766 	}
767 
768 	/*
769 	 * mark that pages have been released
770 	 */
771 	sptd->spt_realsize = 0;
772 
773 	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
774 		atomic_add_long(&spt_used, -npages);
775 		anon_swap_restore(npages);
776 	}
777 }
778 
779 /*
780  * Get memory allocation policy info for specified address in given segment
781  */
782 static lgrp_mem_policy_info_t *
783 segspt_getpolicy(struct seg *seg, caddr_t addr)
784 {
785 	struct anon_map		*amp;
786 	ulong_t			anon_index;
787 	lgrp_mem_policy_info_t	*policy_info;
788 	struct spt_data		*spt_data;
789 
790 	ASSERT(seg != NULL);
791 
792 	/*
793 	 * Get anon_map from segspt
794 	 *
795 	 * Assume that no lock needs to be held on anon_map, since
796 	 * it should be protected by its reference count which must be
797 	 * nonzero for an existing segment.
798 	 * We do need to grab the readers lock on the policy tree, though.
799 	 */
800 	spt_data = (struct spt_data *)seg->s_data;
801 	if (spt_data == NULL)
802 		return (NULL);
803 	amp = spt_data->spt_amp;
804 	ASSERT(amp->refcnt != 0);
805 
806 	/*
807 	 * Get policy info
808 	 *
809 	 * Assume starting anon index of 0
810 	 */
811 	anon_index = seg_page(seg, addr);
812 	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
813 
814 	return (policy_info);
815 }
816 
817 /*
818  * DISM only.
819  * Return locked pages over a given range.
820  *
821  * We will cache all DISM locked pages and save the pplist for the
822  * entire segment in the ppa field of the underlying DISM segment structure.
823  * Later, during a call to segspt_reclaim() we will use this ppa array
824  * to page_unlock() all of the pages and then we will free this ppa list.
825  */
826 /*ARGSUSED*/
827 static int
828 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
829     struct page ***ppp, enum lock_type type, enum seg_rw rw)
830 {
831 	struct  shm_data *shmd = (struct shm_data *)seg->s_data;
832 	struct  seg	*sptseg = shmd->shm_sptseg;
833 	struct  spt_data *sptd = sptseg->s_data;
834 	pgcnt_t pg_idx, npages, tot_npages, npgs;
835 	struct  page **pplist, **pl, **ppa, *pp;
836 	struct  anon_map *amp;
837 	spgcnt_t	an_idx;
838 	int 	ret = ENOTSUP;
839 	uint_t	pl_built = 0;
840 	struct  anon *ap;
841 	struct  vnode *vp;
842 	u_offset_t off;
843 	pgcnt_t claim_availrmem = 0;
844 	uint_t	szc;
845 
846 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
847 
848 	/*
849 	 * We want to lock/unlock the entire ISM segment. Therefore,
850 	 * we will be using the underlying sptseg and its base address
851 	 * and length for the caching arguments.
852 	 */
853 	ASSERT(sptseg);
854 	ASSERT(sptd);
855 
856 	pg_idx = seg_page(seg, addr);
857 	npages = btopr(len);
858 
859 	/*
860 	 * check if the request is larger than the number of pages covered
861 	 * by amp
862 	 */
863 	if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
864 		*ppp = NULL;
865 		return (ENOTSUP);
866 	}
867 
868 	if (type == L_PAGEUNLOCK) {
869 		ASSERT(sptd->spt_ppa != NULL);
870 
871 		seg_pinactive(seg, seg->s_base, sptd->spt_amp->size,
872 		    sptd->spt_ppa, sptd->spt_prot, segspt_reclaim);
873 
874 		/*
875 		 * If someone is blocked while unmapping, we purge
876 		 * segment page cache and thus reclaim pplist synchronously
877 		 * without waiting for seg_pasync_thread. This speeds up
878 		 * unmapping in cases where munmap(2) is called, while
879 		 * raw async i/o is still in progress or where a thread
880 		 * exits on data fault in a multithreaded application.
881 		 */
882 		if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
883 			segspt_purge(seg);
884 		}
885 		return (0);
886 	} else if (type == L_PAGERECLAIM) {
887 		ASSERT(sptd->spt_ppa != NULL);
888 		(void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size,
889 		    sptd->spt_ppa, sptd->spt_prot);
890 		return (0);
891 	}
892 
893 	if (sptd->spt_flags & DISM_PPA_CHANGED) {
894 		segspt_purge(seg);
895 		/*
896 		 * for DISM the ppa array needs to be rebuilt since
897 		 * the number of locked pages could have changed
898 		 */
899 		*ppp = NULL;
900 		return (ENOTSUP);
901 	}
902 
903 	/*
904 	 * First try to find pages in segment page cache, without
905 	 * holding the segment lock.
906 	 */
907 	pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size,
908 	    sptd->spt_prot);
909 	if (pplist != NULL) {
910 		ASSERT(sptd->spt_ppa != NULL);
911 		ASSERT(sptd->spt_ppa == pplist);
912 		ppa = sptd->spt_ppa;
913 		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
914 			if (ppa[an_idx] == NULL) {
915 				seg_pinactive(seg, seg->s_base,
916 				    sptd->spt_amp->size, ppa,
917 				    sptd->spt_prot, segspt_reclaim);
918 				*ppp = NULL;
919 				return (ENOTSUP);
920 			}
921 			if ((szc = ppa[an_idx]->p_szc) != 0) {
922 				npgs = page_get_pagecnt(szc);
923 				an_idx = P2ROUNDUP(an_idx + 1, npgs);
924 			} else {
925 				an_idx++;
926 			}
927 		}
928 		/*
929 		 * Since we cache the entire DISM segment, we want to
930 		 * set ppp to point to the first slot that corresponds
931 		 * to the requested addr, i.e. pg_idx.
932 		 */
933 		*ppp = &(sptd->spt_ppa[pg_idx]);
934 		return (0);
935 	}
936 
937 	/* The L_PAGELOCK case... */
938 	mutex_enter(&sptd->spt_lock);
939 	/*
940 	 * try to find pages in segment page cache with mutex
941 	 */
942 	pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size,
943 	    sptd->spt_prot);
944 	if (pplist != NULL) {
945 		ASSERT(sptd->spt_ppa != NULL);
946 		ASSERT(sptd->spt_ppa == pplist);
947 		ppa = sptd->spt_ppa;
948 		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
949 			if (ppa[an_idx] == NULL) {
950 				mutex_exit(&sptd->spt_lock);
951 				seg_pinactive(seg, seg->s_base,
952 				    sptd->spt_amp->size, ppa,
953 				    sptd->spt_prot, segspt_reclaim);
954 				*ppp = NULL;
955 				return (ENOTSUP);
956 			}
957 			if ((szc = ppa[an_idx]->p_szc) != 0) {
958 				npgs = page_get_pagecnt(szc);
959 				an_idx = P2ROUNDUP(an_idx + 1, npgs);
960 			} else {
961 				an_idx++;
962 			}
963 		}
964 		/*
965 		 * Since we cache the entire DISM segment, we want to
966 		 * set ppp to point to the first slot that corresponds
967 		 * to the requested addr, i.e. pg_idx.
968 		 */
969 		mutex_exit(&sptd->spt_lock);
970 		*ppp = &(sptd->spt_ppa[pg_idx]);
971 		return (0);
972 	}
973 	if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) ==
974 	    SEGP_FAIL) {
975 		mutex_exit(&sptd->spt_lock);
976 		*ppp = NULL;
977 		return (ENOTSUP);
978 	}
979 
980 	/*
981 	 * No need to worry about protections because DISM pages are always rw.
982 	 */
983 	pl = pplist = NULL;
984 	amp = sptd->spt_amp;
985 
986 	/*
987 	 * Do we need to build the ppa array?
988 	 */
989 	if (sptd->spt_ppa == NULL) {
990 		pgcnt_t lpg_cnt = 0;
991 
992 		pl_built = 1;
993 		tot_npages = btopr(sptd->spt_amp->size);
994 
995 		ASSERT(sptd->spt_pcachecnt == 0);
996 		pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
997 		pl = pplist;
998 
999 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1000 		for (an_idx = 0; an_idx < tot_npages; ) {
1001 			ap = anon_get_ptr(amp->ahp, an_idx);
1002 			/*
1003 			 * Cache only mlocked pages. For large pages,
1004 			 * if one (constituent) page is mlocked,
1005 			 * all pages of that large page
1006 			 * are cached as well. This allows for quick
1007 			 * lookups in the ppa array.
1008 			 */
1009 			if ((ap != NULL) && (lpg_cnt != 0 ||
1010 			    (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
1011 
1012 				swap_xlate(ap, &vp, &off);
1013 				pp = page_lookup(vp, off, SE_SHARED);
1014 				ASSERT(pp != NULL);
1015 				if (lpg_cnt == 0) {
1016 					lpg_cnt++;
1017 					/*
1018 					 * For a small page, we are done --
1019 					 * lpg_cnt is reset to 0 below.
1020 					 *
1021 					 * For a large page, we are guaranteed
1022 					 * to find the anon structures of all
1023 					 * constituent pages and a non-zero
1024 					 * lpg_cnt ensures that we don't test
1025 					 * for mlock for these. We are done
1026 					 * when lpg_cnt reaches (npgs + 1).
1027 					 * If we are not the first constituent
1028 					 * page, restart at the first one.
1029 					 */
1030 					npgs = page_get_pagecnt(pp->p_szc);
1031 					if (!IS_P2ALIGNED(an_idx, npgs)) {
1032 						an_idx = P2ALIGN(an_idx, npgs);
1033 						page_unlock(pp);
1034 						continue;
1035 					}
1036 				}
1037 				if (++lpg_cnt > npgs)
1038 					lpg_cnt = 0;
1039 
1040 				/*
1041 				 * availrmem is decremented only
1042 				 * for unlocked pages
1043 				 */
1044 				if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1045 					claim_availrmem++;
1046 				pplist[an_idx] = pp;
1047 			}
1048 			an_idx++;
1049 		}
1050 		ANON_LOCK_EXIT(&amp->a_rwlock);
1051 
1052 		mutex_enter(&freemem_lock);
1053 		if (availrmem < tune.t_minarmem + claim_availrmem) {
1054 			mutex_exit(&freemem_lock);
1055 			ret = FC_MAKE_ERR(ENOMEM);
1056 			claim_availrmem = 0;
1057 			goto insert_fail;
1058 		} else {
1059 			availrmem -= claim_availrmem;
1060 		}
1061 		mutex_exit(&freemem_lock);
1062 
1063 		sptd->spt_ppa = pl;
1064 	} else {
1065 		/*
1066 		 * We already have a valid ppa[].
1067 		 */
1068 		pl = sptd->spt_ppa;
1069 	}
1070 
1071 	ASSERT(pl != NULL);
1072 
1073 	ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size,
1074 	    pl, sptd->spt_prot, SEGP_FORCE_WIRED | SEGP_ASYNC_FLUSH,
1075 	    segspt_reclaim);
1076 	if (ret == SEGP_FAIL) {
1077 		/*
1078 		 * seg_pinsert failed. We return
1079 		 * ENOTSUP, so that the as_pagelock() code will
1080 		 * then try the slower F_SOFTLOCK path.
1081 		 */
1082 		if (pl_built) {
1083 			/*
1084 			 * No one else has referenced the ppa[].
1085 			 * We created it and we need to destroy it.
1086 			 */
1087 			sptd->spt_ppa = NULL;
1088 		}
1089 		ret = ENOTSUP;
1090 		goto insert_fail;
1091 	}
1092 
1093 	/*
1094 	 * In either case, we increment softlockcnt on the 'real' segment.
1095 	 */
1096 	sptd->spt_pcachecnt++;
1097 	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
1098 
1099 	ppa = sptd->spt_ppa;
1100 	for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1101 		if (ppa[an_idx] == NULL) {
1102 			mutex_exit(&sptd->spt_lock);
1103 			seg_pinactive(seg, seg->s_base, sptd->spt_amp->size,
1104 			    pl, sptd->spt_prot, segspt_reclaim);
1105 			*ppp = NULL;
1106 			return (ENOTSUP);
1107 		}
1108 		if ((szc = ppa[an_idx]->p_szc) != 0) {
1109 			npgs = page_get_pagecnt(szc);
1110 			an_idx = P2ROUNDUP(an_idx + 1, npgs);
1111 		} else {
1112 			an_idx++;
1113 		}
1114 	}
1115 	/*
1116 	 * We can now drop the sptd->spt_lock since the ppa[]
1117 	 * exists and we have incremented pcachecnt.
1118 	 */
1119 	mutex_exit(&sptd->spt_lock);
1120 
1121 	/*
1122 	 * Since we cache the entire segment, we want to
1123 	 * set ppp to point to the first slot that corresponds
1124 	 * to the requested addr, i.e. pg_idx.
1125 	 */
1126 	*ppp = &(sptd->spt_ppa[pg_idx]);
1127 	return (ret);
1128 
1129 insert_fail:
1130 	/*
1131 	 * We will only reach this code if we tried and failed.
1132 	 *
1133 	 * And we can drop the lock on the dummy seg, once we've failed
1134 	 * to set up a new ppa[].
1135 	 */
1136 	mutex_exit(&sptd->spt_lock);
1137 
1138 	if (pl_built) {
1139 		mutex_enter(&freemem_lock);
1140 		availrmem += claim_availrmem;
1141 		mutex_exit(&freemem_lock);
1142 
1143 		/*
1144 		 * We created pl and we need to destroy it.
1145 		 */
1146 		pplist = pl;
1147 		for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1148 			if (pplist[an_idx] != NULL)
1149 				page_unlock(pplist[an_idx]);
1150 		}
1151 		kmem_free(pl, sizeof (page_t *) * tot_npages);
1152 	}
1153 
1154 	if (shmd->shm_softlockcnt <= 0) {
1155 		if (AS_ISUNMAPWAIT(seg->s_as)) {
1156 			mutex_enter(&seg->s_as->a_contents);
1157 			if (AS_ISUNMAPWAIT(seg->s_as)) {
1158 				AS_CLRUNMAPWAIT(seg->s_as);
1159 				cv_broadcast(&seg->s_as->a_cv);
1160 			}
1161 			mutex_exit(&seg->s_as->a_contents);
1162 		}
1163 	}
1164 	*ppp = NULL;
1165 	return (ret);
1166 }
1167 
1168 
1169 
1170 /*
1171  * Return locked pages over a given range.
1172  *
1173  * We will cache the entire ISM segment and save the pplist for the
1174  * entire segment in the ppa field of the underlying ISM segment structure.
1175  * Later, during a call to segspt_reclaim() we will use this ppa array
1176  * to page_unlock() all of the pages and then we will free this ppa list.
1177  */
1178 /*ARGSUSED*/
1179 static int
1180 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1181     struct page ***ppp, enum lock_type type, enum seg_rw rw)
1182 {
1183 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
1184 	struct seg	*sptseg = shmd->shm_sptseg;
1185 	struct spt_data *sptd = sptseg->s_data;
1186 	pgcnt_t np, page_index, npages;
1187 	caddr_t a, spt_base;
1188 	struct page **pplist, **pl, *pp;
1189 	struct anon_map *amp;
1190 	ulong_t anon_index;
1191 	int ret = ENOTSUP;
1192 	uint_t	pl_built = 0;
1193 	struct anon *ap;
1194 	struct vnode *vp;
1195 	u_offset_t off;
1196 
1197 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1198 
1199 	/*
1200 	 * We want to lock/unlock the entire ISM segment. Therefore,
1201 	 * we will be using the underlying sptseg and its base address
1202 	 * and length for the caching arguments.
1203 	 */
1204 	ASSERT(sptseg);
1205 	ASSERT(sptd);
1206 
1207 	if (sptd->spt_flags & SHM_PAGEABLE) {
1208 		return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1209 	}
1210 
1211 	page_index = seg_page(seg, addr);
1212 	npages = btopr(len);
1213 
1214 	/*
1215 	 * check if the request is larger than the number of pages covered
1216 	 * by amp
1217 	 */
1218 	if (page_index + npages > btopr(sptd->spt_amp->size)) {
1219 		*ppp = NULL;
1220 		return (ENOTSUP);
1221 	}
1222 
1223 	if (type == L_PAGEUNLOCK) {
1224 
1225 		ASSERT(sptd->spt_ppa != NULL);
1226 
1227 		seg_pinactive(seg, seg->s_base, sptd->spt_amp->size,
1228 		    sptd->spt_ppa, sptd->spt_prot, segspt_reclaim);
1229 
1230 		/*
1231 		 * If someone is blocked while unmapping, we purge
1232 		 * segment page cache and thus reclaim pplist synchronously
1233 		 * without waiting for seg_pasync_thread. This speeds up
1234 		 * unmapping in cases where munmap(2) is called, while
1235 		 * raw async i/o is still in progress or where a thread
1236 		 * exits on data fault in a multithreaded application.
1237 		 */
1238 		if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1239 			segspt_purge(seg);
1240 		}
1241 		return (0);
1242 	} else if (type == L_PAGERECLAIM) {
1243 		ASSERT(sptd->spt_ppa != NULL);
1244 
1245 		(void) segspt_reclaim(seg, seg->s_base, sptd->spt_amp->size,
1246 		    sptd->spt_ppa, sptd->spt_prot);
1247 		return (0);
1248 	}
1249 
1250 	/*
1251 	 * First try to find pages in segment page cache, without
1252 	 * holding the segment lock.
1253 	 */
1254 	pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size,
1255 	    sptd->spt_prot);
1256 	if (pplist != NULL) {
1257 		ASSERT(sptd->spt_ppa == pplist);
1258 		ASSERT(sptd->spt_ppa[page_index]);
1259 		/*
1260 		 * Since we cache the entire ISM segment, we want to
1261 		 * set ppp to point to the first slot that corresponds
1262 		 * to the requested addr, i.e. page_index.
1263 		 */
1264 		*ppp = &(sptd->spt_ppa[page_index]);
1265 		return (0);
1266 	}
1267 
1268 	/* The L_PAGELOCK case... */
1269 	mutex_enter(&sptd->spt_lock);
1270 
1271 	/*
1272 	 * try to find pages in segment page cache
1273 	 */
1274 	pplist = seg_plookup(seg, seg->s_base, sptd->spt_amp->size,
1275 	    sptd->spt_prot);
1276 	if (pplist != NULL) {
1277 		ASSERT(sptd->spt_ppa == pplist);
1278 		/*
1279 		 * Since we cache the entire segment, we want to
1280 		 * set ppp to point to the first slot that corresponds
1281 		 * to the requested addr, i.e. page_index.
1282 		 */
1283 		mutex_exit(&sptd->spt_lock);
1284 		*ppp = &(sptd->spt_ppa[page_index]);
1285 		return (0);
1286 	}
1287 
1288 	if (seg_pinsert_check(seg, sptd->spt_amp->size, SEGP_FORCE_WIRED) ==
1289 	    SEGP_FAIL) {
1290 		mutex_exit(&sptd->spt_lock);
1291 		*ppp = NULL;
1292 		return (ENOTSUP);
1293 	}
1294 
1295 	/*
1296 	 * No need to worry about protections because ISM pages
1297 	 * are always rw.
1298 	 */
1299 	pl = pplist = NULL;
1300 
1301 	/*
1302 	 * Do we need to build the ppa array?
1303 	 */
1304 	if (sptd->spt_ppa == NULL) {
1305 		ASSERT(sptd->spt_ppa == pplist);
1306 
1307 		spt_base = sptseg->s_base;
1308 		pl_built = 1;
1309 
1310 		/*
1311 		 * availrmem is decremented once during anon_swap_adjust()
1312 		 * and is incremented during the anon_unresv(), which is
1313 		 * called from shm_rm_amp() when the segment is destroyed.
1314 		 */
1315 		amp = sptd->spt_amp;
1316 		ASSERT(amp != NULL);
1317 
1318 		/* pcachecnt is protected by sptd->spt_lock */
1319 		ASSERT(sptd->spt_pcachecnt == 0);
1320 		pplist = kmem_zalloc(sizeof (page_t *)
1321 		    * btopr(sptd->spt_amp->size), KM_SLEEP);
1322 		pl = pplist;
1323 
1324 		anon_index = seg_page(sptseg, spt_base);
1325 
1326 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1327 		for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1328 		    a += PAGESIZE, anon_index++, pplist++) {
1329 			ap = anon_get_ptr(amp->ahp, anon_index);
1330 			ASSERT(ap != NULL);
1331 			swap_xlate(ap, &vp, &off);
1332 			pp = page_lookup(vp, off, SE_SHARED);
1333 			ASSERT(pp != NULL);
1334 			*pplist = pp;
1335 		}
1336 		ANON_LOCK_EXIT(&amp->a_rwlock);
1337 
1338 		if (a < (spt_base + sptd->spt_amp->size)) {
1339 			ret = ENOTSUP;
1340 			goto insert_fail;
1341 		}
1342 		sptd->spt_ppa = pl;
1343 	} else {
1344 		/*
1345 		 * We already have a valid ppa[].
1346 		 */
1347 		pl = sptd->spt_ppa;
1348 	}
1349 
1350 	ASSERT(pl != NULL);
1351 
1352 	ret = seg_pinsert(seg, seg->s_base, sptd->spt_amp->size,
1353 	    pl, sptd->spt_prot, SEGP_FORCE_WIRED, segspt_reclaim);
1354 	if (ret == SEGP_FAIL) {
1355 		/*
1356 		 * seg_pinsert failed. We return
1357 		 * ENOTSUP, so that the as_pagelock() code will
1358 		 * then try the slower F_SOFTLOCK path.
1359 		 */
1360 		if (pl_built) {
1361 			/*
1362 			 * No one else has referenced the ppa[].
1363 			 * We created it and we need to destroy it.
1364 			 */
1365 			sptd->spt_ppa = NULL;
1366 		}
1367 		ret = ENOTSUP;
1368 		goto insert_fail;
1369 	}
1370 
1371 	/*
1372 	 * In either case, we increment softlockcnt on the 'real' segment.
1373 	 */
1374 	sptd->spt_pcachecnt++;
1375 	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
1376 
1377 	/*
1378 	 * We can now drop the sptd->spt_lock since the ppa[]
1379 	 * exists and we have incremented pcachecnt.
1380 	 */
1381 	mutex_exit(&sptd->spt_lock);
1382 
1383 	/*
1384 	 * Since we cache the entire segment, we want to
1385 	 * set ppp to point to the first slot that corresponds
1386 	 * to the requested addr, i.e. page_index.
1387 	 */
1388 	*ppp = &(sptd->spt_ppa[page_index]);
1389 	return (ret);
1390 
1391 insert_fail:
1392 	/*
1393 	 * We will only reach this code if we tried and failed.
1394 	 *
1395 	 * And we can drop the lock on the dummy seg, once we've failed
1396 	 * to set up a new ppa[].
1397 	 */
1398 	mutex_exit(&sptd->spt_lock);
1399 
1400 	if (pl_built) {
1401 		/*
1402 		 * We created pl and we need to destroy it.
1403 		 */
1404 		pplist = pl;
1405 		np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1406 		while (np) {
1407 			page_unlock(*pplist);
1408 			np--;
1409 			pplist++;
1410 		}
1411 		kmem_free(pl, sizeof (page_t *) *
1412 				btopr(sptd->spt_amp->size));
1413 	}
1414 	if (shmd->shm_softlockcnt <= 0) {
1415 		if (AS_ISUNMAPWAIT(seg->s_as)) {
1416 			mutex_enter(&seg->s_as->a_contents);
1417 			if (AS_ISUNMAPWAIT(seg->s_as)) {
1418 				AS_CLRUNMAPWAIT(seg->s_as);
1419 				cv_broadcast(&seg->s_as->a_cv);
1420 			}
1421 			mutex_exit(&seg->s_as->a_contents);
1422 		}
1423 	}
1424 	*ppp = NULL;
1425 	return (ret);
1426 }
1427 
1428 /*
1429  * purge any cached pages in the I/O page cache
1430  */
1431 static void
1432 segspt_purge(struct seg *seg)
1433 {
1434 	seg_ppurge(seg);
1435 }
1436 
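/*
 * Pagelock-cache reclaim callback, invoked when a cached ppa[] for this
 * segment is being torn down.  When the last cached reference goes away
 * we unlock every page in the list, give back any availrmem claimed for
 * DISM, free the ppa[] itself and finally wake up any waiting unmappers.
 */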
1437 static int
1438 segspt_reclaim(struct seg *seg, caddr_t addr, size_t len, struct page **pplist,
1439 	enum seg_rw rw)
1440 {
1441 	struct	shm_data *shmd = (struct shm_data *)seg->s_data;
1442 	struct	seg	*sptseg;
1443 	struct	spt_data *sptd;
1444 	pgcnt_t npages, i, free_availrmem = 0;
1445 	int	done = 0;
1446 
1447 #ifdef lint
1448 	addr = addr;
1449 #endif
1450 	sptseg = shmd->shm_sptseg;
1451 	sptd = sptseg->s_data;
1452 	npages = (len >> PAGESHIFT);
1453 	ASSERT(npages);
1454 	ASSERT(sptd->spt_pcachecnt != 0);
1455 	ASSERT(sptd->spt_ppa == pplist);
1456 	ASSERT(npages == btopr(sptd->spt_amp->size));
1457 	/*
1458 	 * Acquire the lock on the dummy seg and destroy the
1459 	 * ppa array IF this is the last pcachecnt.
1460 	 */
1461 	mutex_enter(&sptd->spt_lock);
1462 	if (--sptd->spt_pcachecnt == 0) {
1463 		for (i = 0; i < npages; i++) {
1464 			if (pplist[i] == NULL) {
1465 				continue;
1466 			}
1467 			if (rw == S_WRITE) {
1468 				hat_setrefmod(pplist[i]);
1469 			} else {
1470 				hat_setref(pplist[i]);
1471 			}
1472 			if ((sptd->spt_flags & SHM_PAGEABLE) &&
1473 			    (sptd->spt_ppa_lckcnt[i] == 0))
1474 				free_availrmem++;
1475 			page_unlock(pplist[i]);
1476 		}
1477 		if (sptd->spt_flags & SHM_PAGEABLE) {
1478 			mutex_enter(&freemem_lock);
1479 			availrmem += free_availrmem;
1480 			mutex_exit(&freemem_lock);
1481 		}
1482 		/*
1483 	 * Since we want to cache/uncache the entire ISM segment,
1484 		 * we will track the pplist in a segspt specific field
1485 		 * ppa, that is initialized at the time we add an entry to
1486 		 * the cache.
1487 		 */
1488 		ASSERT(sptd->spt_pcachecnt == 0);
1489 		kmem_free(pplist, sizeof (page_t *) * npages);
1490 		sptd->spt_ppa = NULL;
1491 		sptd->spt_flags &= ~DISM_PPA_CHANGED;
1492 		done = 1;
1493 	}
1494 	mutex_exit(&sptd->spt_lock);
1495 	/*
1496 	 * Now decrement softlockcnt.
1497 	 */
1498 	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1);
1499 
1500 	if (shmd->shm_softlockcnt <= 0) {
1501 		if (AS_ISUNMAPWAIT(seg->s_as)) {
1502 			mutex_enter(&seg->s_as->a_contents);
1503 			if (AS_ISUNMAPWAIT(seg->s_as)) {
1504 				AS_CLRUNMAPWAIT(seg->s_as);
1505 				cv_broadcast(&seg->s_as->a_cv);
1506 			}
1507 			mutex_exit(&seg->s_as->a_contents);
1508 		}
1509 	}
1510 	return (done);
1511 }
1512 
1513 /*
1514  * Do a F_SOFTUNLOCK call over the range requested.
1515  * The range must have already been F_SOFTLOCK'ed.
1516  *
1517  * The calls to acquire and release the anon map lock mutex were
1518  * removed in order to avoid a deadly embrace during a DR
1519  * memory delete operation.  (E.g. DR blocks while waiting for an
1520  * exclusive lock on a page that is being used for kaio; the
1521  * thread that will complete the kaio and call segspt_softunlock
1522  * blocks on the anon map lock; another thread holding the anon
1523  * map lock blocks on another page lock via the segspt_shmfault
1524  * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1525  *
1526  * The appropriateness of the removal is based upon the following:
1527  * 1. If we are holding a segment's reader lock and the page is held
1528  * shared, then the corresponding element in anonmap which points to
1529  * anon struct cannot change and there is no need to acquire the
1530  * anonymous map lock.
1531  * 2. Threads in segspt_softunlock have a reader lock on the segment
1532  * and already have the shared page lock, so we are guaranteed that
1533  * the anon map slot cannot change and therefore can call anon_get_ptr()
1534  * without grabbing the anonymous map lock.
1535  * 3. Threads that softlock a shared page break copy-on-write, even if
1536  * it's a read.  Thus cow faults can be ignored with respect to soft
1537  * unlocking, since the breaking of cow means that the anon slot(s) will
1538  * not be shared.
1539  */
1540 static void
1541 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1542 	size_t len, enum seg_rw rw)
1543 {
1544 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
1545 	struct seg	*sptseg;
1546 	struct spt_data *sptd;
1547 	page_t *pp;
1548 	caddr_t adr;
1549 	struct vnode *vp;
1550 	u_offset_t offset;
1551 	ulong_t anon_index;
1552 	struct anon_map *amp;		/* XXX - for locknest */
1553 	struct anon *ap = NULL;
1554 	pgcnt_t npages;
1555 
1556 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1557 
1558 	sptseg = shmd->shm_sptseg;
1559 	sptd = sptseg->s_data;
1560 
1561 	/*
1562 	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1563 	 * and therefore their pages are SE_SHARED locked
1564 	 * for the entire life of the segment.
1565 	 */
1566 	if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1567 		((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1568 		goto softlock_decrement;
1569 	}
1570 
1571 	/*
1572 	 * Any thread is free to do a page_find and
1573 	 * page_unlock() on the pages within this seg.
1574 	 *
1575 	 * We are already holding the as->a_lock on the user's
1576 	 * real segment, but we need to hold the a_lock on the
1577 	 * underlying dummy as. This is mostly to satisfy the
1578 	 * underlying HAT layer.
1579 	 */
1580 	AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1581 	hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1582 	AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1583 
1584 	amp = sptd->spt_amp;
1585 	ASSERT(amp != NULL);
1586 	anon_index = seg_page(sptseg, sptseg_addr);
1587 
1588 	for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1589 		ap = anon_get_ptr(amp->ahp, anon_index++);
1590 		ASSERT(ap != NULL);
1591 		swap_xlate(ap, &vp, &offset);
1592 
1593 		/*
1594 		 * Use page_find() instead of page_lookup() to
1595 		 * find the page since we know that it has a
1596 		 * "shared" lock.
1597 		 */
1598 		pp = page_find(vp, offset);
1599 		ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1600 		if (pp == NULL) {
1601 			panic("segspt_softunlock: "
1602 			    "addr %p, ap %p, vp %p, off %llx",
1603 			    (void *)adr, (void *)ap, (void *)vp, offset);
1604 			/*NOTREACHED*/
1605 		}
1606 
1607 		if (rw == S_WRITE) {
1608 			hat_setrefmod(pp);
1609 		} else if (rw != S_OTHER) {
1610 			hat_setref(pp);
1611 		}
1612 		page_unlock(pp);
1613 	}
1614 
1615 softlock_decrement:
1616 	npages = btopr(len);
1617 	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1618 	if (shmd->shm_softlockcnt == 0) {
1619 		/*
1620 		 * All SOFTLOCKS are gone. Wake up any waiting
1621 		 * unmappers so they can try again to unmap.
1622 		 * Check for waiters first without the mutex
1623 		 * held so we don't always grab the mutex on
1624 		 * softunlocks.
1625 		 */
1626 		if (AS_ISUNMAPWAIT(seg->s_as)) {
1627 			mutex_enter(&seg->s_as->a_contents);
1628 			if (AS_ISUNMAPWAIT(seg->s_as)) {
1629 				AS_CLRUNMAPWAIT(seg->s_as);
1630 				cv_broadcast(&seg->s_as->a_cv);
1631 			}
1632 			mutex_exit(&seg->s_as->a_contents);
1633 		}
1634 	}
1635 }
1636 
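/*
 * Attach a process to an existing ISM/DISM segment:  set up the
 * per-process shm_data, point it at the shared sptseg, and share the
 * dummy segment's translations with hat_share() (for DISM only when the
 * HAT supports HAT_DYNAMIC_ISM_UNMAP).  DISM also gets a per-page vpage
 * array used to track locked pages, and the anon_map reference count is
 * bumped on success.
 */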
1637 int
1638 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1639 {
1640 	struct shm_data *shmd_arg = (struct shm_data *)argsp;
1641 	struct shm_data *shmd;
1642 	struct anon_map *shm_amp = shmd_arg->shm_amp;
1643 	struct spt_data *sptd;
1644 	int error = 0;
1645 
1646 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1647 
1648 	shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1649 	if (shmd == NULL)
1650 		return (ENOMEM);
1651 
1652 	shmd->shm_sptas = shmd_arg->shm_sptas;
1653 	shmd->shm_amp = shm_amp;
1654 	shmd->shm_sptseg = shmd_arg->shm_sptseg;
1655 
1656 	(void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1657 	    NULL, 0, seg->s_size);
1658 
1659 	seg->s_data = (void *)shmd;
1660 	seg->s_ops = &segspt_shmops;
1661 	seg->s_szc = shmd->shm_sptseg->s_szc;
1662 	sptd = shmd->shm_sptseg->s_data;
1663 
1664 	if (sptd->spt_flags & SHM_PAGEABLE) {
1665 		if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1666 		    KM_NOSLEEP)) == NULL) {
1667 			seg->s_data = (void *)NULL;
1668 			kmem_free(shmd, (sizeof (*shmd)));
1669 			return (ENOMEM);
1670 		}
1671 		shmd->shm_lckpgs = 0;
1672 		if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1673 			if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1674 			    shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1675 			    seg->s_size, seg->s_szc)) != 0) {
1676 				kmem_free(shmd->shm_vpage,
1677 					btopr(shm_amp->size));
1678 			}
1679 		}
1680 	} else {
1681 		error = hat_share(seg->s_as->a_hat, seg->s_base,
1682 				shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1683 				seg->s_size, seg->s_szc);
1684 	}
1685 	if (error) {
1686 		seg->s_szc = 0;
1687 		seg->s_data = (void *)NULL;
1688 		kmem_free(shmd, (sizeof (*shmd)));
1689 	} else {
1690 		ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1691 		shm_amp->refcnt++;
1692 		ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1693 	}
1694 	return (error);
1695 }
1696 
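/*
 * Detach a process from the segment.  If softlocked pages are
 * outstanding we purge the pagelock cache once and retry, otherwise
 * return EAGAIN.  Only whole-segment unmaps are allowed; we drop any
 * page locks via MC_UNLOCK, unshare the translations and free the
 * per-process segment.
 */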
1697 int
1698 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1699 {
1700 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
1701 	int reclaim = 1;
1702 
1703 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1704 retry:
1705 	if (shmd->shm_softlockcnt > 0) {
1706 		if (reclaim == 1) {
1707 			segspt_purge(seg);
1708 			reclaim = 0;
1709 			goto retry;
1710 		}
1711 		return (EAGAIN);
1712 	}
1713 
1714 	if (ssize != seg->s_size) {
1715 #ifdef DEBUG
1716 		cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1717 		    ssize, seg->s_size);
1718 #endif
1719 		return (EINVAL);
1720 	}
1721 
1722 	(void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1723 	    NULL, 0);
1724 	hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1725 
1726 	seg_free(seg);
1727 
1728 	return (0);
1729 }
1730 
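/*
 * Free the per-process shm segment data:  drop any remaining page locks,
 * release our reference on the shared anon_map and free the shm_data
 * (including the DISM vpage array, if any).
 */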
1731 void
1732 segspt_shmfree(struct seg *seg)
1733 {
1734 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
1735 	struct anon_map *shm_amp = shmd->shm_amp;
1736 
1737 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1738 
1739 	(void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1740 		MC_UNLOCK, NULL, 0);
1741 
1742 	/*
1743 	 * Need to increment refcnt when attaching
1744 	 * and decrement when detaching because of dup().
1745 	 */
1746 	ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1747 	shm_amp->refcnt--;
1748 	ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1749 
1750 	if (shmd->shm_vpage) {	/* only for DISM */
1751 		kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1752 		shmd->shm_vpage = NULL;
1753 	}
1754 	kmem_free(shmd, sizeof (*shmd));
1755 }
1756 
1757 /*ARGSUSED*/
1758 int
1759 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1760 {
1761 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1762 
1763 	/*
1764 	 * A shared page table is more than a shared mapping.
1765 	 *  An individual process sharing the page tables can't change
1766 	 *  protections because there is only one set of page tables.
1767 	 *  This will be allowed once private page tables are
1768 	 *  supported.
1769 	 */
1770 /* need to return correct status error? */
1771 	return (0);
1772 }
1773 
1774 
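/*
 * Fault handler for DISM segments.  The request is expanded to the
 * underlying large page size, the backing pages are obtained via
 * spt_anon_getpages(), and shared translations are loaded with
 * hat_memload_array().  F_SOFTLOCK additionally charges availrmem,
 * bumps shm_softlockcnt and leaves both the translations and the pages
 * locked until segspt_softunlock().
 */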
1775 faultcode_t
1776 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1777     size_t len, enum fault_type type, enum seg_rw rw)
1778 {
1779 	struct  shm_data 	*shmd = (struct shm_data *)seg->s_data;
1780 	struct  seg		*sptseg = shmd->shm_sptseg;
1781 	struct  as		*curspt = shmd->shm_sptas;
1782 	struct  spt_data 	*sptd = sptseg->s_data;
1783 	pgcnt_t npages;
1784 	size_t  size;
1785 	caddr_t segspt_addr, shm_addr;
1786 	page_t  **ppa;
1787 	int	i;
1788 	ulong_t an_idx = 0;
1789 	int	err = 0;
1790 	int	dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1791 	size_t	pgsz;
1792 	pgcnt_t	pgcnt;
1793 	caddr_t	a;
1794 	pgcnt_t	pidx;
1795 
1796 #ifdef lint
1797 	hat = hat;
1798 #endif
1799 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1800 
1801 	/*
1802 	 * Because of the way spt is implemented,
1803 	 * the realsize of the segment does not have to be
1804 	 * equal to the segment size itself. The segment size is
1805 	 * often in multiples of a page size larger than PAGESIZE.
1806 	 * The realsize is rounded up to the nearest PAGESIZE
1807 	 * based on what the user requested. This is a bit of
1808 	 * ugliness that is historical but not easily fixed
1809 	 * without re-designing the higher levels of ISM.
1810 	 */
1811 	ASSERT(addr >= seg->s_base);
1812 	if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1813 		return (FC_NOMAP);
1814 	/*
1815 	 * For all of the following cases except F_PROT, we need to
1816 	 * make any necessary adjustments to addr and len
1817 	 * and get all of the necessary page_t's into an array called ppa[].
1818 	 *
1819 	 * The code in shmat() forces base addr and len of ISM segment
1820 	 * to be aligned to largest page size supported. Therefore,
1821 	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1822 	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1823 	 * in large pagesize chunks, or else we will screw up the HAT
1824 	 * layer by calling hat_memload_array() with differing page sizes
1825 	 * over a given virtual range.
1826 	 */
1827 	pgsz = page_get_pagesize(sptseg->s_szc);
1828 	pgcnt = page_get_pagecnt(sptseg->s_szc);
1829 	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1830 	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1831 	npages = btopr(size);
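	/*
	 * Illustrative only: assuming an 8K PAGESIZE and a 4M large page,
	 * a fault at addr = seg->s_base + 0x403000 with len = 0x2000 yields
	 * shm_addr = seg->s_base + 0x400000, size = 0x400000 and
	 * npages = 512, i.e. the entire large page containing the fault.
	 */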
1832 
1833 	/*
1834 	 * Now we need to convert from addr in segshm to addr in segspt.
1835 	 */
1836 	an_idx = seg_page(seg, shm_addr);
1837 	segspt_addr = sptseg->s_base + ptob(an_idx);
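	/*
	 * segspt_addr now names the same page within the dummy spt segment:
	 * both segments map the shared anon_map one page for one page, so
	 * the page index of shm_addr within the shm segment is also its
	 * page offset from sptseg->s_base.
	 */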
1838 
1839 	ASSERT((segspt_addr + ptob(npages)) <=
1840 		(sptseg->s_base + sptd->spt_realsize));
1841 	ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1842 
1843 	switch (type) {
1844 
1845 	case F_SOFTLOCK:
1846 
1847 		mutex_enter(&freemem_lock);
1848 		if (availrmem < tune.t_minarmem + npages) {
1849 			mutex_exit(&freemem_lock);
1850 			return (FC_MAKE_ERR(ENOMEM));
1851 		} else {
1852 			availrmem -= npages;
1853 		}
1854 		mutex_exit(&freemem_lock);
1855 		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
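		/*
		 * This availrmem charge is returned in the F_SOFTUNLOCK
		 * case below, or immediately if spt_anon_getpages() fails
		 * in the F_INVAL code we fall through to.
		 */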
1856 		/*
1857 		 * Fall through to the F_INVAL case to load up the hat layer
1858 		 * entries with the HAT_LOAD_LOCK flag.
1859 		 */
1860 		/* FALLTHRU */
1861 	case F_INVAL:
1862 
1863 		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1864 			return (FC_NOMAP);
1865 
1866 		ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1867 
1868 		err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1869 		if (err != 0) {
1870 			if (type == F_SOFTLOCK) {
1871 				mutex_enter(&freemem_lock);
1872 				availrmem += npages;
1873 				mutex_exit(&freemem_lock);
1874 				atomic_add_long((ulong_t *)(
1875 				    &(shmd->shm_softlockcnt)), -npages);
1876 			}
1877 			goto dism_err;
1878 		}
1879 		AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1880 		a = segspt_addr;
1881 		pidx = 0;
1882 		if (type == F_SOFTLOCK) {
1883 
1884 			/*
1885 			 * Load up the translation keeping it
1886 			 * locked and don't unlock the page.
1887 			 */
1888 			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1889 				hat_memload_array(sptseg->s_as->a_hat,
1890 				    a, pgsz, &ppa[pidx], sptd->spt_prot,
1891 				    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1892 			}
1893 		} else {
1894 			if (hat == seg->s_as->a_hat) {
1895 
1896 				/*
1897 				 * Migrate pages marked for migration
1898 				 */
1899 				if (lgrp_optimizations())
1900 					page_migrate(seg, shm_addr, ppa,
1901 					    npages);
1902 
1903 				/* CPU HAT */
1904 				for (; pidx < npages;
1905 				    a += pgsz, pidx += pgcnt) {
1906 					hat_memload_array(sptseg->s_as->a_hat,
1907 					    a, pgsz, &ppa[pidx],
1908 					    sptd->spt_prot,
1909 					    HAT_LOAD_SHARE);
1910 				}
1911 			} else {
1912 				/* XHAT. Pass real address */
1913 				hat_memload_array(hat, shm_addr,
1914 				    size, ppa, sptd->spt_prot, HAT_LOAD_SHARE);
1915 			}
1916 
1917 			/*
1918 			 * And now drop the SE_SHARED lock(s).
1919 			 */
1920 			if (dyn_ism_unmap) {
1921 				for (i = 0; i < npages; i++) {
1922 					page_unlock(ppa[i]);
1923 				}
1924 			}
1925 		}
1926 
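		/*
		 * Platforms without HAT_DYNAMIC_ISM_UNMAP establish the
		 * shared translations with hat_share() instead; for
		 * F_SOFTLOCK the pages stay SE_SHARED locked, and only
		 * the F_INVAL case drops the page locks here.
		 */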
1927 		if (!dyn_ism_unmap) {
1928 			if (hat_share(seg->s_as->a_hat, shm_addr,
1929 			    curspt->a_hat, segspt_addr, ptob(npages),
1930 			    seg->s_szc) != 0) {
1931 				panic("hat_share err in DISM fault");
1932 				/* NOTREACHED */
1933 			}
1934 			if (type == F_INVAL) {
1935 				for (i = 0; i < npages; i++) {
1936 					page_unlock(ppa[i]);
1937 				}
1938 			}
1939 		}
1940 		AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1941 dism_err:
1942 		kmem_free(ppa, npages * sizeof (page_t *));
1943 		return (err);
1944 
1945 	case F_SOFTUNLOCK:
1946 
1947 		mutex_enter(&freemem_lock);
1948 		availrmem += npages;
1949 		mutex_exit(&freemem_lock);
1950 
1951 		/*
1952 		 * This is a bit ugly: we pass in the real seg pointer,
1953 		 * but the segspt_addr is the virtual address within the
1954 		 * dummy seg.
1955 		 */
1956 		segspt_softunlock(seg, segspt_addr, size, rw);
1957 		return (0);
1958 
1959 	case F_PROT:
1960 
1961 		/*
1962 		 * This takes care of the unusual case where a user
1963 		 * allocates a stack in shared memory and a register
1964 		 * window overflow is written to that stack page before
1965 		 * it is otherwise modified.
1966 		 *
1967 		 * We can get away with this because ISM segments are
1968 		 * always rw. Other than this unusual case, there
1969 		 * should be no instances of protection violations.
1970 		 */
1971 		return (0);
1972 
1973 	default:
1974 #ifdef DEBUG
1975 		panic("segspt_dismfault default type?");
1976 #else
1977 		return (FC_NOMAP);
1978 #endif
1979 	}
1980 }
1981 
1982 
1983 faultcode_t
1984 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
1985     size_t len, enum fault_type type, enum seg_rw rw)
1986 {
1987 	struct shm_data 	*shmd = (struct shm_data *)seg->s_data;
1988 	struct seg		*sptseg = shmd->shm_sptseg;
1989 	struct as		*curspt = shmd->shm_sptas;
1990 	struct spt_data 	*sptd   = sptseg->s_data;
1991 	pgcnt_t npages;
1992 	size_t size;
1993 	caddr_t sptseg_addr, shm_addr;
1994 	page_t *pp, **ppa;
1995 	int	i;
1996 	u_offset_t offset;
1997 	ulong_t anon_index = 0;
1998 	struct vnode *vp;
1999 	struct anon_map *amp;		/* XXX - for locknest */
2000 	struct anon *ap = NULL;
2001 	anon_sync_obj_t cookie;
2002 	size_t		pgsz;
2003 	pgcnt_t		pgcnt;
2004 	caddr_t		a;
2005 	pgcnt_t		pidx;
2006 	size_t		sz;
2007 
2008 #ifdef lint
2009 	hat = hat;
2010 #endif
2011 
2012 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2013 
2014 	if (sptd->spt_flags & SHM_PAGEABLE) {
2015 		return (segspt_dismfault(hat, seg, addr, len, type, rw));
2016 	}
2017 
2018 	/*
2019 	 * Because of the way spt is implemented
2020 	 * the realsize of the segment does not have to be
2021 	 * equal to the segment size itself. The segment size is
2022 	 * often in multiples of a page size larger than PAGESIZE.
2023 	 * The realsize is rounded up to the nearest PAGESIZE
2024 	 * based on what the user requested. This is a bit of
2025 	 * ugliness that is historical but not easily fixed
2026 	 * without re-designing the higher levels of ISM.
2027 	 */
2028 	ASSERT(addr >= seg->s_base);
2029 	if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2030 		return (FC_NOMAP);
2031 	/*
2032 	 * For all of the following cases except F_PROT, we need to
2033 	 * make any necessary adjustments to addr and len
2034 	 * and get all of the necessary page_t's into an array called ppa[].
2035 	 *
2036 	 * The code in shmat() forces base addr and len of ISM segment
2037 	 * to be aligned to largest page size supported. Therefore,
2038 	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2039 	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2040 	 * in large pagesize chunks, or else we will screw up the HAT
2041 	 * layer by calling hat_memload_array() with differing page sizes
2042 	 * over a given virtual range.
2043 	 */
2044 	pgsz = page_get_pagesize(sptseg->s_szc);
2045 	pgcnt = page_get_pagecnt(sptseg->s_szc);
2046 	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2047 	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2048 	npages = btopr(size);
2049 
2050 	/*
2051 	 * Now we need to convert from addr in segshm to addr in segspt.
2052 	 */
2053 	anon_index = seg_page(seg, shm_addr);
2054 	sptseg_addr = sptseg->s_base + ptob(anon_index);
2055 
2056 	/*
2057 	 * And now we may have to adjust npages downward if we have
2058 	 * exceeded the realsize of the segment or initial anon
2059 	 * allocations.
2060 	 */
2061 	if ((sptseg_addr + ptob(npages)) >
2062 	    (sptseg->s_base + sptd->spt_realsize))
2063 		size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2064 
2065 	npages = btopr(size);
2066 
2067 	ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2068 	ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2069 
2070 	switch (type) {
2071 
2072 	case F_SOFTLOCK:
2073 
2074 		/*
2075 		 * availrmem is decremented once during anon_swap_adjust()
2076 		 * and is incremented during the anon_unresv(), which is
2077 		 * called from shm_rm_amp() when the segment is destroyed.
2078 		 */
2079 		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2080 		/*
2081 		 * Some platforms assume that ISM pages are SE_SHARED
2082 		 * locked for the entire life of the segment.
2083 		 */
2084 		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2085 			return (0);
2086 		/*
2087 		 * Fall through to the F_INVAL case to load up the hat layer
2088 		 * entries with the HAT_LOAD_LOCK flag.
2089 		 */
2090 
2091 		/* FALLTHRU */
2092 	case F_INVAL:
2093 
2094 		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2095 			return (FC_NOMAP);
2096 
2097 		/*
2098 		 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2099 		 * may still rely on this call to hat_share(). That
2100 		 * would imply that those hats can fault on a
2101 		 * HAT_LOAD_LOCK translation, which would seem
2102 		 * contradictory.
2103 		 */
2104 		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2105 			if (hat_share(seg->s_as->a_hat, seg->s_base,
2106 			    curspt->a_hat, sptseg->s_base,
2107 			    sptseg->s_size, sptseg->s_szc) != 0) {
2108 				panic("hat_share error in ISM fault");
2109 				/*NOTREACHED*/
2110 			}
2111 			return (0);
2112 		}
2113 		ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2114 
2115 		/*
2116 		 * I see no need to lock the real seg here,
2117 		 * because all of our work will be on the underlying
2118 		 * dummy seg.
2119 		 *
2120 		 * sptseg_addr and npages now account for large pages.
2121 		 */
2122 		amp = sptd->spt_amp;
2123 		ASSERT(amp != NULL);
2124 		anon_index = seg_page(sptseg, sptseg_addr);
2125 
2126 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2127 		for (i = 0; i < npages; i++) {
2128 			anon_array_enter(amp, anon_index, &cookie);
2129 			ap = anon_get_ptr(amp->ahp, anon_index++);
2130 			ASSERT(ap != NULL);
2131 			swap_xlate(ap, &vp, &offset);
2132 			anon_array_exit(&cookie);
2133 			pp = page_lookup(vp, offset, SE_SHARED);
2134 			ASSERT(pp != NULL);
2135 			ppa[i] = pp;
2136 		}
2137 		ANON_LOCK_EXIT(&amp->a_rwlock);
2138 		ASSERT(i == npages);
2139 
2140 		/*
2141 		 * We are already holding the as->a_lock on the user's
2142 		 * real segment, but we need to hold the a_lock on the
2143 		 * underlying dummy as. This is mostly to satisfy the
2144 		 * underlying HAT layer.
2145 		 */
2146 		AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2147 		a = sptseg_addr;
2148 		pidx = 0;
2149 		if (type == F_SOFTLOCK) {
2150 			/*
2151 			 * Load up the translation keeping it
2152 			 * locked and don't unlock the page.
2153 			 */
2154 			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2155 				sz = MIN(pgsz, ptob(npages - pidx));
2156 				hat_memload_array(sptseg->s_as->a_hat, a,
2157 				    sz, &ppa[pidx], sptd->spt_prot,
2158 				    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2159 			}
2160 		} else {
2161 			if (hat == seg->s_as->a_hat) {
2162 
2163 				/*
2164 				 * Migrate pages marked for migration.
2165 				 */
2166 				if (lgrp_optimizations())
2167 					page_migrate(seg, shm_addr, ppa,
2168 					    npages);
2169 
2170 				/* CPU HAT */
2171 				for (; pidx < npages;
2172 				    a += pgsz, pidx += pgcnt) {
2173 					sz = MIN(pgsz, ptob(npages - pidx));
2174 					hat_memload_array(sptseg->s_as->a_hat,
2175 					    a, sz, &ppa[pidx],
2176 					    sptd->spt_prot, HAT_LOAD_SHARE);
2177 				}
2178 			} else {
2179 				/* XHAT. Pass real address */
2180 				hat_memload_array(hat, shm_addr,
2181 				    ptob(npages), ppa, sptd->spt_prot,
2182 				    HAT_LOAD_SHARE);
2183 			}
2184 
2185 			/*
2186 			 * And now drop the SE_SHARED lock(s).
2187 			 */
2188 			for (i = 0; i < npages; i++)
2189 				page_unlock(ppa[i]);
2190 		}
2191 		AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2192 
2193 		kmem_free(ppa, sizeof (page_t *) * npages);
2194 		return (0);
2195 	case F_SOFTUNLOCK:
2196 
2197 		/*
2198 		 * This is a bit ugly: we pass in the real seg pointer,
2199 		 * but the sptseg_addr is the virtual address within the
2200 		 * dummy seg.
2201 		 */
2202 		segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2203 		return (0);
2204 
2205 	case F_PROT:
2206 
2207 		/*
2208 		 * This takes care of the unusual case where a user
2209 		 * allocates a stack in shared memory and a register
2210 		 * window overflow is written to that stack page before
2211 		 * it is otherwise modified.
2212 		 *
2213 		 * We can get away with this because ISM segments are
2214 		 * always rw. Other than this unusual case, there
2215 		 * should be no instances of protection violations.
2216 		 */
2217 		return (0);
2218 
2219 	default:
2220 #ifdef DEBUG
2221 		cmn_err(CE_WARN, "segspt_shmfault default type?");
2222 #endif
2223 		return (FC_NOMAP);
2224 	}
2225 }
2226 
2227 /*ARGSUSED*/
2228 static faultcode_t
2229 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2230 {
2231 	return (0);
2232 }
2233 
2234 /*ARGSUSED*/
2235 static int
2236 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2237 {
2238 	return (0);
2239 }
2240 
2241 /*ARGSUSED*/
2242 static size_t
2243 segspt_shmswapout(struct seg *seg)
2244 {
2245 	return (0);
2246 }
2247 
2248 /*
2249  * duplicate the shared page tables
2250  */
2251 int
2252 segspt_shmdup(struct seg *seg, struct seg *newseg)
2253 {
2254 	struct shm_data		*shmd = (struct shm_data *)seg->s_data;
2255 	struct anon_map 	*amp = shmd->shm_amp;
2256 	struct shm_data 	*shmd_new;
2257 	struct seg		*spt_seg = shmd->shm_sptseg;
2258 	struct spt_data		*sptd = spt_seg->s_data;
2259 	int			error = 0;
2260 
2261 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2262 
2263 	shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2264 	newseg->s_data = (void *)shmd_new;
2265 	shmd_new->shm_sptas = shmd->shm_sptas;
2266 	shmd_new->shm_amp = amp;
2267 	shmd_new->shm_sptseg = shmd->shm_sptseg;
2268 	newseg->s_ops = &segspt_shmops;
2269 	newseg->s_szc = seg->s_szc;
2270 	ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2271 
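	/*
	 * Take an extra hold on the anon_map for the new segment; the
	 * matching decrement is done in segspt_shmfree().
	 */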
2272 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2273 	amp->refcnt++;
2274 	ANON_LOCK_EXIT(&amp->a_rwlock);
2275 
2276 	if (sptd->spt_flags & SHM_PAGEABLE) {
2277 		shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2278 		shmd_new->shm_lckpgs = 0;
2279 		if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2280 			if ((error = hat_share(newseg->s_as->a_hat,
2281 			    newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2282 			    seg->s_size, seg->s_szc)) != 0) {
2283 				kmem_free(shmd_new->shm_vpage,
2284 				btopr(amp->size));
2285 			}
2286 		}
2287 		return (error);
2288 	} else {
2289 		return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2290 		    shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2291 		    seg->s_szc));
2292 
2293 	}
2294 }
2295 
2296 /*ARGSUSED*/
2297 int
2298 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2299 {
2300 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
2301 	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2302 
2303 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2304 
2305 	/*
2306 	 * ISM segment is always rw.
2307 	 */
2308 	return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2309 }
2310 
2311 /*
2312  * Return an array of locked large pages; for empty slots, allocate
2313  * private zero-filled anon pages.
2314  */
2315 static int
2316 spt_anon_getpages(
2317 	struct seg *sptseg,
2318 	caddr_t sptaddr,
2319 	size_t len,
2320 	page_t *ppa[])
2321 {
2322 	struct  spt_data *sptd = sptseg->s_data;
2323 	struct  anon_map *amp = sptd->spt_amp;
2324 	enum 	seg_rw rw = sptd->spt_prot;
2325 	uint_t	szc = sptseg->s_szc;
2326 	size_t	pg_sz, share_sz = page_get_pagesize(szc);
2327 	pgcnt_t	lp_npgs;
2328 	caddr_t	lp_addr, e_sptaddr;
2329 	uint_t	vpprot, ppa_szc = 0;
2330 	struct  vpage *vpage = NULL;
2331 	ulong_t	j, ppa_idx;
2332 	int	err, ierr = 0;
2333 	pgcnt_t	an_idx;
2334 	anon_sync_obj_t cookie;
2335 
2336 	ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2337 	ASSERT(len != 0);
2338 
2339 	pg_sz = share_sz;
2340 	lp_npgs = btop(pg_sz);
2341 	lp_addr = sptaddr;
2342 	e_sptaddr = sptaddr + len;
2343 	an_idx = seg_page(sptseg, sptaddr);
2344 	ppa_idx = 0;
2345 
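	/*
	 * Walk the range one large-page chunk at a time.  When
	 * anon_map_getpages() cannot satisfy the current szc it returns
	 * -1 (size down) or -2 (size up) and the outer loop retries the
	 * remaining range at the adjusted page size; see the comments
	 * further down for the details.
	 */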
2346 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2347 	/*CONSTCOND*/
2348 	while (1) {
2349 		for (; lp_addr < e_sptaddr;
2350 			an_idx += lp_npgs, lp_addr += pg_sz,
2351 			ppa_idx += lp_npgs) {
2352 
2353 			anon_array_enter(amp, an_idx, &cookie);
2354 			ppa_szc = (uint_t)-1;
2355 			ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2356 			    lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2357 			    &ppa_szc, vpage, rw, 0, segvn_anypgsz, kcred);
2358 			anon_array_exit(&cookie);
2359 
2360 			if (ierr != 0) {
2361 				if (ierr > 0) {
2362 					err = FC_MAKE_ERR(ierr);
2363 					goto lpgs_err;
2364 				}
2365 				break;
2366 			}
2367 		}
2368 		if (lp_addr == e_sptaddr) {
2369 			break;
2370 		}
2371 		ASSERT(lp_addr < e_sptaddr);
2372 
2373 		/*
2374 	 * ierr == -1 means we failed to allocate a large page,
2375 	 * so do a size down operation.
2376 		 *
2377 		 * ierr == -2 means some other process that privately shares
2378 		 * pages with this process has allocated a larger page and we
2379 		 * need to retry with larger pages. So do a size up
2380 		 * operation. This relies on the fact that large pages are
2381 		 * never partially shared i.e. if we share any constituent
2382 		 * page of a large page with another process we must share the
2383 		 * entire large page. Note this cannot happen for SOFTLOCK
2384 	 * case, unless the current address (lp_addr) is at the beginning
2385 		 * of the next page size boundary because the other process
2386 		 * couldn't have relocated locked pages.
2387 		 */
2388 		ASSERT(ierr == -1 || ierr == -2);
2389 		if (segvn_anypgsz) {
2390 			ASSERT(ierr == -2 || szc != 0);
2391 			ASSERT(ierr == -1 || szc < sptseg->s_szc);
2392 			szc = (ierr == -1) ? szc - 1 : szc + 1;
2393 		} else {
2394 			/*
2395 			 * For faults and segvn_anypgsz == 0
2396 			 * we need to be careful not to loop forever
2397 			 * if existing page is found with szc other
2398 			 * than 0 or seg->s_szc. This could be due
2399 			 * to page relocations on behalf of DR or
2400 			 * more likely large page creation. For this
2401 			 * case simply re-size to existing page's szc
2402 			 * if returned by anon_map_getpages().
2403 			 */
2404 			if (ppa_szc == (uint_t)-1) {
2405 				szc = (ierr == -1) ? 0 : sptseg->s_szc;
2406 			} else {
2407 				ASSERT(ppa_szc <= sptseg->s_szc);
2408 				ASSERT(ierr == -2 || ppa_szc < szc);
2409 				ASSERT(ierr == -1 || ppa_szc > szc);
2410 				szc = ppa_szc;
2411 			}
2412 		}
2413 		pg_sz = page_get_pagesize(szc);
2414 		lp_npgs = btop(pg_sz);
2415 		ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2416 	}
2417 	ANON_LOCK_EXIT(&amp->a_rwlock);
2418 	return (0);
2419 
2420 lpgs_err:
2421 	ANON_LOCK_EXIT(&amp->a_rwlock);
2422 	for (j = 0; j < ppa_idx; j++)
2423 		page_unlock(ppa[j]);
2424 	return (err);
2425 }
2426 
2427 /*
2428  * count the number of bytes in a set of spt pages that are currently not
2429  * locked
2430  */
2431 static rctl_qty_t
2432 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2433 {
2434 	ulong_t	i;
2435 	rctl_qty_t unlocked = 0;
2436 
2437 	for (i = 0; i < npages; i++) {
2438 		if (ppa[i]->p_lckcnt == 0)
2439 			unlocked += PAGESIZE;
2440 	}
2441 	return (unlocked);
2442 }
2443 
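/*
 * Lock each page in ppa[] that is not already locked for this DISM
 * segment, charging it against the per-slot DISM_LOCK_MAX limit and
 * recording it in shm_vpage[] (and in the caller's lockmap, if one is
 * supplied).  On return, *locked is the number of bytes newly locked,
 * which the caller uses for locked-memory rctl accounting.
 */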
2444 int
2445 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2446     page_t **ppa, ulong_t *lockmap, size_t pos,
2447     rctl_qty_t *locked)
2448 {
2449 	struct shm_data *shmd = seg->s_data;
2450 	struct spt_data *sptd = shmd->shm_sptseg->s_data;
2451 	ulong_t	i;
2452 	int	kernel;
2453 
2454 	/* return the number of bytes actually locked */
2455 	*locked = 0;
2456 	for (i = 0; i < npages; anon_index++, pos++, i++) {
2457 		if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2458 			if (sptd->spt_ppa_lckcnt[anon_index] <
2459 			    (ushort_t)DISM_LOCK_MAX) {
2460 				if (++sptd->spt_ppa_lckcnt[anon_index] ==
2461 				    (ushort_t)DISM_LOCK_MAX) {
2462 					cmn_err(CE_WARN,
2463 					    "DISM page lock limit "
2464 					    "reached on DISM offset 0x%lx\n",
2465 					    anon_index << PAGESHIFT);
2466 				}
2467 				kernel = (sptd->spt_ppa &&
2468 				    sptd->spt_ppa[anon_index]) ? 1 : 0;
2469 				if (!page_pp_lock(ppa[i], 0, kernel)) {
2470 					sptd->spt_ppa_lckcnt[anon_index]--;
2471 					return (EAGAIN);
2472 				}
2473 				/* if this is a newly locked page, count it */
2474 				if (ppa[i]->p_lckcnt == 1) {
2475 					*locked += PAGESIZE;
2476 				}
2477 				shmd->shm_lckpgs++;
2478 				shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2479 				if (lockmap != NULL)
2480 					BT_SET(lockmap, pos);
2481 			}
2482 		}
2483 	}
2484 	return (0);
2485 }
2486 
2487 /*ARGSUSED*/
2488 static int
2489 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2490     int attr, int op, ulong_t *lockmap, size_t pos)
2491 {
2492 	struct shm_data *shmd = seg->s_data;
2493 	struct seg	*sptseg = shmd->shm_sptseg;
2494 	struct spt_data *sptd = sptseg->s_data;
2495 	struct kshmid	*sp = sptd->spt_amp->a_sp;
2496 	pgcnt_t		npages, a_npages;
2497 	page_t		**ppa;
2498 	pgcnt_t 	an_idx, a_an_idx, ppa_idx;
2499 	caddr_t		spt_addr, a_addr;	/* spt and aligned address */
2500 	size_t		a_len;			/* aligned len */
2501 	size_t		share_sz;
2502 	ulong_t		i;
2503 	int		sts = 0;
2504 	rctl_qty_t	unlocked = 0;
2505 	rctl_qty_t	locked = 0;
2506 	struct proc	*p = curproc;
2507 	kproject_t	*proj;
2508 
2509 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2510 	ASSERT(sp != NULL);
2511 
2512 	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2513 		return (0);
2514 	}
2515 
2516 	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2517 	an_idx = seg_page(seg, addr);
2518 	npages = btopr(len);
2519 
2520 	if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2521 		return (ENOMEM);
2522 	}
2523 
2524 	/*
2525 	 * A shm's project never changes, so no lock needed.
2526 	 * The shm has a hold on the project, so it will not go away.
2527 	 * Since we have a mapping to shm within this zone, we know
2528 	 * that the zone will not go away.
2529 	 */
2530 	proj = sp->shm_perm.ipc_proj;
2531 
2532 	if (op == MC_LOCK) {
2533 
2534 		/*
2535 		 * Need to align addr and size if they are not aligned, so we
2536 		 * can always allocate large page(s); however, we only lock
2537 		 * what was requested in the initial request.
2538 		 */
2539 		share_sz = page_get_pagesize(sptseg->s_szc);
2540 		a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2541 		a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2542 				share_sz);
2543 		a_npages = btop(a_len);
2544 		a_an_idx = seg_page(seg, a_addr);
2545 		spt_addr = sptseg->s_base + ptob(a_an_idx);
2546 		ppa_idx = an_idx - a_an_idx;
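		/*
		 * Illustrative only: with an 8K PAGESIZE, a 4M large page
		 * and a large-page aligned seg->s_base, locking len = 0x4000
		 * at addr = seg->s_base + 0x2000 gives a_addr = seg->s_base,
		 * a_len = 0x400000, a_npages = 512 and ppa_idx = 1, so the
		 * two requested pages are ppa[1] and ppa[2] of the 512
		 * brought in below.
		 */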
2547 
2548 		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2549 			KM_NOSLEEP)) == NULL) {
2550 			return (ENOMEM);
2551 		}
2552 
2553 		/*
2554 		 * Don't cache any new pages for IO and
2555 		 * flush any cached pages.
2556 		 */
2557 		mutex_enter(&sptd->spt_lock);
2558 		if (sptd->spt_ppa != NULL)
2559 			sptd->spt_flags |= DISM_PPA_CHANGED;
2560 
2561 		sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2562 		if (sts != 0) {
2563 			mutex_exit(&sptd->spt_lock);
2564 			kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2565 			return (sts);
2566 		}
2567 
2568 		mutex_enter(&sp->shm_mlock);
2569 		/* enforce locked memory rctl */
2570 		unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2571 
2572 		mutex_enter(&p->p_lock);
2573 		if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2574 			mutex_exit(&p->p_lock);
2575 			sts = EAGAIN;
2576 		} else {
2577 			mutex_exit(&p->p_lock);
2578 			sts = spt_lockpages(seg, an_idx, npages,
2579 			    &ppa[ppa_idx], lockmap, pos, &locked);
2580 
2581 			/*
2582 			 * correct locked count if not all pages could be
2583 			 * locked
2584 			 */
2585 			if ((unlocked - locked) > 0) {
2586 				rctl_decr_locked_mem(NULL, proj,
2587 				    (unlocked - locked), 0);
2588 			}
2589 		}
2590 		/*
2591 		 * unlock pages
2592 		 */
2593 		for (i = 0; i < a_npages; i++)
2594 			page_unlock(ppa[i]);
2595 		if (sptd->spt_ppa != NULL)
2596 			sptd->spt_flags |= DISM_PPA_CHANGED;
2597 		mutex_exit(&sp->shm_mlock);
2598 		mutex_exit(&sptd->spt_lock);
2599 
2600 		kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2601 
2602 	} else if (op == MC_UNLOCK) { /* unlock */
2603 		struct anon_map *amp;
2604 		struct anon 	*ap;
2605 		struct vnode 	*vp;
2606 		u_offset_t 	off;
2607 		struct page	*pp;
2608 		int		kernel;
2609 		anon_sync_obj_t cookie;
2610 		rctl_qty_t	unlocked = 0;
2611 
2612 		amp = sptd->spt_amp;
2613 		mutex_enter(&sptd->spt_lock);
2614 		if (shmd->shm_lckpgs == 0) {
2615 			mutex_exit(&sptd->spt_lock);
2616 			return (0);
2617 		}
2618 		/*
2619 		 * Don't cache new IO pages.
2620 		 */
2621 		if (sptd->spt_ppa != NULL)
2622 			sptd->spt_flags |= DISM_PPA_CHANGED;
2623 
2624 		mutex_enter(&sp->shm_mlock);
2625 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2626 		for (i = 0; i < npages; i++, an_idx++) {
2627 			if (shmd->shm_vpage[an_idx] & DISM_PG_LOCKED) {
2628 				anon_array_enter(amp, an_idx, &cookie);
2629 				ap = anon_get_ptr(amp->ahp, an_idx);
2630 				ASSERT(ap);
2631 
2632 				swap_xlate(ap, &vp, &off);
2633 				anon_array_exit(&cookie);
2634 				pp = page_lookup(vp, off, SE_SHARED);
2635 				ASSERT(pp);
2636 				/*
2637 				 * availrmem is decremented only for
2638 				 * pages which are not in the seg pcache;
2639 				 * for pages in the seg pcache, availrmem
2640 				 * was decremented in _dismpagelock() (if
2641 				 * they were not locked here).
2642 				 */
2643 				kernel = (sptd->spt_ppa &&
2644 				    sptd->spt_ppa[an_idx]) ? 1 : 0;
2645 				ASSERT(pp->p_lckcnt > 0);
2646 				page_pp_unlock(pp, 0, kernel);
2647 				if (pp->p_lckcnt == 0)
2648 					unlocked += PAGESIZE;
2649 				page_unlock(pp);
2650 				shmd->shm_vpage[an_idx] &= ~DISM_PG_LOCKED;
2651 				sptd->spt_ppa_lckcnt[an_idx]--;
2652 				shmd->shm_lckpgs--;
2653 			}
2654 		}
2655 		ANON_LOCK_EXIT(&amp->a_rwlock);
2656 		if (sptd->spt_ppa != NULL)
2657 			sptd->spt_flags |= DISM_PPA_CHANGED;
2658 		mutex_exit(&sptd->spt_lock);
2659 
2660 		rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2661 		mutex_exit(&sp->shm_mlock);
2662 	}
2663 	return (sts);
2664 }
2665 
2666 /*ARGSUSED*/
2667 int
2668 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2669 {
2670 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
2671 	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2672 	spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2673 
2674 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2675 
2676 	/*
2677 	 * ISM segment is always rw.
2678 	 */
2679 	while (--pgno >= 0)
2680 		*protv++ = sptd->spt_prot;
2681 	return (0);
2682 }
2683 
2684 /*ARGSUSED*/
2685 u_offset_t
2686 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2687 {
2688 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2689 
2690 	/* Offset does not matter in ISM memory */
2691 
2692 	return ((u_offset_t)0);
2693 }
2694 
2695 /* ARGSUSED */
2696 int
2697 segspt_shmgettype(struct seg *seg, caddr_t addr)
2698 {
2699 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
2700 	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2701 
2702 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2703 
2704 	/*
2705 	 * The shared memory mapping is always MAP_SHARED; swap is only
2706 	 * reserved for DISM.
2707 	 */
2708 	return (MAP_SHARED |
2709 		((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2710 }
2711 
2712 /*ARGSUSED*/
2713 int
2714 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2715 {
2716 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
2717 	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2718 
2719 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2720 
2721 	*vpp = sptd->spt_vp;
2722 	return (0);
2723 }
2724 
2725 /*ARGSUSED*/
2726 static int
2727 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2728 {
2729 	struct shm_data 	*shmd = (struct shm_data *)seg->s_data;
2730 	struct spt_data	*sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2731 	struct anon_map	*amp;
2732 	pgcnt_t		pg_idx;
2733 
2734 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2735 
2736 	if (behav == MADV_FREE) {
2737 		if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2738 			return (0);
2739 
2740 		amp = sptd->spt_amp;
2741 		pg_idx = seg_page(seg, addr);
2742 
2743 		mutex_enter(&sptd->spt_lock);
2744 		if (sptd->spt_ppa != NULL)
2745 			sptd->spt_flags |= DISM_PPA_CHANGED;
2746 		mutex_exit(&sptd->spt_lock);
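		/*
		 * DISM_PPA_CHANGED marks the cached spt_ppa page array as
		 * stale, and the purge below flushes DISM pages held in
		 * the seg pcache, before anon_disclaim() frees the pages.
		 */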
2747 
2748 		/*
2749 		 * Purge all DISM cached pages
2750 		 */
2751 		seg_ppurge_seg(segspt_reclaim);
2752 
2753 		mutex_enter(&sptd->spt_lock);
2754 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2755 		anon_disclaim(amp, pg_idx, len, ANON_PGLOOKUP_BLK);
2756 		ANON_LOCK_EXIT(&amp->a_rwlock);
2757 		mutex_exit(&sptd->spt_lock);
2758 	} else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2759 	    behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2760 		int			already_set;
2761 		ulong_t			anon_index;
2762 		lgrp_mem_policy_t	policy;
2763 		caddr_t			shm_addr;
2764 		size_t			share_size;
2765 		size_t			size;
2766 		struct seg		*sptseg = shmd->shm_sptseg;
2767 		caddr_t			sptseg_addr;
2768 
2769 		/*
2770 		 * Align address and length to page size of underlying segment
2771 		 */
2772 		share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2773 		shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2774 		size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2775 		    share_size);
2776 
2777 		amp = shmd->shm_amp;
2778 		anon_index = seg_page(seg, shm_addr);
2779 
2780 		/*
2781 		 * And now we may have to adjust size downward if we have
2782 		 * exceeded the realsize of the segment or initial anon
2783 		 * allocations.
2784 		 */
2785 		sptseg_addr = sptseg->s_base + ptob(anon_index);
2786 		if ((sptseg_addr + size) >
2787 		    (sptseg->s_base + sptd->spt_realsize))
2788 			size = (sptseg->s_base + sptd->spt_realsize) -
2789 			    sptseg_addr;
2790 
2791 		/*
2792 		 * Set memory allocation policy for this segment
2793 		 */
2794 		policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2795 		already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2796 		    NULL, 0, len);
2797 
2798 		/*
2799 		 * If random memory allocation policy set already,
2800 		 * don't bother reapplying it.
2801 		 */
2802 		if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2803 			return (0);
2804 
2805 		/*
2806 		 * Mark any existing pages in the given range for
2807 		 * migration, flushing the I/O page cache and using the
2808 		 * underlying segment to calculate the anon index and to
2809 		 * get the anonmap and vnode pointer.
2810 		 */
2811 		if (shmd->shm_softlockcnt > 0)
2812 			segspt_purge(seg);
2813 
2814 		page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2815 	}
2816 
2817 	return (0);
2818 }
2819 
2820 /*ARGSUSED*/
2821 void
2822 segspt_shmdump(struct seg *seg)
2823 {
2824 	/* no-op for ISM segment */
2825 }
2826 
2827 /*ARGSUSED*/
2828 static faultcode_t
2829 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
2830 {
2831 	return (ENOTSUP);
2832 }
2833 
2834 /*
2835  * get a memory ID for an addr in a given segment
2836  */
2837 static int
2838 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2839 {
2840 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
2841 	struct anon 	*ap;
2842 	size_t		anon_index;
2843 	struct anon_map	*amp = shmd->shm_amp;
2844 	struct spt_data	*sptd = shmd->shm_sptseg->s_data;
2845 	struct seg	*sptseg = shmd->shm_sptseg;
2846 	anon_sync_obj_t	cookie;
2847 
2848 	anon_index = seg_page(seg, addr);
2849 
2850 	if (addr > (seg->s_base + sptd->spt_realsize)) {
2851 		return (EFAULT);
2852 	}
2853 
2854 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2855 	anon_array_enter(amp, anon_index, &cookie);
2856 	ap = anon_get_ptr(amp->ahp, anon_index);
2857 	if (ap == NULL) {
2858 		struct page *pp;
2859 		caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
2860 
2861 		pp = anon_zero(sptseg, spt_addr, &ap, kcred);
2862 		if (pp == NULL) {
2863 			anon_array_exit(&cookie);
2864 			ANON_LOCK_EXIT(&amp->a_rwlock);
2865 			return (ENOMEM);
2866 		}
2867 		(void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
2868 		page_unlock(pp);
2869 	}
2870 	anon_array_exit(&cookie);
2871 	ANON_LOCK_EXIT(&amp->a_rwlock);
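	/*
	 * The memory ID is the anon slot pointer plus the byte offset of
	 * addr within its page; together these identify the backing anon
	 * page independently of where it is mapped.
	 */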
2872 	memidp->val[0] = (uintptr_t)ap;
2873 	memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
2874 	return (0);
2875 }
2876 
2877 /*
2878  * Get memory allocation policy info for specified address in given segment
2879  */
2880 static lgrp_mem_policy_info_t *
2881 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
2882 {
2883 	struct anon_map		*amp;
2884 	ulong_t			anon_index;
2885 	lgrp_mem_policy_info_t	*policy_info;
2886 	struct shm_data		*shm_data;
2887 
2888 	ASSERT(seg != NULL);
2889 
2890 	/*
2891 	 * Get anon_map from segshm
2892 	 *
2893 	 * Assume that no lock needs to be held on the anon_map, since
2894 	 * it should be protected by its reference count, which must be
2895 	 * nonzero for an existing segment.
2896 	 * Need to grab the readers lock on the policy tree, though.
2897 	 */
2898 	shm_data = (struct shm_data *)seg->s_data;
2899 	if (shm_data == NULL)
2900 		return (NULL);
2901 	amp = shm_data->shm_amp;
2902 	ASSERT(amp->refcnt != 0);
2903 
2904 	/*
2905 	 * Get policy info
2906 	 *
2907 	 * Assume starting anon index of 0
2908 	 */
2909 	anon_index = seg_page(seg, addr);
2910 	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
2911 
2912 	return (policy_info);
2913 }
2914 
2915 /*ARGSUSED*/
2916 static int
2917 segspt_shmcapable(struct seg *seg, segcapability_t capability)
2918 {
2919 	return (0);
2920 }
2921