xref: /illumos-gate/usr/src/uts/common/vm/seg_spt.c (revision 856f620e96e5413932a6607aea5094db2ece172f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2018 Joyent, Inc.
24  * Copyright (c) 2016 by Delphix. All rights reserved.
25  */
26 
27 #include <sys/param.h>
28 #include <sys/user.h>
29 #include <sys/mman.h>
30 #include <sys/kmem.h>
31 #include <sys/sysmacros.h>
32 #include <sys/cmn_err.h>
33 #include <sys/systm.h>
34 #include <sys/tuneable.h>
35 #include <vm/hat.h>
36 #include <vm/seg.h>
37 #include <vm/as.h>
38 #include <vm/anon.h>
39 #include <vm/page.h>
40 #include <sys/buf.h>
41 #include <sys/swap.h>
42 #include <sys/atomic.h>
43 #include <vm/seg_spt.h>
44 #include <sys/debug.h>
45 #include <sys/vtrace.h>
46 #include <sys/shm.h>
47 #include <sys/shm_impl.h>
48 #include <sys/lgrp.h>
49 #include <sys/vmsystm.h>
50 #include <sys/policy.h>
51 #include <sys/project.h>
52 #include <sys/tnf_probe.h>
53 #include <sys/zone.h>
54 
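/*
 * All ISM/DISM segments are created inside a dummy address space (see
 * sptcreate() below) and are mapped there at this fixed base address.
 */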
55 #define	SEGSPTADDR	(caddr_t)0x0
56 
57 /*
58  * # pages used for spt
59  */
60 size_t	spt_used;
61 
62 /*
63  * segspt_minfree is the memory left for the system after ISM
64  * locks its pages; it is set to 5% of availrmem in
65  * sptcreate when ISM is created.  ISM should not use more
66  * than ~90% of availrmem; if it does, the performance
67  * of the system may decrease. Machines with large memories may
68  * be able to use more memory for ISM, so we set the default
69  * segspt_minfree to 5% (which gives ISM a maximum of 95% of availrmem).
70  * If somebody wants even more memory for ISM (risking hanging
71  * the system), they can patch segspt_minfree to a smaller number.
72  */
73 pgcnt_t segspt_minfree = 0;
74 
75 static int segspt_create(struct seg **segpp, void *argsp);
76 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
77 static void segspt_free(struct seg *seg);
78 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
79 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
80 
81 static void
82 segspt_badop()
83 {
84 	panic("segspt_badop called");
85 	/*NOTREACHED*/
86 }
87 
88 #define	SEGSPT_BADOP(t)	(t(*)())segspt_badop
89 
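/*
 * Segment operations for the dummy spt segment itself.  Most entries are
 * segspt_badop() stubs that panic if called; only unmap, free, and
 * getpolicy are implemented (inherit is simply not supported).  Normal
 * user mappings of the shared memory go through segspt_shmops below.
 */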
90 struct seg_ops segspt_ops = {
91 	SEGSPT_BADOP(int),		/* dup */
92 	segspt_unmap,
93 	segspt_free,
94 	SEGSPT_BADOP(int),		/* fault */
95 	SEGSPT_BADOP(faultcode_t),	/* faulta */
96 	SEGSPT_BADOP(int),		/* setprot */
97 	SEGSPT_BADOP(int),		/* checkprot */
98 	SEGSPT_BADOP(int),		/* kluster */
99 	SEGSPT_BADOP(size_t),		/* swapout */
100 	SEGSPT_BADOP(int),		/* sync */
101 	SEGSPT_BADOP(size_t),		/* incore */
102 	SEGSPT_BADOP(int),		/* lockop */
103 	SEGSPT_BADOP(int),		/* getprot */
104 	SEGSPT_BADOP(u_offset_t), 	/* getoffset */
105 	SEGSPT_BADOP(int),		/* gettype */
106 	SEGSPT_BADOP(int),		/* getvp */
107 	SEGSPT_BADOP(int),		/* advise */
108 	SEGSPT_BADOP(void),		/* dump */
109 	SEGSPT_BADOP(int),		/* pagelock */
110 	SEGSPT_BADOP(int),		/* setpgsz */
111 	SEGSPT_BADOP(int),		/* getmemid */
112 	segspt_getpolicy,		/* getpolicy */
113 	SEGSPT_BADOP(int),		/* capable */
114 	seg_inherit_notsup		/* inherit */
115 };
116 
117 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
118 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
119 static void segspt_shmfree(struct seg *seg);
120 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
121 		caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
122 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
123 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
124 			register size_t len, register uint_t prot);
125 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
126 			uint_t prot);
127 static int	segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
128 static size_t	segspt_shmswapout(struct seg *seg);
129 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
130 			register char *vec);
131 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
132 			int attr, uint_t flags);
133 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
134 			int attr, int op, ulong_t *lockmap, size_t pos);
135 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
136 			uint_t *protv);
137 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
138 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
139 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
140 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
141 			uint_t behav);
142 static void segspt_shmdump(struct seg *seg);
143 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
144 			struct page ***, enum lock_type, enum seg_rw);
145 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
146 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
147 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
148 static int segspt_shmcapable(struct seg *, segcapability_t);
149 
150 struct seg_ops segspt_shmops = {
151 	segspt_shmdup,
152 	segspt_shmunmap,
153 	segspt_shmfree,
154 	segspt_shmfault,
155 	segspt_shmfaulta,
156 	segspt_shmsetprot,
157 	segspt_shmcheckprot,
158 	segspt_shmkluster,
159 	segspt_shmswapout,
160 	segspt_shmsync,
161 	segspt_shmincore,
162 	segspt_shmlockop,
163 	segspt_shmgetprot,
164 	segspt_shmgetoffset,
165 	segspt_shmgettype,
166 	segspt_shmgetvp,
167 	segspt_shmadvise,	/* advise */
168 	segspt_shmdump,
169 	segspt_shmpagelock,
170 	segspt_shmsetpgsz,
171 	segspt_shmgetmemid,
172 	segspt_shmgetpolicy,
173 	segspt_shmcapable,
174 	seg_inherit_notsup
175 };
176 
177 static void segspt_purge(struct seg *seg);
178 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
179 		enum seg_rw, int);
180 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
181 		page_t **ppa);
182 
183 
184 
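/*
 * Create the dummy address space and SPT segment that hold the pages
 * backing an ISM/DISM shared memory identifier.  The new as has no
 * owning process; the caller gets the spt segment back in *sptseg and
 * later tears everything down with sptdestroy().
 */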
185 /*ARGSUSED*/
186 int
187 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
188     uint_t prot, uint_t flags, uint_t share_szc)
189 {
190 	int 	err;
191 	struct  as	*newas;
192 	struct	segspt_crargs sptcargs;
193 
194 #ifdef DEBUG
195 	TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
196 			tnf_ulong, size, size );
197 #endif
198 	if (segspt_minfree == 0)	/* leave min 5% of availrmem */
199 		segspt_minfree = availrmem/20;	/* for the system */
200 
201 	if (!hat_supported(HAT_SHARED_PT, (void *)0))
202 		return (EINVAL);
203 
204 	/*
205 	 * get a new as for this shared memory segment
206 	 */
207 	newas = as_alloc();
208 	newas->a_proc = NULL;
209 	sptcargs.amp = amp;
210 	sptcargs.prot = prot;
211 	sptcargs.flags = flags;
212 	sptcargs.szc = share_szc;
213 	/*
214 	 * create a shared page table (spt) segment
215 	 */
216 
217 	if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
218 		as_free(newas);
219 		return (err);
220 	}
221 	*sptseg = sptcargs.seg_spt;
222 	return (0);
223 }
224 
225 void
226 sptdestroy(struct as *as, struct anon_map *amp)
227 {
228 
229 #ifdef DEBUG
230 	TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
231 #endif
232 	(void) as_unmap(as, SEGSPTADDR, amp->size);
233 	as_free(as);
234 }
235 
236 /*
237  * called from seg_free().
238  * free (i.e., unlock, unmap, return to free list)
239  *  all the pages in the given seg.
240  */
241 void
242 segspt_free(struct seg	*seg)
243 {
244 	struct spt_data *sptd = (struct spt_data *)seg->s_data;
245 
246 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
247 
248 	if (sptd != NULL) {
249 		if (sptd->spt_realsize)
250 			segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
251 
252 		if (sptd->spt_ppa_lckcnt) {
253 			kmem_free(sptd->spt_ppa_lckcnt,
254 			    sizeof (*sptd->spt_ppa_lckcnt)
255 			    * btopr(sptd->spt_amp->size));
256 		}
257 		kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
258 		cv_destroy(&sptd->spt_cv);
259 		mutex_destroy(&sptd->spt_lock);
260 		kmem_free(sptd, sizeof (*sptd));
261 	}
262 }
263 
264 /*ARGSUSED*/
265 static int
266 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
267     uint_t flags)
268 {
269 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
270 
271 	return (0);
272 }
273 
274 /*ARGSUSED*/
275 static size_t
276 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
277 {
278 	caddr_t	eo_seg;
279 	pgcnt_t	npages;
280 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
281 	struct seg	*sptseg;
282 	struct spt_data *sptd;
283 
284 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
285 #ifdef lint
286 	seg = seg;
287 #endif
288 	sptseg = shmd->shm_sptseg;
289 	sptd = sptseg->s_data;
290 
291 	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
292 		eo_seg = addr + len;
293 		while (addr < eo_seg) {
294 			/* page exists, and it's locked. */
295 			*vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
296 			    SEG_PAGE_ANON;
297 			addr += PAGESIZE;
298 		}
299 		return (len);
300 	} else {
301 		struct  anon_map *amp = shmd->shm_amp;
302 		struct  anon	*ap;
303 		page_t		*pp;
304 		pgcnt_t 	anon_index;
305 		struct vnode 	*vp;
306 		u_offset_t 	off;
307 		ulong_t		i;
308 		int		ret;
309 		anon_sync_obj_t	cookie;
310 
311 		addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
312 		anon_index = seg_page(seg, addr);
313 		npages = btopr(len);
314 		if (anon_index + npages > btopr(shmd->shm_amp->size)) {
315 			return (EINVAL);
316 		}
317 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
318 		for (i = 0; i < npages; i++, anon_index++) {
319 			ret = 0;
320 			anon_array_enter(amp, anon_index, &cookie);
321 			ap = anon_get_ptr(amp->ahp, anon_index);
322 			if (ap != NULL) {
323 				swap_xlate(ap, &vp, &off);
324 				anon_array_exit(&cookie);
325 				pp = page_lookup_nowait(vp, off, SE_SHARED);
326 				if (pp != NULL) {
327 					ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
328 					page_unlock(pp);
329 				}
330 			} else {
331 				anon_array_exit(&cookie);
332 			}
333 			if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
334 				ret |= SEG_PAGE_LOCKED;
335 			}
336 			*vec++ = (char)ret;
337 		}
338 		ANON_LOCK_EXIT(&amp->a_rwlock);
339 		return (len);
340 	}
341 }
342 
343 static int
344 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
345 {
346 	size_t share_size;
347 
348 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
349 
350 	/*
351 	 * seg.s_size may have been rounded up to the largest page size
352 	 * in shmat().
353 	 * XXX This should be cleaned up. sptdestroy should take a length
354 	 * argument which should be the same as sptcreate. Then
355 	 * this rounding would not be needed (or is done in shm.c)
356 	 * Only the check for full segment will be needed.
357 	 *
358 	 * XXX -- shouldn't raddr == 0 always? These tests don't seem
359 	 * to be useful at all.
360 	 */
361 	share_size = page_get_pagesize(seg->s_szc);
362 	ssize = P2ROUNDUP(ssize, share_size);
363 
364 	if (raddr == seg->s_base && ssize == seg->s_size) {
365 		seg_free(seg);
366 		return (0);
367 	} else
368 		return (EINVAL);
369 }
370 
371 int
372 segspt_create(struct seg **segpp, void *argsp)
373 {
374 	struct seg	*seg = *segpp;
375 	int		err;
376 	caddr_t		addr = seg->s_base;
377 	struct spt_data *sptd;
378 	struct 	segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
379 	struct anon_map *amp = sptcargs->amp;
380 	struct kshmid	*sp = amp->a_sp;
381 	struct	cred	*cred = CRED();
382 	ulong_t		i, j, anon_index = 0;
383 	pgcnt_t		npages = btopr(amp->size);
384 	struct vnode	*vp;
385 	page_t		**ppa;
386 	uint_t		hat_flags;
387 	size_t		pgsz;
388 	pgcnt_t		pgcnt;
389 	caddr_t		a;
390 	pgcnt_t		pidx;
391 	size_t		sz;
392 	proc_t		*procp = curproc;
393 	rctl_qty_t	lockedbytes = 0;
394 	kproject_t	*proj;
395 
396 	/*
397 	 * We are holding the a_lock on the underlying dummy as,
398 	 * so we can make calls to the HAT layer.
399 	 */
400 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
401 	ASSERT(sp != NULL);
402 
403 #ifdef DEBUG
404 	TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
405 	    tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
406 #endif
407 	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
408 		if (err = anon_swap_adjust(npages))
409 			return (err);
410 	}
411 	err = ENOMEM;
412 
413 	if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
414 		goto out1;
415 
416 	if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
417 		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
418 		    KM_NOSLEEP)) == NULL)
419 			goto out2;
420 	}
421 
422 	mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
423 
424 	if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
425 		goto out3;
426 
427 	seg->s_ops = &segspt_ops;
428 	sptd->spt_vp = vp;
429 	sptd->spt_amp = amp;
430 	sptd->spt_prot = sptcargs->prot;
431 	sptd->spt_flags = sptcargs->flags;
432 	seg->s_data = (caddr_t)sptd;
433 	sptd->spt_ppa = NULL;
434 	sptd->spt_ppa_lckcnt = NULL;
435 	seg->s_szc = sptcargs->szc;
436 	cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
437 	sptd->spt_gen = 0;
438 
439 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
440 	if (seg->s_szc > amp->a_szc) {
441 		amp->a_szc = seg->s_szc;
442 	}
443 	ANON_LOCK_EXIT(&amp->a_rwlock);
444 
445 	/*
446 	 * Set policy to affect initial allocation of pages in
447 	 * anon_map_createpages()
448 	 */
449 	(void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
450 	    NULL, 0, ptob(npages));
451 
452 	if (sptcargs->flags & SHM_PAGEABLE) {
453 		size_t  share_sz;
454 		pgcnt_t new_npgs, more_pgs;
455 		struct anon_hdr *nahp;
456 		zone_t *zone;
457 
458 		share_sz = page_get_pagesize(seg->s_szc);
459 		if (!IS_P2ALIGNED(amp->size, share_sz)) {
460 			/*
461 			 * We round up the size of the anon array to a
462 			 * 4M boundary because we always create 4M worth
463 			 * of pages when locking and faulting, and this
464 			 * way we don't have to check corner cases, e.g.
465 			 * whether there is enough space to allocate a
466 			 * 4M page.
467 			 */
468 			new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
469 			more_pgs = new_npgs - npages;
470 
471 			/*
472 			 * The zone will never be NULL, as a fully created
473 			 * shm always has an owning zone.
474 			 */
475 			zone = sp->shm_perm.ipc_zone_ref.zref_zone;
476 			ASSERT(zone != NULL);
477 			if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
478 				err = ENOMEM;
479 				goto out4;
480 			}
481 
482 			nahp = anon_create(new_npgs, ANON_SLEEP);
483 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
484 			(void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
485 			    ANON_SLEEP);
486 			anon_release(amp->ahp, npages);
487 			amp->ahp = nahp;
488 			ASSERT(amp->swresv == ptob(npages));
489 			amp->swresv = amp->size = ptob(new_npgs);
490 			ANON_LOCK_EXIT(&amp->a_rwlock);
491 			npages = new_npgs;
492 		}
493 
494 		sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
495 		    sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
496 		sptd->spt_pcachecnt = 0;
497 		sptd->spt_realsize = ptob(npages);
498 		sptcargs->seg_spt = seg;
499 		return (0);
500 	}
501 
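	/*
	 * The DISM (SHM_PAGEABLE) case returned above; its pages are
	 * allocated and locked lazily, at fault or lock time.  For ISM we
	 * allocate, lock, and map every page in the segment right now.
	 */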
502 	/*
503 	 * get array of pages for each anon slot in amp
504 	 */
505 	if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
506 	    seg, addr, S_CREATE, cred)) != 0)
507 		goto out4;
508 
509 	mutex_enter(&sp->shm_mlock);
510 
511 	/* May be partially locked, so, count bytes to charge for locking */
512 	for (i = 0; i < npages; i++)
513 		if (ppa[i]->p_lckcnt == 0)
514 			lockedbytes += PAGESIZE;
515 
516 	proj = sp->shm_perm.ipc_proj;
517 
518 	if (lockedbytes > 0) {
519 		mutex_enter(&procp->p_lock);
520 		if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
521 			mutex_exit(&procp->p_lock);
522 			mutex_exit(&sp->shm_mlock);
523 			for (i = 0; i < npages; i++)
524 				page_unlock(ppa[i]);
525 			err = ENOMEM;
526 			goto out4;
527 		}
528 		mutex_exit(&procp->p_lock);
529 	}
530 
531 	/*
532 	 * addr is initial address corresponding to the first page on ppa list
533 	 */
534 	for (i = 0; i < npages; i++) {
535 		/* attempt to lock all pages */
536 		if (page_pp_lock(ppa[i], 0, 1) == 0) {
537 			/*
538 			 * if unable to lock any page, unlock all
539 			 * of them and return error
540 			 */
541 			for (j = 0; j < i; j++)
542 				page_pp_unlock(ppa[j], 0, 1);
543 			for (i = 0; i < npages; i++)
544 				page_unlock(ppa[i]);
545 			rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
546 			mutex_exit(&sp->shm_mlock);
547 			err = ENOMEM;
548 			goto out4;
549 		}
550 	}
551 	mutex_exit(&sp->shm_mlock);
552 
553 	/*
554 	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
555 	 * for the entire life of the segment, for example platforms
556 	 * that do not support Dynamic Reconfiguration.
557 	 */
558 	hat_flags = HAT_LOAD_SHARE;
559 	if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
560 		hat_flags |= HAT_LOAD_LOCK;
561 
562 	/*
563 	 * Load translations one large page at a time
564 	 * to make sure we don't create mappings bigger than
565 	 * the segment's size code, in case the underlying pages
566 	 * are shared with a segvn segment that uses a bigger
567 	 * size code than we do.
568 	 */
569 	pgsz = page_get_pagesize(seg->s_szc);
570 	pgcnt = page_get_pagecnt(seg->s_szc);
571 	for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
572 		sz = MIN(pgsz, ptob(npages - pidx));
573 		hat_memload_array(seg->s_as->a_hat, a, sz,
574 		    &ppa[pidx], sptd->spt_prot, hat_flags);
575 	}
576 
577 	/*
578 	 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
579 	 * we will leave the pages locked SE_SHARED for the life
580 	 * of the ISM segment. This will prevent any calls to
581 	 * hat_pageunload() on this ISM segment for those platforms.
582 	 */
583 	if (!(hat_flags & HAT_LOAD_LOCK)) {
584 		/*
585 		 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
586 		 * we no longer need to hold the SE_SHARED lock on the pages,
587 		 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
588 		 * SE_SHARED lock on the pages as necessary.
589 		 */
590 		for (i = 0; i < npages; i++)
591 			page_unlock(ppa[i]);
592 	}
593 	sptd->spt_pcachecnt = 0;
594 	kmem_free(ppa, ((sizeof (page_t *)) * npages));
595 	sptd->spt_realsize = ptob(npages);
596 	atomic_add_long(&spt_used, npages);
597 	sptcargs->seg_spt = seg;
598 	return (0);
599 
600 out4:
601 	seg->s_data = NULL;
602 	kmem_free(vp, sizeof (*vp));
603 	cv_destroy(&sptd->spt_cv);
604 out3:
605 	mutex_destroy(&sptd->spt_lock);
606 	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
607 		kmem_free(ppa, (sizeof (*ppa) * npages));
608 out2:
609 	kmem_free(sptd, sizeof (*sptd));
610 out1:
611 	if ((sptcargs->flags & SHM_PAGEABLE) == 0)
612 		anon_swap_restore(npages);
613 	return (err);
614 }
615 
616 /*ARGSUSED*/
617 void
618 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
619 {
620 	struct page 	*pp;
621 	struct spt_data *sptd = (struct spt_data *)seg->s_data;
622 	pgcnt_t		npages;
623 	ulong_t		anon_idx;
624 	struct anon_map *amp;
625 	struct anon 	*ap;
626 	struct vnode 	*vp;
627 	u_offset_t 	off;
628 	uint_t		hat_flags;
629 	int		root = 0;
630 	pgcnt_t		pgs, curnpgs = 0;
631 	page_t		*rootpp;
632 	rctl_qty_t	unlocked_bytes = 0;
633 	kproject_t	*proj;
634 	kshmid_t	*sp;
635 
636 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
637 
638 	len = P2ROUNDUP(len, PAGESIZE);
639 
640 	npages = btop(len);
641 
642 	hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
643 	if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
644 	    (sptd->spt_flags & SHM_PAGEABLE)) {
645 		hat_flags = HAT_UNLOAD_UNMAP;
646 	}
647 
648 	hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
649 
650 	amp = sptd->spt_amp;
651 	if (sptd->spt_flags & SHM_PAGEABLE)
652 		npages = btop(amp->size);
653 
654 	ASSERT(amp != NULL);
655 
656 	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
657 		sp = amp->a_sp;
658 		proj = sp->shm_perm.ipc_proj;
659 		mutex_enter(&sp->shm_mlock);
660 	}
661 	for (anon_idx = 0; anon_idx < npages; anon_idx++) {
662 		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
663 			if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
664 				panic("segspt_free_pages: null app");
665 				/*NOTREACHED*/
666 			}
667 		} else {
668 			if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
669 			    == NULL)
670 				continue;
671 		}
672 		ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
673 		swap_xlate(ap, &vp, &off);
674 
675 		/*
676 		 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
677 		 * the pages won't have the SE_SHARED lock held at this
678 		 * point.
679 		 *
680 		 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
681 		 * the pages are still held SE_SHARED locked from the
682 		 * original segspt_create().
683 		 *
684 		 * Our goal is to get SE_EXCL lock on each page, remove
685 		 * permanent lock on it and invalidate the page.
686 		 */
687 		if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
688 			if (hat_flags == HAT_UNLOAD_UNMAP)
689 				pp = page_lookup(vp, off, SE_EXCL);
690 			else {
691 				if ((pp = page_find(vp, off)) == NULL) {
692 					panic("segspt_free_pages: "
693 					    "page not locked");
694 					/*NOTREACHED*/
695 				}
696 				if (!page_tryupgrade(pp)) {
697 					page_unlock(pp);
698 					pp = page_lookup(vp, off, SE_EXCL);
699 				}
700 			}
701 			if (pp == NULL) {
702 				panic("segspt_free_pages: "
703 				    "page not in the system");
704 				/*NOTREACHED*/
705 			}
706 			ASSERT(pp->p_lckcnt > 0);
707 			page_pp_unlock(pp, 0, 1);
708 			if (pp->p_lckcnt == 0)
709 				unlocked_bytes += PAGESIZE;
710 		} else {
711 			if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
712 				continue;
713 		}
714 		/*
715 		 * It's logical to invalidate the pages here as in most cases
716 		 * these were created by segspt.
717 		 */
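		/*
		 * Large pages are destroyed as a unit: we remember the
		 * root page and count constituent pages (root/curnpgs),
		 * and once the last constituent has been seen we call
		 * page_destroy_pages() on the root.
		 */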
718 		if (pp->p_szc != 0) {
719 			if (root == 0) {
720 				ASSERT(curnpgs == 0);
721 				root = 1;
722 				rootpp = pp;
723 				pgs = curnpgs = page_get_pagecnt(pp->p_szc);
724 				ASSERT(pgs > 1);
725 				ASSERT(IS_P2ALIGNED(pgs, pgs));
726 				ASSERT(!(page_pptonum(pp) & (pgs - 1)));
727 				curnpgs--;
728 			} else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
729 				ASSERT(curnpgs == 1);
730 				ASSERT(page_pptonum(pp) ==
731 				    page_pptonum(rootpp) + (pgs - 1));
732 				page_destroy_pages(rootpp);
733 				root = 0;
734 				curnpgs = 0;
735 			} else {
736 				ASSERT(curnpgs > 1);
737 				ASSERT(page_pptonum(pp) ==
738 				    page_pptonum(rootpp) + (pgs - curnpgs));
739 				curnpgs--;
740 			}
741 		} else {
742 			if (root != 0 || curnpgs != 0) {
743 				panic("segspt_free_pages: bad large page");
744 				/*NOTREACHED*/
745 			}
746 			/*
747 			 * Before destroying the pages, we need to take care
748 			 * of the rctl locked memory accounting. For that
749 			 * we need to calculate the unlocked_bytes.
750 			 */
751 			if (pp->p_lckcnt > 0)
752 				unlocked_bytes += PAGESIZE;
753 			/*LINTED: constant in conditional context */
754 			VN_DISPOSE(pp, B_INVAL, 0, kcred);
755 		}
756 	}
757 	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
758 		if (unlocked_bytes > 0)
759 			rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
760 		mutex_exit(&sp->shm_mlock);
761 	}
762 	if (root != 0 || curnpgs != 0) {
763 		panic("segspt_free_pages: bad large page");
764 		/*NOTREACHED*/
765 	}
766 
767 	/*
768 	 * mark that pages have been released
769 	 */
770 	sptd->spt_realsize = 0;
771 
772 	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
773 		atomic_add_long(&spt_used, -npages);
774 		anon_swap_restore(npages);
775 	}
776 }
777 
778 /*
779  * Get memory allocation policy info for specified address in given segment
780  */
781 static lgrp_mem_policy_info_t *
782 segspt_getpolicy(struct seg *seg, caddr_t addr)
783 {
784 	struct anon_map		*amp;
785 	ulong_t			anon_index;
786 	lgrp_mem_policy_info_t	*policy_info;
787 	struct spt_data		*spt_data;
788 
789 	ASSERT(seg != NULL);
790 
791 	/*
792 	 * Get anon_map from segspt
793 	 *
794 	 * Assume that no lock needs to be held on anon_map, since
795 	 * it should be protected by its reference count which must be
796 	 * nonzero for an existing segment
797 	 * Need to grab readers lock on policy tree though
798 	 */
799 	spt_data = (struct spt_data *)seg->s_data;
800 	if (spt_data == NULL)
801 		return (NULL);
802 	amp = spt_data->spt_amp;
803 	ASSERT(amp->refcnt != 0);
804 
805 	/*
806 	 * Get policy info
807 	 *
808 	 * Assume starting anon index of 0
809 	 */
810 	anon_index = seg_page(seg, addr);
811 	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
812 
813 	return (policy_info);
814 }
815 
816 /*
817  * DISM only.
818  * Return locked pages over a given range.
819  *
820  * We will cache all DISM locked pages and save the pplist for the
821  * entire segment in the ppa field of the underlying DISM segment structure.
822  * Later, during a call to segspt_reclaim() we will use this ppa array
823  * to page_unlock() all of the pages and then we will free this ppa list.
824  */
825 /*ARGSUSED*/
826 static int
827 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
828     struct page ***ppp, enum lock_type type, enum seg_rw rw)
829 {
830 	struct  shm_data *shmd = (struct shm_data *)seg->s_data;
831 	struct  seg	*sptseg = shmd->shm_sptseg;
832 	struct  spt_data *sptd = sptseg->s_data;
833 	pgcnt_t pg_idx, npages, tot_npages, npgs;
834 	struct  page **pplist, **pl, **ppa, *pp;
835 	struct  anon_map *amp;
836 	spgcnt_t	an_idx;
837 	int 	ret = ENOTSUP;
838 	uint_t	pl_built = 0;
839 	struct  anon *ap;
840 	struct  vnode *vp;
841 	u_offset_t off;
842 	pgcnt_t claim_availrmem = 0;
843 	uint_t	szc;
844 
845 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
846 	ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
847 
848 	/*
849 	 * We want to lock/unlock the entire ISM segment. Therefore,
850 	 * we will be using the underlying sptseg and its base address
851 	 * and length for the caching arguments.
852 	 */
853 	ASSERT(sptseg);
854 	ASSERT(sptd);
855 
856 	pg_idx = seg_page(seg, addr);
857 	npages = btopr(len);
858 
859 	/*
860 	 * check if the request is larger than the number of pages covered
861 	 * by amp
862 	 */
863 	if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
864 		*ppp = NULL;
865 		return (ENOTSUP);
866 	}
867 
868 	if (type == L_PAGEUNLOCK) {
869 		ASSERT(sptd->spt_ppa != NULL);
870 
871 		seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
872 		    sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
873 
874 		/*
875 		 * If someone is blocked while unmapping, we purge
876 		 * segment page cache and thus reclaim pplist synchronously
877 		 * without waiting for seg_pasync_thread. This speeds up
878 		 * unmapping in cases where munmap(2) is called, while
879 		 * raw async i/o is still in progress or where a thread
880 		 * exits on data fault in a multithreaded application.
881 		 */
882 		if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
883 		    (AS_ISUNMAPWAIT(seg->s_as) &&
884 		    shmd->shm_softlockcnt > 0)) {
885 			segspt_purge(seg);
886 		}
887 		return (0);
888 	}
889 
890 	/* The L_PAGELOCK case ... */
891 
892 	if (sptd->spt_flags & DISM_PPA_CHANGED) {
893 		segspt_purge(seg);
894 		/*
895 		 * for DISM the ppa needs to be rebuilt, since the
896 		 * number of locked pages may have changed
897 		 */
898 		*ppp = NULL;
899 		return (ENOTSUP);
900 	}
901 
902 	/*
903 	 * First try to find pages in segment page cache, without
904 	 * holding the segment lock.
905 	 */
906 	pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
907 	    S_WRITE, SEGP_FORCE_WIRED);
908 	if (pplist != NULL) {
909 		ASSERT(sptd->spt_ppa != NULL);
910 		ASSERT(sptd->spt_ppa == pplist);
911 		ppa = sptd->spt_ppa;
912 		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
913 			if (ppa[an_idx] == NULL) {
914 				seg_pinactive(seg, NULL, seg->s_base,
915 				    sptd->spt_amp->size, ppa,
916 				    S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
917 				*ppp = NULL;
918 				return (ENOTSUP);
919 			}
920 			if ((szc = ppa[an_idx]->p_szc) != 0) {
921 				npgs = page_get_pagecnt(szc);
922 				an_idx = P2ROUNDUP(an_idx + 1, npgs);
923 			} else {
924 				an_idx++;
925 			}
926 		}
927 		/*
928 		 * Since we cache the entire DISM segment, we want to
929 		 * set ppp to point to the first slot that corresponds
930 		 * to the requested addr, i.e. pg_idx.
931 		 */
932 		*ppp = &(sptd->spt_ppa[pg_idx]);
933 		return (0);
934 	}
935 
936 	mutex_enter(&sptd->spt_lock);
937 	/*
938 	 * try to find pages in segment page cache with mutex
939 	 */
940 	pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
941 	    S_WRITE, SEGP_FORCE_WIRED);
942 	if (pplist != NULL) {
943 		ASSERT(sptd->spt_ppa != NULL);
944 		ASSERT(sptd->spt_ppa == pplist);
945 		ppa = sptd->spt_ppa;
946 		for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
947 			if (ppa[an_idx] == NULL) {
948 				mutex_exit(&sptd->spt_lock);
949 				seg_pinactive(seg, NULL, seg->s_base,
950 				    sptd->spt_amp->size, ppa,
951 				    S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
952 				*ppp = NULL;
953 				return (ENOTSUP);
954 			}
955 			if ((szc = ppa[an_idx]->p_szc) != 0) {
956 				npgs = page_get_pagecnt(szc);
957 				an_idx = P2ROUNDUP(an_idx + 1, npgs);
958 			} else {
959 				an_idx++;
960 			}
961 		}
962 		/*
963 		 * Since we cache the entire DISM segment, we want to
964 		 * set ppp to point to the first slot that corresponds
965 		 * to the requested addr, i.e. pg_idx.
966 		 */
967 		mutex_exit(&sptd->spt_lock);
968 		*ppp = &(sptd->spt_ppa[pg_idx]);
969 		return (0);
970 	}
971 	if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
972 	    SEGP_FORCE_WIRED) == SEGP_FAIL) {
973 		mutex_exit(&sptd->spt_lock);
974 		*ppp = NULL;
975 		return (ENOTSUP);
976 	}
977 
978 	/*
979 	 * No need to worry about protections because DISM pages are always rw.
980 	 */
981 	pl = pplist = NULL;
982 	amp = sptd->spt_amp;
983 
984 	/*
985 	 * Do we need to build the ppa array?
986 	 */
987 	if (sptd->spt_ppa == NULL) {
988 		pgcnt_t lpg_cnt = 0;
989 
990 		pl_built = 1;
991 		tot_npages = btopr(sptd->spt_amp->size);
992 
993 		ASSERT(sptd->spt_pcachecnt == 0);
994 		pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
995 		pl = pplist;
996 
997 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
998 		for (an_idx = 0; an_idx < tot_npages; ) {
999 			ap = anon_get_ptr(amp->ahp, an_idx);
1000 			/*
1001 			 * Cache only mlocked pages. For large pages,
1002 			 * if one (constituent) page is mlocked, all
1003 			 * pages for that large page are cached as
1004 			 * well. This allows quick lookups in the ppa
1005 			 * array.
1006 			 */
1007 			if ((ap != NULL) && (lpg_cnt != 0 ||
1008 			    (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
1009 
1010 				swap_xlate(ap, &vp, &off);
1011 				pp = page_lookup(vp, off, SE_SHARED);
1012 				ASSERT(pp != NULL);
1013 				if (lpg_cnt == 0) {
1014 					lpg_cnt++;
1015 					/*
1016 					 * For a small page, we are done --
1017 					 * lpg_cnt is reset to 0 below.
1018 					 *
1019 					 * For a large page, we are guaranteed
1020 					 * to find the anon structures of all
1021 					 * constituent pages and a non-zero
1022 					 * lpg_cnt ensures that we don't test
1023 					 * for mlock for these. We are done
1024 					 * when lpg_cnt reaches (npgs + 1).
1025 					 * If we are not the first constituent
1026 					 * page, restart at the first one.
1027 					 */
1028 					npgs = page_get_pagecnt(pp->p_szc);
1029 					if (!IS_P2ALIGNED(an_idx, npgs)) {
1030 						an_idx = P2ALIGN(an_idx, npgs);
1031 						page_unlock(pp);
1032 						continue;
1033 					}
1034 				}
1035 				if (++lpg_cnt > npgs)
1036 					lpg_cnt = 0;
1037 
1038 				/*
1039 				 * availrmem is decremented only
1040 				 * for unlocked pages
1041 				 */
1042 				if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1043 					claim_availrmem++;
1044 				pplist[an_idx] = pp;
1045 			}
1046 			an_idx++;
1047 		}
1048 		ANON_LOCK_EXIT(&amp->a_rwlock);
1049 
1050 		if (claim_availrmem) {
1051 			mutex_enter(&freemem_lock);
1052 			if (availrmem < tune.t_minarmem + claim_availrmem) {
1053 				mutex_exit(&freemem_lock);
1054 				ret = ENOTSUP;
1055 				claim_availrmem = 0;
1056 				goto insert_fail;
1057 			} else {
1058 				availrmem -= claim_availrmem;
1059 			}
1060 			mutex_exit(&freemem_lock);
1061 		}
1062 
1063 		sptd->spt_ppa = pl;
1064 	} else {
1065 		/*
1066 		 * We already have a valid ppa[].
1067 		 */
1068 		pl = sptd->spt_ppa;
1069 	}
1070 
1071 	ASSERT(pl != NULL);
1072 
1073 	ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1074 	    sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1075 	    segspt_reclaim);
1076 	if (ret == SEGP_FAIL) {
1077 		/*
1078 		 * seg_pinsert failed. We return
1079 		 * ENOTSUP, so that the as_pagelock() code will
1080 		 * then try the slower F_SOFTLOCK path.
1081 		 */
1082 		if (pl_built) {
1083 			/*
1084 			 * No one else has referenced the ppa[].
1085 			 * We created it and we need to destroy it.
1086 			 */
1087 			sptd->spt_ppa = NULL;
1088 		}
1089 		ret = ENOTSUP;
1090 		goto insert_fail;
1091 	}
1092 
1093 	/*
1094 	 * In either case, we increment softlockcnt on the 'real' segment.
1095 	 */
1096 	sptd->spt_pcachecnt++;
1097 	atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1098 
1099 	ppa = sptd->spt_ppa;
1100 	for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1101 		if (ppa[an_idx] == NULL) {
1102 			mutex_exit(&sptd->spt_lock);
1103 			seg_pinactive(seg, NULL, seg->s_base,
1104 			    sptd->spt_amp->size,
1105 			    pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1106 			*ppp = NULL;
1107 			return (ENOTSUP);
1108 		}
1109 		if ((szc = ppa[an_idx]->p_szc) != 0) {
1110 			npgs = page_get_pagecnt(szc);
1111 			an_idx = P2ROUNDUP(an_idx + 1, npgs);
1112 		} else {
1113 			an_idx++;
1114 		}
1115 	}
1116 	/*
1117 	 * We can now drop the sptd->spt_lock since the ppa[]
1118 	 * exists and we have incremented pcachecnt.
1119 	 */
1120 	mutex_exit(&sptd->spt_lock);
1121 
1122 	/*
1123 	 * Since we cache the entire segment, we want to
1124 	 * set ppp to point to the first slot that corresponds
1125 	 * to the requested addr, i.e. pg_idx.
1126 	 */
1127 	*ppp = &(sptd->spt_ppa[pg_idx]);
1128 	return (0);
1129 
1130 insert_fail:
1131 	/*
1132 	 * We will only reach this code if we tried and failed.
1133 	 *
1134 	 * And we can drop the lock on the dummy seg, once we've failed
1135 	 * to set up a new ppa[].
1136 	 */
1137 	mutex_exit(&sptd->spt_lock);
1138 
1139 	if (pl_built) {
1140 		if (claim_availrmem) {
1141 			mutex_enter(&freemem_lock);
1142 			availrmem += claim_availrmem;
1143 			mutex_exit(&freemem_lock);
1144 		}
1145 
1146 		/*
1147 		 * We created pl and we need to destroy it.
1148 		 */
1149 		pplist = pl;
1150 		for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1151 			if (pplist[an_idx] != NULL)
1152 				page_unlock(pplist[an_idx]);
1153 		}
1154 		kmem_free(pl, sizeof (page_t *) * tot_npages);
1155 	}
1156 
1157 	if (shmd->shm_softlockcnt <= 0) {
1158 		if (AS_ISUNMAPWAIT(seg->s_as)) {
1159 			mutex_enter(&seg->s_as->a_contents);
1160 			if (AS_ISUNMAPWAIT(seg->s_as)) {
1161 				AS_CLRUNMAPWAIT(seg->s_as);
1162 				cv_broadcast(&seg->s_as->a_cv);
1163 			}
1164 			mutex_exit(&seg->s_as->a_contents);
1165 		}
1166 	}
1167 	*ppp = NULL;
1168 	return (ret);
1169 }
1170 
1171 
1172 
1173 /*
1174  * return locked pages over a given range.
1175  *
1176  * We will cache the entire ISM segment and save the pplist for the
1177  * entire segment in the ppa field of the underlying ISM segment structure.
1178  * Later, during a call to segspt_reclaim() we will use this ppa array
1179  * to page_unlock() all of the pages and then we will free this ppa list.
1180  */
1181 /*ARGSUSED*/
1182 static int
1183 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1184     struct page ***ppp, enum lock_type type, enum seg_rw rw)
1185 {
1186 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
1187 	struct seg	*sptseg = shmd->shm_sptseg;
1188 	struct spt_data *sptd = sptseg->s_data;
1189 	pgcnt_t np, page_index, npages;
1190 	caddr_t a, spt_base;
1191 	struct page **pplist, **pl, *pp;
1192 	struct anon_map *amp;
1193 	ulong_t anon_index;
1194 	int ret = ENOTSUP;
1195 	uint_t	pl_built = 0;
1196 	struct anon *ap;
1197 	struct vnode *vp;
1198 	u_offset_t off;
1199 
1200 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1201 	ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1202 
1203 
1204 	/*
1205 	 * We want to lock/unlock the entire ISM segment. Therefore,
1206 	 * we will be using the underlying sptseg and its base address
1207 	 * and length for the caching arguments.
1208 	 */
1209 	ASSERT(sptseg);
1210 	ASSERT(sptd);
1211 
1212 	if (sptd->spt_flags & SHM_PAGEABLE) {
1213 		return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1214 	}
1215 
1216 	page_index = seg_page(seg, addr);
1217 	npages = btopr(len);
1218 
1219 	/*
1220 	 * check if the request is larger than the number of pages covered
1221 	 * by amp
1222 	 */
1223 	if (page_index + npages > btopr(sptd->spt_amp->size)) {
1224 		*ppp = NULL;
1225 		return (ENOTSUP);
1226 	}
1227 
1228 	if (type == L_PAGEUNLOCK) {
1229 
1230 		ASSERT(sptd->spt_ppa != NULL);
1231 
1232 		seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1233 		    sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1234 
1235 		/*
1236 		 * If someone is blocked while unmapping, we purge
1237 		 * segment page cache and thus reclaim pplist synchronously
1238 		 * without waiting for seg_pasync_thread. This speeds up
1239 		 * unmapping in cases where munmap(2) is called, while
1240 		 * raw async i/o is still in progress or where a thread
1241 		 * exits on data fault in a multithreaded application.
1242 		 */
1243 		if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1244 			segspt_purge(seg);
1245 		}
1246 		return (0);
1247 	}
1248 
1249 	/* The L_PAGELOCK case... */
1250 
1251 	/*
1252 	 * First try to find pages in segment page cache, without
1253 	 * holding the segment lock.
1254 	 */
1255 	pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1256 	    S_WRITE, SEGP_FORCE_WIRED);
1257 	if (pplist != NULL) {
1258 		ASSERT(sptd->spt_ppa == pplist);
1259 		ASSERT(sptd->spt_ppa[page_index]);
1260 		/*
1261 		 * Since we cache the entire ISM segment, we want to
1262 		 * set ppp to point to the first slot that corresponds
1263 		 * to the requested addr, i.e. page_index.
1264 		 */
1265 		*ppp = &(sptd->spt_ppa[page_index]);
1266 		return (0);
1267 	}
1268 
1269 	mutex_enter(&sptd->spt_lock);
1270 
1271 	/*
1272 	 * try to find pages in segment page cache
1273 	 */
1274 	pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1275 	    S_WRITE, SEGP_FORCE_WIRED);
1276 	if (pplist != NULL) {
1277 		ASSERT(sptd->spt_ppa == pplist);
1278 		/*
1279 		 * Since we cache the entire segment, we want to
1280 		 * set ppp to point to the first slot that corresponds
1281 		 * to the requested addr, i.e. page_index.
1282 		 */
1283 		mutex_exit(&sptd->spt_lock);
1284 		*ppp = &(sptd->spt_ppa[page_index]);
1285 		return (0);
1286 	}
1287 
1288 	if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1289 	    SEGP_FORCE_WIRED) == SEGP_FAIL) {
1290 		mutex_exit(&sptd->spt_lock);
1291 		*ppp = NULL;
1292 		return (ENOTSUP);
1293 	}
1294 
1295 	/*
1296 	 * No need to worry about protections because ISM pages
1297 	 * are always rw.
1298 	 */
1299 	pl = pplist = NULL;
1300 
1301 	/*
1302 	 * Do we need to build the ppa array?
1303 	 */
1304 	if (sptd->spt_ppa == NULL) {
1305 		ASSERT(sptd->spt_ppa == pplist);
1306 
1307 		spt_base = sptseg->s_base;
1308 		pl_built = 1;
1309 
1310 		/*
1311 		 * availrmem is decremented once during anon_swap_adjust()
1312 		 * and is incremented during the anon_unresv(), which is
1313 		 * called from shm_rm_amp() when the segment is destroyed.
1314 		 */
1315 		amp = sptd->spt_amp;
1316 		ASSERT(amp != NULL);
1317 
1318 		/* pcachecnt is protected by sptd->spt_lock */
1319 		ASSERT(sptd->spt_pcachecnt == 0);
1320 		pplist = kmem_zalloc(sizeof (page_t *)
1321 		    * btopr(sptd->spt_amp->size), KM_SLEEP);
1322 		pl = pplist;
1323 
1324 		anon_index = seg_page(sptseg, spt_base);
1325 
1326 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1327 		for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1328 		    a += PAGESIZE, anon_index++, pplist++) {
1329 			ap = anon_get_ptr(amp->ahp, anon_index);
1330 			ASSERT(ap != NULL);
1331 			swap_xlate(ap, &vp, &off);
1332 			pp = page_lookup(vp, off, SE_SHARED);
1333 			ASSERT(pp != NULL);
1334 			*pplist = pp;
1335 		}
1336 		ANON_LOCK_EXIT(&amp->a_rwlock);
1337 
1338 		if (a < (spt_base + sptd->spt_amp->size)) {
1339 			ret = ENOTSUP;
1340 			goto insert_fail;
1341 		}
1342 		sptd->spt_ppa = pl;
1343 	} else {
1344 		/*
1345 		 * We already have a valid ppa[].
1346 		 */
1347 		pl = sptd->spt_ppa;
1348 	}
1349 
1350 	ASSERT(pl != NULL);
1351 
1352 	ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1353 	    sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1354 	    segspt_reclaim);
1355 	if (ret == SEGP_FAIL) {
1356 		/*
1357 		 * seg_pinsert failed. We return
1358 		 * ENOTSUP, so that the as_pagelock() code will
1359 		 * then try the slower F_SOFTLOCK path.
1360 		 */
1361 		if (pl_built) {
1362 			/*
1363 			 * No one else has referenced the ppa[].
1364 			 * We created it and we need to destroy it.
1365 			 */
1366 			sptd->spt_ppa = NULL;
1367 		}
1368 		ret = ENOTSUP;
1369 		goto insert_fail;
1370 	}
1371 
1372 	/*
1373 	 * In either case, we increment softlockcnt on the 'real' segment.
1374 	 */
1375 	sptd->spt_pcachecnt++;
1376 	atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1377 
1378 	/*
1379 	 * We can now drop the sptd->spt_lock since the ppa[]
1380 	 * exists and we have incremented pcachecnt.
1381 	 */
1382 	mutex_exit(&sptd->spt_lock);
1383 
1384 	/*
1385 	 * Since we cache the entire segment, we want to
1386 	 * set ppp to point to the first slot that corresponds
1387 	 * to the requested addr, i.e. page_index.
1388 	 */
1389 	*ppp = &(sptd->spt_ppa[page_index]);
1390 	return (0);
1391 
1392 insert_fail:
1393 	/*
1394 	 * We will only reach this code if we tried and failed.
1395 	 *
1396 	 * And we can drop the lock on the dummy seg, once we've failed
1397 	 * to set up a new ppa[].
1398 	 */
1399 	mutex_exit(&sptd->spt_lock);
1400 
1401 	if (pl_built) {
1402 		/*
1403 		 * We created pl and we need to destroy it.
1404 		 */
1405 		pplist = pl;
1406 		np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1407 		while (np) {
1408 			page_unlock(*pplist);
1409 			np--;
1410 			pplist++;
1411 		}
1412 		kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1413 	}
1414 	if (shmd->shm_softlockcnt <= 0) {
1415 		if (AS_ISUNMAPWAIT(seg->s_as)) {
1416 			mutex_enter(&seg->s_as->a_contents);
1417 			if (AS_ISUNMAPWAIT(seg->s_as)) {
1418 				AS_CLRUNMAPWAIT(seg->s_as);
1419 				cv_broadcast(&seg->s_as->a_cv);
1420 			}
1421 			mutex_exit(&seg->s_as->a_contents);
1422 		}
1423 	}
1424 	*ppp = NULL;
1425 	return (ret);
1426 }
1427 
1428 /*
1429  * purge any cached pages in the I/O page cache
1430  */
1431 static void
1432 segspt_purge(struct seg *seg)
1433 {
1434 	seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1435 }
1436 
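/*
 * Reclaim callback registered with the pcache via seg_pinsert() in the
 * pagelock routines above.  Once the last cached reference is dropped we
 * unlock every page in the ppa list, give back any availrmem claimed for
 * DISM, free the ppa array, and decrement softlockcnt on the real segment.
 */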
1437 static int
1438 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1439     enum seg_rw rw, int async)
1440 {
1441 	struct seg *seg = (struct seg *)ptag;
1442 	struct	shm_data *shmd = (struct shm_data *)seg->s_data;
1443 	struct	seg	*sptseg;
1444 	struct	spt_data *sptd;
1445 	pgcnt_t npages, i, free_availrmem = 0;
1446 	int	done = 0;
1447 
1448 #ifdef lint
1449 	addr = addr;
1450 #endif
1451 	sptseg = shmd->shm_sptseg;
1452 	sptd = sptseg->s_data;
1453 	npages = (len >> PAGESHIFT);
1454 	ASSERT(npages);
1455 	ASSERT(sptd->spt_pcachecnt != 0);
1456 	ASSERT(sptd->spt_ppa == pplist);
1457 	ASSERT(npages == btopr(sptd->spt_amp->size));
1458 	ASSERT(async || AS_LOCK_HELD(seg->s_as));
1459 
1460 	/*
1461 	 * Acquire the lock on the dummy seg and destroy the
1462 	 * ppa array IF this is the last pcachecnt.
1463 	 */
1464 	mutex_enter(&sptd->spt_lock);
1465 	if (--sptd->spt_pcachecnt == 0) {
1466 		for (i = 0; i < npages; i++) {
1467 			if (pplist[i] == NULL) {
1468 				continue;
1469 			}
1470 			if (rw == S_WRITE) {
1471 				hat_setrefmod(pplist[i]);
1472 			} else {
1473 				hat_setref(pplist[i]);
1474 			}
1475 			if ((sptd->spt_flags & SHM_PAGEABLE) &&
1476 			    (sptd->spt_ppa_lckcnt[i] == 0))
1477 				free_availrmem++;
1478 			page_unlock(pplist[i]);
1479 		}
1480 		if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1481 			mutex_enter(&freemem_lock);
1482 			availrmem += free_availrmem;
1483 			mutex_exit(&freemem_lock);
1484 		}
1485 		/*
1486 		 * Since we want to cache/uncache the entire ISM segment,
1487 		 * we will track the pplist in a segspt-specific field,
1488 		 * ppa, that is initialized at the time we add an entry to
1489 		 * the cache.
1490 		 */
1491 		ASSERT(sptd->spt_pcachecnt == 0);
1492 		kmem_free(pplist, sizeof (page_t *) * npages);
1493 		sptd->spt_ppa = NULL;
1494 		sptd->spt_flags &= ~DISM_PPA_CHANGED;
1495 		sptd->spt_gen++;
1496 		cv_broadcast(&sptd->spt_cv);
1497 		done = 1;
1498 	}
1499 	mutex_exit(&sptd->spt_lock);
1500 
1501 	/*
1502 	 * If we are the pcache async thread, or were called via
1503 	 * seg_ppurge_wiredpp(), we may not hold the AS lock (in this case the
1504 	 * async argument is not 0). This means that if softlockcnt drops to 0
1505 	 * after the decrement below, the address space may get freed. We
1506 	 * can't allow that, since after the decrement we still need to access
1507 	 * the as structure for a possible wakeup of unmap waiters. To prevent
1508 	 * the as from disappearing we take this segment's shm_segfree_syncmtx.
1509 	 * segspt_shmfree() also takes this mutex as a barrier to make sure
1510 	 * this routine completes before the segment is freed.
1511 	 *
1512 	 * The second complication we have to deal with in the async case is
1513 	 * the possibility of a missed wakeup of the unmap wait thread. When
1514 	 * we don't hold the as lock here we may take the a_contents lock
1515 	 * before the unmap wait thread that was first to see that softlockcnt
1516 	 * was still not 0. As a result we'll fail to wake up the unmap wait
1517 	 * thread. To avoid this race we set the nounmapwait flag in the as
1518 	 * structure if we drop softlockcnt to 0 while async is not 0.  The
1519 	 * unmapwait thread will not block if this flag is set.
1520 	 */
1521 	if (async)
1522 		mutex_enter(&shmd->shm_segfree_syncmtx);
1523 
1524 	/*
1525 	 * Now decrement softlockcnt.
1526 	 */
1527 	ASSERT(shmd->shm_softlockcnt > 0);
1528 	atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1529 
1530 	if (shmd->shm_softlockcnt <= 0) {
1531 		if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1532 			mutex_enter(&seg->s_as->a_contents);
1533 			if (async)
1534 				AS_SETNOUNMAPWAIT(seg->s_as);
1535 			if (AS_ISUNMAPWAIT(seg->s_as)) {
1536 				AS_CLRUNMAPWAIT(seg->s_as);
1537 				cv_broadcast(&seg->s_as->a_cv);
1538 			}
1539 			mutex_exit(&seg->s_as->a_contents);
1540 		}
1541 	}
1542 
1543 	if (async)
1544 		mutex_exit(&shmd->shm_segfree_syncmtx);
1545 
1546 	return (done);
1547 }
1548 
1549 /*
1550  * Do a F_SOFTUNLOCK call over the range requested.
1551  * The range must have already been F_SOFTLOCK'ed.
1552  *
1553  * The calls to acquire and release the anon map lock mutex were
1554  * removed in order to avoid a deadly embrace during a DR
1555  * memory delete operation.  (E.g. DR blocks while waiting for an
1556  * exclusive lock on a page that is being used for kaio; the
1557  * thread that will complete the kaio and call segspt_softunlock
1558  * blocks on the anon map lock; another thread holding the anon
1559  * map lock blocks on another page lock via the segspt_shmfault
1560  * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1561  *
1562  * The appropriateness of the removal is based upon the following:
1563  * 1. If we are holding a segment's reader lock and the page is held
1564  * shared, then the corresponding element in anonmap which points to
1565  * anon struct cannot change and there is no need to acquire the
1566  * anonymous map lock.
1567  * 2. Threads in segspt_softunlock have a reader lock on the segment
1568  * and already have the shared page lock, so we are guaranteed that
1569  * the anon map slot cannot change and therefore can call anon_get_ptr()
1570  * without grabbing the anonymous map lock.
1571  * 3. Threads that softlock a shared page break copy-on-write, even if
1572  * it's a read.  Thus cow faults can be ignored with respect to soft
1573  * unlocking, since the breaking of cow means that the anon slot(s) will
1574  * not be shared.
1575  */
1576 static void
1577 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1578     size_t len, enum seg_rw rw)
1579 {
1580 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
1581 	struct seg	*sptseg;
1582 	struct spt_data *sptd;
1583 	page_t *pp;
1584 	caddr_t adr;
1585 	struct vnode *vp;
1586 	u_offset_t offset;
1587 	ulong_t anon_index;
1588 	struct anon_map *amp;		/* XXX - for locknest */
1589 	struct anon *ap = NULL;
1590 	pgcnt_t npages;
1591 
1592 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1593 
1594 	sptseg = shmd->shm_sptseg;
1595 	sptd = sptseg->s_data;
1596 
1597 	/*
1598 	 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1599 	 * and therefore their pages are SE_SHARED locked
1600 	 * for the entire life of the segment.
1601 	 */
1602 	if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1603 	    ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1604 		goto softlock_decrement;
1605 	}
1606 
1607 	/*
1608 	 * Any thread is free to do a page_find and
1609 	 * page_unlock() on the pages within this seg.
1610 	 *
1611 	 * We are already holding the as->a_lock on the user's
1612 	 * real segment, but we need to hold the a_lock on the
1613 	 * underlying dummy as. This is mostly to satisfy the
1614 	 * underlying HAT layer.
1615 	 */
1616 	AS_LOCK_ENTER(sptseg->s_as, RW_READER);
1617 	hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1618 	AS_LOCK_EXIT(sptseg->s_as);
1619 
1620 	amp = sptd->spt_amp;
1621 	ASSERT(amp != NULL);
1622 	anon_index = seg_page(sptseg, sptseg_addr);
1623 
1624 	for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1625 		ap = anon_get_ptr(amp->ahp, anon_index++);
1626 		ASSERT(ap != NULL);
1627 		swap_xlate(ap, &vp, &offset);
1628 
1629 		/*
1630 		 * Use page_find() instead of page_lookup() to
1631 		 * find the page since we know that it has a
1632 		 * "shared" lock.
1633 		 */
1634 		pp = page_find(vp, offset);
1635 		ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1636 		if (pp == NULL) {
1637 			panic("segspt_softunlock: "
1638 			    "addr %p, ap %p, vp %p, off %llx",
1639 			    (void *)adr, (void *)ap, (void *)vp, offset);
1640 			/*NOTREACHED*/
1641 		}
1642 
1643 		if (rw == S_WRITE) {
1644 			hat_setrefmod(pp);
1645 		} else if (rw != S_OTHER) {
1646 			hat_setref(pp);
1647 		}
1648 		page_unlock(pp);
1649 	}
1650 
1651 softlock_decrement:
1652 	npages = btopr(len);
1653 	ASSERT(shmd->shm_softlockcnt >= npages);
1654 	atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1655 	if (shmd->shm_softlockcnt == 0) {
1656 		/*
1657 		 * All SOFTLOCKS are gone. Wakeup any waiting
1658 		 * unmappers so they can try again to unmap.
1659 		 * Check for waiters first without the mutex
1660 		 * held so we don't always grab the mutex on
1661 		 * softunlocks.
1662 		 */
1663 		if (AS_ISUNMAPWAIT(seg->s_as)) {
1664 			mutex_enter(&seg->s_as->a_contents);
1665 			if (AS_ISUNMAPWAIT(seg->s_as)) {
1666 				AS_CLRUNMAPWAIT(seg->s_as);
1667 				cv_broadcast(&seg->s_as->a_cv);
1668 			}
1669 			mutex_exit(&seg->s_as->a_contents);
1670 		}
1671 	}
1672 }
1673 
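/*
 * Attach a process address space to an existing ISM/DISM segment.  We
 * allocate the per-attach shm_data, set up hat_share() of the SPT
 * mappings (for DISM only when the platform supports
 * HAT_DYNAMIC_ISM_UNMAP), allocate the DISM per-page lock map if
 * needed, and take a reference on the shared anon map.
 */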
1674 int
1675 segspt_shmattach(struct seg **segpp, void *argsp)
1676 {
1677 	struct seg *seg = *segpp;
1678 	struct shm_data *shmd_arg = (struct shm_data *)argsp;
1679 	struct shm_data *shmd;
1680 	struct anon_map *shm_amp = shmd_arg->shm_amp;
1681 	struct spt_data *sptd;
1682 	int error = 0;
1683 
1684 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1685 
1686 	shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1687 	if (shmd == NULL)
1688 		return (ENOMEM);
1689 
1690 	shmd->shm_sptas = shmd_arg->shm_sptas;
1691 	shmd->shm_amp = shm_amp;
1692 	shmd->shm_sptseg = shmd_arg->shm_sptseg;
1693 
1694 	(void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1695 	    NULL, 0, seg->s_size);
1696 
1697 	mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1698 
1699 	seg->s_data = (void *)shmd;
1700 	seg->s_ops = &segspt_shmops;
1701 	seg->s_szc = shmd->shm_sptseg->s_szc;
1702 	sptd = shmd->shm_sptseg->s_data;
1703 
1704 	if (sptd->spt_flags & SHM_PAGEABLE) {
1705 		if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1706 		    KM_NOSLEEP)) == NULL) {
1707 			seg->s_data = (void *)NULL;
1708 			kmem_free(shmd, (sizeof (*shmd)));
1709 			return (ENOMEM);
1710 		}
1711 		shmd->shm_lckpgs = 0;
1712 		if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1713 			if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1714 			    shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1715 			    seg->s_size, seg->s_szc)) != 0) {
1716 				kmem_free(shmd->shm_vpage,
1717 				    btopr(shm_amp->size));
1718 			}
1719 		}
1720 	} else {
1721 		error = hat_share(seg->s_as->a_hat, seg->s_base,
1722 		    shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1723 		    seg->s_size, seg->s_szc);
1724 	}
1725 	if (error) {
1726 		seg->s_szc = 0;
1727 		seg->s_data = (void *)NULL;
1728 		kmem_free(shmd, (sizeof (*shmd)));
1729 	} else {
1730 		ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1731 		shm_amp->refcnt++;
1732 		ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1733 	}
1734 	return (error);
1735 }
1736 
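/*
 * Detach from the shared segment.  The unmap must cover the whole
 * segment; cached pages are purged first if anything is still
 * softlocked, locks are dropped, and the shared translations are torn
 * down with hat_unshare().
 */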
1737 int
1738 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1739 {
1740 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
1741 	int reclaim = 1;
1742 
1743 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1744 retry:
1745 	if (shmd->shm_softlockcnt > 0) {
1746 		if (reclaim == 1) {
1747 			segspt_purge(seg);
1748 			reclaim = 0;
1749 			goto retry;
1750 		}
1751 		return (EAGAIN);
1752 	}
1753 
1754 	if (ssize != seg->s_size) {
1755 #ifdef DEBUG
1756 		cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1757 		    ssize, seg->s_size);
1758 #endif
1759 		return (EINVAL);
1760 	}
1761 
1762 	(void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1763 	    NULL, 0);
1764 	hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1765 
1766 	seg_free(seg);
1767 
1768 	return (0);
1769 }
1770 
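/*
 * Free the per-attach data when the segment is destroyed: drop any
 * remaining MC_LOCK locks, release our reference on the shared anon
 * map, and free the DISM per-page lock map.
 */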
1771 void
1772 segspt_shmfree(struct seg *seg)
1773 {
1774 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
1775 	struct anon_map *shm_amp = shmd->shm_amp;
1776 
1777 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1778 
1779 	(void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1780 	    MC_UNLOCK, NULL, 0);
1781 
1782 	/*
1783 	 * Need to increment refcnt when attaching
1784 	 * and decrement when detaching because of dup().
1785 	 */
1786 	ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1787 	shm_amp->refcnt--;
1788 	ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1789 
1790 	if (shmd->shm_vpage) {	/* only for DISM */
1791 		kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1792 		shmd->shm_vpage = NULL;
1793 	}
1794 
1795 	/*
1796 	 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1797 	 * still working with this segment without holding as lock.
1798 	 */
1799 	ASSERT(shmd->shm_softlockcnt == 0);
1800 	mutex_enter(&shmd->shm_segfree_syncmtx);
1801 	mutex_destroy(&shmd->shm_segfree_syncmtx);
1802 
1803 	kmem_free(shmd, sizeof (*shmd));
1804 }
1805 
1806 /*ARGSUSED*/
1807 int
1808 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1809 {
1810 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1811 
1812 	/*
1813 	 * A shared page table is more than a shared mapping.
1814 	 *  An individual process sharing the page tables can't change prot,
1815 	 *  because there is only one set of page tables.
1816 	 *  This will be allowed once private page tables are
1817 	 *  supported.
1818 	 */
1819 /* need to return correct status error? */
1820 	return (0);
1821 }
1822 
1823 
1824 faultcode_t
1825 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1826     size_t len, enum fault_type type, enum seg_rw rw)
1827 {
1828 	struct  shm_data 	*shmd = (struct shm_data *)seg->s_data;
1829 	struct  seg		*sptseg = shmd->shm_sptseg;
1830 	struct  as		*curspt = shmd->shm_sptas;
1831 	struct  spt_data 	*sptd = sptseg->s_data;
1832 	pgcnt_t npages;
1833 	size_t  size;
1834 	caddr_t segspt_addr, shm_addr;
1835 	page_t  **ppa;
1836 	int	i;
1837 	ulong_t an_idx = 0;
1838 	int	err = 0;
1839 	int	dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1840 	size_t	pgsz;
1841 	pgcnt_t	pgcnt;
1842 	caddr_t	a;
1843 	pgcnt_t	pidx;
1844 
1845 #ifdef lint
1846 	hat = hat;
1847 #endif
1848 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1849 
1850 	/*
1851 	 * Because of the way spt is implemented
1852 	 * the realsize of the segment does not have to be
1853 	 * equal to the segment size itself. The segment size is
1854 	 * often in multiples of a page size larger than PAGESIZE.
1855 	 * The realsize is rounded up to the nearest PAGESIZE
1856 	 * based on what the user requested. This is a bit of
1857 	 * ugliness that is historical but not easily fixed
1858 	 * without re-designing the higher levels of ISM.
1859 	 */
1860 	ASSERT(addr >= seg->s_base);
1861 	if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1862 		return (FC_NOMAP);
1863 	/*
1864 	 * For all of the following cases except F_PROT, we need to
1865 	 * make any necessary adjustments to addr and len
1866 	 * and get all of the necessary page_t's into an array called ppa[].
1867 	 *
1868 	 * The code in shmat() forces base addr and len of ISM segment
1869 	 * to be aligned to largest page size supported. Therefore,
1870 	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1871 	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1872 	 * in large pagesize chunks, or else we will screw up the HAT
1873 	 * layer by calling hat_memload_array() with differing page sizes
1874 	 * over a given virtual range.
1875 	 */
1876 	pgsz = page_get_pagesize(sptseg->s_szc);
1877 	pgcnt = page_get_pagecnt(sptseg->s_szc);
1878 	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1879 	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1880 	npages = btopr(size);
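	/*
	 * For example, assuming 8K base pages and a 4M underlying page
	 * size, a fault at seg->s_base + 0x401000 for len 0x2000 gives
	 * shm_addr = seg->s_base + 0x400000, size = 0x400000 and
	 * npages = 512, i.e. one full large page worth of translations.
	 */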
1881 
1882 	/*
1883 	 * Now we need to convert from addr in segshm to addr in segspt.
1884 	 */
1885 	an_idx = seg_page(seg, shm_addr);
1886 	segspt_addr = sptseg->s_base + ptob(an_idx);
1887 
1888 	ASSERT((segspt_addr + ptob(npages)) <=
1889 	    (sptseg->s_base + sptd->spt_realsize));
1890 	ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1891 
1892 	switch (type) {
1893 
1894 	case F_SOFTLOCK:
1895 
1896 		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1897 		/*
1898 		 * Fall through to the F_INVAL case to load up the hat layer
1899 		 * entries with the HAT_LOAD_LOCK flag.
1900 		 */
1901 		/* FALLTHRU */
1902 	case F_INVAL:
1903 
1904 		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1905 			return (FC_NOMAP);
1906 
1907 		ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1908 
1909 		err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1910 		if (err != 0) {
1911 			if (type == F_SOFTLOCK) {
1912 				atomic_add_long((ulong_t *)(
1913 				    &(shmd->shm_softlockcnt)), -npages);
1914 			}
1915 			goto dism_err;
1916 		}
1917 		AS_LOCK_ENTER(sptseg->s_as, RW_READER);
1918 		a = segspt_addr;
1919 		pidx = 0;
1920 		if (type == F_SOFTLOCK) {
1921 
1922 			/*
1923 			 * Load up the translation keeping it
1924 			 * locked and don't unlock the page.
1925 			 */
1926 			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1927 				hat_memload_array(sptseg->s_as->a_hat,
1928 				    a, pgsz, &ppa[pidx], sptd->spt_prot,
1929 				    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1930 			}
1931 		} else {
1932 			/*
1933 			 * Migrate pages marked for migration
1934 			 */
1935 			if (lgrp_optimizations())
1936 				page_migrate(seg, shm_addr, ppa, npages);
1937 
1938 			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1939 				hat_memload_array(sptseg->s_as->a_hat,
1940 				    a, pgsz, &ppa[pidx],
1941 				    sptd->spt_prot,
1942 				    HAT_LOAD_SHARE);
1943 			}
1944 
1945 			/*
1946 			 * And now drop the SE_SHARED lock(s).
1947 			 */
1948 			if (dyn_ism_unmap) {
1949 				for (i = 0; i < npages; i++) {
1950 					page_unlock(ppa[i]);
1951 				}
1952 			}
1953 		}
1954 
1955 		if (!dyn_ism_unmap) {
1956 			if (hat_share(seg->s_as->a_hat, shm_addr,
1957 			    curspt->a_hat, segspt_addr, ptob(npages),
1958 			    seg->s_szc) != 0) {
1959 				panic("hat_share err in DISM fault");
1960 				/* NOTREACHED */
1961 			}
1962 			if (type == F_INVAL) {
1963 				for (i = 0; i < npages; i++) {
1964 					page_unlock(ppa[i]);
1965 				}
1966 			}
1967 		}
1968 		AS_LOCK_EXIT(sptseg->s_as);
1969 dism_err:
1970 		kmem_free(ppa, npages * sizeof (page_t *));
1971 		return (err);
1972 
1973 	case F_SOFTUNLOCK:
1974 
1975 		/*
1976 		 * This is a bit ugly, we pass in the real seg pointer,
1977 		 * but the segspt_addr is the virtual address within the
1978 		 * dummy seg.
1979 		 */
1980 		segspt_softunlock(seg, segspt_addr, size, rw);
1981 		return (0);
1982 
1983 	case F_PROT:
1984 
1985 		/*
1986 		 * This takes care of the unusual case where a user
1987 		 * allocates a stack in shared memory and a register
1988 		 * window overflow is written to that stack page before
1989 		 * it is otherwise modified.
1990 		 *
1991 		 * We can get away with this because ISM segments are
1992 		 * always rw. Other than this unusual case, there
1993 		 * should be no instances of protection violations.
1994 		 */
1995 		return (0);
1996 
1997 	default:
1998 #ifdef DEBUG
1999 		panic("segspt_dismfault default type?");
2000 #else
2001 		return (FC_NOMAP);
2002 #endif
2003 	}
2004 }
2005 
2006 
2007 faultcode_t
2008 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2009     size_t len, enum fault_type type, enum seg_rw rw)
2010 {
2011 	struct shm_data 	*shmd = (struct shm_data *)seg->s_data;
2012 	struct seg		*sptseg = shmd->shm_sptseg;
2013 	struct as		*curspt = shmd->shm_sptas;
2014 	struct spt_data 	*sptd   = sptseg->s_data;
2015 	pgcnt_t npages;
2016 	size_t size;
2017 	caddr_t sptseg_addr, shm_addr;
2018 	page_t *pp, **ppa;
2019 	int	i;
2020 	u_offset_t offset;
2021 	ulong_t anon_index = 0;
2022 	struct vnode *vp;
2023 	struct anon_map *amp;		/* XXX - for locknest */
2024 	struct anon *ap = NULL;
2025 	size_t		pgsz;
2026 	pgcnt_t		pgcnt;
2027 	caddr_t		a;
2028 	pgcnt_t		pidx;
2029 	size_t		sz;
2030 
2031 #ifdef lint
2032 	hat = hat;
2033 #endif
2034 
2035 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2036 
2037 	if (sptd->spt_flags & SHM_PAGEABLE) {
2038 		return (segspt_dismfault(hat, seg, addr, len, type, rw));
2039 	}
2040 
2041 	/*
2042 	 * Because of the way spt is implemented
2043 	 * the realsize of the segment does not have to be
2044 	 * equal to the segment size itself. The segment size is
2045 	 * often in multiples of a page size larger than PAGESIZE.
2046 	 * The realsize is rounded up to the nearest PAGESIZE
2047 	 * based on what the user requested. This is a bit of
2048 	 * ugliness that is historical but not easily fixed
2049 	 * without re-designing the higher levels of ISM.
2050 	 */
2051 	ASSERT(addr >= seg->s_base);
2052 	if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2053 		return (FC_NOMAP);
2054 	/*
2055 	 * For all of the following cases except F_PROT, we need to
2056 	 * make any necessary adjustments to addr and len
2057 	 * and get all of the necessary page_t's into an array called ppa[].
2058 	 *
2059 	 * The code in shmat() forces base addr and len of ISM segment
2060 	 * to be aligned to largest page size supported. Therefore,
2061 	 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2062 	 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2063 	 * in large pagesize chunks, or else we will screw up the HAT
2064 	 * layer by calling hat_memload_array() with differing page sizes
2065 	 * over a given virtual range.
2066 	 */
2067 	pgsz = page_get_pagesize(sptseg->s_szc);
2068 	pgcnt = page_get_pagecnt(sptseg->s_szc);
2069 	shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2070 	size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2071 	npages = btopr(size);
2072 
2073 	/*
2074 	 * Now we need to convert from addr in segshm to addr in segspt.
2075 	 */
2076 	anon_index = seg_page(seg, shm_addr);
2077 	sptseg_addr = sptseg->s_base + ptob(anon_index);
2078 
2079 	/*
2080 	 * And now we may have to adjust npages downward if we have
2081 	 * exceeded the realsize of the segment or initial anon
2082 	 * allocations.
2083 	 */
2084 	if ((sptseg_addr + ptob(npages)) >
2085 	    (sptseg->s_base + sptd->spt_realsize))
2086 		size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2087 
2088 	npages = btopr(size);
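	/*
	 * For example, if spt_realsize is 10M and the aligned request
	 * covered 4M starting at sptseg->s_base + 8M (assuming 8K base
	 * pages), size is trimmed to 2M and npages drops from 512 to 256.
	 */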
2089 
2090 	ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2091 	ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2092 
2093 	switch (type) {
2094 
2095 	case F_SOFTLOCK:
2096 
2097 		/*
2098 		 * availrmem is decremented once during anon_swap_adjust()
2099 		 * and is incremented during the anon_unresv(), which is
2100 		 * called from shm_rm_amp() when the segment is destroyed.
2101 		 */
2102 		atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2103 		/*
2104 		 * Some platforms assume that ISM pages are SE_SHARED
2105 		 * locked for the entire life of the segment.
2106 		 */
2107 		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2108 			return (0);
2109 		/*
2110 		 * Fall through to the F_INVAL case to load up the hat layer
2111 		 * entries with the HAT_LOAD_LOCK flag.
2112 		 */
2113 
2114 		/* FALLTHRU */
2115 	case F_INVAL:
2116 
2117 		if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2118 			return (FC_NOMAP);
2119 
2120 		/*
2121 		 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2122 		 * may still rely on this call to hat_share(). That
2123 		 * would imply that those hats can fault on a
2124 		 * HAT_LOAD_LOCK translation, which would seem
2125 		 * contradictory.
2126 		 */
2127 		if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2128 			if (hat_share(seg->s_as->a_hat, seg->s_base,
2129 			    curspt->a_hat, sptseg->s_base,
2130 			    sptseg->s_size, sptseg->s_szc) != 0) {
2131 				panic("hat_share error in ISM fault");
2132 				/*NOTREACHED*/
2133 			}
2134 			return (0);
2135 		}
2136 		ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2137 
2138 		/*
2139 		 * There is no need to lock the real seg here,
2140 		 * because all of our work will be on the underlying
2141 		 * dummy seg.
2142 		 *
2143 		 * sptseg_addr and npages now account for large pages.
2144 		 */
2145 		amp = sptd->spt_amp;
2146 		ASSERT(amp != NULL);
2147 		anon_index = seg_page(sptseg, sptseg_addr);
2148 
2149 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2150 		for (i = 0; i < npages; i++) {
2151 			ap = anon_get_ptr(amp->ahp, anon_index++);
2152 			ASSERT(ap != NULL);
2153 			swap_xlate(ap, &vp, &offset);
2154 			pp = page_lookup(vp, offset, SE_SHARED);
2155 			ASSERT(pp != NULL);
2156 			ppa[i] = pp;
2157 		}
2158 		ANON_LOCK_EXIT(&amp->a_rwlock);
2159 		ASSERT(i == npages);
2160 
2161 		/*
2162 		 * We are already holding the as->a_lock on the user's
2163 		 * real segment, but we need to hold the a_lock on the
2164 		 * underlying dummy as. This is mostly to satisfy the
2165 		 * underlying HAT layer.
2166 		 */
2167 		AS_LOCK_ENTER(sptseg->s_as, RW_READER);
2168 		a = sptseg_addr;
2169 		pidx = 0;
2170 		if (type == F_SOFTLOCK) {
2171 			/*
2172 			 * Load up the translation keeping it
2173 			 * locked and don't unlock the page.
2174 			 */
2175 			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2176 				sz = MIN(pgsz, ptob(npages - pidx));
2177 				hat_memload_array(sptseg->s_as->a_hat, a,
2178 				    sz, &ppa[pidx], sptd->spt_prot,
2179 				    HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2180 			}
2181 		} else {
2182 			/*
2183 			 * Migrate pages marked for migration.
2184 			 */
2185 			if (lgrp_optimizations())
2186 				page_migrate(seg, shm_addr, ppa, npages);
2187 
2188 			for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2189 				sz = MIN(pgsz, ptob(npages - pidx));
2190 				hat_memload_array(sptseg->s_as->a_hat,
2191 				    a, sz, &ppa[pidx],
2192 				    sptd->spt_prot, HAT_LOAD_SHARE);
2193 			}
2194 
2195 			/*
2196 			 * And now drop the SE_SHARED lock(s).
2197 			 */
2198 			for (i = 0; i < npages; i++)
2199 				page_unlock(ppa[i]);
2200 		}
2201 		AS_LOCK_EXIT(sptseg->s_as);
2202 
2203 		kmem_free(ppa, sizeof (page_t *) * npages);
2204 		return (0);
2205 	case F_SOFTUNLOCK:
2206 
2207 		/*
2208 		 * This is a bit ugly, we pass in the real seg pointer,
2209 		 * but the sptseg_addr is the virtual address within the
2210 		 * dummy seg.
2211 		 */
2212 		segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2213 		return (0);
2214 
2215 	case F_PROT:
2216 
2217 		/*
2218 		 * This takes care of the unusual case where a user
2219 		 * allocates a stack in shared memory and a register
2220 		 * window overflow is written to that stack page before
2221 		 * it is otherwise modified.
2222 		 *
2223 		 * We can get away with this because ISM segments are
2224 		 * always rw. Other than this unusual case, there
2225 		 * should be no instances of protection violations.
2226 		 */
2227 		return (0);
2228 
2229 	default:
2230 #ifdef DEBUG
2231 		cmn_err(CE_WARN, "segspt_shmfault default type?");
2232 #endif
2233 		return (FC_NOMAP);
2234 	}
2235 }
2236 
2237 /*ARGSUSED*/
2238 static faultcode_t
2239 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2240 {
2241 	return (0);
2242 }
2243 
2244 /*ARGSUSED*/
2245 static int
2246 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2247 {
2248 	return (0);
2249 }
2250 
2251 /*ARGSUSED*/
2252 static size_t
2253 segspt_shmswapout(struct seg *seg)
2254 {
2255 	return (0);
2256 }
2257 
2258 /*
2259  * duplicate the shared page tables
2260  */
2261 int
2262 segspt_shmdup(struct seg *seg, struct seg *newseg)
2263 {
2264 	struct shm_data		*shmd = (struct shm_data *)seg->s_data;
2265 	struct anon_map 	*amp = shmd->shm_amp;
2266 	struct shm_data 	*shmd_new;
2267 	struct seg		*spt_seg = shmd->shm_sptseg;
2268 	struct spt_data		*sptd = spt_seg->s_data;
2269 	int			error = 0;
2270 
2271 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
2272 
2273 	shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2274 	newseg->s_data = (void *)shmd_new;
2275 	shmd_new->shm_sptas = shmd->shm_sptas;
2276 	shmd_new->shm_amp = amp;
2277 	shmd_new->shm_sptseg = shmd->shm_sptseg;
2278 	newseg->s_ops = &segspt_shmops;
2279 	newseg->s_szc = seg->s_szc;
2280 	ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2281 
2282 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2283 	amp->refcnt++;
2284 	ANON_LOCK_EXIT(&amp->a_rwlock);
2285 
2286 	if (sptd->spt_flags & SHM_PAGEABLE) {
2287 		shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2288 		shmd_new->shm_lckpgs = 0;
2289 		if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2290 			if ((error = hat_share(newseg->s_as->a_hat,
2291 			    newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2292 			    seg->s_size, seg->s_szc)) != 0) {
2293 				kmem_free(shmd_new->shm_vpage,
2294 				    btopr(amp->size));
2295 			}
2296 		}
2297 		return (error);
2298 	} else {
2299 		return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2300 		    shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2301 		    seg->s_szc));
2302 
2303 	}
2304 }
2305 
2306 /*ARGSUSED*/
2307 int
2308 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2309 {
2310 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
2311 	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2312 
2313 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2314 
2315 	/*
2316 	 * ISM segment is always rw.
2317 	 */
2318 	return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2319 }
2320 
2321 /*
2322  * Return an array of locked large pages; for empty slots allocate
2323  * private zero-filled anon pages.
2324  */
2325 static int
2326 spt_anon_getpages(
2327 	struct seg *sptseg,
2328 	caddr_t sptaddr,
2329 	size_t len,
2330 	page_t *ppa[])
2331 {
2332 	struct  spt_data *sptd = sptseg->s_data;
2333 	struct  anon_map *amp = sptd->spt_amp;
2334 	enum 	seg_rw rw = sptd->spt_prot;
2335 	uint_t	szc = sptseg->s_szc;
2336 	size_t	pg_sz, share_sz = page_get_pagesize(szc);
2337 	pgcnt_t	lp_npgs;
2338 	caddr_t	lp_addr, e_sptaddr;
2339 	uint_t	vpprot, ppa_szc = 0;
2340 	struct  vpage *vpage = NULL;
2341 	ulong_t	j, ppa_idx;
2342 	int	err, ierr = 0;
2343 	pgcnt_t	an_idx;
2344 	anon_sync_obj_t cookie;
2345 	int anon_locked = 0;
2346 	pgcnt_t amp_pgs;
2347 
2348 
2349 	ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2350 	ASSERT(len != 0);
2351 
2352 	pg_sz = share_sz;
2353 	lp_npgs = btop(pg_sz);
2354 	lp_addr = sptaddr;
2355 	e_sptaddr = sptaddr + len;
2356 	an_idx = seg_page(sptseg, sptaddr);
2357 	ppa_idx = 0;
2358 
2359 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2360 
2361 	amp_pgs = page_get_pagecnt(amp->a_szc);
2362 
2363 	/*CONSTCOND*/
2364 	while (1) {
2365 		for (; lp_addr < e_sptaddr;
2366 		    an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2367 
2368 			/*
2369 			 * If we're currently locked, and we get to a new
2370 			 * page, unlock our current anon chunk.
2371 			 */
2372 			if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2373 				anon_array_exit(&cookie);
2374 				anon_locked = 0;
2375 			}
2376 			if (!anon_locked) {
2377 				anon_array_enter(amp, an_idx, &cookie);
2378 				anon_locked = 1;
2379 			}
2380 			ppa_szc = (uint_t)-1;
2381 			ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2382 			    lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2383 			    &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2384 
2385 			if (ierr != 0) {
2386 				if (ierr > 0) {
2387 					err = FC_MAKE_ERR(ierr);
2388 					goto lpgs_err;
2389 				}
2390 				break;
2391 			}
2392 		}
2393 		if (lp_addr == e_sptaddr) {
2394 			break;
2395 		}
2396 		ASSERT(lp_addr < e_sptaddr);
2397 
2398 		/*
2399 		 * ierr == -1 means we failed to allocate a large page,
2400 		 * so do a size down operation.
2401 		 *
2402 		 * ierr == -2 means some other process that privately shares
2403 		 * pages with this process has allocated a larger page and we
2404 		 * need to retry with larger pages. So do a size up
2405 		 * operation. This relies on the fact that large pages are
2406 		 * never partially shared i.e. if we share any constituent
2407 		 * page of a large page with another process we must share the
2408 		 * entire large page. Note this cannot happen for SOFTLOCK
2409 		 * case, unless current address (lp_addr) is at the beginning
2410 		 * of the next page size boundary because the other process
2411 		 * couldn't have relocated locked pages.
2412 		 */
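		/*
		 * For example, if szc 3 (4M on sun4u) cannot be allocated,
		 * ierr == -1 sizes down to szc 2 (512K) when segvn_anypgsz
		 * is set; a later ierr == -2 would size back up toward the
		 * segment's szc.
		 */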
2413 		ASSERT(ierr == -1 || ierr == -2);
2414 		if (segvn_anypgsz) {
2415 			ASSERT(ierr == -2 || szc != 0);
2416 			ASSERT(ierr == -1 || szc < sptseg->s_szc);
2417 			szc = (ierr == -1) ? szc - 1 : szc + 1;
2418 		} else {
2419 			/*
2420 			 * For faults and segvn_anypgsz == 0
2421 			 * we need to be careful not to loop forever
2422 			 * if existing page is found with szc other
2423 			 * than 0 or seg->s_szc. This could be due
2424 			 * to page relocations on behalf of DR or
2425 			 * more likely large page creation. For this
2426 			 * case simply re-size to existing page's szc
2427 			 * if returned by anon_map_getpages().
2428 			 */
2429 			if (ppa_szc == (uint_t)-1) {
2430 				szc = (ierr == -1) ? 0 : sptseg->s_szc;
2431 			} else {
2432 				ASSERT(ppa_szc <= sptseg->s_szc);
2433 				ASSERT(ierr == -2 || ppa_szc < szc);
2434 				ASSERT(ierr == -1 || ppa_szc > szc);
2435 				szc = ppa_szc;
2436 			}
2437 		}
2438 		pg_sz = page_get_pagesize(szc);
2439 		lp_npgs = btop(pg_sz);
2440 		ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2441 	}
2442 	if (anon_locked) {
2443 		anon_array_exit(&cookie);
2444 	}
2445 	ANON_LOCK_EXIT(&amp->a_rwlock);
2446 	return (0);
2447 
2448 lpgs_err:
2449 	if (anon_locked) {
2450 		anon_array_exit(&cookie);
2451 	}
2452 	ANON_LOCK_EXIT(&amp->a_rwlock);
2453 	for (j = 0; j < ppa_idx; j++)
2454 		page_unlock(ppa[j]);
2455 	return (err);
2456 }
2457 
2458 /*
2459  * count the number of bytes in a set of spt pages that are currently not
2460  * locked
2461  */
2462 static rctl_qty_t
2463 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2464 {
2465 	ulong_t	i;
2466 	rctl_qty_t unlocked = 0;
2467 
2468 	for (i = 0; i < npages; i++) {
2469 		if (ppa[i]->p_lckcnt == 0)
2470 			unlocked += PAGESIZE;
2471 	}
2472 	return (unlocked);
2473 }
2474 
2475 extern	u_longlong_t randtick(void);
2476 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2477 #define	NLCK	(NCPU_P2)
2478 /* Random number with a range [0, n-1], n must be power of two */
2479 #define	RAND_P2(n)	\
2480 	((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
2481 
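/*
 * For example, RAND_P2(NLCK) with NCPU_P2 of 16 mixes low thread-pointer
 * bits with randtick() and masks with 0xf, yielding a value in [0, 15].
 */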
2482 int
2483 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2484     page_t **ppa, ulong_t *lockmap, size_t pos,
2485     rctl_qty_t *locked)
2486 {
2487 	struct	shm_data *shmd = seg->s_data;
2488 	struct	spt_data *sptd = shmd->shm_sptseg->s_data;
2489 	ulong_t	i;
2490 	int	kernel;
2491 	pgcnt_t	nlck = 0;
2492 	int	rv = 0;
2493 	int	use_reserved = 1;
2494 
2495 	/* return the number of bytes actually locked */
2496 	*locked = 0;
2497 
2498 	/*
2499 	 * To avoid contention on freemem_lock, availrmem and pages_locked
2500 	 * global counters are updated only every nlck locked pages instead of
2501 	 * every time.  Reserve nlck locks up front and deduct from this
2502 	 * reservation for each page that requires a lock.  When the reservation
2503 	 * is consumed, reserve again.  nlck is randomized, so the competing
2504 	 * threads do not fall into a cyclic lock contention pattern. When
2505 	 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2506 	 * is used to lock pages.
2507 	 */
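	/*
	 * For example, with NCPU_P2 of 16 each reservation covers between
	 * 16 and 31 pages (clamped to the pages remaining), so freemem_lock
	 * is taken roughly once per couple dozen pages rather than per page.
	 */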
2508 	for (i = 0; i < npages; anon_index++, pos++, i++) {
2509 		if (nlck == 0 && use_reserved == 1) {
2510 			nlck = NLCK + RAND_P2(NLCK);
2511 			/* if fewer loops left, decrease nlck */
2512 			nlck = MIN(nlck, npages - i);
2513 			/*
2514 			 * Reserve nlck locks up front and deduct from this
2515 			 * reservation for each page that requires a lock.  When
2516 			 * the reservation is consumed, reserve again.
2517 			 */
2518 			mutex_enter(&freemem_lock);
2519 			if ((availrmem - nlck) < pages_pp_maximum) {
2520 				/* Do not do advance memory reserves */
2521 				use_reserved = 0;
2522 			} else {
2523 				availrmem	-= nlck;
2524 				pages_locked	+= nlck;
2525 			}
2526 			mutex_exit(&freemem_lock);
2527 		}
2528 		if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2529 			if (sptd->spt_ppa_lckcnt[anon_index] <
2530 			    (ushort_t)DISM_LOCK_MAX) {
2531 				if (++sptd->spt_ppa_lckcnt[anon_index] ==
2532 				    (ushort_t)DISM_LOCK_MAX) {
2533 					cmn_err(CE_WARN,
2534 					    "DISM page lock limit "
2535 					    "reached on DISM offset 0x%lx\n",
2536 					    anon_index << PAGESHIFT);
2537 				}
2538 				kernel = (sptd->spt_ppa &&
2539 				    sptd->spt_ppa[anon_index]);
2540 				if (!page_pp_lock(ppa[i], 0, kernel ||
2541 				    use_reserved)) {
2542 					sptd->spt_ppa_lckcnt[anon_index]--;
2543 					rv = EAGAIN;
2544 					break;
2545 				}
2546 				/* if this is a newly locked page, count it */
2547 				if (ppa[i]->p_lckcnt == 1) {
2548 					if (kernel == 0 && use_reserved == 1)
2549 						nlck--;
2550 					*locked += PAGESIZE;
2551 				}
2552 				shmd->shm_lckpgs++;
2553 				shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2554 				if (lockmap != NULL)
2555 					BT_SET(lockmap, pos);
2556 			}
2557 		}
2558 	}
2559 	/* Return unused lock reservation */
2560 	if (nlck != 0 && use_reserved == 1) {
2561 		mutex_enter(&freemem_lock);
2562 		availrmem	+= nlck;
2563 		pages_locked	-= nlck;
2564 		mutex_exit(&freemem_lock);
2565 	}
2566 
2567 	return (rv);
2568 }
2569 
2570 int
2571 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2572     rctl_qty_t *unlocked)
2573 {
2574 	struct shm_data	*shmd = seg->s_data;
2575 	struct spt_data	*sptd = shmd->shm_sptseg->s_data;
2576 	struct anon_map	*amp = sptd->spt_amp;
2577 	struct anon 	*ap;
2578 	struct vnode 	*vp;
2579 	u_offset_t 	off;
2580 	struct page	*pp;
2581 	int		kernel;
2582 	anon_sync_obj_t	cookie;
2583 	ulong_t		i;
2584 	pgcnt_t		nlck = 0;
2585 	pgcnt_t		nlck_limit = NLCK;
2586 
2587 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2588 	for (i = 0; i < npages; i++, anon_index++) {
2589 		if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2590 			anon_array_enter(amp, anon_index, &cookie);
2591 			ap = anon_get_ptr(amp->ahp, anon_index);
2592 			ASSERT(ap);
2593 
2594 			swap_xlate(ap, &vp, &off);
2595 			anon_array_exit(&cookie);
2596 			pp = page_lookup(vp, off, SE_SHARED);
2597 			ASSERT(pp);
2598 			/*
2599 			 * availrmem is incremented only for pages which are
2600 			 * not in seg pcache; for pages in seg pcache availrmem
2601 			 * was decremented in _dismpagelock()
2602 			 */
2603 			kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2604 			ASSERT(pp->p_lckcnt > 0);
2605 
2606 			/*
2607 			 * Unlock the page but do not change availrmem; we do
2608 			 * that ourselves every nlck loops.
2609 			 */
2610 			page_pp_unlock(pp, 0, 1);
2611 			if (pp->p_lckcnt == 0) {
2612 				if (kernel == 0)
2613 					nlck++;
2614 				*unlocked += PAGESIZE;
2615 			}
2616 			page_unlock(pp);
2617 			shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2618 			sptd->spt_ppa_lckcnt[anon_index]--;
2619 			shmd->shm_lckpgs--;
2620 		}
2621 
2622 		/*
2623 		 * To reduce freemem_lock contention, do not update availrmem
2624 		 * until at least NLCK pages have been unlocked.
2625 		 * 1. No need to update if nlck is zero
2626 		 * 2. Always update on the last iteration
2627 		 */
2628 		if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2629 			mutex_enter(&freemem_lock);
2630 			availrmem	+= nlck;
2631 			pages_locked	-= nlck;
2632 			mutex_exit(&freemem_lock);
2633 			nlck = 0;
2634 			nlck_limit = NLCK + RAND_P2(NLCK);
2635 		}
2636 	}
2637 	ANON_LOCK_EXIT(&amp->a_rwlock);
2638 
2639 	return (0);
2640 }
2641 
2642 /*ARGSUSED*/
2643 static int
2644 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2645     int attr, int op, ulong_t *lockmap, size_t pos)
2646 {
2647 	struct shm_data *shmd = seg->s_data;
2648 	struct seg	*sptseg = shmd->shm_sptseg;
2649 	struct spt_data *sptd = sptseg->s_data;
2650 	struct kshmid	*sp = sptd->spt_amp->a_sp;
2651 	pgcnt_t		npages, a_npages;
2652 	page_t		**ppa;
2653 	pgcnt_t 	an_idx, a_an_idx, ppa_idx;
2654 	caddr_t		spt_addr, a_addr;	/* spt and aligned address */
2655 	size_t		a_len;			/* aligned len */
2656 	size_t		share_sz;
2657 	ulong_t		i;
2658 	int		sts = 0;
2659 	rctl_qty_t	unlocked = 0;
2660 	rctl_qty_t	locked = 0;
2661 	struct proc	*p = curproc;
2662 	kproject_t	*proj;
2663 
2664 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2665 	ASSERT(sp != NULL);
2666 
2667 	if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2668 		return (0);
2669 	}
2670 
2671 	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2672 	an_idx = seg_page(seg, addr);
2673 	npages = btopr(len);
2674 
2675 	if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2676 		return (ENOMEM);
2677 	}
2678 
2679 	/*
2680 	 * A shm's project never changes, so no lock needed.
2681 	 * The shm has a hold on the project, so it will not go away.
2682 	 * Since we have a mapping to shm within this zone, we know
2683 	 * that the zone will not go away.
2684 	 */
2685 	proj = sp->shm_perm.ipc_proj;
2686 
2687 	if (op == MC_LOCK) {
2688 
2689 		/*
2690 		 * Need to align the addr and size request if they are not
2691 		 * aligned, so we can always allocate large page(s); however,
2692 		 * we only lock what was requested in the initial request.
2693 		 */
2694 		share_sz = page_get_pagesize(sptseg->s_szc);
2695 		a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2696 		a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2697 		    share_sz);
2698 		a_npages = btop(a_len);
2699 		a_an_idx = seg_page(seg, a_addr);
2700 		spt_addr = sptseg->s_base + ptob(a_an_idx);
2701 		ppa_idx = an_idx - a_an_idx;
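		/*
		 * For example, locking 16K at seg->s_base + 8K (assuming 8K
		 * base pages and a 4M underlying page size) gives an_idx = 1,
		 * npages = 2, a_addr = seg->s_base, a_npages = 512 and
		 * ppa_idx = 1, so only ppa[1] and ppa[2] are locked out of
		 * the 512 pages brought in for the large page.
		 */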
2702 
2703 		if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2704 		    KM_NOSLEEP)) == NULL) {
2705 			return (ENOMEM);
2706 		}
2707 
2708 		/*
2709 		 * Don't cache any new pages for IO and
2710 		 * flush any cached pages.
2711 		 */
2712 		mutex_enter(&sptd->spt_lock);
2713 		if (sptd->spt_ppa != NULL)
2714 			sptd->spt_flags |= DISM_PPA_CHANGED;
2715 
2716 		sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2717 		if (sts != 0) {
2718 			mutex_exit(&sptd->spt_lock);
2719 			kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2720 			return (sts);
2721 		}
2722 
2723 		mutex_enter(&sp->shm_mlock);
2724 		/* enforce locked memory rctl */
2725 		unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2726 
2727 		mutex_enter(&p->p_lock);
2728 		if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2729 			mutex_exit(&p->p_lock);
2730 			sts = EAGAIN;
2731 		} else {
2732 			mutex_exit(&p->p_lock);
2733 			sts = spt_lockpages(seg, an_idx, npages,
2734 			    &ppa[ppa_idx], lockmap, pos, &locked);
2735 
2736 			/*
2737 			 * correct locked count if not all pages could be
2738 			 * locked
2739 			 */
2740 			if ((unlocked - locked) > 0) {
2741 				rctl_decr_locked_mem(NULL, proj,
2742 				    (unlocked - locked), 0);
2743 			}
2744 		}
2745 		/*
2746 		 * unlock pages
2747 		 */
2748 		for (i = 0; i < a_npages; i++)
2749 			page_unlock(ppa[i]);
2750 		if (sptd->spt_ppa != NULL)
2751 			sptd->spt_flags |= DISM_PPA_CHANGED;
2752 		mutex_exit(&sp->shm_mlock);
2753 		mutex_exit(&sptd->spt_lock);
2754 
2755 		kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2756 
2757 	} else if (op == MC_UNLOCK) { /* unlock */
2758 		page_t		**ppa;
2759 
2760 		mutex_enter(&sptd->spt_lock);
2761 		if (shmd->shm_lckpgs == 0) {
2762 			mutex_exit(&sptd->spt_lock);
2763 			return (0);
2764 		}
2765 		/*
2766 		 * Don't cache new IO pages.
2767 		 */
2768 		if (sptd->spt_ppa != NULL)
2769 			sptd->spt_flags |= DISM_PPA_CHANGED;
2770 
2771 		mutex_enter(&sp->shm_mlock);
2772 		sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2773 		if ((ppa = sptd->spt_ppa) != NULL)
2774 			sptd->spt_flags |= DISM_PPA_CHANGED;
2775 		mutex_exit(&sptd->spt_lock);
2776 
2777 		rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2778 		mutex_exit(&sp->shm_mlock);
2779 
2780 		if (ppa != NULL)
2781 			seg_ppurge_wiredpp(ppa);
2782 	}
2783 	return (sts);
2784 }
2785 
2786 /*ARGSUSED*/
2787 int
2788 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2789 {
2790 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
2791 	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2792 	spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2793 
2794 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2795 
2796 	/*
2797 	 * ISM segment is always rw.
2798 	 */
2799 	while (--pgno >= 0)
2800 		*protv++ = sptd->spt_prot;
2801 	return (0);
2802 }
2803 
2804 /*ARGSUSED*/
2805 u_offset_t
2806 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2807 {
2808 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2809 
2810 	/* Offset does not matter in ISM memory */
2811 
2812 	return ((u_offset_t)0);
2813 }
2814 
2815 /* ARGSUSED */
2816 int
2817 segspt_shmgettype(struct seg *seg, caddr_t addr)
2818 {
2819 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
2820 	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2821 
2822 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2823 
2824 	/*
2825 	 * The shared memory mapping is always MAP_SHARED; swap is only
2826 	 * reserved for DISM.
2827 	 */
2828 	return (MAP_SHARED |
2829 	    ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2830 }
2831 
2832 /*ARGSUSED*/
2833 int
2834 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2835 {
2836 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
2837 	struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2838 
2839 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2840 
2841 	*vpp = sptd->spt_vp;
2842 	return (0);
2843 }
2844 
2845 /*
2846  * We need to wait for pending IO to complete to a DISM segment in order for
2847  * pages to get kicked out of the seg_pcache.  120 seconds should be more
2848  * than enough time to wait.
2849  */
2850 static clock_t spt_pcache_wait = 120;
2851 
2852 /*ARGSUSED*/
2853 static int
2854 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2855 {
2856 	struct shm_data	*shmd = (struct shm_data *)seg->s_data;
2857 	struct spt_data	*sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2858 	struct anon_map	*amp;
2859 	pgcnt_t pg_idx;
2860 	ushort_t gen;
2861 	clock_t	end_lbolt;
2862 	int writer;
2863 	page_t **ppa;
2864 
2865 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2866 
2867 	if (behav == MADV_FREE || behav == MADV_PURGE) {
2868 		if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2869 			return (0);
2870 
2871 		amp = sptd->spt_amp;
2872 		pg_idx = seg_page(seg, addr);
2873 
2874 		mutex_enter(&sptd->spt_lock);
2875 		if ((ppa = sptd->spt_ppa) == NULL) {
2876 			mutex_exit(&sptd->spt_lock);
2877 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2878 			(void) anon_disclaim(amp, pg_idx, len, behav, NULL);
2879 			ANON_LOCK_EXIT(&amp->a_rwlock);
2880 			return (0);
2881 		}
2882 
2883 		sptd->spt_flags |= DISM_PPA_CHANGED;
2884 		gen = sptd->spt_gen;
2885 
2886 		mutex_exit(&sptd->spt_lock);
2887 
2888 		/*
2889 		 * Purge all DISM cached pages
2890 		 */
2891 		seg_ppurge_wiredpp(ppa);
2892 
2893 		/*
2894 		 * Drop the AS_LOCK so that other threads can grab it
2895 		 * in the as_pageunlock path and hopefully get the segment
2896 		 * kicked out of the seg_pcache.  We bump the shm_softlockcnt
2897 		 * to keep this segment resident.
2898 		 */
2899 		writer = AS_WRITE_HELD(seg->s_as);
2900 		atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2901 		AS_LOCK_EXIT(seg->s_as);
2902 
2903 		mutex_enter(&sptd->spt_lock);
2904 
2905 		end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2906 
2907 		/*
2908 		 * Try to wait for pages to get kicked out of the seg_pcache.
2909 		 */
2910 		while (sptd->spt_gen == gen &&
2911 		    (sptd->spt_flags & DISM_PPA_CHANGED) &&
2912 		    ddi_get_lbolt() < end_lbolt) {
2913 			if (!cv_timedwait_sig(&sptd->spt_cv,
2914 			    &sptd->spt_lock, end_lbolt)) {
2915 				break;
2916 			}
2917 		}
2918 
2919 		mutex_exit(&sptd->spt_lock);
2920 
2921 		/* Regrab the AS_LOCK and release our hold on the segment */
2922 		AS_LOCK_ENTER(seg->s_as, writer ? RW_WRITER : RW_READER);
2923 		atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2924 		if (shmd->shm_softlockcnt <= 0) {
2925 			if (AS_ISUNMAPWAIT(seg->s_as)) {
2926 				mutex_enter(&seg->s_as->a_contents);
2927 				if (AS_ISUNMAPWAIT(seg->s_as)) {
2928 					AS_CLRUNMAPWAIT(seg->s_as);
2929 					cv_broadcast(&seg->s_as->a_cv);
2930 				}
2931 				mutex_exit(&seg->s_as->a_contents);
2932 			}
2933 		}
2934 
2935 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2936 		(void) anon_disclaim(amp, pg_idx, len, behav, NULL);
2937 		ANON_LOCK_EXIT(&amp->a_rwlock);
2938 	} else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2939 	    behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2940 		int			already_set;
2941 		ulong_t			anon_index;
2942 		lgrp_mem_policy_t	policy;
2943 		caddr_t			shm_addr;
2944 		size_t			share_size;
2945 		size_t			size;
2946 		struct seg		*sptseg = shmd->shm_sptseg;
2947 		caddr_t			sptseg_addr;
2948 
2949 		/*
2950 		 * Align address and length to page size of underlying segment
2951 		 */
2952 		share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2953 		shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2954 		size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2955 		    share_size);
2956 
2957 		amp = shmd->shm_amp;
2958 		anon_index = seg_page(seg, shm_addr);
2959 
2960 		/*
2961 		 * And now we may have to adjust size downward if we have
2962 		 * exceeded the realsize of the segment or initial anon
2963 		 * allocations.
2964 		 */
2965 		sptseg_addr = sptseg->s_base + ptob(anon_index);
2966 		if ((sptseg_addr + size) >
2967 		    (sptseg->s_base + sptd->spt_realsize))
2968 			size = (sptseg->s_base + sptd->spt_realsize) -
2969 			    sptseg_addr;
2970 
2971 		/*
2972 		 * Set memory allocation policy for this segment
2973 		 */
2974 		policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2975 		already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2976 		    NULL, 0, len);
2977 
2978 		/*
2979 		 * If a random memory allocation policy is set already,
2980 		 * don't bother reapplying it.
2981 		 */
2982 		if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2983 			return (0);
2984 
2985 		/*
2986 		 * Mark any existing pages in the given range for
2987 		 * migration, flushing the I/O page cache, and using the
2988 		 * underlying segment to calculate the anon index and to
2989 		 * get the anonmap and vnode pointer.
2990 		 */
2991 		if (shmd->shm_softlockcnt > 0)
2992 			segspt_purge(seg);
2993 
2994 		page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2995 	}
2996 
2997 	return (0);
2998 }
2999 
3000 /*ARGSUSED*/
3001 void
3002 segspt_shmdump(struct seg *seg)
3003 {
3004 	/* no-op for ISM segment */
3005 }
3006 
3007 /*ARGSUSED*/
3008 static faultcode_t
3009 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
3010 {
3011 	return (ENOTSUP);
3012 }
3013 
3014 /*
3015  * get a memory ID for an addr in a given segment
3016  */
3017 static int
3018 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3019 {
3020 	struct shm_data *shmd = (struct shm_data *)seg->s_data;
3021 	struct anon 	*ap;
3022 	size_t		anon_index;
3023 	struct anon_map	*amp = shmd->shm_amp;
3024 	struct spt_data	*sptd = shmd->shm_sptseg->s_data;
3025 	struct seg	*sptseg = shmd->shm_sptseg;
3026 	anon_sync_obj_t	cookie;
3027 
3028 	anon_index = seg_page(seg, addr);
3029 
3030 	if (addr > (seg->s_base + sptd->spt_realsize)) {
3031 		return (EFAULT);
3032 	}
3033 
3034 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3035 	anon_array_enter(amp, anon_index, &cookie);
3036 	ap = anon_get_ptr(amp->ahp, anon_index);
3037 	if (ap == NULL) {
3038 		struct page *pp;
3039 		caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3040 
3041 		pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3042 		if (pp == NULL) {
3043 			anon_array_exit(&cookie);
3044 			ANON_LOCK_EXIT(&amp->a_rwlock);
3045 			return (ENOMEM);
3046 		}
3047 		(void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3048 		page_unlock(pp);
3049 	}
3050 	anon_array_exit(&cookie);
3051 	ANON_LOCK_EXIT(&amp->a_rwlock);
3052 	memidp->val[0] = (uintptr_t)ap;
3053 	memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3054 	return (0);
3055 }
3056 
3057 /*
3058  * Get memory allocation policy info for specified address in given segment
3059  */
3060 static lgrp_mem_policy_info_t *
3061 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3062 {
3063 	struct anon_map		*amp;
3064 	ulong_t			anon_index;
3065 	lgrp_mem_policy_info_t	*policy_info;
3066 	struct shm_data		*shm_data;
3067 
3068 	ASSERT(seg != NULL);
3069 
3070 	/*
3071 	 * Get anon_map from segshm
3072 	 *
3073 	 * Assume that no lock needs to be held on the anon_map, since
3074 	 * it should be protected by its reference count, which must be
3075 	 * nonzero for an existing segment.
3076 	 * Need to grab the readers lock on the policy tree, though.
3077 	 */
3078 	shm_data = (struct shm_data *)seg->s_data;
3079 	if (shm_data == NULL)
3080 		return (NULL);
3081 	amp = shm_data->shm_amp;
3082 	ASSERT(amp->refcnt != 0);
3083 
3084 	/*
3085 	 * Get policy info
3086 	 *
3087 	 * Assume starting anon index of 0
3088 	 */
3089 	anon_index = seg_page(seg, addr);
3090 	policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3091 
3092 	return (policy_info);
3093 }
3094 
3095 /*ARGSUSED*/
3096 static int
3097 segspt_shmcapable(struct seg *seg, segcapability_t capability)
3098 {
3099 	return (0);
3100 }
3101