/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/*	All Rights Reserved   */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * segkp is a segment driver that administers the allocation and deallocation
 * of pageable, variable-size chunks of kernel virtual address space. Each
 * allocated resource is page-aligned.
 *
 * The user may specify whether the resource should be initialized to zero,
 * include a redzone, or be locked in memory.
 */
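
/*
 * For illustration only: a minimal sketch of how a client might use this
 * driver.  The caller, size, and flag combination below are hypothetical,
 * not taken from this file.  A typical client allocates a zeroed,
 * redzone-protected, locked-down chunk from the global segkp segment and
 * later releases it by address:
 *
 *	caddr_t va;
 *
 *	va = segkp_get(segkp, 8 * PAGESIZE,
 *	    KPD_ZERO | KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED);
 *	if (va == NULL)
 *		return (ENOMEM);
 *	...
 *	segkp_release(segkp, va);
 *
 * Note that KPD_NO_ANON is only legal together with KPD_LOCKED (see
 * segkp_get_internal() below).
 */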

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/tuneable.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/cred.h>
#include <sys/dumphdr.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/lgrp.h>

#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/hat.h>
#include <sys/bitmap.h>

/*
 * Private seg op routines
 */
static void	segkp_badop(void);
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
			struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
			struct segkp_data *kpd, size_t len);
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static struct	segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int	segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static lgrp_mem_policy_info_t	*segkp_getpolicy(struct seg *seg,
    caddr_t addr);

/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

/*
 * The segkp caches
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

#define	SEGKP_BADOP(t)	(t(*)())segkp_badop

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...
 */
long		red_minavail = 5000;

/*
 * Will be set to 1 for 32-bit x86 systems only, in startup.c.
 */
int	segkp_fromheap = 0;
ulong_t *segkp_bitmap;

/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious;  if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure.  To help debug the "can't happen" panics which may
 * result from this condition, we record lbolt and the calling thread
 * in red_deep_lbolt and red_deep_thread respectively.
 */
#define	RED_DEEP_THRESHOLD	2000

clock_t		red_deep_lbolt;
kthread_t	*red_deep_thread;

uint32_t	red_nmapped;
uint32_t	red_closest = UINT_MAX;
uint32_t	red_ndoubles;

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */

static struct	seg_ops segkp_ops = {
	SEGKP_BADOP(int),		/* dup */
	SEGKP_BADOP(int),		/* unmap */
	SEGKP_BADOP(void),		/* free */
	segkp_fault,
	SEGKP_BADOP(faultcode_t),	/* faulta */
	SEGKP_BADOP(int),		/* setprot */
	segkp_checkprot,
	segkp_kluster,
	SEGKP_BADOP(size_t),		/* swapout */
	SEGKP_BADOP(int),		/* sync */
	SEGKP_BADOP(size_t),		/* incore */
	SEGKP_BADOP(int),		/* lockop */
	SEGKP_BADOP(int),		/* getprot */
	SEGKP_BADOP(u_offset_t),	/* getoffset */
	SEGKP_BADOP(int),		/* gettype */
	SEGKP_BADOP(int),		/* getvp */
	SEGKP_BADOP(int),		/* advise */
	segkp_dump,			/* dump */
	segkp_pagelock,			/* pagelock */
	SEGKP_BADOP(int),		/* setpgsz */
	segkp_getmemid,			/* getmemid */
	segkp_getpolicy,		/* getpolicy */
};

static void
segkp_badop(void)
{
	panic("segkp_badop");
	/*NOTREACHED*/
}

static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;

/*
 * Allocate the segment-specific private data struct and fill it in
 * with the per-kp-segment mutex, anon pointer array, and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t	np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

	if (seg->s_size & PAGEOFFSET) {
		panic("Bad segkp size");
		/*NOTREACHED*/
	}

	kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

	/*
	 * Allocate the virtual memory for segkp and initialize it
	 */
	if (segkp_fromheap) {
		np = btop(kvseg.s_size);
		segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
		kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
		    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
	} else {
		segkp_bitmap = NULL;
		np = btop(seg->s_size);
		kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
		    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
		    VM_SLEEP);
	}

	kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

	kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
	    KM_SLEEP);
	seg->s_data = (void *)kpsd;
	seg->s_ops = &segkp_ops;
	segkpinit_mem_config(seg);
	return (0);
}

/*
 * Find a free 'freelist' and initialize it with the appropriate attributes
 */
void *
segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
{
	int i;

	if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
		return ((void *)-1);

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (segkp_cache[i].kpf_inuse)
			continue;
		segkp_cache[i].kpf_inuse = 1;
		segkp_cache[i].kpf_max = maxsize;
		segkp_cache[i].kpf_flags = flags;
		segkp_cache[i].kpf_seg = seg;
		segkp_cache[i].kpf_len = len;
		mutex_exit(&segkp_lock);
		return ((void *)(uintptr_t)i);
	}
	mutex_exit(&segkp_lock);
	return ((void *)-1);
}

/*
 * Free all the cache resources.
 */
void
segkp_cache_free(void)
{
	struct segkp_data *kpd;
	struct seg *seg;
	int i;

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (!segkp_cache[i].kpf_inuse)
			continue;
		/*
		 * Disconnect the freelist and process each element
		 */
		kpd = segkp_cache[i].kpf_list;
		seg = segkp_cache[i].kpf_seg;
		segkp_cache[i].kpf_list = NULL;
		segkp_cache[i].kpf_count = 0;
		mutex_exit(&segkp_lock);

		while (kpd != NULL) {
			struct segkp_data *next;

			next = kpd->kp_next;
			segkp_release_internal(seg, kpd, kpd->kp_len);
			kpd = next;
		}
		mutex_enter(&segkp_lock);
	}
	mutex_exit(&segkp_lock);
}

/*
 * There are two entry points into segkp_get_internal: segkp_cache_get(),
 * which uses a cookie to allocate from a pool of cached segkp resources,
 * and segkp_get(), which does not use the cache.
 */
caddr_t
segkp_get(struct seg *seg, size_t len, uint_t flags)
{
	struct segkp_data *kpd = NULL;

	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

/*
 * Return a 'cached' segkp address
 */
caddr_t
segkp_cache_get(void *cookie)
{
	struct segkp_cache *freelist = NULL;
	struct segkp_data *kpd = NULL;
	int index = (int)(uintptr_t)cookie;
	struct seg *seg;
	size_t len;
	uint_t flags;

	if (index < 0 || index >= SEGKP_MAX_CACHE)
		return (NULL);
	freelist = &segkp_cache[index];

	mutex_enter(&segkp_lock);
	seg = freelist->kpf_seg;
	flags = freelist->kpf_flags;
	if (freelist->kpf_list != NULL) {
		kpd = freelist->kpf_list;
		freelist->kpf_list = kpd->kp_next;
		freelist->kpf_count--;
		mutex_exit(&segkp_lock);
		kpd->kp_next = NULL;
		segkp_insert(seg, kpd);
		return (stom(kpd->kp_base, flags));
	}
	len = freelist->kpf_len;
	mutex_exit(&segkp_lock);
	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = index;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}
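
/*
 * Illustrative sketch (not part of the original interface documentation):
 * a subsystem that repeatedly allocates same-sized resources might set up
 * a cache once and then allocate and release through it.  The pool depth,
 * size, and flags below are hypothetical.
 *
 *	void *cookie;
 *	caddr_t va;
 *
 *	cookie = segkp_cache_init(segkp, 16, 8 * PAGESIZE,
 *	    KPD_HASREDZONE | KPD_LOCKED);
 *	va = segkp_cache_get(cookie);
 *	...
 *	segkp_release(segkp, va);
 *
 * segkp_release() consults kp_cookie to decide whether the resource goes
 * back onto the cache freelist or is freed outright; segkp_cache_init()
 * returns (void *)-1 if no cache slot is available.
 */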

caddr_t
segkp_get_withanonmap(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct anon_map *amp)
{
	struct segkp_data *kpd = NULL;

	ASSERT(amp != NULL);
	flags |= KPD_HASAMP;
	if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

/*
 * This does the real work of segkp allocation.
 * Returns the base address to the client. len must be page-aligned. A null
 * value is returned if there are no more vm resources (e.g. pages, swap). The
 * len and base recorded in the private data structure include the redzone
 * (if applicable). If the user requests a redzone, either the first or last
 * page is left unmapped depending on whether stacks grow toward low or high
 * memory.
 *
 * The client may also specify a no-wait flag. If that is set then the
 * request will choose a non-blocking path when requesting resources.
 * The default is to make the client wait.
 */
static caddr_t
segkp_get_internal(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct segkp_data **tkpd,
	struct anon_map *amp)
{
	struct segkp_segdata	*kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data	*kpd;
	caddr_t vbase = NULL;	/* always first virtual, may not be mapped */
	pgcnt_t np = 0;		/* number of pages in the resource */
	pgcnt_t segkpindex;
	long i;
	caddr_t va;
	pgcnt_t pages = 0;
	ulong_t anon_idx = 0;
	int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;

	if (len & PAGEOFFSET) {
		panic("segkp_get: len is not page-aligned");
		/*NOTREACHED*/
	}

	ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));

	/* Only allow KPD_NO_ANON if we are going to lock it down */
	if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
		return (NULL);

	if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
		return (NULL);
	/*
	 * Fix up the len to reflect the REDZONE if applicable
	 */
	if (flags & KPD_HASREDZONE)
		len += PAGESIZE;
	np = btop(len);

	vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
	if (vbase == NULL) {
		kmem_free(kpd, sizeof (struct segkp_data));
		return (NULL);
	}

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pages = btop(SEGKP_MAPLEN(len, flags));
		if (page_resv(pages, kmflag) == 0) {
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		if ((flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
	}

	/*
	 * Reserve sufficient swap space for this vm resource.  We'll
	 * actually allocate it in the loop below, but reserving it
	 * here allows us to back out more gracefully than if we
	 * had an allocation failure in the body of the loop.
	 *
	 * Note that we don't need swap space for the red zone page.
	 */
	if (amp != NULL) {
		ASSERT((flags & KPD_NO_ANON) == 0);
		/* The reserve has been done and the anon_hdr is separate. */
		anon_idx = 0;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = amp->ahp;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);

	} else if ((flags & KPD_NO_ANON) == 0) {
		if (anon_resv(SEGKP_MAPLEN(len, flags)) == 0) {
			if (flags & KPD_LOCKED) {
				atomic_add_long(&anon_segkp_pages_locked,
				    -pages);
				page_unresv(pages);
			}
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = kpsd->kpsd_anon;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);
	} else {
		kpd->kp_anon = NULL;
		kpd->kp_anon_idx = 0;
	}

	/*
	 * Allocate page and anon resources for the virtual address range
	 * except the redzone
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
	for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
		page_t		*pl[2];
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		page_t		*pp = NULL;

		/*
		 * Mark this page to be a segkp page in the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_SET(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		/*
		 * If this page is the red zone page, we don't need swap
		 * space for it.  Note that we skip over the code that
		 * establishes MMU mappings, so that the page remains
		 * invalid.
		 */
		if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
			continue;

		if (kpd->kp_anon != NULL) {
			struct anon *ap;

			ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
			    == NULL);
			/*
			 * Determine the "vp" and "off" of the anon slot.
			 */
			ap = anon_alloc(NULL, 0);
			if (amp != NULL)
				ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
			    ap, ANON_SLEEP);
			if (amp != NULL)
				ANON_LOCK_EXIT(&amp->a_rwlock);
			swap_xlate(ap, &vp, &off);

			/*
			 * Create a page with the specified identity.  The
			 * page is returned with the "shared" lock held.
			 */
			err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
			    NULL, pl, PAGESIZE, seg, va, S_CREATE,
			    kcred);
			if (err) {
				/*
				 * XXX - This should not fail.
				 */
				panic("segkp_get: no pages");
				/*NOTREACHED*/
			}
			pp = pl[0];
		} else {
			ASSERT(page_exists(&kvp,
			    (u_offset_t)(uintptr_t)va) == NULL);

			if ((pp = page_create_va(&kvp,
			    (u_offset_t)(uintptr_t)va, PAGESIZE,
			    (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
			    PG_NORELOC, seg, va)) == NULL) {
				/*
				 * Legitimize resource; then destroy it.
				 * Easier than trying to unwind here.
				 */
				kpd->kp_flags = flags;
				kpd->kp_base = vbase;
				kpd->kp_len = len;
				segkp_release_internal(seg, kpd, va - vbase);
				return (NULL);
			}
			page_io_unlock(pp);
		}

		if (flags & KPD_ZERO)
			pagezero(pp, 0, PAGESIZE);

		/*
		 * Load and lock an MMU translation for the page.
		 */
		hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
		    ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));

		/*
		 * Now, release lock on the page.
		 */
		if (flags & KPD_LOCKED)
			page_downgrade(pp);
		else
			page_unlock(pp);
	}

	kpd->kp_flags = flags;
	kpd->kp_base = vbase;
	kpd->kp_len = len;
	segkp_insert(seg, kpd);
	*tkpd = kpd;
	return (stom(kpd->kp_base, flags));
}
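
/*
 * Illustrative note (not from the original source; see vm/seg_kp.h for the
 * authoritative macro definitions): with KPD_HASREDZONE, kp_base and kp_len
 * describe the whole resource including the unmapped redzone page, while
 * stom() and SEGKP_MAPLEN() yield the mapped portion the client actually
 * sees.  On a machine where stacks grow down (STACK_GROWTH_DOWN), a
 * three-page request plus redzone is laid out roughly as:
 *
 *	kp_base ->	+-----------+
 *			| redzone   |	never mapped; KPD_REDZONE(kpd) == 0
 *	stom() ->	+-----------+
 *			| page 1    |
 *			| page 2    |
 *			| page 3    |	SEGKP_MAPLEN() == 3 * PAGESIZE
 *			+-----------+
 */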

/*
 * Release the resource to the cache if the pool (designated by the cookie)
 * has less than the maximum allowable. If inserted in the cache,
 * segkp_delete ensures the element is taken off the active list.
 */
void
segkp_release(struct seg *seg, caddr_t vaddr)
{
	struct segkp_cache *freelist;
	struct segkp_data *kpd = NULL;

	if ((kpd = segkp_find(seg, vaddr)) == NULL) {
		panic("segkp_release: null kpd");
		/*NOTREACHED*/
	}

	if (kpd->kp_cookie != -1) {
		freelist = &segkp_cache[kpd->kp_cookie];
		mutex_enter(&segkp_lock);
		if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
			segkp_delete(seg, kpd);
			kpd->kp_next = freelist->kpf_list;
			freelist->kpf_list = kpd;
			freelist->kpf_count++;
			mutex_exit(&segkp_lock);
			return;
		} else {
			mutex_exit(&segkp_lock);
			kpd->kp_cookie = -1;
		}
	}
	segkp_release_internal(seg, kpd, kpd->kp_len);
}

/*
 * Free the entire resource. segkp_unlock gets called with the start of the
 * mapped portion of the resource. The length is the size of the mapped
 * portion.
 */
static void
segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
{
	caddr_t		va;
	long		i;
	long		redzone;
	size_t		np;
	page_t		*pp;
	struct vnode	*vp;
	anoff_t		off;
	struct anon	*ap;
	pgcnt_t		segkpindex;

	ASSERT(kpd != NULL);
	ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
	np = btop(len);

	/* Remove from active hash list */
	if (kpd->kp_cookie == -1) {
		mutex_enter(&segkp_lock);
		segkp_delete(seg, kpd);
		mutex_exit(&segkp_lock);
	}

	/*
	 * Precompute redzone page index.
	 */
	redzone = -1;
	if (kpd->kp_flags & KPD_HASREDZONE)
		redzone = KPD_REDZONE(kpd);

	va = kpd->kp_base;

	hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
	    ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	/*
	 * Free up those anon resources that are quiescent.
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(va - kvseg.s_base));
	for (i = 0; i < np; i++, va += PAGESIZE) {

		/*
		 * Clear the bit for this page from the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		if (i == redzone)
			continue;
		if (kpd->kp_anon) {
			/*
			 * Free up anon resources and destroy the
			 * associated pages.
			 *
			 * Release the lock if there is one. Have to get the
			 * page to do this, unfortunately.
			 */
			if (kpd->kp_flags & KPD_LOCKED) {
				ap = anon_get_ptr(kpd->kp_anon,
				    kpd->kp_anon_idx + i);
				swap_xlate(ap, &vp, &off);
				/* Find the shared-locked page. */
				pp = page_find(vp, (u_offset_t)off);
				if (pp == NULL) {
					panic("segkp_release: "
					    "kp_anon: no page to unlock ");
					/*NOTREACHED*/
				}
				page_unlock(pp);
			}
			if ((kpd->kp_flags & KPD_HASAMP) == 0) {
				anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
				    PAGESIZE);
				anon_unresv(PAGESIZE);
			}
			TRACE_5(TR_FAC_VM,
			    TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
			    kpd, va, PAGESIZE, 0, 0);
		} else {
			if (kpd->kp_flags & KPD_LOCKED) {
				pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
				if (pp == NULL) {
					panic("segkp_release: "
					    "no page to unlock");
					/*NOTREACHED*/
				}
				/*
				 * We should just upgrade the lock here
				 * but there is no upgrade that waits.
				 */
				page_unlock(pp);
			}
			pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
			    SE_EXCL);
			if (pp != NULL)
				page_destroy(pp, 0);
		}
	}

	/* If locked, release physical memory reservation */
	if (kpd->kp_flags & KPD_LOCKED) {
		pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}

	vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
	kmem_free(kpd, sizeof (struct segkp_data));
}

/*
 * segkp_map_red() will check the current frame pointer against the
 * stack base.  If the amount of stack remaining is questionable
 * (less than red_minavail), then segkp_map_red() will map in the redzone
 * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 * _only_ be called when:
 *
 *   - it is safe to sleep on page_create_va().
 *   - the caller is non-swappable.
 *
 * It is up to the caller to remember whether segkp_map_red() successfully
 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 * time.  Note that the caller must _remain_ non-swappable until after
 * calling segkp_unmap_red().
 *
 * Currently, this routine is only called from pagefault() (which necessarily
 * satisfies the above conditions).
 */
#if defined(STACK_GROWTH_DOWN)
int
segkp_map_red(void)
{
	uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
#ifndef _LP64
	caddr_t stkbase;
#endif

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Optimize for the common case where we simply return.
	 */
	if ((curthread->t_red_pp == NULL) &&
	    (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
		return (0);

#if defined(_LP64)
	/*
	 * XXX	We probably need something better than this.
	 */
	panic("kernel stack overflow");
	/*NOTREACHED*/
#else /* _LP64 */
	if (curthread->t_red_pp == NULL) {
		page_t *red_pp;
		struct seg kseg;

		caddr_t red_va = (caddr_t)
		    (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
		    PAGESIZE);

		ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
		    NULL);

		/*
		 * Allocate the physical page for the redzone.
		 *
		 * No PG_NORELOC here to avoid waits. Unlikely to get
		 * a relocate happening in the short time the page exists
		 * and it will be OK anyway.
		 */

		kseg.s_as = &kas;
		red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
		    PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
		ASSERT(red_pp != NULL);

		/*
		 * So we now have a page to jam into the redzone...
		 */
		page_io_unlock(red_pp);

		hat_memload(kas.a_hat, red_va, red_pp,
		    (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
		page_downgrade(red_pp);

		/*
		 * The page is left SE_SHARED locked so we can hold on to
		 * the page_t pointer.
		 */
		curthread->t_red_pp = red_pp;

		atomic_add_32(&red_nmapped, 1);
		while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
			(void) cas32(&red_closest, red_closest,
			    (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
		}
		return (1);
	}

	stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	atomic_add_32(&red_ndoubles, 1);

	if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
		/*
		 * Oh boy.  We're already deep within the mapped-in
		 * redzone page, and the caller is trying to prepare
		 * for a deep stack run.  We're running without a
		 * redzone right now:  if the caller plows off the
		 * end of the stack, it'll plow another thread or
		 * LWP structure.  That situation could result in
		 * a very hard-to-debug panic, so, in the spirit of
		 * recording the name of one's killer in one's own
		 * blood, we're going to record lbolt and the calling
		 * thread.
		 */
		red_deep_lbolt = lbolt;
		red_deep_thread = curthread;
	}

	/*
	 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
	 */
	ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
	return (0);
#endif /* _LP64 */
}

void
segkp_unmap_red(void)
{
	page_t *pp;
	caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	ASSERT(curthread->t_red_pp != NULL);
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Because we locked the mapping down, we can't simply rely
	 * on page_destroy() to clean everything up;  we need to call
	 * hat_unload() to explicitly unlock the mapping resources.
	 */
	hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);

	pp = curthread->t_red_pp;

	ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));

	/*
	 * Need to upgrade the SE_SHARED lock to SE_EXCL.
	 */
	if (!page_tryupgrade(pp)) {
		/*
		 * As there is no wait for upgrade, release the
		 * SE_SHARED lock and wait for SE_EXCL.
		 */
		page_unlock(pp);
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
		/* pp may be NULL here, hence the test below */
	}

	/*
	 * Destroy the page, with dontfree set to zero (i.e. free it).
	 */
	if (pp != NULL)
		page_destroy(pp, 0);
	curthread->t_red_pp = NULL;
}
#else
#error Red stacks only supported with downwards stack growth.
#endif
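
/*
 * Illustrative sketch (hypothetical caller, not from this file): per the
 * comment above segkp_map_red(), the fault path uses these routines
 * roughly as follows, remembering whether the redzone was mapped and
 * remaining non-swappable in between:
 *
 *	int mapped_red = segkp_map_red();
 *	...			(handle the fault)
 *	if (mapped_red)
 *		segkp_unmap_red();
 */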

/*
 * Handle a fault on an address corresponding to one of the
 * resources in the segkp segment.
 */
faultcode_t
segkp_fault(
	struct hat	*hat,
	struct seg	*seg,
	caddr_t		vaddr,
	size_t		len,
	enum fault_type	type,
	enum seg_rw rw)
{
	struct segkp_data	*kpd = NULL;
	int			err;

	ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));

	/*
	 * Sanity checks.
	 */
	if (type == F_PROT) {
		panic("segkp_fault: unexpected F_PROT fault");
		/*NOTREACHED*/
	}

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (FC_NOMAP);

	mutex_enter(&kpd->kp_lock);

	if (type == F_SOFTLOCK) {
		ASSERT(!(kpd->kp_flags & KPD_LOCKED));
		/*
		 * The F_SOFTLOCK case has more stringent
		 * range requirements: the given range must exactly coincide
		 * with the resource's mapped portion. Note that a reference
		 * to the redzone is handled since vaddr would not equal base.
		 */
		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(EFAULT));
		}

		if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(err));
		}
		kpd->kp_flags |= KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (0);
	}

	if (type == F_INVAL) {
		ASSERT(!(kpd->kp_flags & KPD_NO_ANON));

		/*
		 * Check if we touched the redzone. Somewhat optimistic
		 * here if we are touching the redzone of our own stack
		 * since we wouldn't have a stack to get this far...
		 */
		if ((kpd->kp_flags & KPD_HASREDZONE) &&
		    btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
			panic("segkp_fault: accessing redzone");

		/*
		 * This fault may occur while the page is being F_SOFTLOCK'ed.
		 * Return since a second segkp_load is unnecessary and also
		 * would result in the page being locked twice and eventually
		 * hang the thread_reaper thread.
		 */
		if (kpd->kp_flags & KPD_LOCKED) {
			mutex_exit(&kpd->kp_lock);
			return (0);
		}

		err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}

	if (type == F_SOFTUNLOCK) {
		uint_t	flags;

		/*
		 * Make sure the addr is LOCKED and it has anon backing
		 * before unlocking.
		 */
		if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON) {
			panic("segkp_fault: bad unlock");
			/*NOTREACHED*/
		}

		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			panic("segkp_fault: bad range");
			/*NOTREACHED*/
		}

		if (rw == S_WRITE)
			flags = kpd->kp_flags | KPD_WRITEDIRTY;
		else
			flags = kpd->kp_flags;
		err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
		kpd->kp_flags &= ~KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}
	mutex_exit(&kpd->kp_lock);
	panic("segkp_fault: bogus fault type: %d\n", type);
	/*NOTREACHED*/
}

/*
 * Check that the given protections suffice over the range specified by
 * vaddr and len.  For this segment type, the only issue is whether or
 * not the range lies completely within the mapped part of an allocated
 * resource.
 */
/* ARGSUSED */
static int
segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
{
	struct segkp_data *kpd = NULL;
	caddr_t mbase;
	size_t mlen;

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (EACCES);

	mutex_enter(&kpd->kp_lock);
	mbase = stom(kpd->kp_base, kpd->kp_flags);
	mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
	if (len > mlen || vaddr < mbase ||
	    ((vaddr + len) > (mbase + mlen))) {
		mutex_exit(&kpd->kp_lock);
		return (EACCES);
	}
	mutex_exit(&kpd->kp_lock);
	return (0);
}

/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For seg_u we always "approve" of this action from our standpoint.
 */
/*ARGSUSED*/
static int
segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

/*
 * Load and possibly lock intra-slot resources in the range given by
 * vaddr and len.
 */
static int
segkp_load(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	uint_t lock;

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btop(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
		(void) page_resv(pages, KM_SLEEP);
	}

	/*
	 * Loop through the pages in the given range.
	 */
	va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
	vaddr = va;
	vlim = va + len;
	lock = flags & KPD_LOCKED;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	for (; va < vlim; va += PAGESIZE, i++) {
		page_t		*pl[2];	/* second element NULL terminator */
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		struct anon	*ap;

		/*
		 * Summon the page.  If it's not resident, arrange
		 * for synchronous i/o to pull it in.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		/*
		 * The returned page list will have exactly one entry,
		 * which is returned to us already kept.
		 */
		err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
		    pl, PAGESIZE, seg, va, S_READ, kcred);

		if (err) {
			/*
			 * Back out of what we've done so far.
			 */
			(void) segkp_unlock(hat, seg, vaddr,
			    (va - vaddr), kpd, flags);
			return (err);
		}

		/*
		 * Load an MMU translation for the page.
		 */
		hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
		    lock ? HAT_LOAD_LOCK : HAT_LOAD);

		if (!lock) {
			/*
			 * Now, release "shared" lock on the page.
			 */
			page_unlock(pl[0]);
		}
	}
	return (0);
}

/*
 * At the very least, unload the MMU translations and unlock the range if
 * locked.  This can be called with the flag value KPD_WRITEDIRTY, which
 * specifies that any dirty pages should be written to disk.
 */
static int
segkp_unlock(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	struct page *pp;
	struct vnode *vp;
	anoff_t off;
	struct anon *ap;

#ifdef lint
	seg = seg;
#endif /* lint */

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	/*
	 * Loop through the pages in the given range. It is assumed
	 * segkp_unlock is called with a page-aligned base.
	 */
	va = vaddr;
	vlim = va + len;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	hat_unload(hat, va, len,
	    ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	for (; va < vlim; va += PAGESIZE, i++) {
		/*
		 * Find the page associated with this part of the
		 * slot, tracking it down through its associated swap
		 * space.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		if (flags & KPD_LOCKED) {
			if ((pp = page_find(vp, off)) == NULL) {
				panic("segkp_softunlock: missing page");
				/*NOTREACHED*/
			}
		} else {
			/*
			 * Nothing to do if the slot is not locked and the
			 * page doesn't exist.
			 */
			if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
				continue;
		}

		/*
		 * If the page doesn't have any translations, is
		 * dirty and not being shared, then push it out
		 * asynchronously and avoid waiting for the
		 * pageout daemon to do it for us.
		 *
		 * XXX - Do we really need to get the "exclusive"
		 * lock via an upgrade?
		 */
		if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
		    hat_ismod(pp) && page_tryupgrade(pp)) {
			/*
			 * Hold the vnode before releasing the page lock to
			 * prevent it from being freed and re-used by some
			 * other thread.
			 */
			VN_HOLD(vp);
			page_unlock(pp);

			/*
			 * Want most powerful credentials we can get so
			 * use kcred.
			 */
			(void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
			    B_ASYNC | B_FREE, kcred);
			VN_RELE(vp);
		} else {
			page_unlock(pp);
		}
	}

	/* If unlocking, release physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btopr(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}
	return (0);
}

/*
 * Insert the kpd in the hash table.
 */
static void
segkp_insert(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	int index;

	/*
	 * Insert the kpd based on the address that will be returned
	 * via segkp_release.
	 */
	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	mutex_enter(&segkp_lock);
	kpd->kp_next = kpsd->kpsd_hash[index];
	kpsd->kpsd_hash[index] = kpd;
	mutex_exit(&segkp_lock);
}

/*
 * Remove kpd from the hash table.
 */
static void
segkp_delete(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data **kpp;
	int index;

	ASSERT(MUTEX_HELD(&segkp_lock));

	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	for (kpp = &kpsd->kpsd_hash[index];
	    *kpp != NULL; kpp = &((*kpp)->kp_next)) {
		if (*kpp == kpd) {
			*kpp = kpd->kp_next;
			return;
		}
	}
	panic("segkp_delete: unable to find element to delete");
	/*NOTREACHED*/
}

/*
 * Find the kpd associated with a vaddr.
 *
 * Most of the callers of segkp_find will pass the vaddr that
 * hashes to the desired index, but there are cases where
 * this is not true in which case we have to (potentially) scan
 * the whole table looking for it. This should be very rare
 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
 * middle of the segkp_data region).
 */
static struct segkp_data *
segkp_find(struct seg *seg, caddr_t vaddr)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data *kpd;
	int	i;
	int	stop;

	i = stop = SEGKP_HASH(vaddr);
	mutex_enter(&segkp_lock);
	do {
		for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
		    kpd = kpd->kp_next) {
			if (vaddr >= kpd->kp_base &&
			    vaddr < kpd->kp_base + kpd->kp_len) {
				mutex_exit(&segkp_lock);
				return (kpd);
			}
		}
		if (--i < 0)
			i = SEGKP_HASHSZ - 1;	/* Wrap */
	} while (i != stop);
	mutex_exit(&segkp_lock);
	return (NULL);		/* Not found */
}

/*
 * Returns the size of the swappable area.
 */
size_t
swapsize(caddr_t v)
{
	struct segkp_data *kpd;

	if ((kpd = segkp_find(segkp, v)) != NULL)
		return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
	else
		return (0);
}

/*
 * Dump out all the active segkp pages
 */
static void
segkp_dump(struct seg *seg)
{
	int i;
	struct segkp_data *kpd;
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;

	for (i = 0; i < SEGKP_HASHSZ; i++) {
		for (kpd = kpsd->kpsd_hash[i];
		    kpd != NULL; kpd = kpd->kp_next) {
			pfn_t pfn;
			caddr_t addr;
			caddr_t eaddr;

			addr = kpd->kp_base;
			eaddr = addr + kpd->kp_len;
			while (addr < eaddr) {
				ASSERT(seg->s_as == &kas);
				pfn = hat_getpfnum(seg->s_as->a_hat, addr);
				if (pfn != PFN_INVALID)
					dump_addpage(seg->s_as, addr, pfn);
				addr += PAGESIZE;
				dump_timeleft = dump_timeout;
			}
		}
	}
}

/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t	*
segkp_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_add_32(&segkp_indel, 1);
	segkp_cache_free();
	return (0);
}

/*ARGSUSED*/
static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	atomic_add_32(&segkp_indel, -1);
}

static kphysm_setup_vector_t segkp_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	segkp_mem_config_post_add,
	segkp_mem_config_pre_del,
	segkp_mem_config_post_del,
};

static void
segkpinit_mem_config(struct seg *seg)
{
	int ret;

	ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
	ASSERT(ret == 0);
}