xref: /titanic_51/usr/src/uts/common/vm/vm_swap.c (revision 1bdd6c0e3710e91cb1f31aa78de33cb638494480)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
27 /*	  All Rights Reserved  	*/
28 
29 /*
30  * University Copyright- Copyright (c) 1982, 1986, 1988
31  * The Regents of the University of California
32  * All Rights Reserved
33  *
34  * University Acknowledgment- Portions of this document are derived from
35  * software developed by the University of California, Berkeley, and its
36  * contributors.
37  */
38 
39 /*
40  * Each physical swap area has an associated bitmap representing
41  * its physical storage. The bitmap records which swap slots are
42  * currently allocated or freed.  Allocation is done by searching
43  * through the bitmap for the first free slot. Thus, there's
44  * no linear relation between offset within the swap device and the
45  * address (within its segment(s)) of the page that the slot backs;
46  * instead, it's an arbitrary one-to-one mapping.
47  *
48  * Associated with each swap area is a swapinfo structure.  These
49  * structures are linked into a linear list that determines the
50  * ordering of swap areas in the logical swap device.  Each contains a
51  * pointer to the corresponding bitmap, the area's size, and its
52  * associated vnode.
53  */
54 
55 #include <sys/types.h>
56 #include <sys/inttypes.h>
57 #include <sys/param.h>
58 #include <sys/t_lock.h>
59 #include <sys/sysmacros.h>
60 #include <sys/systm.h>
61 #include <sys/errno.h>
62 #include <sys/kmem.h>
63 #include <sys/vfs.h>
64 #include <sys/vnode.h>
65 #include <sys/pathname.h>
66 #include <sys/cmn_err.h>
67 #include <sys/vtrace.h>
68 #include <sys/swap.h>
69 #include <sys/dumphdr.h>
70 #include <sys/debug.h>
71 #include <sys/fs/snode.h>
72 #include <sys/fs/swapnode.h>
73 #include <sys/policy.h>
74 #include <sys/zone.h>
75 
76 #include <vm/as.h>
77 #include <vm/seg.h>
78 #include <vm/page.h>
79 #include <vm/seg_vn.h>
80 #include <vm/hat.h>
81 #include <vm/anon.h>
82 #include <vm/seg_map.h>
83 
/*
 * To balance the load among multiple swap areas, we don't allow
 * more than swap_maxcontig allocations to be satisfied from a
 * single swap area before moving on to the next swap area.  This
 * effectively "interleaves" allocations among the many swap areas.
 */
int swap_maxcontig;	/* set by anon_init() to 1 Mb */

#define	MINIROOTSIZE	12000	/* ~6 Meg XXX */

/*
 * XXX - this lock is a kludge. It serializes some aspects of swapadd() and
 * swapdel() (namely VOP_OPEN, VOP_CLOSE, VN_RELE).  It protects against
 * somebody swapadd'ing and getting swap slots from a vnode, while someone
 * else is in the process of closing or rele'ing it.
 */
static kmutex_t swap_lock;

/* Guards the swapinfo list and the allocation state below. */
kmutex_t swapinfo_lock;

/*
 * protected by the swapinfo_lock
 */
struct swapinfo	*swapinfo;	/* head of the linked list of swap areas */

static	struct	swapinfo *silast;	/* round-robin allocation cursor */
static	int	nswapfiles;		/* number of configured swap areas */

static u_offset_t	swap_getoff(struct swapinfo *);
static int	swapadd(struct vnode *, ulong_t, ulong_t, char *);
static int	swapdel(struct vnode *, ulong_t);
static int	swapslot_free(struct vnode *, u_offset_t, struct swapinfo *);

/*
 * swap device bitmap allocation macros.
 * One bit per page slot; 'i' is a slot (page) number within the area.
 */
#define	MAPSHIFT	5
#define	NBBW		(NBPW * NBBY)	/* number of bits per word */
#define	TESTBIT(map, i)		(((map)[(i) >> MAPSHIFT] & (1 << (i) % NBBW)))
#define	SETBIT(map, i)		(((map)[(i) >> MAPSHIFT] |= (1 << (i) % NBBW)))
#define	CLEARBIT(map, i)	(((map)[(i) >> MAPSHIFT] &= ~(1 << (i) % NBBW)))

int swap_debug = 0;	/* set for debug printf's */
int swap_verify = 0;	/* set to verify slots when freeing and allocating */

/* Cap (in bytes) on a single contiguous allocation; 0 means no cap. */
uint_t swapalloc_maxcontig;
130 
/*
 * Allocate a range of up to *lenp contiguous slots (page) from a physical
 * swap device. Flags are one of:
 *	SA_NOT  Must have a slot from a physical swap device other than
 *		the one containing input (*vpp, *offp).
 * Less slots than requested may be returned. *lenp allocated slots are
 * returned starting at *offp on *vpp.
 * Returns 1 for a successful allocation, 0 for couldn't allocate any slots.
 */
int
swap_phys_alloc(
	struct vnode **vpp,
	u_offset_t *offp,
	size_t *lenp,
	uint_t flags)
{
	struct swapinfo *sip;
	offset_t soff, noff;
	size_t len;

	mutex_enter(&swapinfo_lock);
	sip = silast;

	/* Find a desirable physical device and allocate from it. */
	do {
		if (sip == NULL)
			break;
		/* Skip areas being deleted (ST_INDEL) or with no free pages */
		if (!(sip->si_flags & ST_INDEL) &&
		    (spgcnt_t)sip->si_nfpgs > 0) {
			/* Caller wants other than specified swap device */
			if (flags & SA_NOT) {
				if (*vpp != sip->si_vp ||
				    *offp < sip->si_soff ||
				    *offp >= sip->si_eoff)
					goto found;
			/* Caller is loose, will take anything */
			} else
				goto found;
		} else if (sip->si_nfpgs == 0)
			sip->si_allocs = 0;
		if ((sip = sip->si_next) == NULL)
			sip = swapinfo;
	} while (sip != silast);
	mutex_exit(&swapinfo_lock);
	return (0);
found:
	soff = swap_getoff(sip);
	sip->si_nfpgs--;
	if (soff == -1)
		panic("swap_alloc: swap_getoff failed!");

	/*
	 * Grow the allocation one page at a time, up to *lenp bytes (or
	 * swapalloc_maxcontig, if set), as long as swap_getoff() keeps
	 * returning the slot physically adjacent to the previous one.  A
	 * non-adjacent slot terminates the run and is returned to the bitmap.
	 */
	for (len = PAGESIZE; len < *lenp; len += PAGESIZE) {
		if (sip->si_nfpgs == 0)
			break;
		if (swapalloc_maxcontig && len >= swapalloc_maxcontig)
			break;
		noff = swap_getoff(sip);
		if (noff == -1) {
			break;
		} else if (noff != soff + len) {
			CLEARBIT(sip->si_swapslots, btop(noff - sip->si_soff));
			break;
		}
		sip->si_nfpgs--;
	}
	*vpp = sip->si_vp;
	*offp = soff;
	*lenp = len;
	ASSERT((spgcnt_t)sip->si_nfpgs >= 0);
	sip->si_allocs += btop(len);
	if (sip->si_allocs >= swap_maxcontig) {
		/*
		 * This area has satisfied its quota of allocations; advance
		 * silast so the next call interleaves onto the next area.
		 */
		sip->si_allocs = 0;
		if ((silast = sip->si_next) == NULL)
			silast = swapinfo;
	}
	TRACE_2(TR_FAC_VM, TR_SWAP_ALLOC,
	    "swap_alloc:sip %p offset %lx", sip, soff);
	mutex_exit(&swapinfo_lock);
	return (1);
}
211 
int swap_backsearch = 0;	/* tunable: scan bitmap backwards on a miss */

/*
 * Get a free offset on swap device sip.
 * Return >=0 offset if succeeded, -1 for failure.
 *
 * Scans the allocation bitmap a word at a time starting at the word
 * containing si_hint, then locates the free bit within the found word.
 * Caller must hold swapinfo_lock.
 */
static u_offset_t
swap_getoff(struct swapinfo *sip)
{
	uint_t *sp, *ep;
	size_t aoff, boff, poff, slotnumber;

	ASSERT(MUTEX_HELD(&swapinfo_lock));

	sip->si_alloccnt++;
	/*
	 * Forward scan from the hint word to the end of the map, looking
	 * for any word that is not completely allocated (not all ones).
	 */
	for (sp = &sip->si_swapslots[sip->si_hint >> MAPSHIFT],
	    ep = &sip->si_swapslots[sip->si_mapsize / NBPW]; sp < ep; sp++) {
		if (*sp != (uint_t)0xffffffff)
			goto foundentry;
		else
			sip->si_checkcnt++;
	}
	SWAP_PRINT(SW_ALLOC,
	    "swap_getoff: couldn't find slot from hint %ld to end\n",
	    sip->si_hint, 0, 0, 0, 0);
	/*
	 * Go backwards? Check for faster method XXX
	 */
	if (swap_backsearch) {
		/* Scan backwards from the hint word to the start of the map */
		for (sp = &sip->si_swapslots[sip->si_hint >> MAPSHIFT],
		    ep = sip->si_swapslots; sp > ep; sp--) {
			if (*sp != (uint_t)0xffffffff)
				goto foundentry;
			else
				sip->si_checkcnt++;
		}
	} else {
		/* Wrap: scan forward from the start of the map to the hint */
		for (sp = sip->si_swapslots,
		    ep = &sip->si_swapslots[sip->si_hint >> MAPSHIFT];
		    sp < ep; sp++) {
			if (*sp != (uint_t)0xffffffff)
				goto foundentry;
			else
				sip->si_checkcnt++;
		}
	}
	/* Both scans exhausted without a partially-free word: no slots left */
	if (*sp == 0xffffffff) {
		cmn_err(CE_WARN, "No free swap slots!");
		return ((u_offset_t)-1);
	}

foundentry:
	/*
	 * aoff is the page number offset (in bytes) of the si_swapslots
	 * array element containing a free page
	 *
	 * boff is the page number offset of the free page
	 * (i.e. cleared bit) in si_swapslots[aoff].
	 */
	aoff = ((char *)sp - (char *)sip->si_swapslots) * NBBY;

	/* Check bits at and after the hint position within the word first */
	for (boff = (sip->si_hint % NBBW); boff < NBBW; boff++) {
		if (!TESTBIT(sip->si_swapslots, aoff + boff))
			goto foundslot;
		else
			sip->si_checkcnt++;
	}
	/* ... then wrap to the bits before the hint position */
	for (boff = 0; boff < (sip->si_hint % NBBW); boff++) {
		if (!TESTBIT(sip->si_swapslots, aoff + boff))
			goto foundslot;
		else
			sip->si_checkcnt++;
	}
	/* The word scan promised a clear bit; not finding one is fatal */
	panic("swap_getoff: didn't find slot in word hint %ld", sip->si_hint);

foundslot:
	/*
	 * Return the offset of the free page in swap device.
	 * Convert page number to byte offset and add starting
	 * offset of swap device.
	 */
	slotnumber = aoff + boff;
	SWAP_PRINT(SW_ALLOC, "swap_getoff: allocating slot %ld\n",
	    slotnumber, 0, 0, 0, 0);
	poff = ptob(slotnumber);
	if (poff + sip->si_soff >= sip->si_eoff)
		printf("ptob(aoff(%ld) + boff(%ld))(%ld) >= eoff(%ld)\n",
		    aoff, boff, ptob(slotnumber), (long)sip->si_eoff);
	ASSERT(poff < sip->si_eoff);
	/*
	 * We could verify here that the slot isn't already allocated
	 * by looking through all the anon slots.
	 */
	SETBIT(sip->si_swapslots, slotnumber);
	sip->si_hint = slotnumber + 1;	/* hint = next slot */
	return (poff + sip->si_soff);
}
309 
/*
 * Free a swap page.
 *
 * Finds the swap area backing <vp, off> and clears the bitmap bits for
 * the btop(len) slots starting there, crediting si_nfpgs.  Panics if a
 * slot is already free (double free) or if <vp, off> matches no
 * configured swap area.
 */
void
swap_phys_free(struct vnode *vp, u_offset_t off, size_t len)
{
	struct swapinfo *sip;
	ssize_t pagenumber, npage;

	mutex_enter(&swapinfo_lock);
	sip = swapinfo;

	do {
		/* Does this area contain the offset being freed? */
		if (sip->si_vp == vp &&
		    sip->si_soff <= off && off < sip->si_eoff) {
			for (pagenumber = btop(off - sip->si_soff),
			    npage = btop(len) + pagenumber;
			    pagenumber < npage; pagenumber++) {
				SWAP_PRINT(SW_ALLOC,
				    "swap_phys_free: freeing slot %ld on "
				    "sip %p\n",
				    pagenumber, sip, 0, 0, 0);
				if (!TESTBIT(sip->si_swapslots, pagenumber)) {
					panic(
					    "swap_phys_free: freeing free slot "
					    "%p,%lx\n", (void *)vp,
					    ptob(pagenumber) + sip->si_soff);
				}
				CLEARBIT(sip->si_swapslots, pagenumber);
				sip->si_nfpgs++;
			}
			ASSERT(sip->si_nfpgs <= sip->si_npgs);
			mutex_exit(&swapinfo_lock);
			return;
		}
	} while ((sip = sip->si_next) != NULL);
	/* Offset not covered by any swap area: caller handed us garbage */
	panic("swap_phys_free");
	/*NOTREACHED*/
}
349 
350 /*
351  * Return the anon struct corresponding for the given
352  * <vnode, off> if it is part of the virtual swap device.
353  * Return the anon struct if found, otherwise NULL.
354  */
355 struct anon *
356 swap_anon(struct vnode *vp, u_offset_t off)
357 {
358 	struct anon *ap;
359 
360 	ASSERT(MUTEX_HELD(&anonhash_lock[AH_LOCK(vp, off)]));
361 
362 	for (ap = anon_hash[ANON_HASH(vp, off)]; ap != NULL; ap = ap->an_hash) {
363 		if (ap->an_vp == vp && ap->an_off == off)
364 			return (ap);
365 	}
366 	return (NULL);
367 }
368 
369 
370 /*
371  * Determine if the vp offset range overlap a swap device.
372  */
373 int
374 swap_in_range(struct vnode *vp, u_offset_t offset, size_t len)
375 {
376 	struct swapinfo *sip;
377 	u_offset_t eoff;
378 
379 	eoff = offset + len;
380 	ASSERT(eoff > offset);
381 
382 	mutex_enter(&swapinfo_lock);
383 	sip = swapinfo;
384 	if (vp && sip) {
385 		do {
386 			if (vp != sip->si_vp || eoff <= sip->si_soff ||
387 			    offset >= sip->si_eoff)
388 				continue;
389 			mutex_exit(&swapinfo_lock);
390 			return (1);
391 		} while ((sip = sip->si_next) != NULL);
392 	}
393 	mutex_exit(&swapinfo_lock);
394 	return (0);
395 }
396 
/*
 * See if name is one of our swap files
 * even though lookupname failed.
 * This can be used by swapdel to delete
 * swap resources on remote machines
 * where the link has gone down.
 *
 * Returns the matching vnode with a hold (VN_HOLD) placed on it,
 * or NULL if no idle (si_flags == 0) entry matches both the pathname
 * and the page-aligned starting offset derived from lowblk.
 */
static struct vnode *
swapdel_byname(
	char 	*name,			/* pathname to delete */
	ulong_t lowblk) 	/* Low block number of area to delete */
{
	struct swapinfo **sipp, *osip;
	u_offset_t soff;

	/*
	 * Find the swap file entry for the file to
	 * be deleted. Skip any entries that are in
	 * transition.
	 */

	soff = ptob(btopr(lowblk << SCTRSHFT)); /* must be page aligned */

	mutex_enter(&swapinfo_lock);
	for (sipp = &swapinfo; (osip = *sipp) != NULL; sipp = &osip->si_next) {
		if ((strcmp(osip->si_pname, name) == 0) &&
		    (osip->si_soff == soff) && (osip->si_flags == 0)) {
			struct vnode *vp = osip->si_vp;

			/* Hold the vnode before dropping the list lock */
			VN_HOLD(vp);
			mutex_exit(&swapinfo_lock);
			return (vp);
		}
	}
	mutex_exit(&swapinfo_lock);
	return (NULL);
}
434 
435 
/*
 * New system call to manipulate swap files.
 *
 * sc_cmd selects the operation:
 *	SC_GETNSWP - store the number of swap files in *rv.
 *	SC_AINFO   - copy an anoninfo usage summary out to sc_arg.
 *	SC_LIST    - copy a table of swapent entries out to sc_arg.
 *	SC_ADD     - add a swap area (privileged).
 *	SC_REMOVE  - delete a swap area (privileged).
 * Returns 0 on success or an errno value.
 *
 * Inside a non-global zone the real configuration is hidden: the zone
 * sees a single device named "swap" sized as the sum of all real swap
 * devices, clamped by the zone's swap cap where one is set.
 */
int
swapctl(int sc_cmd, void *sc_arg, int *rv)
{
	struct swapinfo *sip, *csip, *tsip;
	int error = 0;
	struct swapent st, *ust;
	struct swapres sr;
	struct vnode *vp;
	int cnt = 0;
	int tmp_nswapfiles;
	int nswap;
	int length, nlen;
	int gplen = 0, plen;
	char *swapname;
	char *pname;
	char *tpname;
	struct anoninfo ai;
	spgcnt_t avail;
	int global = INGLOBALZONE(curproc);
	struct zone *zp = curproc->p_zone;

	/*
	 * When running in a zone we want to hide the details of the swap
	 * devices: we report there only being one swap device named "swap"
	 * having a size equal to the sum of the sizes of all real swap devices
	 * on the system.
	 */
	switch (sc_cmd) {
	case SC_GETNSWP:
		if (global)
			*rv = nswapfiles;
		else
			*rv = 1;
		return (0);

	case SC_AINFO:
		/*
		 * Return anoninfo information with these changes:
		 * ani_max = maximum amount of swap space
		 *	(including potentially available physical memory)
		 * ani_free = amount of unallocated anonymous memory
		 *	(some of which might be reserved and including
		 *	 potentially available physical memory)
		 * ani_resv = amount of claimed (reserved) anonymous memory
		 */
		avail = MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);
		ai.ani_max = (k_anoninfo.ani_max +
		    k_anoninfo.ani_mem_resv) + avail;

		ai.ani_free = k_anoninfo.ani_free + avail;

		ai.ani_resv = k_anoninfo.ani_phys_resv +
		    k_anoninfo.ani_mem_resv;

		if (!global && zp->zone_max_swap_ctl != UINT64_MAX) {
			/*
			 * We're in a non-global zone with a swap cap.  We
			 * always report the system-wide values for the global
			 * zone, even though it too can have a swap cap.
			 */

			/*
			 * For a swap-capped zone, the numbers are contrived
			 * since we don't have a correct value of 'reserved'
			 * for the zone.
			 *
			 * The ani_max value is always the zone's swap cap.
			 *
			 * The ani_free value is always the difference between
			 * the cap and the amount of swap in use by the zone.
			 *
			 * The ani_resv value is typically set to be the amount
			 * of swap in use by the zone, but can be adjusted
			 * upwards to indicate how much swap is currently
			 * unavailable to that zone due to usage by entities
			 * outside the zone.
			 *
			 * This works as follows.
			 *
			 * In the 'swap -s' output, the data is displayed
			 * as follows:
			 *    allocated = ani_max  - ani_free
			 *    reserved  = ani_resv - allocated
			 *    available = ani_max  - ani_resv
			 *
			 * Taking a contrived example, if the swap cap is 100
			 * and the amount of swap used by the zone is 75, this
			 * gives:
			 *    allocated = ani_max  - ani_free  = 100 - 25 = 75
			 *    reserved  = ani_resv - allocated =  75 - 75 =  0
			 *    available = ani_max  - ani_resv  = 100 - 75 = 25
			 *
			 * In this typical case, you can see that the 'swap -s'
			 * 'reserved' will always be 0 inside a swap capped
			 * zone.
			 *
			 * However, if the system as a whole has less free
			 * swap than the zone limits allow, then we adjust
			 * the ani_resv value up so that it is the difference
			 * between the zone cap and the amount of free system
			 * swap.  Taking the above example, but when the
			 * system as a whole only has 20 of swap available, we
			 * get an ani_resv of 100 - 20 = 80.  This gives:
			 *    allocated = ani_max  - ani_free  = 100 - 25 = 75
			 *    reserved  = ani_resv - allocated =  80 - 75 =  5
			 *    available = ani_max  - ani_resv  = 100 - 80 = 20
			 *
			 * In this case, you can see how the ani_resv value is
			 * tweaked up to make the 'swap -s' numbers work inside
			 * the zone.
			 */
			rctl_qty_t cap, used;
			pgcnt_t pgcap, sys_avail;

			mutex_enter(&zp->zone_mem_lock);
			cap = zp->zone_max_swap_ctl;
			used = zp->zone_max_swap;
			mutex_exit(&zp->zone_mem_lock);

			pgcap = MIN(btop(cap), ai.ani_max);
			ai.ani_free = pgcap - btop(used);

			/* Get the system-wide swap currently available. */
			sys_avail = ai.ani_max - ai.ani_resv;
			if (sys_avail < ai.ani_free)
				ai.ani_resv = pgcap - sys_avail;
			else
				ai.ani_resv = btop(used);

			ai.ani_max = pgcap;
		}

		if (copyout(&ai, sc_arg, sizeof (struct anoninfo)) != 0)
			return (EFAULT);
		return (0);

	case SC_LIST:
		if (copyin(sc_arg, &length, sizeof (int)) != 0)
			return (EFAULT);
		if (!global) {
			/*
			 * Non-global zone: synthesize a single aggregate
			 * entry named "swap" covering all real devices.
			 */
			struct swapent st;	/* shadows the outer 'st' */
			char *swappath = "swap";

			if (length < 1)
				return (ENOMEM);
			/*
			 * swt_ent is an array member at the tail of
			 * swaptbl_t, so this is only address arithmetic on
			 * the user pointer, not a dereference.
			 */
			ust = (swapent_t *)((swaptbl_t *)sc_arg)->swt_ent;
			if (copyin(ust, &st, sizeof (swapent_t)) != 0)
				return (EFAULT);
			st.ste_start = PAGESIZE >> SCTRSHFT;
			st.ste_length = (off_t)0;
			st.ste_pages = 0;
			st.ste_free = 0;
			st.ste_flags = 0;

			/* Sum sizes and free counts over all real areas */
			mutex_enter(&swapinfo_lock);
			for (sip = swapinfo, nswap = 0;
			    sip != NULL && nswap < nswapfiles;
			    sip = sip->si_next, nswap++) {
				st.ste_length +=
				    (sip->si_eoff - sip->si_soff) >> SCTRSHFT;
				st.ste_pages += sip->si_npgs;
				st.ste_free += sip->si_nfpgs;
			}
			mutex_exit(&swapinfo_lock);

			if (zp->zone_max_swap_ctl != UINT64_MAX) {
				/* Clamp the totals to the zone's swap cap */
				rctl_qty_t cap, used;

				mutex_enter(&zp->zone_mem_lock);
				cap = zp->zone_max_swap_ctl;
				used = zp->zone_max_swap;
				mutex_exit(&zp->zone_mem_lock);

				st.ste_length = MIN(cap, st.ste_length);
				st.ste_pages = MIN(btop(cap), st.ste_pages);
				st.ste_free = MIN(st.ste_pages - btop(used),
				    st.ste_free);
			}

			if (copyout(&st, ust, sizeof (swapent_t)) != 0 ||
			    copyout(swappath, st.ste_path,
			    strlen(swappath) + 1) != 0) {
				return (EFAULT);
			}
			*rv = 1;
			return (0);
		}
beginning:
		tmp_nswapfiles = nswapfiles;
		/* Return an error if not enough space for the whole table. */
		if (length < tmp_nswapfiles)
			return (ENOMEM);
		/*
		 * Get memory to hold the swap entries and their names. We'll
		 * copy the real entries into these and then copy these out.
		 * Allocating the pathname memory is only a guess so we may
		 * find that we need more and have to do it again.
		 * All this is because we have to hold the anon lock while
		 * traversing the swapinfo list, and we can't be doing copyouts
		 * and/or kmem_alloc()s during this.
		 */
		csip = kmem_zalloc(tmp_nswapfiles * sizeof (struct swapinfo),
		    KM_SLEEP);
retry:
		nlen = tmp_nswapfiles * (gplen += 100);
		pname = kmem_zalloc(nlen, KM_SLEEP);

		mutex_enter(&swapinfo_lock);

		/* The list changed while we were unlocked; start over */
		if (tmp_nswapfiles != nswapfiles) {
			mutex_exit(&swapinfo_lock);
			kmem_free(pname, nlen);
			kmem_free(csip,
			    tmp_nswapfiles * sizeof (struct swapinfo));
			gplen = 0;
			goto beginning;
		}
		/* Snapshot each entry and its pathname under the lock */
		for (sip = swapinfo, tsip = csip, tpname = pname, nswap = 0;
		    sip && nswap < tmp_nswapfiles;
		    sip = sip->si_next, tsip++, tpname += plen, nswap++) {
			plen = sip->si_pnamelen;
			if (tpname + plen - pname > nlen) {
				/* Name buffer too small; grow and retry */
				mutex_exit(&swapinfo_lock);
				kmem_free(pname, nlen);
				goto retry;
			}
			*tsip = *sip;
			tsip->si_pname = tpname;
			(void) strcpy(tsip->si_pname, sip->si_pname);
		}
		mutex_exit(&swapinfo_lock);

		if (sip) {
			/* More entries than caller's table can hold */
			error = ENOMEM;
			goto lout;
		}
		/* Copy the snapshot out to the user's swapent table */
		ust = (swapent_t *)((swaptbl_t *)sc_arg)->swt_ent;
		for (tsip = csip, cnt = 0; cnt < nswap;  tsip++, ust++, cnt++) {
			if (copyin(ust, &st, sizeof (swapent_t)) != 0) {
				error = EFAULT;
				goto lout;
			}
			st.ste_flags = tsip->si_flags;
			st.ste_length =
			    (tsip->si_eoff - tsip->si_soff) >> SCTRSHFT;
			st.ste_start = tsip->si_soff >> SCTRSHFT;
			st.ste_pages = tsip->si_npgs;
			st.ste_free = tsip->si_nfpgs;
			if (copyout(&st, ust, sizeof (swapent_t)) != 0) {
				error = EFAULT;
				goto lout;
			}
			if (!tsip->si_pnamelen)
				continue;
			if (copyout(tsip->si_pname, st.ste_path,
			    tsip->si_pnamelen) != 0) {
				error = EFAULT;
				goto lout;
			}
		}
		*rv = nswap;
lout:
		kmem_free(csip, tmp_nswapfiles * sizeof (struct swapinfo));
		kmem_free(pname, nlen);
		return (error);

	case SC_ADD:
	case SC_REMOVE:
		/* Privileged; handled by the common path below */
		break;
	default:
		return (EINVAL);
	}
	if ((error = secpolicy_swapctl(CRED())) != 0)
		return (error);

	if (copyin(sc_arg, &sr, sizeof (swapres_t)))
		return (EFAULT);

	/* Allocate the space to read in pathname */
	if ((swapname = kmem_alloc(MAXPATHLEN, KM_NOSLEEP)) == NULL)
		return (ENOMEM);

	error = copyinstr(sr.sr_name, swapname, MAXPATHLEN, 0);
	if (error)
		goto out;

	error = lookupname(swapname, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp);
	if (error) {
		if (sc_cmd == SC_ADD)
			goto out;
		/* see if we match by name */
		vp = swapdel_byname(swapname, (size_t)sr.sr_start);
		if (vp == NULL)
			goto out;
	}

	/* At this point we hold a reference on vp; drop it on all paths */
	if (vp->v_flag & (VNOMAP | VNOSWAP)) {
		VN_RELE(vp);
		error = ENOSYS;
		goto out;
	}
	switch (vp->v_type) {
	case VBLK:
		break;

	case VREG:
		if (vp->v_vfsp && vn_is_readonly(vp))
			error = EROFS;
		else
			error = VOP_ACCESS(vp, VREAD|VWRITE, 0, CRED(), NULL);
		break;

	case VDIR:
		error = EISDIR;
		break;
	default:
		error = ENOSYS;
		break;
	}
	if (error == 0) {
		if (sc_cmd == SC_REMOVE)
			error = swapdel(vp, sr.sr_start);
		else
			error = swapadd(vp, sr.sr_start,
			    sr.sr_length, swapname);
	}
	VN_RELE(vp);
out:
	kmem_free(swapname, MAXPATHLEN);
	return (error);
}
770 
771 #if defined(_LP64) && defined(_SYSCALL32)
772 
/*
 * 32-bit version of swapctl() for ILP32 callers on an LP64 kernel.
 * Identical logic to swapctl() but uses the 32-bit structure layouts
 * (swapent32, swapres32, anoninfo32) and returns EOVERFLOW when a
 * 64-bit count cannot be represented in a 32-bit field.
 */
int
swapctl32(int sc_cmd, void *sc_arg, int *rv)
{
	struct swapinfo *sip, *csip, *tsip;
	int error = 0;
	struct swapent32 st, *ust;
	struct swapres32 sr;
	struct vnode *vp;
	int cnt = 0;
	int tmp_nswapfiles;
	int nswap;
	int length, nlen;
	int gplen = 0, plen;
	char *swapname;
	char *pname;
	char *tpname;
	struct anoninfo32 ai;
	size_t s;
	spgcnt_t avail;
	int global = INGLOBALZONE(curproc);
	struct zone *zp = curproc->p_zone;

	/*
	 * When running in a zone we want to hide the details of the swap
	 * devices: we report there only being one swap device named "swap"
	 * having a size equal to the sum of the sizes of all real swap devices
	 * on the system.
	 */
	switch (sc_cmd) {
	case SC_GETNSWP:
		if (global)
			*rv = nswapfiles;
		else
			*rv = 1;
		return (0);

	case SC_AINFO:
		/*
		 * Return anoninfo information with these changes:
		 * ani_max = maximum amount of swap space
		 *	(including potentially available physical memory)
		 * ani_free = amount of unallocated anonymous memory
		 *	(some of which might be reserved and including
		 *	 potentially available physical memory)
		 * ani_resv = amount of claimed (reserved) anonymous memory
		 */
		avail = MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);
		/* Each count must fit in the 32-bit anoninfo32 fields */
		s = (k_anoninfo.ani_max + k_anoninfo.ani_mem_resv) + avail;
		if (s > UINT32_MAX)
			return (EOVERFLOW);
		ai.ani_max = s;

		s = k_anoninfo.ani_free + avail;
		if (s > UINT32_MAX)
			return (EOVERFLOW);
		ai.ani_free = s;

		s = k_anoninfo.ani_phys_resv + k_anoninfo.ani_mem_resv;
		if (s > UINT32_MAX)
			return (EOVERFLOW);
		ai.ani_resv = s;

		if (!global && zp->zone_max_swap_ctl != UINT64_MAX) {
			/*
			 * We're in a non-global zone with a swap cap.  We
			 * always report the system-wide values for the global
			 * zone, even though it too can have a swap cap.
			 * See the comment for the SC_AINFO case in swapctl()
			 * which explains the following logic.
			 */
			rctl_qty_t cap, used;
			pgcnt_t pgcap, sys_avail;

			mutex_enter(&zp->zone_mem_lock);
			cap = zp->zone_max_swap_ctl;
			used = zp->zone_max_swap;
			mutex_exit(&zp->zone_mem_lock);

			pgcap = MIN(btop(cap), ai.ani_max);
			ai.ani_free = pgcap - btop(used);

			/* Get the system-wide swap currently available. */
			sys_avail = ai.ani_max - ai.ani_resv;
			if (sys_avail < ai.ani_free)
				ai.ani_resv = pgcap - sys_avail;
			else
				ai.ani_resv = btop(used);

			ai.ani_max = pgcap;
		}

		if (copyout(&ai, sc_arg, sizeof (ai)) != 0)
			return (EFAULT);
		return (0);

	case SC_LIST:
		if (copyin(sc_arg, &length, sizeof (int32_t)) != 0)
			return (EFAULT);
		if (!global) {
			/*
			 * Non-global zone: synthesize a single aggregate
			 * entry named "swap" covering all real devices.
			 */
			struct swapent32 st;	/* shadows the outer 'st' */
			char *swappath = "swap";

			if (length < 1)
				return (ENOMEM);
			/*
			 * swt_ent is an array member at the tail of
			 * swaptbl32_t, so this is only address arithmetic
			 * on the user pointer, not a dereference.
			 */
			ust = (swapent32_t *)((swaptbl32_t *)sc_arg)->swt_ent;
			if (copyin(ust, &st, sizeof (swapent32_t)) != 0)
				return (EFAULT);
			st.ste_start = PAGESIZE >> SCTRSHFT;
			st.ste_length = (off_t)0;
			st.ste_pages = 0;
			st.ste_free = 0;
			st.ste_flags = 0;

			/* Sum sizes and free counts over all real areas */
			mutex_enter(&swapinfo_lock);
			for (sip = swapinfo, nswap = 0;
			    sip != NULL && nswap < nswapfiles;
			    sip = sip->si_next, nswap++) {
				st.ste_length +=
				    (sip->si_eoff - sip->si_soff) >> SCTRSHFT;
				st.ste_pages += sip->si_npgs;
				st.ste_free += sip->si_nfpgs;
			}
			mutex_exit(&swapinfo_lock);

			if (zp->zone_max_swap_ctl != UINT64_MAX) {
				/* Clamp the totals to the zone's swap cap */
				rctl_qty_t cap, used;

				mutex_enter(&zp->zone_mem_lock);
				cap = zp->zone_max_swap_ctl;
				used = zp->zone_max_swap;
				mutex_exit(&zp->zone_mem_lock);

				st.ste_length = MIN(cap, st.ste_length);
				st.ste_pages = MIN(btop(cap), st.ste_pages);
				st.ste_free = MIN(st.ste_pages - btop(used),
				    st.ste_free);
			}

			if (copyout(&st, ust, sizeof (swapent32_t)) != 0 ||
			    copyout(swappath, (caddr_t)(uintptr_t)st.ste_path,
			    strlen(swappath) + 1) != 0) {
				return (EFAULT);
			}
			*rv = 1;
			return (0);
		}
beginning:
		tmp_nswapfiles = nswapfiles;
		/* Return an error if not enough space for the whole table. */
		if (length < tmp_nswapfiles)
			return (ENOMEM);
		/*
		 * Get memory to hold the swap entries and their names. We'll
		 * copy the real entries into these and then copy these out.
		 * Allocating the pathname memory is only a guess so we may
		 * find that we need more and have to do it again.
		 * All this is because we have to hold the anon lock while
		 * traversing the swapinfo list, and we can't be doing copyouts
		 * and/or kmem_alloc()s during this.
		 */
		csip = kmem_zalloc(tmp_nswapfiles * sizeof (*csip), KM_SLEEP);
retry:
		nlen = tmp_nswapfiles * (gplen += 100);
		pname = kmem_zalloc(nlen, KM_SLEEP);

		mutex_enter(&swapinfo_lock);

		/* The list changed while we were unlocked; start over */
		if (tmp_nswapfiles != nswapfiles) {
			mutex_exit(&swapinfo_lock);
			kmem_free(pname, nlen);
			kmem_free(csip, tmp_nswapfiles * sizeof (*csip));
			gplen = 0;
			goto beginning;
		}
		/* Snapshot each entry and its pathname under the lock */
		for (sip = swapinfo, tsip = csip, tpname = pname, nswap = 0;
		    (sip != NULL) && (nswap < tmp_nswapfiles);
		    sip = sip->si_next, tsip++, tpname += plen, nswap++) {
			plen = sip->si_pnamelen;
			if (tpname + plen - pname > nlen) {
				/* Name buffer too small; grow and retry */
				mutex_exit(&swapinfo_lock);
				kmem_free(pname, nlen);
				goto retry;
			}
			*tsip = *sip;
			tsip->si_pname = tpname;
			(void) strcpy(tsip->si_pname, sip->si_pname);
		}
		mutex_exit(&swapinfo_lock);

		if (sip != NULL) {
			/* More entries than caller's table can hold */
			error = ENOMEM;
			goto lout;
		}
		/* Copy the snapshot out to the user's swapent32 table */
		ust = (swapent32_t *)((swaptbl32_t *)sc_arg)->swt_ent;
		for (tsip = csip, cnt = 0; cnt < nswap;  tsip++, ust++, cnt++) {
			if (copyin(ust, &st, sizeof (*ust)) != 0) {
				error = EFAULT;
				goto lout;
			}
			st.ste_flags = tsip->si_flags;
			st.ste_length =
			    (tsip->si_eoff - tsip->si_soff) >> SCTRSHFT;
			st.ste_start = tsip->si_soff >> SCTRSHFT;
			st.ste_pages = tsip->si_npgs;
			st.ste_free = tsip->si_nfpgs;
			if (copyout(&st, ust, sizeof (st)) != 0) {
				error = EFAULT;
				goto lout;
			}
			if (!tsip->si_pnamelen)
				continue;
			if (copyout(tsip->si_pname,
			    (caddr_t)(uintptr_t)st.ste_path,
			    tsip->si_pnamelen) != 0) {
				error = EFAULT;
				goto lout;
			}
		}
		*rv = nswap;
lout:
		kmem_free(csip, tmp_nswapfiles * sizeof (*csip));
		kmem_free(pname, nlen);
		return (error);

	case SC_ADD:
	case SC_REMOVE:
		/* Privileged; handled by the common path below */
		break;
	default:
		return (EINVAL);
	}
	if ((error = secpolicy_swapctl(CRED())) != 0)
		return (error);

	if (copyin(sc_arg, &sr, sizeof (sr)))
		return (EFAULT);

	/* Allocate the space to read in pathname */
	if ((swapname = kmem_alloc(MAXPATHLEN, KM_NOSLEEP)) == NULL)
		return (ENOMEM);

	error = copyinstr((caddr_t)(uintptr_t)sr.sr_name,
	    swapname, MAXPATHLEN, NULL);
	if (error)
		goto out;

	error = lookupname(swapname, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp);
	if (error) {
		if (sc_cmd == SC_ADD)
			goto out;
		/* see if we match by name */
		vp = swapdel_byname(swapname, (uint_t)sr.sr_start);
		if (vp == NULL)
			goto out;
	}

	/* At this point we hold a reference on vp; drop it on all paths */
	if (vp->v_flag & (VNOMAP | VNOSWAP)) {
		VN_RELE(vp);
		error = ENOSYS;
		goto out;
	}
	switch (vp->v_type) {
	case VBLK:
		break;

	case VREG:
		if (vp->v_vfsp && vn_is_readonly(vp))
			error = EROFS;
		else
			error = VOP_ACCESS(vp, VREAD|VWRITE, 0, CRED(), NULL);
		break;

	case VDIR:
		error = EISDIR;
		break;
	default:
		error = ENOSYS;
		break;
	}
	if (error == 0) {
		if (sc_cmd == SC_REMOVE)
			error = swapdel(vp, sr.sr_start);
		else
			error = swapadd(vp, sr.sr_start, sr.sr_length,
			    swapname);
	}
	VN_RELE(vp);
out:
	kmem_free(swapname, MAXPATHLEN);
	return (error);
}
1063 
1064 #endif /* _LP64 && _SYSCALL32 */
1065 
1066 /*
1067  * Add a new swap file.
1068  */
1069 int
1070 swapadd(struct vnode *vp, ulong_t lowblk, ulong_t nblks, char *swapname)
1071 {
1072 	struct swapinfo **sipp, *nsip = NULL, *esip = NULL;
1073 	struct vnode *cvp;
1074 	struct vattr vattr;
1075 	pgcnt_t pages;
1076 	u_offset_t soff, eoff;
1077 	int error;
1078 	ssize_t i, start, end;
1079 	ushort_t wasswap;
1080 	ulong_t startblk;
1081 	size_t	returned_mem;
1082 
1083 	SWAP_PRINT(SW_CTL, "swapadd: vp %p lowblk %ld nblks %ld swapname %s\n",
1084 	    vp, lowblk, nblks, swapname, 0);
1085 	/*
1086 	 * Get the real vnode. (If vp is not a specnode it just returns vp, so
1087 	 * it does the right thing, but having this code know about specnodes
1088 	 * violates the spirit of having it be indepedent of vnode type.)
1089 	 */
1090 	cvp = common_specvp(vp);
1091 
1092 	/*
1093 	 * Or in VISSWAP so file system has chance to deny swap-ons during open.
1094 	 */
1095 	mutex_enter(&cvp->v_lock);
1096 	wasswap = cvp->v_flag & VISSWAP;
1097 	cvp->v_flag |= VISSWAP;
1098 	mutex_exit(&cvp->v_lock);
1099 
1100 	mutex_enter(&swap_lock);
1101 	if (error = VOP_OPEN(&cvp, FREAD|FWRITE, CRED(), NULL)) {
1102 		mutex_exit(&swap_lock);
1103 		/* restore state of v_flag */
1104 		if (!wasswap) {
1105 			mutex_enter(&cvp->v_lock);
1106 			cvp->v_flag &= ~VISSWAP;
1107 			mutex_exit(&cvp->v_lock);
1108 		}
1109 		return (error);
1110 	}
1111 	mutex_exit(&swap_lock);
1112 
1113 	/*
1114 	 * Get partition size. Return error if empty partition,
1115 	 * or if request does not fit within the partition.
1116 	 * If this is the first swap device, we can reduce
1117 	 * the size of the swap area to match what is
1118 	 * available.  This can happen if the system was built
1119 	 * on a machine with a different size swap partition.
1120 	 */
1121 	vattr.va_mask = AT_SIZE;
1122 	if (error = VOP_GETATTR(cvp, &vattr, ATTR_COMM, CRED(), NULL))
1123 		goto out;
1124 
1125 	/*
1126 	 * Specfs returns a va_size of MAXOFFSET_T (UNKNOWN_SIZE) when the
1127 	 * size of the device can't be determined.
1128 	 */
1129 	if ((vattr.va_size == 0) || (vattr.va_size == MAXOFFSET_T)) {
1130 		error = EINVAL;
1131 		goto out;
1132 	}
1133 
1134 #ifdef	_ILP32
1135 	/*
1136 	 * No support for large swap in 32-bit OS, if the size of the swap is
1137 	 * bigger than MAXOFF32_T then the size used by swapfs must be limited.
1138 	 * This limitation is imposed by the swap subsystem itself, a D_64BIT
1139 	 * driver as the target of swap operation should be able to field
1140 	 * the IO.
1141 	 */
1142 	if (vattr.va_size > MAXOFF32_T) {
1143 		cmn_err(CE_NOTE,
1144 		    "!swap device %s truncated from 0x%llx to 0x%x bytes",
1145 		    swapname, vattr.va_size, MAXOFF32_T);
1146 		vattr.va_size = MAXOFF32_T;
1147 	}
1148 #endif	/* _ILP32 */
1149 
1150 	/* Fail if file not writeable (try to set size to current size) */
1151 	vattr.va_mask = AT_SIZE;
1152 	if (error = VOP_SETATTR(cvp, &vattr, 0, CRED(), NULL))
1153 		goto out;
1154 
1155 	/* Fail if fs does not support VOP_PAGEIO */
1156 	error = VOP_PAGEIO(cvp, (page_t *)NULL, (u_offset_t)0, 0, 0, CRED(),
1157 	    NULL);
1158 
1159 	if (error == ENOSYS)
1160 		goto out;
1161 	else
1162 		error = 0;
1163 	/*
1164 	 * If swapping on the root filesystem don't put swap blocks that
1165 	 * correspond to the miniroot filesystem on the swap free list.
1166 	 */
1167 	if (cvp == rootdir)
1168 		startblk = roundup(MINIROOTSIZE<<SCTRSHFT, klustsize)>>SCTRSHFT;
1169 	else				/* Skip 1st page (disk label) */
1170 		startblk = (ulong_t)(lowblk ? lowblk : 1);
1171 
1172 	soff = startblk << SCTRSHFT;
1173 	if (soff >= vattr.va_size) {
1174 		error = EINVAL;
1175 		goto out;
1176 	}
1177 
1178 	/*
1179 	 * If user specified 0 blks, use the size of the device
1180 	 */
1181 	eoff = nblks ?  soff + (nblks - (startblk - lowblk) << SCTRSHFT) :
1182 	    vattr.va_size;
1183 
1184 	SWAP_PRINT(SW_CTL, "swapadd: va_size %ld soff %ld eoff %ld\n",
1185 	    vattr.va_size, soff, eoff, 0, 0);
1186 
1187 	if (eoff > vattr.va_size) {
1188 		error = EINVAL;
1189 		goto out;
1190 	}
1191 
1192 	/*
1193 	 * The starting and ending offsets must be page aligned.
1194 	 * Round soff up to next page boundary, round eoff
1195 	 * down to previous page boundary.
1196 	 */
1197 	soff = ptob(btopr(soff));
1198 	eoff = ptob(btop(eoff));
1199 	if (soff >= eoff) {
1200 		SWAP_PRINT(SW_CTL, "swapadd: soff %ld >= eoff %ld\n",
1201 		    soff, eoff, 0, 0, 0);
1202 		error = EINVAL;
1203 		goto out;
1204 	}
1205 
1206 	pages = btop(eoff - soff);
1207 
1208 	/* Allocate and partially set up the new swapinfo */
1209 	nsip = kmem_zalloc(sizeof (struct swapinfo), KM_SLEEP);
1210 	nsip->si_vp = cvp;
1211 
1212 	nsip->si_soff = soff;
1213 	nsip->si_eoff = eoff;
1214 	nsip->si_hint = 0;
1215 	nsip->si_checkcnt = nsip->si_alloccnt = 0;
1216 
1217 	nsip->si_pnamelen = (int)strlen(swapname) + 1;
1218 	nsip->si_pname = (char *)kmem_zalloc(nsip->si_pnamelen, KM_SLEEP);
1219 	bcopy(swapname, nsip->si_pname, nsip->si_pnamelen - 1);
1220 	SWAP_PRINT(SW_CTL, "swapadd: allocating swapinfo for %s, %ld pages\n",
1221 	    swapname, pages, 0, 0, 0);
1222 	/*
1223 	 * Size of swapslots map in bytes
1224 	 */
1225 	nsip->si_mapsize = P2ROUNDUP(pages, NBBW) / NBBY;
1226 	nsip->si_swapslots = kmem_zalloc(nsip->si_mapsize, KM_SLEEP);
1227 
1228 	/*
1229 	 * Permanently set the bits that can't ever be allocated,
1230 	 * i.e. those from the ending offset to the round up slot for the
1231 	 * swapslots bit map.
1232 	 */
1233 	start = pages;
1234 	end = P2ROUNDUP(pages, NBBW);
1235 	for (i = start; i < end; i++) {
1236 		SWAP_PRINT(SW_CTL, "swapadd: set bit for page %ld\n", i,
1237 		    0, 0, 0, 0);
1238 		SETBIT(nsip->si_swapslots, i);
1239 	}
1240 	nsip->si_npgs = nsip->si_nfpgs = pages;
1241 	/*
1242 	 * Now check to see if we can add it. We wait til now to check because
1243 	 * we need the swapinfo_lock and we don't want sleep with it (e.g.,
1244 	 * during kmem_alloc()) while we're setting up the swapinfo.
1245 	 */
1246 	mutex_enter(&swapinfo_lock);
1247 	for (sipp = &swapinfo; (esip = *sipp) != NULL; sipp = &esip->si_next) {
1248 		if (esip->si_vp == cvp) {
1249 			if (esip->si_soff == soff && esip->si_npgs == pages &&
1250 			    (esip->si_flags & ST_DOINGDEL)) {
1251 				/*
1252 				 * We are adding a device that we are in the
1253 				 * middle of deleting. Just clear the
1254 				 * ST_DOINGDEL flag to signal this and
1255 				 * the deletion routine will eventually notice
1256 				 * it and add it back.
1257 				 */
1258 				esip->si_flags &= ~ST_DOINGDEL;
1259 				mutex_exit(&swapinfo_lock);
1260 				goto out;
1261 			}
1262 			/* disallow overlapping swap files */
1263 			if ((soff < esip->si_eoff) && (eoff > esip->si_soff)) {
1264 				error = EEXIST;
1265 				mutex_exit(&swapinfo_lock);
1266 				goto out;
1267 			}
1268 		}
1269 	}
1270 
1271 	nswapfiles++;
1272 
1273 	/*
1274 	 * add new swap device to list and shift allocations to it
1275 	 * before updating the anoninfo counters
1276 	 */
1277 	*sipp = nsip;
1278 	silast = nsip;
1279 
1280 	/*
1281 	 * Update the total amount of reservable swap space
1282 	 * accounting properly for swap space from physical memory
1283 	 */
1284 	/* New swap device soaks up currently reserved memory swap */
1285 	mutex_enter(&anoninfo_lock);
1286 
1287 	ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
1288 	ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
1289 
1290 	k_anoninfo.ani_max += pages;
1291 	ANI_ADD(pages);
1292 	if (k_anoninfo.ani_mem_resv > k_anoninfo.ani_locked_swap) {
1293 		returned_mem = MIN(k_anoninfo.ani_mem_resv -
1294 		    k_anoninfo.ani_locked_swap,
1295 		    k_anoninfo.ani_max - k_anoninfo.ani_phys_resv);
1296 
1297 		ANI_ADD(-returned_mem);
1298 		k_anoninfo.ani_free -= returned_mem;
1299 		k_anoninfo.ani_mem_resv -= returned_mem;
1300 		k_anoninfo.ani_phys_resv += returned_mem;
1301 
1302 		mutex_enter(&freemem_lock);
1303 		availrmem += returned_mem;
1304 		mutex_exit(&freemem_lock);
1305 	}
1306 	/*
1307 	 * At boot time, to permit booting small memory machines using
1308 	 * only physical memory as swap space, we allowed a dangerously
1309 	 * large amount of memory to be used as swap space; now that
1310 	 * more physical backing store is available bump down the amount
1311 	 * we can get from memory to a safer size.
1312 	 */
1313 	if (swapfs_minfree < swapfs_desfree) {
1314 		mutex_enter(&freemem_lock);
1315 		if (availrmem > swapfs_desfree || !k_anoninfo.ani_mem_resv)
1316 			swapfs_minfree = swapfs_desfree;
1317 		mutex_exit(&freemem_lock);
1318 	}
1319 
1320 	SWAP_PRINT(SW_CTL, "swapadd: ani_max %ld ani_free %ld\n",
1321 	    k_anoninfo.ani_free, k_anoninfo.ani_free, 0, 0, 0);
1322 
1323 	mutex_exit(&anoninfo_lock);
1324 
1325 	mutex_exit(&swapinfo_lock);
1326 
1327 	/* Initialize the dump device */
1328 	mutex_enter(&dump_lock);
1329 	if (dumpvp == NULL)
1330 		(void) dumpinit(vp, swapname, 0);
1331 	mutex_exit(&dump_lock);
1332 
1333 	VN_HOLD(cvp);
1334 out:
1335 	if (error || esip) {
1336 		SWAP_PRINT(SW_CTL, "swapadd: error (%d)\n", error, 0, 0, 0, 0);
1337 
1338 		if (!wasswap) {
1339 			mutex_enter(&cvp->v_lock);
1340 			cvp->v_flag &= ~VISSWAP;
1341 			mutex_exit(&cvp->v_lock);
1342 		}
1343 		if (nsip) {
1344 			kmem_free(nsip->si_swapslots, (size_t)nsip->si_mapsize);
1345 			kmem_free(nsip->si_pname, nsip->si_pnamelen);
1346 			kmem_free(nsip, sizeof (*nsip));
1347 		}
1348 		mutex_enter(&swap_lock);
1349 		(void) VOP_CLOSE(cvp, FREAD|FWRITE, 1, (offset_t)0, CRED(),
1350 		    NULL);
1351 		mutex_exit(&swap_lock);
1352 	}
1353 	return (error);
1354 }
1355 
1356 /*
1357  * Delete a swap file.
1358  */
1359 static int
1360 swapdel(
1361 	struct vnode *vp,
1362 	ulong_t lowblk) /* Low block number of area to delete. */
1363 {
1364 	struct swapinfo **sipp, *osip = NULL;
1365 	struct vnode *cvp;
1366 	u_offset_t soff;
1367 	int error = 0;
1368 	u_offset_t toff = 0;
1369 	struct vnode *tvp = NULL;
1370 	spgcnt_t pages;
1371 	struct anon **app, *ap;
1372 	kmutex_t *ahm;
1373 	pgcnt_t adjust_swap = 0;
1374 
1375 	/* Find the swap file entry for the file to be deleted */
1376 	cvp = common_specvp(vp);
1377 
1378 
1379 	lowblk = lowblk ? lowblk : 1; 	/* Skip first page (disk label) */
1380 	soff = ptob(btopr(lowblk << SCTRSHFT)); /* must be page aligned */
1381 
1382 	mutex_enter(&swapinfo_lock);
1383 	for (sipp = &swapinfo; (osip = *sipp) != NULL; sipp = &osip->si_next) {
1384 		if ((osip->si_vp == cvp) &&
1385 		    (osip->si_soff == soff) && (osip->si_flags == 0))
1386 			break;
1387 	}
1388 
1389 	/* If the file was not found, error.  */
1390 	if (osip == NULL) {
1391 		error = EINVAL;
1392 		mutex_exit(&swapinfo_lock);
1393 		goto out;
1394 	}
1395 
1396 	pages = osip->si_npgs;
1397 
1398 	/*
1399 	 * Do not delete if we will be low on swap pages.
1400 	 */
1401 	mutex_enter(&anoninfo_lock);
1402 
1403 	ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
1404 	ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
1405 
1406 	mutex_enter(&freemem_lock);
1407 	if (((k_anoninfo.ani_max - k_anoninfo.ani_phys_resv) +
1408 	    MAX((spgcnt_t)(availrmem - swapfs_minfree), 0)) < pages) {
1409 		mutex_exit(&freemem_lock);
1410 		mutex_exit(&anoninfo_lock);
1411 		error = ENOMEM;
1412 		cmn_err(CE_WARN, "swapdel - too few free pages");
1413 		mutex_exit(&swapinfo_lock);
1414 		goto out;
1415 	}
1416 	mutex_exit(&freemem_lock);
1417 
1418 	k_anoninfo.ani_max -= pages;
1419 
1420 	/* If needed, reserve memory swap to replace old device */
1421 	if (k_anoninfo.ani_phys_resv > k_anoninfo.ani_max) {
1422 		adjust_swap = k_anoninfo.ani_phys_resv - k_anoninfo.ani_max;
1423 		k_anoninfo.ani_phys_resv -= adjust_swap;
1424 		k_anoninfo.ani_mem_resv += adjust_swap;
1425 		mutex_enter(&freemem_lock);
1426 		availrmem -= adjust_swap;
1427 		mutex_exit(&freemem_lock);
1428 		ANI_ADD(adjust_swap);
1429 	}
1430 	ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
1431 	ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
1432 	mutex_exit(&anoninfo_lock);
1433 
1434 	ANI_ADD(-pages);
1435 
1436 	/*
1437 	 * Set the delete flag.  This prevents anyone from allocating more
1438 	 * pages from this file. Also set ST_DOINGDEL. Someone who wants to
1439 	 * add the file back while we're deleting it will signify by clearing
1440 	 * this flag.
1441 	 */
1442 	osip->si_flags |= ST_INDEL|ST_DOINGDEL;
1443 	mutex_exit(&swapinfo_lock);
1444 
1445 	/*
1446 	 * Free all the allocated physical slots for this file. We do this
1447 	 * by walking through the entire anon hash array, because we need
1448 	 * to update all the anon slots that have physical swap slots on
1449 	 * this file, and this is the only way to find them all. We go back
1450 	 * to the beginning of a bucket after each slot is freed because the
1451 	 * anonhash_lock is not held during the free and thus the hash table
1452 	 * may change under us.
1453 	 */
1454 	for (app = anon_hash; app < &anon_hash[ANON_HASH_SIZE]; app++) {
1455 		ahm = &anonhash_lock[(app-anon_hash) & (AH_LOCK_SIZE - 1)];
1456 		mutex_enter(ahm);
1457 top:
1458 		for (ap = *app; ap != NULL; ap = ap->an_hash) {
1459 			if (ap->an_pvp == cvp &&
1460 			    ap->an_poff >= osip->si_soff &&
1461 			    ap->an_poff < osip->si_eoff) {
1462 				ASSERT(TESTBIT(osip->si_swapslots,
1463 				    btop((size_t)(ap->an_poff -
1464 				    osip->si_soff))));
1465 				tvp = ap->an_vp;
1466 				toff = ap->an_off;
1467 				VN_HOLD(tvp);
1468 				mutex_exit(ahm);
1469 
1470 				error = swapslot_free(tvp, toff, osip);
1471 
1472 				VN_RELE(tvp);
1473 				mutex_enter(ahm);
1474 				if (!error && (osip->si_flags & ST_DOINGDEL)) {
1475 					goto top;
1476 				} else {
1477 					if (error) {
1478 						cmn_err(CE_WARN,
1479 						    "swapslot_free failed %d",
1480 						    error);
1481 					}
1482 
1483 					/*
1484 					 * Add device back before making it
1485 					 * visible.
1486 					 */
1487 					mutex_enter(&swapinfo_lock);
1488 					osip->si_flags &=
1489 					    ~(ST_INDEL | ST_DOINGDEL);
1490 					mutex_exit(&swapinfo_lock);
1491 
1492 					/*
1493 					 * Update the anon space available
1494 					 */
1495 					mutex_enter(&anoninfo_lock);
1496 
1497 					k_anoninfo.ani_phys_resv += adjust_swap;
1498 					k_anoninfo.ani_mem_resv -= adjust_swap;
1499 					k_anoninfo.ani_max += pages;
1500 
1501 					mutex_enter(&freemem_lock);
1502 					availrmem += adjust_swap;
1503 					mutex_exit(&freemem_lock);
1504 
1505 					mutex_exit(&anoninfo_lock);
1506 
1507 					ANI_ADD(pages);
1508 
1509 					mutex_exit(ahm);
1510 					goto out;
1511 				}
1512 			}
1513 		}
1514 		mutex_exit(ahm);
1515 	}
1516 
1517 	/* All done, they'd better all be free! */
1518 	mutex_enter(&swapinfo_lock);
1519 	ASSERT(osip->si_nfpgs == osip->si_npgs);
1520 
1521 	/* Now remove it from the swapinfo list */
1522 	for (sipp = &swapinfo; *sipp != NULL; sipp = &(*sipp)->si_next) {
1523 		if (*sipp == osip)
1524 			break;
1525 	}
1526 	ASSERT(*sipp);
1527 	*sipp = osip->si_next;
1528 	if (silast == osip)
1529 		if ((silast = osip->si_next) == NULL)
1530 			silast = swapinfo;
1531 	nswapfiles--;
1532 	mutex_exit(&swapinfo_lock);
1533 
1534 	kmem_free(osip->si_swapslots, osip->si_mapsize);
1535 	kmem_free(osip->si_pname, osip->si_pnamelen);
1536 	kmem_free(osip, sizeof (*osip));
1537 
1538 	mutex_enter(&dump_lock);
1539 	if (cvp == dumpvp)
1540 		dumpfini();
1541 	mutex_exit(&dump_lock);
1542 
1543 	/* Release the vnode */
1544 
1545 	mutex_enter(&swap_lock);
1546 	(void) VOP_CLOSE(cvp, FREAD|FWRITE, 1, (offset_t)0, CRED(), NULL);
1547 	mutex_enter(&cvp->v_lock);
1548 	cvp->v_flag &= ~VISSWAP;
1549 	mutex_exit(&cvp->v_lock);
1550 	VN_RELE(cvp);
1551 	mutex_exit(&swap_lock);
1552 out:
1553 	return (error);
1554 }
1555 
1556 /*
1557  * Free up a physical swap slot on swapinfo sip, currently in use by the
1558  * anonymous page whose name is (vp, off).
1559  */
1560 static int
1561 swapslot_free(
1562 	struct vnode *vp,
1563 	u_offset_t off,
1564 	struct swapinfo *sip)
1565 {
1566 	struct page *pp = NULL;
1567 	struct anon *ap = NULL;
1568 	int error = 0;
1569 	kmutex_t *ahm;
1570 	struct vnode *pvp = NULL;
1571 	u_offset_t poff;
1572 	int	alloc_pg = 0;
1573 
1574 	ASSERT(sip->si_vp != NULL);
1575 	/*
1576 	 * Get the page for the old swap slot if exists or create a new one.
1577 	 */
1578 again:
1579 	if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
1580 		pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_EXCL,
1581 		    segkmap, NULL);
1582 		if (pp == NULL)
1583 			goto again;
1584 		alloc_pg = 1;
1585 
1586 		error = swap_getphysname(vp, off, &pvp, &poff);
1587 		if (error || pvp != sip->si_vp || poff < sip->si_soff ||
1588 		    poff >= sip->si_eoff) {
1589 			page_io_unlock(pp);
1590 			/*LINTED: constant in conditional context*/
1591 			VN_DISPOSE(pp, B_INVAL, 0, kcred);
1592 			return (0);
1593 		}
1594 
1595 		error = VOP_PAGEIO(pvp, pp, poff, PAGESIZE, B_READ,
1596 		    CRED(), NULL);
1597 		if (error) {
1598 			page_io_unlock(pp);
1599 			if (error == EFAULT)
1600 				error = 0;
1601 			/*LINTED: constant in conditional context*/
1602 			VN_DISPOSE(pp, B_INVAL, 0, kcred);
1603 			return (error);
1604 		}
1605 	}
1606 
1607 	/*
1608 	 * The anon could have been removed by anon_decref* and/or reallocated
1609 	 * by anon layer (an_pvp == NULL) with the same vp, off.
1610 	 * In this case the page which has been allocated needs to
1611 	 * be freed.
1612 	 */
1613 	if (!alloc_pg)
1614 		page_io_lock(pp);
1615 	ahm = &anonhash_lock[AH_LOCK(vp, off)];
1616 	mutex_enter(ahm);
1617 	ap = swap_anon(vp, off);
1618 	if ((ap == NULL || ap->an_pvp == NULL) && alloc_pg) {
1619 		mutex_exit(ahm);
1620 		page_io_unlock(pp);
1621 		/*LINTED: constant in conditional context*/
1622 		VN_DISPOSE(pp, B_INVAL, 0, kcred);
1623 		return (0);
1624 	}
1625 
1626 	/*
1627 	 * Free the physical slot. It may have been freed up and replaced with
1628 	 * another one while we were getting the page so we have to re-verify
1629 	 * that this is really one we want. If we do free the slot we have
1630 	 * to mark the page modified, as its backing store is now gone.
1631 	 */
1632 	if ((ap != NULL) && (ap->an_pvp == sip->si_vp && ap->an_poff >=
1633 	    sip->si_soff && ap->an_poff < sip->si_eoff)) {
1634 		swap_phys_free(ap->an_pvp, ap->an_poff, PAGESIZE);
1635 		ap->an_pvp = NULL;
1636 		ap->an_poff = 0;
1637 		mutex_exit(ahm);
1638 		hat_setmod(pp);
1639 	} else {
1640 		mutex_exit(ahm);
1641 	}
1642 	page_io_unlock(pp);
1643 	page_unlock(pp);
1644 	return (0);
1645 }
1646 
1647 
1648 /*
1649  * Get contig physical backing store for vp, in the range
1650  * [*offp, *offp + *lenp), May back a subrange of this, but must
1651  * always include the requested offset or fail. Returns the offsets
1652  * backed as [*offp, *offp + *lenp) and the physical offsets used to
1653  * back them from *pvpp in the range [*pstartp, *pstartp + *lenp).
1654  * Returns 	0 for success
1655  * 		SE_NOANON -- no anon slot for requested paged
1656  *		SE_NOSWAP -- no physical swap space available
1657  */
1658 int
1659 swap_newphysname(
1660 	struct vnode *vp,
1661 	u_offset_t offset,
1662 	u_offset_t *offp,
1663 	size_t *lenp,
1664 	struct vnode **pvpp,
1665 	u_offset_t *poffp)
1666 {
1667 	struct anon *ap = NULL;		/* anon slot for vp, off */
1668 	int error = 0;
1669 	struct vnode *pvp;
1670 	u_offset_t poff, pstart, prem;
1671 	size_t plen;
1672 	u_offset_t off, start;
1673 	kmutex_t *ahm;
1674 
1675 	ASSERT(*offp <= offset && offset < *offp + *lenp);
1676 
1677 	/* Get new physical swap slots. */
1678 	plen = *lenp;
1679 	if (!swap_phys_alloc(&pvp, &pstart, &plen, 0)) {
1680 		/*
1681 		 * No swap available so return error unless requested
1682 		 * offset is already backed in which case return that.
1683 		 */
1684 		ahm = &anonhash_lock[AH_LOCK(vp, offset)];
1685 		mutex_enter(ahm);
1686 		if ((ap = swap_anon(vp, offset)) == NULL) {
1687 			error = SE_NOANON;
1688 			mutex_exit(ahm);
1689 			return (error);
1690 		}
1691 		error = (ap->an_pvp ? 0 : SE_NOSWAP);
1692 		*offp = offset;
1693 		*lenp = PAGESIZE;
1694 		*pvpp = ap->an_pvp;
1695 		*poffp = ap->an_poff;
1696 		mutex_exit(ahm);
1697 		return (error);
1698 	}
1699 
1700 	/*
1701 	 * We got plen (<= *lenp) contig slots. Use these to back a
1702 	 * subrange of [*offp, *offp + *lenp) which includes offset.
1703 	 * For now we just put offset at the end of the kluster.
1704 	 * Clearly there are other possible choices - which is best?
1705 	 */
1706 	start = MAX(*offp,
1707 	    (offset + PAGESIZE > plen) ? (offset + PAGESIZE - plen) : 0);
1708 	ASSERT(start + plen <= *offp + *lenp);
1709 
1710 	for (off = start, poff = pstart; poff < pstart + plen;
1711 	    off += PAGESIZE, poff += PAGESIZE) {
1712 		ahm = &anonhash_lock[AH_LOCK(vp, off)];
1713 		mutex_enter(ahm);
1714 		if ((ap = swap_anon(vp, off)) != NULL) {
1715 			/* Free old slot if any, and assign new one */
1716 			if (ap->an_pvp)
1717 				swap_phys_free(ap->an_pvp, ap->an_poff,
1718 				    PAGESIZE);
1719 			ap->an_pvp = pvp;
1720 			ap->an_poff = poff;
1721 		} else {	/* No anon slot for a klustered page, quit. */
1722 			prem = (pstart + plen) - poff;
1723 			/* Already did requested page, do partial kluster */
1724 			if (off > offset) {
1725 				plen = poff - pstart;
1726 				error = 0;
1727 			/* Fail on requested page, error */
1728 			} else if (off == offset)  {
1729 				error = SE_NOANON;
1730 			/* Fail on prior page, fail on requested page, error */
1731 			} else if ((ap = swap_anon(vp, offset)) == NULL) {
1732 				error = SE_NOANON;
1733 			/* Fail on prior page, got requested page, do only it */
1734 			} else {
1735 				/* Free old slot if any, and assign new one */
1736 				if (ap->an_pvp)
1737 					swap_phys_free(ap->an_pvp, ap->an_poff,
1738 					    PAGESIZE);
1739 				ap->an_pvp = pvp;
1740 				ap->an_poff = poff;
1741 				/* One page kluster */
1742 				start = offset;
1743 				plen = PAGESIZE;
1744 				pstart = poff;
1745 				poff += PAGESIZE;
1746 				prem -= PAGESIZE;
1747 			}
1748 			/* Free unassigned slots */
1749 			swap_phys_free(pvp, poff, prem);
1750 			mutex_exit(ahm);
1751 			break;
1752 		}
1753 		mutex_exit(ahm);
1754 	}
1755 	ASSERT(*offp <= start && start + plen <= *offp + *lenp);
1756 	ASSERT(start <= offset && offset < start + plen);
1757 	*offp = start;
1758 	*lenp = plen;
1759 	*pvpp = pvp;
1760 	*poffp = pstart;
1761 	return (error);
1762 }
1763 
1764 
1765 /*
1766  * Get the physical swap backing store location for a given anonymous page
1767  * named (vp, off). The backing store name is returned in (*pvpp, *poffp).
1768  * Returns	0 		success
1769  *		EIDRM --	no anon slot (page is not allocated)
1770  */
1771 int
1772 swap_getphysname(
1773 	struct vnode *vp,
1774 	u_offset_t off,
1775 	struct vnode **pvpp,
1776 	u_offset_t *poffp)
1777 {
1778 	struct anon *ap;
1779 	int error = 0;
1780 	kmutex_t *ahm;
1781 
1782 	ahm = &anonhash_lock[AH_LOCK(vp, off)];
1783 	mutex_enter(ahm);
1784 
1785 	/* Get anon slot for vp, off */
1786 	ap = swap_anon(vp, off);
1787 	if (ap == NULL) {
1788 		error = EIDRM;
1789 		goto out;
1790 	}
1791 	*pvpp = ap->an_pvp;
1792 	*poffp = ap->an_poff;
1793 out:
1794 	mutex_exit(ahm);
1795 	return (error);
1796 }
1797