xref: /freebsd/sys/i386/i386/vm_machdep.c (revision ac322158f5e51c70f36ae70ef518438706bc8508)
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id: vm_machdep.c,v 1.16 1994/03/30 02:17:47 davidg Exp $
 */

#include "npx.h"
#include "param.h"
#include "systm.h"
#include "proc.h"
#include "malloc.h"
#include "buf.h"
#include "user.h"

#include "../include/cpu.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"

#ifndef NOBOUNCE

caddr_t		bouncememory;
vm_offset_t	bouncepa, bouncepaend;
int		bouncepages, bpwait;
vm_map_t	bounce_map;
int		bmwait, bmfreeing;

#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
int		bounceallocarraysize;
unsigned	*bounceallocarray;
int		bouncefree;

#define SIXTEENMEG (4096*4096)
#define MAXBKVA 512
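
/*
 * ISA DMA can only address the low 16MB of physical memory, so any
 * page at or above SIXTEENMEG has to be redirected through a bounce
 * page below that boundary.  MAXBKVA sizes the bounce kva submap and
 * bounds the deferred kva free list below.
 */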

/* special list that can be used at interrupt time for eventual kva free */
struct kvasfree {
	vm_offset_t addr;
	vm_offset_t size;
} kvaf[MAXBKVA];

int		kvasfreecnt;

/*
 * get bounce buffer pages (count physically contiguous)
 * (only 1 implemented now)
 */
vm_offset_t
vm_bounce_page_find(count)
	int count;
{
	int bit;
	int s,i;

	if (count != 1)
		panic("vm_bounce_page_find -- no support for > 1 page yet!!!");

	s = splbio();
retry:
	for (i = 0; i < bounceallocarraysize; i++) {
		if (bounceallocarray[i] != 0xffffffff) {
			if ((bit = ffs(~bounceallocarray[i])) != 0) {
				bounceallocarray[i] |= 1 << (bit - 1);
				bouncefree -= count;
				splx(s);
				return bouncepa + (i * BITS_IN_UNSIGNED + (bit - 1)) * NBPG;
			}
		}
	}
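	/*
	 * No free bounce pages right now; note that a waiter exists
	 * and sleep until vm_bounce_page_free() wakes us, then rescan.
	 */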
	bpwait = 1;
	tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
	goto retry;
}

/*
 * free count bounce buffer pages
 */
void
vm_bounce_page_free(pa, count)
	vm_offset_t pa;
	int count;
{
	int allocindex;
	int index;
	int bit;

	if (count != 1)
		panic("vm_bounce_page_free -- no support for > 1 page yet!!!\n");

	index = (pa - bouncepa) / NBPG;

	if ((index < 0) || (index >= bouncepages))
		panic("vm_bounce_page_free -- bad index\n");

	allocindex = index / BITS_IN_UNSIGNED;
	bit = index % BITS_IN_UNSIGNED;

	bounceallocarray[allocindex] &= ~(1 << bit);

	bouncefree += count;
	if (bpwait) {
		bpwait = 0;
		wakeup((caddr_t) &bounceallocarray);
	}
}

/*
 * allocate count bounce buffer kva pages
 */
vm_offset_t
vm_bounce_kva(count)
	int count;
{
	int tofree;
	int i;
	int startfree;
	vm_offset_t kva = 0;
	int s = splbio();
	int size = count*NBPG;
	startfree = 0;
more:
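	/*
	 * Drain any kva segments queued for freeing at interrupt time,
	 * unless someone else is already draining them (bmfreeing).  A
	 * queued segment of exactly the size we need is recycled
	 * directly instead of being freed and reallocated.
	 */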
	if (!bmfreeing && ((tofree = kvasfreecnt) != 0)) {
		bmfreeing = 1;
more1:
		for (i = startfree; i < kvasfreecnt; i++) {
			/*
			 * if we have a kva of the right size, no sense
			 * in freeing/reallocating...
			 * might affect fragmentation short term, but
			 * as long as the amount of bounce_map is
			 * significantly more than the maximum transfer
			 * size, I don't think that it is a problem.
			 */
			pmap_remove(kernel_pmap,
				kvaf[i].addr, kvaf[i].addr + kvaf[i].size);
			if (!kva && kvaf[i].size == size) {
				kva = kvaf[i].addr;
			} else {
				kmem_free_wakeup(bounce_map, kvaf[i].addr,
					kvaf[i].size);
			}
		}
		if (kvasfreecnt != tofree) {
			startfree = i;
			bmfreeing = 0;
			goto more;
		}
		kvasfreecnt = 0;
		bmfreeing = 0;
	}

	if (!kva && !(kva = kmem_alloc_pageable(bounce_map, size))) {
		bmwait = 1;
		tsleep((caddr_t) bounce_map, PRIBIO, "bmwait", 0);
		goto more;
	}
	splx(s);

	return kva;
}

/*
 * init the bounce buffer system
 */
void
vm_bounce_init()
{
	vm_offset_t minaddr, maxaddr;

	if (bouncepages == 0)
		return;

	bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
	bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);

	if (!bounceallocarray)
		panic("Cannot allocate bounce resource array\n");

	bzero(bounceallocarray, bounceallocarraysize * sizeof(unsigned));

	bounce_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, MAXBKVA * NBPG, FALSE);

	bouncepa = pmap_kextract((vm_offset_t) bouncememory);
	bouncepaend = bouncepa + bouncepages * NBPG;
	bouncefree = bouncepages;
	kvasfreecnt = 0;
}
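
/*
 * Note: bouncememory and bouncepages are expected to have been set up
 * by the machine-dependent startup code before vm_bounce_init() runs;
 * the pmap_kextract() above relies on the bounce memory already being
 * mapped into the kernel.
 */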

/*
 * do the things necessary to the struct buf to implement
 * bounce buffers...  inserted before the disk sort
 */
void
vm_bounce_alloc(bp)
	struct buf *bp;
{
	int countvmpg;
	vm_offset_t vastart, vaend;
	vm_offset_t vapstart, vapend;
	vm_offset_t va, kva;
	vm_offset_t pa;
	int dobounceflag = 0;
	int bounceindex;
	int i;
	int s;

	if (bouncepages == 0)
		return;

	vastart = (vm_offset_t) bp->b_un.b_addr;
	vaend = (vm_offset_t) bp->b_un.b_addr + bp->b_bufsize;

	vapstart = i386_trunc_page(vastart);
	vapend = i386_round_page(vaend);
	countvmpg = (vapend - vapstart) / NBPG;

/*
 * if any page is above 16MB, then go into bounce-buffer mode
 */
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG)
			++dobounceflag;
		va += NBPG;
	}
	if (dobounceflag == 0)
		return;

	if (bouncepages < dobounceflag)
		panic("Not enough bounce buffers!!!");

/*
 * allocate a replacement kva for b_addr
 */
	kva = vm_bounce_kva(countvmpg);
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG) {
			/*
			 * allocate a replacement page
			 */
			vm_offset_t bpa = vm_bounce_page_find(1);
			pmap_kenter(kva + (NBPG * i), bpa);
			/*
			 * if we are writing, then copy the data into the page
			 */
			if ((bp->b_flags & B_READ) == 0)
				bcopy((caddr_t) va, (caddr_t) kva + (NBPG * i), NBPG);
		} else {
			/*
			 * use original page
			 */
			pmap_kenter(kva + (NBPG * i), pa);
		}
		va += NBPG;
	}
	pmap_update();

/*
 * flag the buffer as being bounced
 */
	bp->b_flags |= B_BOUNCE;
/*
 * save the original buffer kva
 */
	bp->b_savekva = bp->b_un.b_addr;
/*
 * put our new kva into the buffer (offset by original offset)
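 * (the kva returned by vm_bounce_kva() is page aligned, so or-ing in
 * the page offset below is equivalent to adding it)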
 */
	bp->b_un.b_addr = (caddr_t) (((vm_offset_t) kva) |
				((vm_offset_t) bp->b_savekva & (NBPG - 1)));
	return;
}
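
/*
 * Sketch of the intended call sequence, with a hypothetical driver
 * strategy routine (the driver names are illustrative only):
 *
 *	mydev_strategy(bp)
 *		struct buf *bp;
 *	{
 *		...
 *		vm_bounce_alloc(bp);		(before the disk sort)
 *		disksort(&mydev_tab, bp);
 *		...
 *	}
 *
 * vm_bounce_free(bp) is then called from biodone() when the transfer
 * completes.
 */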

/*
 * hook into biodone to free bounce buffer
 */
void
vm_bounce_free(bp)
	struct buf *bp;
{
	int i;
	vm_offset_t origkva, bouncekva;
	vm_offset_t vastart, vaend;
	vm_offset_t vapstart, vapend;
	int countbounce = 0;
	vm_offset_t firstbouncepa = 0;
	int firstbounceindex;
	int countvmpg;
	vm_offset_t bcount;
	int s;

/*
 * if this isn't a bounced buffer, then just return
 */
	if ((bp->b_flags & B_BOUNCE) == 0)
		return;

	origkva = (vm_offset_t) bp->b_savekva;
	bouncekva = (vm_offset_t) bp->b_un.b_addr;

	vastart = bouncekva;
	vaend = bouncekva + bp->b_bufsize;
	bcount = bp->b_bufsize;

	vapstart = i386_trunc_page(vastart);
	vapend = i386_round_page(vaend);

	countvmpg = (vapend - vapstart) / NBPG;

/*
 * check every page in the kva space for b_addr
 */
	for (i = 0; i < countvmpg; i++) {
		vm_offset_t mybouncepa;
		vm_offset_t copycount;

		copycount = i386_round_page(bouncekva + 1) - bouncekva;
		mybouncepa = pmap_kextract(i386_trunc_page(bouncekva));

/*
 * if this is a bounced pa, then process as one
 */
		if ((mybouncepa >= bouncepa) && (mybouncepa < bouncepaend)) {
			if (copycount > bcount)
				copycount = bcount;
/*
 * if this is a read, then copy from bounce buffer into original buffer
 */
			if (bp->b_flags & B_READ)
				bcopy((caddr_t) bouncekva, (caddr_t) origkva, copycount);
/*
 * free the bounce allocation
 */
			vm_bounce_page_free(i386_trunc_page(mybouncepa), 1);
		}

		origkva += copycount;
		bouncekva += copycount;
		bcount -= copycount;
	}

/*
 * add the old kva into the "to free" list
 */
	bouncekva = i386_trunc_page((vm_offset_t) bp->b_un.b_addr);
	kvaf[kvasfreecnt].addr = bouncekva;
	kvaf[kvasfreecnt++].size = countvmpg * NBPG;
	if (bmwait) {
		/*
		 * if anyone is waiting on the bounce-map, then wakeup
		 */
		wakeup((caddr_t) bounce_map);
		bmwait = 0;
	}

	bp->b_un.b_addr = bp->b_savekva;
	bp->b_savekva = 0;
	bp->b_flags &= ~B_BOUNCE;

	return;
}

#endif /* NOBOUNCE */

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
int
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int foo, offset, addr, i;
	extern char kstack[];
	extern int mvesp();

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * swtch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = mvesp() - (int)kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);
	p2->p_regs = p1->p_regs;

	/*
	 * Wire top of address space of child to its kstack.
	 * First, fault in a page of pte's to map it.
	 */
#if 0
	addr = trunc_page((u_int)vtopte(kstack));
	vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr+NBPG, FALSE);
	for (i=0; i < UPAGES; i++)
		pmap_enter(&p2->p_vmspace->vm_pmap, kstack+i*NBPG,
			   pmap_extract(kernel_pmap, ((int)p2->p_addr)+i*NBPG),
			   /*
			    * The user area has to be mapped writable because
			    * it contains the kernel stack (when CR0_WP is set
			    * on a 486 there is no user-read/kernel-write
			    * mode).  It is protected from user mode access
			    * by the segment limits.
			    */
			   VM_PROT_READ|VM_PROT_WRITE, TRUE);
#endif
	pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}

#ifdef notyet
/*
 * cpu_exit is called as the last action during exit.
 *
 * We change to an inactive address space and a "safe" stack,
 * passing thru an argument to the new stack. Now, safely isolated
 * from the resources we're shedding, we release the address space
 * and any remaining machine-dependent resources, including the
 * memory for the user structure and kernel stack.
 *
 * Next, we assign a dummy context to be written over by swtch,
 * calling it to send this process off to oblivion.
 * [The nullpcb allows us to minimize cost in swtch() by not having
 * a special case].
 */
struct proc *swtch_to_inactive();
volatile void
cpu_exit(p)
	register struct proc *p;
{
	static struct pcb nullpcb;	/* pcb to overwrite on last swtch */

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */

	/* move to inactive space and stack, passing arg across */
	p = swtch_to_inactive(p);

	/* drop per-process resources */
	vmspace_free(p->p_vmspace);
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));

	p->p_addr = (struct user *) &nullpcb;
	splclock();
	swtch();
	/* NOTREACHED */
}
#else
void
cpu_exit(p)
	register struct proc *p;
{

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
	splclock();
	curproc = 0;
	swtch();
	/*
	 * This is to shut up the compiler, and if swtch() failed I suppose
	 * this would be a good thing.  This keeps gcc happy because panic
	 * is a volatile void function as well.
	 */
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
/*	extern vm_map_t upages_map; */
	extern char kstack[];

	/* drop per-process resources */
	pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr,
		((vm_offset_t) p->p_addr) + ctob(UPAGES));
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	vmspace_free(p->p_vmspace);
}
#endif

/*
 * Set a red zone in the kernel stack after the u. area.
 */
void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}

/*
 * Convert kernel VA to physical address
 */
u_long
kvtop(void *addr)
{
	vm_offset_t va;

	va = pmap_kextract((vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((u_long)va);
}

extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
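 * (Typically called from physio() when setting up a raw device
 * transfer.)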
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_un.b_addr;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bufsize + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_un.b_addr = (caddr_t) (kva + off);
	while (npf--) {
		pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_kenter(kva, trunc_page(pa));
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
	pmap_update();
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr = bp->b_un.b_addr;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	npf = btoc(round_page(bp->b_bufsize + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_un.b_addr = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

/*
 * Force reset the processor by invalidating the entire address space!
 */
void
cpu_reset()
{

	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t) PTD, NBPG);

	/* "good night, sweet prince .... <THUNK!>" */
	tlbflush();
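	/*
	 * With the page directory cleared and the TLB flushed, the
	 * next instruction fetch faults; the fault handler itself is
	 * unreachable, so the resulting triple fault resets the CPU.
	 */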
	/* NOTREACHED */
	while (1);
}

/*
 * Grow the user stack to allow for 'sp'. This version grows the stack in
 *	chunks of SGROWSIZ.
 */
int
grow(p, sp)
	struct proc *p;
	int sp;
{
	unsigned int nss;
	caddr_t v;
	struct vmspace *vm = p->p_vmspace;

	if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
	    return (1);

	nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE);

	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
		return (0);

	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
	    SGROWSIZ) < nss) {
		int grow_amount;
		/*
		 * If necessary, grow the VM that the stack occupies
		 * to allow for the rlimit. This allows us to not have
		 * to allocate all of the VM up-front in execve (which
		 * is expensive).
		 * Grow the VM by the amount requested rounded up to
		 * the nearest SGROWSIZ to provide for some hysteresis.
		 */
		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ);
		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
		    SGROWSIZ) - grow_amount;
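		/*
		 * Example (with SGROWSIZ of, say, 128K): a fault one
		 * page beyond the currently reserved stack grabs a
		 * whole new 128K chunk rather than a single page, so
		 * vm_allocate() runs rarely as the stack grows.
		 */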
		/*
		 * If there isn't enough room to extend by SGROWSIZ, then
		 * just extend to the maximum size
		 */
		if (v < vm->vm_maxsaddr) {
			v = vm->vm_maxsaddr;
			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
		}
		if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
		    grow_amount, FALSE) != KERN_SUCCESS) {
			return (0);
		}
		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
	}

	return (1);
}