xref: /freebsd/sys/i386/i386/vm_machdep.c (revision ed7fcbd079ef3617e4ddb1ed51a6a75a7cea57c8)
15b81b6b3SRodney W. Grimes /*-
25b81b6b3SRodney W. Grimes  * Copyright (c) 1982, 1986 The Regents of the University of California.
35b81b6b3SRodney W. Grimes  * Copyright (c) 1989, 1990 William Jolitz
45b81b6b3SRodney W. Grimes  * All rights reserved.
55b81b6b3SRodney W. Grimes  *
65b81b6b3SRodney W. Grimes  * This code is derived from software contributed to Berkeley by
75b81b6b3SRodney W. Grimes  * the Systems Programming Group of the University of Utah Computer
85b81b6b3SRodney W. Grimes  * Science Department, and William Jolitz.
95b81b6b3SRodney W. Grimes  *
105b81b6b3SRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
115b81b6b3SRodney W. Grimes  * modification, are permitted provided that the following conditions
125b81b6b3SRodney W. Grimes  * are met:
135b81b6b3SRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
145b81b6b3SRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
155b81b6b3SRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
165b81b6b3SRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
175b81b6b3SRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
185b81b6b3SRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
195b81b6b3SRodney W. Grimes  *    must display the following acknowledgement:
205b81b6b3SRodney W. Grimes  *	This product includes software developed by the University of
215b81b6b3SRodney W. Grimes  *	California, Berkeley and its contributors.
225b81b6b3SRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
235b81b6b3SRodney W. Grimes  *    may be used to endorse or promote products derived from this software
245b81b6b3SRodney W. Grimes  *    without specific prior written permission.
255b81b6b3SRodney W. Grimes  *
265b81b6b3SRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
275b81b6b3SRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
285b81b6b3SRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
295b81b6b3SRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
305b81b6b3SRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
315b81b6b3SRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
325b81b6b3SRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
335b81b6b3SRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
345b81b6b3SRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
355b81b6b3SRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
365b81b6b3SRodney W. Grimes  * SUCH DAMAGE.
375b81b6b3SRodney W. Grimes  *
38960173b9SRodney W. Grimes  *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
395b81b6b3SRodney W. Grimes  *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
40ed7fcbd0SDavid Greenman  *	$Id: vm_machdep.c,v 1.14 1994/03/23 09:15:06 davidg Exp $
415b81b6b3SRodney W. Grimes  */
425b81b6b3SRodney W. Grimes 
43960173b9SRodney W. Grimes #include "npx.h"
445b81b6b3SRodney W. Grimes #include "param.h"
455b81b6b3SRodney W. Grimes #include "systm.h"
465b81b6b3SRodney W. Grimes #include "proc.h"
475b81b6b3SRodney W. Grimes #include "malloc.h"
485b81b6b3SRodney W. Grimes #include "buf.h"
495b81b6b3SRodney W. Grimes #include "user.h"
505b81b6b3SRodney W. Grimes 
515b81b6b3SRodney W. Grimes #include "../include/cpu.h"
525b81b6b3SRodney W. Grimes 
535b81b6b3SRodney W. Grimes #include "vm/vm.h"
545b81b6b3SRodney W. Grimes #include "vm/vm_kern.h"
555b81b6b3SRodney W. Grimes 
56d5e26ef0SDavid Greenman #ifndef NOBOUNCE
57d5e26ef0SDavid Greenman 
58d5e26ef0SDavid Greenman caddr_t		bouncememory;
59d5e26ef0SDavid Greenman vm_offset_t	bouncepa, bouncepaend;
60ed7fcbd0SDavid Greenman int		bouncepages, bpwait;
61d5e26ef0SDavid Greenman vm_map_t	bounce_map;
62d5e26ef0SDavid Greenman int		bmwait, bmfreeing;
63d5e26ef0SDavid Greenman 
64ed7fcbd0SDavid Greenman #define BITS_IN_UNSIGNED (8*sizeof(unsigned))
65d5e26ef0SDavid Greenman int		bounceallocarraysize;
66d5e26ef0SDavid Greenman unsigned	*bounceallocarray;
67d5e26ef0SDavid Greenman int		bouncefree;
68d5e26ef0SDavid Greenman 
69d5e26ef0SDavid Greenman #define SIXTEENMEG (4096*4096)
70d5e26ef0SDavid Greenman #define MAXBKVA 512
71d5e26ef0SDavid Greenman 
72d5e26ef0SDavid Greenman /* special list that can be used at interrupt time for eventual kva free */
73d5e26ef0SDavid Greenman struct kvasfree {
74d5e26ef0SDavid Greenman 	vm_offset_t addr;
75d5e26ef0SDavid Greenman 	vm_offset_t size;
76d5e26ef0SDavid Greenman } kvaf[MAXBKVA];
77d5e26ef0SDavid Greenman 
78d5e26ef0SDavid Greenman int		kvasfreecnt;
79d5e26ef0SDavid Greenman 
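/*
 * Illustrative sketch, not compiled: how a bounce page index maps into
 * the allocation bitmap above.  With BITS_IN_UNSIGNED == 32 on i386,
 * page index 37 lives in word 1, bit 5.  The helper below is
 * hypothetical and exists only for this example; it mirrors the index
 * arithmetic used in vm_bounce_page_free().
 */
#if 0
static int
bounce_bit_is_set(index)
	int index;
{
	return ((bounceallocarray[index / BITS_IN_UNSIGNED] >>
	    (index % BITS_IN_UNSIGNED)) & 1);
}
#endif
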
80d5e26ef0SDavid Greenman /*
81d5e26ef0SDavid Greenman  * get bounce buffer pages (count physically contiguous)
82d5e26ef0SDavid Greenman  * (only 1 implemented now)
83d5e26ef0SDavid Greenman  */
84d5e26ef0SDavid Greenman vm_offset_t
85d5e26ef0SDavid Greenman vm_bounce_page_find(count)
86d5e26ef0SDavid Greenman 	int count;
87d5e26ef0SDavid Greenman {
88d5e26ef0SDavid Greenman 	int bit;
89d5e26ef0SDavid Greenman 	int s,i;
90d5e26ef0SDavid Greenman 
91d5e26ef0SDavid Greenman 	if (count != 1)
92d5e26ef0SDavid Greenman 		panic("vm_bounce_page_find -- no support for > 1 page yet!!!");
93d5e26ef0SDavid Greenman 
94d5e26ef0SDavid Greenman 	s = splbio();
95d5e26ef0SDavid Greenman retry:
96d5e26ef0SDavid Greenman 	for (i = 0; i < bounceallocarraysize; i++) {
97d5e26ef0SDavid Greenman 		if (bounceallocarray[i] != 0xffffffff) {
98d5e26ef0SDavid Greenman 			if ((bit = ffs(~bounceallocarray[i])) != 0) {
99d5e26ef0SDavid Greenman 				bounceallocarray[i] |= 1 << (bit - 1);
100d5e26ef0SDavid Greenman 				bouncefree -= count;
101d5e26ef0SDavid Greenman 				splx(s);
102ed7fcbd0SDavid Greenman 				return bouncepa + (i * BITS_IN_UNSIGNED + (bit - 1)) * NBPG;
103d5e26ef0SDavid Greenman 			}
104d5e26ef0SDavid Greenman 		}
105d5e26ef0SDavid Greenman 	}
106ed7fcbd0SDavid Greenman 	bpwait = 1;
107d5e26ef0SDavid Greenman 	tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
108d5e26ef0SDavid Greenman 	goto retry;
109d5e26ef0SDavid Greenman }
110d5e26ef0SDavid Greenman 
111d5e26ef0SDavid Greenman /*
112d5e26ef0SDavid Greenman  * free count bounce buffer pages
113d5e26ef0SDavid Greenman  */
114d5e26ef0SDavid Greenman void
115d5e26ef0SDavid Greenman vm_bounce_page_free(pa, count)
116d5e26ef0SDavid Greenman 	vm_offset_t pa;
117d5e26ef0SDavid Greenman 	int count;
118d5e26ef0SDavid Greenman {
119d5e26ef0SDavid Greenman 	int allocindex;
120d5e26ef0SDavid Greenman 	int index;
121d5e26ef0SDavid Greenman 	int bit;
122d5e26ef0SDavid Greenman 
123d5e26ef0SDavid Greenman 	if (count != 1)
124d5e26ef0SDavid Greenman 		panic("vm_bounce_page_free -- no support for > 1 page yet!!!\n");
125d5e26ef0SDavid Greenman 
126d5e26ef0SDavid Greenman 	index = (pa - bouncepa) / NBPG;
127d5e26ef0SDavid Greenman 
128d5e26ef0SDavid Greenman 	if ((index < 0) || (index >= bouncepages))
129d5e26ef0SDavid Greenman 		panic("vm_bounce_page_free -- bad index\n");
130d5e26ef0SDavid Greenman 
131ed7fcbd0SDavid Greenman 	allocindex = index / BITS_IN_UNSIGNED;
132ed7fcbd0SDavid Greenman 	bit = index % BITS_IN_UNSIGNED;
133d5e26ef0SDavid Greenman 
134d5e26ef0SDavid Greenman 	bounceallocarray[allocindex] &= ~(1 << bit);
135d5e26ef0SDavid Greenman 
136d5e26ef0SDavid Greenman 	bouncefree += count;
137ed7fcbd0SDavid Greenman 	if (bpwait) {
138ed7fcbd0SDavid Greenman 		bpwait = 0;
139d5e26ef0SDavid Greenman 		wakeup((caddr_t) &bounceallocarray);
140d5e26ef0SDavid Greenman 	}
141ed7fcbd0SDavid Greenman }
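
#if 0
/*
 * Hypothetical usage sketch, not compiled: callers pair each
 * vm_bounce_page_find() with a vm_bounce_page_free() of the same
 * count, as the two routines above expect.
 */
static void
vm_bounce_page_example()
{
	vm_offset_t pa;

	pa = vm_bounce_page_find(1);	/* may sleep until a page frees */
	/* ... perform the DMA transfer against the page at pa ... */
	vm_bounce_page_free(pa, 1);	/* wakes any sleeper in _find */
}
#endif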
142d5e26ef0SDavid Greenman 
143d5e26ef0SDavid Greenman /*
144d5e26ef0SDavid Greenman  * allocate count bounce buffer kva pages
145d5e26ef0SDavid Greenman  */
146d5e26ef0SDavid Greenman vm_offset_t
147d5e26ef0SDavid Greenman vm_bounce_kva(count)
148d5e26ef0SDavid Greenman 	int count;
149d5e26ef0SDavid Greenman {
150d5e26ef0SDavid Greenman 	int tofree;
151d5e26ef0SDavid Greenman 	int i;
152d5e26ef0SDavid Greenman 	int startfree;
153d5e26ef0SDavid Greenman 	vm_offset_t kva;
154d5e26ef0SDavid Greenman 	int s = splbio();
155d5e26ef0SDavid Greenman 	startfree = 0;
156d5e26ef0SDavid Greenman more:
157d5e26ef0SDavid Greenman 	if (!bmfreeing && (tofree = kvasfreecnt)) {
158d5e26ef0SDavid Greenman 		bmfreeing = 1;
159d5e26ef0SDavid Greenman more1:
160d5e26ef0SDavid Greenman 		for (i = startfree; i < kvasfreecnt; i++) {
161d5e26ef0SDavid Greenman 			pmap_remove(kernel_pmap,
162d5e26ef0SDavid Greenman 				kvaf[i].addr, kvaf[i].addr + kvaf[i].size);
163d5e26ef0SDavid Greenman 			kmem_free_wakeup(bounce_map, kvaf[i].addr,
164d5e26ef0SDavid Greenman 				kvaf[i].size);
165d5e26ef0SDavid Greenman 		}
166d5e26ef0SDavid Greenman 		if (kvasfreecnt != tofree) {
167d5e26ef0SDavid Greenman 			startfree = i;
168d5e26ef0SDavid Greenman 			bmfreeing = 0;
169d5e26ef0SDavid Greenman 			goto more;
170d5e26ef0SDavid Greenman 		}
171d5e26ef0SDavid Greenman 		kvasfreecnt = 0;
172d5e26ef0SDavid Greenman 		bmfreeing = 0;
173d5e26ef0SDavid Greenman 	}
174d5e26ef0SDavid Greenman 
175d5e26ef0SDavid Greenman 	if (!(kva = kmem_alloc_pageable(bounce_map, count * NBPG))) {
176d5e26ef0SDavid Greenman 		bmwait = 1;
177d5e26ef0SDavid Greenman 		tsleep((caddr_t) bounce_map, PRIBIO, "bmwait", 0);
178d5e26ef0SDavid Greenman 		goto more;
179d5e26ef0SDavid Greenman 	}
180d5e26ef0SDavid Greenman 
181d5e26ef0SDavid Greenman 	splx(s);
182d5e26ef0SDavid Greenman 
183d5e26ef0SDavid Greenman 	return kva;
184d5e26ef0SDavid Greenman }
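
#if 0
/*
 * Hypothetical sketch, not compiled: how interrupt-time code hands a
 * kva range to the deferred-free list that vm_bounce_kva() drains at
 * splbio above.  This mirrors the enqueue in vm_bounce_free() below
 * and, like it, assumes kvasfreecnt stays below MAXBKVA.
 */
static void
vm_bounce_kva_defer_example(addr, size)
	vm_offset_t addr, size;
{
	kvaf[kvasfreecnt].addr = addr;
	kvaf[kvasfreecnt++].size = size;
	if (bmwait) {
		wakeup((caddr_t) bounce_map);
		bmwait = 0;
	}
}
#endif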
185d5e26ef0SDavid Greenman 
186d5e26ef0SDavid Greenman /*
187d5e26ef0SDavid Greenman  * init the bounce buffer system
188d5e26ef0SDavid Greenman  */
189d5e26ef0SDavid Greenman void
190d5e26ef0SDavid Greenman vm_bounce_init()
191d5e26ef0SDavid Greenman {
192d5e26ef0SDavid Greenman 	vm_offset_t minaddr, maxaddr;
193d5e26ef0SDavid Greenman 
194d5e26ef0SDavid Greenman 	if (bouncepages == 0)
195d5e26ef0SDavid Greenman 		return;
196d5e26ef0SDavid Greenman 
197ed7fcbd0SDavid Greenman 	bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
198d5e26ef0SDavid Greenman 	bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);
199d5e26ef0SDavid Greenman 
200d5e26ef0SDavid Greenman 	if (!bounceallocarray)
201d5e26ef0SDavid Greenman 		panic("Cannot allocate bounce resource array\n");
202d5e26ef0SDavid Greenman 
203d5e26ef0SDavid Greenman 	bzero(bounceallocarray, bounceallocarraysize * sizeof(unsigned));
204d5e26ef0SDavid Greenman 
205d5e26ef0SDavid Greenman 	bounce_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, MAXBKVA * NBPG, FALSE);
206d5e26ef0SDavid Greenman 
207ed7fcbd0SDavid Greenman 	bouncepa = pmap_kextract((vm_offset_t) bouncememory);
208d5e26ef0SDavid Greenman 	bouncepaend = bouncepa + bouncepages * NBPG;
209d5e26ef0SDavid Greenman 	bouncefree = bouncepages;
210d5e26ef0SDavid Greenman 	kvasfreecnt = 0;
211d5e26ef0SDavid Greenman }
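
/*
 * Worked example of the sizing arithmetic above, illustration only:
 * with bouncepages == 48 and 32-bit words, bounceallocarraysize is
 * (48 + 31) / 32 == 2 words, covering 64 bits of which only the first
 * 48 can ever be set.
 */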
212d5e26ef0SDavid Greenman 
213d5e26ef0SDavid Greenman /*
214d5e26ef0SDavid Greenman  * do the things necessary to the struct buf to implement
215d5e26ef0SDavid Greenman  * bounce buffers...  inserted before the disk sort
216d5e26ef0SDavid Greenman  */
217d5e26ef0SDavid Greenman void
218d5e26ef0SDavid Greenman vm_bounce_alloc(bp)
219d5e26ef0SDavid Greenman 	struct buf *bp;
220d5e26ef0SDavid Greenman {
221d5e26ef0SDavid Greenman 	int countvmpg;
222d5e26ef0SDavid Greenman 	vm_offset_t vastart, vaend;
223d5e26ef0SDavid Greenman 	vm_offset_t vapstart, vapend;
224d5e26ef0SDavid Greenman 	vm_offset_t va, kva;
225d5e26ef0SDavid Greenman 	vm_offset_t pa;
226d5e26ef0SDavid Greenman 	int dobounceflag = 0;
227d5e26ef0SDavid Greenman 	int bounceindex;
228d5e26ef0SDavid Greenman 	int i;
229d5e26ef0SDavid Greenman 	int s;
230d5e26ef0SDavid Greenman 
231d5e26ef0SDavid Greenman 	if (bouncepages == 0)
232d5e26ef0SDavid Greenman 		return;
233d5e26ef0SDavid Greenman 
234d5e26ef0SDavid Greenman 	vastart = (vm_offset_t) bp->b_un.b_addr;
235d5e26ef0SDavid Greenman 	vaend = (vm_offset_t) bp->b_un.b_addr + bp->b_bcount;
236d5e26ef0SDavid Greenman 
237d5e26ef0SDavid Greenman 	vapstart = i386_trunc_page(vastart);
238d5e26ef0SDavid Greenman 	vapend = i386_round_page(vaend);
239d5e26ef0SDavid Greenman 	countvmpg = (vapend - vapstart) / NBPG;
240d5e26ef0SDavid Greenman 
241d5e26ef0SDavid Greenman /*
242d5e26ef0SDavid Greenman  * if any page is above 16MB, then go into bounce-buffer mode
243d5e26ef0SDavid Greenman  */
244d5e26ef0SDavid Greenman 	va = vapstart;
245d5e26ef0SDavid Greenman 	for (i = 0; i < countvmpg; i++) {
246ed7fcbd0SDavid Greenman 		pa = pmap_kextract(va);
247d5e26ef0SDavid Greenman 		if (pa >= SIXTEENMEG)
248d5e26ef0SDavid Greenman 			++dobounceflag;
249d5e26ef0SDavid Greenman 		va += NBPG;
250d5e26ef0SDavid Greenman 	}
251d5e26ef0SDavid Greenman 	if (dobounceflag == 0)
252d5e26ef0SDavid Greenman 		return;
253d5e26ef0SDavid Greenman 
254d5e26ef0SDavid Greenman 	if (bouncepages < dobounceflag)
255d5e26ef0SDavid Greenman 		panic("Not enough bounce buffers!!!");
256d5e26ef0SDavid Greenman 
257d5e26ef0SDavid Greenman /*
258d5e26ef0SDavid Greenman  * allocate a replacement kva for b_addr
259d5e26ef0SDavid Greenman  */
260d5e26ef0SDavid Greenman 	kva = vm_bounce_kva(countvmpg);
261d5e26ef0SDavid Greenman 	va = vapstart;
262d5e26ef0SDavid Greenman 	for (i = 0; i < countvmpg; i++) {
263ed7fcbd0SDavid Greenman 		pa = pmap_kextract(va);
264d5e26ef0SDavid Greenman 		if (pa >= SIXTEENMEG) {
265d5e26ef0SDavid Greenman 			/*
266d5e26ef0SDavid Greenman 			 * allocate a replacement page
267d5e26ef0SDavid Greenman 			 */
268d5e26ef0SDavid Greenman 			vm_offset_t bpa = vm_bounce_page_find(1);
269d5e26ef0SDavid Greenman 			pmap_enter(kernel_pmap, kva + (NBPG * i), bpa, VM_PROT_DEFAULT,
270d5e26ef0SDavid Greenman 				TRUE);
271d5e26ef0SDavid Greenman 			/*
272d5e26ef0SDavid Greenman 			 * if we are writing, the copy the data into the page
273d5e26ef0SDavid Greenman 			 * if we are writing, then copy the data into the page
274d5e26ef0SDavid Greenman 			if ((bp->b_flags & B_READ) == 0)
275d5e26ef0SDavid Greenman 				bcopy((caddr_t) va, (caddr_t) kva + (NBPG * i), NBPG);
276d5e26ef0SDavid Greenman 		} else {
277d5e26ef0SDavid Greenman 			/*
278d5e26ef0SDavid Greenman 			 * use original page
279d5e26ef0SDavid Greenman 			 */
280d5e26ef0SDavid Greenman 			pmap_enter(kernel_pmap, kva + (NBPG * i), pa, VM_PROT_DEFAULT,
281d5e26ef0SDavid Greenman 				TRUE);
282d5e26ef0SDavid Greenman 		}
283d5e26ef0SDavid Greenman 		va += NBPG;
284d5e26ef0SDavid Greenman 	}
285d5e26ef0SDavid Greenman 
286d5e26ef0SDavid Greenman /*
287d5e26ef0SDavid Greenman  * flag the buffer as being bounced
288d5e26ef0SDavid Greenman  */
289d5e26ef0SDavid Greenman 	bp->b_flags |= B_BOUNCE;
290d5e26ef0SDavid Greenman /*
291d5e26ef0SDavid Greenman  * save the original buffer kva
292d5e26ef0SDavid Greenman  */
293d5e26ef0SDavid Greenman 	bp->b_savekva = bp->b_un.b_addr;
294d5e26ef0SDavid Greenman /*
295d5e26ef0SDavid Greenman  * put our new kva into the buffer (offset by original offset)
296d5e26ef0SDavid Greenman  */
297d5e26ef0SDavid Greenman 	bp->b_un.b_addr = (caddr_t) (((vm_offset_t) kva) |
298d5e26ef0SDavid Greenman 				((vm_offset_t) bp->b_savekva & (NBPG - 1)));
299d5e26ef0SDavid Greenman 	return;
300d5e26ef0SDavid Greenman }
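
/*
 * Worked example of the offset arithmetic above, illustration only
 * (addresses are made up): if the original b_addr is 0xfe003204 and
 * the replacement kva is 0xfb742000, then 0xfe003204 & (NBPG - 1) ==
 * 0x204, so b_addr becomes 0xfb742204 and the intra-page offset is
 * preserved.
 */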
301d5e26ef0SDavid Greenman 
302d5e26ef0SDavid Greenman /*
303d5e26ef0SDavid Greenman  * hook into biodone to free bounce buffer
304d5e26ef0SDavid Greenman  */
305d5e26ef0SDavid Greenman void
306d5e26ef0SDavid Greenman vm_bounce_free(bp)
307d5e26ef0SDavid Greenman 	struct buf *bp;
308d5e26ef0SDavid Greenman {
309d5e26ef0SDavid Greenman 	int i;
310d5e26ef0SDavid Greenman 	vm_offset_t origkva, bouncekva;
311d5e26ef0SDavid Greenman 	vm_offset_t vastart, vaend;
312d5e26ef0SDavid Greenman 	vm_offset_t vapstart, vapend;
313d5e26ef0SDavid Greenman 	int countbounce = 0;
314d5e26ef0SDavid Greenman 	vm_offset_t firstbouncepa = 0;
315d5e26ef0SDavid Greenman 	int firstbounceindex;
316d5e26ef0SDavid Greenman 	int countvmpg;
317d5e26ef0SDavid Greenman 	vm_offset_t bcount;
318d5e26ef0SDavid Greenman 	int s;
319d5e26ef0SDavid Greenman 
320d5e26ef0SDavid Greenman /*
321d5e26ef0SDavid Greenman  * if this isn't a bounced buffer, then just return
322d5e26ef0SDavid Greenman  */
323d5e26ef0SDavid Greenman 	if ((bp->b_flags & B_BOUNCE) == 0)
324d5e26ef0SDavid Greenman 		return;
325d5e26ef0SDavid Greenman 
326d5e26ef0SDavid Greenman 	origkva = (vm_offset_t) bp->b_savekva;
327d5e26ef0SDavid Greenman 	bouncekva = (vm_offset_t) bp->b_un.b_addr;
328d5e26ef0SDavid Greenman 
329d5e26ef0SDavid Greenman 	vastart = bouncekva;
330d5e26ef0SDavid Greenman 	vaend = bouncekva + bp->b_bcount;
331d5e26ef0SDavid Greenman 	bcount = bp->b_bcount;
332d5e26ef0SDavid Greenman 
333d5e26ef0SDavid Greenman 	vapstart = i386_trunc_page(vastart);
334d5e26ef0SDavid Greenman 	vapend = i386_round_page(vaend);
335d5e26ef0SDavid Greenman 
336d5e26ef0SDavid Greenman 	countvmpg = (vapend - vapstart) / NBPG;
337d5e26ef0SDavid Greenman 
338d5e26ef0SDavid Greenman /*
339d5e26ef0SDavid Greenman  * check every page in the kva space for b_addr
340d5e26ef0SDavid Greenman  */
341d5e26ef0SDavid Greenman 	for (i = 0; i < countvmpg; i++) {
342d5e26ef0SDavid Greenman 		vm_offset_t mybouncepa;
343d5e26ef0SDavid Greenman 		vm_offset_t copycount;
344d5e26ef0SDavid Greenman 
345d5e26ef0SDavid Greenman 		copycount = i386_round_page(bouncekva + 1) - bouncekva;
346ed7fcbd0SDavid Greenman 		mybouncepa = pmap_kextract(i386_trunc_page(bouncekva));
347d5e26ef0SDavid Greenman 
348d5e26ef0SDavid Greenman /*
349d5e26ef0SDavid Greenman  * if this is a bounced pa, then process as one
350d5e26ef0SDavid Greenman  */
351d5e26ef0SDavid Greenman 		if ((mybouncepa >= bouncepa) && (mybouncepa < bouncepaend)) {
352d5e26ef0SDavid Greenman 			if (copycount > bcount)
353d5e26ef0SDavid Greenman 				copycount = bcount;
354d5e26ef0SDavid Greenman /*
355d5e26ef0SDavid Greenman  * if this is a read, then copy from bounce buffer into original buffer
356d5e26ef0SDavid Greenman  */
357d5e26ef0SDavid Greenman 			if (bp->b_flags & B_READ)
358d5e26ef0SDavid Greenman 				bcopy((caddr_t) bouncekva, (caddr_t) origkva, copycount);
359d5e26ef0SDavid Greenman /*
360d5e26ef0SDavid Greenman  * free the bounce allocation
361d5e26ef0SDavid Greenman  */
362d5e26ef0SDavid Greenman 			vm_bounce_page_free(i386_trunc_page(mybouncepa), 1);
363d5e26ef0SDavid Greenman 		}
364d5e26ef0SDavid Greenman 
365d5e26ef0SDavid Greenman 		origkva += copycount;
366d5e26ef0SDavid Greenman 		bouncekva += copycount;
367d5e26ef0SDavid Greenman 		bcount -= copycount;
368d5e26ef0SDavid Greenman 	}
369d5e26ef0SDavid Greenman 
370d5e26ef0SDavid Greenman /*
371d5e26ef0SDavid Greenman  * add the old kva into the "to free" list
372d5e26ef0SDavid Greenman  */
373d5e26ef0SDavid Greenman 	bouncekva = i386_trunc_page((vm_offset_t) bp->b_un.b_addr);
374d5e26ef0SDavid Greenman 	kvaf[kvasfreecnt].addr = bouncekva;
375d5e26ef0SDavid Greenman 	kvaf[kvasfreecnt++].size = countvmpg * NBPG;
376d5e26ef0SDavid Greenman 	if (bmwait) {
377d5e26ef0SDavid Greenman 		/*
378d5e26ef0SDavid Greenman 		 * if anyone is waiting on the bounce-map, then wakeup
379d5e26ef0SDavid Greenman 		 */
380d5e26ef0SDavid Greenman 		wakeup((caddr_t) bounce_map);
381d5e26ef0SDavid Greenman 		bmwait = 0;
382d5e26ef0SDavid Greenman 	}
383d5e26ef0SDavid Greenman 
384d5e26ef0SDavid Greenman 	bp->b_un.b_addr = bp->b_savekva;
385d5e26ef0SDavid Greenman 	bp->b_savekva = 0;
386d5e26ef0SDavid Greenman 	bp->b_flags &= ~B_BOUNCE;
387d5e26ef0SDavid Greenman 
388d5e26ef0SDavid Greenman 	return;
389d5e26ef0SDavid Greenman }
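
/*
 * Worked example of the copycount logic above, illustration only
 * (addresses are made up): with bouncekva == 0xfb742204,
 * i386_round_page(bouncekva + 1) is 0xfb743000, so the first pass
 * copies 0xdfc bytes and leaves bouncekva page aligned; later passes
 * copy NBPG at a time until bcount runs out.
 */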
390d5e26ef0SDavid Greenman 
391d5e26ef0SDavid Greenman #endif /* NOBOUNCE */
392d5e26ef0SDavid Greenman 
3935b81b6b3SRodney W. Grimes /*
3945b81b6b3SRodney W. Grimes  * Finish a fork operation, with process p2 nearly set up.
3955b81b6b3SRodney W. Grimes  * Copy and update the kernel stack and pcb, making the child
3965b81b6b3SRodney W. Grimes  * ready to run, and marking it so that it can return differently
3975b81b6b3SRodney W. Grimes  * than the parent.  Returns 1 in the child process, 0 in the parent.
3985b81b6b3SRodney W. Grimes  * We currently double-map the user area so that the stack is at the same
3995b81b6b3SRodney W. Grimes  * address in each process; in the future we will probably relocate
4005b81b6b3SRodney W. Grimes  * the frame pointers on the stack after copying.
4015b81b6b3SRodney W. Grimes  */
402381fe1aaSGarrett Wollman int
4035b81b6b3SRodney W. Grimes cpu_fork(p1, p2)
4045b81b6b3SRodney W. Grimes 	register struct proc *p1, *p2;
4055b81b6b3SRodney W. Grimes {
4065b81b6b3SRodney W. Grimes 	register struct user *up = p2->p_addr;
4075b81b6b3SRodney W. Grimes 	int foo, offset, addr, i;
4085b81b6b3SRodney W. Grimes 	extern char kstack[];
4095b81b6b3SRodney W. Grimes 	extern int mvesp();
4105b81b6b3SRodney W. Grimes 
4115b81b6b3SRodney W. Grimes 	/*
4125b81b6b3SRodney W. Grimes 	 * Copy pcb and stack from proc p1 to p2.
4135b81b6b3SRodney W. Grimes 	 * We do this as cheaply as possible, copying only the active
4145b81b6b3SRodney W. Grimes 	 * part of the stack.  The stack and pcb need to agree;
4155b81b6b3SRodney W. Grimes 	 * this is tricky, as the final pcb is constructed by savectx,
4165b81b6b3SRodney W. Grimes 	 * but its frame isn't yet on the stack when the stack is copied.
4175b81b6b3SRodney W. Grimes 	 * swtch compensates for this when the child eventually runs.
4185b81b6b3SRodney W. Grimes 	 * This should be done differently, with a single call
4195b81b6b3SRodney W. Grimes 	 * that copies and updates the pcb+stack,
4205b81b6b3SRodney W. Grimes 	 * replacing the bcopy and savectx.
4215b81b6b3SRodney W. Grimes 	 */
4225b81b6b3SRodney W. Grimes 	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
4235b81b6b3SRodney W. Grimes 	offset = mvesp() - (int)kstack;
4245b81b6b3SRodney W. Grimes 	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
4255b81b6b3SRodney W. Grimes 	    (unsigned) ctob(UPAGES) - offset);
4265b81b6b3SRodney W. Grimes 	p2->p_regs = p1->p_regs;
4275b81b6b3SRodney W. Grimes 
4285b81b6b3SRodney W. Grimes 	/*
4295b81b6b3SRodney W. Grimes 	 * Wire top of address space of child to its kstack.
4305b81b6b3SRodney W. Grimes 	 * First, fault in a page of pte's to map it.
4315b81b6b3SRodney W. Grimes 	 */
4327f8cb368SDavid Greenman #if 0
4335b81b6b3SRodney W. Grimes         addr = trunc_page((u_int)vtopte(kstack));
4345b81b6b3SRodney W. Grimes 	vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr+NBPG, FALSE);
4355b81b6b3SRodney W. Grimes 	for (i=0; i < UPAGES; i++)
4367f8cb368SDavid Greenman 		pmap_enter(&p2->p_vmspace->vm_pmap, kstack+i*NBPG,
43726931201SDavid Greenman 			   pmap_extract(kernel_pmap, ((int)p2->p_addr)+i*NBPG),
43826931201SDavid Greenman 			   /*
43926931201SDavid Greenman 			    * The user area has to be mapped writable because
44026931201SDavid Greenman 			    * it contains the kernel stack (with CR0_WP set
44126931201SDavid Greenman 			    * on a 486 there is no user-read/kernel-write
44226931201SDavid Greenman 			    * mode).  It is protected from user mode access
44326931201SDavid Greenman 			    * by the segment limits.
44426931201SDavid Greenman 			    */
44526931201SDavid Greenman 			   VM_PROT_READ|VM_PROT_WRITE, TRUE);
4467f8cb368SDavid Greenman #endif
4475b81b6b3SRodney W. Grimes 	pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);
4485b81b6b3SRodney W. Grimes 
4495b81b6b3SRodney W. Grimes 	/*
4515b81b6b3SRodney W. Grimes 	 * Arrange for a non-local goto when the new process
4525b81b6b3SRodney W. Grimes 	 * is started, to resume here, returning nonzero from setjmp.
4535b81b6b3SRodney W. Grimes 	 */
4545b81b6b3SRodney W. Grimes 	if (savectx(up, 1)) {
4555b81b6b3SRodney W. Grimes 		/*
4565b81b6b3SRodney W. Grimes 		 * Return 1 in child.
4575b81b6b3SRodney W. Grimes 		 */
4585b81b6b3SRodney W. Grimes 		return (1);
4595b81b6b3SRodney W. Grimes 	}
4605b81b6b3SRodney W. Grimes 	return (0);
4615b81b6b3SRodney W. Grimes }
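
#if 0
/*
 * Hypothetical caller sketch, not compiled: MI fork code can tell
 * parent from child by cpu_fork()'s return value, which is nonzero
 * only in the child's resumed context.
 */
static void
cpu_fork_caller_example(p1, p2)
	struct proc *p1, *p2;
{
	if (cpu_fork(p1, p2)) {
		/* child: runs when the new process is first switched in */
		return;
	}
	/* parent: continues normal fork bookkeeping */
}
#endif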
4625b81b6b3SRodney W. Grimes 
4635b81b6b3SRodney W. Grimes #ifdef notyet
4645b81b6b3SRodney W. Grimes /*
4655b81b6b3SRodney W. Grimes  * cpu_exit is called as the last action during exit.
4665b81b6b3SRodney W. Grimes  *
4675b81b6b3SRodney W. Grimes  * We change to an inactive address space and a "safe" stack,
4685b81b6b3SRodney W. Grimes  * passing thru an argument to the new stack. Now, safely isolated
4695b81b6b3SRodney W. Grimes  * from the resources we're shedding, we release the address space
4705b81b6b3SRodney W. Grimes  * and any remaining machine-dependent resources, including the
4715b81b6b3SRodney W. Grimes  * memory for the user structure and kernel stack.
4725b81b6b3SRodney W. Grimes  *
4735b81b6b3SRodney W. Grimes  * Next, we assign a dummy context to be written over by swtch,
4745b81b6b3SRodney W. Grimes  * calling it to send this process off to oblivion.
4755b81b6b3SRodney W. Grimes  * [The nullpcb allows us to minimize cost in swtch() by not having
4765b81b6b3SRodney W. Grimes  * a special case].
4775b81b6b3SRodney W. Grimes  */
4785b81b6b3SRodney W. Grimes struct proc *swtch_to_inactive();
47975124a8bSPaul Richards volatile void
4805b81b6b3SRodney W. Grimes cpu_exit(p)
4815b81b6b3SRodney W. Grimes 	register struct proc *p;
4825b81b6b3SRodney W. Grimes {
4835b81b6b3SRodney W. Grimes 	static struct pcb nullpcb;	/* pcb to overwrite on last swtch */
4845b81b6b3SRodney W. Grimes 
485960173b9SRodney W. Grimes #if NNPX > 0
4865b81b6b3SRodney W. Grimes 	npxexit(p);
487960173b9SRodney W. Grimes #endif	/* NNPX */
4885b81b6b3SRodney W. Grimes 
4895b81b6b3SRodney W. Grimes 	/* move to inactive space and stack, passing arg across */
4905b81b6b3SRodney W. Grimes 	p = swtch_to_inactive(p);
4915b81b6b3SRodney W. Grimes 
4925b81b6b3SRodney W. Grimes 	/* drop per-process resources */
4935b81b6b3SRodney W. Grimes 	vmspace_free(p->p_vmspace);
4945b81b6b3SRodney W. Grimes 	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
4955b81b6b3SRodney W. Grimes 
4965b81b6b3SRodney W. Grimes 	p->p_addr = (struct user *) &nullpcb;
4975b81b6b3SRodney W. Grimes 	splclock();
4985b81b6b3SRodney W. Grimes 	swtch();
4995b81b6b3SRodney W. Grimes 	/* NOTREACHED */
5005b81b6b3SRodney W. Grimes }
5015b81b6b3SRodney W. Grimes #else
5027c2b54e8SNate Williams void
5035b81b6b3SRodney W. Grimes cpu_exit(p)
5045b81b6b3SRodney W. Grimes 	register struct proc *p;
5055b81b6b3SRodney W. Grimes {
5065b81b6b3SRodney W. Grimes 
507960173b9SRodney W. Grimes #if NNPX > 0
5085b81b6b3SRodney W. Grimes 	npxexit(p);
509960173b9SRodney W. Grimes #endif	/* NNPX */
5105b81b6b3SRodney W. Grimes 	splclock();
5117f8cb368SDavid Greenman 	curproc = 0;
5125b81b6b3SRodney W. Grimes 	swtch();
5137c2b54e8SNate Williams 	/*
5147c2b54e8SNate Williams 	 * This is here to quiet the compiler, and if swtch() ever did
5157c2b54e8SNate Williams 	 * return, panicking would be the right response.  It keeps gcc
5167c2b54e8SNate Williams 	 * happy because panic is a volatile void function as well.
5177c2b54e8SNate Williams 	 */
5187c2b54e8SNate Williams 	panic("cpu_exit");
5195b81b6b3SRodney W. Grimes }
5205b81b6b3SRodney W. Grimes 
521381fe1aaSGarrett Wollman void
5227f8cb368SDavid Greenman cpu_wait(p)
	struct proc *p;
{
5237f8cb368SDavid Greenman /*	extern vm_map_t upages_map; */
5247f8cb368SDavid Greenman 	extern char kstack[];
5255b81b6b3SRodney W. Grimes 
5265b81b6b3SRodney W. Grimes 	/* drop per-process resources */
5277f8cb368SDavid Greenman  	pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr,
5287f8cb368SDavid Greenman 		((vm_offset_t) p->p_addr) + ctob(UPAGES));
5295b81b6b3SRodney W. Grimes 	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
5307f8cb368SDavid Greenman 	vmspace_free(p->p_vmspace);
5315b81b6b3SRodney W. Grimes }
5325b81b6b3SRodney W. Grimes #endif
5335b81b6b3SRodney W. Grimes 
5345b81b6b3SRodney W. Grimes /*
5355b81b6b3SRodney W. Grimes  * Set a red zone in the kernel stack after the u. area.
5365b81b6b3SRodney W. Grimes  */
537381fe1aaSGarrett Wollman void
5385b81b6b3SRodney W. Grimes setredzone(pte, vaddr)
5395b81b6b3SRodney W. Grimes 	u_short *pte;
5405b81b6b3SRodney W. Grimes 	caddr_t vaddr;
5415b81b6b3SRodney W. Grimes {
5425b81b6b3SRodney W. Grimes /* eventually do this by setting up an expand-down stack segment
5435b81b6b3SRodney W. Grimes    for ss0: selector, allowing stack access down to top of u.
5445b81b6b3SRodney W. Grimes    this means though that protection violations need to be handled
5455b81b6b3SRodney W. Grimes    thru a double fault exception that must do an integral task
5465b81b6b3SRodney W. Grimes    switch to a known good context, within which a dump can be
5475b81b6b3SRodney W. Grimes    taken. a sensible scheme might be to save the initial context
5485b81b6b3SRodney W. Grimes    used by sched (that has physical memory mapped 1:1 at bottom)
5495b81b6b3SRodney W. Grimes    and take the dump while still in mapped mode */
5505b81b6b3SRodney W. Grimes }
5515b81b6b3SRodney W. Grimes 
5525b81b6b3SRodney W. Grimes /*
5535b81b6b3SRodney W. Grimes  * Convert kernel VA to physical address
5545b81b6b3SRodney W. Grimes  */
555aaf08d94SGarrett Wollman u_long
5567f8cb368SDavid Greenman kvtop(void *addr)
5575b81b6b3SRodney W. Grimes {
5585b81b6b3SRodney W. Grimes 	vm_offset_t va;
5595b81b6b3SRodney W. Grimes 
560ed7fcbd0SDavid Greenman 	va = pmap_kextract((vm_offset_t)addr);
5615b81b6b3SRodney W. Grimes 	if (va == 0)
5625b81b6b3SRodney W. Grimes 		panic("kvtop: zero page frame");
5637f8cb368SDavid Greenman 	return((u_long)va);
5645b81b6b3SRodney W. Grimes }
5655b81b6b3SRodney W. Grimes 
5665b81b6b3SRodney W. Grimes extern vm_map_t phys_map;
5675b81b6b3SRodney W. Grimes 
5685b81b6b3SRodney W. Grimes /*
5695b81b6b3SRodney W. Grimes  * Map an IO request into kernel virtual address space.  Requests fall into
5705b81b6b3SRodney W. Grimes  * one of five categories:
5715b81b6b3SRodney W. Grimes  *
5725b81b6b3SRodney W. Grimes  *	B_PHYS|B_UAREA:	User u-area swap.
5735b81b6b3SRodney W. Grimes  *			Address is relative to start of u-area (p_addr).
5745b81b6b3SRodney W. Grimes  *	B_PHYS|B_PAGET:	User page table swap.
5755b81b6b3SRodney W. Grimes  *			Address is a kernel VA in usrpt (Usrptmap).
5765b81b6b3SRodney W. Grimes  *	B_PHYS|B_DIRTY:	Dirty page push.
5775b81b6b3SRodney W. Grimes  *			Address is a VA in proc2's address space.
5785b81b6b3SRodney W. Grimes  *	B_PHYS|B_PGIN:	Kernel pagein of user pages.
5795b81b6b3SRodney W. Grimes  *			Address is VA in user's address space.
5805b81b6b3SRodney W. Grimes  *	B_PHYS:		User "raw" IO request.
5815b81b6b3SRodney W. Grimes  *			Address is VA in user's address space.
5825b81b6b3SRodney W. Grimes  *
5835b81b6b3SRodney W. Grimes  * All requests are (re)mapped into kernel VA space via the useriomap
5845b81b6b3SRodney W. Grimes  * (a name with only slightly more meaning than "kernelmap")
5855b81b6b3SRodney W. Grimes  */
586381fe1aaSGarrett Wollman void
5875b81b6b3SRodney W. Grimes vmapbuf(bp)
5885b81b6b3SRodney W. Grimes 	register struct buf *bp;
5895b81b6b3SRodney W. Grimes {
5905b81b6b3SRodney W. Grimes 	register int npf;
5915b81b6b3SRodney W. Grimes 	register caddr_t addr;
5925b81b6b3SRodney W. Grimes 	register long flags = bp->b_flags;
5935b81b6b3SRodney W. Grimes 	struct proc *p;
5945b81b6b3SRodney W. Grimes 	int off;
5955b81b6b3SRodney W. Grimes 	vm_offset_t kva;
5965b81b6b3SRodney W. Grimes 	register vm_offset_t pa;
5975b81b6b3SRodney W. Grimes 
5985b81b6b3SRodney W. Grimes 	if ((flags & B_PHYS) == 0)
5995b81b6b3SRodney W. Grimes 		panic("vmapbuf");
6005b81b6b3SRodney W. Grimes 	addr = bp->b_saveaddr = bp->b_un.b_addr;
6015b81b6b3SRodney W. Grimes 	off = (int)addr & PGOFSET;
6025b81b6b3SRodney W. Grimes 	p = bp->b_proc;
6035b81b6b3SRodney W. Grimes 	npf = btoc(round_page(bp->b_bcount + off));
6045b81b6b3SRodney W. Grimes 	kva = kmem_alloc_wait(phys_map, ctob(npf));
6055b81b6b3SRodney W. Grimes 	bp->b_un.b_addr = (caddr_t) (kva + off);
6065b81b6b3SRodney W. Grimes 	while (npf--) {
6075b81b6b3SRodney W. Grimes 		pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr);
6085b81b6b3SRodney W. Grimes 		if (pa == 0)
6095b81b6b3SRodney W. Grimes 			panic("vmapbuf: null page frame");
6105b81b6b3SRodney W. Grimes 		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
6115b81b6b3SRodney W. Grimes 			   VM_PROT_READ|VM_PROT_WRITE, TRUE);
6125b81b6b3SRodney W. Grimes 		addr += PAGE_SIZE;
6135b81b6b3SRodney W. Grimes 		kva += PAGE_SIZE;
6145b81b6b3SRodney W. Grimes 	}
6155b81b6b3SRodney W. Grimes }
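
/*
 * Worked example of the mapping arithmetic above, illustration only
 * (addresses are made up): a 3000-byte request at user address
 * 0x20003f80 has off == 0xf80, round_page(3000 + 0xf80) == 8192, so
 * npf == 2 pages and the caller's data continues at kva + 0xf80.
 */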
6165b81b6b3SRodney W. Grimes 
6175b81b6b3SRodney W. Grimes /*
6185b81b6b3SRodney W. Grimes  * Free the io map PTEs associated with this IO operation.
6195b81b6b3SRodney W. Grimes  * We also invalidate the TLB entries and restore the original b_addr.
6205b81b6b3SRodney W. Grimes  */
621381fe1aaSGarrett Wollman void
6225b81b6b3SRodney W. Grimes vunmapbuf(bp)
6235b81b6b3SRodney W. Grimes 	register struct buf *bp;
6245b81b6b3SRodney W. Grimes {
6255b81b6b3SRodney W. Grimes 	register int npf;
6265b81b6b3SRodney W. Grimes 	register caddr_t addr = bp->b_un.b_addr;
6275b81b6b3SRodney W. Grimes 	vm_offset_t kva;
6285b81b6b3SRodney W. Grimes 
6295b81b6b3SRodney W. Grimes 	if ((bp->b_flags & B_PHYS) == 0)
6305b81b6b3SRodney W. Grimes 		panic("vunmapbuf");
6315b81b6b3SRodney W. Grimes 	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
6325b81b6b3SRodney W. Grimes 	kva = (vm_offset_t)((int)addr & ~PGOFSET);
6335b81b6b3SRodney W. Grimes 	kmem_free_wakeup(phys_map, kva, ctob(npf));
6345b81b6b3SRodney W. Grimes 	bp->b_un.b_addr = bp->b_saveaddr;
6355b81b6b3SRodney W. Grimes 	bp->b_saveaddr = NULL;
6365b81b6b3SRodney W. Grimes }
6375b81b6b3SRodney W. Grimes 
6385b81b6b3SRodney W. Grimes /*
6395b81b6b3SRodney W. Grimes  * Force reset the processor by invalidating the entire address space!
6405b81b6b3SRodney W. Grimes  */
6417f8cb368SDavid Greenman void
6425b81b6b3SRodney W. Grimes cpu_reset()
{
6435b81b6b3SRodney W. Grimes 
6445b81b6b3SRodney W. Grimes 	/* force a shutdown by unmapping entire address space! */
6455b81b6b3SRodney W. Grimes 	bzero((caddr_t) PTD, NBPG);
6465b81b6b3SRodney W. Grimes 
6475b81b6b3SRodney W. Grimes 	/* "good night, sweet prince .... <THUNK!>" */
6485b81b6b3SRodney W. Grimes 	tlbflush();
6495b81b6b3SRodney W. Grimes 	/* NOTREACHED */
6507f8cb368SDavid Greenman 	while(1);
6515b81b6b3SRodney W. Grimes }
652b9d60b3fSDavid Greenman 
653b9d60b3fSDavid Greenman /*
654b9d60b3fSDavid Greenman  * Grow the user stack to allow for 'sp'. This version grows the stack in
65529360eb0SDavid Greenman  *	chunks of SGROWSIZ.
656b9d60b3fSDavid Greenman  */
657b9d60b3fSDavid Greenman int
658b9d60b3fSDavid Greenman grow(p, sp)
659b9d60b3fSDavid Greenman 	struct proc *p;
660b9d60b3fSDavid Greenman 	int sp;
661b9d60b3fSDavid Greenman {
662b9d60b3fSDavid Greenman 	unsigned int nss;
663b9d60b3fSDavid Greenman 	caddr_t v;
664b9d60b3fSDavid Greenman 	struct vmspace *vm = p->p_vmspace;
665b9d60b3fSDavid Greenman 
666b9d60b3fSDavid Greenman 	if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
667b9d60b3fSDavid Greenman 	    return (1);
668b9d60b3fSDavid Greenman 
669b9d60b3fSDavid Greenman 	nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE);
670b9d60b3fSDavid Greenman 
671b9d60b3fSDavid Greenman 	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
672b9d60b3fSDavid Greenman 		return (0);
673b9d60b3fSDavid Greenman 
674b9d60b3fSDavid Greenman 	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
67529360eb0SDavid Greenman 	    SGROWSIZ) < nss) {
676b9d60b3fSDavid Greenman 		int grow_amount;
677b9d60b3fSDavid Greenman 		/*
678b9d60b3fSDavid Greenman 		 * If necessary, grow the VM that the stack occupies
679b9d60b3fSDavid Greenman 		 * to allow for the rlimit. This allows us to not have
680b9d60b3fSDavid Greenman 		 * to allocate all of the VM up-front in execve (which
681b9d60b3fSDavid Greenman 		 * is expensive).
682b9d60b3fSDavid Greenman 		 * Grow the VM by the amount requested rounded up to
68329360eb0SDavid Greenman 		 * the nearest SGROWSIZ to provide for some hysteresis.
684b9d60b3fSDavid Greenman 		 */
68529360eb0SDavid Greenman 		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ);
686b9d60b3fSDavid Greenman 		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
68729360eb0SDavid Greenman 		    SGROWSIZ) - grow_amount;
688b9d60b3fSDavid Greenman 		/*
68929360eb0SDavid Greenman 		 * If there isn't enough room to extend by SGROWSIZ, then
690b9d60b3fSDavid Greenman 		 * just extend to the maximum size
691b9d60b3fSDavid Greenman 		 */
692b9d60b3fSDavid Greenman 		if (v < vm->vm_maxsaddr) {
693b9d60b3fSDavid Greenman 			v = vm->vm_maxsaddr;
694b9d60b3fSDavid Greenman 			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
695b9d60b3fSDavid Greenman 		}
696b9d60b3fSDavid Greenman 		if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
697b9d60b3fSDavid Greenman 		    grow_amount, FALSE) != KERN_SUCCESS) {
698b9d60b3fSDavid Greenman 			return (0);
699b9d60b3fSDavid Greenman 		}
700b9d60b3fSDavid Greenman 		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
701b9d60b3fSDavid Greenman 	}
702b9d60b3fSDavid Greenman 
703b9d60b3fSDavid Greenman 	return (1);
704b9d60b3fSDavid Greenman }
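
/*
 * Worked example of the hysteresis above, assuming SGROWSIZ is 128K
 * purely for the sake of the numbers: if vm_ssize covers 64K and a
 * fault needs nss == 68K, grow_amount is roundup(68K - 64K, 128K) ==
 * 128K, so the stack VM grows in one large step instead of page by
 * page.
 */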
705