/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id: vm_machdep.c,v 1.18 1994/04/05 03:23:09 davidg Exp $
 */

#include "npx.h"
#include "param.h"
#include "systm.h"
#include "proc.h"
#include "malloc.h"
#include "buf.h"
#include "user.h"

#include "../include/cpu.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"

#define b_cylin b_resid

#ifndef NOBOUNCE

caddr_t		bouncememory;
vm_offset_t	bouncepa, bouncepaend;
int		bouncepages, bpwait;
vm_map_t	io_map;
int		bmwait, bmfreeing;

#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
int		bounceallocarraysize;
unsigned	*bounceallocarray;
int		bouncefree;

#define SIXTEENMEG (4096*4096)
#define MAXBKVA 1024

/* special list that can be used at interrupt time for eventual kva free */
struct kvasfree {
	vm_offset_t addr;
	vm_offset_t size;
} kvaf[MAXBKVA];

int		kvasfreecnt;

vm_offset_t vm_bounce_kva();
/*
 * get bounce buffer pages (count physically contiguous)
 * (only count == 1 implemented now)
 */
vm_offset_t
vm_bounce_page_find(count)
	int count;
{
	int bit;
	int s,i;

	if (count != 1)
		panic("vm_bounce_page_find -- no support for > 1 page yet!!!");

	s = splbio();
retry:
	for (i = 0; i < bounceallocarraysize; i++) {
		if (bounceallocarray[i] != 0xffffffff) {
			if ((bit = ffs(~bounceallocarray[i])) != 0) {
				bounceallocarray[i] |= 1 << (bit - 1);
				bouncefree -= count;
				splx(s);
				return bouncepa + (i * BITS_IN_UNSIGNED + (bit - 1)) * NBPG;
			}
		}
	}
	bpwait = 1;
	tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
	goto retry;
}
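
/*
 * Illustrative note (not part of the original code): the scan above maps
 * a free bit back to a physical address.  For example, assuming NBPG is
 * 4096 and 32-bit words, a free bit found at i == 1, bit == 6 names page
 * index 1 * 32 + (6 - 1) == 37, so the routine returns
 * bouncepa + 37 * NBPG.
 */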

void
vm_bounce_kva_free(addr, size, now)
	vm_offset_t addr;
	vm_offset_t size;
	int now;
{
	int s = splbio();
	kvaf[kvasfreecnt].addr = addr;
	kvaf[kvasfreecnt++].size = size;
	if( now)
		vm_bounce_kva(0,0);
	else
		wakeup((caddr_t) io_map);
	splx(s);
}

/*
 * free count bounce buffer pages
 */
void
vm_bounce_page_free(pa, count)
	vm_offset_t pa;
	int count;
{
	int allocindex;
	int index;
	int bit;

	if (count != 1)
		panic("vm_bounce_page_free -- no support for > 1 page yet!!!\n");

	index = (pa - bouncepa) / NBPG;

	if ((index < 0) || (index >= bouncepages))
		panic("vm_bounce_page_free -- bad index\n");

	allocindex = index / BITS_IN_UNSIGNED;
	bit = index % BITS_IN_UNSIGNED;

	bounceallocarray[allocindex] &= ~(1 << bit);

	bouncefree += count;
	if (bpwait) {
		bpwait = 0;
		wakeup((caddr_t) &bounceallocarray);
	}
}
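
/*
 * Illustrative note (not part of the original code): the free path inverts
 * the mapping used by vm_bounce_page_find().  Continuing the example above,
 * freeing pa == bouncepa + 37 * NBPG gives index == 37, so allocindex ==
 * 37 / 32 == 1 and bit == 37 % 32 == 5, and bit 5 of word 1 is cleared.
 */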

/*
 * allocate count bounce buffer kva pages
 */
vm_offset_t
vm_bounce_kva(count, waitok)
	int count;
	int waitok;
{
	int tofree;
	int i;
	int startfree;
	vm_offset_t kva = 0;
	int s = splbio();
	int size = count;
	startfree = 0;
more:
	if (!bmfreeing && (tofree = kvasfreecnt) != 0) {
		bmfreeing = 1;
		for (i = startfree; i < kvasfreecnt; i++) {
			/*
			 * if we have a kva of the right size, no sense
			 * in freeing/reallocating...
			 * might affect fragmentation short term, but
			 * as long as the size of the io_map is
			 * significantly more than the maximum transfer
			 * size, I don't think that it is a problem.
			 */
			pmap_remove(kernel_pmap,
				kvaf[i].addr, kvaf[i].addr + kvaf[i].size);
			if( size && !kva && kvaf[i].size == size) {
				kva = kvaf[i].addr;
			} else {
				kmem_free_wakeup(io_map, kvaf[i].addr,
					kvaf[i].size);
			}
		}
		if (kvasfreecnt != tofree) {
			startfree = i;
			bmfreeing = 0;
			goto more;
		}
		kvasfreecnt = 0;
		bmfreeing = 0;
	}

	if( size == 0) {
		splx(s);
		return NULL;
	}

	if (!kva && !(kva = kmem_alloc_pageable(io_map, size))) {
		if( !waitok) {
			splx(s);
			return NULL;
		}
		bmwait = 1;
		tsleep((caddr_t) io_map, PRIBIO, "bmwait", 0);
		goto more;
	}
	splx(s);

	return kva;
}
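
/*
 * Illustrative sketch (not part of the original code): the kvaf[] list
 * lets interrupt-time callers defer the expensive kva free.  A caller at
 * interrupt time queues the range and lets the next allocator drain it:
 *
 *	vm_bounce_kva_free(addr, size, 0);	queue only, wake any waiter
 *
 * while a top-half caller may force the drain immediately:
 *
 *	vm_bounce_kva_free(addr, size, 1);	calls vm_bounce_kva(0, 0)
 *
 * vm_bounce_kva(0, 0) performs only the drain step above and returns NULL.
 */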

/*
 * do the things necessary to the struct buf to implement
 * bounce buffers...  inserted before the disk sort
 */
void
vm_bounce_alloc(bp)
	struct buf *bp;
{
	int countvmpg;
	vm_offset_t vastart, vaend;
	vm_offset_t vapstart, vapend;
	vm_offset_t va, kva;
	vm_offset_t pa;
	int dobounceflag = 0;
	int i;

	if (bouncepages == 0)
		return;

	if (bp->b_bufsize < bp->b_bcount) {
		printf("vm_bounce_alloc: b_bufsize(%d) < b_bcount(%d) !!!!\n",
			bp->b_bufsize, bp->b_bcount);
		bp->b_bufsize = bp->b_bcount;
	}

	vastart = (vm_offset_t) bp->b_un.b_addr;
	vaend = (vm_offset_t) bp->b_un.b_addr + bp->b_bufsize;

	vapstart = i386_trunc_page(vastart);
	vapend = i386_round_page(vaend);
	countvmpg = (vapend - vapstart) / NBPG;

/*
 * if any page is above 16MB, then go into bounce-buffer mode
 */
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG)
			++dobounceflag;
		va += NBPG;
	}
	if (dobounceflag == 0)
		return;

	if (bouncepages < dobounceflag)
		panic("Not enough bounce buffers!!!");

/*
 * allocate a replacement kva for b_addr
 */
	kva = vm_bounce_kva(countvmpg*NBPG, 1);
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG) {
			/*
			 * allocate a replacement page
			 */
			vm_offset_t bpa = vm_bounce_page_find(1);
			pmap_kenter(kva + (NBPG * i), bpa);
			/*
			 * if we are writing, then copy the data into the page
			 */
			if ((bp->b_flags & B_READ) == 0) {
				pmap_update();
				bcopy((caddr_t) va, (caddr_t) kva + (NBPG * i), NBPG);
			}
		} else {
			/*
			 * use original page
			 */
			pmap_kenter(kva + (NBPG * i), pa);
		}
		va += NBPG;
	}
	pmap_update();

/*
 * flag the buffer as being bounced
 */
	bp->b_flags |= B_BOUNCE;
/*
 * save the original buffer kva
 */
	bp->b_savekva = bp->b_un.b_addr;
/*
 * put our new kva into the buffer (offset by original offset)
 */
	bp->b_un.b_addr = (caddr_t) (((vm_offset_t) kva) |
				((vm_offset_t) bp->b_savekva & (NBPG - 1)));
	return;
}
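
/*
 * Illustrative note (not part of the original code): only pages whose
 * physical address is at or above 16MB are replaced, typically because
 * ISA DMA cannot reach them.  The replacement kva keeps the original
 * sub-page offset; e.g. with NBPG == 4096 and b_addr == 0xf0123210, the
 * low bits 0x210 are OR'd back in, so b_un.b_addr == kva | 0x210 after
 * bouncing.
 */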

/*
 * hook into biodone to free bounce buffer
 */
void
vm_bounce_free(bp)
	struct buf *bp;
{
	int i;
	vm_offset_t origkva, bouncekva;
	vm_offset_t vastart, vaend;
	vm_offset_t vapstart, vapend;
	int countvmpg;
	vm_offset_t bcount;

/*
 * if this isn't a bounced buffer, then just return
 */
	if ((bp->b_flags & B_BOUNCE) == 0)
		return;

	origkva = (vm_offset_t) bp->b_savekva;
	bouncekva = (vm_offset_t) bp->b_un.b_addr;

	vastart = bouncekva;
	vaend = bouncekva + bp->b_bufsize;
	bcount = bp->b_bufsize;

	vapstart = i386_trunc_page(vastart);
	vapend = i386_round_page(vaend);

	countvmpg = (vapend - vapstart) / NBPG;

/*
 * check every page in the kva space for b_addr
 */
	for (i = 0; i < countvmpg; i++) {
		vm_offset_t mybouncepa;
		vm_offset_t copycount;

		copycount = i386_round_page(bouncekva + 1) - bouncekva;
		mybouncepa = pmap_kextract(i386_trunc_page(bouncekva));

/*
 * if this is a bounced pa, then process as one
 */
		if ((mybouncepa >= bouncepa) && (mybouncepa < bouncepaend)) {
			if (copycount > bcount)
				copycount = bcount;
/*
 * if this is a read, then copy from bounce buffer into original buffer
 */
			if (bp->b_flags & B_READ)
				bcopy((caddr_t) bouncekva, (caddr_t) origkva, copycount);
/*
 * free the bounce allocation
 */
			vm_bounce_page_free(i386_trunc_page(mybouncepa), 1);
		}

		origkva += copycount;
		bouncekva += copycount;
		bcount -= copycount;
	}

/*
 * add the old kva into the "to free" list
 */
	bouncekva = i386_trunc_page((vm_offset_t) bp->b_un.b_addr);
	vm_bounce_kva_free( bouncekva, countvmpg*NBPG, 0);
	if (bmwait) {
		/*
		 * if anyone is waiting on the bounce-map, then wakeup
		 */
		wakeup((caddr_t) io_map);
		bmwait = 0;
	}

	bp->b_un.b_addr = bp->b_savekva;
	bp->b_savekva = 0;
	bp->b_flags &= ~B_BOUNCE;

	return;
}
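
/*
 * Illustrative sketch (not part of the original code): the intended
 * driver-side pairing is vm_bounce_alloc() before the transfer is queued
 * and vm_bounce_free() at completion time:
 *
 *	vm_bounce_alloc(bp);	remap, copy out if writing
 *	... run the DMA transfer ...
 *	vm_bounce_free(bp);	copy back if reading, restore b_addr
 *
 * so a B_READ buffer has its data copied back to the original pages
 * before the buffer's owner sees the completion.
 */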

#endif /* NOBOUNCE */

/*
 * init the bounce buffer system
 */
void
vm_bounce_init()
{
	vm_offset_t minaddr, maxaddr;

	io_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, MAXBKVA * NBPG, FALSE);
	kvasfreecnt = 0;

#ifndef NOBOUNCE
	if (bouncepages == 0)
		return;

	bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
	bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);

	if (!bounceallocarray)
		panic("Cannot allocate bounce resource array\n");

	bzero(bounceallocarray, bounceallocarraysize * sizeof(unsigned));

	bouncepa = pmap_kextract((vm_offset_t) bouncememory);
	bouncepaend = bouncepa + bouncepages * NBPG;
	bouncefree = bouncepages;
#endif

}
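
/*
 * Illustrative note (not part of the original code): the allocation bitmap
 * is sized in words of BITS_IN_UNSIGNED bits.  For example, bouncepages ==
 * 48 gives bounceallocarraysize == (48 + 31) / 32 == 2, i.e. two 32-bit
 * words to track the 48 bounce pages.
 */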


static void
cldiskvamerge( kvanew, orig1, orig1cnt, orig2, orig2cnt)
	vm_offset_t kvanew;
	vm_offset_t orig1, orig1cnt;
	vm_offset_t orig2, orig2cnt;
{
	int i;
/*
 * enter the transfer physical addresses into the new kva
 */
	for(i=0;i<orig1cnt;i++) {
		vm_offset_t pa;
		pa = pmap_kextract((caddr_t) orig1 + i * PAGE_SIZE);
		pmap_kenter(kvanew + i * PAGE_SIZE, pa);
	}

	for(i=0;i<orig2cnt;i++) {
		vm_offset_t pa;
		pa = pmap_kextract((caddr_t) orig2 + i * PAGE_SIZE);
		pmap_kenter(kvanew + (i + orig1cnt) * PAGE_SIZE, pa);
	}
	pmap_update();
}
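
/*
 * Illustrative note (not part of the original code): after the merge, the
 * new kva window is physically backed by both transfers back to back.  If
 * orig1 covers 2 pages and orig2 covers 3, then pages 0..1 of kvanew map
 * orig1's frames and pages 2..4 map orig2's frames, so one contiguous
 * virtual buffer describes the combined I/O.
 */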

void
cldisksort(struct buf *dp, struct buf *bp, vm_offset_t maxio)
{
	register struct buf *ap, *newbp;
	vm_offset_t orig1pages, orig2pages;
	vm_offset_t orig1begin, orig2begin;
	vm_offset_t kvanew;

	/*
	 * If nothing on the activity queue, then
	 * we become the only thing.
	 */
	ap = dp->b_actf;
	if(ap == NULL) {
		dp->b_actf = bp;
		dp->b_actl = bp;
		bp->av_forw = NULL;
		return;
	}

	/*
	 * If we lie after the first (currently active)
	 * request, then we must locate the second request list
	 * and add ourselves to it.
	 */

	if (bp->b_cylin < ap->b_cylin) {
		while (ap->av_forw) {
			/*
			 * Check for an ``inversion'' in the
			 * normally ascending cylinder numbers,
			 * indicating the start of the second request list.
			 */
			if (ap->av_forw->b_cylin < ap->b_cylin) {
				/*
				 * Search the second request list
				 * for the first request at a larger
				 * cylinder number.  We go before that;
				 * if there is no such request, we go at end.
				 */
				do {
					if (bp->b_cylin < ap->av_forw->b_cylin)
						goto insert;
					ap = ap->av_forw;
				} while (ap->av_forw);
				goto insert;		/* after last */
			}
			ap = ap->av_forw;
		}
		/*
		 * No inversions... we will go after the last, and
		 * be the first request in the second request list.
		 */
		goto insert;
	}
	/*
	 * Request is at/after the current request...
	 * sort in the first request list.
	 */
	while (ap->av_forw) {
		/*
		 * We want to go after the current request
		 * if there is an inversion after it (i.e. it is
		 * the end of the first request list), or if
		 * the next request is a larger cylinder than our request.
		 */
		if (ap->av_forw->b_cylin < ap->b_cylin ||
		    bp->b_cylin < ap->av_forw->b_cylin)
			goto insert;
		ap = ap->av_forw;
	}

insert:
	/*
	 * we currently only cluster I/O transfers that are at page-aligned
	 * kvas and transfers that are multiples of page lengths.
	 */
	if(((bp->b_bcount & PAGE_MASK) == 0) &&
		(((vm_offset_t) bp->b_un.b_addr & PAGE_MASK) == 0)) {
		/*
		 * merge with previous?
		 * conditions:
		 *	1) We reside physically immediately after the previous block.
		 *	2) The previous block is not first on the device queue because
		 *	   such a block might be active.
		 *	3) The mode of the two I/Os is identical.
		 *	4) The previous kva is page aligned and the previous transfer
		 *	   is a multiple of a page in length.
		 *	5) And the total I/O size would be below the maximum.
		 */
		if( (ap->b_blkno + (ap->b_bcount / DEV_BSIZE) == bp->b_blkno) &&
			(dp->b_actf != ap) &&
			((ap->b_flags & ~B_CLUSTER) == bp->b_flags) &&
			((ap->b_bcount & PAGE_MASK) == 0) &&
			(((vm_offset_t) ap->b_un.b_addr & PAGE_MASK) == 0) &&
			(ap->b_bcount + bp->b_bcount < maxio)) {

			orig1begin = (vm_offset_t) ap->b_un.b_addr;
			orig1pages = ap->b_bcount / PAGE_SIZE;

			orig2begin = (vm_offset_t) bp->b_un.b_addr;
			orig2pages = bp->b_bcount / PAGE_SIZE;
			/*
			 * see if we can allocate a kva; if we cannot, then don't
			 * cluster.
			 */
			kvanew = vm_bounce_kva( PAGE_SIZE * (orig1pages + orig2pages), 0);
			if( !kvanew) {
				goto nocluster;
			}

			if( (ap->b_flags & B_CLUSTER) == 0) {

				/*
				 * get a physical buf pointer
				 */
				newbp = (struct buf *)trypbuf();
				if( !newbp) {
					vm_bounce_kva_free( kvanew, PAGE_SIZE * (orig1pages + orig2pages), 1);
					goto nocluster;
				}

				cldiskvamerge( kvanew, orig1begin, orig1pages, orig2begin, orig2pages);

				/*
				 * build the new bp to be handed off to the device
				 */

				*newbp = *ap;
				newbp->b_flags |= B_CLUSTER;
				newbp->b_un.b_addr = (caddr_t) kvanew;
				newbp->b_bcount += bp->b_bcount;
				newbp->b_bufsize = newbp->b_bcount;
				newbp->b_clusterf = ap;
				newbp->b_clusterl = bp;

				/*
				 * enter the new bp onto the device queue
				 */
				if( ap->av_forw)
					ap->av_forw->av_back = newbp;
				else
					dp->b_actl = newbp;

				if( dp->b_actf != ap )
					ap->av_back->av_forw = newbp;
				else
					dp->b_actf = newbp;

				/*
				 * enter the previous bps onto the cluster queue
				 */
				ap->av_forw = bp;
				bp->av_back = ap;

				ap->av_back = NULL;
				bp->av_forw = NULL;

			} else {
				cldiskvamerge( kvanew, orig1begin, orig1pages, orig2begin, orig2pages);
				/*
				 * free the old kva
				 */
				vm_bounce_kva_free( orig1begin, ap->b_bufsize, 0);

				ap->b_un.b_addr = (caddr_t) kvanew;

				ap->b_clusterl->av_forw = bp;
				bp->av_forw = NULL;
				bp->av_back = ap->b_clusterl;
				ap->b_clusterl = bp;

				ap->b_bcount += bp->b_bcount;
				ap->b_bufsize = ap->b_bcount;
			}
			return;
		/*
		 * merge with next?
		 * conditions:
		 *	1) We reside physically before the next block.
		 *	2) The mode of the two I/Os is identical.
		 *	3) The next kva is page aligned and the next transfer
		 *	   is a multiple of a page in length.
		 *	4) And the total I/O size would be below the maximum.
		 */
		} else if( ap->av_forw &&
			(bp->b_blkno + (bp->b_bcount / DEV_BSIZE) == ap->av_forw->b_blkno) &&
			(bp->b_flags == (ap->av_forw->b_flags & ~B_CLUSTER)) &&
			((ap->av_forw->b_bcount & PAGE_MASK) == 0) &&
			(((vm_offset_t) ap->av_forw->b_un.b_addr & PAGE_MASK) == 0) &&
			(ap->av_forw->b_bcount + bp->b_bcount < maxio)) {

			orig1begin = (vm_offset_t) bp->b_un.b_addr;
			orig1pages = bp->b_bcount / PAGE_SIZE;

			orig2begin = (vm_offset_t) ap->av_forw->b_un.b_addr;
			orig2pages = ap->av_forw->b_bcount / PAGE_SIZE;

			/*
			 * see if we can allocate a kva; if we cannot, then don't
			 * cluster.
			 */
			kvanew = vm_bounce_kva( PAGE_SIZE * (orig1pages + orig2pages), 0);
			if( !kvanew) {
				goto nocluster;
			}

			/*
			 * if next isn't a cluster, we need to create one
			 */
			if( (ap->av_forw->b_flags & B_CLUSTER) == 0) {

				/*
				 * get a physical buf pointer
				 */
				newbp = (struct buf *)trypbuf();
				if( !newbp) {
					vm_bounce_kva_free( kvanew, PAGE_SIZE * (orig1pages + orig2pages), 1);
					goto nocluster;
				}

				cldiskvamerge( kvanew, orig1begin, orig1pages, orig2begin, orig2pages);

				pmap_update();

				ap = ap->av_forw;
				*newbp = *ap;
				newbp->b_flags |= B_CLUSTER;
				newbp->b_un.b_addr = (caddr_t) kvanew;
				newbp->b_blkno = bp->b_blkno;
				newbp->b_bcount += bp->b_bcount;
				newbp->b_bufsize = newbp->b_bcount;
				newbp->b_clusterf = bp;
				newbp->b_clusterl = ap;

				if( ap->av_forw)
					ap->av_forw->av_back = newbp;
				else
					dp->b_actl = newbp;

				if( dp->b_actf != ap )
					ap->av_back->av_forw = newbp;
				else
					dp->b_actf = newbp;

				bp->av_forw = ap;
				ap->av_back = bp;

				bp->av_back = NULL;
				ap->av_forw = NULL;
			} else {
				cldiskvamerge( kvanew, orig1begin, orig1pages, orig2begin, orig2pages);
				ap = ap->av_forw;
				vm_bounce_kva_free( orig2begin, ap->b_bufsize, 0);

				ap->b_un.b_addr = (caddr_t) kvanew;
				bp->av_forw = ap->b_clusterf;
				ap->b_clusterf->av_back = bp;
				ap->b_clusterf = bp;
				bp->av_back = NULL;

				ap->b_blkno = bp->b_blkno;
				ap->b_bcount += bp->b_bcount;
				ap->b_bufsize = ap->b_bcount;

			}
			return;
		}
	}
	/*
	 * don't merge
	 */
nocluster:
	bp->av_forw = ap->av_forw;
	if( bp->av_forw)
		bp->av_forw->av_back = bp;
	else
		dp->b_actl = bp;

	ap->av_forw = bp;
	bp->av_back = ap;
}
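
/*
 * Illustrative note (not part of the original code): the sort keeps two
 * ascending runs, e.g. a queue with b_cylin values 10 20 30 | 2 5 (the
 * '|' marks the inversion).  A request for cylinder 25 sorts into the
 * first run, giving 10 20 25 30 | 2 5, while a request for cylinder 3
 * sorts into the second run, giving 10 20 30 | 2 3 5, so the head sweeps
 * outward once and then services the wrapped requests.
 */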


/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
int
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset, addr, i;
	extern char kstack[];
	extern int mvesp();

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * swtch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = mvesp() - (int)kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);
	p2->p_regs = p1->p_regs;

	/*
	 * Wire top of address space of child to its kstack.
	 * First, fault in a page of pte's to map it.
	 */
#if 0
	addr = trunc_page((u_int)vtopte(kstack));
	vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr+NBPG, FALSE);
	for (i=0; i < UPAGES; i++)
		pmap_enter(&p2->p_vmspace->vm_pmap, kstack+i*NBPG,
			   pmap_extract(kernel_pmap, ((int)p2->p_addr)+i*NBPG),
			   /*
			    * The user area has to be mapped writable because
			    * it contains the kernel stack (when CR0_WP is on
			    * on a 486 there is no user-read/kernel-write
			    * mode).  It is protected from user mode access
			    * by the segment limits.
			    */
			   VM_PROT_READ|VM_PROT_WRITE, TRUE);
#endif
	pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}
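
/*
 * Illustrative sketch (not part of the original code): like setjmp, the
 * savectx() above returns twice, so a hypothetical caller in the fork
 * path distinguishes the two processes by the return value:
 *
 *	if (cpu_fork(p1, p2)) {
 *		... running as the new child ...
 *	} else {
 *		... still the parent; the child is now runnable ...
 *	}
 */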

#ifdef notyet
/*
 * cpu_exit is called as the last action during exit.
 *
 * We change to an inactive address space and a "safe" stack,
 * passing thru an argument to the new stack. Now, safely isolated
 * from the resources we're shedding, we release the address space
 * and any remaining machine-dependent resources, including the
 * memory for the user structure and kernel stack.
 *
 * Next, we assign a dummy context to be written over by swtch,
 * calling it to send this process off to oblivion.
 * [The nullpcb allows us to minimize cost in swtch() by not having
 * a special case].
 */
struct proc *swtch_to_inactive();
volatile void
cpu_exit(p)
	register struct proc *p;
{
	static struct pcb nullpcb;	/* pcb to overwrite on last swtch */

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */

	/* move to inactive space and stack, passing arg across */
	p = swtch_to_inactive(p);

	/* drop per-process resources */
	vmspace_free(p->p_vmspace);
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));

	p->p_addr = (struct user *) &nullpcb;
	splclock();
	swtch();
	/* NOTREACHED */
}
#else
void
cpu_exit(p)
	register struct proc *p;
{

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
	splclock();
	curproc = 0;
	swtch();
	/*
	 * This is to shut up the compiler, and if swtch() failed I suppose
	 * this would be a good thing.  This keeps gcc happy because panic
	 * is a volatile void function as well.
	 */
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
/*	extern vm_map_t upages_map; */
	extern char kstack[];

	/* drop per-process resources */
	pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr,
		((vm_offset_t) p->p_addr) + ctob(UPAGES));
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	vmspace_free(p->p_vmspace);
}
#endif

/*
 * Set a red zone in the kernel stack after the u. area.
 */
void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}

/*
 * Convert kernel VA to physical address
 */
u_long
kvtop(void *addr)
{
	vm_offset_t va;

	va = pmap_kextract((vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_un.b_addr;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bufsize + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_un.b_addr = (caddr_t) (kva + off);
	while (npf--) {
		pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_kenter(kva, trunc_page(pa));
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
	pmap_update();
}
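
/*
 * Illustrative note (not part of the original code): the page count allows
 * for the sub-page offset of the user address.  For example, with 4K
 * pages, b_bufsize == 8192 and an address offset of 0x200 give
 * round_page(8192 + 0x200) == 12288, i.e. npf == 3 pages mapped even
 * though the transfer itself is only two pages long.
 */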

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr = bp->b_un.b_addr;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	npf = btoc(round_page(bp->b_bufsize + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_un.b_addr = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

/*
 * Force reset the processor by invalidating the entire address space!
 */
void
cpu_reset() {

	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t) PTD, NBPG);

	/* "good night, sweet prince .... <THUNK!>" */
	tlbflush();
	/* NOTREACHED */
	while(1);
}

/*
 * Grow the user stack to allow for 'sp'. This version grows the stack in
 *	chunks of SGROWSIZ.
 */
int
grow(p, sp)
	struct proc *p;
	int sp;
{
	unsigned int nss;
	caddr_t v;
	struct vmspace *vm = p->p_vmspace;

	if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
	    return (1);

	nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE);

	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
		return (0);

	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
	    SGROWSIZ) < nss) {
		int grow_amount;
		/*
		 * If necessary, grow the VM that the stack occupies
		 * to allow for the rlimit. This allows us to not have
		 * to allocate all of the VM up-front in execve (which
		 * is expensive).
		 * Grow the VM by the amount requested rounded up to
		 * the nearest SGROWSIZ to provide for some hysteresis.
		 */
		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ);
		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
		    SGROWSIZ) - grow_amount;
		/*
		 * If there isn't enough room to extend by SGROWSIZ, then
		 * just extend to the maximum size
		 */
		if (v < vm->vm_maxsaddr) {
			v = vm->vm_maxsaddr;
			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
		}
		if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
		    grow_amount, FALSE) != KERN_SUCCESS) {
			return (0);
		}
		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
	}

	return (1);
}
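
/*
 * Illustrative note (not part of the original code), assuming SGROWSIZ is
 * 128K (its usual i386 value) and 4K pages: a stack currently 96K deep
 * that faults at nss == 160K grows by roundup(160K - 96K, 128K) == 128K
 * in one step, so a run of later faults is absorbed without another
 * vm_allocate() call.
 */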