xref: /freebsd/sys/kern/vfs_bio.c (revision bcd92649c9952c9c9e8845dbd34276a60dd16664)
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.103 1996/10/06 07:50:05 dyson Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * was provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

static void vfs_update __P((void));
static struct	proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);

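/*
 * Set while a process is sleeping for a free buffer header; brelse()
 * and bqrelse() issue the corresponding wakeup when headers free up.
 */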
int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

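/*
 * Cap on a buffer's b_usecount; getblk() bumps the count up to this
 * limit, and getnewbuf() decays it when hunting for reusable headers.
 */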
#define BUF_MAXUSE 16

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated on the assumption that all
 * filesystem blocks are 8K.  If you happen to use a 16K filesystem,
 * the size of the buffer cache is still the same as it would be for
 * 8K filesystems.  This keeps the size of the buffer cache "in check"
 * for big block filesystems.
 */
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	/*
	 * Handle ordered writes here.
	 * If the write was originally flagged as ordered,
	 * then we check to see if it was converted to async.
	 * If it was converted to async, and is done now, then
	 * we release the buffer.  Otherwise we clear the
	 * ordered flag because it is not needed anymore.
	 *
	 * Note that biodone has been modified so that it does
	 * not release ordered buffers.  This allows us to have
	 * a chance to determine whether or not the driver
	 * has set the async flag in the strategy routine.  Otherwise,
	 * if biodone were not modified, the buffer may have been
	 * reused before we have had a chance to check the flag.
	 */

	if ((oldflags & B_ORDERED) == B_ORDERED) {
		int s;
		s = splbio();
		if (bp->b_flags & B_ASYNC) {
			if (bp->b_flags & B_DONE) {
				if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
					brelse(bp);
				else
					bqrelse(bp);
			}
			splx(s);
			return (0);
		} else {
			bp->b_flags &= ~B_ORDERED;
		}
		splx(s);
	}

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

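/*
 * Default implementation of the VOP_BWRITE operation: simply hand the
 * buffer to bwrite().
 */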
int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write. (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also that if the pageout
	 * daemon is requesting a sync, there might not be enough memory
	 * to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing\n");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
						bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
		/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
		/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
		/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer back to the appropriate free list, keeping its
 * contents intact.  Unlike brelse(), no invalidation or VMIO rundown
 * is performed.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

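/*
 * Release the VM pages backing a VMIO buffer: unwire each page and,
 * depending on memory pressure and whether the free is synchronous,
 * cache, deactivate, or free it.  The buffer then loses its VMIO
 * identity and its vnode association.
 */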
static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		vm_page_unwire(m);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			/*
			 * If this is an async free -- we cannot place
			 * pages onto the cache queue, so our policy for
			 * such buffers is to avoid the cache queue, and
			 * only modify the active queue or free queue.
			 */
			if ((bp->b_flags & B_ASYNC) == 0) {

			/*
			 * In the case of sync buffer frees, we can do pretty much
			 * anything to any of the memory queues.  Specifically,
			 * the cache queue is free to be modified.
			 */
				if (m->valid) {
					if (m->dirty == 0)
						vm_page_test_dirty(m);
					/*
					 * this keeps pressure off of the process memory
					 */
					if ((vm_swap_size == 0) ||
						(cnt.v_free_count < cnt.v_free_min)) {
						if ((m->dirty == 0) &&
							(m->hold_count == 0))
							vm_page_cache(m);
						else
							vm_page_deactivate(m);
					}
				} else if (m->hold_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
				}
			} else {
				/*
				 * If async, then at least we clear the
				 * act_count.
				 */
				m->act_count = 0;
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.  This is the
 * internal, unlocked version that walks the buffer hash chain directly;
 * callers are expected to be at splbio already.
 */
__inline struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}


/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int nbyteswritten = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O.
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer,
			(PRIBIO + 1) | slpflag, "newbuf", slptimeo);
		return (0);
	}

#if defined(DIAGNOSTIC)
	if (bp->b_flags & B_BUSY) {
		panic("getnewbuf: busy buffer on free list\n");
	}
#endif

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
			(vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_ASYNC;
		vfs_vmio_release(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 4;
	if (bufspace >= maxbufspace + nbyteswritten) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
		((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
				(PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't happen
		 * but do when filesystems don't handle the size changes correctly.)
		 * We are conservative on metadata and don't just extend the buffer
		 * but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if (bp->b_flags & B_VMIO) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef	PC98
		/*
		 * 1024byte/sector support
		 */
#define B_XXX2 0x8000000
		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, 0)) == 0)
		;
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}


/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course, the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer grows.
			 */
			if ((bufmallocspace < maxbufmallocspace) &&
				(bp->b_bufsize == 0) &&
				(mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first allocation,
			 * then we revert to the page-allocation scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
#if defined(DIAGNOSTIC)
					if (m == bogus_page)
						panic("allocbuf: bogus page found");
#endif
					s = splvm();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
		doretry:
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
							!vm_page_is_valid(m,
							(vm_offset_t) ((toff + off) & PAGE_MASK),
							bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						s = splvm();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PVM, "pgtblk", 0);
						}
						splx(s);
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
							((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
								(cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
							!vm_page_is_valid(m,
							(vm_offset_t) ((toff + off) & PAGE_MASK),
							bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
					bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += bp->b_bufsize;
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vm_page_set_validclean(m,
					(vm_offset_t) (foff & PAGE_MASK), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
						(int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & B_ORDERED) == 0) {
			if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
				brelse(bp);
			else
				bqrelse(bp);
		}
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}

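/*
 * Count the buffers sitting on the locked queue.
 */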
int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	(void) spl0();		/* XXX redundant?  wrong place? */
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

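/*
 * Sysctl handler for kern.update: stores the new interval and wakes
 * the update daemon so the change takes effect immediately.
 */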
static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
		oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");


/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = bp->b_vp->v_object;
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify) {
				vm_page_set_validclean(m,
					(vm_offset_t) (foff & PAGE_MASK), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_validclean(m,
					((vm_offset_t) foff & PAGE_MASK), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

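/*
 * Zero out the still-invalid portions of a buffer.  For VMIO buffers
 * only the DEV_BSIZE chunks that the VM system does not already hold
 * valid are cleared; non-VMIO buffers are simply zeroed whole.
 */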
void
vfs_bio_clrbuf(struct buf *bp) {
	int i;
	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;
			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;
				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into and out of
 * a buffer's address space.  The pages are anonymous and are not
 * associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = to >> PAGE_SHIFT;
}

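/*
 * Counterpart to vm_hold_load_pages: unmap and free the anonymous
 * pages backing the range [from, to) of the buffer's address space.
 */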
void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	from = round_page(from);
	to = round_page(to);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
					bp->b_blkno, bp->b_lblkno);
			}
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = from >> PAGE_SHIFT;
}
1875