xref: /freebsd/sys/kern/vfs_bio.c (revision ef5d438ed4bc17ad7ece3e40fe4d1f9baf3aadf7)
1 /*
2  * Copyright (c) 1994 John S. Dyson
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice immediately at the beginning of the file, without modification,
10  *    this list of conditions, and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Absolutely no warranty of function or purpose is made by the author
15  *    John S. Dyson.
16  * 4. This work was done expressly for inclusion into FreeBSD.  Other use
17  *    is allowed if this notation is included.
18  * 5. Modifications may be freely made to this file if the above conditions
19  *    are met.
20  *
21  * $Id: vfs_bio.c,v 1.83 1996/01/06 23:58:03 davidg Exp $
22  */
23 
/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * has been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

static void vfs_update __P((void));
static struct	proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

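/*
 * BUF_MAXUSE caps b_usecount, which getblk() bumps on each hit and
 * getnewbuf() decrements before recycling an LRU buffer; the cap bounds
 * how long a frequently used buffer can resist reuse.
 */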
#define BUF_MAXUSE 8

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated assuming all filesystem blocks
 * are 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
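/*
 * Illustrative usage (a sketch, not code from this file): a typical
 * filesystem read path looks something like
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, cred, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...copy from bp->b_data...
 *	brelse(bp);
 */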
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

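/*
 * Default VOP_BWRITE implementation; simply hands the buffer to bwrite().
 */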
int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write. (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * Doing the bmap now keeps the system from needing to do it later,
	 * perhaps while attempting a sync.  Since the indirect block -- or
	 * whatever other data structure the filesystem needs -- is likely
	 * still in memory now, this is a good time to do it.  Note also
	 * that if the pageout daemon is requesting a sync, there might not
	 * be enough memory to do the bmap then, so doing it here is
	 * important.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that they think the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	if (needsbuffer) {
		needsbuffer = 0;
		wakeup(&needsbuffer);
	}

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not strictly necessary to keep a VMIO
	 * buffer constituted, so the B_INVAL flag is used to *invalidate* the
	 * buffer, but the VM object is kept around.  The B_NOCACHE flag is
	 * used to invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing\n");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
						bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer.  A fast-path version of brelse() for buffers
 * that do not require invalidation or VM rundown.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	if (needsbuffer) {
		needsbuffer = 0;
		wakeup(&needsbuffer);
	}

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with valid and reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		if (m->flags & PG_WANTED) {
			m->flags &= ~PG_WANTED;
			wakeup(m);
		}
		vm_page_unwire(m);
		if (m->wire_count == 0) {
			if (m->valid) {
				/*
				 * this keeps pressure off of the process memory
				 */
				if ((vm_swap_size == 0) ||
					(cnt.v_free_count < cnt.v_free_min))
					vm_page_cache(m);
			} else if ((m->hold_count == 0) &&
				((m->flags & PG_BUSY) == 0) &&
				(m->busy == 0)) {
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			}
		}
	}
	bufspace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
__inline struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}


/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int s;
	int nbyteswritten = 0;

	s = splbio();
start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
	} else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer,
			(PRIBIO + 1) | slpflag, "newbuf", slptimeo);
		splx(s);
		return (0);
	}

	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		--bp->b_usecount;
		TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (bufqueues[QUEUE_LRU].tqh_first != NULL) {
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
			goto start;
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			splx(s);
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO)
		vfs_vmio_release(bp);

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	splx(s);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 2;
	if (bufspace >= maxbufspace + nbyteswritten) {
		s = splbio();
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * Set the dirty range for the buffer -- for NFS: if the file is
 * mapped and pages have been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if any of the pages
 * have been written to, for consistency with the b_validoff and
 * b_validend set in the NFS write code and used by the NFS read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
		((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
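/*
 * Illustrative usage (a sketch, not code from this file): a filesystem
 * preparing a block it intends to overwrite entirely typically does
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	...fill in bp->b_data...
 *	bdwrite(bp);
 *
 * If B_CACHE is clear on return, the buffer contents are not valid and
 * must be read (see bread()) or fully initialized by the caller.
 */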
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
				(PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen, but do when filesystems don't handle size changes
		 * correctly.)  We are conservative on metadata and don't
		 * just extend the buffer, but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if (bp->b_flags & B_VMIO) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0, 0)) == 0);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}


/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course, the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if ((bp->b_flags & B_VMIO) == 0) {
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
		doretry:
				curbpnpages = bp->b_npages;
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
							!vm_page_is_valid(m,
							(vm_offset_t) ((toff + off) & (PAGE_SIZE - 1)),
							bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {

						s = splhigh();
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "pgtblk", 0);
						splx(s);

						goto doretry;
					} else {
						if ((curproc != pageproc) &&
							(m->queue == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
								(cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
							!vm_page_is_valid(m,
							(vm_offset_t) ((toff + off) & (PAGE_SIZE - 1)),
							bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
/*
				bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
*/
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
					bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & (PAGE_SIZE - 1);
			}
		}
	}
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vm_page_set_validclean(m,
					(vm_offset_t) (foff & (PAGE_SIZE-1)), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
						(int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %d, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		wakeup(bp);
	}
	splx(s);
}

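/*
 * Return the number of buffers currently on the locked queue.
 */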
int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
	    bp != NULL;
	    bp = bp->b_freelist.tqe_next)
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	(void) spl0();		/* XXX redundant?  wrong place? */
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

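/*
 * Sysctl handler for vfs_update_interval: after the value is updated,
 * wake the update daemon so the new interval takes effect immediately.
 */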
static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
		oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");


/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and to treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also, the object paging_in_progress
 * count is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = bp->b_vp->v_object;
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			if (clear_modify) {
				vm_page_protect(m, VM_PROT_READ);
				vm_page_set_validclean(m,
					(vm_offset_t) (foff & (PAGE_SIZE-1)), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_validclean(m,
					((vm_offset_t) foff & (PAGE_SIZE-1)), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

void
vfs_bio_clrbuf(struct buf *bp) {
	int i;
	int remapbuffer = 0;
	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;
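			/*
			 * Build a valid-bits mask with one bit set for each
			 * DEV_BSIZE chunk covered by the buffer.
			 */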
			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;
				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
	if (remapbuffer)
		pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = to >> PAGE_SHIFT;
}

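/*
 * Release and free the anonymous pages backing the KVA range
 * [from, to) of a buffer, unmapping them from the kernel pmap.
 */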
void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	from = round_page(from);
	to = round_page(to);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
					bp->b_blkno, bp->b_lblkno);
			}
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = from >> PAGE_SHIFT;
}