xref: /freebsd/sys/kern/vfs_bio.c (revision ce834215a70ff69e7e222827437116eee2f9ac6f)
1 /*
2  * Copyright (c) 1994 John S. Dyson
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice immediately at the beginning of the file, without modification,
10  *    this list of conditions, and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Absolutely no warranty of function or purpose is made by the author
15  *    John S. Dyson.
16  * 4. This work was done expressly for inclusion into FreeBSD.  Other use
17  *    is allowed if this notation is included.
18  * 5. Modifications may be freely made to this file if the above conditions
19  *    are met.
20  *
21  * $Id: vfs_bio.c,v 1.120 1997/06/13 08:30:40 bde Exp $
22  */
23 
24 /*
25  * This file contains a new buffer I/O scheme implementing a coherent
26  * VM object and buffer cache.  Pains have been taken to make
27  * sure that the performance degradation associated with schemes such
28  * as this is not realized.
29  *
30  * Author:  John S. Dyson
31  * Significant help during the development and debugging phases
32  * was provided by David Greenman, also of the FreeBSD core team.
33  */
34 
35 #include "opt_bounce.h"
36 
37 #define VMIO
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/sysproto.h>
41 #include <sys/kernel.h>
42 #include <sys/sysctl.h>
43 #include <sys/proc.h>
44 #include <sys/vnode.h>
45 #include <sys/vmmeter.h>
46 #include <vm/vm.h>
47 #include <vm/vm_param.h>
48 #include <vm/vm_prot.h>
49 #include <vm/vm_kern.h>
50 #include <vm/vm_pageout.h>
51 #include <vm/vm_page.h>
52 #include <vm/vm_object.h>
53 #include <vm/vm_extern.h>
54 #include <vm/vm_map.h>
55 #include <sys/buf.h>
56 #include <sys/mount.h>
57 #include <sys/malloc.h>
58 #include <sys/resourcevar.h>
59 #include <sys/proc.h>
60 
61 #include <miscfs/specfs/specdev.h>
62 
63 static void vfs_update __P((void));
64 static struct	proc *updateproc;
65 static struct kproc_desc up_kp = {
66 	"update",
67 	vfs_update,
68 	&updateproc
69 };
70 SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
71 
72 struct buf *buf;		/* buffer header pool */
73 struct swqueue bswlist;
74 
75 int count_lock_queue __P((void));
76 static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
77 		vm_offset_t to);
78 static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
79 		vm_offset_t to);
80 static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
81 			      vm_offset_t off, vm_offset_t size,
82 			      vm_page_t m);
83 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
84 			       int pageno, vm_page_t m);
85 static void vfs_clean_pages(struct buf * bp);
86 static void vfs_setdirty(struct buf *bp);
87 static void vfs_vmio_release(struct buf *bp);
88 static void flushdirtybuffers(int slpflag, int slptimeo);
89 
90 int needsbuffer;
91 
92 /*
93  * Internal update daemon, process 3
94  *	The variable vfs_update_wakeup allows for internal syncs.
95  */
96 int vfs_update_wakeup;
97 
98 
99 /*
100  * buffers base kva
101  */
102 
103 /*
104  * bogus page -- for I/O to/from partially complete buffers
105  * this is a temporary solution to the problem, but it is not
106  * really that bad.  it would be better to split the buffer
107  * for input in the case of buffers partially already in memory,
108  * but the code is intricate enough already.
109  */
110 vm_page_t bogus_page;
111 static vm_offset_t bogus_offset;
112 
113 static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
114 	bufmallocspace, maxbufmallocspace;
115 int numdirtybuffers, lodirtybuffers, hidirtybuffers;
116 static int numfreebuffers, lofreebuffers, hifreebuffers;
117 
118 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
119 	&numdirtybuffers, 0, "");
120 SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
121 	&lodirtybuffers, 0, "");
122 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
123 	&hidirtybuffers, 0, "");
124 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
125 	&numfreebuffers, 0, "");
126 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
127 	&lofreebuffers, 0, "");
128 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
129 	&hifreebuffers, 0, "");
130 SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
131 	&maxbufspace, 0, "");
132 SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
133 	&bufspace, 0, "");
134 SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
135 	&maxvmiobufspace, 0, "");
136 SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
137 	&vmiospace, 0, "");
138 SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
139 	&maxbufmallocspace, 0, "");
140 SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
141 	&bufmallocspace, 0, "");
142 
143 static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
144 static struct bqueues bufqueues[BUFFER_QUEUES];
145 
146 extern int vm_swap_size;
147 
148 #define BUF_MAXUSE 24
149 
150 #define VFS_BIO_NEED_ANY 1
151 #define VFS_BIO_NEED_LOWLIMIT 2
152 #define VFS_BIO_NEED_FREE 4
153 
154 /*
155  * Initialize buffer headers and related structures.
156  */
157 void
158 bufinit()
159 {
160 	struct buf *bp;
161 	int i;
162 
163 	TAILQ_INIT(&bswlist);
164 	LIST_INIT(&invalhash);
165 
166 	/* first, make a null hash table */
167 	for (i = 0; i < BUFHSZ; i++)
168 		LIST_INIT(&bufhashtbl[i]);
169 
170 	/* next, make a null set of free lists */
171 	for (i = 0; i < BUFFER_QUEUES; i++)
172 		TAILQ_INIT(&bufqueues[i]);
173 
174 	/* finally, initialize each buffer header and stick on empty q */
175 	for (i = 0; i < nbuf; i++) {
176 		bp = &buf[i];
177 		bzero(bp, sizeof *bp);
178 		bp->b_flags = B_INVAL;	/* we're just an empty header */
179 		bp->b_dev = NODEV;
180 		bp->b_rcred = NOCRED;
181 		bp->b_wcred = NOCRED;
182 		bp->b_qindex = QUEUE_EMPTY;
183 		bp->b_vnbufs.le_next = NOLIST;
184 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
185 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
186 	}
187 /*
188  * maxbufspace is currently calculated assuming that all filesystem blocks
189  * are 8K.  If you happen to use a 16K filesystem, the size of the buffer
190  * cache is still the same as it would be for 8K filesystems.  This
191  * keeps the size of the buffer cache "in check" for big block filesystems.
192  */
193 	maxbufspace = (nbuf + 8) * DFLTBSIZE;
194 /*
195  * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
196  */
197 	maxvmiobufspace = 2 * maxbufspace / 3;
198 /*
199  * Limit the amount of malloc memory since it is wired permanently into
200  * the kernel space.  Even though this is accounted for in the buffer
201  * allocation, we don't want the malloced region to grow uncontrolled.
202  * The malloc scheme improves memory utilization significantly on average
203  * (small) directories.
204  */
205 	maxbufmallocspace = maxbufspace / 20;
206 
207 /*
208  * Reduce the likelihood of deadlock conditions by limiting the
209  * number of dirty buffers.
210  */
211 	hidirtybuffers = nbuf / 6 + 20;
212 	lodirtybuffers = nbuf / 12 + 10;
213 	numdirtybuffers = 0;
214 	lofreebuffers = nbuf / 18 + 5;
215 	hifreebuffers = 2 * lofreebuffers;
216 	numfreebuffers = nbuf;
217 
218 	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
219 	bogus_page = vm_page_alloc(kernel_object,
220 			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
221 			VM_ALLOC_NORMAL);
222 
223 }
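
/*
 * Illustrative arithmetic for the limits computed above, assuming
 * nbuf = 1024 and an 8K DFLTBSIZE (both values are machine- and
 * configuration-dependent):
 *
 *	maxbufspace       = (1024 + 8) * 8192    ~= 8.1MB
 *	maxvmiobufspace   = 2 * maxbufspace / 3  ~= 5.4MB
 *	maxbufmallocspace = maxbufspace / 20     ~= 413KB
 *	hidirtybuffers    = 1024/6 + 20 = 190,  lodirtybuffers = 95
 *	lofreebuffers     = 1024/18 + 5 = 61,   hifreebuffers  = 122
 */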
224 
225 /*
226  * Free the kva allocation for a buffer
227  * Must be called only at splbio or higher,
228  *  as this is the only locking for buffer_map.
229  */
230 static void
231 bfreekva(struct buf * bp)
232 {
233 	if (bp->b_kvasize == 0)
234 		return;
235 
236 	vm_map_delete(buffer_map,
237 		(vm_offset_t) bp->b_kvabase,
238 		(vm_offset_t) bp->b_kvabase + bp->b_kvasize);
239 
240 	bp->b_kvasize = 0;
241 
242 }
243 
244 /*
245  * remove the buffer from the appropriate free list
246  */
247 void
248 bremfree(struct buf * bp)
249 {
250 	int s = splbio();
251 
252 	if (bp->b_qindex != QUEUE_NONE) {
253 		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
254 		bp->b_qindex = QUEUE_NONE;
255 	} else {
256 #if !defined(MAX_PERF)
257 		panic("bremfree: removing a buffer when not on a queue");
258 #endif
259 	}
260 	if ((bp->b_flags & B_INVAL) ||
261 		(bp->b_flags & (B_DELWRI|B_LOCKED)) == 0)
262 		--numfreebuffers;
263 	splx(s);
264 }
265 
266 /*
267  * Get a buffer with the specified data.  Look in the cache first.
268  */
269 int
270 bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
271     struct buf ** bpp)
272 {
273 	struct buf *bp;
274 
275 	bp = getblk(vp, blkno, size, 0, 0);
276 	*bpp = bp;
277 
278 	/* if not found in cache, do some I/O */
279 	if ((bp->b_flags & B_CACHE) == 0) {
280 		if (curproc != NULL)
281 			curproc->p_stats->p_ru.ru_inblock++;
282 		bp->b_flags |= B_READ;
283 		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
284 		if (bp->b_rcred == NOCRED) {
285 			if (cred != NOCRED)
286 				crhold(cred);
287 			bp->b_rcred = cred;
288 		}
289 		vfs_busy_pages(bp, 0);
290 		VOP_STRATEGY(bp);
291 		return (biowait(bp));
292 	}
293 	return (0);
294 }
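
/*
 * A minimal usage sketch of the bread()/brelse() protocol.  The function
 * below is hypothetical and for illustration only; bread(), brelse() and
 * bqrelse() are the routines defined in this file.
 */
#ifdef notdef
static int
example_read_block(struct vnode *vp, daddr_t blkno, int size,
    struct ucred *cred)
{
	struct buf *bp;
	int error;

	error = bread(vp, blkno, size, cred, &bp);
	if (error) {
		brelse(bp);	/* a buffer is returned even on error */
		return (error);
	}
	/* ... examine or copy bp->b_data here ... */
	bqrelse(bp);		/* release, keeping the contents cached */
	return (0);
}
#endif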
295 
296 /*
297  * Operates like bread, but also starts asynchronous I/O on
298  * read-ahead blocks.
299  */
300 int
301 breadn(struct vnode * vp, daddr_t blkno, int size,
302     daddr_t * rablkno, int *rabsize,
303     int cnt, struct ucred * cred, struct buf ** bpp)
304 {
305 	struct buf *bp, *rabp;
306 	int i;
307 	int rv = 0, readwait = 0;
308 
309 	*bpp = bp = getblk(vp, blkno, size, 0, 0);
310 
311 	/* if not found in cache, do some I/O */
312 	if ((bp->b_flags & B_CACHE) == 0) {
313 		if (curproc != NULL)
314 			curproc->p_stats->p_ru.ru_inblock++;
315 		bp->b_flags |= B_READ;
316 		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
317 		if (bp->b_rcred == NOCRED) {
318 			if (cred != NOCRED)
319 				crhold(cred);
320 			bp->b_rcred = cred;
321 		}
322 		vfs_busy_pages(bp, 0);
323 		VOP_STRATEGY(bp);
324 		++readwait;
325 	}
326 	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
327 		if (inmem(vp, *rablkno))
328 			continue;
329 		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
330 
331 		if ((rabp->b_flags & B_CACHE) == 0) {
332 			if (curproc != NULL)
333 				curproc->p_stats->p_ru.ru_inblock++;
334 			rabp->b_flags |= B_READ | B_ASYNC;
335 			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
336 			if (rabp->b_rcred == NOCRED) {
337 				if (cred != NOCRED)
338 					crhold(cred);
339 				rabp->b_rcred = cred;
340 			}
341 			vfs_busy_pages(rabp, 0);
342 			VOP_STRATEGY(rabp);
343 		} else {
344 			brelse(rabp);
345 		}
346 	}
347 
348 	if (readwait) {
349 		rv = biowait(bp);
350 	}
351 	return (rv);
352 }
353 
354 /*
355  * Write, release buffer on completion.  (Done by iodone
356  * if async.)
357  */
358 int
359 bwrite(struct buf * bp)
360 {
361 	int oldflags = bp->b_flags;
362 
363 	if (bp->b_flags & B_INVAL) {
364 		brelse(bp);
365 		return (0);
366 	}
367 #if !defined(MAX_PERF)
368 	if (!(bp->b_flags & B_BUSY))
369 		panic("bwrite: buffer is not busy???");
370 #endif
371 
372 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
373 	bp->b_flags |= B_WRITEINPROG;
374 
375 	if ((oldflags & B_DELWRI) == B_DELWRI) {
376 		--numdirtybuffers;
377 		reassignbuf(bp, bp->b_vp);
378 	}
379 
380 	bp->b_vp->v_numoutput++;
381 	vfs_busy_pages(bp, 1);
382 	if (curproc != NULL)
383 		curproc->p_stats->p_ru.ru_oublock++;
384 	VOP_STRATEGY(bp);
385 
386 	/*
387 	 * Handle ordered writes here.
388 	 * If the write was originally flagged as ordered,
389 	 * then we check to see if it was converted to async.
390 	 * If it was converted to async, and is done now, then
391 	 * we release the buffer.  Otherwise we clear the
392 	 * ordered flag because it is not needed anymore.
393 	 *
394  	 * Note that biodone has been modified so that it does
395 	 * not release ordered buffers.  This allows us to have
396 	 * a chance to determine whether or not the driver
397 	 * has set the async flag in the strategy routine.  Without
398 	 * that change, the buffer might have been reused before we
399 	 * had a chance to check the flag.
400 	 */
401 
402 	if ((oldflags & B_ORDERED) == B_ORDERED) {
403 		int s;
404 		s = splbio();
405 		if (bp->b_flags & B_ASYNC)  {
406 			if ((bp->b_flags & B_DONE)) {
407 				if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
408 					brelse(bp);
409 				else
410 					bqrelse(bp);
411 			}
412 			splx(s);
413 			return (0);
414 		} else {
415 			bp->b_flags &= ~B_ORDERED;
416 		}
417 		splx(s);
418 	}
419 
420 	if ((oldflags & B_ASYNC) == 0) {
421 		int rtval = biowait(bp);
422 
423 		if (oldflags & B_DELWRI) {
424 			reassignbuf(bp, bp->b_vp);
425 		}
426 		brelse(bp);
427 		return (rtval);
428 	}
429 	return (0);
430 }
431 
432 int
433 vn_bwrite(ap)
434 	struct vop_bwrite_args *ap;
435 {
436 	return (bwrite(ap->a_bp));
437 }
438 
439 void
440 vfs_bio_need_satisfy(void) {
441 	++numfreebuffers;
442 	if (!needsbuffer)
443 		return;
444 	if (numdirtybuffers < lodirtybuffers) {
445 		needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT);
446 	} else {
447 		needsbuffer &= ~VFS_BIO_NEED_ANY;
448 	}
449 	if (numfreebuffers >= hifreebuffers) {
450 		needsbuffer &= ~VFS_BIO_NEED_FREE;
451 	}
452 	wakeup(&needsbuffer);
453 }
454 
455 /*
456  * Delayed write. (Buffer is marked dirty).
457  */
458 void
459 bdwrite(struct buf * bp)
460 {
461 
462 #if !defined(MAX_PERF)
463 	if ((bp->b_flags & B_BUSY) == 0) {
464 		panic("bdwrite: buffer is not busy");
465 	}
466 #endif
467 
468 	if (bp->b_flags & B_INVAL) {
469 		brelse(bp);
470 		return;
471 	}
472 	if (bp->b_flags & B_TAPE) {
473 		bawrite(bp);
474 		return;
475 	}
476 	bp->b_flags &= ~(B_READ|B_RELBUF);
477 	if ((bp->b_flags & B_DELWRI) == 0) {
478 		bp->b_flags |= B_DONE | B_DELWRI;
479 		reassignbuf(bp, bp->b_vp);
480 		++numdirtybuffers;
481 	}
482 
483 	/*
484 	 * This bmap keeps the system from needing to do the bmap later,
485 	 * perhaps when the system is attempting to do a sync.  Since it
486 	 * is likely that the indirect block -- or whatever other data
487 	 * structure the filesystem needs -- is still in memory now, it is
488 	 * a good time to do this.  Note also that if the pageout daemon is
489 	 * requesting a sync, there might not be enough memory to do
490 	 * the bmap then...  So, this is important to do now.
491 	 */
492 	if (bp->b_lblkno == bp->b_blkno) {
493 		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
494 	}
495 
496 	/*
497 	 * Set the *dirty* buffer range based upon the VM system dirty pages.
498 	 */
499 	vfs_setdirty(bp);
500 
501 	/*
502 	 * We need to do this here to satisfy the vnode_pager and the
503 	 * pageout daemon, so that they think the pages have been
504 	 * "cleaned".  Note that since the pages are in a delayed write
505 	 * buffer -- the VFS layer "will" see that the pages get written
506 	 * out on the next sync, or perhaps the cluster will be completed.
507 	 */
508 	vfs_clean_pages(bp);
509 	bqrelse(bp);
510 
511 	if (numdirtybuffers >= hidirtybuffers)
512 		flushdirtybuffers(0, 0);
513 
514 	return;
515 }
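
/*
 * Sketch of the delayed-write pattern that bdwrite() serves (hypothetical
 * function, for illustration only): read a block, modify it in place,
 * then mark it dirty and let the update daemon or a later sync push it
 * to disk.
 */
#ifdef notdef
static int
example_update_block(struct vnode *vp, daddr_t blkno, int size,
    struct ucred *cred)
{
	struct buf *bp;
	int error;

	error = bread(vp, blkno, size, cred, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	/* ... modify bp->b_data in place ... */
	bdwrite(bp);	/* sets B_DELWRI and releases the buffer */
	return (0);
}
#endif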
516 
517 /*
518  * Asynchronous write.
519  * Start output on a buffer, but do not wait for it to complete.
520  * The buffer is released when the output completes.
521  */
522 void
523 bawrite(struct buf * bp)
524 {
525 	bp->b_flags |= B_ASYNC;
526 	(void) VOP_BWRITE(bp);
527 }
528 
529 /*
530  * Ordered write.
531  * Start output on a buffer, but only wait for it to complete if the
532  * output device cannot guarantee ordering in some other way.  Devices
533  * that can perform asynchronous ordered writes will set the B_ASYNC
534  * flag in their strategy routine.
535  * The buffer is released when the output completes.
536  */
537 int
538 bowrite(struct buf * bp)
539 {
540 	bp->b_flags |= B_ORDERED;
541 	return (VOP_BWRITE(bp));
542 }
543 
544 /*
545  * Release a buffer.
546  */
547 void
548 brelse(struct buf * bp)
549 {
550 	int s;
551 
552 	if (bp->b_flags & B_CLUSTER) {
553 		relpbuf(bp);
554 		return;
555 	}
556 	/* anyone need a "free" block? */
557 	s = splbio();
558 
559 	/* anyone need this block? */
560 	if (bp->b_flags & B_WANTED) {
561 		bp->b_flags &= ~(B_WANTED | B_AGE);
562 		wakeup(bp);
563 	}
564 
565 	if (bp->b_flags & B_LOCKED)
566 		bp->b_flags &= ~B_ERROR;
567 
568 	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
569 	    (bp->b_bufsize <= 0)) {
570 		bp->b_flags |= B_INVAL;
571 		if (bp->b_flags & B_DELWRI)
572 			--numdirtybuffers;
573 		bp->b_flags &= ~(B_DELWRI | B_CACHE);
574 		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
575 			if (bp->b_bufsize)
576 				allocbuf(bp, 0);
577 			brelvp(bp);
578 		}
579 	}
580 
581 	/*
582 	 * VMIO buffer rundown.  It is not strictly necessary to keep a VMIO buffer
583 	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
584 	 * but the VM object is kept around.  The B_NOCACHE flag is used to
585 	 * invalidate the pages in the VM object.
586 	 *
587 	 * If the buffer is a partially filled NFS buffer, keep it
588 	 * since invalidating it now will lose information.  The valid
589 	 * flags in the vm_pages have only DEV_BSIZE resolution but
590 	 * the b_validoff, b_validend fields have byte resolution.
591 	 * This can avoid unnecessary re-reads of the buffer.
592 	 * XXX this seems to cause performance problems.
593 	 */
594 	if ((bp->b_flags & B_VMIO)
595 	    && !(bp->b_vp->v_tag == VT_NFS &&
596 		 (bp->b_flags & B_DELWRI) != 0)
597 #ifdef notdef
598 	    && (bp->b_vp->v_tag != VT_NFS
599 		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
600 		|| bp->b_validend == 0
601 		|| (bp->b_validoff == 0
602 		    && bp->b_validend == bp->b_bufsize))
603 #endif
604 	    ) {
605 		vm_ooffset_t foff;
606 		vm_object_t obj;
607 		int i, resid;
608 		vm_page_t m;
609 		struct vnode *vp;
610 		int iototal = bp->b_bufsize;
611 
612 		vp = bp->b_vp;
613 
614 #if !defined(MAX_PERF)
615 		if (!vp)
616 			panic("brelse: missing vp");
617 #endif
618 
619 		if (bp->b_npages) {
620 			vm_pindex_t poff;
621 			obj = (vm_object_t) vp->v_object;
622 			if (vp->v_type == VBLK)
623 				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
624 			else
625 				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
626 			poff = OFF_TO_IDX(foff);
627 			for (i = 0; i < bp->b_npages; i++) {
628 				m = bp->b_pages[i];
629 				if (m == bogus_page) {
630 					m = vm_page_lookup(obj, poff + i);
631 #if !defined(MAX_PERF)
632 					if (!m) {
633 						panic("brelse: page missing\n");
634 					}
635 #endif
636 					bp->b_pages[i] = m;
637 					pmap_qenter(trunc_page(bp->b_data),
638 						bp->b_pages, bp->b_npages);
639 				}
640 				resid = IDX_TO_OFF(m->pindex+1) - foff;
641 				if (resid > iototal)
642 					resid = iototal;
643 				if (resid > 0) {
644 					/*
645 					 * Don't invalidate the page if the local machine has already
646 					 * modified it.  This is the lesser of two evils, and should
647 					 * be fixed.
648 					 */
649 					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
650 						vm_page_test_dirty(m);
651 						if (m->dirty == 0) {
652 							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
653 							if (m->valid == 0)
654 								vm_page_protect(m, VM_PROT_NONE);
655 						}
656 					}
657 					if (resid >= PAGE_SIZE) {
658 						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
659 							bp->b_flags |= B_INVAL;
660 						}
661 					} else {
662 						if (!vm_page_is_valid(m,
663 							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
664 							bp->b_flags |= B_INVAL;
665 						}
666 					}
667 				}
668 				foff += resid;
669 				iototal -= resid;
670 			}
671 		}
672 		if (bp->b_flags & (B_INVAL | B_RELBUF))
673 			vfs_vmio_release(bp);
674 	}
675 #if !defined(MAX_PERF)
676 	if (bp->b_qindex != QUEUE_NONE)
677 		panic("brelse: free buffer onto another queue???");
678 #endif
679 
680 	/* enqueue */
681 	/* buffers with no memory */
682 	if (bp->b_bufsize == 0) {
683 		bp->b_flags |= B_INVAL;
684 		bp->b_qindex = QUEUE_EMPTY;
685 		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
686 		LIST_REMOVE(bp, b_hash);
687 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
688 		bp->b_dev = NODEV;
689 		/*
690 		 * Get rid of the kva allocation *now*
691 		 */
692 		bfreekva(bp);
693 
694 	/* buffers with junk contents */
695 	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
696 		bp->b_flags |= B_INVAL;
697 		bp->b_qindex = QUEUE_AGE;
698 		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
699 		LIST_REMOVE(bp, b_hash);
700 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
701 		bp->b_dev = NODEV;
702 
703 	/* buffers that are locked */
704 	} else if (bp->b_flags & B_LOCKED) {
705 		bp->b_qindex = QUEUE_LOCKED;
706 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
707 
708 	/* buffers with stale but valid contents */
709 	} else if (bp->b_flags & B_AGE) {
710 		bp->b_qindex = QUEUE_AGE;
711 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
712 
713 	/* buffers with valid and quite potentially reusable contents */
714 	} else {
715 		bp->b_qindex = QUEUE_LRU;
716 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
717 	}
718 
719 	if ((bp->b_flags & B_INVAL) ||
720 		(bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
721 		if (bp->b_flags & B_DELWRI) {
722 			--numdirtybuffers;
723 			bp->b_flags &= ~B_DELWRI;
724 		}
725 		vfs_bio_need_satisfy();
726 	}
727 
728 	/* unlock */
729 	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
730 				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
731 	splx(s);
732 }
733 
734 /*
735  * Release a buffer.  Like brelse(), but without the VMIO rundown or invalidation.
736  */
737 void
738 bqrelse(struct buf * bp)
739 {
740 	int s;
741 
742 	s = splbio();
743 
744 	/* anyone need this block? */
745 	if (bp->b_flags & B_WANTED) {
746 		bp->b_flags &= ~(B_WANTED | B_AGE);
747 		wakeup(bp);
748 	}
749 
750 #if !defined(MAX_PERF)
751 	if (bp->b_qindex != QUEUE_NONE)
752 		panic("bqrelse: free buffer onto another queue???");
753 #endif
754 
755 	if (bp->b_flags & B_LOCKED) {
756 		bp->b_flags &= ~B_ERROR;
757 		bp->b_qindex = QUEUE_LOCKED;
758 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
759 		/* buffers with stale but valid contents */
760 	} else {
761 		bp->b_qindex = QUEUE_LRU;
762 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
763 	}
764 
765 	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
766 		vfs_bio_need_satisfy();
767 	}
768 
769 	/* unlock */
770 	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
771 		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
772 	splx(s);
773 }
774 
775 static void
776 vfs_vmio_release(bp)
777 	struct buf *bp;
778 {
779 	int i;
780 	vm_page_t m;
781 
782 	for (i = 0; i < bp->b_npages; i++) {
783 		m = bp->b_pages[i];
784 		bp->b_pages[i] = NULL;
785 		vm_page_unwire(m);
786 		/*
787 		 * We don't mess with busy pages; it is
788 		 * the responsibility of the process that
789 		 * busied the pages to deal with them.
790 		 */
791 		if ((m->flags & PG_BUSY) || (m->busy != 0))
792 			continue;
793 
794 		if (m->wire_count == 0) {
795 
796 			if (m->flags & PG_WANTED) {
797 				m->flags &= ~PG_WANTED;
798 				wakeup(m);
799 			}
800 
801 			/*
802 			 * If this is an async free, we cannot place
803 			 * pages onto the cache queue; in that case we
804 			 * don't modify any queues at all.
805 			 * This is probably in error (for perf reasons),
806 			 * and we will eventually need to build
807 			 * a more complete infrastructure to support I/O
808 			 * rundown.
809 			 */
810 			if ((bp->b_flags & B_ASYNC) == 0) {
811 
812 			/*
813 			 * In the case of sync buffer frees, we can do pretty much
814 			 * anything to any of the memory queues.  Specifically,
815 			 * the cache queue may safely be modified.
816 			 */
817 				if (m->valid) {
818 					if(m->dirty == 0)
819 						vm_page_test_dirty(m);
820 					/*
821 					 * this keeps pressure off of the process memory
822 					 */
823 					if (m->dirty == 0 && m->hold_count == 0)
824 						vm_page_cache(m);
825 					else
826 						vm_page_deactivate(m);
827 				} else if (m->hold_count == 0) {
828 					vm_page_protect(m, VM_PROT_NONE);
829 					vm_page_free(m);
830 				}
831 			} else {
832 				/*
833 				 * If async, then at least we clear the
834 				 * act_count.
835 				 */
836 				m->act_count = 0;
837 			}
838 		}
839 	}
840 	bufspace -= bp->b_bufsize;
841 	vmiospace -= bp->b_bufsize;
842 	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
843 	bp->b_npages = 0;
844 	bp->b_bufsize = 0;
845 	bp->b_flags &= ~B_VMIO;
846 	if (bp->b_vp)
847 		brelvp(bp);
848 }
849 
850 /*
851  * Check to see if a block is currently memory resident.
852  */
853 struct buf *
854 gbincore(struct vnode * vp, daddr_t blkno)
855 {
856 	struct buf *bp;
857 	struct bufhashhdr *bh;
858 
859 	bh = BUFHASH(vp, blkno);
860 	bp = bh->lh_first;
861 
862 	/* Search hash chain */
863 	while (bp != NULL) {
864 		/* hit */
865 		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
866 		    (bp->b_flags & B_INVAL) == 0) {
867 			break;
868 		}
869 		bp = bp->b_hash.le_next;
870 	}
871 	return (bp);
872 }
873 
874 /*
875  * this routine implements clustered async writes for
876  * clearing out B_DELWRI buffers...  This is much better
877  * than the old way of writing only one buffer at a time.
878  */
879 int
880 vfs_bio_awrite(struct buf * bp)
881 {
882 	int i;
883 	daddr_t lblkno = bp->b_lblkno;
884 	struct vnode *vp = bp->b_vp;
885 	int s;
886 	int ncl;
887 	struct buf *bpa;
888 	int nwritten;
889 
890 	s = splbio();
891 	/*
892 	 * right now we support clustered writing only to regular files
893 	 */
894 	if ((vp->v_type == VREG) &&
895 	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
896 	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
897 		int size;
898 		int maxcl;
899 
900 		size = vp->v_mount->mnt_stat.f_iosize;
901 		maxcl = MAXPHYS / size;
902 
903 		for (i = 1; i < maxcl; i++) {
904 			if ((bpa = gbincore(vp, lblkno + i)) &&
905 			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
906 			    (B_DELWRI | B_CLUSTEROK)) &&
907 			    (bpa->b_bufsize == size)) {
908 				if ((bpa->b_blkno == bpa->b_lblkno) ||
909 				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
910 					break;
911 			} else {
912 				break;
913 			}
914 		}
915 		ncl = i;
916 		/*
917 		 * this is a possible cluster write
918 		 */
919 		if (ncl != 1) {
920 			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
921 			splx(s);
922 			return nwritten;
923 		}
924 	}
925 	bremfree(bp);
926 	splx(s);
927 	/*
928 	 * default (old) behavior, writing out only one block
929 	 */
930 	bp->b_flags |= B_BUSY | B_ASYNC;
931 	nwritten = bp->b_bufsize;
932 	(void) VOP_BWRITE(bp);
933 	return nwritten;
934 }
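
/*
 * Illustrative arithmetic for the clustering above: if, say, f_iosize is
 * 8K and MAXPHYS is 64K (both vary by configuration), maxcl is 8, so up
 * to 8 logically and physically contiguous dirty B_CLUSTEROK buffers
 * starting at lblkno are pushed out through cluster_wbuild() as one
 * write; any break in the run falls back to writing bp alone.
 */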
935 
936 
937 /*
938  * Find a buffer header which is available for use.
939  */
940 static struct buf *
941 getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize)
942 {
943 	struct buf *bp;
944 	int nbyteswritten = 0;
945 	vm_offset_t addr;
946 	static int writerecursion = 0;
947 
948 start:
949 	if (bufspace >= maxbufspace)
950 		goto trytofreespace;
951 
952 	/* can we constitute a new buffer? */
953 	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
954 #if !defined(MAX_PERF)
955 		if (bp->b_qindex != QUEUE_EMPTY)
956 			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
957 			    bp->b_qindex);
958 #endif
959 		bp->b_flags |= B_BUSY;
960 		bremfree(bp);
961 		goto fillbuf;
962 	}
963 trytofreespace:
964 	/*
965 	 * We keep the file I/O from hogging metadata I/O
966 	 * This is desirable because file data is cached in the
967 	 * VM/Buffer cache even if a buffer is freed.
968 	 */
969 	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
970 #if !defined(MAX_PERF)
971 		if (bp->b_qindex != QUEUE_AGE)
972 			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
973 			    bp->b_qindex);
974 #endif
975 	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
976 #if !defined(MAX_PERF)
977 		if (bp->b_qindex != QUEUE_LRU)
978 			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
979 			    bp->b_qindex);
980 #endif
981 	}
982 	if (!bp) {
983 		/* wait for a free buffer of any kind */
984 		needsbuffer |= VFS_BIO_NEED_ANY;
985 		do
986 			tsleep(&needsbuffer, (PRIBIO + 1) | slpflag, "newbuf",
987 			    slptimeo);
988 		while (needsbuffer & VFS_BIO_NEED_ANY);
989 		return (0);
990 	}
991 
992 #if defined(DIAGNOSTIC)
993 	if (bp->b_flags & B_BUSY) {
994 		panic("getnewbuf: busy buffer on free list\n");
995 	}
996 #endif
997 
998 	/*
999 	 * We are fairly aggressive about freeing VMIO buffers, but since
1000 	 * the data remains cached in the VM object without buffer headers,
1001 	 * there is not much loss.  We gain by maintaining non-VMIOed metadata in buffers.
1002 	 */
1003 	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
1004 		if ((bp->b_flags & B_VMIO) == 0 ||
1005 			(vmiospace < maxvmiobufspace)) {
1006 			--bp->b_usecount;
1007 			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
1008 			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
1009 				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1010 				goto start;
1011 			}
1012 			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1013 		}
1014 	}
1015 
1016 
1017 	/* if we are a delayed write, convert to an async write */
1018 	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
1019 
1020 		if (writerecursion > 0) {
1021 			bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1022 			while (bp) {
1023 				if ((bp->b_flags & B_DELWRI) == 0)
1024 					break;
1025 				bp = TAILQ_NEXT(bp, b_freelist);
1026 			}
1027 			if (bp == NULL) {
1028 				bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
1029 				while (bp) {
1030 					if ((bp->b_flags & B_DELWRI) == 0)
1031 						break;
1032 					bp = TAILQ_NEXT(bp, b_freelist);
1033 				}
1034 			}
1035 			if (bp == NULL)
1036 				panic("getnewbuf: cannot get buffer, infinite recursion failure");
1037 		} else {
1038 			++writerecursion;
1039 			nbyteswritten += vfs_bio_awrite(bp);
1040 			--writerecursion;
1041 			if (!slpflag && !slptimeo) {
1042 				return (0);
1043 			}
1044 			goto start;
1045 		}
1046 	}
1047 
1048 	if (bp->b_flags & B_WANTED) {
1049 		bp->b_flags &= ~B_WANTED;
1050 		wakeup(bp);
1051 	}
1052 	bremfree(bp);
1053 	bp->b_flags |= B_BUSY;
1054 
1055 	if (bp->b_flags & B_VMIO) {
1056 		bp->b_flags &= ~B_ASYNC;
1057 		vfs_vmio_release(bp);
1058 	}
1059 
1060 	if (bp->b_vp)
1061 		brelvp(bp);
1062 
1063 fillbuf:
1064 	/* we are not free, nor do we contain interesting data */
1065 	if (bp->b_rcred != NOCRED) {
1066 		crfree(bp->b_rcred);
1067 		bp->b_rcred = NOCRED;
1068 	}
1069 	if (bp->b_wcred != NOCRED) {
1070 		crfree(bp->b_wcred);
1071 		bp->b_wcred = NOCRED;
1072 	}
1073 
1074 	LIST_REMOVE(bp, b_hash);
1075 	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1076 	if (bp->b_bufsize) {
1077 		allocbuf(bp, 0);
1078 	}
1079 	bp->b_flags = B_BUSY;
1080 	bp->b_dev = NODEV;
1081 	bp->b_vp = NULL;
1082 	bp->b_blkno = bp->b_lblkno = 0;
1083 	bp->b_iodone = 0;
1084 	bp->b_error = 0;
1085 	bp->b_resid = 0;
1086 	bp->b_bcount = 0;
1087 	bp->b_npages = 0;
1088 	bp->b_dirtyoff = bp->b_dirtyend = 0;
1089 	bp->b_validoff = bp->b_validend = 0;
1090 	bp->b_usecount = 4;
1091 
1092 	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;
1093 
1094 	/*
1095 	 * we assume that buffer_map is not at address 0
1096 	 */
1097 	addr = 0;
1098 	if (maxsize != bp->b_kvasize) {
1099 		bfreekva(bp);
1100 
1101 		/*
1102 		 * See if we have buffer kva space
1103 		 */
1104 		if (vm_map_findspace(buffer_map,
1105 			vm_map_min(buffer_map), maxsize, &addr)) {
1106 			bp->b_flags |= B_INVAL;
1107 			brelse(bp);
1108 			goto trytofreespace;
1109 		}
1110 	}
1111 
1112 	/*
1113 	 * See if we have exceeded our allocated buffer space
1114 	 */
1115 	if (bufspace >= (maxbufspace + nbyteswritten)) {
1116 		bp->b_flags |= B_INVAL;
1117 		brelse(bp);
1118 		goto trytofreespace;
1119 	}
1120 
1121 	/*
1122 	 * create a map entry for the buffer -- in essence
1123 	 * reserving the kva space.
1124 	 */
1125 	if (addr) {
1126 		vm_map_insert(buffer_map, NULL, 0,
1127 			addr, addr + maxsize,
1128 			VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
1129 
1130 		bp->b_kvabase = (caddr_t) addr;
1131 		bp->b_kvasize = maxsize;
1132 	}
1133 	bp->b_data = bp->b_kvabase;
1134 
1135 	return (bp);
1136 }
1137 
1138 static void
1139 waitfreebuffers(int slpflag, int slptimeo) {
1140 	while (numfreebuffers < hifreebuffers) {
1141 		flushdirtybuffers(slpflag, slptimeo);
1142 		if (numfreebuffers >= hifreebuffers)
1143 			break;
1144 		needsbuffer |= VFS_BIO_NEED_FREE;
1145 		if (tsleep(&needsbuffer, PRIBIO|slpflag, "biofre", slptimeo))
1146 			break;
1147 	}
1148 }
1149 
1150 static void
1151 flushdirtybuffers(int slpflag, int slptimeo) {
1152 	int s;
1153 	static pid_t flushing = 0;
1154 
1155 	s = splbio();
1156 
1157 	if (flushing) {
1158 		if (flushing == curproc->p_pid) {
1159 			splx(s);
1160 			return;
1161 		}
1162 		while (flushing) {
1163 			if (tsleep(&flushing, PRIBIO|slpflag, "biofls", slptimeo)) {
1164 				splx(s);
1165 				return;
1166 			}
1167 		}
1168 	}
1169 	flushing = curproc->p_pid;
1170 
1171 	while (numdirtybuffers > lodirtybuffers) {
1172 		struct buf *bp;
1173 		needsbuffer |= VFS_BIO_NEED_LOWLIMIT;
1174 		bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1175 		if (bp == NULL)
1176 			bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
1177 
1178 		while (bp && ((bp->b_flags & B_DELWRI) == 0)) {
1179 			bp = TAILQ_NEXT(bp, b_freelist);
1180 		}
1181 
1182 		if (bp) {
1183 			splx(s);
1184 			vfs_bio_awrite(bp);
1185 			s = splbio();
1186 			continue;
1187 		}
1188 		break;
1189 	}
1190 
1191 	flushing = 0;
1192 	wakeup(&flushing);
1193 	splx(s);
1194 }
1195 
1196 /*
1197  * Check to see if a block is currently memory resident.
1198  */
1199 struct buf *
1200 incore(struct vnode * vp, daddr_t blkno)
1201 {
1202 	struct buf *bp;
1203 
1204 	int s = splbio();
1205 	bp = gbincore(vp, blkno);
1206 	splx(s);
1207 	return (bp);
1208 }
1209 
1210 /*
1211  * Returns true if no I/O is needed to access the
1212  * associated VM object.  This is like incore except
1213  * it also hunts around in the VM system for the data.
1214  */
1215 
1216 int
1217 inmem(struct vnode * vp, daddr_t blkno)
1218 {
1219 	vm_object_t obj;
1220 	vm_offset_t toff, tinc;
1221 	vm_page_t m;
1222 	vm_ooffset_t off;
1223 
1224 	if (incore(vp, blkno))
1225 		return 1;
1226 	if (vp->v_mount == NULL)
1227 		return 0;
1228 	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
1229 		return 0;
1230 
1231 	obj = vp->v_object;
1232 	tinc = PAGE_SIZE;
1233 	if (tinc > vp->v_mount->mnt_stat.f_iosize)
1234 		tinc = vp->v_mount->mnt_stat.f_iosize;
1235 	off = blkno * vp->v_mount->mnt_stat.f_iosize;
1236 
1237 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
1238 
1239 		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
1240 		if (!m)
1241 			return 0;
1242 		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
1243 			return 0;
1244 	}
1245 	return 1;
1246 }
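
/*
 * Illustrative arithmetic for the loop above: with f_iosize = 8K and
 * PAGE_SIZE = 4K, tinc is 4K and a block requires two
 * vm_page_lookup()/vm_page_is_valid() checks; with f_iosize = 512,
 * tinc is clamped to 512 and a single partial-page check suffices.
 */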
1247 
1248 /*
1249  * Set the dirty range for the buffer --
1250  * for NFS -- if the file is mapped and pages have
1251  * been written to, let it know.  We want the
1252  * entire range of the buffer to be marked dirty if
1253  * any of the pages have been written to, for consistency
1254  * with the b_validoff, b_validend set in the NFS write
1255  * code and used by the NFS read code.
1256  */
1257 static void
1258 vfs_setdirty(struct buf *bp) {
1259 	int i;
1260 	vm_object_t object;
1261 	vm_offset_t boffset, offset;
1262 	/*
1263 	 * We qualify the scan for modified pages on whether the
1264 	 * object has been flushed yet.  The OBJ_WRITEABLE flag
1265 	 * is not cleared simply by protecting pages off.
1266 	 */
1267 	if ((bp->b_flags & B_VMIO) &&
1268 		((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
1269 		/*
1270 		 * test the pages to see if they have been modified directly
1271 		 * by users through the VM system.
1272 		 */
1273 		for (i = 0; i < bp->b_npages; i++)
1274 			vm_page_test_dirty(bp->b_pages[i]);
1275 
1276 		/*
1277 		 * scan forwards for the first page modified
1278 		 */
1279 		for (i = 0; i < bp->b_npages; i++) {
1280 			if (bp->b_pages[i]->dirty) {
1281 				break;
1282 			}
1283 		}
1284 		boffset = (i << PAGE_SHIFT);
1285 		if (boffset < bp->b_dirtyoff) {
1286 			bp->b_dirtyoff = boffset;
1287 		}
1288 
1289 		/*
1290 		 * scan backwards for the last page modified
1291 		 */
1292 		for (i = bp->b_npages - 1; i >= 0; --i) {
1293 			if (bp->b_pages[i]->dirty) {
1294 				break;
1295 			}
1296 		}
1297 		boffset = (i + 1);
1298 		offset = boffset + bp->b_pages[0]->pindex;
1299 		if (offset >= object->size)
1300 			boffset = object->size - bp->b_pages[0]->pindex;
1301 		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
1302 			bp->b_dirtyend = (boffset << PAGE_SHIFT);
1303 	}
1304 }
1305 
1306 /*
1307  * Get a block given a specified block and offset into a file/device.
1308  */
1309 struct buf *
1310 getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1311 {
1312 	struct buf *bp;
1313 	int s;
1314 	struct bufhashhdr *bh;
1315 	int maxsize;
1316 	static pid_t flushing = 0;
1317 
1318 	if (vp->v_mount) {
1319 		maxsize = vp->v_mount->mnt_stat.f_iosize;
1320 		/*
1321 		 * This happens on mount points.
1322 		 */
1323 		if (maxsize < size)
1324 			maxsize = size;
1325 	} else {
1326 		maxsize = size;
1327 	}
1328 
1329 #if !defined(MAX_PERF)
1330 	if (size > MAXBSIZE)
1331 		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
1332 #endif
1333 
1334 	s = splbio();
1335 loop:
1336 	if (numfreebuffers < lofreebuffers) {
1337 		waitfreebuffers(slpflag, slptimeo);
1338 	}
1339 
1340 	if ((bp = gbincore(vp, blkno))) {
1341 		if (bp->b_flags & B_BUSY) {
1342 			bp->b_flags |= B_WANTED;
1343 			if (bp->b_usecount < BUF_MAXUSE)
1344 				++bp->b_usecount;
1345 			if (!tsleep(bp,
1346 				(PRIBIO + 1) | slpflag, "getblk", slptimeo))
1347 				goto loop;
1348 
1349 			splx(s);
1350 			return (struct buf *) NULL;
1351 		}
1352 		bp->b_flags |= B_BUSY | B_CACHE;
1353 		bremfree(bp);
1354 
1355 		/*
1356 		 * check for size inconsistencies (note that they shouldn't happen
1357 		 * but do when filesystems don't handle the size changes correctly).
1358 		 * We are conservative on metadata and don't just extend the buffer
1359 		 * but write and re-constitute it.
1360 		 */
1361 
1362 		if (bp->b_bcount != size) {
1363 			if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
1364 				allocbuf(bp, size);
1365 			} else {
1366 				bp->b_flags |= B_NOCACHE;
1367 				VOP_BWRITE(bp);
1368 				goto loop;
1369 			}
1370 		}
1371 
1372 		if (bp->b_usecount < BUF_MAXUSE)
1373 			++bp->b_usecount;
1374 		splx(s);
1375 		return (bp);
1376 	} else {
1377 		vm_object_t obj;
1378 
1379 		if ((bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize)) == 0) {
1380 			if (slpflag || slptimeo) {
1381 				splx(s);
1382 				return NULL;
1383 			}
1384 			goto loop;
1385 		}
1386 
1387 		/*
1388 		 * This code is used to make sure that a buffer is not
1389 		 * created while the getnewbuf routine is blocked.
1390 		 * Normally the vnode is locked so this isn't a problem.
1391 		 * VBLK type I/O requests, however, don't lock the vnode.
1392 		 */
1393 		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
1394 			bp->b_flags |= B_INVAL;
1395 			brelse(bp);
1396 			goto loop;
1397 		}
1398 
1399 		/*
1400 		 * Insert the buffer into the hash, so that it can
1401 		 * be found by incore.
1402 		 */
1403 		bp->b_blkno = bp->b_lblkno = blkno;
1404 		bgetvp(vp, bp);
1405 		LIST_REMOVE(bp, b_hash);
1406 		bh = BUFHASH(vp, blkno);
1407 		LIST_INSERT_HEAD(bh, bp, b_hash);
1408 
1409 		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
1410 			bp->b_flags |= (B_VMIO | B_CACHE);
1411 #if defined(VFS_BIO_DEBUG)
1412 			if (vp->v_type != VREG && vp->v_type != VBLK)
1413 				printf("getblk: vmioing file type %d???\n", vp->v_type);
1414 #endif
1415 		} else {
1416 			bp->b_flags &= ~B_VMIO;
1417 		}
1418 		splx(s);
1419 
1420 		allocbuf(bp, size);
1421 #ifdef	PC98
1422 		/*
1423 		 * 1024byte/sector support
1424 		 */
1425 #define B_XXX2 0x8000000
1426 		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
1427 #endif
1428 		return (bp);
1429 	}
1430 }
1431 
1432 /*
1433  * Get an empty, disassociated buffer of given size.
1434  */
1435 struct buf *
1436 geteblk(int size)
1437 {
1438 	struct buf *bp;
1439 	int s;
1440 
1441 	s = splbio();
1442 	while ((bp = getnewbuf(0, 0, 0, size, MAXBSIZE)) == 0);
1443 	splx(s);
1444 	allocbuf(bp, size);
1445 	bp->b_flags |= B_INVAL;
1446 	return (bp);
1447 }
1448 
1449 
1450 /*
1451  * This code constitutes the buffer memory from either anonymous system
1452  * memory (in the case of non-VMIO operations) or from an associated
1453  * VM object (in the case of VMIO operations).
1454  *
1455  * Note that this code is tricky, and has many complications to resolve
1456  * deadlock or inconsistent data situations.  Tread lightly!!!
1457  *
1458  * Modify the length of a buffer's underlying buffer storage without
1459  * destroying information (unless, of course the buffer is shrinking).
1460  */
1461 int
1462 allocbuf(struct buf * bp, int size)
1463 {
1464 
1465 	int s;
1466 	int newbsize, mbsize;
1467 	int i;
1468 
1469 #if !defined(MAX_PERF)
1470 	if (!(bp->b_flags & B_BUSY))
1471 		panic("allocbuf: buffer not busy");
1472 
1473 	if (bp->b_kvasize < size)
1474 		panic("allocbuf: buffer too small");
1475 #endif
1476 
1477 	if ((bp->b_flags & B_VMIO) == 0) {
1478 		caddr_t origbuf;
1479 		int origbufsize;
1480 		/*
1481 		 * Just get anonymous memory from the kernel
1482 		 */
1483 		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1484 #if !defined(NO_B_MALLOC)
1485 		if (bp->b_flags & B_MALLOC)
1486 			newbsize = mbsize;
1487 		else
1488 #endif
1489 			newbsize = round_page(size);
1490 
1491 		if (newbsize < bp->b_bufsize) {
1492 #if !defined(NO_B_MALLOC)
1493 			/*
1494 			 * malloced buffers are not shrunk
1495 			 */
1496 			if (bp->b_flags & B_MALLOC) {
1497 				if (newbsize) {
1498 					bp->b_bcount = size;
1499 				} else {
1500 					free(bp->b_data, M_BIOBUF);
1501 					bufspace -= bp->b_bufsize;
1502 					bufmallocspace -= bp->b_bufsize;
1503 					bp->b_data = bp->b_kvabase;
1504 					bp->b_bufsize = 0;
1505 					bp->b_bcount = 0;
1506 					bp->b_flags &= ~B_MALLOC;
1507 				}
1508 				return 1;
1509 			}
1510 #endif
1511 			vm_hold_free_pages(
1512 			    bp,
1513 			    (vm_offset_t) bp->b_data + newbsize,
1514 			    (vm_offset_t) bp->b_data + bp->b_bufsize);
1515 		} else if (newbsize > bp->b_bufsize) {
1516 #if !defined(NO_B_MALLOC)
1517 			/*
1518 			 * We only use malloced memory on the first allocation,
1519 			 * and revert to page-allocated memory when the buffer grows.
1520 			 */
1521 			if ( (bufmallocspace < maxbufmallocspace) &&
1522 				(bp->b_bufsize == 0) &&
1523 				(mbsize <= PAGE_SIZE/2)) {
1524 
1525 				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
1526 				bp->b_bufsize = mbsize;
1527 				bp->b_bcount = size;
1528 				bp->b_flags |= B_MALLOC;
1529 				bufspace += mbsize;
1530 				bufmallocspace += mbsize;
1531 				return 1;
1532 			}
1533 #endif
1534 			origbuf = NULL;
1535 			origbufsize = 0;
1536 #if !defined(NO_B_MALLOC)
1537 			/*
1538 			 * If the buffer is growing on its other-than-first allocation,
1539 			 * then we revert to the page-allocation scheme.
1540 			 */
1541 			if (bp->b_flags & B_MALLOC) {
1542 				origbuf = bp->b_data;
1543 				origbufsize = bp->b_bufsize;
1544 				bp->b_data = bp->b_kvabase;
1545 				bufspace -= bp->b_bufsize;
1546 				bufmallocspace -= bp->b_bufsize;
1547 				bp->b_bufsize = 0;
1548 				bp->b_flags &= ~B_MALLOC;
1549 				newbsize = round_page(newbsize);
1550 			}
1551 #endif
1552 			vm_hold_load_pages(
1553 			    bp,
1554 			    (vm_offset_t) bp->b_data + bp->b_bufsize,
1555 			    (vm_offset_t) bp->b_data + newbsize);
1556 #if !defined(NO_B_MALLOC)
1557 			if (origbuf) {
1558 				bcopy(origbuf, bp->b_data, origbufsize);
1559 				free(origbuf, M_BIOBUF);
1560 			}
1561 #endif
1562 		}
1563 	} else {
1564 		vm_page_t m;
1565 		int desiredpages;
1566 
1567 		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1568 		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);
1569 
1570 #if !defined(NO_B_MALLOC)
1571 		if (bp->b_flags & B_MALLOC)
1572 			panic("allocbuf: VMIO buffer can't be malloced");
1573 #endif
1574 
1575 		if (newbsize < bp->b_bufsize) {
1576 			if (desiredpages < bp->b_npages) {
1577 				for (i = desiredpages; i < bp->b_npages; i++) {
1578 					/*
1579 					 * the page is not freed here -- it
1580 					 * is the responsibility of vnode_pager_setsize
1581 					 */
1582 					m = bp->b_pages[i];
1583 #if defined(DIAGNOSTIC)
1584 					if (m == bogus_page)
1585 						panic("allocbuf: bogus page found");
1586 #endif
1587 					s = splvm();
1588 					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
1589 						m->flags |= PG_WANTED;
1590 						tsleep(m, PVM, "biodep", 0);
1591 					}
1592 					splx(s);
1593 
1594 					bp->b_pages[i] = NULL;
1595 					vm_page_unwire(m);
1596 				}
1597 				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
1598 				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
1599 				bp->b_npages = desiredpages;
1600 			}
1601 		} else if (newbsize > bp->b_bufsize) {
1602 			vm_object_t obj;
1603 			vm_offset_t tinc, toff;
1604 			vm_ooffset_t off;
1605 			vm_pindex_t objoff;
1606 			int pageindex, curbpnpages;
1607 			struct vnode *vp;
1608 			int bsize;
1609 
1610 			vp = bp->b_vp;
1611 
1612 			if (vp->v_type == VBLK)
1613 				bsize = DEV_BSIZE;
1614 			else
1615 				bsize = vp->v_mount->mnt_stat.f_iosize;
1616 
1617 			if (bp->b_npages < desiredpages) {
1618 				obj = vp->v_object;
1619 				tinc = PAGE_SIZE;
1620 				if (tinc > bsize)
1621 					tinc = bsize;
1622 				off = (vm_ooffset_t) bp->b_lblkno * bsize;
1623 				curbpnpages = bp->b_npages;
1624 		doretry:
1625 				bp->b_flags |= B_CACHE;
1626 				bp->b_validoff = bp->b_validend = 0;
1627 				for (toff = 0; toff < newbsize; toff += tinc) {
1628 					int bytesinpage;
1629 
1630 					pageindex = toff >> PAGE_SHIFT;
1631 					objoff = OFF_TO_IDX(off + toff);
1632 					if (pageindex < curbpnpages) {
1633 
1634 						m = bp->b_pages[pageindex];
1635 #ifdef VFS_BIO_DIAG
1636 						if (m->pindex != objoff)
1637 							panic("allocbuf: page changed offset??!!!?");
1638 #endif
1639 						bytesinpage = tinc;
1640 						if (tinc > (newbsize - toff))
1641 							bytesinpage = newbsize - toff;
1642 						if (bp->b_flags & B_CACHE)
1643 							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
1644 						continue;
1645 					}
1646 					m = vm_page_lookup(obj, objoff);
1647 					if (!m) {
1648 						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
1649 						if (!m) {
1650 							VM_WAIT;
1651 							goto doretry;
1652 						}
1653 						/*
1654 						 * Normally it is unwise to clear PG_BUSY without
1655 						 * PAGE_WAKEUP -- but it is okay here, as there is
1656 						 * no chance for blocking between here and vm_page_alloc
1657 						 */
1658 						m->flags &= ~PG_BUSY;
1659 						vm_page_wire(m);
1660 						bp->b_flags &= ~B_CACHE;
1661 					} else if (m->flags & PG_BUSY) {
1662 						s = splvm();
1663 						if (m->flags & PG_BUSY) {
1664 							m->flags |= PG_WANTED;
1665 							tsleep(m, PVM, "pgtblk", 0);
1666 						}
1667 						splx(s);
1668 						goto doretry;
1669 					} else {
1670 						if ((curproc != pageproc) &&
1671 							((m->queue - m->pc) == PQ_CACHE) &&
1672 						    ((cnt.v_free_count + cnt.v_cache_count) <
1673 								(cnt.v_free_min + cnt.v_cache_min))) {
1674 							pagedaemon_wakeup();
1675 						}
1676 						bytesinpage = tinc;
1677 						if (tinc > (newbsize - toff))
1678 							bytesinpage = newbsize - toff;
1679 						if (bp->b_flags & B_CACHE)
1680 							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
1681 						vm_page_wire(m);
1682 					}
1683 					bp->b_pages[pageindex] = m;
1684 					curbpnpages = pageindex + 1;
1685 				}
1686 				if (vp->v_tag == VT_NFS) {
1687 					if (bp->b_dirtyend > 0) {
1688 						bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
1689 						bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
1690 					}
1691 					if (bp->b_validend == 0)
1692 						bp->b_flags &= ~B_CACHE;
1693 				}
1694 				bp->b_data = (caddr_t) trunc_page(bp->b_data);
1695 				bp->b_npages = curbpnpages;
1696 				pmap_qenter((vm_offset_t) bp->b_data,
1697 					bp->b_pages, bp->b_npages);
1698 				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
1699 			}
1700 		}
1701 	}
1702 	if (bp->b_flags & B_VMIO)
1703 		vmiospace += (newbsize - bp->b_bufsize);
1704 	bufspace += (newbsize - bp->b_bufsize);
1705 	bp->b_bufsize = newbsize;
1706 	bp->b_bcount = size;
1707 	return 1;
1708 }
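
/*
 * Illustrative example of the B_MALLOC policy above: a first allocation
 * of, say, 512 bytes with 4K pages has mbsize (512) <= PAGE_SIZE/2, so
 * the data comes from malloc() and B_MALLOC is set.  If the buffer later
 * grows, the malloced contents are copied into page-backed kva and
 * B_MALLOC is cleared.
 */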
1709 
1710 /*
1711  * Wait for buffer I/O completion, returning error status.
1712  */
1713 int
1714 biowait(register struct buf * bp)
1715 {
1716 	int s;
1717 
1718 	s = splbio();
1719 	while ((bp->b_flags & B_DONE) == 0)
1720 		tsleep(bp, PRIBIO, "biowait", 0);
1721 	splx(s);
1722 	if (bp->b_flags & B_EINTR) {
1723 		bp->b_flags &= ~B_EINTR;
1724 		return (EINTR);
1725 	}
1726 	if (bp->b_flags & B_ERROR) {
1727 		return (bp->b_error ? bp->b_error : EIO);
1728 	} else {
1729 		return (0);
1730 	}
1731 }
1732 
1733 /*
1734  * Finish I/O on a buffer, calling an optional function.
1735  * This is usually called from interrupt level, so process blocking
1736  * is not *a good idea*.
1737  */
1738 void
1739 biodone(register struct buf * bp)
1740 {
1741 	int s;
1742 
1743 	s = splbio();
1744 
1745 #if !defined(MAX_PERF)
1746 	if (!(bp->b_flags & B_BUSY))
1747 		panic("biodone: buffer not busy");
1748 #endif
1749 
1750 	if (bp->b_flags & B_DONE) {
1751 		splx(s);
1752 #if !defined(MAX_PERF)
1753 		printf("biodone: buffer already done\n");
1754 #endif
1755 		return;
1756 	}
1757 	bp->b_flags |= B_DONE;
1758 
1759 	if ((bp->b_flags & B_READ) == 0) {
1760 		vwakeup(bp);
1761 	}
1762 #ifdef BOUNCE_BUFFERS
1763 	if (bp->b_flags & B_BOUNCE)
1764 		vm_bounce_free(bp);
1765 #endif
1766 
1767 	/* call optional completion function if requested */
1768 	if (bp->b_flags & B_CALL) {
1769 		bp->b_flags &= ~B_CALL;
1770 		(*bp->b_iodone) (bp);
1771 		splx(s);
1772 		return;
1773 	}
1774 	if (bp->b_flags & B_VMIO) {
1775 		int i, resid;
1776 		vm_ooffset_t foff;
1777 		vm_page_t m;
1778 		vm_object_t obj;
1779 		int iosize;
1780 		struct vnode *vp = bp->b_vp;
1781 
1782 		if (vp->v_type == VBLK)
1783 			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
1784 		else
1785 			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1786 		obj = vp->v_object;
1787 #if !defined(MAX_PERF)
1788 		if (!obj) {
1789 			panic("biodone: no object");
1790 		}
1791 #endif
1792 #if defined(VFS_BIO_DEBUG)
1793 		if (obj->paging_in_progress < bp->b_npages) {
1794 			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
1795 			    obj->paging_in_progress, bp->b_npages);
1796 		}
1797 #endif
1798 		iosize = bp->b_bufsize;
1799 		for (i = 0; i < bp->b_npages; i++) {
1800 			int bogusflag = 0;
1801 			m = bp->b_pages[i];
1802 			if (m == bogus_page) {
1803 				bogusflag = 1;
1804 				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
1805 				if (!m) {
1806 #if defined(VFS_BIO_DEBUG)
1807 					printf("biodone: page disappeared\n");
1808 #endif
1809 					--obj->paging_in_progress;
1810 					continue;
1811 				}
1812 				bp->b_pages[i] = m;
1813 				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1814 			}
1815 #if defined(VFS_BIO_DEBUG)
1816 			if (OFF_TO_IDX(foff) != m->pindex) {
1817 				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
1818 			}
1819 #endif
1820 			resid = IDX_TO_OFF(m->pindex + 1) - foff;
1821 			if (resid > iosize)
1822 				resid = iosize;
1823 			/*
1824 			 * In the write case, the valid and clean bits are
1825 			 * already changed correctly, so we only need to do this
1826 			 * here in the read case.
1827 			 */
1828 			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
1829 				vfs_page_set_valid(bp, foff, i, m);
1830 			}
1831 
1832 			/*
1833 			 * when debugging new filesystems or buffer I/O methods, this
1834 			 * is the most common error that pops up.  If you see this, you
1835 			 * have not set the page busy flag correctly!!!
1836 			 */
1837 			if (m->busy == 0) {
1838 #if !defined(MAX_PERF)
1839 				printf("biodone: page busy < 0, "
1840 				    "pindex: %d, foff: 0x(%x,%x), "
1841 				    "resid: %d, index: %d\n",
1842 				    (int) m->pindex, (int)(foff >> 32),
1843 						(int) foff & 0xffffffff, resid, i);
1844 #endif
1845 				if (vp->v_type != VBLK)
1846 #if !defined(MAX_PERF)
1847 					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
1848 					    bp->b_vp->v_mount->mnt_stat.f_iosize,
1849 					    (int) bp->b_lblkno,
1850 					    bp->b_flags, bp->b_npages);
1851 				else
1852 					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
1853 					    (int) bp->b_lblkno,
1854 					    bp->b_flags, bp->b_npages);
1855 				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
1856 				    m->valid, m->dirty, m->wire_count);
1857 #endif
1858 				panic("biodone: page busy < 0\n");
1859 			}
1860 			--m->busy;
1861 			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
1862 				m->flags &= ~PG_WANTED;
1863 				wakeup(m);
1864 			}
1865 			--obj->paging_in_progress;
1866 			foff += resid;
1867 			iosize -= resid;
1868 		}
1869 		if (obj && obj->paging_in_progress == 0 &&
1870 		    (obj->flags & OBJ_PIPWNT)) {
1871 			obj->flags &= ~OBJ_PIPWNT;
1872 			wakeup(obj);
1873 		}
1874 	}
1875 	/*
1876 	 * For asynchronous completions, release the buffer now. The brelse
1877 	 * checks for B_WANTED and will do the wakeup there if necessary - so
1878 	 * no need to do a wakeup here in the async case.
1879 	 */
1880 
1881 	if (bp->b_flags & B_ASYNC) {
1882 		if ((bp->b_flags & B_ORDERED) == 0) {
1883 			if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
1884 				brelse(bp);
1885 			else
1886 				bqrelse(bp);
1887 		}
1888 	} else {
1889 		bp->b_flags &= ~B_WANTED;
1890 		wakeup(bp);
1891 	}
1892 	splx(s);
1893 }
1894 
1895 int
1896 count_lock_queue()
1897 {
1898 	int count;
1899 	struct buf *bp;
1900 
1901 	count = 0;
1902 	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
1903 	    bp != NULL;
1904 	    bp = TAILQ_NEXT(bp, b_freelist))
1905 		count++;
1906 	return (count);
1907 }
1908 
1909 int vfs_update_interval = 30;
1910 
1911 static void
1912 vfs_update()
1913 {
1914 	while (1) {
1915 		tsleep(&vfs_update_wakeup, PUSER, "update",
1916 		    hz * vfs_update_interval);
1917 		vfs_update_wakeup = 0;
1918 		sync(curproc, NULL, NULL);
1919 	}
1920 }
1921 
1922 static int
1923 sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
1924 {
1925 	int error = sysctl_handle_int(oidp,
1926 		oidp->oid_arg1, oidp->oid_arg2, req);
1927 	if (!error)
1928 		wakeup(&vfs_update_wakeup);
1929 	return error;
1930 }
1931 
1932 SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
1933 	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
1934 
1935 
1936 /*
1937  * This routine is called in lieu of iodone in the case of
1938  * incomplete I/O.  This keeps the busy status for pages
1939  * consistent.
1940  */
1941 void
1942 vfs_unbusy_pages(struct buf * bp)
1943 {
1944 	int i;
1945 
1946 	if (bp->b_flags & B_VMIO) {
1947 		struct vnode *vp = bp->b_vp;
1948 		vm_object_t obj = vp->v_object;
1949 		vm_ooffset_t foff;
1950 
1951 		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1952 
1953 		for (i = 0; i < bp->b_npages; i++) {
1954 			vm_page_t m = bp->b_pages[i];
1955 
1956 			if (m == bogus_page) {
1957 				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
1958 #if !defined(MAX_PERF)
1959 				if (!m) {
1960 					panic("vfs_unbusy_pages: page missing\n");
1961 				}
1962 #endif
1963 				bp->b_pages[i] = m;
1964 				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1965 			}
1966 			--obj->paging_in_progress;
1967 			--m->busy;
1968 			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
1969 				m->flags &= ~PG_WANTED;
1970 				wakeup(m);
1971 			}
1972 		}
1973 		if (obj->paging_in_progress == 0 &&
1974 		    (obj->flags & OBJ_PIPWNT)) {
1975 			obj->flags &= ~OBJ_PIPWNT;
1976 			wakeup(obj);
1977 		}
1978 	}
1979 }
1980 
1981 /*
1982  * Set NFS' b_validoff and b_validend fields from the valid bits
1983  * of a page.  If the consumer is not NFS, and the page is not
1984  * valid for the entire range, clear the B_CACHE flag to force
1985  * the consumer to re-read the page.
1986  */
1987 static void
1988 vfs_buf_set_valid(struct buf *bp,
1989 		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
1990 		  vm_page_t m)
1991 {
1992 	if (bp->b_vp->v_tag == VT_NFS) {
1993 		vm_offset_t svalid, evalid;
1994 		int validbits = m->valid;
1995 
1996 		/*
1997 		 * This only bothers with the first valid range in the
1998 		 * page.
1999 		 */
2000 		svalid = off;
2001 		while (validbits && !(validbits & 1)) {
2002 			svalid += DEV_BSIZE;
2003 			validbits >>= 1;
2004 		}
2005 		evalid = svalid;
2006 		while (validbits & 1) {
2007 			evalid += DEV_BSIZE;
2008 			validbits >>= 1;
2009 		}
2010 		/*
2011 		 * Make sure this range is contiguous with the range
2012 		 * built up from previous pages.  If not, then we will
2013 		 * just use the range from the previous pages.
2014 		 */
2015 		if (svalid == bp->b_validend) {
2016 			bp->b_validoff = min(bp->b_validoff, svalid);
2017 			bp->b_validend = max(bp->b_validend, evalid);
2018 		}
2019 	} else if (!vm_page_is_valid(m,
2020 				     (vm_offset_t) ((foff + off) & PAGE_MASK),
2021 				     size)) {
2022 		bp->b_flags &= ~B_CACHE;
2023 	}
2024 }
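
/*
 * Worked example for the scan above (DEV_BSIZE == 512): with
 * m->valid == 0x3c (binary 00111100), blocks 2-5 of the page are
 * valid, so svalid == off + 2 * DEV_BSIZE and evalid == off +
 * 6 * DEV_BSIZE; the range [svalid, evalid) is then merged into
 * b_validoff/b_validend only if it abuts the range built so far.
 */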
2025 
2026 /*
2027  * Set the valid bits in a page, taking care of the b_validoff,
2028  * b_validend fields which NFS uses to optimise small reads.  Off is
2029  * the offset within the file and pageno is the page index within the buf.
2030  */
2031 static void
2032 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
2033 {
2034 	struct vnode *vp = bp->b_vp;
2035 	vm_ooffset_t soff, eoff;
2036 
2037 	soff = off;
2038 	eoff = off + min(PAGE_SIZE, bp->b_bufsize);
2039 	vm_page_set_invalid(m,
2040 			    (vm_offset_t) (soff & PAGE_MASK),
2041 			    (vm_offset_t) (eoff - soff));
2042 	if (vp->v_tag == VT_NFS) {
2043 		vm_ooffset_t sv, ev;
2044 		off = off - pageno * PAGE_SIZE;
2045 		sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
2046 		ev = off + (bp->b_validend & ~(DEV_BSIZE - 1));
2047 		soff = max(sv, soff);
2048 		eoff = min(ev, eoff);
2049 	}
2050 	if (eoff > soff)
2051 		vm_page_set_validclean(m,
2052 				       (vm_offset_t) (soff & PAGE_MASK),
2053 				       (vm_offset_t) (eoff - soff));
2054 }
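
/*
 * Worked example (illustrative; DEV_BSIZE == 512, PAGE_SIZE == 4096):
 * for pageno == 1 of an 8K NFS buffer with b_validoff == 700 and
 * b_validend == 6000, sv rounds b_validoff up to buffer offset 1024
 * and ev rounds b_validend down to 5632, so only buffer bytes
 * [4096, 5632) - the first 1536 bytes of this page - are marked
 * valid and clean.
 */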
2055 
2056 /*
2057  * This routine is called before a device strategy routine.
2058  * It is used to tell the VM system that paging I/O is in
2059  * progress, and treat the pages associated with the buffer
2060  * almost as being PG_BUSY.  The object's paging_in_progress
2061  * count is also maintained to make sure that the object doesn't
2062  * become inconsistent.
2063  */
2064 void
2065 vfs_busy_pages(struct buf * bp, int clear_modify)
2066 {
2067 	int i;
2068 
2069 	if (bp->b_flags & B_VMIO) {
2070 		struct vnode *vp = bp->b_vp;
2071 		vm_object_t obj = vp->v_object;
2072 		vm_ooffset_t foff;
2073 
2074 		if (vp->v_type == VBLK)
2075 			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
2076 		else
2077 			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
2078 		vfs_setdirty(bp);
2079 		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
2080 			vm_page_t m = bp->b_pages[i];
2081 
2082 			if ((bp->b_flags & B_CLUSTER) == 0) {
2083 				obj->paging_in_progress++;
2084 				m->busy++;
2085 			}
2086 			vm_page_protect(m, VM_PROT_NONE);
2087 			if (clear_modify)
2088 				vfs_page_set_valid(bp, foff, i, m);
2089 			else if (bp->b_bcount >= PAGE_SIZE) {
2090 				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
2091 					bp->b_pages[i] = bogus_page;
2092 					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
2093 				}
2094 			}
2095 		}
2096 	}
2097 }
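
/*
 * Typical bracketing (a sketch; compare bwrite() and bread() in this
 * file):
 *
 *	vfs_busy_pages(bp, 1);		write: pages are marked clean
 *	VOP_STRATEGY(bp);
 *	error = biowait(bp);		biodone() unbusies the pages
 *
 * For reads, clear_modify is 0 and already-valid pages are remapped
 * to bogus_page so the device cannot overwrite cached data.
 */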
2098 
2099 /*
2100  * Tell the VM system that the pages associated with this buffer
2101  * are clean.  This is used for delayed writes, where the data
2102  * will eventually go to disk without additional VM intervention.
2103  */
2104 void
2105 vfs_clean_pages(struct buf * bp)
2106 {
2107 	int i;
2108 
2109 	if (bp->b_flags & B_VMIO) {
2110 		struct vnode *vp = bp->b_vp;
2111 		vm_object_t obj = vp->v_object;
2112 		vm_ooffset_t foff;
2113 
2114 		if (vp->v_type == VBLK)
2115 			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
2116 		else
2117 			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
2118 		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
2119 			vm_page_t m = bp->b_pages[i];
2120 
2121 			vfs_page_set_valid(bp, foff, i, m);
2122 		}
2123 	}
2124 }
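
/*
 * Illustrative sequence (simplified; see the delayed-write path,
 * bdwrite(), for the real thing):
 *
 *	bp->b_flags |= B_DELWRI;	mark for later writing
 *	vfs_clean_pages(bp);		VM will not also try to flush it
 *	bqrelse(bp);
 */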
2125 
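/*
 * Zero the invalid portions of a buffer.  For VMIO buffers the page
 * valid bits (one bit per DEV_BSIZE chunk) are consulted so that
 * chunks already holding valid data are not zeroed; e.g. a 2048-byte
 * buffer on a single page builds mask == 0x0f below, one bit per
 * 512-byte block.  Non-VMIO buffers are simply cleared with clrbuf().
 */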
2126 void
2127 vfs_bio_clrbuf(struct buf *bp) {
2128 	int i;
2129 	if (bp->b_flags & B_VMIO) {
2130 		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
2131 			int mask;
2132 			mask = 0;
2133 			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
2134 				mask |= (1 << (i / DEV_BSIZE));
2135 			if (bp->b_pages[0]->valid != mask) {
2136 				bzero(bp->b_data, bp->b_bufsize);
2137 			}
2138 			bp->b_pages[0]->valid = mask;
2139 			bp->b_resid = 0;
2140 			return;
2141 		}
2142 		for (i = 0; i < bp->b_npages; i++) {
2143 			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
2144 				continue;
2145 			if (bp->b_pages[i]->valid == 0) {
2146 				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
2147 					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
2148 				}
2149 			} else {
2150 				int j;
2151 				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
2152 					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
2153 						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
2154 				}
2155 			}
2156 			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
2157 		}
2158 		bp->b_resid = 0;
2159 	} else {
2160 		clrbuf(bp);
2161 	}
2162 }
2163 
2164 /*
2165  * vm_hold_load_pages and vm_hold_free_pages get pages into
2166  * a buffer's address space.  The pages are anonymous and are
2167  * not associated with a file object.
2168  */
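
/*
 * Sketch of the expected use (illustrative): allocbuf() grows a
 * non-VMIO buffer by loading anonymous pages behind its kva, e.g.
 *
 *	vm_hold_load_pages(bp, (vm_offset_t)bp->b_data + bp->b_bufsize,
 *	    (vm_offset_t)bp->b_data + newbsize);
 *
 * and shrinks it again with the matching vm_hold_free_pages() call.
 */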
2169 void
2170 vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
2171 {
2172 	vm_offset_t pg;
2173 	vm_page_t p;
2174 	int index;
2175 
2176 	to = round_page(to);
2177 	from = round_page(from);
2178 	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;
2179 
2180 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
2181 
2182 tryagain:
2183 
2184 		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
2185 		    VM_ALLOC_NORMAL);
2186 		if (!p) {
2187 			VM_WAIT;
2188 			goto tryagain;
2189 		}
2190 		vm_page_wire(p);
2191 		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
2192 		bp->b_pages[index] = p;
2193 		PAGE_WAKEUP(p);
2194 	}
2195 	bp->b_npages = index;		/* page count, not a kva page number */
2196 }
2197 
2198 void
2199 vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
2200 {
2201 	vm_offset_t pg;
2202 	vm_page_t p;
2203 	int index;
2204 
2205 	from = round_page(from);
2206 	to = round_page(to);
2207 	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;
2208 
2209 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
2210 		p = bp->b_pages[index];
2211 		if (p && (index < bp->b_npages)) {
2212 #if !defined(MAX_PERF)
2213 			if (p->busy) {
2214 				printf("vm_hold_free_pages: freeing busy page, blkno: %d, lblkno: %d\n",
2215 					bp->b_blkno, bp->b_lblkno);
2216 			}
2217 #endif
2218 			bp->b_pages[index] = NULL;
2219 			pmap_kremove(pg);
2220 			vm_page_unwire(p);
2221 			vm_page_free(p);
2222 		}
2223 	}
2224 	bp->b_npages = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;
2225 }
2226 
2227 
2228 #include "opt_ddb.h"
2229 #ifdef DDB
2230 #include <ddb/ddb.h>
2231 
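/*
 * ddb(4) command: from the debugger prompt, "show buffer <addr>"
 * decodes b_flags and prints the basic fields of the given buf.
 */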
2232 DB_SHOW_COMMAND(buffer, db_show_buffer)
2233 {
2234 	/* get args */
2235 	struct buf *bp = (struct buf *)addr;
2236 
2237 	if (!have_addr) {
2238 		db_printf("usage: show buffer <addr>\n");
2239 		return;
2240 	}
2241 
2242 	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
2243 		  bp->b_flags, "\20\40bounce\37cluster\36vmio\35ram\34ordered"
2244 		  "\33paging\32xxx\31writeinprog\30wanted\27relbuf\26tape"
2245 		  "\25read\24raw\23phys\22clusterok\21malloc\20nocache"
2246 		  "\17locked\16inval\15gathered\14error\13eintr\12done\11dirty"
2247 		  "\10delwri\7call\6cache\5busy\4bad\3async\2needcommit\1age");
2248 	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
2249 		  "b_resid = %ld\nb_dev = 0x%x, b_un.b_addr = %p, "
2250 		  "b_blkno = %d, b_pblkno = %d\n",
2251 		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
2252 		  bp->b_dev, bp->b_un.b_addr, bp->b_blkno, bp->b_pblkno);
2253 }
2254 #endif /* DDB */
2255