xref: /freebsd/sys/kern/vfs_bio.c (revision c6ec7d31830ab1c80edae95ad5e4b9dba10c47ac)
1 /*-
2  * Copyright (c) 2004 Poul-Henning Kamp
3  * Copyright (c) 1994,1997 John S. Dyson
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 /*
29  * This file contains a new buffer I/O scheme implementing a coherent
30  * VM object and buffer cache scheme.  Pains have been taken to make
31  * sure that the performance degradation associated with such schemes
32  * is not realized.
33  *
34  * Author:  John S. Dyson
35  * Significant help during the development and debugging phases
36  * was provided by David Greenman, also of the FreeBSD core team.
37  *
38  * See buf(9) for more info.
39  */
40 
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/bio.h>
47 #include <sys/conf.h>
48 #include <sys/buf.h>
49 #include <sys/devicestat.h>
50 #include <sys/eventhandler.h>
51 #include <sys/fail.h>
52 #include <sys/limits.h>
53 #include <sys/lock.h>
54 #include <sys/malloc.h>
55 #include <sys/mount.h>
56 #include <sys/mutex.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/proc.h>
60 #include <sys/resourcevar.h>
61 #include <sys/sysctl.h>
62 #include <sys/vmmeter.h>
63 #include <sys/vnode.h>
64 #include <geom/geom.h>
65 #include <vm/vm.h>
66 #include <vm/vm_param.h>
67 #include <vm/vm_kern.h>
68 #include <vm/vm_pageout.h>
69 #include <vm/vm_page.h>
70 #include <vm/vm_object.h>
71 #include <vm/vm_extern.h>
72 #include <vm/vm_map.h>
73 #include "opt_compat.h"
74 #include "opt_directio.h"
75 #include "opt_swap.h"
76 
77 static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
78 
79 struct	bio_ops bioops;		/* I/O operation notification */
80 
81 struct	buf_ops buf_ops_bio = {
82 	.bop_name	=	"buf_ops_bio",
83 	.bop_write	=	bufwrite,
84 	.bop_strategy	=	bufstrategy,
85 	.bop_sync	=	bufsync,
86 	.bop_bdflush	=	bufbdflush,
87 };
88 
89 /*
90  * XXX buf is global because kern_shutdown.c and ffs_checkoverlap have
91  * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
92  */
93 struct buf *buf;		/* buffer header pool */
94 
95 static struct proc *bufdaemonproc;
96 
97 static int inmem(struct vnode *vp, daddr_t blkno);
98 static void vm_hold_free_pages(struct buf *bp, int newbsize);
99 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
100 		vm_offset_t to);
101 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
102 static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
103 		vm_page_t m);
104 static void vfs_drain_busy_pages(struct buf *bp);
105 static void vfs_clean_pages_dirty_buf(struct buf *bp);
106 static void vfs_setdirty_locked_object(struct buf *bp);
107 static void vfs_vmio_release(struct buf *bp);
108 static int vfs_bio_clcheck(struct vnode *vp, int size,
109 		daddr_t lblkno, daddr_t blkno);
110 static int buf_do_flush(struct vnode *vp);
111 static int flushbufqueues(struct vnode *, int, int);
112 static void buf_daemon(void);
113 static void bremfreel(struct buf *bp);
114 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
115     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
116 static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
117 #endif
118 
119 int vmiodirenable = TRUE;
120 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
121     "Use the VM system for directory writes");
122 long runningbufspace;
123 SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
124     "Amount of presently outstanding async buffer I/O");
125 static long bufspace;
126 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
127     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
128 SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
129     &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers");
130 #else
131 SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
132     "Virtual memory used for buffers");
133 #endif
134 static long maxbufspace;
135 SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
136     "Maximum allowed value of bufspace (including buf_daemon)");
137 static long bufmallocspace;
138 SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
139     "Amount of malloced memory for buffers");
140 static long maxbufmallocspace;
141 SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
142     "Maximum amount of malloced memory for buffers");
143 static long lobufspace;
144 SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
145     "Minimum amount of buffer space we want to have");
146 long hibufspace;
147 SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
148     "Maximum allowed value of bufspace (excluding buf_daemon)");
149 static int bufreusecnt;
150 SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
151     "Number of times we have reused a buffer");
152 static int buffreekvacnt;
153 SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
154     "Number of times we have freed the KVA space from some buffer");
155 static int bufdefragcnt;
156 SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
157     "Number of times we have had to repeat buffer allocation to defragment");
158 static long lorunningspace;
159 SYSCTL_LONG(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
160     "Minimum preferred space used for in-progress I/O");
161 static long hirunningspace;
162 SYSCTL_LONG(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
163     "Maximum amount of space to use for in-progress I/O");
164 int dirtybufferflushes;
165 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
166     0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
167 int bdwriteskip;
168 SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
169     0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
170 int altbufferflushes;
171 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
172     0, "Number of fsync flushes to limit dirty buffers");
173 static int recursiveflushes;
174 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
175     0, "Number of flushes skipped due to being recursive");
176 static int numdirtybuffers;
177 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
178     "Number of buffers that are dirty (have unwritten changes) at the moment");
179 static int lodirtybuffers;
180 SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
181     "How many buffers we want to have free before bufdaemon can sleep");
182 static int hidirtybuffers;
183 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
184     "When the number of dirty buffers is considered severe");
185 int dirtybufthresh;
186 SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
187     0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
188 static int numfreebuffers;
189 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
190     "Number of free buffers");
191 static int lofreebuffers;
192 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
193    "XXX Unused");
194 static int hifreebuffers;
195 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
196    "XXX Complicatedly unused");
197 static int getnewbufcalls;
198 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
199    "Number of calls to getnewbuf");
200 static int getnewbufrestarts;
201 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
202     "Number of times getnewbuf has had to restart a buffer acquisition");
203 static int flushbufqtarget = 100;
204 SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
205     "Amount of work to do in flushbufqueues when helping bufdaemon");
206 static long notbufdflashes;
207 SYSCTL_LONG(_vfs, OID_AUTO, notbufdflashes, CTLFLAG_RD, &notbufdflashes, 0,
208     "Number of dirty buffer flushes done by the bufdaemon helpers");
209 
210 /*
211  * Wakeup point for bufdaemon, as well as indicator of whether it is already
212  * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
213  * is idling.
214  */
215 static int bd_request;
216 
217 /*
218  * Request for the buf daemon to write more buffers than is indicated by
219  * lodirtybuffers.  This may be necessary to push out excess dependencies or
220  * defragment the address space where a simple count of the number of dirty
221  * buffers is insufficient to characterize the demand for flushing them.
222  */
223 static int bd_speedupreq;
224 
225 /*
226  * This lock synchronizes access to bd_request.
227  */
228 static struct mtx bdlock;
229 
230 /*
231  * bogus page -- for I/O to/from partially complete buffers.
232  * This is a temporary solution to the problem, but it is not
233  * really that bad.  It would be better to split the buffer
234  * for input in the case of buffers partially already in memory,
235  * but the code is intricate enough already.
236  */
237 vm_page_t bogus_page;
238 
239 /*
240  * Synchronization (sleep/wakeup) variable for active buffer space requests.
241  * Set when wait starts, cleared prior to wakeup().
242  * Used in runningbufwakeup() and waitrunningbufspace().
243  */
244 static int runningbufreq;
245 
246 /*
247  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
248  * waitrunningbufspace().
249  */
250 static struct mtx rbreqlock;
251 
252 /*
253  * Synchronization (sleep/wakeup) variable for buffer requests.
254  * Can contain the VFS_BIO_NEED flags defined below; setting and clearing are
255  * done with bitwise AND/OR.
256  * Used in numdirtywakeup(), bufspacewakeup(), bufcountwakeup(), bwillwrite(),
257  * getnewbuf(), and getblk().
258  */
259 static int needsbuffer;
260 
261 /*
262  * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
263  */
264 static struct mtx nblock;
265 
266 /*
267  * Definitions for the buffer free lists.
268  */
269 #define BUFFER_QUEUES	5	/* number of free buffer queues */
270 
271 #define QUEUE_NONE	0	/* on no queue */
272 #define QUEUE_CLEAN	1	/* non-B_DELWRI buffers */
273 #define QUEUE_DIRTY	2	/* B_DELWRI buffers */
274 #define QUEUE_EMPTYKVA	3	/* empty buffer headers w/KVA assignment */
275 #define QUEUE_EMPTY	4	/* empty buffer headers */
276 #define QUEUE_SENTINEL	1024	/* not a queue index, but a sentinel marker */
277 
278 /* Queues for free buffers with various properties */
279 static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
280 
281 /* Lock for the bufqueues */
282 static struct mtx bqlock;
283 
284 /*
285  * Single global constant for BUF_WMESG, to avoid getting multiple references.
286  * buf_wmesg is referenced from macros.
287  */
288 const char *buf_wmesg = BUF_WMESG;
289 
290 #define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
291 #define VFS_BIO_NEED_DIRTYFLUSH	0x02	/* waiting for dirty buffer flush */
292 #define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
293 #define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
294 
295 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
296     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
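/*
 * Compatibility handler for the vfs.bufspace sysctl: old binaries only
 * provide room for an int.  Hand back an int when the long value fits;
 * otherwise fall through to the long handler so the short request
 * fails with ENOMEM instead of silently truncating.
 */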
297 static int
298 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
299 {
300 	long lvalue;
301 	int ivalue;
302 
303 	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
304 		return (sysctl_handle_long(oidp, arg1, arg2, req));
305 	lvalue = *(long *)arg1;
306 	if (lvalue > INT_MAX)
307 		/* On overflow, still write out a long to trigger ENOMEM. */
308 		return (sysctl_handle_long(oidp, &lvalue, 0, req));
309 	ivalue = lvalue;
310 	return (sysctl_handle_int(oidp, &ivalue, 0, req));
311 }
312 #endif
313 
314 #ifdef DIRECTIO
315 extern void ffs_rawread_setup(void);
316 #endif /* DIRECTIO */
317 /*
318  *	numdirtywakeup:
319  *
320  *	If someone is blocked due to there being too many dirty buffers,
321  *	and numdirtybuffers is now reasonable, wake them up.
322  */
323 
324 static __inline void
325 numdirtywakeup(int level)
326 {
327 
328 	if (numdirtybuffers <= level) {
329 		mtx_lock(&nblock);
330 		if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
331 			needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
332 			wakeup(&needsbuffer);
333 		}
334 		mtx_unlock(&nblock);
335 	}
336 }
337 
338 /*
339  *	bufspacewakeup:
340  *
341  *	Called when buffer space is potentially available for recovery.
342  *	getnewbuf() will block on this flag when it is unable to free
343  *	sufficient buffer space.  Buffer space becomes recoverable when
344  *	bp's get placed back in the queues.
345  */
346 
347 static __inline void
348 bufspacewakeup(void)
349 {
350 
351 	/*
352 	 * If someone is waiting for BUF space, wake them up.  Even
353 	 * though we haven't freed the kva space yet, the waiting
354 	 * process will be able to now.
355 	 */
356 	mtx_lock(&nblock);
357 	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
358 		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
359 		wakeup(&needsbuffer);
360 	}
361 	mtx_unlock(&nblock);
362 }
363 
364 /*
365  * runningbufwakeup() - in-progress I/O accounting.  Subtract the buffer's
366  * contribution from runningbufspace and wake waiters once it falls to lorunningspace.
367  */
368 void
369 runningbufwakeup(struct buf *bp)
370 {
371 
372 	if (bp->b_runningbufspace) {
373 		atomic_subtract_long(&runningbufspace, bp->b_runningbufspace);
374 		bp->b_runningbufspace = 0;
375 		mtx_lock(&rbreqlock);
376 		if (runningbufreq && runningbufspace <= lorunningspace) {
377 			runningbufreq = 0;
378 			wakeup(&runningbufreq);
379 		}
380 		mtx_unlock(&rbreqlock);
381 	}
382 }
383 
384 /*
385  *	bufcountwakeup:
386  *
387  *	Called when a buffer has been added to one of the free queues to
388  *	account for the buffer and to wakeup anyone waiting for free buffers.
389  *	This typically occurs when large amounts of metadata are being handled
390  *	by the buffer cache ( else buffer space runs out first, usually ).
391  */
392 
393 static __inline void
394 bufcountwakeup(struct buf *bp)
395 {
396 	int old;
397 
398 	KASSERT((bp->b_vflags & BV_INFREECNT) == 0,
399 	    ("buf %p already counted as free", bp));
400 	if (bp->b_bufobj != NULL)
401 		mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
402 	bp->b_vflags |= BV_INFREECNT;
403 	old = atomic_fetchadd_int(&numfreebuffers, 1);
404 	KASSERT(old >= 0 && old < nbuf,
405 	    ("numfreebuffers climbed to %d", old + 1));
406 	mtx_lock(&nblock);
407 	if (needsbuffer) {
408 		needsbuffer &= ~VFS_BIO_NEED_ANY;
409 		if (numfreebuffers >= hifreebuffers)
410 			needsbuffer &= ~VFS_BIO_NEED_FREE;
411 		wakeup(&needsbuffer);
412 	}
413 	mtx_unlock(&nblock);
414 }
415 
416 /*
417  *	waitrunningbufspace()
418  *
419  *	runningbufspace is a measure of the amount of I/O currently
420  *	running.  This routine is used in async-write situations to
421  *	prevent creating huge backups of pending writes to a device.
422  *	Only asynchronous writes are governed by this function.
423  *
424  *	Reads will adjust runningbufspace, but will not block based on it.
425  *	The read load has a side effect of reducing the allowed write load.
426  *
427  *	This does NOT turn an async write into a sync write.  It waits
428  *	for earlier writes to complete and generally returns before the
429  *	caller's write has reached the device.
430  */
431 void
432 waitrunningbufspace(void)
433 {
434 
435 	mtx_lock(&rbreqlock);
436 	while (runningbufspace > hirunningspace) {
437 		++runningbufreq;
438 		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
439 	}
440 	mtx_unlock(&rbreqlock);
441 }
442 
443 
444 /*
445  *	vfs_buf_test_cache:
446  *
447  *	Called when a buffer is extended.  This function clears the B_CACHE
448  *	bit if the newly extended portion of the buffer does not contain
449  *	valid data.
450  */
451 static __inline
452 void
453 vfs_buf_test_cache(struct buf *bp,
454 		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
455 		  vm_page_t m)
456 {
457 
458 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
459 	if (bp->b_flags & B_CACHE) {
460 		int base = (foff + off) & PAGE_MASK;
461 		if (vm_page_is_valid(m, base, size) == 0)
462 			bp->b_flags &= ~B_CACHE;
463 	}
464 }
465 
466 /* Wake up the buffer daemon if necessary */
467 static __inline
468 void
469 bd_wakeup(int dirtybuflevel)
470 {
471 
472 	mtx_lock(&bdlock);
473 	if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
474 		bd_request = 1;
475 		wakeup(&bd_request);
476 	}
477 	mtx_unlock(&bdlock);
478 }
479 
480 /*
481  * bd_speedup - speed up the buffer cache flushing code
482  */
483 
484 void
485 bd_speedup(void)
486 {
487 	int needwake;
488 
489 	mtx_lock(&bdlock);
490 	needwake = 0;
491 	if (bd_speedupreq == 0 || bd_request == 0)
492 		needwake = 1;
493 	bd_speedupreq = 1;
494 	bd_request = 1;
495 	if (needwake)
496 		wakeup(&bd_request);
497 	mtx_unlock(&bdlock);
498 }
499 
500 /*
501  * Calculate buffer cache scaling values and reserve space for buffer
502  * headers.  This is called during low-level kernel initialization and
503  * may be called more than once.  We CANNOT write to the memory area
504  * being reserved at this time.
505  */
506 caddr_t
507 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
508 {
509 	int tuned_nbuf;
510 	long maxbuf;
511 
512 	/*
513 	 * physmem_est is in pages.  Convert it to kilobytes (assumes
514 	 * PAGE_SIZE is >= 1K)
515 	 */
516 	physmem_est = physmem_est * (PAGE_SIZE / 1024);
517 
518 	/*
519 	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
520 	 * For the first 64MB of ram nominally allocate sufficient buffers to
521 	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
522 	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
523 	 * the buffer cache we limit the eventual kva reservation to
524 	 * maxbcache bytes.
525 	 *
526 	 * factor represents the 1/4 x ram conversion.
527 	 */
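	/*
	 * Worked example (assuming BKVASIZE is 16 KiB, so factor is 64):
	 * on a machine with 512 MB of RAM, physmem_est is about 524288 KB,
	 * giving nbuf = 50 + 1024 + 2867, i.e. roughly 3900 buffers, or a
	 * bit over 60 MiB of buffer KVA before the maxbcache cap applies.
	 */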
528 	if (nbuf == 0) {
529 		int factor = 4 * BKVASIZE / 1024;
530 
531 		nbuf = 50;
532 		if (physmem_est > 4096)
533 			nbuf += min((physmem_est - 4096) / factor,
534 			    65536 / factor);
535 		if (physmem_est > 65536)
536 			nbuf += (physmem_est - 65536) * 2 / (factor * 5);
537 
538 		if (maxbcache && nbuf > maxbcache / BKVASIZE)
539 			nbuf = maxbcache / BKVASIZE;
540 		tuned_nbuf = 1;
541 	} else
542 		tuned_nbuf = 0;
543 
544 	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
545 	maxbuf = (LONG_MAX / 3) / BKVASIZE;
546 	if (nbuf > maxbuf) {
547 		if (!tuned_nbuf)
548 			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
549 			    maxbuf);
550 		nbuf = maxbuf;
551 	}
552 
553 	/*
554 	 * swbufs are used as temporary holders for I/O, such as paging I/O.
555 	 * We have no fewer than 16 and no more than 256.
556 	 */
557 	nswbuf = max(min(nbuf/4, 256), 16);
558 #ifdef NSWBUF_MIN
559 	if (nswbuf < NSWBUF_MIN)
560 		nswbuf = NSWBUF_MIN;
561 #endif
562 #ifdef DIRECTIO
563 	ffs_rawread_setup();
564 #endif
565 
566 	/*
567 	 * Reserve space for the buffer cache buffers
568 	 */
569 	swbuf = (void *)v;
570 	v = (caddr_t)(swbuf + nswbuf);
571 	buf = (void *)v;
572 	v = (caddr_t)(buf + nbuf);
573 
574 	return(v);
575 }
576 
577 /* Initialize the buffer subsystem.  Called before use of any buffers. */
578 void
579 bufinit(void)
580 {
581 	struct buf *bp;
582 	int i;
583 
584 	mtx_init(&bqlock, "buf queue lock", NULL, MTX_DEF);
585 	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
586 	mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
587 	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
588 
589 	/* next, make a null set of free lists */
590 	for (i = 0; i < BUFFER_QUEUES; i++)
591 		TAILQ_INIT(&bufqueues[i]);
592 
593 	/* finally, initialize each buffer header and stick on empty q */
594 	for (i = 0; i < nbuf; i++) {
595 		bp = &buf[i];
596 		bzero(bp, sizeof *bp);
597 		bp->b_flags = B_INVAL;	/* we're just an empty header */
598 		bp->b_rcred = NOCRED;
599 		bp->b_wcred = NOCRED;
600 		bp->b_qindex = QUEUE_EMPTY;
601 		bp->b_vflags = BV_INFREECNT;	/* buf is counted as free */
602 		bp->b_xflags = 0;
603 		LIST_INIT(&bp->b_dep);
604 		BUF_LOCKINIT(bp);
605 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
606 	}
607 
608 	/*
609 	 * maxbufspace is the absolute maximum amount of buffer space we are
610 	 * allowed to reserve in KVM and in real terms.  The absolute maximum
611 	 * is nominally used by buf_daemon.  hibufspace is the nominal maximum
612 	 * used by most other processes.  The differential is required to
613 	 * ensure that buf_daemon is able to run when other processes might
614 	 * be blocked waiting for buffer space.
615 	 *
616 	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
617 	 * this may result in KVM fragmentation which is not handled optimally
618 	 * by the system.
619 	 */
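	/*
	 * Continuing the sizing example above (nbuf of roughly 3900 and a
	 * 16 KiB BKVASIZE), maxbufspace comes to a bit over 60 MiB,
	 * hibufspace sits 10 * MAXBSIZE (640 KiB) below it, and lobufspace
	 * one MAXBSIZE below hibufspace.
	 */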
620 	maxbufspace = (long)nbuf * BKVASIZE;
621 	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
622 	lobufspace = hibufspace - MAXBSIZE;
623 
624 	/*
625 	 * Note: The 16 MiB upper limit for hirunningspace was chosen
626 	 * arbitrarily and may need further tuning. It corresponds to
627 	 * 128 outstanding write IO requests (if IO size is 128 KiB),
628 	 * which fits with many RAID controllers' tagged queuing limits.
629 	 * The lower 1 MiB limit is the historical upper limit for
630 	 * hirunningspace.
631 	 */
632 	hirunningspace = lmax(lmin(roundup(hibufspace / 64, MAXBSIZE),
633 	    16 * 1024 * 1024), 1024 * 1024);
634 	lorunningspace = roundup((hirunningspace * 2) / 3, MAXBSIZE);
635 
636 /*
637  * Limit the amount of malloc memory since it is wired permanently into
638  * the kernel space.  Even though this is accounted for in the buffer
639  * allocation, we don't want the malloced region to grow uncontrolled.
640  * The malloc scheme improves memory utilization significantly for average
641  * (small) directories.
642  */
643 	maxbufmallocspace = hibufspace / 20;
644 
645 /*
646  * Reduce the chance of a deadlock occurring by limiting the number
647  * of delayed-write dirty buffers we allow to stack up.
648  */
649 	hidirtybuffers = nbuf / 4 + 20;
650 	dirtybufthresh = hidirtybuffers * 9 / 10;
651 	numdirtybuffers = 0;
652 /*
653  * To support extreme low-memory systems, make sure hidirtybuffers cannot
654  * eat up all available buffer space.  This occurs when our minimum cannot
655  * be met.  We try to size hidirtybuffers to 3/4 our buffer space assuming
656  * BKVASIZE'd buffers.
657  */
658 	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
659 		hidirtybuffers >>= 1;
660 	}
661 	lodirtybuffers = hidirtybuffers / 2;
662 
663 /*
664  * Try to keep the number of free buffers in the specified range,
665  * and give special processes (e.g. buf_daemon) access to an
666  * emergency reserve.
667  */
668 	lofreebuffers = nbuf / 18 + 5;
669 	hifreebuffers = 2 * lofreebuffers;
670 	numfreebuffers = nbuf;
671 
672 	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
673 	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
674 }
675 
676 /*
677  * bfreekva() - free the kva allocation for a buffer.
678  *
679  *	Since this call frees up buffer space, we call bufspacewakeup().
680  */
681 static void
682 bfreekva(struct buf *bp)
683 {
684 
685 	if (bp->b_kvasize) {
686 		atomic_add_int(&buffreekvacnt, 1);
687 		atomic_subtract_long(&bufspace, bp->b_kvasize);
688 		vm_map_remove(buffer_map, (vm_offset_t) bp->b_kvabase,
689 		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize);
690 		bp->b_kvasize = 0;
691 		bufspacewakeup();
692 	}
693 }
694 
695 /*
696  *	bremfree:
697  *
698  *	Mark the buffer for removal from the appropriate free list in brelse.
699  *
700  */
701 void
702 bremfree(struct buf *bp)
703 {
704 	int old;
705 
706 	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
707 	KASSERT((bp->b_flags & B_REMFREE) == 0,
708 	    ("bremfree: buffer %p already marked for delayed removal.", bp));
709 	KASSERT(bp->b_qindex != QUEUE_NONE,
710 	    ("bremfree: buffer %p not on a queue.", bp));
711 	BUF_ASSERT_HELD(bp);
712 
713 	bp->b_flags |= B_REMFREE;
714 	/* Fixup numfreebuffers count.  */
715 	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
716 		KASSERT((bp->b_vflags & BV_INFREECNT) != 0,
717 		    ("buf %p not counted in numfreebuffers", bp));
718 		if (bp->b_bufobj != NULL)
719 			mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
720 		bp->b_vflags &= ~BV_INFREECNT;
721 		old = atomic_fetchadd_int(&numfreebuffers, -1);
722 		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
723 	}
724 }
725 
726 /*
727  *	bremfreef:
728  *
729  *	Force an immediate removal from a free list.  Used only in nfs when
730  *	it abuses the b_freelist pointer.
731  */
732 void
733 bremfreef(struct buf *bp)
734 {
735 	mtx_lock(&bqlock);
736 	bremfreel(bp);
737 	mtx_unlock(&bqlock);
738 }
739 
740 /*
741  *	bremfreel:
742  *
743  *	Removes a buffer from the free list, must be called with the
744  *	bqlock held.
745  */
746 static void
747 bremfreel(struct buf *bp)
748 {
749 	int old;
750 
751 	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
752 	    bp, bp->b_vp, bp->b_flags);
753 	KASSERT(bp->b_qindex != QUEUE_NONE,
754 	    ("bremfreel: buffer %p not on a queue.", bp));
755 	BUF_ASSERT_HELD(bp);
756 	mtx_assert(&bqlock, MA_OWNED);
757 
758 	TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
759 	bp->b_qindex = QUEUE_NONE;
760 	/*
761 	 * If this was a delayed bremfree() we only need to remove the buffer
762 	 * from the queue and return; the stats were already adjusted.
763 	 */
764 	if (bp->b_flags & B_REMFREE) {
765 		bp->b_flags &= ~B_REMFREE;
766 		return;
767 	}
768 	/*
769 	 * Fixup numfreebuffers count.  If the buffer is invalid or not
770 	 * delayed-write, the buffer was free and we must decrement
771 	 * numfreebuffers.
772 	 */
773 	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
774 		KASSERT((bp->b_vflags & BV_INFREECNT) != 0,
775 		    ("buf %p not counted in numfreebuffers", bp));
776 		if (bp->b_bufobj != NULL)
777 			mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
778 		bp->b_vflags &= ~BV_INFREECNT;
779 		old = atomic_fetchadd_int(&numfreebuffers, -1);
780 		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
781 	}
782 }
783 
784 /*
785  * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
786  * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
787  * the buffer is valid and we do not have to do anything.
788  */
789 void
790 breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
791     int cnt, struct ucred * cred)
792 {
793 	struct buf *rabp;
794 	int i;
795 
796 	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
797 		if (inmem(vp, *rablkno))
798 			continue;
799 		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
800 
801 		if ((rabp->b_flags & B_CACHE) == 0) {
802 			if (!TD_IS_IDLETHREAD(curthread))
803 				curthread->td_ru.ru_inblock++;
804 			rabp->b_flags |= B_ASYNC;
805 			rabp->b_flags &= ~B_INVAL;
806 			rabp->b_ioflags &= ~BIO_ERROR;
807 			rabp->b_iocmd = BIO_READ;
808 			if (rabp->b_rcred == NOCRED && cred != NOCRED)
809 				rabp->b_rcred = crhold(cred);
810 			vfs_busy_pages(rabp, 0);
811 			BUF_KERNPROC(rabp);
812 			rabp->b_iooffset = dbtob(rabp->b_blkno);
813 			bstrategy(rabp);
814 		} else {
815 			brelse(rabp);
816 		}
817 	}
818 }
819 
820 /*
821  * Entry point for bread() and breadn() via #defines in sys/buf.h.
822  *
823  * Get a buffer with the specified data.  Look in the cache first.  We
824  * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
825  * is set, the buffer is valid and we do not have to do anything, see
826  * getblk(). Also starts asynchronous I/O on read-ahead blocks.
827  */
828 int
829 breadn_flags(struct vnode * vp, daddr_t blkno, int size,
830     daddr_t * rablkno, int *rabsize, int cnt,
831     struct ucred * cred, int flags, struct buf **bpp)
832 {
833 	struct buf *bp;
834 	int rv = 0, readwait = 0;
835 
836 	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
837 	/*
838 	 * Can only return NULL if GB_LOCK_NOWAIT flag is specified.
839 	 */
840 	*bpp = bp = getblk(vp, blkno, size, 0, 0, flags);
841 	if (bp == NULL)
842 		return (EBUSY);
843 
844 	/* if not found in cache, do some I/O */
845 	if ((bp->b_flags & B_CACHE) == 0) {
846 		if (!TD_IS_IDLETHREAD(curthread))
847 			curthread->td_ru.ru_inblock++;
848 		bp->b_iocmd = BIO_READ;
849 		bp->b_flags &= ~B_INVAL;
850 		bp->b_ioflags &= ~BIO_ERROR;
851 		if (bp->b_rcred == NOCRED && cred != NOCRED)
852 			bp->b_rcred = crhold(cred);
853 		vfs_busy_pages(bp, 0);
854 		bp->b_iooffset = dbtob(bp->b_blkno);
855 		bstrategy(bp);
856 		++readwait;
857 	}
858 
859 	breada(vp, rablkno, rabsize, cnt, cred);
860 
861 	if (readwait) {
862 		rv = bufwait(bp);
863 	}
864 	return (rv);
865 }
866 
867 /*
868  * Write, release buffer on completion.  (Done by iodone
869  * if async).  Do not bother writing anything if the buffer
870  * is invalid.
871  *
872  * Note that we set B_CACHE here, indicating that the buffer is
873  * fully valid and thus cacheable.  This is true even of NFS
874  * now so we set it generally.  This could be set either here
875  * or in biodone() since the I/O is synchronous.  We put it
876  * here.
877  */
878 int
879 bufwrite(struct buf *bp)
880 {
881 	int oldflags;
882 	struct vnode *vp;
883 	int vp_md;
884 
885 	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
886 	if (bp->b_flags & B_INVAL) {
887 		brelse(bp);
888 		return (0);
889 	}
890 
891 	oldflags = bp->b_flags;
892 
893 	BUF_ASSERT_HELD(bp);
894 
895 	if (bp->b_pin_count > 0)
896 		bunpin_wait(bp);
897 
898 	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
899 	    ("FFS background buffer should not get here %p", bp));
900 
901 	vp = bp->b_vp;
902 	if (vp)
903 		vp_md = vp->v_vflag & VV_MD;
904 	else
905 		vp_md = 0;
906 
907 	/* Mark the buffer clean */
908 	bundirty(bp);
909 
910 	bp->b_flags &= ~B_DONE;
911 	bp->b_ioflags &= ~BIO_ERROR;
912 	bp->b_flags |= B_CACHE;
913 	bp->b_iocmd = BIO_WRITE;
914 
915 	bufobj_wref(bp->b_bufobj);
916 	vfs_busy_pages(bp, 1);
917 
918 	/*
919 	 * Normal bwrites pipeline writes
920 	 */
921 	bp->b_runningbufspace = bp->b_bufsize;
922 	atomic_add_long(&runningbufspace, bp->b_runningbufspace);
923 
924 	if (!TD_IS_IDLETHREAD(curthread))
925 		curthread->td_ru.ru_oublock++;
926 	if (oldflags & B_ASYNC)
927 		BUF_KERNPROC(bp);
928 	bp->b_iooffset = dbtob(bp->b_blkno);
929 	bstrategy(bp);
930 
931 	if ((oldflags & B_ASYNC) == 0) {
932 		int rtval = bufwait(bp);
933 		brelse(bp);
934 		return (rtval);
935 	} else {
936 		/*
937 		 * don't allow the async write to saturate the I/O
938 		 * system.  We will not deadlock here because
939 		 * we are blocking waiting for I/O that is already in-progress
940 		 * to complete. We do not block here if it is the update
941 		 * or syncer daemon trying to clean up as that can lead
942 		 * to deadlock.
943 		 */
944 		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
945 			waitrunningbufspace();
946 	}
947 
948 	return (0);
949 }
950 
951 void
952 bufbdflush(struct bufobj *bo, struct buf *bp)
953 {
954 	struct buf *nbp;
955 
956 	if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
957 		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
958 		altbufferflushes++;
959 	} else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
960 		BO_LOCK(bo);
961 		/*
962 		 * Try to find a buffer to flush.
963 		 */
964 		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
965 			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
966 			    BUF_LOCK(nbp,
967 				     LK_EXCLUSIVE | LK_NOWAIT, NULL))
968 				continue;
969 			if (bp == nbp)
970 				panic("bdwrite: found ourselves");
971 			BO_UNLOCK(bo);
972 			/* Don't call buf_countdeps() with the bo lock held. */
973 			if (buf_countdeps(nbp, 0)) {
974 				BO_LOCK(bo);
975 				BUF_UNLOCK(nbp);
976 				continue;
977 			}
978 			if (nbp->b_flags & B_CLUSTEROK) {
979 				vfs_bio_awrite(nbp);
980 			} else {
981 				bremfree(nbp);
982 				bawrite(nbp);
983 			}
984 			dirtybufferflushes++;
985 			break;
986 		}
987 		if (nbp == NULL)
988 			BO_UNLOCK(bo);
989 	}
990 }
991 
992 /*
993  * Delayed write. (Buffer is marked dirty).  Do not bother writing
994  * anything if the buffer is marked invalid.
995  *
996  * Note that since the buffer must be completely valid, we can safely
997  * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
998  * biodone() in order to prevent getblk from writing the buffer
999  * out synchronously.
1000  */
1001 void
1002 bdwrite(struct buf *bp)
1003 {
1004 	struct thread *td = curthread;
1005 	struct vnode *vp;
1006 	struct bufobj *bo;
1007 
1008 	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1009 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1010 	BUF_ASSERT_HELD(bp);
1011 
1012 	if (bp->b_flags & B_INVAL) {
1013 		brelse(bp);
1014 		return;
1015 	}
1016 
1017 	/*
1018 	 * If we have too many dirty buffers, don't create any more.
1019 	 * If we are wildly over our limit, then force a complete
1020 	 * cleanup. Otherwise, just keep the situation from getting
1021 	 * out of control. Note that we have to avoid a recursive
1022 	 * disaster and not try to clean up after our own cleanup!
1023 	 */
1024 	vp = bp->b_vp;
1025 	bo = bp->b_bufobj;
1026 	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
1027 		td->td_pflags |= TDP_INBDFLUSH;
1028 		BO_BDFLUSH(bo, bp);
1029 		td->td_pflags &= ~TDP_INBDFLUSH;
1030 	} else
1031 		recursiveflushes++;
1032 
1033 	bdirty(bp);
1034 	/*
1035 	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
1036 	 * true even of NFS now.
1037 	 */
1038 	bp->b_flags |= B_CACHE;
1039 
1040 	/*
1041 	 * This bmap keeps the system from needing to do the bmap later,
1042 	 * perhaps when the system is attempting to do a sync.  Since it
1043 	 * is likely that the indirect block -- or whatever other data
1044 	 * structure the filesystem needs -- is still in memory now, it is
1045 	 * a good thing to do this.  Note also that if the pageout daemon
1046 	 * is requesting a sync, there might not be enough memory to do
1047 	 * the bmap then, so this is important to do.
1048 	 */
1049 	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
1050 		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
1051 	}
1052 
1053 	/*
1054 	 * Set the *dirty* buffer range based upon the VM system dirty
1055 	 * pages.
1056 	 *
1057 	 * Mark the buffer pages as clean.  We need to do this here to
1058 	 * satisfy the vnode_pager and the pageout daemon, so that it
1059 	 * thinks that the pages have been "cleaned".  Note that since
1060 	 * the pages are in a delayed write buffer -- the VFS layer
1061 	 * "will" see that the pages get written out on the next sync,
1062 	 * or perhaps the cluster will be completed.
1063 	 */
1064 	vfs_clean_pages_dirty_buf(bp);
1065 	bqrelse(bp);
1066 
1067 	/*
1068 	 * Wakeup the buffer flushing daemon if we have a lot of dirty
1069 	 * buffers (midpoint between our recovery point and our stall
1070 	 * point).
1071 	 */
1072 	bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
1073 
1074 	/*
1075 	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
1076 	 * due to the softdep code.
1077 	 */
1078 }
1079 
1080 /*
1081  *	bdirty:
1082  *
1083  *	Turn buffer into delayed write request.  We must clear BIO_READ and
1084  *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
1085  *	itself to properly update it in the dirty/clean lists.  We mark it
1086  *	B_DONE to ensure that any asynchronization of the buffer properly
1087  *	clears B_DONE ( else a panic will occur later ).
1088  *
1089  *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
1090  *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
1091  *	should only be called if the buffer is known-good.
1092  *
1093  *	Since the buffer is not on a queue, we do not update the numfreebuffers
1094  *	count.
1095  *
1096  *	The buffer must be on QUEUE_NONE.
1097  */
1098 void
1099 bdirty(struct buf *bp)
1100 {
1101 
1102 	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
1103 	    bp, bp->b_vp, bp->b_flags);
1104 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1105 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
1106 	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
1107 	BUF_ASSERT_HELD(bp);
1108 	bp->b_flags &= ~(B_RELBUF);
1109 	bp->b_iocmd = BIO_WRITE;
1110 
1111 	if ((bp->b_flags & B_DELWRI) == 0) {
1112 		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
1113 		reassignbuf(bp);
1114 		atomic_add_int(&numdirtybuffers, 1);
1115 		bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
1116 	}
1117 }
1118 
1119 /*
1120  *	bundirty:
1121  *
1122  *	Clear B_DELWRI for buffer.
1123  *
1124  *	Since the buffer is not on a queue, we do not update the numfreebuffers
1125  *	count.
1126  *
1127  *	The buffer must be on QUEUE_NONE.
1128  */
1129 
1130 void
1131 bundirty(struct buf *bp)
1132 {
1133 
1134 	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1135 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1136 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
1137 	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
1138 	BUF_ASSERT_HELD(bp);
1139 
1140 	if (bp->b_flags & B_DELWRI) {
1141 		bp->b_flags &= ~B_DELWRI;
1142 		reassignbuf(bp);
1143 		atomic_subtract_int(&numdirtybuffers, 1);
1144 		numdirtywakeup(lodirtybuffers);
1145 	}
1146 	/*
1147 	 * Since it is now being written, we can clear its deferred write flag.
1148 	 */
1149 	bp->b_flags &= ~B_DEFERRED;
1150 }
1151 
1152 /*
1153  *	bawrite:
1154  *
1155  *	Asynchronous write.  Start output on a buffer, but do not wait for
1156  *	it to complete.  The buffer is released when the output completes.
1157  *
1158  *	bwrite() ( or the VOP routine anyway ) is responsible for handling
1159  *	B_INVAL buffers.  Not us.
1160  */
1161 void
1162 bawrite(struct buf *bp)
1163 {
1164 
1165 	bp->b_flags |= B_ASYNC;
1166 	(void) bwrite(bp);
1167 }
1168 
1169 /*
1170  *	bwillwrite:
1171  *
1172  *	Called prior to the locking of any vnodes when we are expecting to
1173  *	write.  We do not want to starve the buffer cache with too many
1174  *	dirty buffers so we block here.  By blocking prior to the locking
1175  *	of any vnodes we attempt to avoid the situation where a locked vnode
1176  *	prevents the various system daemons from flushing related buffers.
1177  */
1178 
1179 void
1180 bwillwrite(void)
1181 {
1182 
1183 	if (numdirtybuffers >= hidirtybuffers) {
1184 		mtx_lock(&nblock);
1185 		while (numdirtybuffers >= hidirtybuffers) {
1186 			bd_wakeup(1);
1187 			needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
1188 			msleep(&needsbuffer, &nblock,
1189 			    (PRIBIO + 4), "flswai", 0);
1190 		}
1191 		mtx_unlock(&nblock);
1192 	}
1193 }
1194 
1195 /*
1196  * Return true if we have too many dirty buffers.
1197  */
1198 int
1199 buf_dirty_count_severe(void)
1200 {
1201 
1202 	return(numdirtybuffers >= hidirtybuffers);
1203 }
1204 
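/*
 * vm_page_count_severe() wrapped in a fail point so that severe page
 * pressure can be simulated when testing the buffer cache's low-memory
 * handling.
 */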
1205 static __noinline int
1206 buf_vm_page_count_severe(void)
1207 {
1208 
1209 	KFAIL_POINT_CODE(DEBUG_FP, buf_pressure, return 1);
1210 
1211 	return vm_page_count_severe();
1212 }
1213 
1214 /*
1215  *	brelse:
1216  *
1217  *	Release a busy buffer and, if requested, free its resources.  The
1218  *	buffer will be stashed in the appropriate bufqueue[] allowing it
1219  *	to be accessed later as a cache entity or reused for other purposes.
1220  */
1221 void
1222 brelse(struct buf *bp)
1223 {
1224 	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
1225 	    bp, bp->b_vp, bp->b_flags);
1226 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
1227 	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
1228 
1229 	if (bp->b_flags & B_MANAGED) {
1230 		bqrelse(bp);
1231 		return;
1232 	}
1233 
1234 	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
1235 	    bp->b_error == EIO && !(bp->b_flags & B_INVAL)) {
1236 		/*
1237 		 * Failed write, redirty.  Must clear BIO_ERROR to prevent
1238 		 * pages from being scrapped.  If the error is anything
1239 		 * other than an I/O error (EIO), assume that retrying
1240 		 * is futile.
1241 		 */
1242 		bp->b_ioflags &= ~BIO_ERROR;
1243 		bdirty(bp);
1244 	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
1245 	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
1246 		/*
1247 		 * Either a failed I/O or we were asked to free or not
1248 		 * cache the buffer.
1249 		 */
1250 		bp->b_flags |= B_INVAL;
1251 		if (!LIST_EMPTY(&bp->b_dep))
1252 			buf_deallocate(bp);
1253 		if (bp->b_flags & B_DELWRI) {
1254 			atomic_subtract_int(&numdirtybuffers, 1);
1255 			numdirtywakeup(lodirtybuffers);
1256 		}
1257 		bp->b_flags &= ~(B_DELWRI | B_CACHE);
1258 		if ((bp->b_flags & B_VMIO) == 0) {
1259 			if (bp->b_bufsize)
1260 				allocbuf(bp, 0);
1261 			if (bp->b_vp)
1262 				brelvp(bp);
1263 		}
1264 	}
1265 
1266 	/*
1267 	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
1268 	 * is called with B_DELWRI set, the underlying pages may wind up
1269 	 * getting freed causing a previous write (bdwrite()) to get 'lost'
1270 	 * because pages associated with a B_DELWRI bp are marked clean.
1271 	 *
1272 	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
1273 	 * if B_DELWRI is set.
1274 	 *
1275 	 * If B_DELWRI is not set we may have to set B_RELBUF if we are low
1276 	 * on pages to return pages to the VM page queues.
1277 	 */
1278 	if (bp->b_flags & B_DELWRI)
1279 		bp->b_flags &= ~B_RELBUF;
1280 	else if (buf_vm_page_count_severe()) {
1281 		/*
1282 		 * The locking of the BO_LOCK is not necessary since
1283 		 * BKGRDINPROG cannot be set while we hold the buf
1284 		 * lock, it can only be cleared if it is already
1285 		 * pending.
1286 		 */
1287 		if (bp->b_vp) {
1288 			if (!(bp->b_vflags & BV_BKGRDINPROG))
1289 				bp->b_flags |= B_RELBUF;
1290 		} else
1291 			bp->b_flags |= B_RELBUF;
1292 	}
1293 
1294 	/*
1295 	 * VMIO buffer rundown.  It is generally not necessary to keep a VMIO buffer
1296 	 * constituted, not even NFS buffers now.  Two flags affect this.  If
1297 	 * B_INVAL, the struct buf is invalidated but the VM object is kept
1298 	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
1299 	 *
1300 	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
1301 	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
1302 	 * buffer is also B_INVAL because it hits the re-dirtying code above.
1303 	 *
1304 	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
1305 	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
1306 	 * the commit state and we cannot afford to lose the buffer. If the
1307 	 * buffer has a background write in progress, we need to keep it
1308 	 * around to prevent it from being reconstituted and starting a second
1309 	 * background write.
1310 	 */
1311 	if ((bp->b_flags & B_VMIO)
1312 	    && !(bp->b_vp->v_mount != NULL &&
1313 		 (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
1314 		 !vn_isdisk(bp->b_vp, NULL) &&
1315 		 (bp->b_flags & B_DELWRI))
1316 	    ) {
1317 
1318 		int i, j, resid;
1319 		vm_page_t m;
1320 		off_t foff;
1321 		vm_pindex_t poff;
1322 		vm_object_t obj;
1323 
1324 		obj = bp->b_bufobj->bo_object;
1325 
1326 		/*
1327 		 * Get the base offset and length of the buffer.  Note that
1328 		 * in the VMIO case if the buffer block size is not
1329 		 * page-aligned then the b_data pointer may not be page-aligned.
1330 		 * But our b_pages[] array *IS* page aligned.
1331 		 *
1332 		 * Block sizes less than DEV_BSIZE (usually 512) are not
1333 		 * supported due to the page granularity bits (m->valid,
1334 		 * m->dirty, etc...).
1335 		 *
1336 		 * See man buf(9) for more information
1337 		 */
1338 		resid = bp->b_bufsize;
1339 		foff = bp->b_offset;
1340 		VM_OBJECT_LOCK(obj);
1341 		for (i = 0; i < bp->b_npages; i++) {
1342 			int had_bogus = 0;
1343 
1344 			m = bp->b_pages[i];
1345 
1346 			/*
1347 			 * If we hit a bogus page, fixup *all* the bogus pages
1348 			 * now.
1349 			 */
1350 			if (m == bogus_page) {
1351 				poff = OFF_TO_IDX(bp->b_offset);
1352 				had_bogus = 1;
1353 
1354 				for (j = i; j < bp->b_npages; j++) {
1355 					vm_page_t mtmp;
1356 					mtmp = bp->b_pages[j];
1357 					if (mtmp == bogus_page) {
1358 						mtmp = vm_page_lookup(obj, poff + j);
1359 						if (!mtmp) {
1360 							panic("brelse: page missing\n");
1361 						}
1362 						bp->b_pages[j] = mtmp;
1363 					}
1364 				}
1365 
1366 				if ((bp->b_flags & B_INVAL) == 0) {
1367 					pmap_qenter(
1368 					    trunc_page((vm_offset_t)bp->b_data),
1369 					    bp->b_pages, bp->b_npages);
1370 				}
1371 				m = bp->b_pages[i];
1372 			}
1373 			if ((bp->b_flags & B_NOCACHE) ||
1374 			    (bp->b_ioflags & BIO_ERROR &&
1375 			     bp->b_iocmd == BIO_READ)) {
1376 				int poffset = foff & PAGE_MASK;
1377 				int presid = resid > (PAGE_SIZE - poffset) ?
1378 					(PAGE_SIZE - poffset) : resid;
1379 
1380 				KASSERT(presid >= 0, ("brelse: extra page"));
1381 				vm_page_set_invalid(m, poffset, presid);
1382 				if (had_bogus)
1383 					printf("avoided corruption bug in bogus_page/brelse code\n");
1384 			}
1385 			resid -= PAGE_SIZE - (foff & PAGE_MASK);
1386 			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
1387 		}
1388 		VM_OBJECT_UNLOCK(obj);
1389 		if (bp->b_flags & (B_INVAL | B_RELBUF))
1390 			vfs_vmio_release(bp);
1391 
1392 	} else if (bp->b_flags & B_VMIO) {
1393 
1394 		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
1395 			vfs_vmio_release(bp);
1396 		}
1397 
1398 	} else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) {
1399 		if (bp->b_bufsize != 0)
1400 			allocbuf(bp, 0);
1401 		if (bp->b_vp != NULL)
1402 			brelvp(bp);
1403 	}
1404 
1405 	if (BUF_LOCKRECURSED(bp)) {
1406 		/* do not release to free list */
1407 		BUF_UNLOCK(bp);
1408 		return;
1409 	}
1410 
1411 	/* enqueue */
1412 	mtx_lock(&bqlock);
1413 	/* Handle delayed bremfree() processing. */
1414 	if (bp->b_flags & B_REMFREE) {
1415 		struct bufobj *bo;
1416 
1417 		bo = bp->b_bufobj;
1418 		if (bo != NULL)
1419 			BO_LOCK(bo);
1420 		bremfreel(bp);
1421 		if (bo != NULL)
1422 			BO_UNLOCK(bo);
1423 	}
1424 	if (bp->b_qindex != QUEUE_NONE)
1425 		panic("brelse: free buffer onto another queue???");
1426 
1427 	/*
1428 	 * If the buffer has junk contents signal it and eventually
1429 	 * clean up B_DELWRI and disassociate the vnode so that gbincore()
1430 	 * doesn't find it.
1431 	 */
1432 	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
1433 	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
1434 		bp->b_flags |= B_INVAL;
1435 	if (bp->b_flags & B_INVAL) {
1436 		if (bp->b_flags & B_DELWRI)
1437 			bundirty(bp);
1438 		if (bp->b_vp)
1439 			brelvp(bp);
1440 	}
1441 
1442 	/* buffers with no memory */
1443 	if (bp->b_bufsize == 0) {
1444 		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
1445 		if (bp->b_vflags & BV_BKGRDINPROG)
1446 			panic("losing buffer 1");
1447 		if (bp->b_kvasize) {
1448 			bp->b_qindex = QUEUE_EMPTYKVA;
1449 		} else {
1450 			bp->b_qindex = QUEUE_EMPTY;
1451 		}
1452 		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
1453 	/* buffers with junk contents */
1454 	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
1455 	    (bp->b_ioflags & BIO_ERROR)) {
1456 		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
1457 		if (bp->b_vflags & BV_BKGRDINPROG)
1458 			panic("losing buffer 2");
1459 		bp->b_qindex = QUEUE_CLEAN;
1460 		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
1461 	/* remaining buffers */
1462 	} else {
1463 		if (bp->b_flags & B_DELWRI)
1464 			bp->b_qindex = QUEUE_DIRTY;
1465 		else
1466 			bp->b_qindex = QUEUE_CLEAN;
1467 		if (bp->b_flags & B_AGE)
1468 			TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
1469 		else
1470 			TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
1471 	}
1472 	mtx_unlock(&bqlock);
1473 
1474 	/*
1475 	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
1476 	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
1477 	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
1478 	 * if B_INVAL is set ).
1479 	 */
1480 
1481 	if (!(bp->b_flags & B_DELWRI)) {
1482 		struct bufobj *bo;
1483 
1484 		bo = bp->b_bufobj;
1485 		if (bo != NULL)
1486 			BO_LOCK(bo);
1487 		bufcountwakeup(bp);
1488 		if (bo != NULL)
1489 			BO_UNLOCK(bo);
1490 	}
1491 
1492 	/*
1493 	 * Something we can maybe free or reuse
1494 	 */
1495 	if (bp->b_bufsize || bp->b_kvasize)
1496 		bufspacewakeup();
1497 
1498 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
1499 	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
1500 		panic("brelse: not dirty");
1501 	/* unlock */
1502 	BUF_UNLOCK(bp);
1503 }
1504 
1505 /*
1506  * Release a buffer back to the appropriate queue but do not try to free
1507  * it.  The buffer is expected to be used again soon.
1508  *
1509  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
1510  * biodone() to requeue an async I/O on completion.  It is also used when
1511  * known good buffers need to be requeued but we think we may need the data
1512  * again soon.
1513  *
1514  * XXX we should be able to leave the B_RELBUF hint set on completion.
1515  */
1516 void
1517 bqrelse(struct buf *bp)
1518 {
1519 	struct bufobj *bo;
1520 
1521 	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1522 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
1523 	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
1524 
1525 	if (BUF_LOCKRECURSED(bp)) {
1526 		/* do not release to free list */
1527 		BUF_UNLOCK(bp);
1528 		return;
1529 	}
1530 
1531 	bo = bp->b_bufobj;
1532 	if (bp->b_flags & B_MANAGED) {
1533 		if (bp->b_flags & B_REMFREE) {
1534 			mtx_lock(&bqlock);
1535 			if (bo != NULL)
1536 				BO_LOCK(bo);
1537 			bremfreel(bp);
1538 			if (bo != NULL)
1539 				BO_UNLOCK(bo);
1540 			mtx_unlock(&bqlock);
1541 		}
1542 		bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
1543 		BUF_UNLOCK(bp);
1544 		return;
1545 	}
1546 
1547 	mtx_lock(&bqlock);
1548 	/* Handle delayed bremfree() processing. */
1549 	if (bp->b_flags & B_REMFREE) {
1550 		if (bo != NULL)
1551 			BO_LOCK(bo);
1552 		bremfreel(bp);
1553 		if (bo != NULL)
1554 			BO_UNLOCK(bo);
1555 	}
1556 	if (bp->b_qindex != QUEUE_NONE)
1557 		panic("bqrelse: free buffer onto another queue???");
1558 	/* buffers with stale but valid contents */
1559 	if (bp->b_flags & B_DELWRI) {
1560 		bp->b_qindex = QUEUE_DIRTY;
1561 		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
1562 	} else {
1563 		/*
1564 		 * The locking of the BO_LOCK for checking of the
1565 		 * BV_BKGRDINPROG is not necessary since the
1566 		 * BV_BKGRDINPROG cannot be set while we hold the buf
1567 		 * lock, it can only be cleared if it is already
1568 		 * pending.
1569 		 */
1570 		if (!buf_vm_page_count_severe() || (bp->b_vflags & BV_BKGRDINPROG)) {
1571 			bp->b_qindex = QUEUE_CLEAN;
1572 			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp,
1573 			    b_freelist);
1574 		} else {
1575 			/*
1576 			 * We are too low on memory, we have to try to free
1577 			 * the buffer (most importantly: the wired pages
1578 			 * making up its backing store) *now*.
1579 			 */
1580 			mtx_unlock(&bqlock);
1581 			brelse(bp);
1582 			return;
1583 		}
1584 	}
1585 	mtx_unlock(&bqlock);
1586 
1587 	if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI)) {
1588 		if (bo != NULL)
1589 			BO_LOCK(bo);
1590 		bufcountwakeup(bp);
1591 		if (bo != NULL)
1592 			BO_UNLOCK(bo);
1593 	}
1594 
1595 	/*
1596 	 * Something we can maybe free or reuse.
1597 	 */
1598 	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
1599 		bufspacewakeup();
1600 
1601 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
1602 	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
1603 		panic("bqrelse: not dirty");
1604 	/* unlock */
1605 	BUF_UNLOCK(bp);
1606 }
1607 
1608 /* Give pages used by the bp back to the VM system (where possible) */
1609 static void
1610 vfs_vmio_release(struct buf *bp)
1611 {
1612 	int i;
1613 	vm_page_t m;
1614 
1615 	pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
1616 	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
1617 	for (i = 0; i < bp->b_npages; i++) {
1618 		m = bp->b_pages[i];
1619 		bp->b_pages[i] = NULL;
1620 		/*
1621 		 * In order to keep page LRU ordering consistent, put
1622 		 * everything on the inactive queue.
1623 		 */
1624 		vm_page_lock(m);
1625 		vm_page_unwire(m, 0);
1626 		/*
1627 		 * We don't mess with busy pages, it is
1628 		 * the responsibility of the process that
1629 		 * busied the pages to deal with them.
1630 		 */
1631 		if ((m->oflags & VPO_BUSY) == 0 && m->busy == 0 &&
1632 		    m->wire_count == 0) {
1633 			/*
1634 			 * Might as well free the page if we can and it has
1635 			 * no valid data.  We also free the page if the
1636 			 * buffer was used for direct I/O
1637 			 */
1638 			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid) {
1639 				vm_page_free(m);
1640 			} else if (bp->b_flags & B_DIRECT) {
1641 				vm_page_try_to_free(m);
1642 			} else if (buf_vm_page_count_severe()) {
1643 				vm_page_try_to_cache(m);
1644 			}
1645 		}
1646 		vm_page_unlock(m);
1647 	}
1648 	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
1649 
1650 	if (bp->b_bufsize) {
1651 		bufspacewakeup();
1652 		bp->b_bufsize = 0;
1653 	}
1654 	bp->b_npages = 0;
1655 	bp->b_flags &= ~B_VMIO;
1656 	if (bp->b_vp)
1657 		brelvp(bp);
1658 }
1659 
1660 /*
1661  * Check to see if a block at a particular lbn is available for a clustered
1662  * write.
1663  */
1664 static int
1665 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
1666 {
1667 	struct buf *bpa;
1668 	int match;
1669 
1670 	match = 0;
1671 
1672 	/* If the buf isn't in core skip it */
1673 	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
1674 		return (0);
1675 
1676 	/* If the buf is busy we don't want to wait for it */
1677 	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1678 		return (0);
1679 
1680 	/* Only cluster with valid clusterable delayed write buffers */
1681 	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
1682 	    (B_DELWRI | B_CLUSTEROK))
1683 		goto done;
1684 
1685 	if (bpa->b_bufsize != size)
1686 		goto done;
1687 
1688 	/*
1689 	 * Check to see if it is in the expected place on disk and that the
1690 	 * block has been mapped.
1691 	 */
1692 	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
1693 		match = 1;
1694 done:
1695 	BUF_UNLOCK(bpa);
1696 	return (match);
1697 }
1698 
1699 /*
1700  *	vfs_bio_awrite:
1701  *
1702  *	Implement clustered async writes for clearing out B_DELWRI buffers.
1703  *	This is much better than the old way of writing only one buffer at
1704  *	a time.  Note that we may not be presented with the buffers in the
1705  *	correct order, so we search for the cluster in both directions.
1706  */
1707 int
1708 vfs_bio_awrite(struct buf *bp)
1709 {
1710 	struct bufobj *bo;
1711 	int i;
1712 	int j;
1713 	daddr_t lblkno = bp->b_lblkno;
1714 	struct vnode *vp = bp->b_vp;
1715 	int ncl;
1716 	int nwritten;
1717 	int size;
1718 	int maxcl;
1719 
1720 	bo = &vp->v_bufobj;
1721 	/*
1722 	 * right now we support clustered writing only to regular files.  If
1723 	 * we find a clusterable block we could be in the middle of a cluster
1724 	 * rather than at the beginning.
1725 	 */
1726 	if ((vp->v_type == VREG) &&
1727 	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
1728 	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
1729 
1730 		size = vp->v_mount->mnt_stat.f_iosize;
1731 		maxcl = MAXPHYS / size;
1732 
1733 		BO_LOCK(bo);
1734 		for (i = 1; i < maxcl; i++)
1735 			if (vfs_bio_clcheck(vp, size, lblkno + i,
1736 			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
1737 				break;
1738 
1739 		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
1740 			if (vfs_bio_clcheck(vp, size, lblkno - j,
1741 			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
1742 				break;
1743 		BO_UNLOCK(bo);
1744 		--j;
1745 		ncl = i + j;
1746 		/*
1747 		 * this is a possible cluster write
1748 		 */
1749 		if (ncl != 1) {
1750 			BUF_UNLOCK(bp);
1751 			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
1752 			return nwritten;
1753 		}
1754 	}
1755 	bremfree(bp);
1756 	bp->b_flags |= B_ASYNC;
1757 	/*
1758 	 * default (old) behavior, writing out only one block
1759 	 *
1760 	 * XXX returns b_bufsize instead of b_bcount for nwritten?
1761 	 */
1762 	nwritten = bp->b_bufsize;
1763 	(void) bwrite(bp);
1764 
1765 	return nwritten;
1766 }
1767 
1768 /*
1769  *	getnewbuf:
1770  *
1771  *	Find and initialize a new buffer header, freeing up existing buffers
1772  *	in the bufqueues as necessary.  The new buffer is returned locked.
1773  *
1774  *	Important:  B_INVAL is not set.  If the caller wishes to throw the
1775  *	buffer away, the caller must set B_INVAL prior to calling brelse().
1776  *
1777  *	We block if:
1778  *		We have insufficient buffer headers
1779  *		We have insufficient buffer space
1780  *		buffer_map is too fragmented ( space reservation fails )
1781  *		If we have to flush dirty buffers ( but we try to avoid this )
1782  *
1783  *	To avoid VFS layer recursion we do not flush dirty buffers ourselves.
1784  *	Instead we ask the buf daemon to do it for us.  We attempt to
1785  *	avoid piecemeal wakeups of the pageout daemon.
1786  */
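/*
 * Illustrative sketch (not compiled) of the contract above: a caller that
 * obtains a buffer from getnewbuf() but then decides not to keep it must
 * mark it B_INVAL itself before releasing it (the condition name below is
 * only a stand-in):
 *
 *	if ((bp = getnewbuf(vp, 0, 0, size, maxsize, flags)) != NULL &&
 *	    decided_not_to_use_it) {
 *		bp->b_flags |= B_INVAL;
 *		brelse(bp);
 *	}
 *
 * getblk() below follows exactly this pattern when it loses the race to
 * instantiate a block to another thread.
 */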
1787 
1788 static struct buf *
1789 getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize,
1790     int gbflags)
1791 {
1792 	struct thread *td;
1793 	struct buf *bp;
1794 	struct buf *nbp;
1795 	int defrag = 0;
1796 	int nqindex;
1797 	static int flushingbufs;
1798 
1799 	td = curthread;
1800 	/*
1801 	 * We can't afford to block since we might be holding a vnode lock,
1802 	 * which may prevent system daemons from running.  We deal with
1803 	 * low-memory situations by proactively returning memory and running
1804 	 * async I/O rather then sync I/O.
1805 	 */
1806 	atomic_add_int(&getnewbufcalls, 1);
1807 	atomic_subtract_int(&getnewbufrestarts, 1);
1808 restart:
1809 	atomic_add_int(&getnewbufrestarts, 1);
1810 
1811 	/*
1812 	 * Setup for scan.  If we do not have enough free buffers,
1813 	 * we set up a degenerate case that immediately fails.  Note
1814 	 * that if we are a specially marked process, we are allowed to
1815 	 * dip into our reserves.
1816 	 *
1817 	 * The scanning sequence is nominally:  EMPTY->EMPTYKVA->CLEAN
1818 	 *
1819 	 * We start with EMPTYKVA.  If the list is empty we backup to EMPTY.
1820 	 * However, there are a number of cases (defragging, reusing, ...)
1821 	 * where we cannot backup.
1822 	 */
1823 	mtx_lock(&bqlock);
1824 	nqindex = QUEUE_EMPTYKVA;
1825 	nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
1826 
1827 	if (nbp == NULL) {
1828 		/*
1829 		 * If no EMPTYKVA buffers and we are either
1830 		 * defragging or reusing, locate a CLEAN buffer
1831 		 * to free or reuse.  If bufspace usage is low,
1832 		 * skip this step so we can allocate a new buffer.
1833 		 */
1834 		if (defrag || bufspace >= lobufspace) {
1835 			nqindex = QUEUE_CLEAN;
1836 			nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
1837 		}
1838 
1839 		/*
1840 		 * If we could not find or were not allowed to reuse a
1841 		 * CLEAN buffer, check to see if it is ok to use an EMPTY
1842 		 * buffer.  We can only use an EMPTY buffer if allocating
1843 		 * its KVA would not otherwise run us out of buffer space.
1844 		 */
1845 		if (nbp == NULL && defrag == 0 &&
1846 		    bufspace + maxsize < hibufspace) {
1847 			nqindex = QUEUE_EMPTY;
1848 			nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
1849 		}
1850 	}
1851 
1852 	/*
1853 	 * Run scan, possibly freeing data and/or kva mappings on the fly
1854 	 * depending.
1855 	 */
1856 
1857 	while ((bp = nbp) != NULL) {
1858 		int qindex = nqindex;
1859 
1860 		/*
1861 		 * Calculate next bp ( we can only use it if we do not block
1862 		 * or do other fancy things ).
1863 		 */
1864 		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
1865 			switch(qindex) {
1866 			case QUEUE_EMPTY:
1867 				nqindex = QUEUE_EMPTYKVA;
1868 				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA])))
1869 					break;
1870 				/* FALLTHROUGH */
1871 			case QUEUE_EMPTYKVA:
1872 				nqindex = QUEUE_CLEAN;
1873 				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN])))
1874 					break;
1875 				/* FALLTHROUGH */
1876 			case QUEUE_CLEAN:
1877 				/*
1878 				 * nbp is NULL.
1879 				 */
1880 				break;
1881 			}
1882 		}
1883 		/*
1884 		 * If we are defragging then we need a buffer with
1885 		 * b_kvasize != 0.  XXX this situation should no longer
1886 		 * occur: if defrag is non-zero, the buffer's b_kvasize
1887 		 * should also be non-zero at this point.  XXX
1888 		 */
1889 		if (defrag && bp->b_kvasize == 0) {
1890 			printf("Warning: defrag empty buffer %p\n", bp);
1891 			continue;
1892 		}
1893 
1894 		/*
1895 		 * Start freeing the bp.  This is somewhat involved.  nbp
1896 		 * remains valid only for QUEUE_EMPTY[KVA] bp's.
1897 		 */
1898 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1899 			continue;
1900 		if (bp->b_vp) {
1901 			BO_LOCK(bp->b_bufobj);
1902 			if (bp->b_vflags & BV_BKGRDINPROG) {
1903 				BO_UNLOCK(bp->b_bufobj);
1904 				BUF_UNLOCK(bp);
1905 				continue;
1906 			}
1907 			BO_UNLOCK(bp->b_bufobj);
1908 		}
1909 		CTR6(KTR_BUF,
1910 		    "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d "
1911 		    "queue %d (recycling)", bp, bp->b_vp, bp->b_flags,
1912 		    bp->b_kvasize, bp->b_bufsize, qindex);
1913 
1914 		/*
1915 		 * Sanity Checks
1916 		 */
1917 		KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
1918 
1919 		/*
1920 		 * Note: we no longer distinguish between VMIO and non-VMIO
1921 		 * buffers.
1922 		 */
1923 
1924 		KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));
1925 
1926 		if (bp->b_bufobj != NULL)
1927 			BO_LOCK(bp->b_bufobj);
1928 		bremfreel(bp);
1929 		if (bp->b_bufobj != NULL)
1930 			BO_UNLOCK(bp->b_bufobj);
1931 		mtx_unlock(&bqlock);
1932 
1933 		if (qindex == QUEUE_CLEAN) {
1934 			if (bp->b_flags & B_VMIO) {
1935 				bp->b_flags &= ~B_ASYNC;
1936 				vfs_vmio_release(bp);
1937 			}
1938 			if (bp->b_vp)
1939 				brelvp(bp);
1940 		}
1941 
1942 		/*
1943 		 * NOTE:  nbp is now entirely invalid.  We can only restart
1944 		 * the scan from this point on.
1945 		 *
1946 		 * Get the rest of the buffer freed up.  b_kva* is still
1947 		 * valid after this operation.
1948 		 */
1949 
1950 		if (bp->b_rcred != NOCRED) {
1951 			crfree(bp->b_rcred);
1952 			bp->b_rcred = NOCRED;
1953 		}
1954 		if (bp->b_wcred != NOCRED) {
1955 			crfree(bp->b_wcred);
1956 			bp->b_wcred = NOCRED;
1957 		}
1958 		if (!LIST_EMPTY(&bp->b_dep))
1959 			buf_deallocate(bp);
1960 		if (bp->b_vflags & BV_BKGRDINPROG)
1961 			panic("losing buffer 3");
1962 		KASSERT(bp->b_vp == NULL,
1963 		    ("bp: %p still has vnode %p.  qindex: %d",
1964 		    bp, bp->b_vp, qindex));
1965 		KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1966 		   ("bp: %p still on a buffer list. xflags %X",
1967 		    bp, bp->b_xflags));
1968 
1969 		if (bp->b_bufsize)
1970 			allocbuf(bp, 0);
1971 
1972 		bp->b_flags = 0;
1973 		bp->b_ioflags = 0;
1974 		bp->b_xflags = 0;
1975 		KASSERT((bp->b_vflags & BV_INFREECNT) == 0,
1976 		    ("buf %p still counted as free?", bp));
1977 		bp->b_vflags = 0;
1978 		bp->b_vp = NULL;
1979 		bp->b_blkno = bp->b_lblkno = 0;
1980 		bp->b_offset = NOOFFSET;
1981 		bp->b_iodone = 0;
1982 		bp->b_error = 0;
1983 		bp->b_resid = 0;
1984 		bp->b_bcount = 0;
1985 		bp->b_npages = 0;
1986 		bp->b_dirtyoff = bp->b_dirtyend = 0;
1987 		bp->b_bufobj = NULL;
1988 		bp->b_pin_count = 0;
1989 		bp->b_fsprivate1 = NULL;
1990 		bp->b_fsprivate2 = NULL;
1991 		bp->b_fsprivate3 = NULL;
1992 
1993 		LIST_INIT(&bp->b_dep);
1994 
1995 		/*
1996 		 * If we are defragging then free the buffer.
1997 		 */
1998 		if (defrag) {
1999 			bp->b_flags |= B_INVAL;
2000 			bfreekva(bp);
2001 			brelse(bp);
2002 			defrag = 0;
2003 			goto restart;
2004 		}
2005 
2006 		/*
2007 		 * Notify any waiters for the buffer lock about
2008 		 * identity change by freeing the buffer.
2009 		 */
2010 		if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) {
2011 			bp->b_flags |= B_INVAL;
2012 			bfreekva(bp);
2013 			brelse(bp);
2014 			goto restart;
2015 		}
2016 
2017 		/*
2018 		 * If we are overcommitted then recover the buffer and its
2019 		 * KVM space.  This occurs in rare situations when multiple
2020 		 * processes are blocked in getnewbuf() or allocbuf().
2021 		 */
2022 		if (bufspace >= hibufspace)
2023 			flushingbufs = 1;
2024 		if (flushingbufs && bp->b_kvasize != 0) {
2025 			bp->b_flags |= B_INVAL;
2026 			bfreekva(bp);
2027 			brelse(bp);
2028 			goto restart;
2029 		}
2030 		if (bufspace < lobufspace)
2031 			flushingbufs = 0;
2032 		break;
2033 	}
2034 
2035 	/*
2036 	 * If we exhausted our list, sleep as appropriate.  We may have to
2037 	 * wakeup various daemons and write out some dirty buffers.
2038 	 *
2039 	 * Generally we are sleeping due to insufficient buffer space.
2040 	 */
2041 
2042 	if (bp == NULL) {
2043 		int flags, norunbuf;
2044 		char *waitmsg;
2045 		int fl;
2046 
2047 		if (defrag) {
2048 			flags = VFS_BIO_NEED_BUFSPACE;
2049 			waitmsg = "nbufkv";
2050 		} else if (bufspace >= hibufspace) {
2051 			waitmsg = "nbufbs";
2052 			flags = VFS_BIO_NEED_BUFSPACE;
2053 		} else {
2054 			waitmsg = "newbuf";
2055 			flags = VFS_BIO_NEED_ANY;
2056 		}
2057 		mtx_lock(&nblock);
2058 		needsbuffer |= flags;
2059 		mtx_unlock(&nblock);
2060 		mtx_unlock(&bqlock);
2061 
2062 		bd_speedup();	/* heeeelp */
2063 		if (gbflags & GB_NOWAIT_BD)
2064 			return (NULL);
2065 
2066 		mtx_lock(&nblock);
2067 		while (needsbuffer & flags) {
2068 			if (vp != NULL && (td->td_pflags & TDP_BUFNEED) == 0) {
2069 				mtx_unlock(&nblock);
2070 				/*
2071 				 * getblk() is called with a vnode
2072 				 * locked, and a majority of the
2073 				 * dirty buffers may well belong to
2074 				 * that vnode.  Flushing those
2075 				 * buffers here makes progress that
2076 				 * the buf daemon cannot achieve on
2077 				 * its own, because it cannot lock
2078 				 * the vnode.
2079 				 */
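				/*
				 * Build a mask that will later clear
				 * TDP_BUFNEED unconditionally but clear
				 * TDP_NORUNNINGBUF only if it was not
				 * already set, restoring the thread's
				 * original flags after the flush.
				 */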
2080 				norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
2081 				    (td->td_pflags & TDP_NORUNNINGBUF);
2082 				/* play bufdaemon */
2083 				td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
2084 				fl = buf_do_flush(vp);
2085 				td->td_pflags &= norunbuf;
2086 				mtx_lock(&nblock);
2087 				if (fl != 0)
2088 					continue;
2089 				if ((needsbuffer & flags) == 0)
2090 					break;
2091 			}
2092 			if (msleep(&needsbuffer, &nblock,
2093 			    (PRIBIO + 4) | slpflag, waitmsg, slptimeo)) {
2094 				mtx_unlock(&nblock);
2095 				return (NULL);
2096 			}
2097 		}
2098 		mtx_unlock(&nblock);
2099 	} else {
2100 		/*
2101 		 * We finally have a valid bp.  We aren't quite out of the
2102 		 * woods; we still have to reserve kva space.  In order
2103 		 * to keep fragmentation sane we only allocate kva in
2104 		 * BKVASIZE chunks.
2105 		 */
2106 		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
2107 
2108 		if (maxsize != bp->b_kvasize) {
2109 			vm_offset_t addr = 0;
2110 			int rv;
2111 
2112 			bfreekva(bp);
2113 
2114 			vm_map_lock(buffer_map);
2115 			if (vm_map_findspace(buffer_map,
2116 			    vm_map_min(buffer_map), maxsize, &addr)) {
2117 				/*
2118 				 * Buffer map is too fragmented.
2119 				 * We must defragment the map.
2120 				 */
2121 				atomic_add_int(&bufdefragcnt, 1);
2122 				vm_map_unlock(buffer_map);
2123 				defrag = 1;
2124 				bp->b_flags |= B_INVAL;
2125 				brelse(bp);
2126 				goto restart;
2127 			}
2128 			rv = vm_map_insert(buffer_map, NULL, 0, addr,
2129 			    addr + maxsize, VM_PROT_ALL, VM_PROT_ALL,
2130 			    MAP_NOFAULT);
2131 			KASSERT(rv == KERN_SUCCESS,
2132 			    ("vm_map_insert(buffer_map) rv %d", rv));
2133 			vm_map_unlock(buffer_map);
2134 			bp->b_kvabase = (caddr_t)addr;
2135 			bp->b_kvasize = maxsize;
2136 			atomic_add_long(&bufspace, bp->b_kvasize);
2137 			atomic_add_int(&bufreusecnt, 1);
2138 		}
2139 		bp->b_saveaddr = bp->b_kvabase;
2140 		bp->b_data = bp->b_saveaddr;
2141 	}
2142 	return (bp);
2143 }
2144 
2145 /*
2146  *	buf_daemon:
2147  *
2148  *	buffer flushing daemon.  Buffers are normally flushed by the
2149  *	update daemon but if it cannot keep up this process starts to
2150  *	take the load in an attempt to prevent getnewbuf() from blocking.
2151  */
2152 
2153 static struct kproc_desc buf_kp = {
2154 	"bufdaemon",
2155 	buf_daemon,
2156 	&bufdaemonproc
2157 };
2158 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
2159 
2160 static int
2161 buf_do_flush(struct vnode *vp)
2162 {
2163 	int flushed;
2164 
2165 	flushed = flushbufqueues(vp, QUEUE_DIRTY, 0);
2166 	if (flushed == 0) {
2167 		/*
2168 		 * Could not find any buffers without rollback
2169 		 * dependencies, so just write the first one
2170 		 * in the hopes of eventually making progress.
2171 		 */
2172 		flushbufqueues(vp, QUEUE_DIRTY, 1);
2173 	}
2174 	return (flushed);
2175 }
2176 
2177 static void
2178 buf_daemon()
2179 {
2180 	int lodirtysave;
2181 
2182 	/*
2183 	 * This process needs to be suspended prior to shutdown sync.
2184 	 */
2185 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
2186 	    SHUTDOWN_PRI_LAST);
2187 
2188 	/*
2189 	 * This process is allowed to take the buffer cache to the limit
2190 	 */
2191 	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
2192 	mtx_lock(&bdlock);
2193 	for (;;) {
2194 		bd_request = 0;
2195 		mtx_unlock(&bdlock);
2196 
2197 		kproc_suspend_check(bufdaemonproc);
2198 		lodirtysave = lodirtybuffers;
2199 		if (bd_speedupreq) {
2200 			lodirtybuffers = numdirtybuffers / 2;
2201 			bd_speedupreq = 0;
2202 		}
2203 		/*
2204 		 * Do the flush.  Limit the amount of in-transit I/O we
2205 		 * allow to build up, otherwise we would completely saturate
2206 		 * the I/O system.  Wakeup any waiting processes before we
2207 		 * normally would so they can run in parallel with our drain.
2208 		 */
2209 		while (numdirtybuffers > lodirtybuffers) {
2210 			if (buf_do_flush(NULL) == 0)
2211 				break;
2212 			kern_yield(PRI_USER);
2213 		}
2214 		lodirtybuffers = lodirtysave;
2215 
2216 		/*
2217 		 * Only clear bd_request if we have reached our low water
2218 		 * mark.  The buf_daemon normally waits 1 second and
2219 		 * then incrementally flushes any dirty buffers that have
2220 		 * built up, within reason.
2221 		 *
2222 		 * If we were unable to hit our low water mark and couldn't
2223 		 * find any flushable buffers, we sleep half a second.
2224 		 * Otherwise we loop immediately.
2225 		 */
2226 		mtx_lock(&bdlock);
2227 		if (numdirtybuffers <= lodirtybuffers) {
2228 			/*
2229 			 * We reached our low water mark, reset the
2230 			 * request and sleep until we are needed again.
2231 			 * The sleep is just so the suspend code works.
2232 			 */
2233 			bd_request = 0;
2234 			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
2235 		} else {
2236 			/*
2237 			 * We couldn't find any flushable dirty buffers but
2238 			 * still have too many dirty buffers, so we
2239 			 * have to sleep and try again.  (rare)
2240 			 */
2241 			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
2242 		}
2243 	}
2244 }
2245 
2246 /*
2247  *	flushbufqueues:
2248  *
2249  *	Try to flush a buffer in the dirty queue.  We must be careful to
2250  *	free up B_INVAL buffers instead of write them, which NFS is
2251  *	particularly sensitive to.
2252  */
2253 static int flushwithdeps = 0;
2254 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
2255     0, "Number of buffers flushed with dependecies that require rollbacks");
2256 
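/*
 * The scan in flushbufqueues() keeps its place in the queue with a marker
 * ("sentinel") buffer so that bqlock may be dropped while a candidate is
 * written out: before a buffer is examined the sentinel is moved just past
 * it, so the iteration survives concurrent queue manipulation.  Sketch of
 * the walk:
 *
 *	TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist);
 *	while ((bp = TAILQ_NEXT(sentinel, b_freelist)) != NULL) {
 *		TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
 *		TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel, b_freelist);
 *		... examine bp, possibly dropping and retaking bqlock ...
 *	}
 */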
2257 static int
2258 flushbufqueues(struct vnode *lvp, int queue, int flushdeps)
2259 {
2260 	struct buf *sentinel;
2261 	struct vnode *vp;
2262 	struct mount *mp;
2263 	struct buf *bp;
2264 	int hasdeps;
2265 	int flushed;
2266 	int target;
2267 
2268 	if (lvp == NULL) {
2269 		target = numdirtybuffers - lodirtybuffers;
2270 		if (flushdeps && target > 2)
2271 			target /= 2;
2272 	} else
2273 		target = flushbufqtarget;
2274 	flushed = 0;
2275 	bp = NULL;
2276 	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
2277 	sentinel->b_qindex = QUEUE_SENTINEL;
2278 	mtx_lock(&bqlock);
2279 	TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist);
2280 	while (flushed != target) {
2281 		bp = TAILQ_NEXT(sentinel, b_freelist);
2282 		if (bp != NULL) {
2283 			TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
2284 			TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel,
2285 			    b_freelist);
2286 		} else
2287 			break;
2288 		/*
2289 		 * Skip sentinels inserted by other invocations of
2290 		 * flushbufqueues(), taking care not to reorder them.
2291 		 */
2292 		if (bp->b_qindex == QUEUE_SENTINEL)
2293 			continue;
2294 		/*
2295 		 * Only flush the buffers that belong to the
2296 		 * vnode locked by the curthread.
2297 		 */
2298 		if (lvp != NULL && bp->b_vp != lvp)
2299 			continue;
2300 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2301 			continue;
2302 		if (bp->b_pin_count > 0) {
2303 			BUF_UNLOCK(bp);
2304 			continue;
2305 		}
2306 		BO_LOCK(bp->b_bufobj);
2307 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
2308 		    (bp->b_flags & B_DELWRI) == 0) {
2309 			BO_UNLOCK(bp->b_bufobj);
2310 			BUF_UNLOCK(bp);
2311 			continue;
2312 		}
2313 		BO_UNLOCK(bp->b_bufobj);
2314 		if (bp->b_flags & B_INVAL) {
2315 			bremfreel(bp);
2316 			mtx_unlock(&bqlock);
2317 			brelse(bp);
2318 			flushed++;
2319 			numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
2320 			mtx_lock(&bqlock);
2321 			continue;
2322 		}
2323 
2324 		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
2325 			if (flushdeps == 0) {
2326 				BUF_UNLOCK(bp);
2327 				continue;
2328 			}
2329 			hasdeps = 1;
2330 		} else
2331 			hasdeps = 0;
2332 		/*
2333 		 * We must hold the lock on a vnode before writing
2334 		 * one of its buffers. Otherwise we may confuse, or
2335 		 * in the case of a snapshot vnode, deadlock the
2336 		 * system.
2337 		 *
2338 		 * The lock order here is the reverse of the normal
2339 		 * order of vnode lock followed by buf lock.  This is ok because
2340 		 * the NOWAIT will prevent deadlock.
2341 		 */
2342 		vp = bp->b_vp;
2343 		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2344 			BUF_UNLOCK(bp);
2345 			continue;
2346 		}
2347 		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_CANRECURSE) == 0) {
2348 			mtx_unlock(&bqlock);
2349 			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
2350 			    bp, bp->b_vp, bp->b_flags);
2351 			if (curproc == bufdaemonproc)
2352 				vfs_bio_awrite(bp);
2353 			else {
2354 				bremfree(bp);
2355 				bwrite(bp);
2356 				notbufdflashes++;
2357 			}
2358 			vn_finished_write(mp);
2359 			VOP_UNLOCK(vp, 0);
2360 			flushwithdeps += hasdeps;
2361 			flushed++;
2362 
2363 			/*
2364 			 * Sleeping on runningbufspace while holding
2365 			 * vnode lock leads to deadlock.
2366 			 */
2367 			if (curproc == bufdaemonproc)
2368 				waitrunningbufspace();
2369 			numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
2370 			mtx_lock(&bqlock);
2371 			continue;
2372 		}
2373 		vn_finished_write(mp);
2374 		BUF_UNLOCK(bp);
2375 	}
2376 	TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
2377 	mtx_unlock(&bqlock);
2378 	free(sentinel, M_TEMP);
2379 	return (flushed);
2380 }
2381 
2382 /*
2383  * Check to see if a block is currently memory resident.
2384  */
2385 struct buf *
2386 incore(struct bufobj *bo, daddr_t blkno)
2387 {
2388 	struct buf *bp;
2389 
2390 	BO_LOCK(bo);
2391 	bp = gbincore(bo, blkno);
2392 	BO_UNLOCK(bo);
2393 	return (bp);
2394 }
2395 
2396 /*
2397  * Returns true if no I/O is needed to access the
2398  * associated VM object.  This is like incore except
2399  * it also hunts around in the VM system for the data.
2400  */
2401 
2402 static int
2403 inmem(struct vnode * vp, daddr_t blkno)
2404 {
2405 	vm_object_t obj;
2406 	vm_offset_t toff, tinc, size;
2407 	vm_page_t m;
2408 	vm_ooffset_t off;
2409 
2410 	ASSERT_VOP_LOCKED(vp, "inmem");
2411 
2412 	if (incore(&vp->v_bufobj, blkno))
2413 		return 1;
2414 	if (vp->v_mount == NULL)
2415 		return 0;
2416 	obj = vp->v_object;
2417 	if (obj == NULL)
2418 		return (0);
2419 
2420 	size = PAGE_SIZE;
2421 	if (size > vp->v_mount->mnt_stat.f_iosize)
2422 		size = vp->v_mount->mnt_stat.f_iosize;
2423 	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
2424 
2425 	VM_OBJECT_LOCK(obj);
2426 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
2427 		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
2428 		if (!m)
2429 			goto notinmem;
2430 		tinc = size;
2431 		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
2432 			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
2433 		if (vm_page_is_valid(m,
2434 		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
2435 			goto notinmem;
2436 	}
2437 	VM_OBJECT_UNLOCK(obj);
2438 	return 1;
2439 
2440 notinmem:
2441 	VM_OBJECT_UNLOCK(obj);
2442 	return (0);
2443 }
2444 
2445 /*
2446  * Set the dirty range for a buffer based on the status of the dirty
2447  * bits in the pages comprising the buffer.  The range is limited
2448  * to the size of the buffer.
2449  *
2450  * Tell the VM system that the pages associated with this buffer
2451  * are clean.  This is used for delayed writes where the data is
2452  * going to go to disk eventually without additional VM intervention.
2453  *
2454  * Note that while we only really need to clean through to b_bcount, we
2455  * just go ahead and clean through to b_bufsize.
2456  */
2457 static void
2458 vfs_clean_pages_dirty_buf(struct buf *bp)
2459 {
2460 	vm_ooffset_t foff, noff, eoff;
2461 	vm_page_t m;
2462 	int i;
2463 
2464 	if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
2465 		return;
2466 
2467 	foff = bp->b_offset;
2468 	KASSERT(bp->b_offset != NOOFFSET,
2469 	    ("vfs_clean_pages_dirty_buf: no buffer offset"));
2470 
2471 	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
2472 	vfs_drain_busy_pages(bp);
2473 	vfs_setdirty_locked_object(bp);
2474 	for (i = 0; i < bp->b_npages; i++) {
2475 		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2476 		eoff = noff;
2477 		if (eoff > bp->b_offset + bp->b_bufsize)
2478 			eoff = bp->b_offset + bp->b_bufsize;
2479 		m = bp->b_pages[i];
2480 		vfs_page_set_validclean(bp, foff, m);
2481 		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
2482 		foff = noff;
2483 	}
2484 	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
2485 }
2486 
2487 static void
2488 vfs_setdirty_locked_object(struct buf *bp)
2489 {
2490 	vm_object_t object;
2491 	int i;
2492 
2493 	object = bp->b_bufobj->bo_object;
2494 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2495 
2496 	/*
2497 	 * We qualify the scan for modified pages on whether the
2498 	 * object has been flushed yet.
2499 	 */
2500 	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) {
2501 		vm_offset_t boffset;
2502 		vm_offset_t eoffset;
2503 
2504 		/*
2505 		 * test the pages to see if they have been modified directly
2506 		 * by users through the VM system.
2507 		 */
2508 		for (i = 0; i < bp->b_npages; i++)
2509 			vm_page_test_dirty(bp->b_pages[i]);
2510 
2511 		/*
2512 		 * Calculate the encompassing dirty range, boffset and eoffset,
2513 		 * (eoffset - boffset) bytes.
2514 		 */
2515 
2516 		for (i = 0; i < bp->b_npages; i++) {
2517 			if (bp->b_pages[i]->dirty)
2518 				break;
2519 		}
2520 		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2521 
2522 		for (i = bp->b_npages - 1; i >= 0; --i) {
2523 			if (bp->b_pages[i]->dirty) {
2524 				break;
2525 			}
2526 		}
2527 		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2528 
2529 		/*
2530 		 * Fit it to the buffer.
2531 		 */
2532 
2533 		if (eoffset > bp->b_bcount)
2534 			eoffset = bp->b_bcount;
2535 
2536 		/*
2537 		 * If we have a good dirty range, merge with the existing
2538 		 * dirty range.
2539 		 */
2540 
2541 		if (boffset < eoffset) {
2542 			if (bp->b_dirtyoff > boffset)
2543 				bp->b_dirtyoff = boffset;
2544 			if (bp->b_dirtyend < eoffset)
2545 				bp->b_dirtyend = eoffset;
2546 		}
2547 	}
2548 }
2549 
2550 /*
2551  *	getblk:
2552  *
2553  *	Get a block given a specified block and offset into a file/device.
2554  *	The buffer's B_DONE bit will be cleared on return, making it almost
2555  * 	ready for an I/O initiation.  B_INVAL may or may not be set on
2556  *	return.  The caller should clear B_INVAL prior to initiating a
2557  *	READ.
2558  *
2559  *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
2560  *	an existing buffer.
2561  *
2562  *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
2563  *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
2564  *	and then cleared based on the backing VM.  If the previous buffer is
2565  *	non-0-sized but invalid, B_CACHE will be cleared.
2566  *
2567  *	If getblk() must create a new buffer, the new buffer is returned with
2568  *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
2569  *	case it is returned with B_INVAL clear and B_CACHE set based on the
2570  *	backing VM.
2571  *
2572  *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
2573  *	B_CACHE bit is clear.
2574  *
2575  *	What this means, basically, is that the caller should use B_CACHE to
2576  *	determine whether the buffer is fully valid or not and should clear
2577  *	B_INVAL prior to issuing a read.  If the caller intends to validate
2578  *	the buffer by loading its data area with something, the caller needs
2579  *	to clear B_INVAL.  If the caller does this without issuing an I/O,
2580  *	the caller should set B_CACHE ( as an optimization ), else the caller
2581  *	should issue the I/O and biodone() will set B_CACHE if the I/O was
2582  *	a write attempt or if it was a successful read.  If the caller
2583  *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
2584  *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
2585  */
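/*
 * Illustrative sketch (not compiled) of the B_CACHE protocol described
 * above, for a caller that wants logical block lblkno of a locked vnode
 * and issues the read itself:
 *
 *	bp = getblk(vp, lblkno, size, 0, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_iocmd = BIO_READ;
 *		bp->b_flags &= ~B_INVAL;
 *		vfs_busy_pages(bp, 0);
 *		bp->b_iooffset = dbtob(bp->b_blkno);
 *		bstrategy(bp);
 *		error = bufwait(bp);
 *	}
 *
 * This is roughly what bread(9) does internally; ordinary callers should
 * just use bread() rather than open-coding the sequence.
 */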
2586 struct buf *
2587 getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
2588     int flags)
2589 {
2590 	struct buf *bp;
2591 	struct bufobj *bo;
2592 	int error;
2593 
2594 	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
2595 	ASSERT_VOP_LOCKED(vp, "getblk");
2596 	if (size > MAXBSIZE)
2597 		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
2598 
2599 	bo = &vp->v_bufobj;
2600 loop:
2601 	/*
2602 	 * Block if we are low on buffers.   Certain processes are allowed
2603 	 * to completely exhaust the buffer cache.
2604 	 *
2605 	 * If this check ever becomes a bottleneck it may be better to
2606 	 * move it into the else, when gbincore() fails.  At the moment
2607 	 * it isn't a problem.
2608 	 */
2609 	if (numfreebuffers == 0) {
2610 		if (TD_IS_IDLETHREAD(curthread))
2611 			return NULL;
2612 		mtx_lock(&nblock);
2613 		needsbuffer |= VFS_BIO_NEED_ANY;
2614 		mtx_unlock(&nblock);
2615 	}
2616 
2617 	BO_LOCK(bo);
2618 	bp = gbincore(bo, blkno);
2619 	if (bp != NULL) {
2620 		int lockflags;
2621 		/*
2622 		 * Buffer is in-core.  If the buffer is neither busy nor managed,
2623 		 * it must be on a queue.
2624 		 */
2625 		lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
2626 
2627 		if (flags & GB_LOCK_NOWAIT)
2628 			lockflags |= LK_NOWAIT;
2629 
2630 		error = BUF_TIMELOCK(bp, lockflags,
2631 		    BO_MTX(bo), "getblk", slpflag, slptimeo);
2632 
2633 		/*
2634 		 * If we slept and got the lock we have to restart in case
2635 		 * the buffer changed identities.
2636 		 */
2637 		if (error == ENOLCK)
2638 			goto loop;
2639 		/* We timed out or were interrupted. */
2640 		else if (error)
2641 			return (NULL);
2642 
2643 		/*
2644 		 * The buffer is locked.  B_CACHE is cleared if the buffer is
2645 		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
2646 		 * and for a VMIO buffer B_CACHE is adjusted according to the
2647 		 * backing VM cache.
2648 		 */
2649 		if (bp->b_flags & B_INVAL)
2650 			bp->b_flags &= ~B_CACHE;
2651 		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
2652 			bp->b_flags |= B_CACHE;
2653 		if (bp->b_flags & B_MANAGED)
2654 			MPASS(bp->b_qindex == QUEUE_NONE);
2655 		else {
2656 			BO_LOCK(bo);
2657 			bremfree(bp);
2658 			BO_UNLOCK(bo);
2659 		}
2660 
2661 		/*
2662 		 * check for size inconsistencies for the non-VMIO case.
2663 		 */
2664 
2665 		if (bp->b_bcount != size) {
2666 			if ((bp->b_flags & B_VMIO) == 0 ||
2667 			    (size > bp->b_kvasize)) {
2668 				if (bp->b_flags & B_DELWRI) {
2669 					/*
2670 					 * If the buffer is pinned and the caller
2671 					 * does not want to sleep waiting for it
2672 					 * to be unpinned, bail out.
2673 					 */
2674 					if (bp->b_pin_count > 0) {
2675 						if (flags & GB_LOCK_NOWAIT) {
2676 							bqrelse(bp);
2677 							return (NULL);
2678 						} else {
2679 							bunpin_wait(bp);
2680 						}
2681 					}
2682 					bp->b_flags |= B_NOCACHE;
2683 					bwrite(bp);
2684 				} else {
2685 					if (LIST_EMPTY(&bp->b_dep)) {
2686 						bp->b_flags |= B_RELBUF;
2687 						brelse(bp);
2688 					} else {
2689 						bp->b_flags |= B_NOCACHE;
2690 						bwrite(bp);
2691 					}
2692 				}
2693 				goto loop;
2694 			}
2695 		}
2696 
2697 		/*
2698 		 * If the size is inconsistent in the VMIO case, we can resize
2699 		 * the buffer.  This might lead to B_CACHE getting set or
2700 		 * cleared.  If the size has not changed, B_CACHE remains
2701 		 * unchanged from its previous state.
2702 		 */
2703 
2704 		if (bp->b_bcount != size)
2705 			allocbuf(bp, size);
2706 
2707 		KASSERT(bp->b_offset != NOOFFSET,
2708 		    ("getblk: no buffer offset"));
2709 
2710 		/*
2711 		 * A buffer with B_DELWRI set and B_CACHE clear must
2712 		 * be committed before we can return the buffer in
2713 		 * order to prevent the caller from issuing a read
2714 		 * ( due to B_CACHE not being set ) and overwriting
2715 		 * it.
2716 		 *
2717 		 * Most callers, including NFS and FFS, need this to
2718 		 * operate properly either because they assume they
2719 		 * can issue a read if B_CACHE is not set, or because
2720 		 * ( for example ) an uncached B_DELWRI might loop due
2721 		 * to softupdates re-dirtying the buffer.  In the latter
2722 		 * case, B_CACHE is set after the first write completes,
2723 		 * preventing further loops.
2724 		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
2725 		 * above while extending the buffer, we cannot allow the
2726 		 * buffer to remain with B_CACHE set after the write
2727 		 * completes or it will represent a corrupt state.  To
2728 		 * deal with this we set B_NOCACHE to scrap the buffer
2729 		 * after the write.
2730 		 *
2731 		 * We might be able to do something fancy, like setting
2732 		 * B_CACHE in bwrite() except if B_DELWRI is already set,
2733 		 * so the below call doesn't set B_CACHE, but that gets real
2734 		 * confusing.  This is much easier.
2735 		 */
2736 
2737 		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
2738 			bp->b_flags |= B_NOCACHE;
2739 			bwrite(bp);
2740 			goto loop;
2741 		}
2742 		bp->b_flags &= ~B_DONE;
2743 	} else {
2744 		int bsize, maxsize, vmio;
2745 		off_t offset;
2746 
2747 		/*
2748 		 * Buffer is not in-core, create new buffer.  The buffer
2749 		 * returned by getnewbuf() is locked.  Note that the returned
2750 		 * buffer is also considered valid (not marked B_INVAL).
2751 		 */
2752 		BO_UNLOCK(bo);
2753 		/*
2754 		 * If the user does not want us to create the buffer, bail out
2755 		 * here.
2756 		 */
2757 		if (flags & GB_NOCREAT)
2758 			return NULL;
2759 		bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize;
2760 		offset = blkno * bsize;
2761 		vmio = vp->v_object != NULL;
2762 		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
2763 		maxsize = imax(maxsize, bsize);
2764 
2765 		bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize, flags);
2766 		if (bp == NULL) {
2767 			if (slpflag || slptimeo)
2768 				return NULL;
2769 			goto loop;
2770 		}
2771 
2772 		/*
2773 		 * This code is used to make sure that a buffer is not
2774 		 * created while the getnewbuf routine is blocked.
2775 		 * This can be a problem whether the vnode is locked or not.
2776 		 * If the buffer is created out from under us, we have to
2777 		 * throw away the one we just created.
2778 		 *
2779 		 * Note: this must occur before we associate the buffer
2780 		 * with the vp especially considering limitations in
2781 		 * the splay tree implementation when dealing with duplicate
2782 		 * lblkno's.
2783 		 */
2784 		BO_LOCK(bo);
2785 		if (gbincore(bo, blkno)) {
2786 			BO_UNLOCK(bo);
2787 			bp->b_flags |= B_INVAL;
2788 			brelse(bp);
2789 			goto loop;
2790 		}
2791 
2792 		/*
2793 		 * Insert the buffer into the hash, so that it can
2794 		 * be found by incore.
2795 		 */
2796 		bp->b_blkno = bp->b_lblkno = blkno;
2797 		bp->b_offset = offset;
2798 		bgetvp(vp, bp);
2799 		BO_UNLOCK(bo);
2800 
2801 		/*
2802 		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
2803 		 * buffer size starts out as 0, B_CACHE will be set by
2804 		 * allocbuf() for the VMIO case prior to it testing the
2805 		 * backing store for validity.
2806 		 */
2807 
2808 		if (vmio) {
2809 			bp->b_flags |= B_VMIO;
2810 			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
2811 			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
2812 			    bp, vp->v_object, bp->b_bufobj->bo_object));
2813 		} else {
2814 			bp->b_flags &= ~B_VMIO;
2815 			KASSERT(bp->b_bufobj->bo_object == NULL,
2816 			    ("ARGH! has b_bufobj->bo_object %p %p\n",
2817 			    bp, bp->b_bufobj->bo_object));
2818 		}
2819 
2820 		allocbuf(bp, size);
2821 		bp->b_flags &= ~B_DONE;
2822 	}
2823 	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
2824 	BUF_ASSERT_HELD(bp);
2825 	KASSERT(bp->b_bufobj == bo,
2826 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2827 	return (bp);
2828 }
2829 
2830 /*
2831  * Get an empty, disassociated buffer of given size.  The buffer is initially
2832  * set to B_INVAL.
2833  */
2834 struct buf *
2835 geteblk(int size, int flags)
2836 {
2837 	struct buf *bp;
2838 	int maxsize;
2839 
2840 	maxsize = (size + BKVAMASK) & ~BKVAMASK;
2841 	while ((bp = getnewbuf(NULL, 0, 0, size, maxsize, flags)) == NULL) {
2842 		if ((flags & GB_NOWAIT_BD) &&
2843 		    (curthread->td_pflags & TDP_BUFNEED) != 0)
2844 			return (NULL);
2845 	}
2846 	allocbuf(bp, size);
2847 	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
2848 	BUF_ASSERT_HELD(bp);
2849 	return (bp);
2850 }
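/*
 * Illustrative use of geteblk() (a sketch; len and src are stand-ins):
 * grab an anonymous scratch buffer, fill its data area, and release it
 * when done.  Because the buffer comes back flagged B_INVAL, brelse()
 * simply recycles it.
 *
 *	bp = geteblk(len, 0);
 *	bcopy(src, bp->b_data, len);
 *	... hand bp to a strategy routine, or consume b_data directly ...
 *	brelse(bp);
 */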
2851 
2852 
2853 /*
2854  * This code constructs the buffer's memory from either anonymous system
2855  * memory (in the case of non-VMIO operations) or from an associated
2856  * VM object (in the case of VMIO operations).  This code is able to
2857  * resize a buffer up or down.
2858  *
2859  * Note that this code is tricky, and has many complications to resolve
2860  * deadlock or inconsistent data situations.  Tread lightly!!!
2861  * There are B_CACHE and B_DELWRI interactions that must be dealt with by
2862  * the caller.  Calling this code willy nilly can result in the loss of data.
2863  *
2864  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
2865  * B_CACHE for the non-VMIO case.
2866  */
2867 
2868 int
2869 allocbuf(struct buf *bp, int size)
2870 {
2871 	int newbsize, mbsize;
2872 	int i;
2873 
2874 	BUF_ASSERT_HELD(bp);
2875 
2876 	if (bp->b_kvasize < size)
2877 		panic("allocbuf: buffer too small");
2878 
2879 	if ((bp->b_flags & B_VMIO) == 0) {
2880 		caddr_t origbuf;
2881 		int origbufsize;
2882 		/*
2883 		 * Just get anonymous memory from the kernel.  Don't
2884 		 * mess with B_CACHE.
2885 		 */
2886 		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2887 		if (bp->b_flags & B_MALLOC)
2888 			newbsize = mbsize;
2889 		else
2890 			newbsize = round_page(size);
2891 
2892 		if (newbsize < bp->b_bufsize) {
2893 			/*
2894 			 * malloced buffers are not shrunk
2895 			 */
2896 			if (bp->b_flags & B_MALLOC) {
2897 				if (newbsize) {
2898 					bp->b_bcount = size;
2899 				} else {
2900 					free(bp->b_data, M_BIOBUF);
2901 					if (bp->b_bufsize) {
2902 						atomic_subtract_long(
2903 						    &bufmallocspace,
2904 						    bp->b_bufsize);
2905 						bufspacewakeup();
2906 						bp->b_bufsize = 0;
2907 					}
2908 					bp->b_saveaddr = bp->b_kvabase;
2909 					bp->b_data = bp->b_saveaddr;
2910 					bp->b_bcount = 0;
2911 					bp->b_flags &= ~B_MALLOC;
2912 				}
2913 				return 1;
2914 			}
2915 			vm_hold_free_pages(bp, newbsize);
2916 		} else if (newbsize > bp->b_bufsize) {
2917 			/*
2918 			 * We only use malloced memory on the first allocation,
2919 			 * and revert to page-allocated memory when the buffer
2920 			 * grows.
2921 			 */
2922 			/*
2923 			 * There is a potential smp race here that could lead
2924 			 * to bufmallocspace slightly exceeding the max.  It
2925 			 * is probably extremely rare and not worth worrying
2926 			 * over.
2927 			 */
2928 			if ( (bufmallocspace < maxbufmallocspace) &&
2929 				(bp->b_bufsize == 0) &&
2930 				(mbsize <= PAGE_SIZE/2)) {
2931 
2932 				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
2933 				bp->b_bufsize = mbsize;
2934 				bp->b_bcount = size;
2935 				bp->b_flags |= B_MALLOC;
2936 				atomic_add_long(&bufmallocspace, mbsize);
2937 				return 1;
2938 			}
2939 			origbuf = NULL;
2940 			origbufsize = 0;
2941 			/*
2942 			 * If the buffer is growing on an allocation other than
2943 			 * its first, we revert to the page-allocation scheme.
2944 			 */
2945 			if (bp->b_flags & B_MALLOC) {
2946 				origbuf = bp->b_data;
2947 				origbufsize = bp->b_bufsize;
2948 				bp->b_data = bp->b_kvabase;
2949 				if (bp->b_bufsize) {
2950 					atomic_subtract_long(&bufmallocspace,
2951 					    bp->b_bufsize);
2952 					bufspacewakeup();
2953 					bp->b_bufsize = 0;
2954 				}
2955 				bp->b_flags &= ~B_MALLOC;
2956 				newbsize = round_page(newbsize);
2957 			}
2958 			vm_hold_load_pages(
2959 			    bp,
2960 			    (vm_offset_t) bp->b_data + bp->b_bufsize,
2961 			    (vm_offset_t) bp->b_data + newbsize);
2962 			if (origbuf) {
2963 				bcopy(origbuf, bp->b_data, origbufsize);
2964 				free(origbuf, M_BIOBUF);
2965 			}
2966 		}
2967 	} else {
2968 		int desiredpages;
2969 
2970 		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2971 		desiredpages = (size == 0) ? 0 :
2972 			num_pages((bp->b_offset & PAGE_MASK) + newbsize);
2973 
2974 		if (bp->b_flags & B_MALLOC)
2975 			panic("allocbuf: VMIO buffer can't be malloced");
2976 		/*
2977 		 * Set B_CACHE initially if buffer is 0 length or will become
2978 		 * 0-length.
2979 		 */
2980 		if (size == 0 || bp->b_bufsize == 0)
2981 			bp->b_flags |= B_CACHE;
2982 
2983 		if (newbsize < bp->b_bufsize) {
2984 			/*
2985 			 * DEV_BSIZE aligned new buffer size is less than the
2986 			 * DEV_BSIZE aligned existing buffer size.  Figure out
2987 			 * if we have to remove any pages.
2988 			 */
2989 			if (desiredpages < bp->b_npages) {
2990 				vm_page_t m;
2991 
2992 				pmap_qremove((vm_offset_t)trunc_page(
2993 				    (vm_offset_t)bp->b_data) +
2994 				    (desiredpages << PAGE_SHIFT),
2995 				    (bp->b_npages - desiredpages));
2996 				VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
2997 				for (i = desiredpages; i < bp->b_npages; i++) {
2998 					/*
2999 					 * the page is not freed here -- it
3000 					 * is the responsibility of
3001 					 * vnode_pager_setsize
3002 					 */
3003 					m = bp->b_pages[i];
3004 					KASSERT(m != bogus_page,
3005 					    ("allocbuf: bogus page found"));
3006 					while (vm_page_sleep_if_busy(m, TRUE,
3007 					    "biodep"))
3008 						continue;
3009 
3010 					bp->b_pages[i] = NULL;
3011 					vm_page_lock(m);
3012 					vm_page_unwire(m, 0);
3013 					vm_page_unlock(m);
3014 				}
3015 				VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
3016 				bp->b_npages = desiredpages;
3017 			}
3018 		} else if (size > bp->b_bcount) {
3019 			/*
3020 			 * We are growing the buffer, possibly in a
3021 			 * byte-granular fashion.
3022 			 */
3023 			vm_object_t obj;
3024 			vm_offset_t toff;
3025 			vm_offset_t tinc;
3026 
3027 			/*
3028 			 * Step 1, bring in the VM pages from the object,
3029 			 * allocating them if necessary.  We must clear
3030 			 * B_CACHE if these pages are not valid for the
3031 			 * range covered by the buffer.
3032 			 */
3033 
3034 			obj = bp->b_bufobj->bo_object;
3035 
3036 			VM_OBJECT_LOCK(obj);
3037 			while (bp->b_npages < desiredpages) {
3038 				vm_page_t m;
3039 
3040 				/*
3041 				 * We must allocate system pages since blocking
3042 				 * here could interfere with paging I/O, no
3043 				 * matter which process we are.
3044 				 *
3045 				 * We can only test VPO_BUSY here.  Blocking on
3046 				 * m->busy might lead to a deadlock:
3047 				 *  vm_fault->getpages->cluster_read->allocbuf
3048 				 * Thus, we specify VM_ALLOC_IGN_SBUSY.
3049 				 */
3050 				m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) +
3051 				    bp->b_npages, VM_ALLOC_NOBUSY |
3052 				    VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
3053 				    VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY |
3054 				    VM_ALLOC_COUNT(desiredpages - bp->b_npages));
3055 				if (m->valid == 0)
3056 					bp->b_flags &= ~B_CACHE;
3057 				bp->b_pages[bp->b_npages] = m;
3058 				++bp->b_npages;
3059 			}
3060 
3061 			/*
3062 			 * Step 2.  We've loaded the pages into the buffer,
3063 			 * we have to figure out if we can still have B_CACHE
3064 			 * set.  Note that B_CACHE is set according to the
3065 			 * byte-granular range ( bcount and size ), not the
3066 			 * aligned range ( newbsize ).
3067 			 *
3068 			 * The VM test is against m->valid, which is DEV_BSIZE
3069 			 * aligned.  Needless to say, the validity of the data
3070 			 * needs to also be DEV_BSIZE aligned.  Note that this
3071 			 * fails with NFS if the server or some other client
3072 			 * extends the file's EOF.  If our buffer is resized,
3073 			 * B_CACHE may remain set! XXX
3074 			 */
3075 
3076 			toff = bp->b_bcount;
3077 			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
3078 
3079 			while ((bp->b_flags & B_CACHE) && toff < size) {
3080 				vm_pindex_t pi;
3081 
3082 				if (tinc > (size - toff))
3083 					tinc = size - toff;
3084 
3085 				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
3086 				    PAGE_SHIFT;
3087 
3088 				vfs_buf_test_cache(
3089 				    bp,
3090 				    bp->b_offset,
3091 				    toff,
3092 				    tinc,
3093 				    bp->b_pages[pi]
3094 				);
3095 				toff += tinc;
3096 				tinc = PAGE_SIZE;
3097 			}
3098 			VM_OBJECT_UNLOCK(obj);
3099 
3100 			/*
3101 			 * Step 3, fixup the KVM pmap.  Remember that
3102 			 * bp->b_data is relative to bp->b_offset, but
3103 			 * bp->b_offset may be offset into the first page.
3104 			 */
3105 
3106 			bp->b_data = (caddr_t)
3107 			    trunc_page((vm_offset_t)bp->b_data);
3108 			pmap_qenter(
3109 			    (vm_offset_t)bp->b_data,
3110 			    bp->b_pages,
3111 			    bp->b_npages
3112 			);
3113 
3114 			bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
3115 			    (vm_offset_t)(bp->b_offset & PAGE_MASK));
3116 		}
3117 	}
3118 	if (newbsize < bp->b_bufsize)
3119 		bufspacewakeup();
3120 	bp->b_bufsize = newbsize;	/* actual buffer allocation	*/
3121 	bp->b_bcount = size;		/* requested buffer size	*/
3122 	return 1;
3123 }
3124 
3125 void
3126 biodone(struct bio *bp)
3127 {
3128 	struct mtx *mtxp;
3129 	void (*done)(struct bio *);
3130 
3131 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3132 	mtx_lock(mtxp);
3133 	bp->bio_flags |= BIO_DONE;
3134 	done = bp->bio_done;
3135 	if (done == NULL)
3136 		wakeup(bp);
3137 	mtx_unlock(mtxp);
3138 	if (done != NULL)
3139 		done(bp);
3140 }
3141 
3142 /*
3143  * Wait for a BIO to finish.
3144  *
3145  * XXX: resort to a timeout for now.  The optimal locking (if any) for this
3146  * case is not yet clear.
3147  */
3148 int
3149 biowait(struct bio *bp, const char *wchan)
3150 {
3151 	struct mtx *mtxp;
3152 
3153 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3154 	mtx_lock(mtxp);
3155 	while ((bp->bio_flags & BIO_DONE) == 0)
3156 		msleep(bp, mtxp, PRIBIO, wchan, hz / 10);
3157 	mtx_unlock(mtxp);
3158 	if (bp->bio_error != 0)
3159 		return (bp->bio_error);
3160 	if (!(bp->bio_flags & BIO_ERROR))
3161 		return (0);
3162 	return (EIO);
3163 }
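/*
 * Illustrative synchronous submission sketch (not compiled): a consumer
 * that leaves bio_done NULL can hand the bio to a device strategy routine
 * and then block in biowait() until biodone() marks it BIO_DONE:
 *
 *	bip->bio_done = NULL;
 *	(*csw->d_strategy)(bip);
 *	error = biowait(bip, "biowt");
 *
 * Raw I/O paths such as physio(9) use essentially this pattern.
 */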
3164 
3165 void
3166 biofinish(struct bio *bp, struct devstat *stat, int error)
3167 {
3168 
3169 	if (error) {
3170 		bp->bio_error = error;
3171 		bp->bio_flags |= BIO_ERROR;
3172 	}
3173 	if (stat != NULL)
3174 		devstat_end_transaction_bio(stat, bp);
3175 	biodone(bp);
3176 }
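/*
 * Illustrative driver-side completion sketch (not compiled; the devstat
 * pointer sc->sc_stat is a hypothetical per-device field):
 *
 *	bp->bio_completed = bp->bio_length;
 *	biofinish(bp, sc->sc_stat, error);
 *
 * On error, biofinish() itself records bio_error and sets BIO_ERROR, as
 * shown above, before completing the bio.
 */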
3177 
3178 /*
3179  *	bufwait:
3180  *
3181  *	Wait for buffer I/O completion, returning error status.  The buffer
3182  *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
3183  *	error and cleared.
3184  */
3185 int
3186 bufwait(struct buf *bp)
3187 {
3188 	if (bp->b_iocmd == BIO_READ)
3189 		bwait(bp, PRIBIO, "biord");
3190 	else
3191 		bwait(bp, PRIBIO, "biowr");
3192 	if (bp->b_flags & B_EINTR) {
3193 		bp->b_flags &= ~B_EINTR;
3194 		return (EINTR);
3195 	}
3196 	if (bp->b_ioflags & BIO_ERROR) {
3197 		return (bp->b_error ? bp->b_error : EIO);
3198 	} else {
3199 		return (0);
3200 	}
3201 }
3202 
3203 /*
3204  * Call back function from struct bio back up to struct buf.
3205  */
3206 static void
3207 bufdonebio(struct bio *bip)
3208 {
3209 	struct buf *bp;
3210 
3211 	bp = bip->bio_caller2;
3212 	bp->b_resid = bp->b_bcount - bip->bio_completed;
3213 	bp->b_resid = bip->bio_resid;	/* XXX: remove */
3214 	bp->b_ioflags = bip->bio_flags;
3215 	bp->b_error = bip->bio_error;
3216 	if (bp->b_error)
3217 		bp->b_ioflags |= BIO_ERROR;
3218 	bufdone(bp);
3219 	g_destroy_bio(bip);
3220 }
3221 
3222 void
3223 dev_strategy(struct cdev *dev, struct buf *bp)
3224 {
3225 	struct cdevsw *csw;
3226 	struct bio *bip;
3227 	int ref;
3228 
3229 	if ((!bp->b_iocmd) || (bp->b_iocmd & (bp->b_iocmd - 1)))
3230 		panic("b_iocmd botch");
3231 	for (;;) {
3232 		bip = g_new_bio();
3233 		if (bip != NULL)
3234 			break;
3235 		/* Try again later */
3236 		tsleep(&bp, PRIBIO, "dev_strat", hz/10);
3237 	}
3238 	bip->bio_cmd = bp->b_iocmd;
3239 	bip->bio_offset = bp->b_iooffset;
3240 	bip->bio_length = bp->b_bcount;
3241 	bip->bio_bcount = bp->b_bcount;	/* XXX: remove */
3242 	bip->bio_data = bp->b_data;
3243 	bip->bio_done = bufdonebio;
3244 	bip->bio_caller2 = bp;
3245 	bip->bio_dev = dev;
3246 	KASSERT(dev->si_refcount > 0,
3247 	    ("dev_strategy on un-referenced struct cdev *(%s)",
3248 	    devtoname(dev)));
3249 	csw = dev_refthread(dev, &ref);
3250 	if (csw == NULL) {
3251 		g_destroy_bio(bip);
3252 		bp->b_error = ENXIO;
3253 		bp->b_ioflags = BIO_ERROR;
3254 		bufdone(bp);
3255 		return;
3256 	}
3257 	(*csw->d_strategy)(bip);
3258 	dev_relthread(dev, ref);
3259 }
3260 
3261 /*
3262  *	bufdone:
3263  *
3264  *	Finish I/O on a buffer, optionally calling a completion function.
3265  *	This is usually called from an interrupt so process blocking is
3266  *	not allowed.
3267  *
3268  *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
3269  *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
3270  *	assuming B_INVAL is clear.
3271  *
3272  *	For the VMIO case, we set B_CACHE if the op was a read and no
3273  *	read error occurred, or if the op was a write.  B_CACHE is never
3274  *	set if the buffer is invalid or otherwise uncacheable.
3275  *
3276  *	biodone does not mess with B_INVAL, allowing the I/O routine or the
3277  *	initiator to leave B_INVAL set to brelse the buffer out of existence
3278  *	in the biodone routine.
3279  */
3280 void
3281 bufdone(struct buf *bp)
3282 {
3283 	struct bufobj *dropobj;
3284 	void    (*biodone)(struct buf *);
3285 
3286 	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
3287 	dropobj = NULL;
3288 
3289 	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
3290 	BUF_ASSERT_HELD(bp);
3291 
3292 	runningbufwakeup(bp);
3293 	if (bp->b_iocmd == BIO_WRITE)
3294 		dropobj = bp->b_bufobj;
3295 	/* call optional completion function if requested */
3296 	if (bp->b_iodone != NULL) {
3297 		biodone = bp->b_iodone;
3298 		bp->b_iodone = NULL;
3299 		(*biodone) (bp);
3300 		if (dropobj)
3301 			bufobj_wdrop(dropobj);
3302 		return;
3303 	}
3304 
3305 	bufdone_finish(bp);
3306 
3307 	if (dropobj)
3308 		bufobj_wdrop(dropobj);
3309 }
3310 
3311 void
3312 bufdone_finish(struct buf *bp)
3313 {
3314 	BUF_ASSERT_HELD(bp);
3315 
3316 	if (!LIST_EMPTY(&bp->b_dep))
3317 		buf_complete(bp);
3318 
3319 	if (bp->b_flags & B_VMIO) {
3320 		vm_ooffset_t foff;
3321 		vm_page_t m;
3322 		vm_object_t obj;
3323 		struct vnode *vp;
3324 		int bogus, i, iosize;
3325 
3326 		obj = bp->b_bufobj->bo_object;
3327 		KASSERT(obj->paging_in_progress >= bp->b_npages,
3328 		    ("biodone_finish: paging in progress(%d) < b_npages(%d)",
3329 		    obj->paging_in_progress, bp->b_npages));
3330 
3331 		vp = bp->b_vp;
3332 		KASSERT(vp->v_holdcnt > 0,
3333 		    ("biodone_finish: vnode %p has zero hold count", vp));
3334 		KASSERT(vp->v_object != NULL,
3335 		    ("biodone_finish: vnode %p has no vm_object", vp));
3336 
3337 		foff = bp->b_offset;
3338 		KASSERT(bp->b_offset != NOOFFSET,
3339 		    ("biodone_finish: bp %p has no buffer offset", bp));
3340 
3341 		/*
3342 		 * Set B_CACHE if the op was a normal read and no error
3343 		 * occurred.  B_CACHE is set for writes in the b*write()
3344 		 * routines.
3345 		 */
3346 		iosize = bp->b_bcount - bp->b_resid;
3347 		if (bp->b_iocmd == BIO_READ &&
3348 		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
3349 		    !(bp->b_ioflags & BIO_ERROR)) {
3350 			bp->b_flags |= B_CACHE;
3351 		}
3352 		bogus = 0;
3353 		VM_OBJECT_LOCK(obj);
3354 		for (i = 0; i < bp->b_npages; i++) {
3355 			int bogusflag = 0;
3356 			int resid;
3357 
3358 			resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
3359 			if (resid > iosize)
3360 				resid = iosize;
3361 
3362 			/*
3363 			 * clean up bogus pages, restoring the originals
3364 			 */
3365 			m = bp->b_pages[i];
3366 			if (m == bogus_page) {
3367 				bogus = bogusflag = 1;
3368 				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
3369 				if (m == NULL)
3370 					panic("biodone: page disappeared!");
3371 				bp->b_pages[i] = m;
3372 			}
3373 			KASSERT(OFF_TO_IDX(foff) == m->pindex,
3374 			    ("biodone_finish: foff(%jd)/pindex(%ju) mismatch",
3375 			    (intmax_t)foff, (uintmax_t)m->pindex));
3376 
3377 			/*
3378 			 * In the write case, the valid and clean bits are
3379 			 * already changed correctly ( see bdwrite() ), so we
3380 			 * only need to do this here in the read case.
3381 			 */
3382 			if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
3383 				KASSERT((m->dirty & vm_page_bits(foff &
3384 				    PAGE_MASK, resid)) == 0, ("bufdone_finish:"
3385 				    " page %p has unexpected dirty bits", m));
3386 				vfs_page_set_valid(bp, foff, m);
3387 			}
3388 
3389 			vm_page_io_finish(m);
3390 			vm_object_pip_subtract(obj, 1);
3391 			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3392 			iosize -= resid;
3393 		}
3394 		vm_object_pip_wakeupn(obj, 0);
3395 		VM_OBJECT_UNLOCK(obj);
3396 		if (bogus)
3397 			pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3398 			    bp->b_pages, bp->b_npages);
3399 	}
3400 
3401 	/*
3402 	 * For asynchronous completions, release the buffer now. The brelse
3403 	 * will do a wakeup there if necessary - so no need to do a wakeup
3404 	 * here in the async case. The sync case always needs to do a wakeup.
3405 	 */
3406 
3407 	if (bp->b_flags & B_ASYNC) {
3408 		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
3409 			brelse(bp);
3410 		else
3411 			bqrelse(bp);
3412 	} else
3413 		bdone(bp);
3414 }
3415 
3416 /*
3417  * This routine is called in lieu of iodone in the case of
3418  * incomplete I/O.  This keeps the busy status for pages
3419  * consistent.
3420  */
3421 void
3422 vfs_unbusy_pages(struct buf *bp)
3423 {
3424 	int i;
3425 	vm_object_t obj;
3426 	vm_page_t m;
3427 
3428 	runningbufwakeup(bp);
3429 	if (!(bp->b_flags & B_VMIO))
3430 		return;
3431 
3432 	obj = bp->b_bufobj->bo_object;
3433 	VM_OBJECT_LOCK(obj);
3434 	for (i = 0; i < bp->b_npages; i++) {
3435 		m = bp->b_pages[i];
3436 		if (m == bogus_page) {
3437 			m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
3438 			if (!m)
3439 				panic("vfs_unbusy_pages: page missing\n");
3440 			bp->b_pages[i] = m;
3441 			pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3442 			    bp->b_pages, bp->b_npages);
3443 		}
3444 		vm_object_pip_subtract(obj, 1);
3445 		vm_page_io_finish(m);
3446 	}
3447 	vm_object_pip_wakeupn(obj, 0);
3448 	VM_OBJECT_UNLOCK(obj);
3449 }
3450 
3451 /*
3452  * vfs_page_set_valid:
3453  *
3454  *	Set the valid bits in a page based on the supplied offset.   The
3455  *	range is restricted to the buffer's size.
3456  *
3457  *	This routine is typically called after a read completes.
3458  */
3459 static void
3460 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
3461 {
3462 	vm_ooffset_t eoff;
3463 
3464 	/*
3465 	 * Compute the end offset, eoff, such that [off, eoff) does not span a
3466 	 * page boundary and eoff is not greater than the end of the buffer.
3467 	 * The end of the buffer, in this case, is our file EOF, not the
3468 	 * allocation size of the buffer.
3469 	 */
3470 	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
3471 	if (eoff > bp->b_offset + bp->b_bcount)
3472 		eoff = bp->b_offset + bp->b_bcount;
3473 
3474 	/*
3475 	 * Set valid range.  This is typically the entire buffer and thus the
3476 	 * entire page.
3477 	 */
3478 	if (eoff > off)
3479 		vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
3480 }
3481 
3482 /*
3483  * vfs_page_set_validclean:
3484  *
3485  *	Set the valid bits and clear the dirty bits in a page based on the
3486  *	supplied offset.   The range is restricted to the buffer's size.
3487  */
3488 static void
3489 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
3490 {
3491 	vm_ooffset_t soff, eoff;
3492 
3493 	/*
3494 	 * Start and end offsets in buffer.  eoff - soff may not cross a
3495  *	page boundary or cross the end of the buffer.  The end of the
3496 	 * buffer, in this case, is our file EOF, not the allocation size
3497 	 * of the buffer.
3498 	 */
3499 	soff = off;
3500 	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3501 	if (eoff > bp->b_offset + bp->b_bcount)
3502 		eoff = bp->b_offset + bp->b_bcount;
3503 
3504 	/*
3505 	 * Set valid range.  This is typically the entire buffer and thus the
3506 	 * entire page.
3507 	 */
3508 	if (eoff > soff) {
3509 		vm_page_set_validclean(
3510 		    m,
3511 		   (vm_offset_t) (soff & PAGE_MASK),
3512 		   (vm_offset_t) (eoff - soff)
3513 		);
3514 	}
3515 }
3516 
3517 /*
3518  * Ensure that no buffer page is busied via the VPO_BUSY flag.  If
3519  * any page is busy, wait for the flag to drain.
3520  */
3521 static void
3522 vfs_drain_busy_pages(struct buf *bp)
3523 {
3524 	vm_page_t m;
3525 	int i, last_busied;
3526 
3527 	VM_OBJECT_LOCK_ASSERT(bp->b_bufobj->bo_object, MA_OWNED);
3528 	last_busied = 0;
3529 	for (i = 0; i < bp->b_npages; i++) {
3530 		m = bp->b_pages[i];
3531 		if ((m->oflags & VPO_BUSY) != 0) {
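			/*
			 * Busy (without sleeping on) every page already
			 * examined so that it cannot be re-busied by
			 * someone else while we sleep waiting for this
			 * page to drain; the pages are woken up again
			 * once the scan completes.
			 */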
3532 			for (; last_busied < i; last_busied++)
3533 				vm_page_busy(bp->b_pages[last_busied]);
3534 			while ((m->oflags & VPO_BUSY) != 0)
3535 				vm_page_sleep(m, "vbpage");
3536 		}
3537 	}
3538 	for (i = 0; i < last_busied; i++)
3539 		vm_page_wakeup(bp->b_pages[i]);
3540 }
3541 
3542 /*
3543  * This routine is called before a device strategy routine.
3544  * It is used to tell the VM system that paging I/O is in
3545  * progress, and treat the pages associated with the buffer
3546  * almost as if they were VPO_BUSY.  Also the object paging_in_progress
3547  * flag is handled to make sure that the object doesn't become
3548  * inconsistant.
3549  *
3550  * Since I/O has not been initiated yet, certain buffer flags
3551  * such as BIO_ERROR or B_INVAL may be in an inconsistent state
3552  * and should be ignored.
3553  */
3554 void
3555 vfs_busy_pages(struct buf *bp, int clear_modify)
3556 {
3557 	int i, bogus;
3558 	vm_object_t obj;
3559 	vm_ooffset_t foff;
3560 	vm_page_t m;
3561 
3562 	if (!(bp->b_flags & B_VMIO))
3563 		return;
3564 
3565 	obj = bp->b_bufobj->bo_object;
3566 	foff = bp->b_offset;
3567 	KASSERT(bp->b_offset != NOOFFSET,
3568 	    ("vfs_busy_pages: no buffer offset"));
3569 	VM_OBJECT_LOCK(obj);
3570 	vfs_drain_busy_pages(bp);
3571 	if (bp->b_bufsize != 0)
3572 		vfs_setdirty_locked_object(bp);
3573 	bogus = 0;
3574 	for (i = 0; i < bp->b_npages; i++) {
3575 		m = bp->b_pages[i];
3576 
3577 		if ((bp->b_flags & B_CLUSTER) == 0) {
3578 			vm_object_pip_add(obj, 1);
3579 			vm_page_io_start(m);
3580 		}
3581 		/*
3582 		 * When readying a buffer for a read (i.e.,
3583 		 * clear_modify == 0), it is important to do
3584 		 * bogus_page replacement for valid pages in
3585 		 * partially instantiated buffers.  Partially
3586 		 * instantiated buffers can, in turn, occur when
3587 		 * reconstituting a buffer from its VM backing store
3588 		 * base.  We only have to do this if B_CACHE is
3589 		 * clear (which causes the I/O to occur in the
3590 		 * first place).  The replacement prevents the read
3591 		 * I/O from overwriting potentially dirty VM-backed
3592 		 * pages.  XXX bogus page replacement is, uh, bogus.
3593 		 * It may not work properly with small-block devices.
3594 		 * We need to find a better way.
3595 		 */
3596 		if (clear_modify) {
3597 			pmap_remove_write(m);
3598 			vfs_page_set_validclean(bp, foff, m);
3599 		} else if (m->valid == VM_PAGE_BITS_ALL &&
3600 		    (bp->b_flags & B_CACHE) == 0) {
3601 			bp->b_pages[i] = bogus_page;
3602 			bogus++;
3603 		}
3604 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3605 	}
3606 	VM_OBJECT_UNLOCK(obj);
3607 	if (bogus)
3608 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3609 		    bp->b_pages, bp->b_npages);
3610 }
3611 
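/*
 * Illustrative pairing (a sketch of roughly what bufwrite() does for a
 * synchronous VMIO write; locking, flag setup and error handling are
 * omitted).  vfs_unbusy_pages(), whose tail appears earlier in this
 * file, is the completion-side counterpart:
 *
 *	bp->b_iocmd = BIO_WRITE;
 *	vfs_busy_pages(bp, 1);
 *	bp->b_iooffset = dbtob(bp->b_blkno);
 *	bstrategy(bp);
 *	error = bufwait(bp);
 */
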
3612 /*
3613  *	vfs_bio_set_valid:
3614  *
3615  *	Set the range within the buffer to valid.  The range is
3616  *	relative to the beginning of the buffer, b_offset.  Note that
3617  *	b_offset itself may be offset from the beginning of the first
3618  *	page.
3619  */
3620 void
3621 vfs_bio_set_valid(struct buf *bp, int base, int size)
3622 {
3623 	int i, n;
3624 	vm_page_t m;
3625 
3626 	if (!(bp->b_flags & B_VMIO))
3627 		return;
3628 
3629 	/*
3630 	 * Fix up base to be relative to the beginning of the first page.
3631 	 * Set initial n to be the maximum number of bytes in the
3632 	 * first page that can be validated.
3633 	 */
3634 	base += (bp->b_offset & PAGE_MASK);
3635 	n = PAGE_SIZE - (base & PAGE_MASK);
3636 
3637 	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
3638 	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
3639 		m = bp->b_pages[i];
3640 		if (n > size)
3641 			n = size;
3642 		vm_page_set_valid_range(m, base & PAGE_MASK, n);
3643 		base += n;
3644 		size -= n;
3645 		n = PAGE_SIZE;
3646 	}
3647 	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
3648 }
3649 
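/*
 * Worked example (illustrative only, assuming 4KB pages and a buffer
 * spanning at least two pages): with bp->b_offset = 0x1a00, a call
 * vfs_bio_set_valid(bp, 0x200, 0x600) first rebases:
 * base = 0x200 + (0x1a00 & PAGE_MASK) = 0xc00.  The loop then starts
 * at page 0 and validates [0xc00, 0x1000) there (0x400 bytes), and
 * continues into page 1, validating [0x000, 0x200).
 */
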
3650 /*
3651  *	vfs_bio_clrbuf:
3652  *
3653  *	If the specified buffer is a non-VMIO buffer, clear the entire
3654  *	buffer.  If the specified buffer is a VMIO buffer, clear and
3655  *	validate only the previously invalid portions of the buffer.
3656  *	This routine essentially fakes an I/O, so we need to clear
3657  *	BIO_ERROR and B_INVAL.
3658  *
3659  *	Note that while we only theoretically need to clear through b_bcount,
3660  *	we go ahead and clear through b_bufsize.
3661  */
3662 void
3663 vfs_bio_clrbuf(struct buf *bp)
3664 {
3665 	int i, j, mask;
3666 	caddr_t sa, ea;
3667 
3668 	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
3669 		clrbuf(bp);
3670 		return;
3671 	}
3672 	bp->b_flags &= ~B_INVAL;
3673 	bp->b_ioflags &= ~BIO_ERROR;
3674 	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
3675 	if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
3676 	    (bp->b_offset & PAGE_MASK) == 0) {
3677 		if (bp->b_pages[0] == bogus_page)
3678 			goto unlock;
3679 		mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
3680 		VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED);
3681 		if ((bp->b_pages[0]->valid & mask) == mask)
3682 			goto unlock;
3683 		if ((bp->b_pages[0]->valid & mask) == 0) {
3684 			bzero(bp->b_data, bp->b_bufsize);
3685 			bp->b_pages[0]->valid |= mask;
3686 			goto unlock;
3687 		}
3688 	}
3689 	ea = sa = bp->b_data;
3690 	for (i = 0; i < bp->b_npages; i++, sa = ea) {
3691 		ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
3692 		ea = (caddr_t)(vm_offset_t)ulmin(
3693 		    (u_long)(vm_offset_t)ea,
3694 		    (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
3695 		if (bp->b_pages[i] == bogus_page)
3696 			continue;
3697 		j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
3698 		mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
3699 		VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED);
3700 		if ((bp->b_pages[i]->valid & mask) == mask)
3701 			continue;
3702 		if ((bp->b_pages[i]->valid & mask) == 0)
3703 			bzero(sa, ea - sa);
3704 		else {
3705 			for (; sa < ea; sa += DEV_BSIZE, j++) {
3706 				if ((bp->b_pages[i]->valid & (1 << j)) == 0)
3707 					bzero(sa, DEV_BSIZE);
3708 			}
3709 		}
3710 		bp->b_pages[i]->valid |= mask;
3711 	}
3712 unlock:
3713 	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
3714 	bp->b_resid = 0;
3715 }
3716 
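/*
 * Worked example of the valid-bit mask arithmetic above (illustrative
 * only, assuming DEV_BSIZE is 512): for a 2048-byte, single-page,
 * page-aligned buffer the fast path computes
 * mask = (1 << (2048 / 512)) - 1 = 0x000f, i.e. the four low bits of
 * the page's valid bitmap.  If all four bits are already set nothing
 * is cleared; if none are set the whole 2048 bytes are bzero'd and the
 * bits set; otherwise only the individual invalid 512-byte chunks are
 * zeroed.
 */
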
3717 /*
3718  * vm_hold_load_pages and vm_hold_free_pages get pages into and out of
3719  * a buffer's address space.  The pages are anonymous and are
3720  * not associated with a file object.
3721  */
3722 static void
3723 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
3724 {
3725 	vm_offset_t pg;
3726 	vm_page_t p;
3727 	int index;
3728 
3729 	to = round_page(to);
3730 	from = round_page(from);
3731 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3732 
3733 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
3734 tryagain:
3735 		/*
3736 		 * note: must allocate system pages since blocking here
3737 		 * could interfere with paging I/O, regardless of which
3738 		 * process we are running in.
3739 		 */
3740 		p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
3741 		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
3742 		if (p == NULL) {
3743 			VM_WAIT;
3744 			goto tryagain;
3745 		}
3746 		pmap_qenter(pg, &p, 1);
3747 		bp->b_pages[index] = p;
3748 	}
3749 	bp->b_npages = index;
3750 }
3751 
3752 /* Return pages associated with this buf to the vm system */
3753 static void
3754 vm_hold_free_pages(struct buf *bp, int newbsize)
3755 {
3756 	vm_offset_t from;
3757 	vm_page_t p;
3758 	int index, newnpages;
3759 
3760 	from = round_page((vm_offset_t)bp->b_data + newbsize);
3761 	newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3762 	if (bp->b_npages > newnpages)
3763 		pmap_qremove(from, bp->b_npages - newnpages);
3764 	for (index = newnpages; index < bp->b_npages; index++) {
3765 		p = bp->b_pages[index];
3766 		bp->b_pages[index] = NULL;
3767 		if (p->busy != 0)
3768 			printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
3769 			    (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno);
3770 		p->wire_count--;
3771 		vm_page_free(p);
3772 		atomic_subtract_int(&cnt.v_wire_count, 1);
3773 	}
3774 	bp->b_npages = newnpages;
3775 }
3776 
3777 /*
3778  * Map an IO request into kernel virtual address space.
3779  *
3780  * All requests are (re)mapped into kernel VA space.
3781  * Notice that we use b_bufsize for the size of the buffer
3782  * to be mapped.  b_bcount might be modified by the driver.
3783  *
3784  * Note that even if the caller determines that the address space should
3785  * be valid, a race or a smaller file mapped into a larger space may
3786  * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
3787  * check the return value.
3788  */
3789 int
3790 vmapbuf(struct buf *bp)
3791 {
3792 	caddr_t kva;
3793 	vm_prot_t prot;
3794 	int pidx;
3795 
3796 	if (bp->b_bufsize < 0)
3797 		return (-1);
3798 	prot = VM_PROT_READ;
3799 	if (bp->b_iocmd == BIO_READ)
3800 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
3801 	if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
3802 	    (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
3803 	    btoc(MAXPHYS))) < 0)
3804 		return (-1);
3805 	pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
3806 
3807 	kva = bp->b_saveaddr;
3808 	bp->b_npages = pidx;
3809 	bp->b_saveaddr = bp->b_data;
3810 	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
3811 	return (0);
3812 }
3813 
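/*
 * Illustrative caller sketch (roughly the physio(9) pattern; error
 * handling and the per-iovec loop are trimmed, and the assignments are
 * an assumption about how the pbuf was prepared).  On entry b_saveaddr
 * must hold the pbuf's kernel VA and b_data the user address; on
 * success b_data points at the kernel mapping of the user pages:
 *
 *	bp->b_saveaddr = bp->b_kvabase;
 *	bp->b_data = uio->uio_iov[0].iov_base;
 *	bp->b_bufsize = uio->uio_iov[0].iov_len;
 *	if (vmapbuf(bp) < 0)
 *		return (EFAULT);
 *	... issue and await the transfer ...
 *	vunmapbuf(bp);
 */
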
3814 /*
3815  * Free the I/O map PTEs associated with this I/O operation.
3816  * We also invalidate the TLB entries and restore the original b_data.
3817  */
3818 void
3819 vunmapbuf(struct buf *bp)
3820 {
3821 	int npages;
3822 
3823 	npages = bp->b_npages;
3824 	pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
3825 	vm_page_unhold_pages(bp->b_pages, npages);
3826 
3827 	bp->b_data = bp->b_saveaddr;
3828 }
3829 
3830 void
3831 bdone(struct buf *bp)
3832 {
3833 	struct mtx *mtxp;
3834 
3835 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3836 	mtx_lock(mtxp);
3837 	bp->b_flags |= B_DONE;
3838 	wakeup(bp);
3839 	mtx_unlock(mtxp);
3840 }
3841 
3842 void
3843 bwait(struct buf *bp, u_char pri, const char *wchan)
3844 {
3845 	struct mtx *mtxp;
3846 
3847 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3848 	mtx_lock(mtxp);
3849 	while ((bp->b_flags & B_DONE) == 0)
3850 		msleep(bp, mtxp, pri, wchan, 0);
3851 	mtx_unlock(mtxp);
3852 }
3853 
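/*
 * bdone() and bwait() form a minimal completion/wait pair keyed on
 * B_DONE and the buffer's pool mutex.  Illustrative use (hypothetical
 * caller; "mywait" is an arbitrary wmesg): the initiator clears B_DONE,
 * starts the transfer, and sleeps; the completion side runs bdone():
 *
 *	bp->b_flags &= ~B_DONE;
 *	bp->b_iodone = bdone;
 *	bstrategy(bp);
 *	bwait(bp, PRIBIO, "mywait");
 */
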
3854 int
3855 bufsync(struct bufobj *bo, int waitfor)
3856 {
3857 
3858 	return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread));
3859 }
3860 
3861 void
3862 bufstrategy(struct bufobj *bo, struct buf *bp)
3863 {
3864 	int i = 0;
3865 	struct vnode *vp;
3866 
3867 	vp = bp->b_vp;
3868 	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
3869 	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
3870 	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
3871 	i = VOP_STRATEGY(vp, bp);
3872 	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
3873 }
3874 
3875 void
3876 bufobj_wrefl(struct bufobj *bo)
3877 {
3878 
3879 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
3880 	ASSERT_BO_LOCKED(bo);
3881 	bo->bo_numoutput++;
3882 }
3883 
3884 void
3885 bufobj_wref(struct bufobj *bo)
3886 {
3887 
3888 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
3889 	BO_LOCK(bo);
3890 	bo->bo_numoutput++;
3891 	BO_UNLOCK(bo);
3892 }
3893 
3894 void
3895 bufobj_wdrop(struct bufobj *bo)
3896 {
3897 
3898 	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
3899 	BO_LOCK(bo);
3900 	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
3901 	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
3902 		bo->bo_flag &= ~BO_WWAIT;
3903 		wakeup(&bo->bo_numoutput);
3904 	}
3905 	BO_UNLOCK(bo);
3906 }
3907 
3908 int
3909 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
3910 {
3911 	int error;
3912 
3913 	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
3914 	ASSERT_BO_LOCKED(bo);
3915 	error = 0;
3916 	while (bo->bo_numoutput) {
3917 		bo->bo_flag |= BO_WWAIT;
3918 		error = msleep(&bo->bo_numoutput, BO_MTX(bo),
3919 		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
3920 		if (error)
3921 			break;
3922 	}
3923 	return (error);
3924 }
3925 
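/*
 * Together, bufobj_wref()/bufobj_wdrop() and bufobj_wwait() count the
 * writes in progress on a bufobj and let a caller drain them.
 * Illustrative sketch (hypothetical caller; the reference is normally
 * taken on the write-start path and dropped on completion):
 *
 *	bufobj_wref(bo);		when a write is started
 *	...
 *	bufobj_wdrop(bo);		from the completion path
 *
 *	BO_LOCK(bo);			to wait for all pending writes
 *	error = bufobj_wwait(bo, 0, 0);
 *	BO_UNLOCK(bo);
 */
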
3926 void
3927 bpin(struct buf *bp)
3928 {
3929 	struct mtx *mtxp;
3930 
3931 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3932 	mtx_lock(mtxp);
3933 	bp->b_pin_count++;
3934 	mtx_unlock(mtxp);
3935 }
3936 
3937 void
3938 bunpin(struct buf *bp)
3939 {
3940 	struct mtx *mtxp;
3941 
3942 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3943 	mtx_lock(mtxp);
3944 	if (--bp->b_pin_count == 0)
3945 		wakeup(bp);
3946 	mtx_unlock(mtxp);
3947 }
3948 
3949 void
3950 bunpin_wait(struct buf *bp)
3951 {
3952 	struct mtx *mtxp;
3953 
3954 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3955 	mtx_lock(mtxp);
3956 	while (bp->b_pin_count > 0)
3957 		msleep(bp, mtxp, PRIBIO, "bwunpin", 0);
3958 	mtx_unlock(mtxp);
3959 }
3960 
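/*
 * b_pin_count keeps a buffer from being reused while another subsystem
 * still needs its contents.  Illustrative sketch (hypothetical users):
 *
 *	bpin(bp);			side A takes an extra pin
 *	...				hand bp off, start async work
 *	bunpin(bp);			side A done, wake any waiter
 *
 *	bunpin_wait(bp);		side B blocks until all pins drop
 */
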
3961 #include "opt_ddb.h"
3962 #ifdef DDB
3963 #include <ddb/ddb.h>
3964 
3965 /* DDB command to show buffer data */
3966 DB_SHOW_COMMAND(buffer, db_show_buffer)
3967 {
3968 	/* get args */
3969 	struct buf *bp = (struct buf *)addr;
3970 
3971 	if (!have_addr) {
3972 		db_printf("usage: show buffer <addr>\n");
3973 		return;
3974 	}
3975 
3976 	db_printf("buf at %p\n", bp);
3977 	db_printf("b_flags = 0x%b, b_xflags=0x%b, b_vflags=0x%b\n",
3978 	    (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags,
3979 	    PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS);
3980 	db_printf(
3981 	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
3982 	    "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, "
3983 	    "b_dep = %p\n",
3984 	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
3985 	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
3986 	    (intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
3987 	if (bp->b_npages) {
3988 		int i;
3989 		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
3990 		for (i = 0; i < bp->b_npages; i++) {
3991 			vm_page_t m;
3992 			m = bp->b_pages[i];
3993 			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
3994 			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
3995 			if ((i + 1) < bp->b_npages)
3996 				db_printf(",");
3997 		}
3998 		db_printf("\n");
3999 	}
4000 	db_printf(" ");
4001 	BUF_LOCKPRINTINFO(bp);
4002 }
4003 
4004 DB_SHOW_COMMAND(lockedbufs, lockedbufs)
4005 {
4006 	struct buf *bp;
4007 	int i;
4008 
4009 	for (i = 0; i < nbuf; i++) {
4010 		bp = &buf[i];
4011 		if (BUF_ISLOCKED(bp)) {
4012 			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4013 			db_printf("\n");
4014 		}
4015 	}
4016 }
4017 
4018 DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
4019 {
4020 	struct vnode *vp;
4021 	struct buf *bp;
4022 
4023 	if (!have_addr) {
4024 		db_printf("usage: show vnodebufs <addr>\n");
4025 		return;
4026 	}
4027 	vp = (struct vnode *)addr;
4028 	db_printf("Clean buffers:\n");
4029 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
4030 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4031 		db_printf("\n");
4032 	}
4033 	db_printf("Dirty buffers:\n");
4034 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
4035 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4036 		db_printf("\n");
4037 	}
4038 }
4039 
4040 DB_COMMAND(countfreebufs, db_countfreebufs)
4041 {
4042 	struct buf *bp;
4043 	int i, used = 0, nfree = 0;
4044 
4045 	if (have_addr) {
4046 		db_printf("usage: countfreebufs\n");
4047 		return;
4048 	}
4049 
4050 	for (i = 0; i < nbuf; i++) {
4051 		bp = &buf[i];
4052 		if ((bp->b_vflags & BV_INFREECNT) != 0)
4053 			nfree++;
4054 		else
4055 			used++;
4056 	}
4057 
4058 	db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
4059 	    nfree + used);
4060 	db_printf("numfreebuffers is %d\n", numfreebuffers);
4061 }
4062 #endif /* DDB */
4063