xref: /freebsd/sys/kern/vfs_bio.c (revision 99429157e8615dc3b7f11afbe3ed92de7476a5db)
1 /*-
2  * Copyright (c) 2004 Poul-Henning Kamp
3  * Copyright (c) 1994,1997 John S. Dyson
4  * Copyright (c) 2013 The FreeBSD Foundation
5  * All rights reserved.
6  *
7  * Portions of this software were developed by Konstantin Belousov
8  * under sponsorship from the FreeBSD Foundation.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 /*
33  * This file contains a new buffer I/O scheme implementing a coherent
34  * VM object and buffer cache scheme.  Pains have been taken to make
35  * sure that the performance degradation associated with schemes such
36  * as this is not realized.
37  *
38  * Author:  John S. Dyson
39  * Significant help during the development and debugging phases
40  * was provided by David Greenman, also of the FreeBSD core team.
41  *
42  * see man buf(9) for more info.
43  */
44 
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/bio.h>
51 #include <sys/conf.h>
52 #include <sys/buf.h>
53 #include <sys/devicestat.h>
54 #include <sys/eventhandler.h>
55 #include <sys/fail.h>
56 #include <sys/limits.h>
57 #include <sys/lock.h>
58 #include <sys/malloc.h>
59 #include <sys/mount.h>
60 #include <sys/mutex.h>
61 #include <sys/kernel.h>
62 #include <sys/kthread.h>
63 #include <sys/proc.h>
64 #include <sys/racct.h>
65 #include <sys/resourcevar.h>
66 #include <sys/rwlock.h>
67 #include <sys/smp.h>
68 #include <sys/sysctl.h>
69 #include <sys/sysproto.h>
70 #include <sys/vmem.h>
71 #include <sys/vmmeter.h>
72 #include <sys/vnode.h>
73 #include <sys/watchdog.h>
74 #include <geom/geom.h>
75 #include <vm/vm.h>
76 #include <vm/vm_param.h>
77 #include <vm/vm_kern.h>
78 #include <vm/vm_object.h>
79 #include <vm/vm_page.h>
80 #include <vm/vm_pageout.h>
81 #include <vm/vm_pager.h>
82 #include <vm/vm_extern.h>
83 #include <vm/vm_map.h>
84 #include <vm/swap_pager.h>
85 #include "opt_compat.h"
86 #include "opt_swap.h"
87 
88 static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
89 
90 struct	bio_ops bioops;		/* I/O operation notification */
91 
92 struct	buf_ops buf_ops_bio = {
93 	.bop_name	=	"buf_ops_bio",
94 	.bop_write	=	bufwrite,
95 	.bop_strategy	=	bufstrategy,
96 	.bop_sync	=	bufsync,
97 	.bop_bdflush	=	bufbdflush,
98 };
99 
100 static struct buf *buf;		/* buffer header pool */
101 extern struct buf *swbuf;	/* Swap buffer header pool. */
102 caddr_t unmapped_buf;
103 
104 /* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
105 struct proc *bufdaemonproc;
106 struct proc *bufspacedaemonproc;
107 
108 static int inmem(struct vnode *vp, daddr_t blkno);
109 static void vm_hold_free_pages(struct buf *bp, int newbsize);
110 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
111 		vm_offset_t to);
112 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
113 static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
114 		vm_page_t m);
115 static void vfs_clean_pages_dirty_buf(struct buf *bp);
116 static void vfs_setdirty_locked_object(struct buf *bp);
117 static void vfs_vmio_invalidate(struct buf *bp);
118 static void vfs_vmio_truncate(struct buf *bp, int npages);
119 static void vfs_vmio_extend(struct buf *bp, int npages, int size);
120 static int vfs_bio_clcheck(struct vnode *vp, int size,
121 		daddr_t lblkno, daddr_t blkno);
122 static int buf_flush(struct vnode *vp, int);
123 static int buf_recycle(bool);
124 static int buf_scan(bool);
125 static int flushbufqueues(struct vnode *, int, int);
126 static void buf_daemon(void);
127 static void bremfreel(struct buf *bp);
128 static __inline void bd_wakeup(void);
129 static int sysctl_runningspace(SYSCTL_HANDLER_ARGS);
130 static void bufkva_reclaim(vmem_t *, int);
131 static void bufkva_free(struct buf *);
132 static int buf_import(void *, void **, int, int);
133 static void buf_release(void *, void **, int);
134 
135 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
136     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
137 static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
138 #endif
139 
140 int vmiodirenable = TRUE;
141 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
142     "Use the VM system for directory writes");
143 long runningbufspace;
144 SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
145     "Amount of presently outstanding async buffer io");
146 static long bufspace;
147 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
148     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
149 SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
150     &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers");
151 #else
152 SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
153     "Physical memory used for buffers");
154 #endif
155 static long bufkvaspace;
156 SYSCTL_LONG(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace, 0,
157     "Kernel virtual memory used for buffers");
158 static long maxbufspace;
159 SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW, &maxbufspace, 0,
160     "Maximum allowed value of bufspace (including metadata)");
161 static long bufmallocspace;
162 SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
163     "Amount of malloced memory for buffers");
164 static long maxbufmallocspace;
165 SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace,
166     0, "Maximum amount of malloced memory for buffers");
167 static long lobufspace;
168 SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RW, &lobufspace, 0,
169     "Minimum amount of buffer space we want to have");
170 long hibufspace;
171 SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RW, &hibufspace, 0,
172     "Maximum allowed value of bufspace (excluding metadata)");
173 long bufspacethresh;
174 SYSCTL_LONG(_vfs, OID_AUTO, bufspacethresh, CTLFLAG_RW, &bufspacethresh,
175     0, "Bufspace consumed before waking the daemon to free some");
176 static int buffreekvacnt;
177 SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
178     "Number of times we have freed the KVA space from some buffer");
179 static int bufdefragcnt;
180 SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
181     "Number of times we have had to repeat buffer allocation to defragment");
182 static long lorunningspace;
183 SYSCTL_PROC(_vfs, OID_AUTO, lorunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
184     CTLFLAG_RW, &lorunningspace, 0, sysctl_runningspace, "L",
185     "Minimum preferred space used for in-progress I/O");
186 static long hirunningspace;
187 SYSCTL_PROC(_vfs, OID_AUTO, hirunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
188     CTLFLAG_RW, &hirunningspace, 0, sysctl_runningspace, "L",
189     "Maximum amount of space to use for in-progress I/O");
190 int dirtybufferflushes;
191 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
192     0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
193 int bdwriteskip;
194 SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
195     0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
196 int altbufferflushes;
197 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
198     0, "Number of fsync flushes to limit dirty buffers");
199 static int recursiveflushes;
200 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
201     0, "Number of flushes skipped due to being recursive");
202 static int numdirtybuffers;
203 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
204     "Number of buffers that are dirty (have unwritten changes) at the moment");
205 static int lodirtybuffers;
206 SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
207     "How many buffers we want to have free before bufdaemon can sleep");
208 static int hidirtybuffers;
209 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
210     "When the number of dirty buffers is considered severe");
211 int dirtybufthresh;
212 SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
213     0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
214 static int numfreebuffers;
215 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
216     "Number of free buffers");
217 static int lofreebuffers;
218 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
219    "Target number of free buffers");
220 static int hifreebuffers;
221 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
222    "Threshold for clean buffer recycling");
223 static int getnewbufcalls;
224 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
225    "Number of calls to getnewbuf");
226 static int getnewbufrestarts;
227 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
228     "Number of times getnewbuf has had to restart a buffer acquisition");
229 static int mappingrestarts;
230 SYSCTL_INT(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RW, &mappingrestarts, 0,
231     "Number of times getblk has had to restart a buffer mapping for "
232     "an unmapped buffer");
233 static int numbufallocfails;
234 SYSCTL_INT(_vfs, OID_AUTO, numbufallocfails, CTLFLAG_RW, &numbufallocfails, 0,
235     "Number of times buffer allocations failed");
236 static int flushbufqtarget = 100;
237 SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
238     "Amount of work to do in flushbufqueues when helping bufdaemon");
239 static long notbufdflushes;
240 SYSCTL_LONG(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes, 0,
241     "Number of dirty buffer flushes done by the bufdaemon helpers");
242 static long barrierwrites;
243 SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW, &barrierwrites, 0,
244     "Number of barrier writes");
245 SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
246     &unmapped_buf_allowed, 0,
247     "Permit the use of unmapped I/O");
248 
249 /*
250  * This lock synchronizes access to bd_request.
251  */
252 static struct mtx_padalign bdlock;
253 
254 /*
255  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
256  * waitrunningbufspace().
257  */
258 static struct mtx_padalign rbreqlock;
259 
260 /*
261  * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
262  */
263 static struct rwlock_padalign nblock;
264 
265 /*
266  * Lock that protects bdirtywait.
267  */
268 static struct mtx_padalign bdirtylock;
269 
270 /*
271  * Wakeup point for bufdaemon, as well as indicator of whether it is already
272  * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
273  * is idling.
274  */
275 static int bd_request;
276 
277 /*
278  * Request/wakeup point for the bufspace daemon.
279  */
280 static int bufspace_request;
281 
282 /*
283  * Request for the buf daemon to write more buffers than is indicated by
284  * lodirtybuffers.  This may be necessary to push out excess dependencies or
285  * defragment the address space where a simple count of the number of dirty
286  * buffers is insufficient to characterize the demand for flushing them.
287  */
288 static int bd_speedupreq;
289 
290 /*
291  * Synchronization (sleep/wakeup) variable for active buffer space requests.
292  * Set when wait starts, cleared prior to wakeup().
293  * Used in runningbufwakeup() and waitrunningbufspace().
294  */
295 static int runningbufreq;
296 
297 /*
298  * Synchronization (sleep/wakeup) variable for buffer requests.
299  * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
300  * by and/or.
301  * Used in numdirtywakeup(), bufspace_wakeup(), bwillwrite(),
302  * getnewbuf(), and getblk().
303  */
304 static volatile int needsbuffer;
305 
306 /*
307  * Synchronization for bwillwrite() waiters.
308  */
309 static int bdirtywait;
310 
311 /*
312  * Definitions for the buffer free lists.
313  */
314 #define QUEUE_NONE	0	/* on no queue */
315 #define QUEUE_EMPTY	1	/* empty buffer headers */
316 #define QUEUE_DIRTY	2	/* B_DELWRI buffers */
317 #define QUEUE_CLEAN	3	/* non-B_DELWRI buffers */
318 #define QUEUE_SENTINEL	1024	/* not a queue index, but a sentinel marker */
319 
320 /* Maximum number of clean buffer queues. */
321 #define	CLEAN_QUEUES	16
322 
323 /* Configured number of clean queues. */
324 static int clean_queues;
325 
326 /* Maximum number of buffer queues. */
327 #define BUFFER_QUEUES	(QUEUE_CLEAN + CLEAN_QUEUES)
328 
329 /* Queues for free buffers with various properties */
330 static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
331 #ifdef INVARIANTS
332 static int bq_len[BUFFER_QUEUES];
333 #endif
334 
335 /*
336  * Lock for each bufqueue
337  */
338 static struct mtx_padalign bqlocks[BUFFER_QUEUES];
339 
340 /*
341  * per-cpu empty buffer cache.
342  */
343 uma_zone_t buf_zone;
344 
345 /*
346  * Single global constant for BUF_WMESG, to avoid getting multiple references.
347  * buf_wmesg is referred from macros.
348  */
349 const char *buf_wmesg = BUF_WMESG;
350 
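/*
 *	sysctl_runningspace:
 *
 *	Sysctl handler for vfs.lorunningspace and vfs.hirunningspace.  The
 *	new value is validated under rbreqlock so that lorunningspace can
 *	never be set above hirunningspace.
 */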
351 static int
352 sysctl_runningspace(SYSCTL_HANDLER_ARGS)
353 {
354 	long value;
355 	int error;
356 
357 	value = *(long *)arg1;
358 	error = sysctl_handle_long(oidp, &value, 0, req);
359 	if (error != 0 || req->newptr == NULL)
360 		return (error);
361 	mtx_lock(&rbreqlock);
362 	if (arg1 == &hirunningspace) {
363 		if (value < lorunningspace)
364 			error = EINVAL;
365 		else
366 			hirunningspace = value;
367 	} else {
368 		KASSERT(arg1 == &lorunningspace,
369 		    ("%s: unknown arg1", __func__));
370 		if (value > hirunningspace)
371 			error = EINVAL;
372 		else
373 			lorunningspace = value;
374 	}
375 	mtx_unlock(&rbreqlock);
376 	return (error);
377 }
378 
379 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
380     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
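/*
 *	sysctl_bufspace:
 *
 *	Compatibility handler for vfs.bufspace.  Older binaries expect an
 *	int-sized result, so hand back an int when the caller's buffer
 *	cannot hold a long.
 */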
381 static int
382 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
383 {
384 	long lvalue;
385 	int ivalue;
386 
387 	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
388 		return (sysctl_handle_long(oidp, arg1, arg2, req));
389 	lvalue = *(long *)arg1;
390 	if (lvalue > INT_MAX)
391 		/* On overflow, still write out a long to trigger ENOMEM. */
392 		return (sysctl_handle_long(oidp, &lvalue, 0, req));
393 	ivalue = lvalue;
394 	return (sysctl_handle_int(oidp, &ivalue, 0, req));
395 }
396 #endif
397 
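/*
 *	bqcleanq:
 *
 *	Pick a clean queue index in round-robin fashion.  An atomic
 *	counter spreads buffers over the clean queues to reduce lock
 *	contention.
 */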
398 static int
399 bqcleanq(void)
400 {
401 	static int nextq;
402 
403 	return ((atomic_fetchadd_int(&nextq, 1) % clean_queues) + QUEUE_CLEAN);
404 }
405 
406 static int
407 bqisclean(int qindex)
408 {
409 
410 	return (qindex >= QUEUE_CLEAN && qindex < QUEUE_CLEAN + CLEAN_QUEUES);
411 }
412 
413 /*
414  *	bqlock:
415  *
416  *	Return the appropriate queue lock based on the index.
417  */
418 static inline struct mtx *
419 bqlock(int qindex)
420 {
421 
422 	return (struct mtx *)&bqlocks[qindex];
423 }
424 
425 /*
426  *	bdirtywakeup:
427  *
428  *	Wakeup any bwillwrite() waiters.
429  */
430 static void
431 bdirtywakeup(void)
432 {
433 	mtx_lock(&bdirtylock);
434 	if (bdirtywait) {
435 		bdirtywait = 0;
436 		wakeup(&bdirtywait);
437 	}
438 	mtx_unlock(&bdirtylock);
439 }
440 
441 /*
442  *	bdirtysub:
443  *
444  *	Decrement the numdirtybuffers count by one and wakeup any
445  *	threads blocked in bwillwrite().
446  */
447 static void
448 bdirtysub(void)
449 {
450 
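	/*
	 * Wake bwillwrite() sleepers only as the count drops back to the
	 * midpoint between lodirtybuffers and hidirtybuffers, mirroring
	 * the transition check in bdirtyadd().
	 */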
451 	if (atomic_fetchadd_int(&numdirtybuffers, -1) ==
452 	    (lodirtybuffers + hidirtybuffers) / 2)
453 		bdirtywakeup();
454 }
455 
456 /*
457  *	bdirtyadd:
458  *
459  *	Increment the numdirtybuffers count by one and wakeup the buf
460  *	daemon if needed.
461  */
462 static void
463 bdirtyadd(void)
464 {
465 
466 	/*
467 	 * Only do the wakeup once as we cross the boundary.  The
468 	 * buf daemon will keep running until the condition clears.
469 	 */
470 	if (atomic_fetchadd_int(&numdirtybuffers, 1) ==
471 	    (lodirtybuffers + hidirtybuffers) / 2)
472 		bd_wakeup();
473 }
474 
475 /*
476  *	bufspace_wakeup:
477  *
478  *	Called when buffer space is potentially available for recovery.
479  *	getnewbuf() will block on this flag when it is unable to free
480  *	sufficient buffer space.  Buffer space becomes recoverable when
481  *	bp's get placed back in the queues.
482  */
483 static void
484 bufspace_wakeup(void)
485 {
486 
487 	/*
488 	 * If someone is waiting for bufspace, wake them up.
489 	 *
490 	 * Since needsbuffer is set prior to doing an additional queue
491 	 * scan it is safe to check for the flag prior to acquiring the
492 	 * lock.  The thread that is preparing to scan again before
493 	 * blocking would discover the buf we released.
494 	 */
495 	if (needsbuffer) {
496 		rw_rlock(&nblock);
497 		if (atomic_cmpset_int(&needsbuffer, 1, 0) == 1)
498 			wakeup(__DEVOLATILE(void *, &needsbuffer));
499 		rw_runlock(&nblock);
500 	}
501 }
502 
503 /*
504  *	bufspace_daemonwakeup:
505  *
506  *	Wakeup the daemon responsible for freeing clean bufs.
507  */
508 static void
509 bufspace_daemonwakeup(void)
510 {
511 	rw_rlock(&nblock);
512 	if (bufspace_request == 0) {
513 		bufspace_request = 1;
514 		wakeup(&bufspace_request);
515 	}
516 	rw_runlock(&nblock);
517 }
518 
519 /*
520  *	bufspace_adjust:
521  *
522  *	Adjust the reported bufspace for a KVA managed buffer, possibly
523  * 	waking any waiters.
524  */
525 static void
526 bufspace_adjust(struct buf *bp, int bufsize)
527 {
528 	long space;
529 	int diff;
530 
531 	KASSERT((bp->b_flags & B_MALLOC) == 0,
532 	    ("bufspace_adjust: malloc buf %p", bp));
533 	diff = bufsize - bp->b_bufsize;
534 	if (diff < 0) {
535 		atomic_subtract_long(&bufspace, -diff);
536 		bufspace_wakeup();
537 	} else {
538 		space = atomic_fetchadd_long(&bufspace, diff);
539 		/* Wake up the daemon on the transition. */
540 		if (space < bufspacethresh && space + diff >= bufspacethresh)
541 			bufspace_daemonwakeup();
542 	}
543 	bp->b_bufsize = bufsize;
544 }
545 
546 /*
547  *	bufspace_reserve:
548  *
549  *	Reserve bufspace before calling allocbuf().  Metadata has a
550  *	different space limit than data.
551  */
552 static int
553 bufspace_reserve(int size, bool metadata)
554 {
555 	long limit;
556 	long space;
557 
558 	if (metadata)
559 		limit = maxbufspace;
560 	else
561 		limit = hibufspace;
562 	do {
563 		space = bufspace;
564 		if (space + size > limit)
565 			return (ENOSPC);
566 	} while (atomic_cmpset_long(&bufspace, space, space + size) == 0);
567 
568 	/* Wake up the daemon on the transition. */
569 	if (space < bufspacethresh && space + size >= bufspacethresh)
570 		bufspace_daemonwakeup();
571 
572 	return (0);
573 }
574 
575 /*
576  *	bufspace_release:
577  *
578  *	Release reserved bufspace after bufspace_adjust() has consumed it.
579  */
580 static void
581 bufspace_release(int size)
582 {
583 	atomic_subtract_long(&bufspace, size);
584 	bufspace_wakeup();
585 }
586 
587 /*
588  *	bufspace_wait:
589  *
590  *	Wait for bufspace, acting as the buf daemon if a locked vnode is
591  *	supplied.  needsbuffer must be set in a safe fashion prior to
592  *	polling for space.  The operation must be re-tried on return.
593  */
594 static void
595 bufspace_wait(struct vnode *vp, int gbflags, int slpflag, int slptimeo)
596 {
597 	struct thread *td;
598 	int error, fl, norunbuf;
599 
600 	if ((gbflags & GB_NOWAIT_BD) != 0)
601 		return;
602 
603 	td = curthread;
604 	rw_wlock(&nblock);
605 	while (needsbuffer != 0) {
606 		if (vp != NULL && vp->v_type != VCHR &&
607 		    (td->td_pflags & TDP_BUFNEED) == 0) {
608 			rw_wunlock(&nblock);
609 			/*
610 			 * getblk() is called with a vnode locked, and
611 			 * a majority of the dirty buffers may
612 			 * well belong to the vnode.  Flushing the
613 			 * buffers there would make progress that
614 			 * cannot be achieved by the buf_daemon, which
615 			 * cannot lock the vnode.
616 			 */
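			/*
			 * Remember the prior TDP_NORUNNINGBUF setting so
			 * that both flags can be restored with a single
			 * mask after the flush.
			 */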
617 			norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
618 			    (td->td_pflags & TDP_NORUNNINGBUF);
619 
620 			/*
621 			 * Play bufdaemon.  The getnewbuf() function
622 			 * may be called while the thread owns the lock
623 			 * for another dirty buffer for the same
624 			 * vnode, which makes it impossible to use
625 			 * VOP_FSYNC() there, due to the buffer lock
626 			 * recursion.
627 			 */
628 			td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
629 			fl = buf_flush(vp, flushbufqtarget);
630 			td->td_pflags &= norunbuf;
631 			rw_wlock(&nblock);
632 			if (fl != 0)
633 				continue;
634 			if (needsbuffer == 0)
635 				break;
636 		}
637 		error = rw_sleep(__DEVOLATILE(void *, &needsbuffer), &nblock,
638 		    (PRIBIO + 4) | slpflag, "newbuf", slptimeo);
639 		if (error != 0)
640 			break;
641 	}
642 	rw_wunlock(&nblock);
643 }
644 
645 
646 /*
647  *	bufspace_daemon:
648  *
649  *	buffer space management daemon.  Tries to maintain some marginal
650  *	amount of free buffer space so that requesting processes neither
651  *	block nor work to reclaim buffers.
652  */
653 static void
654 bufspace_daemon(void)
655 {
656 	for (;;) {
657 		kproc_suspend_check(bufspacedaemonproc);
658 
659 		/*
660 		 * Free buffers from the clean queue until we meet our
661 		 * targets.
662 		 *
663 		 * Theory of operation:  The buffer cache is most efficient
664 		 * when some free buffer headers and space are always
665 		 * available to getnewbuf().  This daemon attempts to prevent
666 		 * the excessive blocking and synchronization associated
667 		 * with shortfall.  It goes through three phases according to
668 		 * demand:
669 		 *
670 		 * 1)	The daemon wakes up voluntarily once per second
671 		 *	during idle periods when the counters are below
672 		 *	the wakeup thresholds (bufspacethresh, lofreebuffers).
673 		 *
674 		 * 2)	The daemon wakes up as we cross the thresholds
675 		 *	ahead of any potential blocking.  This may bounce
676 		 *	slightly according to the rate of consumption and
677 		 *	release.
678 		 *
679 		 * 3)	The daemon and consumers are starved for working
680 		 *	clean buffers.  This is the 'bufspace' sleep below
681 		 *	which will inefficiently trade bufs with bqrelse
682 		 *	until we return to condition 2.
683 		 */
684 		while (bufspace > lobufspace ||
685 		    numfreebuffers < hifreebuffers) {
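			/*
			 * Try to recycle a clean buffer.  If that fails,
			 * advertise the shortage via needsbuffer, retry
			 * once, and only then sleep briefly.
			 */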
686 			if (buf_recycle(false) != 0) {
687 				atomic_set_int(&needsbuffer, 1);
688 				if (buf_recycle(false) != 0) {
689 					rw_wlock(&nblock);
690 					if (needsbuffer)
691 						rw_sleep(__DEVOLATILE(void *,
692 						    &needsbuffer), &nblock,
693 						    PRIBIO|PDROP, "bufspace",
694 						    hz/10);
695 					else
696 						rw_wunlock(&nblock);
697 				}
698 			}
699 			maybe_yield();
700 		}
701 
702 		/*
703 		 * Re-check our limits under the exclusive nblock.
704 		 */
705 		rw_wlock(&nblock);
706 		if (bufspace < bufspacethresh &&
707 		    numfreebuffers > lofreebuffers) {
708 			bufspace_request = 0;
709 			rw_sleep(&bufspace_request, &nblock, PRIBIO|PDROP,
710 			    "-", hz);
711 		} else
712 			rw_wunlock(&nblock);
713 	}
714 }
715 
716 static struct kproc_desc bufspace_kp = {
717 	"bufspacedaemon",
718 	bufspace_daemon,
719 	&bufspacedaemonproc
720 };
721 SYSINIT(bufspacedaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start,
722     &bufspace_kp);
723 
724 /*
725  *	bufmallocadjust:
726  *
727  *	Adjust the reported bufspace for a malloc managed buffer, possibly
728  *	waking any waiters.
729  */
730 static void
731 bufmallocadjust(struct buf *bp, int bufsize)
732 {
733 	int diff;
734 
735 	KASSERT((bp->b_flags & B_MALLOC) != 0,
736 	    ("bufmallocadjust: non-malloc buf %p", bp));
737 	diff = bufsize - bp->b_bufsize;
738 	if (diff < 0)
739 		atomic_subtract_long(&bufmallocspace, -diff);
740 	else
741 		atomic_add_long(&bufmallocspace, diff);
742 	bp->b_bufsize = bufsize;
743 }
744 
745 /*
746  *	runningwakeup:
747  *
748  *	Wake up processes that are waiting on asynchronous writes to fall
749  *	below lorunningspace.
750  */
751 static void
752 runningwakeup(void)
753 {
754 
755 	mtx_lock(&rbreqlock);
756 	if (runningbufreq) {
757 		runningbufreq = 0;
758 		wakeup(&runningbufreq);
759 	}
760 	mtx_unlock(&rbreqlock);
761 }
762 
763 /*
764  *	runningbufwakeup:
765  *
766  *	Decrement the outstanding write count accordingly.
767  */
768 void
769 runningbufwakeup(struct buf *bp)
770 {
771 	long space, bspace;
772 
773 	bspace = bp->b_runningbufspace;
774 	if (bspace == 0)
775 		return;
776 	space = atomic_fetchadd_long(&runningbufspace, -bspace);
777 	KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
778 	    space, bspace));
779 	bp->b_runningbufspace = 0;
780 	/*
781 	 * Only acquire the lock and wakeup on the transition from exceeding
782 	 * the threshold to falling below it.
783 	 */
784 	if (space < lorunningspace)
785 		return;
786 	if (space - bspace > lorunningspace)
787 		return;
788 	runningwakeup();
789 }
790 
791 /*
792  *	waitrunningbufspace()
793  *
794  *	runningbufspace is a measure of the amount of I/O currently
795  *	running.  This routine is used in async-write situations to
796  *	prevent creating huge backups of pending writes to a device.
797  *	Only asynchronous writes are governed by this function.
798  *
799  *	This does NOT turn an async write into a sync write.  It waits
800  *	for earlier writes to complete and generally returns before the
801  *	caller's write has reached the device.
802  */
803 void
804 waitrunningbufspace(void)
805 {
806 
807 	mtx_lock(&rbreqlock);
808 	while (runningbufspace > hirunningspace) {
809 		runningbufreq = 1;
810 		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
811 	}
812 	mtx_unlock(&rbreqlock);
813 }
814 
815 
816 /*
817  *	vfs_buf_test_cache:
818  *
819  *	Called when a buffer is extended.  This function clears the B_CACHE
820  *	bit if the newly extended portion of the buffer does not contain
821  *	valid data.
822  */
823 static __inline void
824 vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff, vm_offset_t off,
825     vm_offset_t size, vm_page_t m)
826 {
827 
828 	VM_OBJECT_ASSERT_LOCKED(m->object);
829 	if (bp->b_flags & B_CACHE) {
830 		int base = (foff + off) & PAGE_MASK;
831 		if (vm_page_is_valid(m, base, size) == 0)
832 			bp->b_flags &= ~B_CACHE;
833 	}
834 }
835 
836 /* Wake up the buffer daemon if necessary */
837 static __inline void
838 bd_wakeup(void)
839 {
840 
841 	mtx_lock(&bdlock);
842 	if (bd_request == 0) {
843 		bd_request = 1;
844 		wakeup(&bd_request);
845 	}
846 	mtx_unlock(&bdlock);
847 }
848 
849 /*
850  * bd_speedup - speedup the buffer cache flushing code
851  */
852 void
853 bd_speedup(void)
854 {
855 	int needwake;
856 
857 	mtx_lock(&bdlock);
858 	needwake = 0;
859 	if (bd_speedupreq == 0 || bd_request == 0)
860 		needwake = 1;
861 	bd_speedupreq = 1;
862 	bd_request = 1;
863 	if (needwake)
864 		wakeup(&bd_request);
865 	mtx_unlock(&bdlock);
866 }
867 
868 #ifndef NSWBUF_MIN
869 #define	NSWBUF_MIN	16
870 #endif
871 
872 #ifdef __i386__
873 #define	TRANSIENT_DENOM	5
874 #else
875 #define	TRANSIENT_DENOM 10
876 #endif
877 
878 /*
879  * Calculate buffer cache scaling values and reserve space for buffer
880  * headers.  This is called during low-level kernel initialization and
881  * may be called more than once.  We CANNOT write to the memory area
882  * being reserved at this time.
883  */
884 caddr_t
885 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
886 {
887 	int tuned_nbuf;
888 	long maxbuf, maxbuf_sz, buf_sz,	biotmap_sz;
889 
890 	/*
891 	 * physmem_est is in pages.  Convert it to kilobytes (assumes
892 	 * PAGE_SIZE is >= 1K)
893 	 */
894 	physmem_est = physmem_est * (PAGE_SIZE / 1024);
895 
896 	/*
897 	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
898 	 * For the first 64MB of ram nominally allocate sufficient buffers to
899 	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
900 	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
901 	 * the buffer cache we limit the eventual kva reservation to
902 	 * maxbcache bytes.
903 	 *
904 	 * factor represents the 1/4 x ram conversion.
905 	 */
906 	if (nbuf == 0) {
907 		int factor = 4 * BKVASIZE / 1024;
908 
909 		nbuf = 50;
910 		if (physmem_est > 4096)
911 			nbuf += min((physmem_est - 4096) / factor,
912 			    65536 / factor);
913 		if (physmem_est > 65536)
914 			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
915 			    32 * 1024 * 1024 / (factor * 5));
916 
917 		if (maxbcache && nbuf > maxbcache / BKVASIZE)
918 			nbuf = maxbcache / BKVASIZE;
919 		tuned_nbuf = 1;
920 	} else
921 		tuned_nbuf = 0;
922 
923 	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
924 	maxbuf = (LONG_MAX / 3) / BKVASIZE;
925 	if (nbuf > maxbuf) {
926 		if (!tuned_nbuf)
927 			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
928 			    maxbuf);
929 		nbuf = maxbuf;
930 	}
931 
932 	/*
933 	 * Ideal allocation size for the transient bio submap is 10%
934 	 * of the maximal space buffer map.  This roughly corresponds
935 	 * to the amount of the buffer mapped for typical UFS load.
936 	 *
937 	 * Clip the buffer map to reserve space for the transient
938 	 * BIOs, if its extent is bigger than 90% (80% on i386) of the
939 	 * maximum buffer map extent on the platform.
940 	 *
941 	 * Falling back to maxbuf when maxbcache is unset avoids
942 	 * trimming the buffer KVA on architectures with ample KVA
943 	 * space.
944 	 */
945 	if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
946 		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
947 		buf_sz = (long)nbuf * BKVASIZE;
948 		if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
949 		    (TRANSIENT_DENOM - 1)) {
950 			/*
951 			 * There is more KVA than memory.  Do not
952 			 * adjust buffer map size, and assign the rest
953 			 * of maxbuf to transient map.
954 			 */
955 			biotmap_sz = maxbuf_sz - buf_sz;
956 		} else {
957 			/*
958 			 * Buffer map spans all KVA we could afford on
959 			 * this platform.  Give 10% (20% on i386) of
960 			 * the buffer map to the transient bio map.
961 			 */
962 			biotmap_sz = buf_sz / TRANSIENT_DENOM;
963 			buf_sz -= biotmap_sz;
964 		}
965 		if (biotmap_sz / INT_MAX > MAXPHYS)
966 			bio_transient_maxcnt = INT_MAX;
967 		else
968 			bio_transient_maxcnt = biotmap_sz / MAXPHYS;
969 		/*
970 		 * Artificially limit to 1024 simultaneous in-flight I/Os
971 		 * using the transient mapping.
972 		 */
973 		if (bio_transient_maxcnt > 1024)
974 			bio_transient_maxcnt = 1024;
975 		if (tuned_nbuf)
976 			nbuf = buf_sz / BKVASIZE;
977 	}
978 
979 	/*
980 	 * swbufs are used as temporary holders for I/O, such as paging I/O.
981  * We have no fewer than 16 and no more than 256.
982 	 */
983 	nswbuf = min(nbuf / 4, 256);
984 	TUNABLE_INT_FETCH("kern.nswbuf", &nswbuf);
985 	if (nswbuf < NSWBUF_MIN)
986 		nswbuf = NSWBUF_MIN;
987 
988 	/*
989 	 * Reserve space for the buffer cache buffers
990 	 */
991 	swbuf = (void *)v;
992 	v = (caddr_t)(swbuf + nswbuf);
993 	buf = (void *)v;
994 	v = (caddr_t)(buf + nbuf);
995 
996 	return(v);
997 }
998 
999 /* Initialize the buffer subsystem.  Called before use of any buffers. */
1000 void
1001 bufinit(void)
1002 {
1003 	struct buf *bp;
1004 	int i;
1005 
1006 	CTASSERT(MAXBCACHEBUF >= MAXBSIZE);
1007 	mtx_init(&bqlocks[QUEUE_DIRTY], "bufq dirty lock", NULL, MTX_DEF);
1008 	mtx_init(&bqlocks[QUEUE_EMPTY], "bufq empty lock", NULL, MTX_DEF);
1009 	for (i = QUEUE_CLEAN; i < QUEUE_CLEAN + CLEAN_QUEUES; i++)
1010 		mtx_init(&bqlocks[i], "bufq clean lock", NULL, MTX_DEF);
1011 	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
1012 	rw_init(&nblock, "needsbuffer lock");
1013 	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
1014 	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
1015 
1016 	/* next, make a null set of free lists */
1017 	for (i = 0; i < BUFFER_QUEUES; i++)
1018 		TAILQ_INIT(&bufqueues[i]);
1019 
1020 	unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);
1021 
1022 	/* finally, initialize each buffer header and stick on empty q */
1023 	for (i = 0; i < nbuf; i++) {
1024 		bp = &buf[i];
1025 		bzero(bp, sizeof *bp);
1026 		bp->b_flags = B_INVAL;
1027 		bp->b_rcred = NOCRED;
1028 		bp->b_wcred = NOCRED;
1029 		bp->b_qindex = QUEUE_EMPTY;
1030 		bp->b_xflags = 0;
1031 		bp->b_data = bp->b_kvabase = unmapped_buf;
1032 		LIST_INIT(&bp->b_dep);
1033 		BUF_LOCKINIT(bp);
1034 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
1035 #ifdef INVARIANTS
1036 		bq_len[QUEUE_EMPTY]++;
1037 #endif
1038 	}
1039 
1040 	/*
1041 	 * maxbufspace is the absolute maximum amount of buffer space we are
1042 	 * allowed to reserve in KVM and in real terms.  The absolute maximum
1043 	 * is nominally used by metadata.  hibufspace is the nominal maximum
1044 	 * used by most other requests.  The differential is required to
1045 	 * ensure that metadata deadlocks don't occur.
1046 	 *
1047 	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
1048 	 * this may result in KVM fragmentation which is not handled optimally
1049 	 * by the system. XXX This is less true with vmem.  We could use
1050 	 * PAGE_SIZE.
1051 	 */
1052 	maxbufspace = (long)nbuf * BKVASIZE;
1053 	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBCACHEBUF * 10);
1054 	lobufspace = (hibufspace / 20) * 19; /* 95% */
1055 	bufspacethresh = lobufspace + (hibufspace - lobufspace) / 2;
1056 
1057 	/*
1058 	 * Note: The 16 MiB upper limit for hirunningspace was chosen
1059 	 * arbitrarily and may need further tuning. It corresponds to
1060 	 * 128 outstanding write IO requests (if IO size is 128 KiB),
1061 	 * which fits with many RAID controllers' tagged queuing limits.
1062 	 * The lower 1 MiB limit is the historical upper limit for
1063 	 * hirunningspace.
1064 	 */
1065 	hirunningspace = lmax(lmin(roundup(hibufspace / 64, MAXBCACHEBUF),
1066 	    16 * 1024 * 1024), 1024 * 1024);
1067 	lorunningspace = roundup((hirunningspace * 2) / 3, MAXBCACHEBUF);
1068 
1069 	/*
1070 	 * Limit the amount of malloc memory since it is wired permanently into
1071 	 * the kernel space.  Even though this is accounted for in the buffer
1072 	 * allocation, we don't want the malloced region to grow uncontrolled.
1073 	 * The malloc scheme improves memory utilization significantly on
1074 	 * average (small) directories.
1075 	 */
1076 	maxbufmallocspace = hibufspace / 20;
1077 
1078 	/*
1079 	 * Reduce the chance of a deadlock occurring by limiting the number
1080 	 * of delayed-write dirty buffers we allow to stack up.
1081 	 */
1082 	hidirtybuffers = nbuf / 4 + 20;
1083 	dirtybufthresh = hidirtybuffers * 9 / 10;
1084 	numdirtybuffers = 0;
1085 	/*
1086 	 * To support extreme low-memory systems, make sure hidirtybuffers
1087 	 * cannot eat up all available buffer space.  This occurs when our
1088 	 * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
1089 	 * buffer space assuming BKVASIZE'd buffers.
1090 	 */
1091 	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
1092 		hidirtybuffers >>= 1;
1093 	}
1094 	lodirtybuffers = hidirtybuffers / 2;
1095 
1096 	/*
1097 	 * lofreebuffers should be sufficient to avoid stalling waiting on
1098 	 * buf headers under heavy utilization.  The bufs in per-cpu caches
1099 	 * are counted as free but will be unavailable to threads executing
1100 	 * on other cpus.
1101 	 *
1102 	 * hifreebuffers is the free target for the bufspace daemon.  This
1103 	 * should be set appropriately to limit work per-iteration.
1104 	 */
1105 	lofreebuffers = MIN((nbuf / 25) + (20 * mp_ncpus), 128 * mp_ncpus);
1106 	hifreebuffers = (3 * lofreebuffers) / 2;
1107 	numfreebuffers = nbuf;
1108 
1109 	/* Setup the kva and free list allocators. */
1110 	vmem_set_reclaim(buffer_arena, bufkva_reclaim);
1111 	buf_zone = uma_zcache_create("buf free cache", sizeof(struct buf),
1112 	    NULL, NULL, NULL, NULL, buf_import, buf_release, NULL, 0);
1113 
1114 	/*
1115 	 * Size the clean queue according to the amount of buffer space.
1116 	 * One queue per 256 MB up to the max.  More queues give better
1117 	 * concurrency but less accurate LRU.
1118 	 */
1119 	clean_queues = MIN(howmany(maxbufspace, 256*1024*1024), CLEAN_QUEUES);
1120 
1121 }
1122 
1123 #ifdef INVARIANTS
1124 static inline void
1125 vfs_buf_check_mapped(struct buf *bp)
1126 {
1127 
1128 	KASSERT(bp->b_kvabase != unmapped_buf,
1129 	    ("mapped buf: b_kvabase was not updated %p", bp));
1130 	KASSERT(bp->b_data != unmapped_buf,
1131 	    ("mapped buf: b_data was not updated %p", bp));
1132 	KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
1133 	    MAXPHYS, ("b_data + b_offset unmapped %p", bp));
1134 }
1135 
1136 static inline void
1137 vfs_buf_check_unmapped(struct buf *bp)
1138 {
1139 
1140 	KASSERT(bp->b_data == unmapped_buf,
1141 	    ("unmapped buf: corrupted b_data %p", bp));
1142 }
1143 
1144 #define	BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
1145 #define	BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
1146 #else
1147 #define	BUF_CHECK_MAPPED(bp) do {} while (0)
1148 #define	BUF_CHECK_UNMAPPED(bp) do {} while (0)
1149 #endif
1150 
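/*
 *	isbufbusy:
 *
 *	Return true if the buffer is still in use: either locked and not
 *	invalidated, or holding delayed-write data that has yet to be
 *	flushed.
 */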
1151 static int
1152 isbufbusy(struct buf *bp)
1153 {
1154 	if (((bp->b_flags & B_INVAL) == 0 && BUF_ISLOCKED(bp)) ||
1155 	    ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
1156 		return (1);
1157 	return (0);
1158 }
1159 
1160 /*
1161  * Shutdown the system cleanly to prepare for reboot, halt, or power off.
1162  */
1163 void
1164 bufshutdown(int show_busybufs)
1165 {
1166 	static int first_buf_printf = 1;
1167 	struct buf *bp;
1168 	int iter, nbusy, pbusy;
1169 #ifndef PREEMPTION
1170 	int subiter;
1171 #endif
1172 
1173 	/*
1174 	 * Sync filesystems for shutdown
1175 	 */
1176 	wdog_kern_pat(WD_LASTVAL);
1177 	sys_sync(curthread, NULL);
1178 
1179 	/*
1180 	 * With soft updates, some buffers that are
1181 	 * written will be remarked as dirty until other
1182 	 * buffers are written.
1183 	 */
1184 	for (iter = pbusy = 0; iter < 20; iter++) {
1185 		nbusy = 0;
1186 		for (bp = &buf[nbuf]; --bp >= buf; )
1187 			if (isbufbusy(bp))
1188 				nbusy++;
1189 		if (nbusy == 0) {
1190 			if (first_buf_printf)
1191 				printf("All buffers synced.");
1192 			break;
1193 		}
1194 		if (first_buf_printf) {
1195 			printf("Syncing disks, buffers remaining... ");
1196 			first_buf_printf = 0;
1197 		}
1198 		printf("%d ", nbusy);
1199 		if (nbusy < pbusy)
1200 			iter = 0;
1201 		pbusy = nbusy;
1202 
1203 		wdog_kern_pat(WD_LASTVAL);
1204 		sys_sync(curthread, NULL);
1205 
1206 #ifdef PREEMPTION
1207 		/*
1208 		 * Drop Giant and spin for a while to allow
1209 		 * interrupt threads to run.
1210 		 */
1211 		DROP_GIANT();
1212 		DELAY(50000 * iter);
1213 		PICKUP_GIANT();
1214 #else
1215 		/*
1216 		 * Drop Giant and context switch several times to
1217 		 * allow interrupt threads to run.
1218 		 */
1219 		DROP_GIANT();
1220 		for (subiter = 0; subiter < 50 * iter; subiter++) {
1221 			thread_lock(curthread);
1222 			mi_switch(SW_VOL, NULL);
1223 			thread_unlock(curthread);
1224 			DELAY(1000);
1225 		}
1226 		PICKUP_GIANT();
1227 #endif
1228 	}
1229 	printf("\n");
1230 	/*
1231 	 * Count only busy local buffers to prevent forcing
1232 	 * a fsck if we're just a client of a wedged NFS server
1233 	 */
1234 	nbusy = 0;
1235 	for (bp = &buf[nbuf]; --bp >= buf; ) {
1236 		if (isbufbusy(bp)) {
1237 #if 0
1238 /* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
1239 			if (bp->b_dev == NULL) {
1240 				TAILQ_REMOVE(&mountlist,
1241 				    bp->b_vp->v_mount, mnt_list);
1242 				continue;
1243 			}
1244 #endif
1245 			nbusy++;
1246 			if (show_busybufs > 0) {
1247 				printf(
1248 	    "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
1249 				    nbusy, bp, bp->b_vp, bp->b_flags,
1250 				    (intmax_t)bp->b_blkno,
1251 				    (intmax_t)bp->b_lblkno);
1252 				BUF_LOCKPRINTINFO(bp);
1253 				if (show_busybufs > 1)
1254 					vn_printf(bp->b_vp,
1255 					    "vnode content: ");
1256 			}
1257 		}
1258 	}
1259 	if (nbusy) {
1260 		/*
1261 		 * Failed to sync all blocks. Indicate this and don't
1262 		 * unmount filesystems (thus forcing an fsck on reboot).
1263 		 */
1264 		printf("Giving up on %d buffers\n", nbusy);
1265 		DELAY(5000000);	/* 5 seconds */
1266 	} else {
1267 		if (!first_buf_printf)
1268 			printf("Final sync complete\n");
1269 		/*
1270 		 * Unmount filesystems
1271 		 */
1272 		if (panicstr == NULL)
1273 			vfs_unmountall();
1274 	}
1275 	swapoff_all();
1276 	DELAY(100000);		/* wait for console output to finish */
1277 }
1278 
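/*
 *	bpmap_qenter:
 *
 *	Map the buffer's pages into its KVA with pmap_qenter() and
 *	re-apply the page offset derived from b_offset to b_data.
 */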
1279 static void
1280 bpmap_qenter(struct buf *bp)
1281 {
1282 
1283 	BUF_CHECK_MAPPED(bp);
1284 
1285 	/*
1286 	 * bp->b_data is relative to bp->b_offset, but
1287 	 * bp->b_offset may be offset into the first page.
1288 	 */
1289 	bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
1290 	pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
1291 	bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
1292 	    (vm_offset_t)(bp->b_offset & PAGE_MASK));
1293 }
1294 
1295 /*
1296  *	binsfree:
1297  *
1298  *	Insert the buffer into the appropriate free list.
1299  */
1300 static void
1301 binsfree(struct buf *bp, int qindex)
1302 {
1303 	struct mtx *olock, *nlock;
1304 
1305 	if (qindex != QUEUE_EMPTY) {
1306 		BUF_ASSERT_XLOCKED(bp);
1307 	}
1308 
1309 	/*
1310 	 * Stick to the same clean queue for the lifetime of the buf to
1311 	 * limit locking below.  Otherwise pick one sequentially.
1312 	 */
1313 	if (qindex == QUEUE_CLEAN) {
1314 		if (bqisclean(bp->b_qindex))
1315 			qindex = bp->b_qindex;
1316 		else
1317 			qindex = bqcleanq();
1318 	}
1319 
1320 	/*
1321 	 * Handle delayed bremfree() processing.
1322 	 */
1323 	nlock = bqlock(qindex);
1324 	if (bp->b_flags & B_REMFREE) {
1325 		olock = bqlock(bp->b_qindex);
1326 		mtx_lock(olock);
1327 		bremfreel(bp);
1328 		if (olock != nlock) {
1329 			mtx_unlock(olock);
1330 			mtx_lock(nlock);
1331 		}
1332 	} else
1333 		mtx_lock(nlock);
1334 
1335 	if (bp->b_qindex != QUEUE_NONE)
1336 		panic("binsfree: free buffer onto another queue???");
1337 
1338 	bp->b_qindex = qindex;
1339 	if (bp->b_flags & B_AGE)
1340 		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
1341 	else
1342 		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
1343 #ifdef INVARIANTS
1344 	bq_len[bp->b_qindex]++;
1345 #endif
1346 	mtx_unlock(nlock);
1347 }
1348 
1349 /*
1350  * buf_free:
1351  *
1352  *	Free a buffer to the buf zone once it no longer has valid contents.
1353  */
1354 static void
1355 buf_free(struct buf *bp)
1356 {
1357 
1358 	if (bp->b_flags & B_REMFREE)
1359 		bremfreef(bp);
1360 	if (bp->b_vflags & BV_BKGRDINPROG)
1361 		panic("losing buffer 1");
1362 	if (bp->b_rcred != NOCRED) {
1363 		crfree(bp->b_rcred);
1364 		bp->b_rcred = NOCRED;
1365 	}
1366 	if (bp->b_wcred != NOCRED) {
1367 		crfree(bp->b_wcred);
1368 		bp->b_wcred = NOCRED;
1369 	}
1370 	if (!LIST_EMPTY(&bp->b_dep))
1371 		buf_deallocate(bp);
1372 	bufkva_free(bp);
1373 	BUF_UNLOCK(bp);
1374 	uma_zfree(buf_zone, bp);
1375 	atomic_add_int(&numfreebuffers, 1);
1376 	bufspace_wakeup();
1377 }
1378 
1379 /*
1380  * buf_import:
1381  *
1382  *	Import bufs into the uma cache from the buf list.  The system still
1383  *	expects a static array of bufs and much of the synchronization
1384  *	around bufs assumes type stable storage.  As a result, UMA is used
1385  *	only as a per-cpu cache of bufs still maintained on a global list.
1386  */
1387 static int
1388 buf_import(void *arg, void **store, int cnt, int flags)
1389 {
1390 	struct buf *bp;
1391 	int i;
1392 
1393 	mtx_lock(&bqlocks[QUEUE_EMPTY]);
1394 	for (i = 0; i < cnt; i++) {
1395 		bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
1396 		if (bp == NULL)
1397 			break;
1398 		bremfreel(bp);
1399 		store[i] = bp;
1400 	}
1401 	mtx_unlock(&bqlocks[QUEUE_EMPTY]);
1402 
1403 	return (i);
1404 }
1405 
1406 /*
1407  * buf_release:
1408  *
1409  *	Release bufs from the uma cache back to the buffer queues.
1410  */
1411 static void
1412 buf_release(void *arg, void **store, int cnt)
1413 {
1414 	int i;
1415 
1416 	for (i = 0; i < cnt; i++)
1417 		binsfree(store[i], QUEUE_EMPTY);
1418 }
1419 
1420 /*
1421  * buf_alloc:
1422  *
1423  *	Allocate an empty buffer header.
1424  */
1425 static struct buf *
1426 buf_alloc(void)
1427 {
1428 	struct buf *bp;
1429 
1430 	bp = uma_zalloc(buf_zone, M_NOWAIT);
1431 	if (bp == NULL) {
1432 		bufspace_daemonwakeup();
1433 		atomic_add_int(&numbufallocfails, 1);
1434 		return (NULL);
1435 	}
1436 
1437 	/*
1438 	 * Wake-up the bufspace daemon on transition.
1439 	 */
1440 	if (atomic_fetchadd_int(&numfreebuffers, -1) == lofreebuffers)
1441 		bufspace_daemonwakeup();
1442 
1443 	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1444 		panic("getnewbuf_empty: Locked buf %p on free queue.", bp);
1445 
1446 	KASSERT(bp->b_vp == NULL,
1447 	    ("bp: %p still has vnode %p.", bp, bp->b_vp));
1448 	KASSERT((bp->b_flags & (B_DELWRI | B_NOREUSE)) == 0,
1449 	    ("invalid buffer %p flags %#x", bp, bp->b_flags));
1450 	KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1451 	    ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
1452 	KASSERT(bp->b_npages == 0,
1453 	    ("bp: %p still has %d vm pages\n", bp, bp->b_npages));
1454 	KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp));
1455 	KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp));
1456 
1457 	bp->b_flags = 0;
1458 	bp->b_ioflags = 0;
1459 	bp->b_xflags = 0;
1460 	bp->b_vflags = 0;
1461 	bp->b_vp = NULL;
1462 	bp->b_blkno = bp->b_lblkno = 0;
1463 	bp->b_offset = NOOFFSET;
1464 	bp->b_iodone = 0;
1465 	bp->b_error = 0;
1466 	bp->b_resid = 0;
1467 	bp->b_bcount = 0;
1468 	bp->b_npages = 0;
1469 	bp->b_dirtyoff = bp->b_dirtyend = 0;
1470 	bp->b_bufobj = NULL;
1471 	bp->b_data = bp->b_kvabase = unmapped_buf;
1472 	bp->b_fsprivate1 = NULL;
1473 	bp->b_fsprivate2 = NULL;
1474 	bp->b_fsprivate3 = NULL;
1475 	LIST_INIT(&bp->b_dep);
1476 
1477 	return (bp);
1478 }
1479 
1480 /*
1481  *	buf_qrecycle:
1482  *
1483  *	Free a buffer from the given bufqueue.  kva controls whether the
1484  *	freed buf must own some kva resources.  This is used for
1485  *	defragmenting.
1486  */
1487 static int
1488 buf_qrecycle(int qindex, bool kva)
1489 {
1490 	struct buf *bp, *nbp;
1491 
1492 	if (kva)
1493 		atomic_add_int(&bufdefragcnt, 1);
1494 	nbp = NULL;
1495 	mtx_lock(&bqlocks[qindex]);
1496 	nbp = TAILQ_FIRST(&bufqueues[qindex]);
1497 
1498 	/*
1499 	 * Run the scan, possibly freeing data and/or kva mappings on the
1500 	 * fly, depending on the kva argument.
1501 	 */
1502 	while ((bp = nbp) != NULL) {
1503 		/*
1504 		 * Calculate next bp (we can only use it if we do not
1505 		 * release the bqlock).
1506 		 */
1507 		nbp = TAILQ_NEXT(bp, b_freelist);
1508 
1509 		/*
1510 		 * If we are defragging then we need a buffer with
1511 		 * some kva to reclaim.
1512 		 */
1513 		if (kva && bp->b_kvasize == 0)
1514 			continue;
1515 
1516 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1517 			continue;
1518 
1519 		/*
1520 		 * Skip buffers with background writes in progress.
1521 		 */
1522 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
1523 			BUF_UNLOCK(bp);
1524 			continue;
1525 		}
1526 
1527 		KASSERT(bp->b_qindex == qindex,
1528 		    ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
1529 		/*
1530 		 * NOTE:  nbp is now entirely invalid.  We can only restart
1531 		 * the scan from this point on.
1532 		 */
1533 		bremfreel(bp);
1534 		mtx_unlock(&bqlocks[qindex]);
1535 
1536 		/*
1537 		 * Requeue the background write buffer with error and
1538 		 * restart the scan.
1539 		 */
1540 		if ((bp->b_vflags & BV_BKGRDERR) != 0) {
1541 			bqrelse(bp);
1542 			mtx_lock(&bqlocks[qindex]);
1543 			nbp = TAILQ_FIRST(&bufqueues[qindex]);
1544 			continue;
1545 		}
1546 		bp->b_flags |= B_INVAL;
1547 		brelse(bp);
1548 		return (0);
1549 	}
1550 	mtx_unlock(&bqlocks[qindex]);
1551 
1552 	return (ENOBUFS);
1553 }
1554 
1555 /*
1556  *	buf_recycle:
1557  *
1558  *	Iterate through all clean queues until we find a buf to recycle or
1559  *	exhaust the search.
1560  */
1561 static int
1562 buf_recycle(bool kva)
1563 {
1564 	int qindex, first_qindex;
1565 
1566 	qindex = first_qindex = bqcleanq();
1567 	do {
1568 		if (buf_qrecycle(qindex, kva) == 0)
1569 			return (0);
1570 		if (++qindex == QUEUE_CLEAN + clean_queues)
1571 			qindex = QUEUE_CLEAN;
1572 	} while (qindex != first_qindex);
1573 
1574 	return (ENOBUFS);
1575 }
1576 
1577 /*
1578  *	buf_scan:
1579  *
1580  *	Scan the clean queues looking for a buffer to recycle.  needsbuffer
1581  *	is set on failure so that the caller may optionally bufspace_wait()
1582  *	in a race-free fashion.
1583  */
1584 static int
1585 buf_scan(bool defrag)
1586 {
1587 	int error;
1588 
1589 	/*
1590 	 * To avoid heavy synchronization and wakeup races we set
1591 	 * needsbuffer and re-poll before failing.  This ensures that
1592 	 * no frees can be missed between an unsuccessful poll and
1593 	 * going to sleep in a synchronized fashion.
1594 	 */
1595 	if ((error = buf_recycle(defrag)) != 0) {
1596 		atomic_set_int(&needsbuffer, 1);
1597 		bufspace_daemonwakeup();
1598 		error = buf_recycle(defrag);
1599 	}
1600 	if (error == 0)
1601 		atomic_add_int(&getnewbufrestarts, 1);
1602 	return (error);
1603 }
1604 
1605 /*
1606  *	bremfree:
1607  *
1608  *	Mark the buffer for removal from the appropriate free list.
1609  *
1610  */
1611 void
1612 bremfree(struct buf *bp)
1613 {
1614 
1615 	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1616 	KASSERT((bp->b_flags & B_REMFREE) == 0,
1617 	    ("bremfree: buffer %p already marked for delayed removal.", bp));
1618 	KASSERT(bp->b_qindex != QUEUE_NONE,
1619 	    ("bremfree: buffer %p not on a queue.", bp));
1620 	BUF_ASSERT_XLOCKED(bp);
1621 
1622 	bp->b_flags |= B_REMFREE;
1623 }
1624 
1625 /*
1626  *	bremfreef:
1627  *
1628  *	Force an immediate removal from a free list.  Used only in nfs when
1629  *	it abuses the b_freelist pointer.
1630  */
1631 void
1632 bremfreef(struct buf *bp)
1633 {
1634 	struct mtx *qlock;
1635 
1636 	qlock = bqlock(bp->b_qindex);
1637 	mtx_lock(qlock);
1638 	bremfreel(bp);
1639 	mtx_unlock(qlock);
1640 }
1641 
1642 /*
1643  *	bremfreel:
1644  *
1645  *	Removes a buffer from the free list, must be called with the
1646  *	correct qlock held.
1647  */
1648 static void
1649 bremfreel(struct buf *bp)
1650 {
1651 
1652 	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
1653 	    bp, bp->b_vp, bp->b_flags);
1654 	KASSERT(bp->b_qindex != QUEUE_NONE,
1655 	    ("bremfreel: buffer %p not on a queue.", bp));
1656 	if (bp->b_qindex != QUEUE_EMPTY) {
1657 		BUF_ASSERT_XLOCKED(bp);
1658 	}
1659 	mtx_assert(bqlock(bp->b_qindex), MA_OWNED);
1660 
1661 	TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
1662 #ifdef INVARIANTS
1663 	KASSERT(bq_len[bp->b_qindex] >= 1, ("queue %d underflow",
1664 	    bp->b_qindex));
1665 	bq_len[bp->b_qindex]--;
1666 #endif
1667 	bp->b_qindex = QUEUE_NONE;
1668 	bp->b_flags &= ~B_REMFREE;
1669 }
1670 
1671 /*
1672  *	bufkva_free:
1673  *
1674  *	Free the kva allocation for a buffer.
1675  *
1676  */
1677 static void
1678 bufkva_free(struct buf *bp)
1679 {
1680 
1681 #ifdef INVARIANTS
1682 	if (bp->b_kvasize == 0) {
1683 		KASSERT(bp->b_kvabase == unmapped_buf &&
1684 		    bp->b_data == unmapped_buf,
1685 		    ("Leaked KVA space on %p", bp));
1686 	} else if (buf_mapped(bp))
1687 		BUF_CHECK_MAPPED(bp);
1688 	else
1689 		BUF_CHECK_UNMAPPED(bp);
1690 #endif
1691 	if (bp->b_kvasize == 0)
1692 		return;
1693 
1694 	vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
1695 	atomic_subtract_long(&bufkvaspace, bp->b_kvasize);
1696 	atomic_add_int(&buffreekvacnt, 1);
1697 	bp->b_data = bp->b_kvabase = unmapped_buf;
1698 	bp->b_kvasize = 0;
1699 }
1700 
1701 /*
1702  *	bufkva_alloc:
1703  *
1704  *	Allocate the buffer KVA and set b_kvasize and b_kvabase.
1705  */
1706 static int
1707 bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
1708 {
1709 	vm_offset_t addr;
1710 	int error;
1711 
1712 	KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
1713 	    ("Invalid gbflags 0x%x in %s", gbflags, __func__));
1714 
1715 	bufkva_free(bp);
1716 
1717 	addr = 0;
1718 	error = vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr);
1719 	if (error != 0) {
1720 		/*
1721 		 * Buffer map is too fragmented.  Request the caller
1722 		 * to defragment the map.
1723 		 */
1724 		return (error);
1725 	}
1726 	bp->b_kvabase = (caddr_t)addr;
1727 	bp->b_kvasize = maxsize;
1728 	atomic_add_long(&bufkvaspace, bp->b_kvasize);
1729 	if ((gbflags & GB_UNMAPPED) != 0) {
1730 		bp->b_data = unmapped_buf;
1731 		BUF_CHECK_UNMAPPED(bp);
1732 	} else {
1733 		bp->b_data = bp->b_kvabase;
1734 		BUF_CHECK_MAPPED(bp);
1735 	}
1736 	return (0);
1737 }
1738 
1739 /*
1740  *	bufkva_reclaim:
1741  *
1742  *	Reclaim buffer kva by freeing buffers holding kva.  This is a vmem
1743  *	reclaim callback, invoked when the arena is exhausted, to avoid failure.
1744  */
1745 static void
1746 bufkva_reclaim(vmem_t *vmem, int flags)
1747 {
1748 	int i;
1749 
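	/*
	 * Make a bounded number of passes; stop early once buf_scan()
	 * can no longer find a buffer holding KVA to recycle.
	 */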
1750 	for (i = 0; i < 5; i++)
1751 		if (buf_scan(true) != 0)
1752 			break;
1753 	return;
1754 }
1755 
1756 
1757 /*
1758  * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
1759  * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
1760  * the buffer is valid and we do not have to do anything.
1761  */
1762 void
1763 breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
1764     int cnt, struct ucred * cred)
1765 {
1766 	struct buf *rabp;
1767 	int i;
1768 
1769 	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
1770 		if (inmem(vp, *rablkno))
1771 			continue;
1772 		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
1773 
1774 		if ((rabp->b_flags & B_CACHE) == 0) {
1775 			if (!TD_IS_IDLETHREAD(curthread)) {
1776 #ifdef RACCT
1777 				if (racct_enable) {
1778 					PROC_LOCK(curproc);
1779 					racct_add_buf(curproc, rabp, 0);
1780 					PROC_UNLOCK(curproc);
1781 				}
1782 #endif /* RACCT */
1783 				curthread->td_ru.ru_inblock++;
1784 			}
1785 			rabp->b_flags |= B_ASYNC;
1786 			rabp->b_flags &= ~B_INVAL;
1787 			rabp->b_ioflags &= ~BIO_ERROR;
1788 			rabp->b_iocmd = BIO_READ;
1789 			if (rabp->b_rcred == NOCRED && cred != NOCRED)
1790 				rabp->b_rcred = crhold(cred);
1791 			vfs_busy_pages(rabp, 0);
1792 			BUF_KERNPROC(rabp);
1793 			rabp->b_iooffset = dbtob(rabp->b_blkno);
1794 			bstrategy(rabp);
1795 		} else {
1796 			brelse(rabp);
1797 		}
1798 	}
1799 }
1800 
1801 /*
1802  * Entry point for bread() and breadn() via #defines in sys/buf.h.
1803  *
1804  * Get a buffer with the specified data.  Look in the cache first.  We
1805  * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
1806  * is set, the buffer is valid and we do not have to do anything, see
1807  * getblk(). Also starts asynchronous I/O on read-ahead blocks.
1808  *
1809  * Always return a NULL buffer pointer (in bpp) when returning an error.
1810  */
1811 int
1812 breadn_flags(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablkno,
1813     int *rabsize, int cnt, struct ucred *cred, int flags, struct buf **bpp)
1814 {
1815 	struct buf *bp;
1816 	int rv = 0, readwait = 0;
1817 
1818 	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
1819 	/*
1820 	 * Can only return NULL if GB_LOCK_NOWAIT flag is specified.
1821 	 */
1822 	*bpp = bp = getblk(vp, blkno, size, 0, 0, flags);
1823 	if (bp == NULL)
1824 		return (EBUSY);
1825 
1826 	/* if not found in cache, do some I/O */
1827 	if ((bp->b_flags & B_CACHE) == 0) {
1828 		if (!TD_IS_IDLETHREAD(curthread)) {
1829 #ifdef RACCT
1830 			if (racct_enable) {
1831 				PROC_LOCK(curproc);
1832 				racct_add_buf(curproc, bp, 0);
1833 				PROC_UNLOCK(curproc);
1834 			}
1835 #endif /* RACCT */
1836 			curthread->td_ru.ru_inblock++;
1837 		}
1838 		bp->b_iocmd = BIO_READ;
1839 		bp->b_flags &= ~B_INVAL;
1840 		bp->b_ioflags &= ~BIO_ERROR;
1841 		if (bp->b_rcred == NOCRED && cred != NOCRED)
1842 			bp->b_rcred = crhold(cred);
1843 		vfs_busy_pages(bp, 0);
1844 		bp->b_iooffset = dbtob(bp->b_blkno);
1845 		bstrategy(bp);
1846 		++readwait;
1847 	}
1848 
1849 	breada(vp, rablkno, rabsize, cnt, cred);
1850 
1851 	if (readwait) {
1852 		rv = bufwait(bp);
1853 		if (rv != 0) {
1854 			brelse(bp);
1855 			*bpp = NULL;
1856 		}
1857 	}
1858 	return (rv);
1859 }
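
/*
 * Illustrative sketch, not part of this file and excluded from the
 * build: the canonical bread(9) caller pattern for the routine above.
 * The helper name example_read_block() and its arguments are
 * hypothetical.
 */
#if 0
static int
example_read_block(struct vnode *vp, daddr_t lbn, int bsize)
{
	struct buf *bp;
	int error;

	/* On error bp is NULL and the buffer has already been released. */
	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error != 0)
		return (error);
	/* ... consume up to bsize bytes at bp->b_data ... */
	brelse(bp);		/* release; the block stays cached */
	return (0);
}
#endif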
1860 
1861 /*
1862  * Write, release buffer on completion.  (Done by iodone
1863  * if async).  Do not bother writing anything if the buffer
1864  * is invalid.
1865  *
1866  * Note that we set B_CACHE here, indicating that buffer is
1867  * fully valid and thus cacheable.  This is true even of NFS
1868  * now so we set it generally.  This could be set either here
1869  * or in biodone() since the I/O is synchronous.  We put it
1870  * here.
1871  */
1872 int
1873 bufwrite(struct buf *bp)
1874 {
1875 	int oldflags;
1876 	struct vnode *vp;
1877 	long space;
1878 	int vp_md;
1879 
1880 	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1881 	if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) {
1882 		bp->b_flags |= B_INVAL | B_RELBUF;
1883 		bp->b_flags &= ~B_CACHE;
1884 		brelse(bp);
1885 		return (ENXIO);
1886 	}
1887 	if (bp->b_flags & B_INVAL) {
1888 		brelse(bp);
1889 		return (0);
1890 	}
1891 
1892 	if (bp->b_flags & B_BARRIER)
1893 		barrierwrites++;
1894 
1895 	oldflags = bp->b_flags;
1896 
1897 	BUF_ASSERT_HELD(bp);
1898 
1899 	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
1900 	    ("FFS background buffer should not get here %p", bp));
1901 
1902 	vp = bp->b_vp;
1903 	if (vp)
1904 		vp_md = vp->v_vflag & VV_MD;
1905 	else
1906 		vp_md = 0;
1907 
1908 	/*
1909 	 * Mark the buffer clean.  Increment the bufobj write count
1910 	 * before bundirty() call, to prevent other thread from seeing
1911 	 * empty dirty list and zero counter for writes in progress,
1912 	 * falsely indicating that the bufobj is clean.
1913 	 */
1914 	bufobj_wref(bp->b_bufobj);
1915 	bundirty(bp);
1916 
1917 	bp->b_flags &= ~B_DONE;
1918 	bp->b_ioflags &= ~BIO_ERROR;
1919 	bp->b_flags |= B_CACHE;
1920 	bp->b_iocmd = BIO_WRITE;
1921 
1922 	vfs_busy_pages(bp, 1);
1923 
1924 	/*
1925 	 * Normal bwrites pipeline writes
1926 	 */
1927 	bp->b_runningbufspace = bp->b_bufsize;
1928 	space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
1929 
1930 	if (!TD_IS_IDLETHREAD(curthread)) {
1931 #ifdef RACCT
1932 		if (racct_enable) {
1933 			PROC_LOCK(curproc);
1934 			racct_add_buf(curproc, bp, 1);
1935 			PROC_UNLOCK(curproc);
1936 		}
1937 #endif /* RACCT */
1938 		curthread->td_ru.ru_oublock++;
1939 	}
1940 	if (oldflags & B_ASYNC)
1941 		BUF_KERNPROC(bp);
1942 	bp->b_iooffset = dbtob(bp->b_blkno);
1943 	buf_track(bp, __func__);
1944 	bstrategy(bp);
1945 
1946 	if ((oldflags & B_ASYNC) == 0) {
1947 		int rtval = bufwait(bp);
1948 		brelse(bp);
1949 		return (rtval);
1950 	} else if (space > hirunningspace) {
1951 		/*
1952 		 * don't allow the async write to saturate the I/O
1953 		 * system.  We will not deadlock here because
1954 		 * we are blocking waiting for I/O that is already in-progress
1955 		 * to complete. We do not block here if it is the update
1956 		 * or syncer daemon trying to clean up as that can lead
1957 		 * to deadlock.
1958 		 */
1959 		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
1960 			waitrunningbufspace();
1961 	}
1962 
1963 	return (0);
1964 }
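
/*
 * Illustrative sketch, not part of this file and excluded from the
 * build: synchronously rewriting a cached block with bwrite(), which
 * resolves to bufwrite() above for ordinary buffers.  The helper name
 * example_update_block() and the modify callback are hypothetical.
 */
#if 0
static int
example_update_block(struct vnode *vp, daddr_t lbn, int bsize,
    void (*modify)(void *data, int size))
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error != 0)
		return (error);
	modify(bp->b_data, bsize);
	/* bwrite() starts the write, waits for it and releases bp. */
	return (bwrite(bp));
}
#endif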
1965 
1966 void
1967 bufbdflush(struct bufobj *bo, struct buf *bp)
1968 {
1969 	struct buf *nbp;
1970 
1971 	if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
1972 		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
1973 		altbufferflushes++;
1974 	} else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
1975 		BO_LOCK(bo);
1976 		/*
1977 		 * Try to find a buffer to flush.
1978 		 */
1979 		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
1980 			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
1981 			    BUF_LOCK(nbp,
1982 				     LK_EXCLUSIVE | LK_NOWAIT, NULL))
1983 				continue;
1984 			if (bp == nbp)
1985 				panic("bdwrite: found ourselves");
1986 			BO_UNLOCK(bo);
1987 			/* Don't call buf_countdeps() with the bo lock held. */
1988 			if (buf_countdeps(nbp, 0)) {
1989 				BO_LOCK(bo);
1990 				BUF_UNLOCK(nbp);
1991 				continue;
1992 			}
1993 			if (nbp->b_flags & B_CLUSTEROK) {
1994 				vfs_bio_awrite(nbp);
1995 			} else {
1996 				bremfree(nbp);
1997 				bawrite(nbp);
1998 			}
1999 			dirtybufferflushes++;
2000 			break;
2001 		}
2002 		if (nbp == NULL)
2003 			BO_UNLOCK(bo);
2004 	}
2005 }
2006 
2007 /*
2008  * Delayed write. (Buffer is marked dirty).  Do not bother writing
2009  * anything if the buffer is marked invalid.
2010  *
2011  * Note that since the buffer must be completely valid, we can safely
2012  * set B_CACHE.  In fact, we have to set B_CACHE here rather then in
2013  * biodone() in order to prevent getblk from writing the buffer
2014  * out synchronously.
2015  */
2016 void
2017 bdwrite(struct buf *bp)
2018 {
2019 	struct thread *td = curthread;
2020 	struct vnode *vp;
2021 	struct bufobj *bo;
2022 
2023 	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2024 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2025 	KASSERT((bp->b_flags & B_BARRIER) == 0,
2026 	    ("Barrier request in delayed write %p", bp));
2027 	BUF_ASSERT_HELD(bp);
2028 
2029 	if (bp->b_flags & B_INVAL) {
2030 		brelse(bp);
2031 		return;
2032 	}
2033 
2034 	/*
2035 	 * If we have too many dirty buffers, don't create any more.
2036 	 * If we are wildly over our limit, then force a complete
2037 	 * cleanup. Otherwise, just keep the situation from getting
2038 	 * out of control. Note that we have to avoid a recursive
2039 	 * disaster and not try to clean up after our own cleanup!
2040 	 */
2041 	vp = bp->b_vp;
2042 	bo = bp->b_bufobj;
2043 	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
2044 		td->td_pflags |= TDP_INBDFLUSH;
2045 		BO_BDFLUSH(bo, bp);
2046 		td->td_pflags &= ~TDP_INBDFLUSH;
2047 	} else
2048 		recursiveflushes++;
2049 
2050 	bdirty(bp);
2051 	/*
2052 	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
2053 	 * true even of NFS now.
2054 	 */
2055 	bp->b_flags |= B_CACHE;
2056 
2057 	/*
2058 	 * This bmap call keeps the system from having to do the bmap later,
2059 	 * perhaps while it is attempting a sync.  The indirect block -- or
2060 	 * whatever other data structure the filesystem needs -- is likely
2061 	 * still in memory now, so resolving the mapping here is cheap.  Note
2062 	 * also that if the pageout daemon requests a sync later, there might
2063 	 * not be enough memory to do the bmap at that point, so doing it
2064 	 * here is important.
2065 	 */
2066 	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
2067 		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
2068 	}
2069 
2070 	buf_track(bp, __func__);
2071 
2072 	/*
2073 	 * Set the *dirty* buffer range based upon the VM system dirty
2074 	 * pages.
2075 	 *
2076 	 * Mark the buffer pages as clean.  We need to do this here to
2077 	 * satisfy the vnode_pager and the pageout daemon, so that it
2078 	 * thinks that the pages have been "cleaned".  Note that since
2079 	 * the pages are in a delayed write buffer -- the VFS layer
2080 	 * "will" see that the pages get written out on the next sync,
2081 	 * or perhaps the cluster will be completed.
2082 	 */
2083 	vfs_clean_pages_dirty_buf(bp);
2084 	bqrelse(bp);
2085 
2086 	/*
2087 	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
2088 	 * due to the softdep code.
2089 	 */
2090 }
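
/*
 * Illustrative sketch, not part of this file and excluded from the
 * build: the delayed-write variant of a block update.  bdwrite() marks
 * the buffer dirty and requeues it; the data reaches the disk later via
 * the syncer or buf_daemon.  The helper name and callback are
 * hypothetical.
 */
#if 0
static int
example_update_block_delayed(struct vnode *vp, daddr_t lbn, int bsize,
    void (*modify)(void *data, int size))
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error != 0)
		return (error);
	modify(bp->b_data, bsize);
	bdwrite(bp);		/* mark dirty, release, write later */
	return (0);
}
#endif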
2091 
2092 /*
2093  *	bdirty:
2094  *
2095  *	Turn buffer into delayed write request.  We must clear BIO_READ and
2096  *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
2097  *	itself to properly update it in the dirty/clean lists.  We mark it
2098  *	B_DONE to ensure that any asynchronization of the buffer properly
2099  *	clears B_DONE ( else a panic will occur later ).
2100  *
2101  *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
2102  *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
2103  *	should only be called if the buffer is known-good.
2104  *
2105  *	Since the buffer is not on a queue, we do not update the numfreebuffers
2106  *	count.
2107  *
2108  *	The buffer must be on QUEUE_NONE.
2109  */
2110 void
2111 bdirty(struct buf *bp)
2112 {
2113 
2114 	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
2115 	    bp, bp->b_vp, bp->b_flags);
2116 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2117 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2118 	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
2119 	BUF_ASSERT_HELD(bp);
2120 	bp->b_flags &= ~(B_RELBUF);
2121 	bp->b_iocmd = BIO_WRITE;
2122 
2123 	if ((bp->b_flags & B_DELWRI) == 0) {
2124 		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
2125 		reassignbuf(bp);
2126 		bdirtyadd();
2127 	}
2128 }
2129 
2130 /*
2131  *	bundirty:
2132  *
2133  *	Clear B_DELWRI for buffer.
2134  *
2135  *	Since the buffer is not on a queue, we do not update the numfreebuffers
2136  *	count.
2137  *
2138  *	The buffer must be on QUEUE_NONE.
2139  */
2140 
2141 void
2142 bundirty(struct buf *bp)
2143 {
2144 
2145 	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2146 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2147 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2148 	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
2149 	BUF_ASSERT_HELD(bp);
2150 
2151 	if (bp->b_flags & B_DELWRI) {
2152 		bp->b_flags &= ~B_DELWRI;
2153 		reassignbuf(bp);
2154 		bdirtysub();
2155 	}
2156 	/*
2157 	 * Since it is now being written, we can clear its deferred write flag.
2158 	 */
2159 	bp->b_flags &= ~B_DEFERRED;
2160 }
2161 
2162 /*
2163  *	bawrite:
2164  *
2165  *	Asynchronous write.  Start output on a buffer, but do not wait for
2166  *	it to complete.  The buffer is released when the output completes.
2167  *
2168  *	bwrite() ( or the VOP routine anyway ) is responsible for handling
2169  *	B_INVAL buffers.  Not us.
2170  */
2171 void
2172 bawrite(struct buf *bp)
2173 {
2174 
2175 	bp->b_flags |= B_ASYNC;
2176 	(void) bwrite(bp);
2177 }
2178 
2179 /*
2180  *	babarrierwrite:
2181  *
2182  *	Asynchronous barrier write.  Start output on a buffer, but do not
2183  *	wait for it to complete.  Place a write barrier after this write so
2184  *	that this buffer and all buffers written before it are committed to
2185  *	the disk before any buffers written after this write are committed
2186  *	to the disk.  The buffer is released when the output completes.
2187  */
2188 void
2189 babarrierwrite(struct buf *bp)
2190 {
2191 
2192 	bp->b_flags |= B_ASYNC | B_BARRIER;
2193 	(void) bwrite(bp);
2194 }
2195 
2196 /*
2197  *	bbarrierwrite:
2198  *
2199  *	Synchronous barrier write.  Start output on a buffer and wait for
2200  *	it to complete.  Place a write barrier after this write so that
2201  *	this buffer and all buffers written before it are committed to
2202  *	the disk before any buffers written after this write are committed
2203  *	to the disk.  The buffer is released when the output completes.
2204  */
2205 int
2206 bbarrierwrite(struct buf *bp)
2207 {
2208 
2209 	bp->b_flags |= B_BARRIER;
2210 	return (bwrite(bp));
2211 }
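
/*
 * Illustrative sketch, not part of this file and excluded from the
 * build: how a hypothetical journaling filesystem might use
 * babarrierwrite() to make a group of log writes and a commit record
 * stable before any subsequent writes.  The buffers in logbp[] and
 * commitbp are assumed to be locked, dirty buffers owned by the caller.
 */
#if 0
static void
example_commit_log(struct buf **logbp, int nlog, struct buf *commitbp)
{
	int i;

	for (i = 0; i < nlog; i++)
		bawrite(logbp[i]);	/* asynchronous, unordered writes */
	/*
	 * The barrier ensures these writes and the commit record reach
	 * stable storage before any writes issued after this point.
	 */
	babarrierwrite(commitbp);
}
#endif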
2212 
2213 /*
2214  *	bwillwrite:
2215  *
2216  *	Called prior to the locking of any vnodes when we are expecting to
2217  *	write.  We do not want to starve the buffer cache with too many
2218  *	write.  We do not want too many dirty buffers to starve the
2219  *	buffer cache, so we block here.  By blocking prior to the locking
2220  *	prevents the various system daemons from flushing related buffers.
2221  */
2222 void
2223 bwillwrite(void)
2224 {
2225 
2226 	if (numdirtybuffers >= hidirtybuffers) {
2227 		mtx_lock(&bdirtylock);
2228 		while (numdirtybuffers >= hidirtybuffers) {
2229 			bdirtywait = 1;
2230 			msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
2231 			    "flswai", 0);
2232 		}
2233 		mtx_unlock(&bdirtylock);
2234 	}
2235 }
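
/*
 * Illustrative sketch, not part of this file and excluded from the
 * build: a write path calling bwillwrite() before it acquires any
 * vnode locks, as described above.  The helper name is hypothetical.
 */
#if 0
static int
example_write_path(struct vnode *vp)
{
	int error;

	bwillwrite();			/* throttle before locking the vnode */
	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		return (error);
	/* ... dirty buffers via bdwrite()/bawrite() here ... */
	VOP_UNLOCK(vp, 0);
	return (0);
}
#endif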
2236 
2237 /*
2238  * Return true if we have too many dirty buffers.
2239  */
2240 int
2241 buf_dirty_count_severe(void)
2242 {
2243 
2244 	return(numdirtybuffers >= hidirtybuffers);
2245 }
2246 
2247 /*
2248  *	brelse:
2249  *
2250  *	Release a busy buffer and, if requested, free its resources.  The
2251  *	buffer will be stashed in the appropriate bufqueue[] allowing it
2252  *	to be accessed later as a cache entity or reused for other purposes.
2253  */
2254 void
2255 brelse(struct buf *bp)
2256 {
2257 	int qindex;
2258 
2259 	/*
2260 	 * Many functions erroneously call brelse with a NULL bp under rare
2261 	 * error conditions. Simply return when called with a NULL bp.
2262 	 */
2263 	if (bp == NULL)
2264 		return;
2265 	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
2266 	    bp, bp->b_vp, bp->b_flags);
2267 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2268 	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2269 	KASSERT((bp->b_flags & B_VMIO) != 0 || (bp->b_flags & B_NOREUSE) == 0,
2270 	    ("brelse: non-VMIO buffer marked NOREUSE"));
2271 
2272 	if (BUF_LOCKRECURSED(bp)) {
2273 		/*
2274 		 * Do not process, in particular, do not handle the
2275 		 * B_INVAL/B_RELBUF and do not release to free list.
2276 		 */
2277 		BUF_UNLOCK(bp);
2278 		return;
2279 	}
2280 
2281 	if (bp->b_flags & B_MANAGED) {
2282 		bqrelse(bp);
2283 		return;
2284 	}
2285 
2286 	if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
2287 		BO_LOCK(bp->b_bufobj);
2288 		bp->b_vflags &= ~BV_BKGRDERR;
2289 		BO_UNLOCK(bp->b_bufobj);
2290 		bdirty(bp);
2291 	}
2292 	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2293 	    (bp->b_error != ENXIO || !LIST_EMPTY(&bp->b_dep)) &&
2294 	    !(bp->b_flags & B_INVAL)) {
2295 		/*
2296 		 * Failed write, redirty.  All errors except ENXIO (which
2297 		 * means the device is gone) are expected to be potentially
2298 		 * transient - underlying media might work if tried again
2299 		 * after EIO, and memory might be available after an ENOMEM.
2300 		 *
2301 		 * Do this also for buffers that failed with ENXIO, but have
2302 		 * non-empty dependencies - the soft updates code might need
2303 		 * to access the buffer to untangle them.
2304 		 *
2305 		 * Must clear BIO_ERROR to prevent pages from being scrapped.
2306 		 */
2307 		bp->b_ioflags &= ~BIO_ERROR;
2308 		bdirty(bp);
2309 	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
2310 	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
2311 		/*
2312 		 * Either a failed read I/O, or we were asked to free or not
2313 		 * cache the buffer, or we failed to write to a device that's
2314 		 * no longer present.
2315 		 */
2316 		bp->b_flags |= B_INVAL;
2317 		if (!LIST_EMPTY(&bp->b_dep))
2318 			buf_deallocate(bp);
2319 		if (bp->b_flags & B_DELWRI)
2320 			bdirtysub();
2321 		bp->b_flags &= ~(B_DELWRI | B_CACHE);
2322 		if ((bp->b_flags & B_VMIO) == 0) {
2323 			allocbuf(bp, 0);
2324 			if (bp->b_vp)
2325 				brelvp(bp);
2326 		}
2327 	}
2328 
2329 	/*
2330 	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_truncate()
2331 	 * is called with B_DELWRI set, the underlying pages may wind up
2332 	 * getting freed causing a previous write (bdwrite()) to get 'lost'
2333 	 * because pages associated with a B_DELWRI bp are marked clean.
2334 	 *
2335 	 * We still allow the B_INVAL case to call vfs_vmio_truncate(), even
2336 	 * if B_DELWRI is set.
2337 	 */
2338 	if (bp->b_flags & B_DELWRI)
2339 		bp->b_flags &= ~B_RELBUF;
2340 
2341 	/*
2342 	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
2343  * constituted, not even NFS buffers now.  Two flags affect this.  If
2344 	 * B_INVAL, the struct buf is invalidated but the VM object is kept
2345 	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
2346 	 *
2347 	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
2348 	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
2349 	 * buffer is also B_INVAL because it hits the re-dirtying code above.
2350 	 *
2351 	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
2352 	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
2353 	 * the commit state and we cannot afford to lose the buffer. If the
2354 	 * buffer has a background write in progress, we need to keep it
2355 	 * around to prevent it from being reconstituted and starting a second
2356 	 * background write.
2357 	 */
2358 	if ((bp->b_flags & B_VMIO) && (bp->b_flags & B_NOCACHE ||
2359 	    (bp->b_ioflags & BIO_ERROR && bp->b_iocmd == BIO_READ)) &&
2360 	    !(bp->b_vp->v_mount != NULL &&
2361 	    (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
2362 	    !vn_isdisk(bp->b_vp, NULL) && (bp->b_flags & B_DELWRI))) {
2363 		vfs_vmio_invalidate(bp);
2364 		allocbuf(bp, 0);
2365 	}
2366 
2367 	if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0 ||
2368 	    (bp->b_flags & (B_DELWRI | B_NOREUSE)) == B_NOREUSE) {
2369 		allocbuf(bp, 0);
2370 		bp->b_flags &= ~B_NOREUSE;
2371 		if (bp->b_vp != NULL)
2372 			brelvp(bp);
2373 	}
2374 
2375 	/*
2376 	 * If the buffer has junk contents signal it and eventually
2377 	 * clean up B_DELWRI and disassociate the vnode so that gbincore()
2378 	 * doesn't find it.
2379 	 */
2380 	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
2381 	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
2382 		bp->b_flags |= B_INVAL;
2383 	if (bp->b_flags & B_INVAL) {
2384 		if (bp->b_flags & B_DELWRI)
2385 			bundirty(bp);
2386 		if (bp->b_vp)
2387 			brelvp(bp);
2388 	}
2389 
2390 	buf_track(bp, __func__);
2391 
2392 	/* buffers with no memory */
2393 	if (bp->b_bufsize == 0) {
2394 		buf_free(bp);
2395 		return;
2396 	}
2397 	/* buffers with junk contents */
2398 	if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
2399 	    (bp->b_ioflags & BIO_ERROR)) {
2400 		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
2401 		if (bp->b_vflags & BV_BKGRDINPROG)
2402 			panic("losing buffer 2");
2403 		qindex = QUEUE_CLEAN;
2404 		bp->b_flags |= B_AGE;
2405 	/* remaining buffers */
2406 	} else if (bp->b_flags & B_DELWRI)
2407 		qindex = QUEUE_DIRTY;
2408 	else
2409 		qindex = QUEUE_CLEAN;
2410 
2411 	binsfree(bp, qindex);
2412 
2413 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
2414 	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
2415 		panic("brelse: not dirty");
2416 	/* unlock */
2417 	BUF_UNLOCK(bp);
2418 	if (qindex == QUEUE_CLEAN)
2419 		bufspace_wakeup();
2420 }
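
/*
 * Illustrative sketch, not part of this file and excluded from the
 * build: dropping a block from the cache, e.g. after the filesystem has
 * freed it on disk, by marking the buffer invalid before brelse().  The
 * helper name is hypothetical; real filesystems layer more bookkeeping
 * (dependencies, cluster state) on top of this.
 */
#if 0
static void
example_discard_block(struct vnode *vp, daddr_t lbn, int bsize)
{
	struct buf *bp;

	bp = getblk(vp, lbn, bsize, 0, 0, GB_NOCREAT);
	if (bp == NULL)
		return;			/* not cached, nothing to discard */
	bp->b_flags |= B_INVAL | B_RELBUF | B_NOCACHE;
	brelse(bp);			/* undirties, frees pages and header */
}
#endif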
2421 
2422 /*
2423  * Release a buffer back to the appropriate queue but do not try to free
2424  * it.  The buffer is expected to be used again soon.
2425  *
2426  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
2427  * biodone() to requeue an async I/O on completion.  It is also used when
2428  * known good buffers need to be requeued but we think we may need the data
2429  * again soon.
2430  *
2431  * XXX we should be able to leave the B_RELBUF hint set on completion.
2432  */
2433 void
2434 bqrelse(struct buf *bp)
2435 {
2436 	int qindex;
2437 
2438 	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2439 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2440 	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2441 
2442 	qindex = QUEUE_NONE;
2443 	if (BUF_LOCKRECURSED(bp)) {
2444 		/* do not release to free list */
2445 		BUF_UNLOCK(bp);
2446 		return;
2447 	}
2448 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
2449 
2450 	if (bp->b_flags & B_MANAGED) {
2451 		if (bp->b_flags & B_REMFREE)
2452 			bremfreef(bp);
2453 		goto out;
2454 	}
2455 
2456 	/* buffers with stale but valid contents */
2457 	if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
2458 	    BV_BKGRDERR)) == BV_BKGRDERR) {
2459 		BO_LOCK(bp->b_bufobj);
2460 		bp->b_vflags &= ~BV_BKGRDERR;
2461 		BO_UNLOCK(bp->b_bufobj);
2462 		qindex = QUEUE_DIRTY;
2463 	} else {
2464 		if ((bp->b_flags & B_DELWRI) == 0 &&
2465 		    (bp->b_xflags & BX_VNDIRTY))
2466 			panic("bqrelse: not dirty");
2467 		if ((bp->b_flags & B_NOREUSE) != 0) {
2468 			brelse(bp);
2469 			return;
2470 		}
2471 		qindex = QUEUE_CLEAN;
2472 	}
2473 	binsfree(bp, qindex);
2474 
2475 out:
2476 	buf_track(bp, __func__);
2477 	/* unlock */
2478 	BUF_UNLOCK(bp);
2479 	if (qindex == QUEUE_CLEAN)
2480 		bufspace_wakeup();
2481 }
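
/*
 * Illustrative sketch, not part of this file and excluded from the
 * build: a scan that expects to revisit a block soon requeues it with
 * bqrelse() instead of brelse().  The helper names example_scan_block()
 * and example_check() are hypothetical.
 */
#if 0
static int
example_scan_block(struct vnode *vp, daddr_t lbn, int bsize)
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error != 0)
		return (error);
	error = example_check(bp->b_data, bsize);	/* hypothetical */
	if (error != 0)
		brelse(bp);	/* may tear the buffer down */
	else
		bqrelse(bp);	/* keep it; we expect to use it again soon */
	return (error);
}
#endif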
2482 
2483 /*
2484  * Complete I/O to a VMIO backed page.  Validate the pages as appropriate,
2485  * restore bogus pages.
2486  */
2487 static void
2488 vfs_vmio_iodone(struct buf *bp)
2489 {
2490 	vm_ooffset_t foff;
2491 	vm_page_t m;
2492 	vm_object_t obj;
2493 	struct vnode *vp;
2494 	int i, iosize, resid;
2495 	bool bogus;
2496 
2497 	obj = bp->b_bufobj->bo_object;
2498 	KASSERT(obj->paging_in_progress >= bp->b_npages,
2499 	    ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
2500 	    obj->paging_in_progress, bp->b_npages));
2501 
2502 	vp = bp->b_vp;
2503 	KASSERT(vp->v_holdcnt > 0,
2504 	    ("vfs_vmio_iodone: vnode %p has zero hold count", vp));
2505 	KASSERT(vp->v_object != NULL,
2506 	    ("vfs_vmio_iodone: vnode %p has no vm_object", vp));
2507 
2508 	foff = bp->b_offset;
2509 	KASSERT(bp->b_offset != NOOFFSET,
2510 	    ("vfs_vmio_iodone: bp %p has no buffer offset", bp));
2511 
2512 	bogus = false;
2513 	iosize = bp->b_bcount - bp->b_resid;
2514 	VM_OBJECT_WLOCK(obj);
2515 	for (i = 0; i < bp->b_npages; i++) {
2516 		resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
2517 		if (resid > iosize)
2518 			resid = iosize;
2519 
2520 		/*
2521 		 * cleanup bogus pages, restoring the originals
2522 		 */
2523 		m = bp->b_pages[i];
2524 		if (m == bogus_page) {
2525 			bogus = true;
2526 			m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2527 			if (m == NULL)
2528 				panic("biodone: page disappeared!");
2529 			bp->b_pages[i] = m;
2530 		} else if ((bp->b_iocmd == BIO_READ) && resid > 0) {
2531 			/*
2532 			 * In the write case, the valid and clean bits are
2533 			 * already changed correctly ( see bdwrite() ), so we
2534 			 * only need to do this here in the read case.
2535 			 */
2536 			KASSERT((m->dirty & vm_page_bits(foff & PAGE_MASK,
2537 			    resid)) == 0, ("vfs_vmio_iodone: page %p "
2538 			    "has unexpected dirty bits", m));
2539 			vfs_page_set_valid(bp, foff, m);
2540 		}
2541 		KASSERT(OFF_TO_IDX(foff) == m->pindex,
2542 		    ("vfs_vmio_iodone: foff(%jd)/pindex(%ju) mismatch",
2543 		    (intmax_t)foff, (uintmax_t)m->pindex));
2544 
2545 		vm_page_sunbusy(m);
2546 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2547 		iosize -= resid;
2548 	}
2549 	vm_object_pip_wakeupn(obj, bp->b_npages);
2550 	VM_OBJECT_WUNLOCK(obj);
2551 	if (bogus && buf_mapped(bp)) {
2552 		BUF_CHECK_MAPPED(bp);
2553 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
2554 		    bp->b_pages, bp->b_npages);
2555 	}
2556 }
2557 
2558 /*
2559  * Unwire a page held by a buf and place it on the appropriate vm queue.
2560  */
2561 static void
2562 vfs_vmio_unwire(struct buf *bp, vm_page_t m)
2563 {
2564 	bool freed;
2565 
2566 	vm_page_lock(m);
2567 	if (vm_page_unwire(m, PQ_NONE)) {
2568 		/*
2569 		 * Determine if the page should be freed before adding
2570 		 * it to the inactive queue.
2571 		 */
2572 		if (m->valid == 0) {
2573 			freed = !vm_page_busied(m);
2574 			if (freed)
2575 				vm_page_free(m);
2576 		} else if ((bp->b_flags & B_DIRECT) != 0)
2577 			freed = vm_page_try_to_free(m);
2578 		else
2579 			freed = false;
2580 		if (!freed) {
2581 			/*
2582 			 * If the page is unlikely to be reused, let the
2583 			 * VM know.  Otherwise, maintain LRU page
2584 			 * ordering and put the page at the tail of the
2585 			 * inactive queue.
2586 			 */
2587 			if ((bp->b_flags & B_NOREUSE) != 0)
2588 				vm_page_deactivate_noreuse(m);
2589 			else
2590 				vm_page_deactivate(m);
2591 		}
2592 	}
2593 	vm_page_unlock(m);
2594 }
2595 
2596 /*
2597  * Perform page invalidation when a buffer is released.  The fully invalid
2598  * pages will be reclaimed later in vfs_vmio_truncate().
2599  */
2600 static void
2601 vfs_vmio_invalidate(struct buf *bp)
2602 {
2603 	vm_object_t obj;
2604 	vm_page_t m;
2605 	int i, resid, poffset, presid;
2606 
2607 	if (buf_mapped(bp)) {
2608 		BUF_CHECK_MAPPED(bp);
2609 		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
2610 	} else
2611 		BUF_CHECK_UNMAPPED(bp);
2612 	/*
2613 	 * Get the base offset and length of the buffer.  Note that
2614 	 * in the VMIO case, if the buffer block size is not
2615 	 * page-aligned, then the b_data pointer may not be page-aligned,
2616 	 * but our b_pages[] array *IS* page aligned.
2617 	 *
2618 	 * Block sizes less than DEV_BSIZE (usually 512) are not
2619 	 * supported due to the page granularity bits (m->valid,
2620 	 * m->dirty, etc...).
2621 	 *
2622 	 * See buf(9) for more information.
2623 	 */
2624 	obj = bp->b_bufobj->bo_object;
2625 	resid = bp->b_bufsize;
2626 	poffset = bp->b_offset & PAGE_MASK;
2627 	VM_OBJECT_WLOCK(obj);
2628 	for (i = 0; i < bp->b_npages; i++) {
2629 		m = bp->b_pages[i];
2630 		if (m == bogus_page)
2631 			panic("vfs_vmio_invalidate: Unexpected bogus page.");
2632 		bp->b_pages[i] = NULL;
2633 
2634 		presid = resid > (PAGE_SIZE - poffset) ?
2635 		    (PAGE_SIZE - poffset) : resid;
2636 		KASSERT(presid >= 0, ("brelse: extra page"));
2637 		while (vm_page_xbusied(m)) {
2638 			vm_page_lock(m);
2639 			VM_OBJECT_WUNLOCK(obj);
2640 			vm_page_busy_sleep(m, "mbncsh", true);
2641 			VM_OBJECT_WLOCK(obj);
2642 		}
2643 		if (pmap_page_wired_mappings(m) == 0)
2644 			vm_page_set_invalid(m, poffset, presid);
2645 		vfs_vmio_unwire(bp, m);
2646 		resid -= presid;
2647 		poffset = 0;
2648 	}
2649 	VM_OBJECT_WUNLOCK(obj);
2650 	bp->b_npages = 0;
2651 }
2652 
2653 /*
2654  * Page-granular truncation of an existing VMIO buffer.
2655  */
2656 static void
2657 vfs_vmio_truncate(struct buf *bp, int desiredpages)
2658 {
2659 	vm_object_t obj;
2660 	vm_page_t m;
2661 	int i;
2662 
2663 	if (bp->b_npages == desiredpages)
2664 		return;
2665 
2666 	if (buf_mapped(bp)) {
2667 		BUF_CHECK_MAPPED(bp);
2668 		pmap_qremove((vm_offset_t)trunc_page((vm_offset_t)bp->b_data) +
2669 		    (desiredpages << PAGE_SHIFT), bp->b_npages - desiredpages);
2670 	} else
2671 		BUF_CHECK_UNMAPPED(bp);
2672 	obj = bp->b_bufobj->bo_object;
2673 	if (obj != NULL)
2674 		VM_OBJECT_WLOCK(obj);
2675 	for (i = desiredpages; i < bp->b_npages; i++) {
2676 		m = bp->b_pages[i];
2677 		KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
2678 		bp->b_pages[i] = NULL;
2679 		vfs_vmio_unwire(bp, m);
2680 	}
2681 	if (obj != NULL)
2682 		VM_OBJECT_WUNLOCK(obj);
2683 	bp->b_npages = desiredpages;
2684 }
2685 
2686 /*
2687  * Byte granular extension of VMIO buffers.
2688  */
2689 static void
2690 vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
2691 {
2692 	/*
2693 	 * We are growing the buffer, possibly in a
2694 	 * byte-granular fashion.
2695 	 */
2696 	vm_object_t obj;
2697 	vm_offset_t toff;
2698 	vm_offset_t tinc;
2699 	vm_page_t m;
2700 
2701 	/*
2702 	 * Step 1, bring in the VM pages from the object, allocating
2703 	 * them if necessary.  We must clear B_CACHE if these pages
2704 	 * are not valid for the range covered by the buffer.
2705 	 */
2706 	obj = bp->b_bufobj->bo_object;
2707 	VM_OBJECT_WLOCK(obj);
2708 	while (bp->b_npages < desiredpages) {
2709 		/*
2710 		 * We must allocate system pages since blocking
2711 		 * here could interfere with paging I/O, no
2712 		 * matter which process we are.
2713 		 *
2714 		 * Only exclusive busy can be tested here.
2715 		 * Blocking on shared busy might lead to
2716 		 * deadlocks once allocbuf() is called after
2717 		 * pages are vfs_busy_pages().
2718 		 */
2719 		m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) + bp->b_npages,
2720 		    VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
2721 		    VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY |
2722 		    VM_ALLOC_COUNT(desiredpages - bp->b_npages));
2723 		if (m->valid == 0)
2724 			bp->b_flags &= ~B_CACHE;
2725 		bp->b_pages[bp->b_npages] = m;
2726 		++bp->b_npages;
2727 	}
2728 
2729 	/*
2730 	 * Step 2.  We've loaded the pages into the buffer,
2731 	 * we have to figure out if we can still have B_CACHE
2732 	 * set.  Note that B_CACHE is set according to the
2733 	 * byte-granular range ( bcount and size ), not the
2734 	 * aligned range ( newbsize ).
2735 	 *
2736 	 * The VM test is against m->valid, which is DEV_BSIZE
2737 	 * aligned.  Needless to say, the validity of the data
2738 	 * needs to also be DEV_BSIZE aligned.  Note that this
2739 	 * fails with NFS if the server or some other client
2740 	 * extends the file's EOF.  If our buffer is resized,
2741 	 * B_CACHE may remain set! XXX
2742 	 */
2743 	toff = bp->b_bcount;
2744 	tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2745 	while ((bp->b_flags & B_CACHE) && toff < size) {
2746 		vm_pindex_t pi;
2747 
2748 		if (tinc > (size - toff))
2749 			tinc = size - toff;
2750 		pi = ((bp->b_offset & PAGE_MASK) + toff) >> PAGE_SHIFT;
2751 		m = bp->b_pages[pi];
2752 		vfs_buf_test_cache(bp, bp->b_offset, toff, tinc, m);
2753 		toff += tinc;
2754 		tinc = PAGE_SIZE;
2755 	}
2756 	VM_OBJECT_WUNLOCK(obj);
2757 
2758 	/*
2759 	 * Step 3, fixup the KVA pmap.
2760 	 */
2761 	if (buf_mapped(bp))
2762 		bpmap_qenter(bp);
2763 	else
2764 		BUF_CHECK_UNMAPPED(bp);
2765 }
2766 
2767 /*
2768  * Check to see if a block at a particular lbn is available for a clustered
2769  * write.
2770  */
2771 static int
2772 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
2773 {
2774 	struct buf *bpa;
2775 	int match;
2776 
2777 	match = 0;
2778 
2779 	/* If the buf isn't in core skip it */
2780 	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
2781 		return (0);
2782 
2783 	/* If the buf is busy we don't want to wait for it */
2784 	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2785 		return (0);
2786 
2787 	/* Only cluster with valid clusterable delayed write buffers */
2788 	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
2789 	    (B_DELWRI | B_CLUSTEROK))
2790 		goto done;
2791 
2792 	if (bpa->b_bufsize != size)
2793 		goto done;
2794 
2795 	/*
2796 	 * Check to see if it is in the expected place on disk and that the
2797 	 * block has been mapped.
2798 	 */
2799 	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
2800 		match = 1;
2801 done:
2802 	BUF_UNLOCK(bpa);
2803 	return (match);
2804 }
2805 
2806 /*
2807  *	vfs_bio_awrite:
2808  *
2809  *	Implement clustered async writes for clearing out B_DELWRI buffers.
2810  *	This is much better than the old way of writing only one buffer at
2811  *	a time.  Note that we may not be presented with the buffers in the
2812  *	correct order, so we search for the cluster in both directions.
2813  */
2814 int
2815 vfs_bio_awrite(struct buf *bp)
2816 {
2817 	struct bufobj *bo;
2818 	int i;
2819 	int j;
2820 	daddr_t lblkno = bp->b_lblkno;
2821 	struct vnode *vp = bp->b_vp;
2822 	int ncl;
2823 	int nwritten;
2824 	int size;
2825 	int maxcl;
2826 	int gbflags;
2827 
2828 	bo = &vp->v_bufobj;
2829 	gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
2830 	/*
2831 	 * Right now we support clustered writing only to regular files.  If
2832 	 * we find a clusterable block we could be in the middle of a cluster
2833 	 * rather than at the beginning.
2834 	 */
2835 	if ((vp->v_type == VREG) &&
2836 	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
2837 	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
2838 
2839 		size = vp->v_mount->mnt_stat.f_iosize;
2840 		maxcl = MAXPHYS / size;
2841 
2842 		BO_RLOCK(bo);
2843 		for (i = 1; i < maxcl; i++)
2844 			if (vfs_bio_clcheck(vp, size, lblkno + i,
2845 			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
2846 				break;
2847 
2848 		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
2849 			if (vfs_bio_clcheck(vp, size, lblkno - j,
2850 			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
2851 				break;
2852 		BO_RUNLOCK(bo);
2853 		--j;
2854 		ncl = i + j;
2855 		/*
2856 		 * this is a possible cluster write
2857 		 */
2858 		if (ncl != 1) {
2859 			BUF_UNLOCK(bp);
2860 			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
2861 			    gbflags);
2862 			return (nwritten);
2863 		}
2864 	}
2865 	bremfree(bp);
2866 	bp->b_flags |= B_ASYNC;
2867 	/*
2868 	 * default (old) behavior, writing out only one block
2869 	 *
2870 	 * XXX returns b_bufsize instead of b_bcount for nwritten?
2871 	 */
2872 	nwritten = bp->b_bufsize;
2873 	(void) bwrite(bp);
2874 
2875 	return (nwritten);
2876 }
2877 
2878 /*
2879  *	getnewbuf_kva:
2880  *
2881  *	Allocate KVA for an empty buf header according to gbflags.
2882  */
2883 static int
2884 getnewbuf_kva(struct buf *bp, int gbflags, int maxsize)
2885 {
2886 
2887 	if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_UNMAPPED) {
2888 		/*
2889 		 * In order to keep fragmentation sane we only allocate kva
2890 		 * in BKVASIZE chunks.  XXX with vmem we can do page size.
2891 		 */
2892 		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
2893 
2894 		if (maxsize != bp->b_kvasize &&
2895 		    bufkva_alloc(bp, maxsize, gbflags))
2896 			return (ENOSPC);
2897 	}
2898 	return (0);
2899 }
2900 
2901 /*
2902  *	getnewbuf:
2903  *
2904  *	Find and initialize a new buffer header, freeing up existing buffers
2905  *	in the bufqueues as necessary.  The new buffer is returned locked.
2906  *
2907  *	We block if:
2908  *		We have insufficient buffer headers
2909  *		We have insufficient buffer space
2910  *		buffer_arena is too fragmented ( space reservation fails )
2911  *		If we have to flush dirty buffers ( but we try to avoid this )
2912  *
2913  *	The caller is responsible for releasing the reserved bufspace after
2914  *	allocbuf() is called.
2915  */
2916 static struct buf *
2917 getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int maxsize, int gbflags)
2918 {
2919 	struct buf *bp;
2920 	bool metadata, reserved;
2921 
2922 	bp = NULL;
2923 	KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
2924 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
2925 	if (!unmapped_buf_allowed)
2926 		gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
2927 
2928 	if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
2929 	    vp->v_type == VCHR)
2930 		metadata = true;
2931 	else
2932 		metadata = false;
2933 	atomic_add_int(&getnewbufcalls, 1);
2934 	reserved = false;
2935 	do {
2936 		if (reserved == false &&
2937 		    bufspace_reserve(maxsize, metadata) != 0)
2938 			continue;
2939 		reserved = true;
2940 		if ((bp = buf_alloc()) == NULL)
2941 			continue;
2942 		if (getnewbuf_kva(bp, gbflags, maxsize) == 0)
2943 			return (bp);
2944 		break;
2945 	} while(buf_scan(false) == 0);
2946 
2947 	if (reserved)
2948 		atomic_subtract_long(&bufspace, maxsize);
2949 	if (bp != NULL) {
2950 		bp->b_flags |= B_INVAL;
2951 		brelse(bp);
2952 	}
2953 	bufspace_wait(vp, gbflags, slpflag, slptimeo);
2954 
2955 	return (NULL);
2956 }
2957 
2958 /*
2959  *	buf_daemon:
2960  *
2961  *	buffer flushing daemon.  Buffers are normally flushed by the
2962  *	update daemon but if it cannot keep up this process starts to
2963  *	take the load in an attempt to prevent getnewbuf() from blocking.
2964  */
2965 static struct kproc_desc buf_kp = {
2966 	"bufdaemon",
2967 	buf_daemon,
2968 	&bufdaemonproc
2969 };
2970 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
2971 
2972 static int
2973 buf_flush(struct vnode *vp, int target)
2974 {
2975 	int flushed;
2976 
2977 	flushed = flushbufqueues(vp, target, 0);
2978 	if (flushed == 0) {
2979 		/*
2980 		 * Could not find any buffers without rollback
2981 		 * dependencies, so just write the first one
2982 		 * in the hopes of eventually making progress.
2983 		 */
2984 		if (vp != NULL && target > 2)
2985 			target /= 2;
2986 		flushbufqueues(vp, target, 1);
2987 	}
2988 	return (flushed);
2989 }
2990 
2991 static void
2992 buf_daemon(void)
2993 {
2994 	int lodirty;
2995 
2996 	/*
2997 	 * This process needs to be suspended prior to shutdown sync.
2998 	 */
2999 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
3000 	    SHUTDOWN_PRI_LAST);
3001 
3002 	/*
3003 	 * This process is allowed to take the buffer cache to the limit
3004 	 */
3005 	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
3006 	mtx_lock(&bdlock);
3007 	for (;;) {
3008 		bd_request = 0;
3009 		mtx_unlock(&bdlock);
3010 
3011 		kproc_suspend_check(bufdaemonproc);
3012 		lodirty = lodirtybuffers;
3013 		if (bd_speedupreq) {
3014 			lodirty = numdirtybuffers / 2;
3015 			bd_speedupreq = 0;
3016 		}
3017 		/*
3018 		 * Do the flush.  Limit the amount of in-transit I/O we
3019 		 * allow to build up, otherwise we would completely saturate
3020 		 * the I/O system.
3021 		 */
3022 		while (numdirtybuffers > lodirty) {
3023 			if (buf_flush(NULL, numdirtybuffers - lodirty) == 0)
3024 				break;
3025 			kern_yield(PRI_USER);
3026 		}
3027 
3028 		/*
3029 		 * Only clear bd_request if we have reached our low water
3030 		 * mark.  The buf_daemon normally waits 1 second and
3031 		 * then incrementally flushes any dirty buffers that have
3032 		 * built up, within reason.
3033 		 *
3034 		 * If we were unable to hit our low water mark and couldn't
3035 		 * find any flushable buffers, we sleep for a short period
3036 		 * to avoid endless loops on unlockable buffers.
3037 		 */
3038 		mtx_lock(&bdlock);
3039 		if (numdirtybuffers <= lodirtybuffers) {
3040 			/*
3041 			 * We reached our low water mark, reset the
3042 			 * request and sleep until we are needed again.
3043 			 * The sleep is just so the suspend code works.
3044 			 */
3045 			bd_request = 0;
3046 			/*
3047 			 * Do an extra wakeup in case dirty threshold
3048 			 * changed via sysctl and the explicit transition
3049 			 * out of shortfall was missed.
3050 			 */
3051 			bdirtywakeup();
3052 			if (runningbufspace <= lorunningspace)
3053 				runningwakeup();
3054 			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
3055 		} else {
3056 			/*
3057 			 * We couldn't find any flushable dirty buffers but
3058 			 * still have too many dirty buffers, so we
3059 			 * have to sleep and try again.  (rare)
3060 			 */
3061 			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
3062 		}
3063 	}
3064 }
3065 
3066 /*
3067  *	flushbufqueues:
3068  *
3069  *	Try to flush a buffer in the dirty queue.  We must be careful to
3070  *	free up B_INVAL buffers instead of write them, which NFS is
3071  *	free up B_INVAL buffers instead of writing them, which NFS is
3072  */
3073 static int flushwithdeps = 0;
3074 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
3075     0, "Number of buffers flushed with dependencies that require rollbacks");
3076 
3077 static int
3078 flushbufqueues(struct vnode *lvp, int target, int flushdeps)
3079 {
3080 	struct buf *sentinel;
3081 	struct vnode *vp;
3082 	struct mount *mp;
3083 	struct buf *bp;
3084 	int hasdeps;
3085 	int flushed;
3086 	int queue;
3087 	int error;
3088 	bool unlock;
3089 
3090 	flushed = 0;
3091 	queue = QUEUE_DIRTY;
3092 	bp = NULL;
3093 	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
3094 	sentinel->b_qindex = QUEUE_SENTINEL;
3095 	mtx_lock(&bqlocks[queue]);
3096 	TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist);
3097 	mtx_unlock(&bqlocks[queue]);
3098 	while (flushed != target) {
3099 		maybe_yield();
3100 		mtx_lock(&bqlocks[queue]);
3101 		bp = TAILQ_NEXT(sentinel, b_freelist);
3102 		if (bp != NULL) {
3103 			TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
3104 			TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel,
3105 			    b_freelist);
3106 		} else {
3107 			mtx_unlock(&bqlocks[queue]);
3108 			break;
3109 		}
3110 		/*
3111 		 * Skip sentinels inserted by other invocations of
3112 		 * flushbufqueues(), taking care not to reorder them.
3113 		 *
3114 		 * Only flush the buffers that belong to the
3115 		 * vnode locked by the curthread.
3116 		 */
3117 		if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
3118 		    bp->b_vp != lvp)) {
3119 			mtx_unlock(&bqlocks[queue]);
3120 			continue;
3121 		}
3122 		error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
3123 		mtx_unlock(&bqlocks[queue]);
3124 		if (error != 0)
3125 			continue;
3126 
3127 		/*
3128 		 * BKGRDINPROG can only be set with the buf and bufobj
3129 		 * locks both held.  We tolerate a race to clear it here.
3130 		 */
3131 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
3132 		    (bp->b_flags & B_DELWRI) == 0) {
3133 			BUF_UNLOCK(bp);
3134 			continue;
3135 		}
3136 		if (bp->b_flags & B_INVAL) {
3137 			bremfreef(bp);
3138 			brelse(bp);
3139 			flushed++;
3140 			continue;
3141 		}
3142 
3143 		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
3144 			if (flushdeps == 0) {
3145 				BUF_UNLOCK(bp);
3146 				continue;
3147 			}
3148 			hasdeps = 1;
3149 		} else
3150 			hasdeps = 0;
3151 		/*
3152 		 * We must hold the lock on a vnode before writing
3153 		 * one of its buffers. Otherwise we may confuse, or
3154 		 * in the case of a snapshot vnode, deadlock the
3155 		 * system.
3156 		 *
3157 		 * The lock order here is the reverse of the normal order
3158 		 * of vnode lock followed by buf lock.  This is ok because
3159 		 * the NOWAIT will prevent deadlock.
3160 		 */
3161 		vp = bp->b_vp;
3162 		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
3163 			BUF_UNLOCK(bp);
3164 			continue;
3165 		}
3166 		if (lvp == NULL) {
3167 			unlock = true;
3168 			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
3169 		} else {
3170 			ASSERT_VOP_LOCKED(vp, "getbuf");
3171 			unlock = false;
3172 			error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
3173 			    vn_lock(vp, LK_TRYUPGRADE);
3174 		}
3175 		if (error == 0) {
3176 			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
3177 			    bp, bp->b_vp, bp->b_flags);
3178 			if (curproc == bufdaemonproc) {
3179 				vfs_bio_awrite(bp);
3180 			} else {
3181 				bremfree(bp);
3182 				bwrite(bp);
3183 				notbufdflushes++;
3184 			}
3185 			vn_finished_write(mp);
3186 			if (unlock)
3187 				VOP_UNLOCK(vp, 0);
3188 			flushwithdeps += hasdeps;
3189 			flushed++;
3190 
3191 			/*
3192 			 * Sleeping on runningbufspace while holding
3193 			 * vnode lock leads to deadlock.
3194 			 */
3195 			if (curproc == bufdaemonproc &&
3196 			    runningbufspace > hirunningspace)
3197 				waitrunningbufspace();
3198 			continue;
3199 		}
3200 		vn_finished_write(mp);
3201 		BUF_UNLOCK(bp);
3202 	}
3203 	mtx_lock(&bqlocks[queue]);
3204 	TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
3205 	mtx_unlock(&bqlocks[queue]);
3206 	free(sentinel, M_TEMP);
3207 	return (flushed);
3208 }
3209 
3210 /*
3211  * Check to see if a block is currently memory resident.
3212  */
3213 struct buf *
3214 incore(struct bufobj *bo, daddr_t blkno)
3215 {
3216 	struct buf *bp;
3217 
3218 	BO_RLOCK(bo);
3219 	bp = gbincore(bo, blkno);
3220 	BO_RUNLOCK(bo);
3221 	return (bp);
3222 }
3223 
3224 /*
3225  * Returns true if no I/O is needed to access the
3226  * associated VM object.  This is like incore except
3227  * it also hunts around in the VM system for the data.
3228  */
3229 
3230 static int
3231 inmem(struct vnode * vp, daddr_t blkno)
3232 {
3233 	vm_object_t obj;
3234 	vm_offset_t toff, tinc, size;
3235 	vm_page_t m;
3236 	vm_ooffset_t off;
3237 
3238 	ASSERT_VOP_LOCKED(vp, "inmem");
3239 
3240 	if (incore(&vp->v_bufobj, blkno))
3241 		return (1);
3242 	if (vp->v_mount == NULL)
3243 		return (0);
3244 	obj = vp->v_object;
3245 	if (obj == NULL)
3246 		return (0);
3247 
3248 	size = PAGE_SIZE;
3249 	if (size > vp->v_mount->mnt_stat.f_iosize)
3250 		size = vp->v_mount->mnt_stat.f_iosize;
3251 	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
3252 
3253 	VM_OBJECT_RLOCK(obj);
3254 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
3255 		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
3256 		if (!m)
3257 			goto notinmem;
3258 		tinc = size;
3259 		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
3260 			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
3261 		if (vm_page_is_valid(m,
3262 		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
3263 			goto notinmem;
3264 	}
3265 	VM_OBJECT_RUNLOCK(obj);
3266 	return (1);
3267 
3268 notinmem:
3269 	VM_OBJECT_RUNLOCK(obj);
3270 	return (0);
3271 }
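
/*
 * Illustrative sketch, not part of this file and excluded from the
 * build: deciding whether a read would hit the cache, in the spirit of
 * the read-ahead checks in breada().  The helper name is hypothetical.
 */
#if 0
static bool
example_block_resident(struct vnode *vp, daddr_t lbn)
{

	ASSERT_VOP_LOCKED(vp, "example_block_resident");
	/*
	 * incore() checks only the buffer cache; inmem() also consults
	 * the backing VM object, so a true result means no I/O is needed.
	 */
	if (incore(&vp->v_bufobj, lbn) != NULL)
		return (true);
	return (inmem(vp, lbn) != 0);
}
#endif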
3272 
3273 /*
3274  * Set the dirty range for a buffer based on the status of the dirty
3275  * bits in the pages comprising the buffer.  The range is limited
3276  * to the size of the buffer.
3277  *
3278  * Tell the VM system that the pages associated with this buffer
3279  * are clean.  This is used for delayed writes where the data is
3280  * going to go to disk eventually without additional VM intervention.
3281  *
3282  * Note that while we only really need to clean through to b_bcount, we
3283  * just go ahead and clean through to b_bufsize.
3284  */
3285 static void
3286 vfs_clean_pages_dirty_buf(struct buf *bp)
3287 {
3288 	vm_ooffset_t foff, noff, eoff;
3289 	vm_page_t m;
3290 	int i;
3291 
3292 	if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
3293 		return;
3294 
3295 	foff = bp->b_offset;
3296 	KASSERT(bp->b_offset != NOOFFSET,
3297 	    ("vfs_clean_pages_dirty_buf: no buffer offset"));
3298 
3299 	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
3300 	vfs_drain_busy_pages(bp);
3301 	vfs_setdirty_locked_object(bp);
3302 	for (i = 0; i < bp->b_npages; i++) {
3303 		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3304 		eoff = noff;
3305 		if (eoff > bp->b_offset + bp->b_bufsize)
3306 			eoff = bp->b_offset + bp->b_bufsize;
3307 		m = bp->b_pages[i];
3308 		vfs_page_set_validclean(bp, foff, m);
3309 		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
3310 		foff = noff;
3311 	}
3312 	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
3313 }
3314 
3315 static void
3316 vfs_setdirty_locked_object(struct buf *bp)
3317 {
3318 	vm_object_t object;
3319 	int i;
3320 
3321 	object = bp->b_bufobj->bo_object;
3322 	VM_OBJECT_ASSERT_WLOCKED(object);
3323 
3324 	/*
3325 	 * We qualify the scan for modified pages on whether the
3326 	 * object has been flushed yet.
3327 	 */
3328 	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) {
3329 		vm_offset_t boffset;
3330 		vm_offset_t eoffset;
3331 
3332 		/*
3333 		 * test the pages to see if they have been modified directly
3334 		 * by users through the VM system.
3335 		 */
3336 		for (i = 0; i < bp->b_npages; i++)
3337 			vm_page_test_dirty(bp->b_pages[i]);
3338 
3339 		/*
3340 		 * Calculate the encompassing dirty range, boffset and eoffset,
3341 		 * (eoffset - boffset) bytes.
3342 		 */
3343 
3344 		for (i = 0; i < bp->b_npages; i++) {
3345 			if (bp->b_pages[i]->dirty)
3346 				break;
3347 		}
3348 		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3349 
3350 		for (i = bp->b_npages - 1; i >= 0; --i) {
3351 			if (bp->b_pages[i]->dirty) {
3352 				break;
3353 			}
3354 		}
3355 		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3356 
3357 		/*
3358 		 * Fit it to the buffer.
3359 		 */
3360 
3361 		if (eoffset > bp->b_bcount)
3362 			eoffset = bp->b_bcount;
3363 
3364 		/*
3365 		 * If we have a good dirty range, merge with the existing
3366 		 * dirty range.
3367 		 */
3368 
3369 		if (boffset < eoffset) {
3370 			if (bp->b_dirtyoff > boffset)
3371 				bp->b_dirtyoff = boffset;
3372 			if (bp->b_dirtyend < eoffset)
3373 				bp->b_dirtyend = eoffset;
3374 		}
3375 	}
3376 }
3377 
3378 /*
3379  * Allocate the KVA mapping for an existing buffer.
3380  * If an unmapped buffer is provided but a mapped buffer is requested, also
3381  * take care to properly set up the mappings between pages and KVA.
3382  */
3383 static void
3384 bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
3385 {
3386 	int bsize, maxsize, need_mapping, need_kva;
3387 	off_t offset;
3388 
3389 	need_mapping = bp->b_data == unmapped_buf &&
3390 	    (gbflags & GB_UNMAPPED) == 0;
3391 	need_kva = bp->b_kvabase == unmapped_buf &&
3392 	    bp->b_data == unmapped_buf &&
3393 	    (gbflags & GB_KVAALLOC) != 0;
3394 	if (!need_mapping && !need_kva)
3395 		return;
3396 
3397 	BUF_CHECK_UNMAPPED(bp);
3398 
3399 	if (need_mapping && bp->b_kvabase != unmapped_buf) {
3400 		/*
3401 		 * Buffer is not mapped, but the KVA was already
3402 		 * reserved at the time of the instantiation.  Use the
3403 		 * allocated space.
3404 		 */
3405 		goto has_addr;
3406 	}
3407 
3408 	/*
3409 	 * Calculate the amount of the address space we would reserve
3410 	 * if the buffer was mapped.
3411 	 */
3412 	bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
3413 	KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3414 	offset = blkno * bsize;
3415 	maxsize = size + (offset & PAGE_MASK);
3416 	maxsize = imax(maxsize, bsize);
3417 
3418 	while (bufkva_alloc(bp, maxsize, gbflags) != 0) {
3419 		if ((gbflags & GB_NOWAIT_BD) != 0) {
3420 			/*
3421 			 * XXXKIB: defragmentation cannot
3422 			 * succeed, not sure what else to do.
3423 			 */
3424 			panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
3425 		}
3426 		atomic_add_int(&mappingrestarts, 1);
3427 		bufspace_wait(bp->b_vp, gbflags, 0, 0);
3428 	}
3429 has_addr:
3430 	if (need_mapping) {
3431 		/* b_offset is handled by bpmap_qenter. */
3432 		bp->b_data = bp->b_kvabase;
3433 		BUF_CHECK_MAPPED(bp);
3434 		bpmap_qenter(bp);
3435 	}
3436 }
3437 
3438 /*
3439  *	getblk:
3440  *
3441  *	Get a block given a specified block and offset into a file/device.
3442  *	The buffer's B_DONE bit will be cleared on return, making it almost
3443  * 	ready for an I/O initiation.  B_INVAL may or may not be set on
3444  *	return.  The caller should clear B_INVAL prior to initiating a
3445  *	READ.
3446  *
3447  *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
3448  *	an existing buffer.
3449  *
3450  *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
3451  *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
3452  *	and then cleared based on the backing VM.  If the previous buffer is
3453  *	non-0-sized but invalid, B_CACHE will be cleared.
3454  *
3455  *	If getblk() must create a new buffer, the new buffer is returned with
3456  *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
3457  *	case it is returned with B_INVAL clear and B_CACHE set based on the
3458  *	backing VM.
3459  *
3460  *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
3461  *	B_CACHE bit is clear.
3462  *
3463  *	What this means, basically, is that the caller should use B_CACHE to
3464  *	determine whether the buffer is fully valid or not and should clear
3465  *	B_INVAL prior to issuing a read.  If the caller intends to validate
3466  *	the buffer by loading its data area with something, the caller needs
3467  *	to clear B_INVAL.  If the caller does this without issuing an I/O,
3468  *	the caller should set B_CACHE ( as an optimization ), else the caller
3469  *	should issue the I/O and biodone() will set B_CACHE if the I/O was
3470  *	a write attempt or if it was a successful read.  If the caller
3471  *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
3472  *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
3473  */
3474 struct buf *
3475 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
3476     int flags)
3477 {
3478 	struct buf *bp;
3479 	struct bufobj *bo;
3480 	int bsize, error, maxsize, vmio;
3481 	off_t offset;
3482 
3483 	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
3484 	KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3485 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3486 	ASSERT_VOP_LOCKED(vp, "getblk");
3487 	if (size > MAXBCACHEBUF)
3488 		panic("getblk: size(%d) > MAXBCACHEBUF(%d)\n", size,
3489 		    MAXBCACHEBUF);
3490 	if (!unmapped_buf_allowed)
3491 		flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3492 
3493 	bo = &vp->v_bufobj;
3494 loop:
3495 	BO_RLOCK(bo);
3496 	bp = gbincore(bo, blkno);
3497 	if (bp != NULL) {
3498 		int lockflags;
3499 		/*
3500 		 * Buffer is in-core.  If the buffer is not busy nor managed,
3501 		 * it must be on a queue.
3502 		 */
3503 		lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
3504 
3505 		if (flags & GB_LOCK_NOWAIT)
3506 			lockflags |= LK_NOWAIT;
3507 
3508 		error = BUF_TIMELOCK(bp, lockflags,
3509 		    BO_LOCKPTR(bo), "getblk", slpflag, slptimeo);
3510 
3511 		/*
3512 		 * If we slept and got the lock we have to restart in case
3513 		 * the buffer changed identities.
3514 		 */
3515 		if (error == ENOLCK)
3516 			goto loop;
3517 		/* We timed out or were interrupted. */
3518 		else if (error)
3519 			return (NULL);
3520 		/* If recursed, assume caller knows the rules. */
3521 		else if (BUF_LOCKRECURSED(bp))
3522 			goto end;
3523 
3524 		/*
3525 		 * The buffer is locked.  B_CACHE is cleared if the buffer is
3526 		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
3527 		 * and for a VMIO buffer B_CACHE is adjusted according to the
3528 		 * backing VM cache.
3529 		 */
3530 		if (bp->b_flags & B_INVAL)
3531 			bp->b_flags &= ~B_CACHE;
3532 		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
3533 			bp->b_flags |= B_CACHE;
3534 		if (bp->b_flags & B_MANAGED)
3535 			MPASS(bp->b_qindex == QUEUE_NONE);
3536 		else
3537 			bremfree(bp);
3538 
3539 		/*
3540 		 * Check for size inconsistencies in the non-VMIO case.
3541 		 */
3542 		if (bp->b_bcount != size) {
3543 			if ((bp->b_flags & B_VMIO) == 0 ||
3544 			    (size > bp->b_kvasize)) {
3545 				if (bp->b_flags & B_DELWRI) {
3546 					bp->b_flags |= B_NOCACHE;
3547 					bwrite(bp);
3548 				} else {
3549 					if (LIST_EMPTY(&bp->b_dep)) {
3550 						bp->b_flags |= B_RELBUF;
3551 						brelse(bp);
3552 					} else {
3553 						bp->b_flags |= B_NOCACHE;
3554 						bwrite(bp);
3555 					}
3556 				}
3557 				goto loop;
3558 			}
3559 		}
3560 
3561 		/*
3562 		 * Handle the case of unmapped buffer which should
3563 		 * become mapped, or the buffer for which KVA
3564 		 * reservation is requested.
3565 		 */
3566 		bp_unmapped_get_kva(bp, blkno, size, flags);
3567 
3568 		/*
3569 		 * If the size is inconsistent in the VMIO case, we can resize
3570 		 * the buffer.  This might lead to B_CACHE getting set or
3571 		 * cleared.  If the size has not changed, B_CACHE remains
3572 		 * unchanged from its previous state.
3573 		 */
3574 		allocbuf(bp, size);
3575 
3576 		KASSERT(bp->b_offset != NOOFFSET,
3577 		    ("getblk: no buffer offset"));
3578 
3579 		/*
3580 		 * A buffer with B_DELWRI set and B_CACHE clear must
3581 		 * be committed before we can return the buffer in
3582 		 * order to prevent the caller from issuing a read
3583 		 * ( due to B_CACHE not being set ) and overwriting
3584 		 * it.
3585 		 *
3586 		 * Most callers, including NFS and FFS, need this to
3587 		 * operate properly either because they assume they
3588 		 * can issue a read if B_CACHE is not set, or because
3589 		 * ( for example ) an uncached B_DELWRI might loop due
3590 		 * to softupdates re-dirtying the buffer.  In the latter
3591 		 * case, B_CACHE is set after the first write completes,
3592 		 * preventing further loops.
3593 		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
3594 		 * above while extending the buffer, we cannot allow the
3595 		 * buffer to remain with B_CACHE set after the write
3596 		 * completes or it will represent a corrupt state.  To
3597 		 * deal with this we set B_NOCACHE to scrap the buffer
3598 		 * after the write.
3599 		 *
3600 		 * We might be able to do something fancy, like setting
3601 		 * B_CACHE in bwrite() except if B_DELWRI is already set,
3602 		 * so the below call doesn't set B_CACHE, but that gets real
3603 		 * confusing.  This is much easier.
3604 		 */
3605 
3606 		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
3607 			bp->b_flags |= B_NOCACHE;
3608 			bwrite(bp);
3609 			goto loop;
3610 		}
3611 		bp->b_flags &= ~B_DONE;
3612 	} else {
3613 		/*
3614 		 * Buffer is not in-core, create new buffer.  The buffer
3615 		 * returned by getnewbuf() is locked.  Note that the returned
3616 		 * buffer is also considered valid (not marked B_INVAL).
3617 		 */
3618 		BO_RUNLOCK(bo);
3619 		/*
3620 		 * If the user does not want us to create the buffer, bail out
3621 		 * here.
3622 		 */
3623 		if (flags & GB_NOCREAT)
3624 			return (NULL);
3625 		if (numfreebuffers == 0 && TD_IS_IDLETHREAD(curthread))
3626 			return (NULL);
3627 
3628 		bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize;
3629 		KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3630 		offset = blkno * bsize;
3631 		vmio = vp->v_object != NULL;
3632 		if (vmio) {
3633 			maxsize = size + (offset & PAGE_MASK);
3634 		} else {
3635 			maxsize = size;
3636 			/* Do not allow non-VMIO unmapped buffers. */
3637 			flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3638 		}
3639 		maxsize = imax(maxsize, bsize);
3640 
3641 		bp = getnewbuf(vp, slpflag, slptimeo, maxsize, flags);
3642 		if (bp == NULL) {
3643 			if (slpflag || slptimeo)
3644 				return (NULL);
3645 			/*
3646 			 * XXX This is here until the sleep path is diagnosed
3647 			 * enough to work under very low memory conditions.
3648 			 *
3649 			 * There's an issue on low-memory, 4BSD+non-preempt
3650 			 * systems (e.g. MIPS routers with 32MB RAM) where buffer
3651 			 * exhaustion occurs without sleeping for buffer
3652 			 * reclamation.  This just sticks in a loop and
3653 			 * constantly attempts to allocate a buffer, which
3654 			 * hits exhaustion and tries to wake up bufdaemon,
3655 			 * which never gets to run because we never yield.
3656 			 *
3657 			 * The real solution is to identify and fix these cases
3658 			 * so we aren't effectively busy-waiting in a loop
3659 			 * until the reclamation path has cycles to run.
3660 			 */
3661 			kern_yield(PRI_USER);
3662 			goto loop;
3663 		}
3664 
3665 		/*
3666 		 * This code is used to make sure that a buffer is not
3667 		 * created while the getnewbuf routine is blocked.
3668 		 * This can be a problem whether the vnode is locked or not.
3669 		 * If the buffer is created out from under us, we have to
3670 		 * throw away the one we just created.
3671 		 *
3672 		 * Note: this must occur before we associate the buffer
3673 		 * with the vp especially considering limitations in
3674 		 * the splay tree implementation when dealing with duplicate
3675 		 * lblkno's.
3676 		 */
3677 		BO_LOCK(bo);
3678 		if (gbincore(bo, blkno)) {
3679 			BO_UNLOCK(bo);
3680 			bp->b_flags |= B_INVAL;
3681 			brelse(bp);
3682 			bufspace_release(maxsize);
3683 			goto loop;
3684 		}
3685 
3686 		/*
3687 		 * Insert the buffer into the hash, so that it can
3688 		 * be found by incore.
3689 		 */
3690 		bp->b_blkno = bp->b_lblkno = blkno;
3691 		bp->b_offset = offset;
3692 		bgetvp(vp, bp);
3693 		BO_UNLOCK(bo);
3694 
3695 		/*
3696 		 * Set the B_VMIO bit and let allocbuf() grow the buffer.
3697 		 * Since the buffer size starts out as 0, B_CACHE will be set
3698 		 * by allocbuf() for the VMIO case prior to it testing the
3699 		 * backing store for validity.
3700 		 */
3701 
3702 		if (vmio) {
3703 			bp->b_flags |= B_VMIO;
3704 			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
3705 			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
3706 			    bp, vp->v_object, bp->b_bufobj->bo_object));
3707 		} else {
3708 			bp->b_flags &= ~B_VMIO;
3709 			KASSERT(bp->b_bufobj->bo_object == NULL,
3710 			    ("ARGH! has b_bufobj->bo_object %p %p\n",
3711 			    bp, bp->b_bufobj->bo_object));
3712 			BUF_CHECK_MAPPED(bp);
3713 		}
3714 
3715 		allocbuf(bp, size);
3716 		bufspace_release(maxsize);
3717 		bp->b_flags &= ~B_DONE;
3718 	}
3719 	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
3720 	BUF_ASSERT_HELD(bp);
3721 end:
3722 	buf_track(bp, __func__);
3723 	KASSERT(bp->b_bufobj == bo,
3724 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3725 	return (bp);
3726 }
3727 
3728 /*
3729  * Get an empty, disassociated buffer of given size.  The buffer is initially
3730  * set to B_INVAL.
3731  */
3732 struct buf *
3733 geteblk(int size, int flags)
3734 {
3735 	struct buf *bp;
3736 	int maxsize;
3737 
3738 	maxsize = (size + BKVAMASK) & ~BKVAMASK;
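	/*
	 * Keep retrying until getnewbuf() succeeds; give up only when the
	 * caller asked not to block on the buf daemon (GB_NOWAIT_BD) and
	 * this thread is itself flushing buffers to relieve a shortage
	 * (TDP_BUFNEED).
	 */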
3739 	while ((bp = getnewbuf(NULL, 0, 0, maxsize, flags)) == NULL) {
3740 		if ((flags & GB_NOWAIT_BD) &&
3741 		    (curthread->td_pflags & TDP_BUFNEED) != 0)
3742 			return (NULL);
3743 	}
3744 	allocbuf(bp, size);
3745 	bufspace_release(maxsize);
3746 	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
3747 	BUF_ASSERT_HELD(bp);
3748 	return (bp);
3749 }
3750 
3751 /*
3752  * Truncate the backing store for a non-vmio buffer.
3753  */
3754 static void
3755 vfs_nonvmio_truncate(struct buf *bp, int newbsize)
3756 {
3757 
3758 	if (bp->b_flags & B_MALLOC) {
3759 		/*
3760 		 * malloced buffers are not shrunk
3761 		 */
3762 		if (newbsize == 0) {
3763 			bufmallocadjust(bp, 0);
3764 			free(bp->b_data, M_BIOBUF);
3765 			bp->b_data = bp->b_kvabase;
3766 			bp->b_flags &= ~B_MALLOC;
3767 		}
3768 		return;
3769 	}
3770 	vm_hold_free_pages(bp, newbsize);
3771 	bufspace_adjust(bp, newbsize);
3772 }
3773 
3774 /*
3775  * Extend the backing for a non-VMIO buffer.
3776  */
3777 static void
3778 vfs_nonvmio_extend(struct buf *bp, int newbsize)
3779 {
3780 	caddr_t origbuf;
3781 	int origbufsize;
3782 
3783 	/*
3784 	 * We only use malloced memory on the first allocation,
3785 	 * and revert to page-allocated memory when the buffer
3786 	 * grows.
3787 	 *
3788 	 * There is a potential SMP race here that could lead
3789 	 * to bufmallocspace slightly exceeding the max.  It
3790 	 * is probably extremely rare and not worth worrying
3791 	 * over.
3792 	 */
3793 	if (bp->b_bufsize == 0 && newbsize <= PAGE_SIZE/2 &&
3794 	    bufmallocspace < maxbufmallocspace) {
3795 		bp->b_data = malloc(newbsize, M_BIOBUF, M_WAITOK);
3796 		bp->b_flags |= B_MALLOC;
3797 		bufmallocadjust(bp, newbsize);
3798 		return;
3799 	}
3800 
3801 	/*
3802 	 * If the buffer is growing on an allocation other than its
3803 	 * first, then we revert to the page-allocation
3804 	 * scheme.
3805 	 */
3806 	origbuf = NULL;
3807 	origbufsize = 0;
3808 	if (bp->b_flags & B_MALLOC) {
3809 		origbuf = bp->b_data;
3810 		origbufsize = bp->b_bufsize;
3811 		bp->b_data = bp->b_kvabase;
3812 		bufmallocadjust(bp, 0);
3813 		bp->b_flags &= ~B_MALLOC;
3814 		newbsize = round_page(newbsize);
3815 	}
3816 	vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize,
3817 	    (vm_offset_t) bp->b_data + newbsize);
3818 	if (origbuf != NULL) {
3819 		bcopy(origbuf, bp->b_data, origbufsize);
3820 		free(origbuf, M_BIOBUF);
3821 	}
3822 	bufspace_adjust(bp, newbsize);
3823 }
3824 
3825 /*
3826  * This code constructs the buffer memory from either anonymous system
3827  * memory (in the case of non-VMIO operations) or from an associated
3828  * VM object (in the case of VMIO operations).  This code is able to
3829  * resize a buffer up or down.
3830  *
3831  * Note that this code is tricky, and has many complications to resolve
3832  * deadlock or inconsistent data situations.  Tread lightly!!!
3833  * There are B_CACHE and B_DELWRI interactions that must be dealt with by
3834  * the caller.  Calling this code willy-nilly can result in the loss of data.
3835  *
3836  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
3837  * B_CACHE for the non-VMIO case.
3838  */
3839 int
3840 allocbuf(struct buf *bp, int size)
3841 {
3842 	int newbsize;
3843 
3844 	BUF_ASSERT_HELD(bp);
3845 
3846 	if (bp->b_bcount == size)
3847 		return (1);
3848 
3849 	if (bp->b_kvasize != 0 && bp->b_kvasize < size)
3850 		panic("allocbuf: buffer too small");
3851 
3852 	newbsize = roundup2(size, DEV_BSIZE);
3853 	if ((bp->b_flags & B_VMIO) == 0) {
3854 		if ((bp->b_flags & B_MALLOC) == 0)
3855 			newbsize = round_page(newbsize);
3856 		/*
3857 		 * Just get anonymous memory from the kernel.  Don't
3858 		 * mess with B_CACHE.
3859 		 */
3860 		if (newbsize < bp->b_bufsize)
3861 			vfs_nonvmio_truncate(bp, newbsize);
3862 		else if (newbsize > bp->b_bufsize)
3863 			vfs_nonvmio_extend(bp, newbsize);
3864 	} else {
3865 		int desiredpages;
3866 
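		/*
		 * Number of pages needed to back the new size, taking into
		 * account the offset of the buffer's data within its first
		 * page (b_offset & PAGE_MASK).
		 */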
3867 		desiredpages = (size == 0) ? 0 :
3868 		    num_pages((bp->b_offset & PAGE_MASK) + newbsize);
3869 
3870 		if (bp->b_flags & B_MALLOC)
3871 			panic("allocbuf: VMIO buffer can't be malloced");
3872 		/*
3873 		 * Set B_CACHE initially if buffer is 0 length or will become
3874 		 * 0-length.
3875 		 */
3876 		if (size == 0 || bp->b_bufsize == 0)
3877 			bp->b_flags |= B_CACHE;
3878 
3879 		if (newbsize < bp->b_bufsize)
3880 			vfs_vmio_truncate(bp, desiredpages);
3881 		/* XXX This looks as if it should be newbsize > b_bufsize */
3882 		else if (size > bp->b_bcount)
3883 			vfs_vmio_extend(bp, desiredpages, size);
3884 		bufspace_adjust(bp, newbsize);
3885 	}
3886 	bp->b_bcount = size;		/* requested buffer size. */
3887 	return (1);
3888 }
3889 
3890 extern int inflight_transient_maps;
3891 
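/*
 * Complete a bio request: tear down any transient KVA mapping created for
 * an unmapped consumer, then either invoke the caller's bio_done method or,
 * if none was provided, mark the bio BIO_DONE and wake up a sleeper in
 * biowait().
 */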
3892 void
3893 biodone(struct bio *bp)
3894 {
3895 	struct mtx *mtxp;
3896 	void (*done)(struct bio *);
3897 	vm_offset_t start, end;
3898 
3899 	biotrack(bp, __func__);
3900 	if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
3901 		bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
3902 		bp->bio_flags |= BIO_UNMAPPED;
3903 		start = trunc_page((vm_offset_t)bp->bio_data);
3904 		end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
3905 		bp->bio_data = unmapped_buf;
3906 		pmap_qremove(start, atop(end - start));
3907 		vmem_free(transient_arena, start, end - start);
3908 		atomic_add_int(&inflight_transient_maps, -1);
3909 	}
3910 	done = bp->bio_done;
3911 	if (done == NULL) {
3912 		mtxp = mtx_pool_find(mtxpool_sleep, bp);
3913 		mtx_lock(mtxp);
3914 		bp->bio_flags |= BIO_DONE;
3915 		wakeup(bp);
3916 		mtx_unlock(mtxp);
3917 	} else
3918 		done(bp);
3919 }
3920 
3921 /*
3922  * Wait for a BIO to finish.
3923  */
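/*
 * A minimal usage sketch, assuming a GEOM consumer "cp" and omitting error
 * handling; the caller leaves bio_done NULL so that biodone() performs the
 * wakeup that biowait() sleeps on:
 *
 *	bp = g_alloc_bio();
 *	bp->bio_cmd = BIO_READ;
 *	bp->bio_offset = offset;
 *	bp->bio_length = length;
 *	bp->bio_data = ptr;
 *	g_io_request(bp, cp);
 *	error = biowait(bp, "exread");
 *	g_destroy_bio(bp);
 */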
3924 int
3925 biowait(struct bio *bp, const char *wchan)
3926 {
3927 	struct mtx *mtxp;
3928 
3929 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3930 	mtx_lock(mtxp);
3931 	while ((bp->bio_flags & BIO_DONE) == 0)
3932 		msleep(bp, mtxp, PRIBIO, wchan, 0);
3933 	mtx_unlock(mtxp);
3934 	if (bp->bio_error != 0)
3935 		return (bp->bio_error);
3936 	if (!(bp->bio_flags & BIO_ERROR))
3937 		return (0);
3938 	return (EIO);
3939 }
3940 
3941 void
3942 biofinish(struct bio *bp, struct devstat *stat, int error)
3943 {
3944 
3945 	if (error) {
3946 		bp->bio_error = error;
3947 		bp->bio_flags |= BIO_ERROR;
3948 	}
3949 	if (stat != NULL)
3950 		devstat_end_transaction_bio(stat, bp);
3951 	biodone(bp);
3952 }
3953 
3954 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
3955 void
3956 biotrack_buf(struct bio *bp, const char *location)
3957 {
3958 
3959 	buf_track(bp->bio_track_bp, location);
3960 }
3961 #endif
3962 
3963 /*
3964  *	bufwait:
3965  *
3966  *	Wait for buffer I/O completion, returning error status.  The buffer
3967  *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
3968  *	error and cleared.
3969  */
3970 int
3971 bufwait(struct buf *bp)
3972 {
3973 	if (bp->b_iocmd == BIO_READ)
3974 		bwait(bp, PRIBIO, "biord");
3975 	else
3976 		bwait(bp, PRIBIO, "biowr");
3977 	if (bp->b_flags & B_EINTR) {
3978 		bp->b_flags &= ~B_EINTR;
3979 		return (EINTR);
3980 	}
3981 	if (bp->b_ioflags & BIO_ERROR) {
3982 		return (bp->b_error ? bp->b_error : EIO);
3983 	} else {
3984 		return (0);
3985 	}
3986 }
3987 
3988 /*
3989  *	bufdone:
3990  *
3991  *	Finish I/O on a buffer, optionally calling a completion function.
3992  *	This is usually called from an interrupt so process blocking is
3993  *	not allowed.
3994  *
3995  *	bufdone is also responsible for setting B_CACHE in a B_VMIO bp.
3996  *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
3997  *	assuming B_INVAL is clear.
3998  *
3999  *	For the VMIO case, we set B_CACHE if the op was a read and no
4000  *	read error occurred, or if the op was a write.  B_CACHE is never
4001  *	set if the buffer is invalid or otherwise uncacheable.
4002  *
4003  *	bufdone does not mess with B_INVAL, allowing the I/O routine or the
4004  *	initiator to leave B_INVAL set to brelse the buffer out of existence
4005  *	in the completion routine.
4006  */
4007 void
4008 bufdone(struct buf *bp)
4009 {
4010 	struct bufobj *dropobj;
4011 	void    (*biodone)(struct buf *);
4012 
4013 	buf_track(bp, __func__);
4014 	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
4015 	dropobj = NULL;
4016 
4017 	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
4018 	BUF_ASSERT_HELD(bp);
4019 
4020 	runningbufwakeup(bp);
4021 	if (bp->b_iocmd == BIO_WRITE)
4022 		dropobj = bp->b_bufobj;
4023 	/* call optional completion function if requested */
4024 	if (bp->b_iodone != NULL) {
4025 		biodone = bp->b_iodone;
4026 		bp->b_iodone = NULL;
4027 		(*biodone) (bp);
4028 		if (dropobj)
4029 			bufobj_wdrop(dropobj);
4030 		return;
4031 	}
4032 
4033 	bufdone_finish(bp);
4034 
4035 	if (dropobj)
4036 		bufobj_wdrop(dropobj);
4037 }
4038 
4039 void
4040 bufdone_finish(struct buf *bp)
4041 {
4042 	BUF_ASSERT_HELD(bp);
4043 
4044 	if (!LIST_EMPTY(&bp->b_dep))
4045 		buf_complete(bp);
4046 
4047 	if (bp->b_flags & B_VMIO) {
4048 		/*
4049 		 * Set B_CACHE if the op was a normal read and no error
4050 		 * occurred.  B_CACHE is set for writes in the b*write()
4051 		 * routines.
4052 		 */
4053 		if (bp->b_iocmd == BIO_READ &&
4054 		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
4055 		    !(bp->b_ioflags & BIO_ERROR))
4056 			bp->b_flags |= B_CACHE;
4057 		vfs_vmio_iodone(bp);
4058 	}
4059 
4060 	/*
4061 	 * For asynchronous completions, release the buffer now. The brelse
4062 	 * will do a wakeup there if necessary - so no need to do a wakeup
4063 	 * here in the async case. The sync case always needs to do a wakeup.
4064 	 */
4065 	if (bp->b_flags & B_ASYNC) {
4066 		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) ||
4067 		    (bp->b_ioflags & BIO_ERROR))
4068 			brelse(bp);
4069 		else
4070 			bqrelse(bp);
4071 	} else
4072 		bdone(bp);
4073 }
4074 
4075 /*
4076  * This routine is called in lieu of bufdone in the case of
4077  * incomplete I/O.  This keeps the busy status for pages
4078  * consistent.
4079  */
4080 void
4081 vfs_unbusy_pages(struct buf *bp)
4082 {
4083 	int i;
4084 	vm_object_t obj;
4085 	vm_page_t m;
4086 
4087 	runningbufwakeup(bp);
4088 	if (!(bp->b_flags & B_VMIO))
4089 		return;
4090 
4091 	obj = bp->b_bufobj->bo_object;
4092 	VM_OBJECT_WLOCK(obj);
4093 	for (i = 0; i < bp->b_npages; i++) {
4094 		m = bp->b_pages[i];
4095 		if (m == bogus_page) {
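			/*
			 * vfs_busy_pages() may have substituted bogus_page
			 * for this page; look up the real page again and
			 * restore the buffer's mapping to it.
			 */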
4096 			m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
4097 			if (!m)
4098 				panic("vfs_unbusy_pages: page missing\n");
4099 			bp->b_pages[i] = m;
4100 			if (buf_mapped(bp)) {
4101 				BUF_CHECK_MAPPED(bp);
4102 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4103 				    bp->b_pages, bp->b_npages);
4104 			} else
4105 				BUF_CHECK_UNMAPPED(bp);
4106 		}
4107 		vm_page_sunbusy(m);
4108 	}
4109 	vm_object_pip_wakeupn(obj, bp->b_npages);
4110 	VM_OBJECT_WUNLOCK(obj);
4111 }
4112 
4113 /*
4114  * vfs_page_set_valid:
4115  *
4116  *	Set the valid bits in a page based on the supplied offset.   The
4117  *	range is restricted to the buffer's size.
4118  *
4119  *	This routine is typically called after a read completes.
4120  */
4121 static void
4122 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4123 {
4124 	vm_ooffset_t eoff;
4125 
4126 	/*
4127 	 * Compute the end offset, eoff, such that [off, eoff) does not span a
4128 	 * page boundary and eoff is not greater than the end of the buffer.
4129 	 * The end of the buffer, in this case, is our file EOF, not the
4130 	 * allocation size of the buffer.
4131 	 */
4132 	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
4133 	if (eoff > bp->b_offset + bp->b_bcount)
4134 		eoff = bp->b_offset + bp->b_bcount;
4135 
4136 	/*
4137 	 * Set valid range.  This is typically the entire buffer and thus the
4138 	 * entire page.
4139 	 */
4140 	if (eoff > off)
4141 		vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
4142 }
4143 
4144 /*
4145  * vfs_page_set_validclean:
4146  *
4147  *	Set the valid bits and clear the dirty bits in a page based on the
4148  *	supplied offset.   The range is restricted to the buffer's size.
4149  */
4150 static void
4151 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4152 {
4153 	vm_ooffset_t soff, eoff;
4154 
4155 	/*
4156 	 * Start and end offsets in buffer.  eoff - soff may not cross a
4157 	 * page boundary or cross the end of the buffer.  The end of the
4158 	 * buffer, in this case, is our file EOF, not the allocation size
4159 	 * of the buffer.
4160 	 */
4161 	soff = off;
4162 	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4163 	if (eoff > bp->b_offset + bp->b_bcount)
4164 		eoff = bp->b_offset + bp->b_bcount;
4165 
4166 	/*
4167 	 * Set valid range.  This is typically the entire buffer and thus the
4168 	 * entire page.
4169 	 */
4170 	if (eoff > soff) {
4171 		vm_page_set_validclean(
4172 		    m,
4173 		   (vm_offset_t) (soff & PAGE_MASK),
4174 		   (vm_offset_t) (eoff - soff)
4175 		);
4176 	}
4177 }
4178 
4179 /*
4180  * Ensure that all buffer pages are not exclusive busied.  If any page is
4181  * exclusive busy, drain it.
4182  */
4183 void
4184 vfs_drain_busy_pages(struct buf *bp)
4185 {
4186 	vm_page_t m;
4187 	int i, last_busied;
4188 
4189 	VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
4190 	last_busied = 0;
4191 	for (i = 0; i < bp->b_npages; i++) {
4192 		m = bp->b_pages[i];
4193 		if (vm_page_xbusied(m)) {
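			/*
			 * Share-busy the pages examined so far so that they
			 * cannot become exclusive busy while the object lock
			 * is dropped to sleep on this page below.
			 */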
4194 			for (; last_busied < i; last_busied++)
4195 				vm_page_sbusy(bp->b_pages[last_busied]);
4196 			while (vm_page_xbusied(m)) {
4197 				vm_page_lock(m);
4198 				VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4199 				vm_page_busy_sleep(m, "vbpage", true);
4200 				VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4201 			}
4202 		}
4203 	}
4204 	for (i = 0; i < last_busied; i++)
4205 		vm_page_sunbusy(bp->b_pages[i]);
4206 }
4207 
4208 /*
4209  * This routine is called before a device strategy routine.
4210  * It is used to tell the VM system that paging I/O is in
4211  * progress, and treat the pages associated with the buffer
4212  * almost as being exclusive busy.  Also the object paging_in_progress
4213  * almost as if they were exclusive busy.  The object's paging_in_progress
4214  * counter is also updated to make sure that the object doesn't become
4215  *
4216  * Since I/O has not been initiated yet, certain buffer flags
4217  * such as BIO_ERROR or B_INVAL may be in an inconsistent state
4218  * and should be ignored.
4219  */
4220 void
4221 vfs_busy_pages(struct buf *bp, int clear_modify)
4222 {
4223 	vm_object_t obj;
4224 	vm_ooffset_t foff;
4225 	vm_page_t m;
4226 	int i;
4227 	bool bogus;
4228 
4229 	if (!(bp->b_flags & B_VMIO))
4230 		return;
4231 
4232 	obj = bp->b_bufobj->bo_object;
4233 	foff = bp->b_offset;
4234 	KASSERT(bp->b_offset != NOOFFSET,
4235 	    ("vfs_busy_pages: no buffer offset"));
4236 	VM_OBJECT_WLOCK(obj);
4237 	vfs_drain_busy_pages(bp);
4238 	if (bp->b_bufsize != 0)
4239 		vfs_setdirty_locked_object(bp);
4240 	bogus = false;
4241 	for (i = 0; i < bp->b_npages; i++) {
4242 		m = bp->b_pages[i];
4243 
4244 		if ((bp->b_flags & B_CLUSTER) == 0) {
4245 			vm_object_pip_add(obj, 1);
4246 			vm_page_sbusy(m);
4247 		}
4248 		/*
4249 		 * When readying a buffer for a read ( i.e.
4250 		 * clear_modify == 0 ), it is important to do
4251 		 * bogus_page replacement for valid pages in
4252 		 * partially instantiated buffers.  Partially
4253 		 * instantiated buffers can, in turn, occur when
4254 		 * reconstituting a buffer from its VM backing store
4255 		 * base.  We only have to do this if B_CACHE is
4256 		 * clear ( which causes the I/O to occur in the
4257 		 * first place ).  The replacement prevents the read
4258 		 * I/O from overwriting potentially dirty VM-backed
4259 		 * pages.  XXX bogus page replacement is, uh, bogus.
4260 		 * It may not work properly with small-block devices.
4261 		 * We need to find a better way.
4262 		 */
4263 		if (clear_modify) {
4264 			pmap_remove_write(m);
4265 			vfs_page_set_validclean(bp, foff, m);
4266 		} else if (m->valid == VM_PAGE_BITS_ALL &&
4267 		    (bp->b_flags & B_CACHE) == 0) {
4268 			bp->b_pages[i] = bogus_page;
4269 			bogus = true;
4270 		}
4271 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4272 	}
4273 	VM_OBJECT_WUNLOCK(obj);
4274 	if (bogus && buf_mapped(bp)) {
4275 		BUF_CHECK_MAPPED(bp);
4276 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4277 		    bp->b_pages, bp->b_npages);
4278 	}
4279 }
4280 
4281 /*
4282  *	vfs_bio_set_valid:
4283  *
4284  *	Set the range within the buffer to valid.  The range is
4285  *	relative to the beginning of the buffer, b_offset.  Note that
4286  *	b_offset itself may be offset from the beginning of the first
4287  *	page.
4288  */
4289 void
4290 vfs_bio_set_valid(struct buf *bp, int base, int size)
4291 {
4292 	int i, n;
4293 	vm_page_t m;
4294 
4295 	if (!(bp->b_flags & B_VMIO))
4296 		return;
4297 
4298 	/*
4299 	 * Fixup base to be relative to beginning of first page.
4300 	 * Set initial n to be the maximum number of bytes in the
4301 	 * first page that can be validated.
4302 	 */
4303 	base += (bp->b_offset & PAGE_MASK);
4304 	n = PAGE_SIZE - (base & PAGE_MASK);
4305 
4306 	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4307 	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4308 		m = bp->b_pages[i];
4309 		if (n > size)
4310 			n = size;
4311 		vm_page_set_valid_range(m, base & PAGE_MASK, n);
4312 		base += n;
4313 		size -= n;
4314 		n = PAGE_SIZE;
4315 	}
4316 	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4317 }
4318 
4319 /*
4320  *	vfs_bio_clrbuf:
4321  *
4322  *	If the specified buffer is a non-VMIO buffer, clear the entire
4323  *	buffer.  If the specified buffer is a VMIO buffer, clear and
4324  *	validate only the previously invalid portions of the buffer.
4325  *	This routine essentially fakes an I/O, so we need to clear
4326  *	BIO_ERROR and B_INVAL.
4327  *
4328  *	Note that while we only theoretically need to clear through b_bcount,
4329  *	we go ahead and clear through b_bufsize.
4330  */
4331 void
4332 vfs_bio_clrbuf(struct buf *bp)
4333 {
4334 	int i, j, mask, sa, ea, slide;
4335 
4336 	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4337 		clrbuf(bp);
4338 		return;
4339 	}
4340 	bp->b_flags &= ~B_INVAL;
4341 	bp->b_ioflags &= ~BIO_ERROR;
4342 	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4343 	if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
4344 	    (bp->b_offset & PAGE_MASK) == 0) {
4345 		if (bp->b_pages[0] == bogus_page)
4346 			goto unlock;
4347 		mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
4348 		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object);
4349 		if ((bp->b_pages[0]->valid & mask) == mask)
4350 			goto unlock;
4351 		if ((bp->b_pages[0]->valid & mask) == 0) {
4352 			pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize);
4353 			bp->b_pages[0]->valid |= mask;
4354 			goto unlock;
4355 		}
4356 	}
4357 	sa = bp->b_offset & PAGE_MASK;
4358 	slide = 0;
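	/*
	 * Walk the buffer's pages.  For each page, sa and ea bound the part
	 * of the page backed by the buffer and mask has one bit per
	 * DEV_BSIZE block in that range; only blocks that are not already
	 * valid are zeroed before the whole range is marked valid.
	 */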
4359 	for (i = 0; i < bp->b_npages; i++, sa = 0) {
4360 		slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4361 		ea = slide & PAGE_MASK;
4362 		if (ea == 0)
4363 			ea = PAGE_SIZE;
4364 		if (bp->b_pages[i] == bogus_page)
4365 			continue;
4366 		j = sa / DEV_BSIZE;
4367 		mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
4368 		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
4369 		if ((bp->b_pages[i]->valid & mask) == mask)
4370 			continue;
4371 		if ((bp->b_pages[i]->valid & mask) == 0)
4372 			pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4373 		else {
4374 			for (; sa < ea; sa += DEV_BSIZE, j++) {
4375 				if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4376 					pmap_zero_page_area(bp->b_pages[i],
4377 					    sa, DEV_BSIZE);
4378 				}
4379 			}
4380 		}
4381 		bp->b_pages[i]->valid |= mask;
4382 	}
4383 unlock:
4384 	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4385 	bp->b_resid = 0;
4386 }
4387 
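/*
 * Zero a byte range of the buffer, through the kernel mapping when the
 * buffer is mapped and page by page via pmap_zero_page_area() when it is
 * not.
 */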
4388 void
4389 vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4390 {
4391 	vm_page_t m;
4392 	int i, n;
4393 
4394 	if (buf_mapped(bp)) {
4395 		BUF_CHECK_MAPPED(bp);
4396 		bzero(bp->b_data + base, size);
4397 	} else {
4398 		BUF_CHECK_UNMAPPED(bp);
4399 		n = PAGE_SIZE - (base & PAGE_MASK);
4400 		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4401 			m = bp->b_pages[i];
4402 			if (n > size)
4403 				n = size;
4404 			pmap_zero_page_area(m, base & PAGE_MASK, n);
4405 			base += n;
4406 			size -= n;
4407 			n = PAGE_SIZE;
4408 		}
4409 	}
4410 }
4411 
4412 /*
4413  * Update buffer flags based on I/O request parameters, optionally releasing the
4414  * buffer.  If it's VMIO or direct I/O, the buffer pages are released to the VM,
4415  * where they may be placed on a page queue (VMIO) or freed immediately (direct
4416  * I/O).  Otherwise the buffer is released to the cache.
4417  */
4418 static void
4419 b_io_dismiss(struct buf *bp, int ioflag, bool release)
4420 {
4421 
4422 	KASSERT((ioflag & IO_NOREUSE) == 0 || (ioflag & IO_VMIO) != 0,
4423 	    ("buf %p non-VMIO noreuse", bp));
4424 
4425 	if ((ioflag & IO_DIRECT) != 0)
4426 		bp->b_flags |= B_DIRECT;
4427 	if ((ioflag & (IO_VMIO | IO_DIRECT)) != 0 && LIST_EMPTY(&bp->b_dep)) {
4428 		bp->b_flags |= B_RELBUF;
4429 		if ((ioflag & IO_NOREUSE) != 0)
4430 			bp->b_flags |= B_NOREUSE;
4431 		if (release)
4432 			brelse(bp);
4433 	} else if (release)
4434 		bqrelse(bp);
4435 }
4436 
4437 void
4438 vfs_bio_brelse(struct buf *bp, int ioflag)
4439 {
4440 
4441 	b_io_dismiss(bp, ioflag, true);
4442 }
4443 
4444 void
4445 vfs_bio_set_flags(struct buf *bp, int ioflag)
4446 {
4447 
4448 	b_io_dismiss(bp, ioflag, false);
4449 }
4450 
4451 /*
4452  * vm_hold_load_pages and vm_hold_free_pages get pages into
4453  * a buffer's address space.  The pages are anonymous and are
4454  * not associated with a file object.
4455  */
4456 static void
4457 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4458 {
4459 	vm_offset_t pg;
4460 	vm_page_t p;
4461 	int index;
4462 
4463 	BUF_CHECK_MAPPED(bp);
4464 
4465 	to = round_page(to);
4466 	from = round_page(from);
4467 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4468 
4469 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
4470 tryagain:
4471 		/*
4472 		 * note: must allocate system pages since blocking here
4473 		 * could interfere with paging I/O, no matter which
4474 		 * process we are.
4475 		 */
4476 		p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
4477 		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
4478 		if (p == NULL) {
4479 			VM_WAIT;
4480 			goto tryagain;
4481 		}
4482 		pmap_qenter(pg, &p, 1);
4483 		bp->b_pages[index] = p;
4484 	}
4485 	bp->b_npages = index;
4486 }
4487 
4488 /* Return pages associated with this buf to the vm system */
4489 static void
4490 vm_hold_free_pages(struct buf *bp, int newbsize)
4491 {
4492 	vm_offset_t from;
4493 	vm_page_t p;
4494 	int index, newnpages;
4495 
4496 	BUF_CHECK_MAPPED(bp);
4497 
4498 	from = round_page((vm_offset_t)bp->b_data + newbsize);
4499 	newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4500 	if (bp->b_npages > newnpages)
4501 		pmap_qremove(from, bp->b_npages - newnpages);
4502 	for (index = newnpages; index < bp->b_npages; index++) {
4503 		p = bp->b_pages[index];
4504 		bp->b_pages[index] = NULL;
4505 		if (vm_page_sbusied(p))
4506 			printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
4507 			    (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno);
4508 		p->wire_count--;
4509 		vm_page_free(p);
4510 		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
4511 	}
4512 	bp->b_npages = newnpages;
4513 }
4514 
4515 /*
4516  * Map an IO request into kernel virtual address space.
4517  *
4518  * All requests are (re)mapped into kernel VA space.
4519  * Notice that we use b_bufsize for the size of the buffer
4520  * to be mapped.  b_bcount might be modified by the driver.
4521  *
4522  * Note that even if the caller determines that the address space should
4523  * be valid, a race or a smaller file mapped into a larger space may
4524  * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
4525  * check the return value.
4526  *
4527  * This function only works with pager buffers.
4528  */
4529 int
4530 vmapbuf(struct buf *bp, int mapbuf)
4531 {
4532 	vm_prot_t prot;
4533 	int pidx;
4534 
4535 	if (bp->b_bufsize < 0)
4536 		return (-1);
4537 	prot = VM_PROT_READ;
4538 	if (bp->b_iocmd == BIO_READ)
4539 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
4540 	if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
4541 	    (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
4542 	    btoc(MAXPHYS))) < 0)
4543 		return (-1);
4544 	bp->b_npages = pidx;
4545 	bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
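	/*
	 * Enter the held pages into the buffer's KVA unless the caller can
	 * handle an unmapped buffer and the platform allows them.
	 */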
4546 	if (mapbuf || !unmapped_buf_allowed) {
4547 		pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
4548 		bp->b_data = bp->b_kvabase + bp->b_offset;
4549 	} else
4550 		bp->b_data = unmapped_buf;
4551 	return(0);
4552 }
4553 
4554 /*
4555  * Free the io map PTEs associated with this IO operation.
4556  * We also invalidate the TLB entries and restore the original b_addr.
4557  *
4558  * This function only works with pager buffers.
4559  */
4560 void
4561 vunmapbuf(struct buf *bp)
4562 {
4563 	int npages;
4564 
4565 	npages = bp->b_npages;
4566 	if (buf_mapped(bp))
4567 		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
4568 	vm_page_unhold_pages(bp->b_pages, npages);
4569 
4570 	bp->b_data = unmapped_buf;
4571 }
4572 
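/*
 * bdone() and bwait() implement a simple completion handshake on B_DONE,
 * using a mutex from the sleep mutex pool hashed on the buffer address so
 * that no dedicated per-buffer lock is needed.
 */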
4573 void
4574 bdone(struct buf *bp)
4575 {
4576 	struct mtx *mtxp;
4577 
4578 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4579 	mtx_lock(mtxp);
4580 	bp->b_flags |= B_DONE;
4581 	wakeup(bp);
4582 	mtx_unlock(mtxp);
4583 }
4584 
4585 void
4586 bwait(struct buf *bp, u_char pri, const char *wchan)
4587 {
4588 	struct mtx *mtxp;
4589 
4590 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4591 	mtx_lock(mtxp);
4592 	while ((bp->b_flags & B_DONE) == 0)
4593 		msleep(bp, mtxp, pri, wchan, 0);
4594 	mtx_unlock(mtxp);
4595 }
4596 
4597 int
4598 bufsync(struct bufobj *bo, int waitfor)
4599 {
4600 
4601 	return (VOP_FSYNC(bo2vnode(bo), waitfor, curthread));
4602 }
4603 
4604 void
4605 bufstrategy(struct bufobj *bo, struct buf *bp)
4606 {
4607 	int i = 0;
4608 	struct vnode *vp;
4609 
4610 	vp = bp->b_vp;
4611 	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
4612 	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
4613 	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
4614 	i = VOP_STRATEGY(vp, bp);
4615 	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
4616 }
4617 
4618 void
4619 bufobj_wrefl(struct bufobj *bo)
4620 {
4621 
4622 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4623 	ASSERT_BO_WLOCKED(bo);
4624 	bo->bo_numoutput++;
4625 }
4626 
4627 void
4628 bufobj_wref(struct bufobj *bo)
4629 {
4630 
4631 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4632 	BO_LOCK(bo);
4633 	bo->bo_numoutput++;
4634 	BO_UNLOCK(bo);
4635 }
4636 
4637 void
4638 bufobj_wdrop(struct bufobj *bo)
4639 {
4640 
4641 	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
4642 	BO_LOCK(bo);
4643 	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
4644 	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
4645 		bo->bo_flag &= ~BO_WWAIT;
4646 		wakeup(&bo->bo_numoutput);
4647 	}
4648 	BO_UNLOCK(bo);
4649 }
4650 
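/*
 * Wait for all output on the bufobj to drain, i.e. for bo_numoutput to
 * reach zero.  Returns zero on success, or the msleep() error on signal
 * or timeout.
 */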
4651 int
4652 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
4653 {
4654 	int error;
4655 
4656 	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
4657 	ASSERT_BO_WLOCKED(bo);
4658 	error = 0;
4659 	while (bo->bo_numoutput) {
4660 		bo->bo_flag |= BO_WWAIT;
4661 		error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo),
4662 		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
4663 		if (error)
4664 			break;
4665 	}
4666 	return (error);
4667 }
4668 
4669 /*
4670  * Set bio_data or bio_ma for struct bio from the struct buf.
4671  */
4672 void
4673 bdata2bio(struct buf *bp, struct bio *bip)
4674 {
4675 
4676 	if (!buf_mapped(bp)) {
4677 		KASSERT(unmapped_buf_allowed, ("unmapped"));
4678 		bip->bio_ma = bp->b_pages;
4679 		bip->bio_ma_n = bp->b_npages;
4680 		bip->bio_data = unmapped_buf;
4681 		bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
4682 		bip->bio_flags |= BIO_UNMAPPED;
4683 		KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
4684 		    PAGE_SIZE == bp->b_npages,
4685 		    ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
4686 		    (long long)bip->bio_length, bip->bio_ma_n));
4687 	} else {
4688 		bip->bio_data = bp->b_data;
4689 		bip->bio_ma = NULL;
4690 	}
4691 }
4692 
4693 /*
4694  * The MIPS pmap code currently doesn't handle aliased pages.
4695  * The VIPT caches may not handle page aliasing themselves, leading
4696  * to data corruption.
4697  *
4698  * As such, this code makes a system extremely unhappy if said
4699  * system doesn't support unaliasing the above situation in hardware.
4700  * Some "recent" systems (e.g. some mips24k/mips74k cores) don't enable
4701  * this feature at build time, so it has to be handled in software.
4702  *
4703  * Once the MIPS pmap/cache code grows to support this function on
4704  * earlier chips, it should be flipped back off.
4705  */
4706 #ifdef	__mips__
4707 static int buf_pager_relbuf = 1;
4708 #else
4709 static int buf_pager_relbuf = 0;
4710 #endif
4711 SYSCTL_INT(_vfs, OID_AUTO, buf_pager_relbuf, CTLFLAG_RWTUN,
4712     &buf_pager_relbuf, 0,
4713     "Make buffer pager release buffers after reading");
4714 
4715 /*
4716  * The buffer pager.  It uses buffer reads to validate pages.
4717  *
4718  * In contrast to the generic local pager from vm/vnode_pager.c, this
4719  * pager correctly and easily handles volumes where the underlying
4720  * device block size is greater than the machine page size.  The
4721  * buffer cache transparently extends the requested page run to be
4722  * aligned at the block boundary, and does the necessary bogus page
4723  * replacements in the addends to avoid obliterating already valid
4724  * pages.
4725  *
4726  * The only non-trivial issue is that the exclusive busy state for
4727  * pages, which is assumed by the vm_pager_getpages() interface, is
4728  * incompatible with the VMIO buffer cache's desire to share-busy the
4729  * pages.  This function performs a trivial downgrade of the pages'
4730  * state before reading buffers, and a less trivial upgrade from the
4731  * shared-busy to excl-busy state after the read.
4732  * shared-busy to excl-busy state after the read.
 */
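/*
 * For illustration only: a filesystem's VOP_GETPAGES implementation might
 * wrap this function roughly as follows, supplying callbacks that map a
 * byte offset to a logical block number and return the block size
 * (xxfs_gbp_getblkno() and xxfs_gbp_getblksz() are hypothetical names):
 *
 *	static int
 *	xxfs_getpages(struct vop_getpages_args *ap)
 *	{
 *
 *		return (vfs_bio_getpages(ap->a_vp, ap->a_m, ap->a_count,
 *		    ap->a_rbehind, ap->a_rahead, xxfs_gbp_getblkno,
 *		    xxfs_gbp_getblksz));
 *	}
 */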
4733 int
4734 vfs_bio_getpages(struct vnode *vp, vm_page_t *ma, int count,
4735     int *rbehind, int *rahead, vbg_get_lblkno_t get_lblkno,
4736     vbg_get_blksize_t get_blksize)
4737 {
4738 	vm_page_t m;
4739 	vm_object_t object;
4740 	struct buf *bp;
4741 	struct mount *mp;
4742 	daddr_t lbn, lbnp;
4743 	vm_ooffset_t la, lb, poff, poffe;
4744 	long bsize;
4745 	int bo_bs, br_flags, error, i, pgsin, pgsin_a, pgsin_b;
4746 	bool redo, lpart;
4747 
4748 	object = vp->v_object;
4749 	mp = vp->v_mount;
4750 	la = IDX_TO_OFF(ma[count - 1]->pindex);
4751 	if (la >= object->un_pager.vnp.vnp_size)
4752 		return (VM_PAGER_BAD);
4753 	lpart = la + PAGE_SIZE > object->un_pager.vnp.vnp_size;
4754 	bo_bs = get_blksize(vp, get_lblkno(vp, IDX_TO_OFF(ma[0]->pindex)));
4755 
4756 	/*
4757 	 * Calculate read-ahead, read-behind, and total pages.
4758 	 */
4759 	pgsin = count;
4760 	lb = IDX_TO_OFF(ma[0]->pindex);
4761 	pgsin_b = OFF_TO_IDX(lb - rounddown2(lb, bo_bs));
4762 	pgsin += pgsin_b;
4763 	if (rbehind != NULL)
4764 		*rbehind = pgsin_b;
4765 	pgsin_a = OFF_TO_IDX(roundup2(la, bo_bs) - la);
4766 	if (la + IDX_TO_OFF(pgsin_a) >= object->un_pager.vnp.vnp_size)
4767 		pgsin_a = OFF_TO_IDX(roundup2(object->un_pager.vnp.vnp_size,
4768 		    PAGE_SIZE) - la);
4769 	pgsin += pgsin_a;
4770 	if (rahead != NULL)
4771 		*rahead = pgsin_a;
4772 	VM_CNT_INC(v_vnodein);
4773 	VM_CNT_ADD(v_vnodepgsin, pgsin);
4774 
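	/*
	 * Request unmapped buffers from bread_gb() when the filesystem
	 * supports them; the pager only needs the backing pages, not a
	 * kernel mapping of the buffer data.
	 */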
4775 	br_flags = (mp != NULL && (mp->mnt_kern_flag & MNTK_UNMAPPED_BUFS)
4776 	    != 0) ? GB_UNMAPPED : 0;
4777 	VM_OBJECT_WLOCK(object);
4778 again:
4779 	for (i = 0; i < count; i++)
4780 		vm_page_busy_downgrade(ma[i]);
4781 	VM_OBJECT_WUNLOCK(object);
4782 
4783 	lbnp = -1;
	/* Ensure a sane return status even if no bread() ends up being issued. */
	error = 0;
4784 	for (i = 0; i < count; i++) {
4785 		m = ma[i];
4786 
4787 		/*
4788 		 * Pages are shared busy and the object lock is not
4789 		 * owned, which together allow for the pages'
4790 		 * invalidation.  The racy test for validity avoids
4791 		 * useless creation of the buffer for the most typical
4792 		 * case when invalidation is not used in redo or for
4793 		 * parallel read.  The shared->excl upgrade loop at
4794 		 * the end of the function catches the race in a
4795 		 * reliable way (protected by the object lock).
4796 		 */
4797 		if (m->valid == VM_PAGE_BITS_ALL)
4798 			continue;
4799 
4800 		poff = IDX_TO_OFF(m->pindex);
4801 		poffe = MIN(poff + PAGE_SIZE, object->un_pager.vnp.vnp_size);
4802 		for (; poff < poffe; poff += bsize) {
4803 			lbn = get_lblkno(vp, poff);
4804 			if (lbn == lbnp)
4805 				goto next_page;
4806 			lbnp = lbn;
4807 
4808 			bsize = get_blksize(vp, lbn);
4809 			error = bread_gb(vp, lbn, bsize, curthread->td_ucred,
4810 			    br_flags, &bp);
4811 			if (error != 0)
4812 				goto end_pages;
4813 			if (LIST_EMPTY(&bp->b_dep)) {
4814 				/*
4815 				 * Invalidation clears m->valid, but
4816 				 * may leave the B_CACHE flag set if the
4817 				 * buffer existed at the invalidation
4818 				 * time.  In this case, recycle the
4819 				 * buffer to do a real read on the next
4820 				 * bread() after redo.
4821 				 *
4822 				 * Otherwise B_RELBUF is not strictly
4823 				 * necessary; enable it to reduce buf
4824 				 * cache pressure.
4825 				 */
4826 				if (buf_pager_relbuf ||
4827 				    m->valid != VM_PAGE_BITS_ALL)
4828 					bp->b_flags |= B_RELBUF;
4829 
4830 				bp->b_flags &= ~B_NOCACHE;
4831 				brelse(bp);
4832 			} else {
4833 				bqrelse(bp);
4834 			}
4835 		}
4836 		KASSERT(1 /* racy, enable for debugging */ ||
4837 		    m->valid == VM_PAGE_BITS_ALL || i == count - 1,
4838 		    ("buf %d %p invalid", i, m));
4839 		if (i == count - 1 && lpart) {
4840 			VM_OBJECT_WLOCK(object);
4841 			if (m->valid != 0 &&
4842 			    m->valid != VM_PAGE_BITS_ALL)
4843 				vm_page_zero_invalid(m, TRUE);
4844 			VM_OBJECT_WUNLOCK(object);
4845 		}
4846 next_page:;
4847 	}
4848 end_pages:
4849 
4850 	VM_OBJECT_WLOCK(object);
4851 	redo = false;
4852 	for (i = 0; i < count; i++) {
4853 		vm_page_sunbusy(ma[i]);
4854 		ma[i] = vm_page_grab(object, ma[i]->pindex, VM_ALLOC_NORMAL);
4855 
4856 		/*
4857 		 * Since the pages were only sbusy while neither the
4858 		 * buffer nor the object lock was held by us, or were
4859 		 * reallocated while vm_page_grab() slept for busy
4860 		 * relinquish, they could have been invalidated.
4861 		 * Recheck the valid bits and re-read as needed.
4862 		 *
4863 		 * Note that the last page is made fully valid in the
4864 		 * read loop, and partial validity for the page at
4865 		 * index count - 1 could mean that the page was
4866 		 * invalidated or removed, so we must restart for
4867 		 * safety as well.
4868 		 */
4869 		if (ma[i]->valid != VM_PAGE_BITS_ALL)
4870 			redo = true;
4871 	}
4872 	if (redo && error == 0)
4873 		goto again;
4874 	VM_OBJECT_WUNLOCK(object);
4875 	return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
4876 }
4877 
4878 #include "opt_ddb.h"
4879 #ifdef DDB
4880 #include <ddb/ddb.h>
4881 
4882 /* DDB command to show buffer data */
4883 DB_SHOW_COMMAND(buffer, db_show_buffer)
4884 {
4885 	/* get args */
4886 	struct buf *bp = (struct buf *)addr;
4887 #ifdef FULL_BUF_TRACKING
4888 	uint32_t i, j;
4889 #endif
4890 
4891 	if (!have_addr) {
4892 		db_printf("usage: show buffer <addr>\n");
4893 		return;
4894 	}
4895 
4896 	db_printf("buf at %p\n", bp);
4897 	db_printf("b_flags = 0x%b, b_xflags=0x%b, b_vflags=0x%b\n",
4898 	    (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags,
4899 	    PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS);
4900 	db_printf(
4901 	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
4902 	    "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, "
4903 	    "b_dep = %p\n",
4904 	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
4905 	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
4906 	    (intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
4907 	db_printf("b_kvabase = %p, b_kvasize = %d\n",
4908 	    bp->b_kvabase, bp->b_kvasize);
4909 	if (bp->b_npages) {
4910 		int i;
4911 		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
4912 		for (i = 0; i < bp->b_npages; i++) {
4913 			vm_page_t m;
4914 			m = bp->b_pages[i];
4915 			if (m != NULL)
4916 				db_printf("(%p, 0x%lx, 0x%lx)", m->object,
4917 				    (u_long)m->pindex,
4918 				    (u_long)VM_PAGE_TO_PHYS(m));
4919 			else
4920 				db_printf("( ??? )");
4921 			if ((i + 1) < bp->b_npages)
4922 				db_printf(",");
4923 		}
4924 		db_printf("\n");
4925 	}
4926 #if defined(FULL_BUF_TRACKING)
4927 	db_printf("b_io_tracking: b_io_tcnt = %u\n", bp->b_io_tcnt);
4928 
4929 	i = bp->b_io_tcnt % BUF_TRACKING_SIZE;
4930 	for (j = 1; j <= BUF_TRACKING_SIZE; j++) {
4931 		if (bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)] == NULL)
4932 			continue;
4933 		db_printf(" %2u: %s\n", j,
4934 		    bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)]);
4935 	}
4936 #elif defined(BUF_TRACKING)
4937 	db_printf("b_io_tracking: %s\n", bp->b_io_tracking);
4938 #endif
4939 	db_printf(" ");
4940 	BUF_LOCKPRINTINFO(bp);
4941 }
4942 
4943 DB_SHOW_COMMAND(lockedbufs, lockedbufs)
4944 {
4945 	struct buf *bp;
4946 	int i;
4947 
4948 	for (i = 0; i < nbuf; i++) {
4949 		bp = &buf[i];
4950 		if (BUF_ISLOCKED(bp)) {
4951 			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4952 			db_printf("\n");
4953 			if (db_pager_quit)
4954 				break;
4955 		}
4956 	}
4957 }
4958 
4959 DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
4960 {
4961 	struct vnode *vp;
4962 	struct buf *bp;
4963 
4964 	if (!have_addr) {
4965 		db_printf("usage: show vnodebufs <addr>\n");
4966 		return;
4967 	}
4968 	vp = (struct vnode *)addr;
4969 	db_printf("Clean buffers:\n");
4970 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
4971 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4972 		db_printf("\n");
4973 	}
4974 	db_printf("Dirty buffers:\n");
4975 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
4976 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4977 		db_printf("\n");
4978 	}
4979 }
4980 
4981 DB_COMMAND(countfreebufs, db_countfreebufs)
4982 {
4983 	struct buf *bp;
4984 	int i, used = 0, nfree = 0;
4985 
4986 	if (have_addr) {
4987 		db_printf("usage: countfreebufs\n");
4988 		return;
4989 	}
4990 
4991 	for (i = 0; i < nbuf; i++) {
4992 		bp = &buf[i];
4993 		if (bp->b_qindex == QUEUE_EMPTY)
4994 			nfree++;
4995 		else
4996 			used++;
4997 	}
4998 
4999 	db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
5000 	    nfree + used);
5001 	db_printf("numfreebuffers is %d\n", numfreebuffers);
5002 }
5003 #endif /* DDB */
5004