xref: /freebsd/sys/kern/vfs_bio.c (revision 3982006ed5587cfd83cc1955186e0aa4b92b3fcd)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2004 Poul-Henning Kamp
5  * Copyright (c) 1994,1997 John S. Dyson
6  * Copyright (c) 2013 The FreeBSD Foundation
7  * All rights reserved.
8  *
9  * Portions of this software were developed by Konstantin Belousov
10  * under sponsorship from the FreeBSD Foundation.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33 
34 /*
35  * this file contains a new buffer I/O scheme implementing a coherent
36  * VM object and buffer cache scheme.  Pains have been taken to make
37  * sure that the performance degradation associated with schemes such
38  * as this is not realized.
39  *
40  * Author:  John S. Dyson
41  * Significant help during the development and debugging phases
42  * had been provided by David Greenman, also of the FreeBSD core team.
43  *
44  * see man buf(9) for more info.
45  */
46 
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/bio.h>
53 #include <sys/conf.h>
54 #include <sys/counter.h>
55 #include <sys/buf.h>
56 #include <sys/devicestat.h>
57 #include <sys/eventhandler.h>
58 #include <sys/fail.h>
59 #include <sys/limits.h>
60 #include <sys/lock.h>
61 #include <sys/malloc.h>
62 #include <sys/mount.h>
63 #include <sys/mutex.h>
64 #include <sys/kernel.h>
65 #include <sys/kthread.h>
66 #include <sys/proc.h>
67 #include <sys/racct.h>
68 #include <sys/resourcevar.h>
69 #include <sys/rwlock.h>
70 #include <sys/smp.h>
71 #include <sys/sysctl.h>
72 #include <sys/sysproto.h>
73 #include <sys/vmem.h>
74 #include <sys/vmmeter.h>
75 #include <sys/vnode.h>
76 #include <sys/watchdog.h>
77 #include <geom/geom.h>
78 #include <vm/vm.h>
79 #include <vm/vm_param.h>
80 #include <vm/vm_kern.h>
81 #include <vm/vm_object.h>
82 #include <vm/vm_page.h>
83 #include <vm/vm_pageout.h>
84 #include <vm/vm_pager.h>
85 #include <vm/vm_extern.h>
86 #include <vm/vm_map.h>
87 #include <vm/swap_pager.h>
88 #include "opt_compat.h"
89 #include "opt_swap.h"
90 
91 static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
92 
93 struct	bio_ops bioops;		/* I/O operation notification */
94 
95 struct	buf_ops buf_ops_bio = {
96 	.bop_name	=	"buf_ops_bio",
97 	.bop_write	=	bufwrite,
98 	.bop_strategy	=	bufstrategy,
99 	.bop_sync	=	bufsync,
100 	.bop_bdflush	=	bufbdflush,
101 };
102 
103 static struct buf *buf;		/* buffer header pool */
104 extern struct buf *swbuf;	/* Swap buffer header pool. */
105 caddr_t unmapped_buf;
106 
107 /* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
108 struct proc *bufdaemonproc;
109 
110 static int inmem(struct vnode *vp, daddr_t blkno);
111 static void vm_hold_free_pages(struct buf *bp, int newbsize);
112 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
113 		vm_offset_t to);
114 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
115 static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
116 		vm_page_t m);
117 static void vfs_clean_pages_dirty_buf(struct buf *bp);
118 static void vfs_setdirty_locked_object(struct buf *bp);
119 static void vfs_vmio_invalidate(struct buf *bp);
120 static void vfs_vmio_truncate(struct buf *bp, int npages);
121 static void vfs_vmio_extend(struct buf *bp, int npages, int size);
122 static int vfs_bio_clcheck(struct vnode *vp, int size,
123 		daddr_t lblkno, daddr_t blkno);
124 static void breada(struct vnode *, daddr_t *, int *, int, struct ucred *, int,
125 		void (*)(struct buf *));
126 static int buf_flush(struct vnode *vp, int);
127 static int flushbufqueues(struct vnode *, int, int);
128 static void buf_daemon(void);
129 static __inline void bd_wakeup(void);
130 static int sysctl_runningspace(SYSCTL_HANDLER_ARGS);
131 static void bufkva_reclaim(vmem_t *, int);
132 static void bufkva_free(struct buf *);
133 static int buf_import(void *, void **, int, int, int);
134 static void buf_release(void *, void **, int);
135 static void maxbcachebuf_adjust(void);
136 
137 static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
138 int vmiodirenable = TRUE;
139 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
140     "Use the VM system for directory writes");
141 long runningbufspace;
142 SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
143     "Amount of presently outstanding async buffer io");
144 SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
145     NULL, 0, sysctl_bufspace, "L", "Physical memory used for buffers");
146 static counter_u64_t bufkvaspace;
147 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace,
148     "Kernel virtual memory used for buffers");
149 static long maxbufspace;
150 SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW, &maxbufspace, 0,
151     "Maximum allowed value of bufspace (including metadata)");
152 static long bufmallocspace;
153 SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
154     "Amount of malloced memory for buffers");
155 static long maxbufmallocspace;
156 SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace,
157     0, "Maximum amount of malloced memory for buffers");
158 static long lobufspace;
159 SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RW, &lobufspace, 0,
160     "Minimum amount of buffers we want to have");
161 long hibufspace;
162 SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RW, &hibufspace, 0,
163     "Maximum allowed value of bufspace (excluding metadata)");
164 long bufspacethresh;
165 SYSCTL_LONG(_vfs, OID_AUTO, bufspacethresh, CTLFLAG_RW, &bufspacethresh,
166     0, "Bufspace consumed before waking the daemon to free some");
167 static counter_u64_t buffreekvacnt;
168 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt,
169     "Number of times we have freed the KVA space from some buffer");
170 static counter_u64_t bufdefragcnt;
171 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt,
172     "Number of times we have had to repeat buffer allocation to defragment");
173 static long lorunningspace;
174 SYSCTL_PROC(_vfs, OID_AUTO, lorunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
175     CTLFLAG_RW, &lorunningspace, 0, sysctl_runningspace, "L",
176     "Minimum preferred space used for in-progress I/O");
177 static long hirunningspace;
178 SYSCTL_PROC(_vfs, OID_AUTO, hirunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
179     CTLFLAG_RW, &hirunningspace, 0, sysctl_runningspace, "L",
180     "Maximum amount of space to use for in-progress I/O");
181 int dirtybufferflushes;
182 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
183     0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
184 int bdwriteskip;
185 SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
186     0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
187 int altbufferflushes;
188 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
189     0, "Number of fsync flushes to limit dirty buffers");
190 static int recursiveflushes;
191 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
192     0, "Number of flushes skipped due to being recursive");
193 static int numdirtybuffers;
194 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
195     "Number of buffers that are dirty (has unwritten changes) at the moment");
196 static int lodirtybuffers;
197 SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
198     "How many buffers we want to have free before bufdaemon can sleep");
199 static int hidirtybuffers;
200 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
201     "When the number of dirty buffers is considered severe");
202 int dirtybufthresh;
203 SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
204     0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
205 static int numfreebuffers;
206 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
207     "Number of free buffers");
208 static int lofreebuffers;
209 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
210    "Target number of free buffers");
211 static int hifreebuffers;
212 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
213    "Threshold for clean buffer recycling");
214 static counter_u64_t getnewbufcalls;
215 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD,
216    &getnewbufcalls, "Number of calls to getnewbuf");
217 static counter_u64_t getnewbufrestarts;
218 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RD,
219     &getnewbufrestarts,
220     "Number of times getnewbuf has had to restart a buffer acquisition");
221 static counter_u64_t mappingrestarts;
222 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RD,
223     &mappingrestarts,
224     "Number of times getblk has had to restart a buffer mapping for "
225     "unmapped buffer");
226 static counter_u64_t numbufallocfails;
227 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, numbufallocfails, CTLFLAG_RW,
228     &numbufallocfails, "Number of times buffer allocations failed");
229 static int flushbufqtarget = 100;
230 SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
231     "Amount of work to do in flushbufqueues when helping bufdaemon");
232 static counter_u64_t notbufdflushes;
233 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes,
234     "Number of dirty buffer flushes done by the bufdaemon helpers");
235 static long barrierwrites;
236 SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW, &barrierwrites, 0,
237     "Number of barrier writes");
238 SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
239     &unmapped_buf_allowed, 0,
240     "Permit the use of the unmapped i/o");
241 int maxbcachebuf = MAXBCACHEBUF;
242 SYSCTL_INT(_vfs, OID_AUTO, maxbcachebuf, CTLFLAG_RDTUN, &maxbcachebuf, 0,
243     "Maximum size of a buffer cache block");
244 
245 /*
246  * This lock synchronizes access to bd_request.
247  */
248 static struct mtx_padalign __exclusive_cache_line bdlock;
249 
250 /*
251  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
252  * waitrunningbufspace().
253  */
254 static struct mtx_padalign __exclusive_cache_line rbreqlock;
255 
256 /*
257  * Lock that protects bdirtywait.
258  */
259 static struct mtx_padalign __exclusive_cache_line bdirtylock;
260 
261 /*
262  * Wakeup point for bufdaemon, as well as indicator of whether it is already
263  * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
264  * is idling.
265  */
266 static int bd_request;
267 
268 /*
269  * Request for the buf daemon to write more buffers than is indicated by
270  * lodirtybuf.  This may be necessary to push out excess dependencies or
271  * defragment the address space where a simple count of the number of dirty
272  * buffers is insufficient to characterize the demand for flushing them.
273  */
274 static int bd_speedupreq;
275 
276 /*
277  * Synchronization (sleep/wakeup) variable for active buffer space requests.
278  * Set when wait starts, cleared prior to wakeup().
279  * Used in runningbufwakeup() and waitrunningbufspace().
280  */
281 static int runningbufreq;
282 
283 /*
284  * Synchronization for bwillwrite() waiters.
285  */
286 static int bdirtywait;
287 
288 /*
289  * Definitions for the buffer free lists.
290  */
291 #define QUEUE_NONE	0	/* on no queue */
292 #define QUEUE_EMPTY	1	/* empty buffer headers */
293 #define QUEUE_DIRTY	2	/* B_DELWRI buffers */
294 #define QUEUE_CLEAN	3	/* non-B_DELWRI buffers */
295 #define QUEUE_SENTINEL	4	/* not an queue index, but mark for sentinel */
296 
297 struct bufqueue {
298 	struct mtx_padalign	bq_lock;
299 	TAILQ_HEAD(, buf)	bq_queue;
300 	uint8_t			bq_index;
301 	uint16_t		bq_subqueue;
302 	int			bq_len;
303 } __aligned(CACHE_LINE_SIZE);
304 
305 #define	BQ_LOCKPTR(bq)		(&(bq)->bq_lock)
306 #define	BQ_LOCK(bq)		mtx_lock(BQ_LOCKPTR((bq)))
307 #define	BQ_UNLOCK(bq)		mtx_unlock(BQ_LOCKPTR((bq)))
308 #define	BQ_ASSERT_LOCKED(bq)	mtx_assert(BQ_LOCKPTR((bq)), MA_OWNED)
309 
310 struct bufqueue __exclusive_cache_line bqempty;
311 struct bufqueue __exclusive_cache_line bqdirty;
312 
313 struct bufdomain {
314 	struct bufqueue	bd_subq[MAXCPU + 1]; /* Per-cpu sub queues + global */
315 	struct bufqueue	*bd_cleanq;
316 	struct mtx_padalign bd_run_lock;
317 	/* Constants */
318 	long		bd_maxbufspace;
319 	long		bd_hibufspace;
320 	long 		bd_lobufspace;
321 	long 		bd_bufspacethresh;
322 	int		bd_hifreebuffers;
323 	int		bd_lofreebuffers;
324 	int		bd_lim;
325 	/* atomics */
326 	int		bd_wanted;
327 	int  __aligned(CACHE_LINE_SIZE)	bd_running;
328 	long __aligned(CACHE_LINE_SIZE) bd_bufspace;
329 	int __aligned(CACHE_LINE_SIZE)	bd_freebuffers;
330 } __aligned(CACHE_LINE_SIZE);
331 
332 #define	BD_LOCKPTR(bd)		(&(bd)->bd_cleanq->bq_lock)
333 #define	BD_LOCK(bd)		mtx_lock(BD_LOCKPTR((bd)))
334 #define	BD_UNLOCK(bd)		mtx_unlock(BD_LOCKPTR((bd)))
335 #define	BD_ASSERT_LOCKED(bd)	mtx_assert(BD_LOCKPTR((bd)), MA_OWNED)
336 #define	BD_RUN_LOCKPTR(bd)	(&(bd)->bd_run_lock)
337 #define	BD_RUN_LOCK(bd)		mtx_lock(BD_RUN_LOCKPTR((bd)))
338 #define	BD_RUN_UNLOCK(bd)	mtx_unlock(BD_RUN_LOCKPTR((bd)))
339 #define	BD_DOMAIN(bd)		(bd - bdclean)
340 
341 /* Maximum number of clean buffer domains. */
342 #define	CLEAN_DOMAINS	8
343 
344 /* Configured number of clean queues. */
345 static int __read_mostly clean_domains;
346 
347 struct bufdomain __exclusive_cache_line bdclean[CLEAN_DOMAINS];
348 
349 static void bq_remove(struct bufqueue *bq, struct buf *bp);
350 static void bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock);
351 static int buf_recycle(struct bufdomain *, bool kva);
352 static void bq_init(struct bufqueue *bq, int qindex, int cpu,
353 	    const char *lockname);
354 static void bd_init(struct bufdomain *bd);
355 static int bd_flushall(struct bufdomain *bd);
356 
357 /*
358  * per-cpu empty buffer cache.
359  */
360 uma_zone_t buf_zone;
361 
362 /*
363  * Single global constant for BUF_WMESG, to avoid getting multiple references.
364  * buf_wmesg is referred from macros.
365  */
366 const char *buf_wmesg = BUF_WMESG;
367 
368 static int
369 sysctl_runningspace(SYSCTL_HANDLER_ARGS)
370 {
371 	long value;
372 	int error;
373 
374 	value = *(long *)arg1;
375 	error = sysctl_handle_long(oidp, &value, 0, req);
376 	if (error != 0 || req->newptr == NULL)
377 		return (error);
378 	mtx_lock(&rbreqlock);
379 	if (arg1 == &hirunningspace) {
380 		if (value < lorunningspace)
381 			error = EINVAL;
382 		else
383 			hirunningspace = value;
384 	} else {
385 		KASSERT(arg1 == &lorunningspace,
386 		    ("%s: unknown arg1", __func__));
387 		if (value > hirunningspace)
388 			error = EINVAL;
389 		else
390 			lorunningspace = value;
391 	}
392 	mtx_unlock(&rbreqlock);
393 	return (error);
394 }
395 
396 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
397     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
398 static int
399 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
400 {
401 	long lvalue;
402 	int ivalue;
403 	int i;
404 
405 	lvalue = 0;
406 	for (i = 0; i < clean_domains; i++)
407 		lvalue += bdclean[i].bd_bufspace;
408 	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
409 		return (sysctl_handle_long(oidp, &lvalue, 0, req));
410 	if (lvalue > INT_MAX)
411 		/* On overflow, still write out a long to trigger ENOMEM. */
412 		return (sysctl_handle_long(oidp, &lvalue, 0, req));
413 	ivalue = lvalue;
414 	return (sysctl_handle_int(oidp, &ivalue, 0, req));
415 }
416 #else
417 static int
418 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
419 {
420 	long lvalue;
421 	int i;
422 
423 	lvalue = 0;
424 	for (i = 0; i < clean_domains; i++)
425 		lvalue += bdclean[i].bd_bufspace;
426 	return (sysctl_handle_int(oidp, &lvalue, 0, req));
427 }
428 #endif
429 
430 /*
431  *	bdirtywakeup:
432  *
433  *	Wakeup any bwillwrite() waiters.
434  */
435 static void
436 bdirtywakeup(void)
437 {
438 	mtx_lock(&bdirtylock);
439 	if (bdirtywait) {
440 		bdirtywait = 0;
441 		wakeup(&bdirtywait);
442 	}
443 	mtx_unlock(&bdirtylock);
444 }
445 
446 /*
447  *	bdirtysub:
448  *
449  *	Decrement the numdirtybuffers count by one and wakeup any
450  *	threads blocked in bwillwrite().
451  */
452 static void
453 bdirtysub(void)
454 {
455 
456 	if (atomic_fetchadd_int(&numdirtybuffers, -1) ==
457 	    (lodirtybuffers + hidirtybuffers) / 2)
458 		bdirtywakeup();
459 }
460 
461 /*
462  *	bdirtyadd:
463  *
464  *	Increment the numdirtybuffers count by one and wakeup the buf
465  *	daemon if needed.
466  */
467 static void
468 bdirtyadd(void)
469 {
470 
471 	/*
472 	 * Only do the wakeup once as we cross the boundary.  The
473 	 * buf daemon will keep running until the condition clears.
474 	 */
475 	if (atomic_fetchadd_int(&numdirtybuffers, 1) ==
476 	    (lodirtybuffers + hidirtybuffers) / 2)
477 		bd_wakeup();
478 }
479 
480 /*
481  *	bufspace_daemon_wakeup:
482  *
483  *	Wakeup the daemons responsible for freeing clean bufs.
484  */
485 static void
486 bufspace_daemon_wakeup(struct bufdomain *bd)
487 {
488 
489 	/*
490 	 * avoid the lock if the daemon is running.
491 	 */
492 	if (atomic_fetchadd_int(&bd->bd_running, 1) == 0) {
493 		BD_RUN_LOCK(bd);
494 		atomic_store_int(&bd->bd_running, 1);
495 		wakeup(&bd->bd_running);
496 		BD_RUN_UNLOCK(bd);
497 	}
498 }
499 
500 /*
501  *	bufspace_daemon_wait:
502  *
503  *	Sleep until the domain falls below a limit or one second passes.
504  */
505 static void
506 bufspace_daemon_wait(struct bufdomain *bd)
507 {
508 	/*
509 	 * Re-check our limits and sleep.  bd_running must be
510 	 * cleared prior to checking the limits to avoid missed
511 	 * wakeups.  The waker will adjust one of bufspace or
512 	 * freebuffers prior to checking bd_running.
513 	 */
514 	BD_RUN_LOCK(bd);
515 	atomic_store_int(&bd->bd_running, 0);
516 	if (bd->bd_bufspace < bd->bd_bufspacethresh &&
517 	    bd->bd_freebuffers > bd->bd_lofreebuffers) {
518 		msleep(&bd->bd_running, BD_RUN_LOCKPTR(bd), PRIBIO|PDROP,
519 		    "-", hz);
520 	} else {
521 		/* Avoid spurious wakeups while running. */
522 		atomic_store_int(&bd->bd_running, 1);
523 		BD_RUN_UNLOCK(bd);
524 	}
525 }
526 
527 /*
528  *	bufspace_adjust:
529  *
530  *	Adjust the reported bufspace for a KVA managed buffer, possibly
531  * 	waking any waiters.
532  */
533 static void
534 bufspace_adjust(struct buf *bp, int bufsize)
535 {
536 	struct bufdomain *bd;
537 	long space;
538 	int diff;
539 
540 	KASSERT((bp->b_flags & B_MALLOC) == 0,
541 	    ("bufspace_adjust: malloc buf %p", bp));
542 	bd = &bdclean[bp->b_domain];
543 	diff = bufsize - bp->b_bufsize;
544 	if (diff < 0) {
545 		atomic_subtract_long(&bd->bd_bufspace, -diff);
546 	} else {
547 		space = atomic_fetchadd_long(&bd->bd_bufspace, diff);
548 		/* Wake up the daemon on the transition. */
549 		if (space < bd->bd_bufspacethresh &&
550 		    space + diff >= bd->bd_bufspacethresh)
551 			bufspace_daemon_wakeup(bd);
552 	}
553 	bp->b_bufsize = bufsize;
554 }
555 
556 /*
557  *	bufspace_reserve:
558  *
559  *	Reserve bufspace before calling allocbuf().  metadata has a
560  *	different space limit than data.
561  */
562 static int
563 bufspace_reserve(struct bufdomain *bd, int size, bool metadata)
564 {
565 	long limit, new;
566 	long space;
567 
568 	if (metadata)
569 		limit = bd->bd_maxbufspace;
570 	else
571 		limit = bd->bd_hibufspace;
572 	space = atomic_fetchadd_long(&bd->bd_bufspace, size);
573 	new = space + size;
574 	if (new > limit) {
575 		atomic_subtract_long(&bd->bd_bufspace, size);
576 		return (ENOSPC);
577 	}
578 
579 	/* Wake up the daemon on the transition. */
580 	if (space < bd->bd_bufspacethresh && new >= bd->bd_bufspacethresh)
581 		bufspace_daemon_wakeup(bd);
582 
583 	return (0);
584 }
585 
586 /*
587  *	bufspace_release:
588  *
589  *	Release reserved bufspace after bufspace_adjust() has consumed it.
590  */
591 static void
592 bufspace_release(struct bufdomain *bd, int size)
593 {
594 
595 	atomic_subtract_long(&bd->bd_bufspace, size);
596 }
597 
598 /*
599  *	bufspace_wait:
600  *
601  *	Wait for bufspace, acting as the buf daemon if a locked vnode is
602  *	supplied.  bd_wanted must be set prior to polling for space.  The
603  *	operation must be re-tried on return.
604  */
605 static void
606 bufspace_wait(struct bufdomain *bd, struct vnode *vp, int gbflags,
607     int slpflag, int slptimeo)
608 {
609 	struct thread *td;
610 	int error, fl, norunbuf;
611 
612 	if ((gbflags & GB_NOWAIT_BD) != 0)
613 		return;
614 
615 	td = curthread;
616 	BD_LOCK(bd);
617 	while (bd->bd_wanted) {
618 		if (vp != NULL && vp->v_type != VCHR &&
619 		    (td->td_pflags & TDP_BUFNEED) == 0) {
620 			BD_UNLOCK(bd);
621 			/*
622 			 * getblk() is called with a vnode locked, and
623 			 * some majority of the dirty buffers may as
624 			 * well belong to the vnode.  Flushing the
625 			 * buffers there would make a progress that
626 			 * cannot be achieved by the buf_daemon, that
627 			 * cannot lock the vnode.
628 			 */
629 			norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
630 			    (td->td_pflags & TDP_NORUNNINGBUF);
631 
632 			/*
633 			 * Play bufdaemon.  The getnewbuf() function
634 			 * may be called while the thread owns lock
635 			 * for another dirty buffer for the same
636 			 * vnode, which makes it impossible to use
637 			 * VOP_FSYNC() there, due to the buffer lock
638 			 * recursion.
639 			 */
640 			td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
641 			fl = buf_flush(vp, flushbufqtarget);
642 			td->td_pflags &= norunbuf;
643 			BD_LOCK(bd);
644 			if (fl != 0)
645 				continue;
646 			if (bd->bd_wanted == 0)
647 				break;
648 		}
649 		error = msleep(&bd->bd_wanted, BD_LOCKPTR(bd),
650 		    (PRIBIO + 4) | slpflag, "newbuf", slptimeo);
651 		if (error != 0)
652 			break;
653 	}
654 	BD_UNLOCK(bd);
655 }
656 
657 
658 /*
659  *	bufspace_daemon:
660  *
661  *	buffer space management daemon.  Tries to maintain some marginal
662  *	amount of free buffer space so that requesting processes neither
663  *	block nor work to reclaim buffers.
664  */
665 static void
666 bufspace_daemon(void *arg)
667 {
668 	struct bufdomain *bd;
669 
670 	bd = arg;
671 	for (;;) {
672 		kproc_suspend_check(curproc);
673 
674 		/*
675 		 * Free buffers from the clean queue until we meet our
676 		 * targets.
677 		 *
678 		 * Theory of operation:  The buffer cache is most efficient
679 		 * when some free buffer headers and space are always
680 		 * available to getnewbuf().  This daemon attempts to prevent
681 		 * the excessive blocking and synchronization associated
682 		 * with shortfall.  It goes through three phases according
683 		 * demand:
684 		 *
685 		 * 1)	The daemon wakes up voluntarily once per-second
686 		 *	during idle periods when the counters are below
687 		 *	the wakeup thresholds (bufspacethresh, lofreebuffers).
688 		 *
689 		 * 2)	The daemon wakes up as we cross the thresholds
690 		 *	ahead of any potential blocking.  This may bounce
691 		 *	slightly according to the rate of consumption and
692 		 *	release.
693 		 *
694 		 * 3)	The daemon and consumers are starved for working
695 		 *	clean buffers.  This is the 'bufspace' sleep below
696 		 *	which will inefficiently trade bufs with bqrelse
697 		 *	until we return to condition 2.
698 		 */
699 		do {
700 			if (buf_recycle(bd, false) != 0) {
701 				if (bd_flushall(bd))
702 					continue;
703 				BD_LOCK(bd);
704 				if (bd->bd_wanted) {
705 					msleep(&bd->bd_wanted, BD_LOCKPTR(bd),
706 					    PRIBIO|PDROP, "bufspace", hz/10);
707 				} else
708 					BD_UNLOCK(bd);
709 			}
710 			maybe_yield();
711 		} while (bd->bd_bufspace > bd->bd_lobufspace ||
712 		    bd->bd_freebuffers < bd->bd_hifreebuffers);
713 
714 		bufspace_daemon_wait(bd);
715 	}
716 }
717 
718 /*
719  *	bufmallocadjust:
720  *
721  *	Adjust the reported bufspace for a malloc managed buffer, possibly
722  *	waking any waiters.
723  */
724 static void
725 bufmallocadjust(struct buf *bp, int bufsize)
726 {
727 	int diff;
728 
729 	KASSERT((bp->b_flags & B_MALLOC) != 0,
730 	    ("bufmallocadjust: non-malloc buf %p", bp));
731 	diff = bufsize - bp->b_bufsize;
732 	if (diff < 0)
733 		atomic_subtract_long(&bufmallocspace, -diff);
734 	else
735 		atomic_add_long(&bufmallocspace, diff);
736 	bp->b_bufsize = bufsize;
737 }
738 
739 /*
740  *	runningwakeup:
741  *
742  *	Wake up processes that are waiting on asynchronous writes to fall
743  *	below lorunningspace.
744  */
745 static void
746 runningwakeup(void)
747 {
748 
749 	mtx_lock(&rbreqlock);
750 	if (runningbufreq) {
751 		runningbufreq = 0;
752 		wakeup(&runningbufreq);
753 	}
754 	mtx_unlock(&rbreqlock);
755 }
756 
757 /*
758  *	runningbufwakeup:
759  *
760  *	Decrement the outstanding write count according.
761  */
762 void
763 runningbufwakeup(struct buf *bp)
764 {
765 	long space, bspace;
766 
767 	bspace = bp->b_runningbufspace;
768 	if (bspace == 0)
769 		return;
770 	space = atomic_fetchadd_long(&runningbufspace, -bspace);
771 	KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
772 	    space, bspace));
773 	bp->b_runningbufspace = 0;
774 	/*
775 	 * Only acquire the lock and wakeup on the transition from exceeding
776 	 * the threshold to falling below it.
777 	 */
778 	if (space < lorunningspace)
779 		return;
780 	if (space - bspace > lorunningspace)
781 		return;
782 	runningwakeup();
783 }
784 
785 /*
786  *	waitrunningbufspace()
787  *
788  *	runningbufspace is a measure of the amount of I/O currently
789  *	running.  This routine is used in async-write situations to
790  *	prevent creating huge backups of pending writes to a device.
791  *	Only asynchronous writes are governed by this function.
792  *
793  *	This does NOT turn an async write into a sync write.  It waits
794  *	for earlier writes to complete and generally returns before the
795  *	caller's write has reached the device.
796  */
797 void
798 waitrunningbufspace(void)
799 {
800 
801 	mtx_lock(&rbreqlock);
802 	while (runningbufspace > hirunningspace) {
803 		runningbufreq = 1;
804 		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
805 	}
806 	mtx_unlock(&rbreqlock);
807 }
808 
809 
810 /*
811  *	vfs_buf_test_cache:
812  *
813  *	Called when a buffer is extended.  This function clears the B_CACHE
814  *	bit if the newly extended portion of the buffer does not contain
815  *	valid data.
816  */
817 static __inline void
818 vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff, vm_offset_t off,
819     vm_offset_t size, vm_page_t m)
820 {
821 
822 	VM_OBJECT_ASSERT_LOCKED(m->object);
823 	if (bp->b_flags & B_CACHE) {
824 		int base = (foff + off) & PAGE_MASK;
825 		if (vm_page_is_valid(m, base, size) == 0)
826 			bp->b_flags &= ~B_CACHE;
827 	}
828 }
829 
830 /* Wake up the buffer daemon if necessary */
831 static void
832 bd_wakeup(void)
833 {
834 
835 	mtx_lock(&bdlock);
836 	if (bd_request == 0) {
837 		bd_request = 1;
838 		wakeup(&bd_request);
839 	}
840 	mtx_unlock(&bdlock);
841 }
842 
843 /*
844  * Adjust the maxbcachbuf tunable.
845  */
846 static void
847 maxbcachebuf_adjust(void)
848 {
849 	int i;
850 
851 	/*
852 	 * maxbcachebuf must be a power of 2 >= MAXBSIZE.
853 	 */
854 	i = 2;
855 	while (i * 2 <= maxbcachebuf)
856 		i *= 2;
857 	maxbcachebuf = i;
858 	if (maxbcachebuf < MAXBSIZE)
859 		maxbcachebuf = MAXBSIZE;
860 	if (maxbcachebuf > MAXPHYS)
861 		maxbcachebuf = MAXPHYS;
862 	if (bootverbose != 0 && maxbcachebuf != MAXBCACHEBUF)
863 		printf("maxbcachebuf=%d\n", maxbcachebuf);
864 }
865 
866 /*
867  * bd_speedup - speedup the buffer cache flushing code
868  */
869 void
870 bd_speedup(void)
871 {
872 	int needwake;
873 
874 	mtx_lock(&bdlock);
875 	needwake = 0;
876 	if (bd_speedupreq == 0 || bd_request == 0)
877 		needwake = 1;
878 	bd_speedupreq = 1;
879 	bd_request = 1;
880 	if (needwake)
881 		wakeup(&bd_request);
882 	mtx_unlock(&bdlock);
883 }
884 
885 #ifndef NSWBUF_MIN
886 #define	NSWBUF_MIN	16
887 #endif
888 
889 #ifdef __i386__
890 #define	TRANSIENT_DENOM	5
891 #else
892 #define	TRANSIENT_DENOM 10
893 #endif
894 
895 /*
896  * Calculating buffer cache scaling values and reserve space for buffer
897  * headers.  This is called during low level kernel initialization and
898  * may be called more then once.  We CANNOT write to the memory area
899  * being reserved at this time.
900  */
901 caddr_t
902 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
903 {
904 	int tuned_nbuf;
905 	long maxbuf, maxbuf_sz, buf_sz,	biotmap_sz;
906 
907 	/*
908 	 * physmem_est is in pages.  Convert it to kilobytes (assumes
909 	 * PAGE_SIZE is >= 1K)
910 	 */
911 	physmem_est = physmem_est * (PAGE_SIZE / 1024);
912 
913 	maxbcachebuf_adjust();
914 	/*
915 	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
916 	 * For the first 64MB of ram nominally allocate sufficient buffers to
917 	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
918 	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
919 	 * the buffer cache we limit the eventual kva reservation to
920 	 * maxbcache bytes.
921 	 *
922 	 * factor represents the 1/4 x ram conversion.
923 	 */
924 	if (nbuf == 0) {
925 		int factor = 4 * BKVASIZE / 1024;
926 
927 		nbuf = 50;
928 		if (physmem_est > 4096)
929 			nbuf += min((physmem_est - 4096) / factor,
930 			    65536 / factor);
931 		if (physmem_est > 65536)
932 			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
933 			    32 * 1024 * 1024 / (factor * 5));
934 
935 		if (maxbcache && nbuf > maxbcache / BKVASIZE)
936 			nbuf = maxbcache / BKVASIZE;
937 		tuned_nbuf = 1;
938 	} else
939 		tuned_nbuf = 0;
940 
941 	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
942 	maxbuf = (LONG_MAX / 3) / BKVASIZE;
943 	if (nbuf > maxbuf) {
944 		if (!tuned_nbuf)
945 			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
946 			    maxbuf);
947 		nbuf = maxbuf;
948 	}
949 
950 	/*
951 	 * Ideal allocation size for the transient bio submap is 10%
952 	 * of the maximal space buffer map.  This roughly corresponds
953 	 * to the amount of the buffer mapped for typical UFS load.
954 	 *
955 	 * Clip the buffer map to reserve space for the transient
956 	 * BIOs, if its extent is bigger than 90% (80% on i386) of the
957 	 * maximum buffer map extent on the platform.
958 	 *
959 	 * The fall-back to the maxbuf in case of maxbcache unset,
960 	 * allows to not trim the buffer KVA for the architectures
961 	 * with ample KVA space.
962 	 */
963 	if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
964 		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
965 		buf_sz = (long)nbuf * BKVASIZE;
966 		if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
967 		    (TRANSIENT_DENOM - 1)) {
968 			/*
969 			 * There is more KVA than memory.  Do not
970 			 * adjust buffer map size, and assign the rest
971 			 * of maxbuf to transient map.
972 			 */
973 			biotmap_sz = maxbuf_sz - buf_sz;
974 		} else {
975 			/*
976 			 * Buffer map spans all KVA we could afford on
977 			 * this platform.  Give 10% (20% on i386) of
978 			 * the buffer map to the transient bio map.
979 			 */
980 			biotmap_sz = buf_sz / TRANSIENT_DENOM;
981 			buf_sz -= biotmap_sz;
982 		}
983 		if (biotmap_sz / INT_MAX > MAXPHYS)
984 			bio_transient_maxcnt = INT_MAX;
985 		else
986 			bio_transient_maxcnt = biotmap_sz / MAXPHYS;
987 		/*
988 		 * Artificially limit to 1024 simultaneous in-flight I/Os
989 		 * using the transient mapping.
990 		 */
991 		if (bio_transient_maxcnt > 1024)
992 			bio_transient_maxcnt = 1024;
993 		if (tuned_nbuf)
994 			nbuf = buf_sz / BKVASIZE;
995 	}
996 
997 	/*
998 	 * swbufs are used as temporary holders for I/O, such as paging I/O.
999 	 * We have no less then 16 and no more then 256.
1000 	 */
1001 	nswbuf = min(nbuf / 4, 256);
1002 	TUNABLE_INT_FETCH("kern.nswbuf", &nswbuf);
1003 	if (nswbuf < NSWBUF_MIN)
1004 		nswbuf = NSWBUF_MIN;
1005 
1006 	/*
1007 	 * Reserve space for the buffer cache buffers
1008 	 */
1009 	swbuf = (void *)v;
1010 	v = (caddr_t)(swbuf + nswbuf);
1011 	buf = (void *)v;
1012 	v = (caddr_t)(buf + nbuf);
1013 
1014 	return(v);
1015 }
1016 
1017 /* Initialize the buffer subsystem.  Called before use of any buffers. */
1018 void
1019 bufinit(void)
1020 {
1021 	struct buf *bp;
1022 	int i;
1023 
1024 	KASSERT(maxbcachebuf >= MAXBSIZE,
1025 	    ("maxbcachebuf (%d) must be >= MAXBSIZE (%d)\n", maxbcachebuf,
1026 	    MAXBSIZE));
1027 	bq_init(&bqempty, QUEUE_EMPTY, -1, "bufq empty lock");
1028 	bq_init(&bqdirty, QUEUE_DIRTY, -1, "bufq dirty lock");
1029 	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
1030 	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
1031 	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
1032 
1033 	unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);
1034 
1035 	/* finally, initialize each buffer header and stick on empty q */
1036 	for (i = 0; i < nbuf; i++) {
1037 		bp = &buf[i];
1038 		bzero(bp, sizeof *bp);
1039 		bp->b_flags = B_INVAL;
1040 		bp->b_rcred = NOCRED;
1041 		bp->b_wcred = NOCRED;
1042 		bp->b_qindex = QUEUE_NONE;
1043 		bp->b_domain = -1;
1044 		bp->b_subqueue = mp_ncpus;
1045 		bp->b_xflags = 0;
1046 		bp->b_data = bp->b_kvabase = unmapped_buf;
1047 		LIST_INIT(&bp->b_dep);
1048 		BUF_LOCKINIT(bp);
1049 		bq_insert(&bqempty, bp, false);
1050 	}
1051 
1052 	/*
1053 	 * maxbufspace is the absolute maximum amount of buffer space we are
1054 	 * allowed to reserve in KVM and in real terms.  The absolute maximum
1055 	 * is nominally used by metadata.  hibufspace is the nominal maximum
1056 	 * used by most other requests.  The differential is required to
1057 	 * ensure that metadata deadlocks don't occur.
1058 	 *
1059 	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger then
1060 	 * this may result in KVM fragmentation which is not handled optimally
1061 	 * by the system. XXX This is less true with vmem.  We could use
1062 	 * PAGE_SIZE.
1063 	 */
1064 	maxbufspace = (long)nbuf * BKVASIZE;
1065 	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - maxbcachebuf * 10);
1066 	lobufspace = (hibufspace / 20) * 19; /* 95% */
1067 	bufspacethresh = lobufspace + (hibufspace - lobufspace) / 2;
1068 
1069 	/*
1070 	 * Note: The 16 MiB upper limit for hirunningspace was chosen
1071 	 * arbitrarily and may need further tuning. It corresponds to
1072 	 * 128 outstanding write IO requests (if IO size is 128 KiB),
1073 	 * which fits with many RAID controllers' tagged queuing limits.
1074 	 * The lower 1 MiB limit is the historical upper limit for
1075 	 * hirunningspace.
1076 	 */
1077 	hirunningspace = lmax(lmin(roundup(hibufspace / 64, maxbcachebuf),
1078 	    16 * 1024 * 1024), 1024 * 1024);
1079 	lorunningspace = roundup((hirunningspace * 2) / 3, maxbcachebuf);
1080 
1081 	/*
1082 	 * Limit the amount of malloc memory since it is wired permanently into
1083 	 * the kernel space.  Even though this is accounted for in the buffer
1084 	 * allocation, we don't want the malloced region to grow uncontrolled.
1085 	 * The malloc scheme improves memory utilization significantly on
1086 	 * average (small) directories.
1087 	 */
1088 	maxbufmallocspace = hibufspace / 20;
1089 
1090 	/*
1091 	 * Reduce the chance of a deadlock occurring by limiting the number
1092 	 * of delayed-write dirty buffers we allow to stack up.
1093 	 */
1094 	hidirtybuffers = nbuf / 4 + 20;
1095 	dirtybufthresh = hidirtybuffers * 9 / 10;
1096 	numdirtybuffers = 0;
1097 	/*
1098 	 * To support extreme low-memory systems, make sure hidirtybuffers
1099 	 * cannot eat up all available buffer space.  This occurs when our
1100 	 * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
1101 	 * buffer space assuming BKVASIZE'd buffers.
1102 	 */
1103 	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
1104 		hidirtybuffers >>= 1;
1105 	}
1106 	lodirtybuffers = hidirtybuffers / 2;
1107 
1108 	/*
1109 	 * lofreebuffers should be sufficient to avoid stalling waiting on
1110 	 * buf headers under heavy utilization.  The bufs in per-cpu caches
1111 	 * are counted as free but will be unavailable to threads executing
1112 	 * on other cpus.
1113 	 *
1114 	 * hifreebuffers is the free target for the bufspace daemon.  This
1115 	 * should be set appropriately to limit work per-iteration.
1116 	 */
1117 	lofreebuffers = MIN((nbuf / 25) + (20 * mp_ncpus), 128 * mp_ncpus);
1118 	hifreebuffers = (3 * lofreebuffers) / 2;
1119 	numfreebuffers = nbuf;
1120 
1121 	/* Setup the kva and free list allocators. */
1122 	vmem_set_reclaim(buffer_arena, bufkva_reclaim);
1123 	buf_zone = uma_zcache_create("buf free cache", sizeof(struct buf),
1124 	    NULL, NULL, NULL, NULL, buf_import, buf_release, NULL, 0);
1125 
1126 	/*
1127 	 * Size the clean queue according to the amount of buffer space.
1128 	 * One queue per-256mb up to the max.  More queues gives better
1129 	 * concurrency but less accurate LRU.
1130 	 */
1131 	clean_domains = MIN(howmany(maxbufspace, 256*1024*1024), CLEAN_DOMAINS);
1132 	for (i = 0 ; i < clean_domains; i++) {
1133 		struct bufdomain *bd;
1134 
1135 		bd = &bdclean[i];
1136 		bd_init(bd);
1137 		bd->bd_freebuffers = nbuf / clean_domains;
1138 		bd->bd_hifreebuffers = hifreebuffers / clean_domains;
1139 		bd->bd_lofreebuffers = lofreebuffers / clean_domains;
1140 		bd->bd_bufspace = 0;
1141 		bd->bd_maxbufspace = maxbufspace / clean_domains;
1142 		bd->bd_hibufspace = hibufspace / clean_domains;
1143 		bd->bd_lobufspace = lobufspace / clean_domains;
1144 		bd->bd_bufspacethresh = bufspacethresh / clean_domains;
1145 		/* Don't allow more than 2% of bufs in the per-cpu caches. */
1146 		bd->bd_lim = nbuf / clean_domains / 50 / mp_ncpus;
1147 	}
1148 	getnewbufcalls = counter_u64_alloc(M_WAITOK);
1149 	getnewbufrestarts = counter_u64_alloc(M_WAITOK);
1150 	mappingrestarts = counter_u64_alloc(M_WAITOK);
1151 	numbufallocfails = counter_u64_alloc(M_WAITOK);
1152 	notbufdflushes = counter_u64_alloc(M_WAITOK);
1153 	buffreekvacnt = counter_u64_alloc(M_WAITOK);
1154 	bufdefragcnt = counter_u64_alloc(M_WAITOK);
1155 	bufkvaspace = counter_u64_alloc(M_WAITOK);
1156 }
1157 
1158 #ifdef INVARIANTS
1159 static inline void
1160 vfs_buf_check_mapped(struct buf *bp)
1161 {
1162 
1163 	KASSERT(bp->b_kvabase != unmapped_buf,
1164 	    ("mapped buf: b_kvabase was not updated %p", bp));
1165 	KASSERT(bp->b_data != unmapped_buf,
1166 	    ("mapped buf: b_data was not updated %p", bp));
1167 	KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
1168 	    MAXPHYS, ("b_data + b_offset unmapped %p", bp));
1169 }
1170 
1171 static inline void
1172 vfs_buf_check_unmapped(struct buf *bp)
1173 {
1174 
1175 	KASSERT(bp->b_data == unmapped_buf,
1176 	    ("unmapped buf: corrupted b_data %p", bp));
1177 }
1178 
1179 #define	BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
1180 #define	BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
1181 #else
1182 #define	BUF_CHECK_MAPPED(bp) do {} while (0)
1183 #define	BUF_CHECK_UNMAPPED(bp) do {} while (0)
1184 #endif
1185 
1186 static int
1187 isbufbusy(struct buf *bp)
1188 {
1189 	if (((bp->b_flags & B_INVAL) == 0 && BUF_ISLOCKED(bp)) ||
1190 	    ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
1191 		return (1);
1192 	return (0);
1193 }
1194 
1195 /*
1196  * Shutdown the system cleanly to prepare for reboot, halt, or power off.
1197  */
1198 void
1199 bufshutdown(int show_busybufs)
1200 {
1201 	static int first_buf_printf = 1;
1202 	struct buf *bp;
1203 	int iter, nbusy, pbusy;
1204 #ifndef PREEMPTION
1205 	int subiter;
1206 #endif
1207 
1208 	/*
1209 	 * Sync filesystems for shutdown
1210 	 */
1211 	wdog_kern_pat(WD_LASTVAL);
1212 	sys_sync(curthread, NULL);
1213 
1214 	/*
1215 	 * With soft updates, some buffers that are
1216 	 * written will be remarked as dirty until other
1217 	 * buffers are written.
1218 	 */
1219 	for (iter = pbusy = 0; iter < 20; iter++) {
1220 		nbusy = 0;
1221 		for (bp = &buf[nbuf]; --bp >= buf; )
1222 			if (isbufbusy(bp))
1223 				nbusy++;
1224 		if (nbusy == 0) {
1225 			if (first_buf_printf)
1226 				printf("All buffers synced.");
1227 			break;
1228 		}
1229 		if (first_buf_printf) {
1230 			printf("Syncing disks, buffers remaining... ");
1231 			first_buf_printf = 0;
1232 		}
1233 		printf("%d ", nbusy);
1234 		if (nbusy < pbusy)
1235 			iter = 0;
1236 		pbusy = nbusy;
1237 
1238 		wdog_kern_pat(WD_LASTVAL);
1239 		sys_sync(curthread, NULL);
1240 
1241 #ifdef PREEMPTION
1242 		/*
1243 		 * Drop Giant and spin for a while to allow
1244 		 * interrupt threads to run.
1245 		 */
1246 		DROP_GIANT();
1247 		DELAY(50000 * iter);
1248 		PICKUP_GIANT();
1249 #else
1250 		/*
1251 		 * Drop Giant and context switch several times to
1252 		 * allow interrupt threads to run.
1253 		 */
1254 		DROP_GIANT();
1255 		for (subiter = 0; subiter < 50 * iter; subiter++) {
1256 			thread_lock(curthread);
1257 			mi_switch(SW_VOL, NULL);
1258 			thread_unlock(curthread);
1259 			DELAY(1000);
1260 		}
1261 		PICKUP_GIANT();
1262 #endif
1263 	}
1264 	printf("\n");
1265 	/*
1266 	 * Count only busy local buffers to prevent forcing
1267 	 * a fsck if we're just a client of a wedged NFS server
1268 	 */
1269 	nbusy = 0;
1270 	for (bp = &buf[nbuf]; --bp >= buf; ) {
1271 		if (isbufbusy(bp)) {
1272 #if 0
1273 /* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
1274 			if (bp->b_dev == NULL) {
1275 				TAILQ_REMOVE(&mountlist,
1276 				    bp->b_vp->v_mount, mnt_list);
1277 				continue;
1278 			}
1279 #endif
1280 			nbusy++;
1281 			if (show_busybufs > 0) {
1282 				printf(
1283 	    "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
1284 				    nbusy, bp, bp->b_vp, bp->b_flags,
1285 				    (intmax_t)bp->b_blkno,
1286 				    (intmax_t)bp->b_lblkno);
1287 				BUF_LOCKPRINTINFO(bp);
1288 				if (show_busybufs > 1)
1289 					vn_printf(bp->b_vp,
1290 					    "vnode content: ");
1291 			}
1292 		}
1293 	}
1294 	if (nbusy) {
1295 		/*
1296 		 * Failed to sync all blocks. Indicate this and don't
1297 		 * unmount filesystems (thus forcing an fsck on reboot).
1298 		 */
1299 		printf("Giving up on %d buffers\n", nbusy);
1300 		DELAY(5000000);	/* 5 seconds */
1301 	} else {
1302 		if (!first_buf_printf)
1303 			printf("Final sync complete\n");
1304 		/*
1305 		 * Unmount filesystems
1306 		 */
1307 		if (panicstr == NULL)
1308 			vfs_unmountall();
1309 	}
1310 	swapoff_all();
1311 	DELAY(100000);		/* wait for console output to finish */
1312 }
1313 
1314 static void
1315 bpmap_qenter(struct buf *bp)
1316 {
1317 
1318 	BUF_CHECK_MAPPED(bp);
1319 
1320 	/*
1321 	 * bp->b_data is relative to bp->b_offset, but
1322 	 * bp->b_offset may be offset into the first page.
1323 	 */
1324 	bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
1325 	pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
1326 	bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
1327 	    (vm_offset_t)(bp->b_offset & PAGE_MASK));
1328 }
1329 
1330 static struct bufqueue *
1331 bufqueue(struct buf *bp)
1332 {
1333 
1334 	switch (bp->b_qindex) {
1335 	case QUEUE_NONE:
1336 		/* FALLTHROUGH */
1337 	case QUEUE_SENTINEL:
1338 		return (NULL);
1339 	case QUEUE_EMPTY:
1340 		return (&bqempty);
1341 	case QUEUE_DIRTY:
1342 		return (&bqdirty);
1343 	case QUEUE_CLEAN:
1344 		return (&bdclean[bp->b_domain].bd_subq[bp->b_subqueue]);
1345 	default:
1346 		break;
1347 	}
1348 	panic("bufqueue(%p): Unhandled type %d\n", bp, bp->b_qindex);
1349 }
1350 
1351 /*
1352  * Return the locked bufqueue that bp is a member of.
1353  */
1354 static struct bufqueue *
1355 bufqueue_acquire(struct buf *bp)
1356 {
1357 	struct bufqueue *bq, *nbq;
1358 
1359 	/*
1360 	 * bp can be pushed from a per-cpu queue to the
1361 	 * cleanq while we're waiting on the lock.  Retry
1362 	 * if the queues don't match.
1363 	 */
1364 	bq = bufqueue(bp);
1365 	BQ_LOCK(bq);
1366 	for (;;) {
1367 		nbq = bufqueue(bp);
1368 		if (bq == nbq)
1369 			break;
1370 		BQ_UNLOCK(bq);
1371 		BQ_LOCK(nbq);
1372 		bq = nbq;
1373 	}
1374 	return (bq);
1375 }
1376 
1377 /*
1378  *	binsfree:
1379  *
1380  *	Insert the buffer into the appropriate free list.  Requires a
1381  *	locked buffer on entry and buffer is unlocked before return.
1382  */
1383 static void
1384 binsfree(struct buf *bp, int qindex)
1385 {
1386 	struct bufdomain *bd;
1387 	struct bufqueue *bq;
1388 
1389 	KASSERT(qindex == QUEUE_CLEAN || qindex == QUEUE_DIRTY,
1390 	    ("binsfree: Invalid qindex %d", qindex));
1391 	BUF_ASSERT_XLOCKED(bp);
1392 
1393 	/*
1394 	 * Handle delayed bremfree() processing.
1395 	 */
1396 	if (bp->b_flags & B_REMFREE) {
1397 		if (bp->b_qindex == qindex) {
1398 			bp->b_flags |= B_REUSE;
1399 			bp->b_flags &= ~B_REMFREE;
1400 			BUF_UNLOCK(bp);
1401 			return;
1402 		}
1403 		bq = bufqueue_acquire(bp);
1404 		bq_remove(bq, bp);
1405 		BQ_UNLOCK(bq);
1406 	}
1407 	if (qindex == QUEUE_CLEAN) {
1408 		bd = &bdclean[bp->b_domain];
1409 		if (bd->bd_lim != 0)
1410 			bq = &bd->bd_subq[PCPU_GET(cpuid)];
1411 		else
1412 			bq = bd->bd_cleanq;
1413 	} else
1414 		bq = &bqdirty;
1415 	bq_insert(bq, bp, true);
1416 }
1417 
1418 /*
1419  * buf_free:
1420  *
1421  *	Free a buffer to the buf zone once it no longer has valid contents.
1422  */
1423 static void
1424 buf_free(struct buf *bp)
1425 {
1426 
1427 	if (bp->b_flags & B_REMFREE)
1428 		bremfreef(bp);
1429 	if (bp->b_vflags & BV_BKGRDINPROG)
1430 		panic("losing buffer 1");
1431 	if (bp->b_rcred != NOCRED) {
1432 		crfree(bp->b_rcred);
1433 		bp->b_rcred = NOCRED;
1434 	}
1435 	if (bp->b_wcred != NOCRED) {
1436 		crfree(bp->b_wcred);
1437 		bp->b_wcred = NOCRED;
1438 	}
1439 	if (!LIST_EMPTY(&bp->b_dep))
1440 		buf_deallocate(bp);
1441 	bufkva_free(bp);
1442 	atomic_add_int(&bdclean[bp->b_domain].bd_freebuffers, 1);
1443 	BUF_UNLOCK(bp);
1444 	uma_zfree(buf_zone, bp);
1445 }
1446 
1447 /*
1448  * buf_import:
1449  *
1450  *	Import bufs into the uma cache from the buf list.  The system still
1451  *	expects a static array of bufs and much of the synchronization
1452  *	around bufs assumes type stable storage.  As a result, UMA is used
1453  *	only as a per-cpu cache of bufs still maintained on a global list.
1454  */
1455 static int
1456 buf_import(void *arg, void **store, int cnt, int domain, int flags)
1457 {
1458 	struct buf *bp;
1459 	int i;
1460 
1461 	BQ_LOCK(&bqempty);
1462 	for (i = 0; i < cnt; i++) {
1463 		bp = TAILQ_FIRST(&bqempty.bq_queue);
1464 		if (bp == NULL)
1465 			break;
1466 		bq_remove(&bqempty, bp);
1467 		store[i] = bp;
1468 	}
1469 	BQ_UNLOCK(&bqempty);
1470 
1471 	return (i);
1472 }
1473 
1474 /*
1475  * buf_release:
1476  *
1477  *	Release bufs from the uma cache back to the buffer queues.
1478  */
1479 static void
1480 buf_release(void *arg, void **store, int cnt)
1481 {
1482 	struct bufqueue *bq;
1483 	struct buf *bp;
1484         int i;
1485 
1486 	bq = &bqempty;
1487 	BQ_LOCK(bq);
1488         for (i = 0; i < cnt; i++) {
1489 		bp = store[i];
1490 		/* Inline bq_insert() to batch locking. */
1491 		TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
1492 		bp->b_flags &= ~(B_AGE | B_REUSE);
1493 		bq->bq_len++;
1494 		bp->b_qindex = bq->bq_index;
1495 	}
1496 	BQ_UNLOCK(bq);
1497 }
1498 
1499 /*
1500  * buf_alloc:
1501  *
1502  *	Allocate an empty buffer header.
1503  */
1504 static struct buf *
1505 buf_alloc(struct bufdomain *bd)
1506 {
1507 	struct buf *bp;
1508 	int freebufs;
1509 
1510 	/*
1511 	 * We can only run out of bufs in the buf zone if the average buf
1512 	 * is less than BKVASIZE.  In this case the actual wait/block will
1513 	 * come from buf_reycle() failing to flush one of these small bufs.
1514 	 */
1515 	bp = NULL;
1516 	freebufs = atomic_fetchadd_int(&bd->bd_freebuffers, -1);
1517 	if (freebufs > 0)
1518 		bp = uma_zalloc(buf_zone, M_NOWAIT);
1519 	if (bp == NULL) {
1520 		atomic_fetchadd_int(&bd->bd_freebuffers, 1);
1521 		bufspace_daemon_wakeup(bd);
1522 		counter_u64_add(numbufallocfails, 1);
1523 		return (NULL);
1524 	}
1525 	/*
1526 	 * Wake-up the bufspace daemon on transition below threshold.
1527 	 */
1528 	if (freebufs == bd->bd_lofreebuffers)
1529 		bufspace_daemon_wakeup(bd);
1530 
1531 	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1532 		panic("getnewbuf_empty: Locked buf %p on free queue.", bp);
1533 
1534 	KASSERT(bp->b_vp == NULL,
1535 	    ("bp: %p still has vnode %p.", bp, bp->b_vp));
1536 	KASSERT((bp->b_flags & (B_DELWRI | B_NOREUSE)) == 0,
1537 	    ("invalid buffer %p flags %#x", bp, bp->b_flags));
1538 	KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1539 	    ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
1540 	KASSERT(bp->b_npages == 0,
1541 	    ("bp: %p still has %d vm pages\n", bp, bp->b_npages));
1542 	KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp));
1543 	KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp));
1544 
1545 	bp->b_domain = BD_DOMAIN(bd);
1546 	bp->b_flags = 0;
1547 	bp->b_ioflags = 0;
1548 	bp->b_xflags = 0;
1549 	bp->b_vflags = 0;
1550 	bp->b_vp = NULL;
1551 	bp->b_blkno = bp->b_lblkno = 0;
1552 	bp->b_offset = NOOFFSET;
1553 	bp->b_iodone = 0;
1554 	bp->b_error = 0;
1555 	bp->b_resid = 0;
1556 	bp->b_bcount = 0;
1557 	bp->b_npages = 0;
1558 	bp->b_dirtyoff = bp->b_dirtyend = 0;
1559 	bp->b_bufobj = NULL;
1560 	bp->b_data = bp->b_kvabase = unmapped_buf;
1561 	bp->b_fsprivate1 = NULL;
1562 	bp->b_fsprivate2 = NULL;
1563 	bp->b_fsprivate3 = NULL;
1564 	LIST_INIT(&bp->b_dep);
1565 
1566 	return (bp);
1567 }
1568 
1569 /*
1570  *	buf_recycle:
1571  *
1572  *	Free a buffer from the given bufqueue.  kva controls whether the
1573  *	freed buf must own some kva resources.  This is used for
1574  *	defragmenting.
1575  */
1576 static int
1577 buf_recycle(struct bufdomain *bd, bool kva)
1578 {
1579 	struct bufqueue *bq;
1580 	struct buf *bp, *nbp;
1581 
1582 	if (kva)
1583 		counter_u64_add(bufdefragcnt, 1);
1584 	nbp = NULL;
1585 	bq = bd->bd_cleanq;
1586 	BQ_LOCK(bq);
1587 	KASSERT(BQ_LOCKPTR(bq) == BD_LOCKPTR(bd),
1588 	    ("buf_recycle: Locks don't match"));
1589 	nbp = TAILQ_FIRST(&bq->bq_queue);
1590 
1591 	/*
1592 	 * Run scan, possibly freeing data and/or kva mappings on the fly
1593 	 * depending.
1594 	 */
1595 	while ((bp = nbp) != NULL) {
1596 		/*
1597 		 * Calculate next bp (we can only use it if we do not
1598 		 * release the bqlock).
1599 		 */
1600 		nbp = TAILQ_NEXT(bp, b_freelist);
1601 
1602 		/*
1603 		 * If we are defragging then we need a buffer with
1604 		 * some kva to reclaim.
1605 		 */
1606 		if (kva && bp->b_kvasize == 0)
1607 			continue;
1608 
1609 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1610 			continue;
1611 
1612 		/*
1613 		 * Implement a second chance algorithm for frequently
1614 		 * accessed buffers.
1615 		 */
1616 		if ((bp->b_flags & B_REUSE) != 0) {
1617 			TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1618 			TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
1619 			bp->b_flags &= ~B_REUSE;
1620 			BUF_UNLOCK(bp);
1621 			continue;
1622 		}
1623 
1624 		/*
1625 		 * Skip buffers with background writes in progress.
1626 		 */
1627 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
1628 			BUF_UNLOCK(bp);
1629 			continue;
1630 		}
1631 
1632 		KASSERT(bp->b_qindex == QUEUE_CLEAN,
1633 		    ("buf_recycle: inconsistent queue %d bp %p",
1634 		    bp->b_qindex, bp));
1635 		KASSERT(bp->b_domain == BD_DOMAIN(bd),
1636 		    ("getnewbuf: queue domain %d doesn't match request %d",
1637 		    bp->b_domain, (int)BD_DOMAIN(bd)));
1638 		/*
1639 		 * NOTE:  nbp is now entirely invalid.  We can only restart
1640 		 * the scan from this point on.
1641 		 */
1642 		bq_remove(bq, bp);
1643 		BQ_UNLOCK(bq);
1644 
1645 		/*
1646 		 * Requeue the background write buffer with error and
1647 		 * restart the scan.
1648 		 */
1649 		if ((bp->b_vflags & BV_BKGRDERR) != 0) {
1650 			bqrelse(bp);
1651 			BQ_LOCK(bq);
1652 			nbp = TAILQ_FIRST(&bq->bq_queue);
1653 			continue;
1654 		}
1655 		bp->b_flags |= B_INVAL;
1656 		brelse(bp);
1657 		return (0);
1658 	}
1659 	bd->bd_wanted = 1;
1660 	BQ_UNLOCK(bq);
1661 
1662 	return (ENOBUFS);
1663 }
1664 
1665 /*
1666  *	bremfree:
1667  *
1668  *	Mark the buffer for removal from the appropriate free list.
1669  *
1670  */
1671 void
1672 bremfree(struct buf *bp)
1673 {
1674 
1675 	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1676 	KASSERT((bp->b_flags & B_REMFREE) == 0,
1677 	    ("bremfree: buffer %p already marked for delayed removal.", bp));
1678 	KASSERT(bp->b_qindex != QUEUE_NONE,
1679 	    ("bremfree: buffer %p not on a queue.", bp));
1680 	BUF_ASSERT_XLOCKED(bp);
1681 
1682 	bp->b_flags |= B_REMFREE;
1683 }
1684 
1685 /*
1686  *	bremfreef:
1687  *
1688  *	Force an immediate removal from a free list.  Used only in nfs when
1689  *	it abuses the b_freelist pointer.
1690  */
1691 void
1692 bremfreef(struct buf *bp)
1693 {
1694 	struct bufqueue *bq;
1695 
1696 	bq = bufqueue_acquire(bp);
1697 	bq_remove(bq, bp);
1698 	BQ_UNLOCK(bq);
1699 }
1700 
1701 static void
1702 bq_init(struct bufqueue *bq, int qindex, int subqueue, const char *lockname)
1703 {
1704 
1705 	mtx_init(&bq->bq_lock, lockname, NULL, MTX_DEF);
1706 	TAILQ_INIT(&bq->bq_queue);
1707 	bq->bq_len = 0;
1708 	bq->bq_index = qindex;
1709 	bq->bq_subqueue = subqueue;
1710 }
1711 
1712 static void
1713 bd_init(struct bufdomain *bd)
1714 {
1715 	int domain;
1716 	int i;
1717 
1718 	domain = bd - bdclean;
1719 	bd->bd_cleanq = &bd->bd_subq[mp_ncpus];
1720 	bq_init(bd->bd_cleanq, QUEUE_CLEAN, mp_ncpus, "bufq clean lock");
1721 	for (i = 0; i <= mp_maxid; i++)
1722 		bq_init(&bd->bd_subq[i], QUEUE_CLEAN, i,
1723 		    "bufq clean subqueue lock");
1724 	mtx_init(&bd->bd_run_lock, "bufspace daemon run lock", NULL, MTX_DEF);
1725 }
1726 
1727 /*
1728  *	bq_remove:
1729  *
1730  *	Removes a buffer from the free list, must be called with the
1731  *	correct qlock held.
1732  */
1733 static void
1734 bq_remove(struct bufqueue *bq, struct buf *bp)
1735 {
1736 
1737 	CTR3(KTR_BUF, "bq_remove(%p) vp %p flags %X",
1738 	    bp, bp->b_vp, bp->b_flags);
1739 	KASSERT(bp->b_qindex != QUEUE_NONE,
1740 	    ("bq_remove: buffer %p not on a queue.", bp));
1741 	KASSERT(bufqueue(bp) == bq,
1742 	    ("bq_remove: Remove buffer %p from wrong queue.", bp));
1743 
1744 	BQ_ASSERT_LOCKED(bq);
1745 	if (bp->b_qindex != QUEUE_EMPTY) {
1746 		BUF_ASSERT_XLOCKED(bp);
1747 	}
1748 	KASSERT(bq->bq_len >= 1,
1749 	    ("queue %d underflow", bp->b_qindex));
1750 	TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1751 	bq->bq_len--;
1752 	bp->b_qindex = QUEUE_NONE;
1753 	bp->b_flags &= ~(B_REMFREE | B_REUSE);
1754 }
1755 
1756 static void
1757 bd_flush(struct bufdomain *bd, struct bufqueue *bq)
1758 {
1759 	struct buf *bp;
1760 
1761 	BQ_ASSERT_LOCKED(bq);
1762 	if (bq != bd->bd_cleanq) {
1763 		BD_LOCK(bd);
1764 		while ((bp = TAILQ_FIRST(&bq->bq_queue)) != NULL) {
1765 			TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1766 			TAILQ_INSERT_TAIL(&bd->bd_cleanq->bq_queue, bp,
1767 			    b_freelist);
1768 			bp->b_subqueue = mp_ncpus;
1769 		}
1770 		bd->bd_cleanq->bq_len += bq->bq_len;
1771 		bq->bq_len = 0;
1772 	}
1773 	if (bd->bd_wanted) {
1774 		bd->bd_wanted = 0;
1775 		wakeup(&bd->bd_wanted);
1776 	}
1777 	if (bq != bd->bd_cleanq)
1778 		BD_UNLOCK(bd);
1779 }
1780 
1781 static int
1782 bd_flushall(struct bufdomain *bd)
1783 {
1784 	struct bufqueue *bq;
1785 	int flushed;
1786 	int i;
1787 
1788 	if (bd->bd_lim == 0)
1789 		return (0);
1790 	flushed = 0;
1791 	for (i = 0; i < mp_maxid; i++) {
1792 		bq = &bd->bd_subq[i];
1793 		if (bq->bq_len == 0)
1794 			continue;
1795 		BQ_LOCK(bq);
1796 		bd_flush(bd, bq);
1797 		BQ_UNLOCK(bq);
1798 		flushed++;
1799 	}
1800 
1801 	return (flushed);
1802 }
1803 
1804 static void
1805 bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock)
1806 {
1807 	struct bufdomain *bd;
1808 
1809 	if (bp->b_qindex != QUEUE_NONE)
1810 		panic("bq_insert: free buffer %p onto another queue?", bp);
1811 
1812 	bd = &bdclean[bp->b_domain];
1813 	if (bp->b_flags & B_AGE) {
1814 		/* Place this buf directly on the real queue. */
1815 		if (bq->bq_index == QUEUE_CLEAN)
1816 			bq = bd->bd_cleanq;
1817 		BQ_LOCK(bq);
1818 		TAILQ_INSERT_HEAD(&bq->bq_queue, bp, b_freelist);
1819 	} else {
1820 		BQ_LOCK(bq);
1821 		TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
1822 	}
1823 	bp->b_flags &= ~(B_AGE | B_REUSE);
1824 	bq->bq_len++;
1825 	bp->b_qindex = bq->bq_index;
1826 	bp->b_subqueue = bq->bq_subqueue;
1827 
1828 	/*
1829 	 * Unlock before we notify so that we don't wakeup a waiter that
1830 	 * fails a trylock on the buf and sleeps again.
1831 	 */
1832 	if (unlock)
1833 		BUF_UNLOCK(bp);
1834 
1835 	if (bp->b_qindex == QUEUE_CLEAN) {
1836 		/*
1837 		 * Flush the per-cpu queue and notify any waiters.
1838 		 */
1839 		if (bd->bd_wanted || (bq != bd->bd_cleanq &&
1840 		    bq->bq_len >= bd->bd_lim))
1841 			bd_flush(bd, bq);
1842 	}
1843 	BQ_UNLOCK(bq);
1844 }
1845 
1846 /*
1847  *	bufkva_free:
1848  *
1849  *	Free the kva allocation for a buffer.
1850  *
1851  */
1852 static void
1853 bufkva_free(struct buf *bp)
1854 {
1855 
1856 #ifdef INVARIANTS
1857 	if (bp->b_kvasize == 0) {
1858 		KASSERT(bp->b_kvabase == unmapped_buf &&
1859 		    bp->b_data == unmapped_buf,
1860 		    ("Leaked KVA space on %p", bp));
1861 	} else if (buf_mapped(bp))
1862 		BUF_CHECK_MAPPED(bp);
1863 	else
1864 		BUF_CHECK_UNMAPPED(bp);
1865 #endif
1866 	if (bp->b_kvasize == 0)
1867 		return;
1868 
1869 	vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
1870 	counter_u64_add(bufkvaspace, -bp->b_kvasize);
1871 	counter_u64_add(buffreekvacnt, 1);
1872 	bp->b_data = bp->b_kvabase = unmapped_buf;
1873 	bp->b_kvasize = 0;
1874 }
1875 
1876 /*
1877  *	bufkva_alloc:
1878  *
1879  *	Allocate the buffer KVA and set b_kvasize and b_kvabase.
1880  */
1881 static int
1882 bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
1883 {
1884 	vm_offset_t addr;
1885 	int error;
1886 
1887 	KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
1888 	    ("Invalid gbflags 0x%x in %s", gbflags, __func__));
1889 
1890 	bufkva_free(bp);
1891 
1892 	addr = 0;
1893 	error = vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr);
1894 	if (error != 0) {
1895 		/*
1896 		 * Buffer map is too fragmented.  Request the caller
1897 		 * to defragment the map.
1898 		 */
1899 		return (error);
1900 	}
1901 	bp->b_kvabase = (caddr_t)addr;
1902 	bp->b_kvasize = maxsize;
1903 	counter_u64_add(bufkvaspace, bp->b_kvasize);
1904 	if ((gbflags & GB_UNMAPPED) != 0) {
1905 		bp->b_data = unmapped_buf;
1906 		BUF_CHECK_UNMAPPED(bp);
1907 	} else {
1908 		bp->b_data = bp->b_kvabase;
1909 		BUF_CHECK_MAPPED(bp);
1910 	}
1911 	return (0);
1912 }
1913 
1914 /*
1915  *	bufkva_reclaim:
1916  *
1917  *	Reclaim buffer kva by freeing buffers holding kva.  This is a vmem
1918  *	callback that fires to avoid returning failure.
1919  */
1920 static void
1921 bufkva_reclaim(vmem_t *vmem, int flags)
1922 {
1923 	bool done;
1924 	int q;
1925 	int i;
1926 
1927 	done = false;
1928 	for (i = 0; i < 5; i++) {
1929 		for (q = 0; q < clean_domains; q++)
1930 			if (buf_recycle(&bdclean[q], true) != 0)
1931 				done = true;
1932 		if (done)
1933 			break;
1934 	}
1935 	return;
1936 }
1937 
1938 /*
1939  * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
1940  * clear BIO_ERROR and B_INVAL prior to initiating I/O . If B_CACHE is set,
1941  * the buffer is valid and we do not have to do anything.
1942  */
1943 static void
1944 breada(struct vnode * vp, daddr_t * rablkno, int * rabsize, int cnt,
1945     struct ucred * cred, int flags, void (*ckhashfunc)(struct buf *))
1946 {
1947 	struct buf *rabp;
1948 	int i;
1949 
1950 	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
1951 		if (inmem(vp, *rablkno))
1952 			continue;
1953 		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
1954 		if ((rabp->b_flags & B_CACHE) != 0) {
1955 			brelse(rabp);
1956 			continue;
1957 		}
1958 		if (!TD_IS_IDLETHREAD(curthread)) {
1959 #ifdef RACCT
1960 			if (racct_enable) {
1961 				PROC_LOCK(curproc);
1962 				racct_add_buf(curproc, rabp, 0);
1963 				PROC_UNLOCK(curproc);
1964 			}
1965 #endif /* RACCT */
1966 			curthread->td_ru.ru_inblock++;
1967 		}
1968 		rabp->b_flags |= B_ASYNC;
1969 		rabp->b_flags &= ~B_INVAL;
1970 		if ((flags & GB_CKHASH) != 0) {
1971 			rabp->b_flags |= B_CKHASH;
1972 			rabp->b_ckhashcalc = ckhashfunc;
1973 		}
1974 		rabp->b_ioflags &= ~BIO_ERROR;
1975 		rabp->b_iocmd = BIO_READ;
1976 		if (rabp->b_rcred == NOCRED && cred != NOCRED)
1977 			rabp->b_rcred = crhold(cred);
1978 		vfs_busy_pages(rabp, 0);
1979 		BUF_KERNPROC(rabp);
1980 		rabp->b_iooffset = dbtob(rabp->b_blkno);
1981 		bstrategy(rabp);
1982 	}
1983 }
1984 
1985 /*
1986  * Entry point for bread() and breadn() via #defines in sys/buf.h.
1987  *
1988  * Get a buffer with the specified data.  Look in the cache first.  We
1989  * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
1990  * is set, the buffer is valid and we do not have to do anything, see
1991  * getblk(). Also starts asynchronous I/O on read-ahead blocks.
1992  *
1993  * Always return a NULL buffer pointer (in bpp) when returning an error.
1994  */
1995 int
1996 breadn_flags(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablkno,
1997     int *rabsize, int cnt, struct ucred *cred, int flags,
1998     void (*ckhashfunc)(struct buf *), struct buf **bpp)
1999 {
2000 	struct buf *bp;
2001 	int readwait, rv;
2002 
2003 	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
2004 	/*
2005 	 * Can only return NULL if GB_LOCK_NOWAIT flag is specified.
2006 	 */
2007 	*bpp = bp = getblk(vp, blkno, size, 0, 0, flags);
2008 	if (bp == NULL)
2009 		return (EBUSY);
2010 
2011 	/*
2012 	 * If not found in cache, do some I/O
2013 	 */
2014 	readwait = 0;
2015 	if ((bp->b_flags & B_CACHE) == 0) {
2016 		if (!TD_IS_IDLETHREAD(curthread)) {
2017 #ifdef RACCT
2018 			if (racct_enable) {
2019 				PROC_LOCK(curproc);
2020 				racct_add_buf(curproc, bp, 0);
2021 				PROC_UNLOCK(curproc);
2022 			}
2023 #endif /* RACCT */
2024 			curthread->td_ru.ru_inblock++;
2025 		}
2026 		bp->b_iocmd = BIO_READ;
2027 		bp->b_flags &= ~B_INVAL;
2028 		if ((flags & GB_CKHASH) != 0) {
2029 			bp->b_flags |= B_CKHASH;
2030 			bp->b_ckhashcalc = ckhashfunc;
2031 		}
2032 		bp->b_ioflags &= ~BIO_ERROR;
2033 		if (bp->b_rcred == NOCRED && cred != NOCRED)
2034 			bp->b_rcred = crhold(cred);
2035 		vfs_busy_pages(bp, 0);
2036 		bp->b_iooffset = dbtob(bp->b_blkno);
2037 		bstrategy(bp);
2038 		++readwait;
2039 	}
2040 
2041 	/*
2042 	 * Attempt to initiate asynchronous I/O on read-ahead blocks.
2043 	 */
2044 	breada(vp, rablkno, rabsize, cnt, cred, flags, ckhashfunc);
2045 
2046 	rv = 0;
2047 	if (readwait) {
2048 		rv = bufwait(bp);
2049 		if (rv != 0) {
2050 			brelse(bp);
2051 			*bpp = NULL;
2052 		}
2053 	}
2054 	return (rv);
2055 }
2056 
2057 /*
2058  * Write, release buffer on completion.  (Done by iodone
2059  * if async).  Do not bother writing anything if the buffer
2060  * is invalid.
2061  *
2062  * Note that we set B_CACHE here, indicating that buffer is
2063  * fully valid and thus cacheable.  This is true even of NFS
2064  * now so we set it generally.  This could be set either here
2065  * or in biodone() since the I/O is synchronous.  We put it
2066  * here.
2067  */
2068 int
2069 bufwrite(struct buf *bp)
2070 {
2071 	int oldflags;
2072 	struct vnode *vp;
2073 	long space;
2074 	int vp_md;
2075 
2076 	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2077 	if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) {
2078 		bp->b_flags |= B_INVAL | B_RELBUF;
2079 		bp->b_flags &= ~B_CACHE;
2080 		brelse(bp);
2081 		return (ENXIO);
2082 	}
2083 	if (bp->b_flags & B_INVAL) {
2084 		brelse(bp);
2085 		return (0);
2086 	}
2087 
2088 	if (bp->b_flags & B_BARRIER)
2089 		barrierwrites++;
2090 
2091 	oldflags = bp->b_flags;
2092 
2093 	BUF_ASSERT_HELD(bp);
2094 
2095 	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
2096 	    ("FFS background buffer should not get here %p", bp));
2097 
2098 	vp = bp->b_vp;
2099 	if (vp)
2100 		vp_md = vp->v_vflag & VV_MD;
2101 	else
2102 		vp_md = 0;
2103 
2104 	/*
2105 	 * Mark the buffer clean.  Increment the bufobj write count
2106 	 * before bundirty() call, to prevent other thread from seeing
2107 	 * empty dirty list and zero counter for writes in progress,
2108 	 * falsely indicating that the bufobj is clean.
2109 	 */
2110 	bufobj_wref(bp->b_bufobj);
2111 	bundirty(bp);
2112 
2113 	bp->b_flags &= ~B_DONE;
2114 	bp->b_ioflags &= ~BIO_ERROR;
2115 	bp->b_flags |= B_CACHE;
2116 	bp->b_iocmd = BIO_WRITE;
2117 
2118 	vfs_busy_pages(bp, 1);
2119 
2120 	/*
2121 	 * Normal bwrites pipeline writes
2122 	 */
2123 	bp->b_runningbufspace = bp->b_bufsize;
2124 	space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
2125 
2126 	if (!TD_IS_IDLETHREAD(curthread)) {
2127 #ifdef RACCT
2128 		if (racct_enable) {
2129 			PROC_LOCK(curproc);
2130 			racct_add_buf(curproc, bp, 1);
2131 			PROC_UNLOCK(curproc);
2132 		}
2133 #endif /* RACCT */
2134 		curthread->td_ru.ru_oublock++;
2135 	}
2136 	if (oldflags & B_ASYNC)
2137 		BUF_KERNPROC(bp);
2138 	bp->b_iooffset = dbtob(bp->b_blkno);
2139 	buf_track(bp, __func__);
2140 	bstrategy(bp);
2141 
2142 	if ((oldflags & B_ASYNC) == 0) {
2143 		int rtval = bufwait(bp);
2144 		brelse(bp);
2145 		return (rtval);
2146 	} else if (space > hirunningspace) {
2147 		/*
2148 		 * don't allow the async write to saturate the I/O
2149 		 * system.  We will not deadlock here because
2150 		 * we are blocking waiting for I/O that is already in-progress
2151 		 * to complete. We do not block here if it is the update
2152 		 * or syncer daemon trying to clean up as that can lead
2153 		 * to deadlock.
2154 		 */
2155 		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
2156 			waitrunningbufspace();
2157 	}
2158 
2159 	return (0);
2160 }
2161 
2162 void
2163 bufbdflush(struct bufobj *bo, struct buf *bp)
2164 {
2165 	struct buf *nbp;
2166 
2167 	if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
2168 		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
2169 		altbufferflushes++;
2170 	} else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
2171 		BO_LOCK(bo);
2172 		/*
2173 		 * Try to find a buffer to flush.
2174 		 */
2175 		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
2176 			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
2177 			    BUF_LOCK(nbp,
2178 				     LK_EXCLUSIVE | LK_NOWAIT, NULL))
2179 				continue;
2180 			if (bp == nbp)
2181 				panic("bdwrite: found ourselves");
2182 			BO_UNLOCK(bo);
2183 			/* Don't countdeps with the bo lock held. */
2184 			if (buf_countdeps(nbp, 0)) {
2185 				BO_LOCK(bo);
2186 				BUF_UNLOCK(nbp);
2187 				continue;
2188 			}
2189 			if (nbp->b_flags & B_CLUSTEROK) {
2190 				vfs_bio_awrite(nbp);
2191 			} else {
2192 				bremfree(nbp);
2193 				bawrite(nbp);
2194 			}
2195 			dirtybufferflushes++;
2196 			break;
2197 		}
2198 		if (nbp == NULL)
2199 			BO_UNLOCK(bo);
2200 	}
2201 }
2202 
2203 /*
2204  * Delayed write. (Buffer is marked dirty).  Do not bother writing
2205  * anything if the buffer is marked invalid.
2206  *
2207  * Note that since the buffer must be completely valid, we can safely
2208  * set B_CACHE.  In fact, we have to set B_CACHE here rather then in
2209  * biodone() in order to prevent getblk from writing the buffer
2210  * out synchronously.
2211  */
2212 void
2213 bdwrite(struct buf *bp)
2214 {
2215 	struct thread *td = curthread;
2216 	struct vnode *vp;
2217 	struct bufobj *bo;
2218 
2219 	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2220 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2221 	KASSERT((bp->b_flags & B_BARRIER) == 0,
2222 	    ("Barrier request in delayed write %p", bp));
2223 	BUF_ASSERT_HELD(bp);
2224 
2225 	if (bp->b_flags & B_INVAL) {
2226 		brelse(bp);
2227 		return;
2228 	}
2229 
2230 	/*
2231 	 * If we have too many dirty buffers, don't create any more.
2232 	 * If we are wildly over our limit, then force a complete
2233 	 * cleanup. Otherwise, just keep the situation from getting
2234 	 * out of control. Note that we have to avoid a recursive
2235 	 * disaster and not try to clean up after our own cleanup!
2236 	 */
2237 	vp = bp->b_vp;
2238 	bo = bp->b_bufobj;
2239 	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
2240 		td->td_pflags |= TDP_INBDFLUSH;
2241 		BO_BDFLUSH(bo, bp);
2242 		td->td_pflags &= ~TDP_INBDFLUSH;
2243 	} else
2244 		recursiveflushes++;
2245 
2246 	bdirty(bp);
2247 	/*
2248 	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
2249 	 * true even of NFS now.
2250 	 */
2251 	bp->b_flags |= B_CACHE;
2252 
2253 	/*
2254 	 * This bmap keeps the system from needing to do the bmap later,
2255 	 * perhaps when the system is attempting to do a sync.  Since it
2256 	 * is likely that the indirect block -- or whatever other datastructure
2257 	 * that the filesystem needs is still in memory now, it is a good
2258 	 * thing to do this.  Note also, that if the pageout daemon is
2259 	 * requesting a sync -- there might not be enough memory to do
2260 	 * the bmap then...  So, this is important to do.
2261 	 */
2262 	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
2263 		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
2264 	}
2265 
2266 	buf_track(bp, __func__);
2267 
2268 	/*
2269 	 * Set the *dirty* buffer range based upon the VM system dirty
2270 	 * pages.
2271 	 *
2272 	 * Mark the buffer pages as clean.  We need to do this here to
2273 	 * satisfy the vnode_pager and the pageout daemon, so that it
2274 	 * thinks that the pages have been "cleaned".  Note that since
2275 	 * the pages are in a delayed write buffer -- the VFS layer
2276 	 * "will" see that the pages get written out on the next sync,
2277 	 * or perhaps the cluster will be completed.
2278 	 */
2279 	vfs_clean_pages_dirty_buf(bp);
2280 	bqrelse(bp);
2281 
2282 	/*
2283 	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
2284 	 * due to the softdep code.
2285 	 */
2286 }
2287 
2288 /*
2289  *	bdirty:
2290  *
2291  *	Turn buffer into delayed write request.  We must clear BIO_READ and
2292  *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
2293  *	itself to properly update it in the dirty/clean lists.  We mark it
2294  *	B_DONE to ensure that any asynchronization of the buffer properly
2295  *	clears B_DONE ( else a panic will occur later ).
2296  *
2297  *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
2298  *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
2299  *	should only be called if the buffer is known-good.
2300  *
2301  *	Since the buffer is not on a queue, we do not update the numfreebuffers
2302  *	count.
2303  *
2304  *	The buffer must be on QUEUE_NONE.
2305  */
2306 void
2307 bdirty(struct buf *bp)
2308 {
2309 
2310 	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
2311 	    bp, bp->b_vp, bp->b_flags);
2312 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2313 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2314 	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
2315 	BUF_ASSERT_HELD(bp);
2316 	bp->b_flags &= ~(B_RELBUF);
2317 	bp->b_iocmd = BIO_WRITE;
2318 
2319 	if ((bp->b_flags & B_DELWRI) == 0) {
2320 		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
2321 		reassignbuf(bp);
2322 		bdirtyadd();
2323 	}
2324 }
2325 
2326 /*
2327  *	bundirty:
2328  *
2329  *	Clear B_DELWRI for buffer.
2330  *
2331  *	Since the buffer is not on a queue, we do not update the numfreebuffers
2332  *	count.
2333  *
2334  *	The buffer must be on QUEUE_NONE.
2335  */
2336 
2337 void
2338 bundirty(struct buf *bp)
2339 {
2340 
2341 	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2342 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2343 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2344 	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
2345 	BUF_ASSERT_HELD(bp);
2346 
2347 	if (bp->b_flags & B_DELWRI) {
2348 		bp->b_flags &= ~B_DELWRI;
2349 		reassignbuf(bp);
2350 		bdirtysub();
2351 	}
2352 	/*
2353 	 * Since it is now being written, we can clear its deferred write flag.
2354 	 */
2355 	bp->b_flags &= ~B_DEFERRED;
2356 }
2357 
2358 /*
2359  *	bawrite:
2360  *
2361  *	Asynchronous write.  Start output on a buffer, but do not wait for
2362  *	it to complete.  The buffer is released when the output completes.
2363  *
2364  *	bwrite() ( or the VOP routine anyway ) is responsible for handling
2365  *	B_INVAL buffers.  Not us.
2366  */
2367 void
2368 bawrite(struct buf *bp)
2369 {
2370 
2371 	bp->b_flags |= B_ASYNC;
2372 	(void) bwrite(bp);
2373 }
2374 
2375 /*
2376  *	babarrierwrite:
2377  *
2378  *	Asynchronous barrier write.  Start output on a buffer, but do not
2379  *	wait for it to complete.  Place a write barrier after this write so
2380  *	that this buffer and all buffers written before it are committed to
2381  *	the disk before any buffers written after this write are committed
2382  *	to the disk.  The buffer is released when the output completes.
2383  */
2384 void
2385 babarrierwrite(struct buf *bp)
2386 {
2387 
2388 	bp->b_flags |= B_ASYNC | B_BARRIER;
2389 	(void) bwrite(bp);
2390 }
2391 
2392 /*
2393  *	bbarrierwrite:
2394  *
2395  *	Synchronous barrier write.  Start output on a buffer and wait for
2396  *	it to complete.  Place a write barrier after this write so that
2397  *	this buffer and all buffers written before it are committed to
2398  *	the disk before any buffers written after this write are committed
2399  *	to the disk.  The buffer is released when the output completes.
2400  */
2401 int
2402 bbarrierwrite(struct buf *bp)
2403 {
2404 
2405 	bp->b_flags |= B_BARRIER;
2406 	return (bwrite(bp));
2407 }
2408 
2409 /*
2410  *	bwillwrite:
2411  *
2412  *	Called prior to the locking of any vnodes when we are expecting to
2413  *	write.  We do not want to starve the buffer cache with too many
2414  *	dirty buffers so we block here.  By blocking prior to the locking
2415  *	of any vnodes we attempt to avoid the situation where a locked vnode
2416  *	prevents the various system daemons from flushing related buffers.
2417  */
2418 void
2419 bwillwrite(void)
2420 {
2421 
2422 	if (numdirtybuffers >= hidirtybuffers) {
2423 		mtx_lock(&bdirtylock);
2424 		while (numdirtybuffers >= hidirtybuffers) {
2425 			bdirtywait = 1;
2426 			msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
2427 			    "flswai", 0);
2428 		}
2429 		mtx_unlock(&bdirtylock);
2430 	}
2431 }
2432 
2433 /*
2434  * Return true if we have too many dirty buffers.
2435  */
2436 int
2437 buf_dirty_count_severe(void)
2438 {
2439 
2440 	return(numdirtybuffers >= hidirtybuffers);
2441 }
2442 
2443 /*
2444  *	brelse:
2445  *
2446  *	Release a busy buffer and, if requested, free its resources.  The
2447  *	buffer will be stashed in the appropriate bufqueue[] allowing it
2448  *	to be accessed later as a cache entity or reused for other purposes.
2449  */
2450 void
2451 brelse(struct buf *bp)
2452 {
2453 	int qindex;
2454 
2455 	/*
2456 	 * Many functions erroneously call brelse with a NULL bp under rare
2457 	 * error conditions. Simply return when called with a NULL bp.
2458 	 */
2459 	if (bp == NULL)
2460 		return;
2461 	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
2462 	    bp, bp->b_vp, bp->b_flags);
2463 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2464 	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2465 	KASSERT((bp->b_flags & B_VMIO) != 0 || (bp->b_flags & B_NOREUSE) == 0,
2466 	    ("brelse: non-VMIO buffer marked NOREUSE"));
2467 
2468 	if (BUF_LOCKRECURSED(bp)) {
2469 		/*
2470 		 * Do not process, in particular, do not handle the
2471 		 * B_INVAL/B_RELBUF and do not release to free list.
2472 		 */
2473 		BUF_UNLOCK(bp);
2474 		return;
2475 	}
2476 
2477 	if (bp->b_flags & B_MANAGED) {
2478 		bqrelse(bp);
2479 		return;
2480 	}
2481 
2482 	if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
2483 		BO_LOCK(bp->b_bufobj);
2484 		bp->b_vflags &= ~BV_BKGRDERR;
2485 		BO_UNLOCK(bp->b_bufobj);
2486 		bdirty(bp);
2487 	}
2488 	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2489 	    (bp->b_error != ENXIO || !LIST_EMPTY(&bp->b_dep)) &&
2490 	    !(bp->b_flags & B_INVAL)) {
2491 		/*
2492 		 * Failed write, redirty.  All errors except ENXIO (which
2493 		 * means the device is gone) are treated as being
2494 		 * transient.
2495 		 *
2496 		 * XXX Treating EIO as transient is not correct; the
2497 		 * contract with the local storage device drivers is that
2498 		 * they will only return EIO once the I/O is no longer
2499 		 * retriable.  Network I/O also respects this through the
2500 		 * guarantees of TCP and/or the internal retries of NFS.
2501 		 * ENOMEM might be transient, but we also have no way of
2502 		 * knowing when its ok to retry/reschedule.  In general,
2503 		 * this entire case should be made obsolete through better
2504 		 * error handling/recovery and resource scheduling.
2505 		 *
2506 		 * Do this also for buffers that failed with ENXIO, but have
2507 		 * non-empty dependencies - the soft updates code might need
2508 		 * to access the buffer to untangle them.
2509 		 *
2510 		 * Must clear BIO_ERROR to prevent pages from being scrapped.
2511 		 */
2512 		bp->b_ioflags &= ~BIO_ERROR;
2513 		bdirty(bp);
2514 	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
2515 	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
2516 		/*
2517 		 * Either a failed read I/O, or we were asked to free or not
2518 		 * cache the buffer, or we failed to write to a device that's
2519 		 * no longer present.
2520 		 */
2521 		bp->b_flags |= B_INVAL;
2522 		if (!LIST_EMPTY(&bp->b_dep))
2523 			buf_deallocate(bp);
2524 		if (bp->b_flags & B_DELWRI)
2525 			bdirtysub();
2526 		bp->b_flags &= ~(B_DELWRI | B_CACHE);
2527 		if ((bp->b_flags & B_VMIO) == 0) {
2528 			allocbuf(bp, 0);
2529 			if (bp->b_vp)
2530 				brelvp(bp);
2531 		}
2532 	}
2533 
2534 	/*
2535 	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_truncate()
2536 	 * is called with B_DELWRI set, the underlying pages may wind up
2537 	 * getting freed causing a previous write (bdwrite()) to get 'lost'
2538 	 * because pages associated with a B_DELWRI bp are marked clean.
2539 	 *
2540 	 * We still allow the B_INVAL case to call vfs_vmio_truncate(), even
2541 	 * if B_DELWRI is set.
2542 	 */
2543 	if (bp->b_flags & B_DELWRI)
2544 		bp->b_flags &= ~B_RELBUF;
2545 
2546 	/*
2547 	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
2548 	 * constituted, not even NFS buffers now.  Two flags effect this.  If
2549 	 * B_INVAL, the struct buf is invalidated but the VM object is kept
2550 	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
2551 	 *
2552 	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
2553 	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
2554 	 * buffer is also B_INVAL because it hits the re-dirtying code above.
2555 	 *
2556 	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
2557 	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
2558 	 * the commit state and we cannot afford to lose the buffer. If the
2559 	 * buffer has a background write in progress, we need to keep it
2560 	 * around to prevent it from being reconstituted and starting a second
2561 	 * background write.
2562 	 */
2563 	if ((bp->b_flags & B_VMIO) && (bp->b_flags & B_NOCACHE ||
2564 	    (bp->b_ioflags & BIO_ERROR && bp->b_iocmd == BIO_READ)) &&
2565 	    !(bp->b_vp->v_mount != NULL &&
2566 	    (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
2567 	    !vn_isdisk(bp->b_vp, NULL) && (bp->b_flags & B_DELWRI))) {
2568 		vfs_vmio_invalidate(bp);
2569 		allocbuf(bp, 0);
2570 	}
2571 
2572 	if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0 ||
2573 	    (bp->b_flags & (B_DELWRI | B_NOREUSE)) == B_NOREUSE) {
2574 		allocbuf(bp, 0);
2575 		bp->b_flags &= ~B_NOREUSE;
2576 		if (bp->b_vp != NULL)
2577 			brelvp(bp);
2578 	}
2579 
2580 	/*
2581 	 * If the buffer has junk contents signal it and eventually
2582 	 * clean up B_DELWRI and diassociate the vnode so that gbincore()
2583 	 * doesn't find it.
2584 	 */
2585 	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
2586 	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
2587 		bp->b_flags |= B_INVAL;
2588 	if (bp->b_flags & B_INVAL) {
2589 		if (bp->b_flags & B_DELWRI)
2590 			bundirty(bp);
2591 		if (bp->b_vp)
2592 			brelvp(bp);
2593 	}
2594 
2595 	buf_track(bp, __func__);
2596 
2597 	/* buffers with no memory */
2598 	if (bp->b_bufsize == 0) {
2599 		buf_free(bp);
2600 		return;
2601 	}
2602 	/* buffers with junk contents */
2603 	if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
2604 	    (bp->b_ioflags & BIO_ERROR)) {
2605 		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
2606 		if (bp->b_vflags & BV_BKGRDINPROG)
2607 			panic("losing buffer 2");
2608 		qindex = QUEUE_CLEAN;
2609 		bp->b_flags |= B_AGE;
2610 	/* remaining buffers */
2611 	} else if (bp->b_flags & B_DELWRI)
2612 		qindex = QUEUE_DIRTY;
2613 	else
2614 		qindex = QUEUE_CLEAN;
2615 
2616 	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
2617 		panic("brelse: not dirty");
2618 
2619 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_RELBUF | B_DIRECT);
2620 	/* binsfree unlocks bp. */
2621 	binsfree(bp, qindex);
2622 }
2623 
2624 /*
2625  * Release a buffer back to the appropriate queue but do not try to free
2626  * it.  The buffer is expected to be used again soon.
2627  *
2628  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
2629  * biodone() to requeue an async I/O on completion.  It is also used when
2630  * known good buffers need to be requeued but we think we may need the data
2631  * again soon.
2632  *
2633  * XXX we should be able to leave the B_RELBUF hint set on completion.
2634  */
2635 void
2636 bqrelse(struct buf *bp)
2637 {
2638 	int qindex;
2639 
2640 	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2641 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2642 	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2643 
2644 	qindex = QUEUE_NONE;
2645 	if (BUF_LOCKRECURSED(bp)) {
2646 		/* do not release to free list */
2647 		BUF_UNLOCK(bp);
2648 		return;
2649 	}
2650 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
2651 
2652 	if (bp->b_flags & B_MANAGED) {
2653 		if (bp->b_flags & B_REMFREE)
2654 			bremfreef(bp);
2655 		goto out;
2656 	}
2657 
2658 	/* buffers with stale but valid contents */
2659 	if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
2660 	    BV_BKGRDERR)) == BV_BKGRDERR) {
2661 		BO_LOCK(bp->b_bufobj);
2662 		bp->b_vflags &= ~BV_BKGRDERR;
2663 		BO_UNLOCK(bp->b_bufobj);
2664 		qindex = QUEUE_DIRTY;
2665 	} else {
2666 		if ((bp->b_flags & B_DELWRI) == 0 &&
2667 		    (bp->b_xflags & BX_VNDIRTY))
2668 			panic("bqrelse: not dirty");
2669 		if ((bp->b_flags & B_NOREUSE) != 0) {
2670 			brelse(bp);
2671 			return;
2672 		}
2673 		qindex = QUEUE_CLEAN;
2674 	}
2675 	buf_track(bp, __func__);
2676 	/* binsfree unlocks bp. */
2677 	binsfree(bp, qindex);
2678 	return;
2679 
2680 out:
2681 	buf_track(bp, __func__);
2682 	/* unlock */
2683 	BUF_UNLOCK(bp);
2684 }
2685 
2686 /*
2687  * Complete I/O to a VMIO backed page.  Validate the pages as appropriate,
2688  * restore bogus pages.
2689  */
2690 static void
2691 vfs_vmio_iodone(struct buf *bp)
2692 {
2693 	vm_ooffset_t foff;
2694 	vm_page_t m;
2695 	vm_object_t obj;
2696 	struct vnode *vp;
2697 	int i, iosize, resid;
2698 	bool bogus;
2699 
2700 	obj = bp->b_bufobj->bo_object;
2701 	KASSERT(obj->paging_in_progress >= bp->b_npages,
2702 	    ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
2703 	    obj->paging_in_progress, bp->b_npages));
2704 
2705 	vp = bp->b_vp;
2706 	KASSERT(vp->v_holdcnt > 0,
2707 	    ("vfs_vmio_iodone: vnode %p has zero hold count", vp));
2708 	KASSERT(vp->v_object != NULL,
2709 	    ("vfs_vmio_iodone: vnode %p has no vm_object", vp));
2710 
2711 	foff = bp->b_offset;
2712 	KASSERT(bp->b_offset != NOOFFSET,
2713 	    ("vfs_vmio_iodone: bp %p has no buffer offset", bp));
2714 
2715 	bogus = false;
2716 	iosize = bp->b_bcount - bp->b_resid;
2717 	VM_OBJECT_WLOCK(obj);
2718 	for (i = 0; i < bp->b_npages; i++) {
2719 		resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
2720 		if (resid > iosize)
2721 			resid = iosize;
2722 
2723 		/*
2724 		 * cleanup bogus pages, restoring the originals
2725 		 */
2726 		m = bp->b_pages[i];
2727 		if (m == bogus_page) {
2728 			bogus = true;
2729 			m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2730 			if (m == NULL)
2731 				panic("biodone: page disappeared!");
2732 			bp->b_pages[i] = m;
2733 		} else if ((bp->b_iocmd == BIO_READ) && resid > 0) {
2734 			/*
2735 			 * In the write case, the valid and clean bits are
2736 			 * already changed correctly ( see bdwrite() ), so we
2737 			 * only need to do this here in the read case.
2738 			 */
2739 			KASSERT((m->dirty & vm_page_bits(foff & PAGE_MASK,
2740 			    resid)) == 0, ("vfs_vmio_iodone: page %p "
2741 			    "has unexpected dirty bits", m));
2742 			vfs_page_set_valid(bp, foff, m);
2743 		}
2744 		KASSERT(OFF_TO_IDX(foff) == m->pindex,
2745 		    ("vfs_vmio_iodone: foff(%jd)/pindex(%ju) mismatch",
2746 		    (intmax_t)foff, (uintmax_t)m->pindex));
2747 
2748 		vm_page_sunbusy(m);
2749 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2750 		iosize -= resid;
2751 	}
2752 	vm_object_pip_wakeupn(obj, bp->b_npages);
2753 	VM_OBJECT_WUNLOCK(obj);
2754 	if (bogus && buf_mapped(bp)) {
2755 		BUF_CHECK_MAPPED(bp);
2756 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
2757 		    bp->b_pages, bp->b_npages);
2758 	}
2759 }
2760 
2761 /*
2762  * Unwire a page held by a buf and place it on the appropriate vm queue.
2763  */
2764 static void
2765 vfs_vmio_unwire(struct buf *bp, vm_page_t m)
2766 {
2767 	bool freed;
2768 
2769 	vm_page_lock(m);
2770 	if (vm_page_unwire_noq(m)) {
2771 		/*
2772 		 * Determine if the page should be freed before adding
2773 		 * it to the inactive queue.
2774 		 */
2775 		if (m->valid == 0) {
2776 			freed = !vm_page_busied(m);
2777 			if (freed)
2778 				vm_page_free(m);
2779 		} else if ((bp->b_flags & B_DIRECT) != 0)
2780 			freed = vm_page_try_to_free(m);
2781 		else
2782 			freed = false;
2783 		if (!freed) {
2784 			/*
2785 			 * If the page is unlikely to be reused, let the
2786 			 * VM know.  Otherwise, maintain LRU.
2787 			 */
2788 			if ((bp->b_flags & B_NOREUSE) != 0)
2789 				vm_page_deactivate_noreuse(m);
2790 			else if (m->queue == PQ_ACTIVE)
2791 				vm_page_reference(m);
2792 			else if (m->queue != PQ_INACTIVE)
2793 				vm_page_deactivate(m);
2794 			else
2795 				vm_page_requeue(m);
2796 		}
2797 	}
2798 	vm_page_unlock(m);
2799 }
2800 
2801 /*
2802  * Perform page invalidation when a buffer is released.  The fully invalid
2803  * pages will be reclaimed later in vfs_vmio_truncate().
2804  */
2805 static void
2806 vfs_vmio_invalidate(struct buf *bp)
2807 {
2808 	vm_object_t obj;
2809 	vm_page_t m;
2810 	int i, resid, poffset, presid;
2811 
2812 	if (buf_mapped(bp)) {
2813 		BUF_CHECK_MAPPED(bp);
2814 		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
2815 	} else
2816 		BUF_CHECK_UNMAPPED(bp);
2817 	/*
2818 	 * Get the base offset and length of the buffer.  Note that
2819 	 * in the VMIO case if the buffer block size is not
2820 	 * page-aligned then b_data pointer may not be page-aligned.
2821 	 * But our b_pages[] array *IS* page aligned.
2822 	 *
2823 	 * block sizes less then DEV_BSIZE (usually 512) are not
2824 	 * supported due to the page granularity bits (m->valid,
2825 	 * m->dirty, etc...).
2826 	 *
2827 	 * See man buf(9) for more information
2828 	 */
2829 	obj = bp->b_bufobj->bo_object;
2830 	resid = bp->b_bufsize;
2831 	poffset = bp->b_offset & PAGE_MASK;
2832 	VM_OBJECT_WLOCK(obj);
2833 	for (i = 0; i < bp->b_npages; i++) {
2834 		m = bp->b_pages[i];
2835 		if (m == bogus_page)
2836 			panic("vfs_vmio_invalidate: Unexpected bogus page.");
2837 		bp->b_pages[i] = NULL;
2838 
2839 		presid = resid > (PAGE_SIZE - poffset) ?
2840 		    (PAGE_SIZE - poffset) : resid;
2841 		KASSERT(presid >= 0, ("brelse: extra page"));
2842 		while (vm_page_xbusied(m)) {
2843 			vm_page_lock(m);
2844 			VM_OBJECT_WUNLOCK(obj);
2845 			vm_page_busy_sleep(m, "mbncsh", true);
2846 			VM_OBJECT_WLOCK(obj);
2847 		}
2848 		if (pmap_page_wired_mappings(m) == 0)
2849 			vm_page_set_invalid(m, poffset, presid);
2850 		vfs_vmio_unwire(bp, m);
2851 		resid -= presid;
2852 		poffset = 0;
2853 	}
2854 	VM_OBJECT_WUNLOCK(obj);
2855 	bp->b_npages = 0;
2856 }
2857 
2858 /*
2859  * Page-granular truncation of an existing VMIO buffer.
2860  */
2861 static void
2862 vfs_vmio_truncate(struct buf *bp, int desiredpages)
2863 {
2864 	vm_object_t obj;
2865 	vm_page_t m;
2866 	int i;
2867 
2868 	if (bp->b_npages == desiredpages)
2869 		return;
2870 
2871 	if (buf_mapped(bp)) {
2872 		BUF_CHECK_MAPPED(bp);
2873 		pmap_qremove((vm_offset_t)trunc_page((vm_offset_t)bp->b_data) +
2874 		    (desiredpages << PAGE_SHIFT), bp->b_npages - desiredpages);
2875 	} else
2876 		BUF_CHECK_UNMAPPED(bp);
2877 	obj = bp->b_bufobj->bo_object;
2878 	if (obj != NULL)
2879 		VM_OBJECT_WLOCK(obj);
2880 	for (i = desiredpages; i < bp->b_npages; i++) {
2881 		m = bp->b_pages[i];
2882 		KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
2883 		bp->b_pages[i] = NULL;
2884 		vfs_vmio_unwire(bp, m);
2885 	}
2886 	if (obj != NULL)
2887 		VM_OBJECT_WUNLOCK(obj);
2888 	bp->b_npages = desiredpages;
2889 }
2890 
2891 /*
2892  * Byte granular extension of VMIO buffers.
2893  */
2894 static void
2895 vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
2896 {
2897 	/*
2898 	 * We are growing the buffer, possibly in a
2899 	 * byte-granular fashion.
2900 	 */
2901 	vm_object_t obj;
2902 	vm_offset_t toff;
2903 	vm_offset_t tinc;
2904 	vm_page_t m;
2905 
2906 	/*
2907 	 * Step 1, bring in the VM pages from the object, allocating
2908 	 * them if necessary.  We must clear B_CACHE if these pages
2909 	 * are not valid for the range covered by the buffer.
2910 	 */
2911 	obj = bp->b_bufobj->bo_object;
2912 	VM_OBJECT_WLOCK(obj);
2913 	if (bp->b_npages < desiredpages) {
2914 		/*
2915 		 * We must allocate system pages since blocking
2916 		 * here could interfere with paging I/O, no
2917 		 * matter which process we are.
2918 		 *
2919 		 * Only exclusive busy can be tested here.
2920 		 * Blocking on shared busy might lead to
2921 		 * deadlocks once allocbuf() is called after
2922 		 * pages are vfs_busy_pages().
2923 		 */
2924 		(void)vm_page_grab_pages(obj,
2925 		    OFF_TO_IDX(bp->b_offset) + bp->b_npages,
2926 		    VM_ALLOC_SYSTEM | VM_ALLOC_IGN_SBUSY |
2927 		    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED,
2928 		    &bp->b_pages[bp->b_npages], desiredpages - bp->b_npages);
2929 		bp->b_npages = desiredpages;
2930 	}
2931 
2932 	/*
2933 	 * Step 2.  We've loaded the pages into the buffer,
2934 	 * we have to figure out if we can still have B_CACHE
2935 	 * set.  Note that B_CACHE is set according to the
2936 	 * byte-granular range ( bcount and size ), not the
2937 	 * aligned range ( newbsize ).
2938 	 *
2939 	 * The VM test is against m->valid, which is DEV_BSIZE
2940 	 * aligned.  Needless to say, the validity of the data
2941 	 * needs to also be DEV_BSIZE aligned.  Note that this
2942 	 * fails with NFS if the server or some other client
2943 	 * extends the file's EOF.  If our buffer is resized,
2944 	 * B_CACHE may remain set! XXX
2945 	 */
2946 	toff = bp->b_bcount;
2947 	tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2948 	while ((bp->b_flags & B_CACHE) && toff < size) {
2949 		vm_pindex_t pi;
2950 
2951 		if (tinc > (size - toff))
2952 			tinc = size - toff;
2953 		pi = ((bp->b_offset & PAGE_MASK) + toff) >> PAGE_SHIFT;
2954 		m = bp->b_pages[pi];
2955 		vfs_buf_test_cache(bp, bp->b_offset, toff, tinc, m);
2956 		toff += tinc;
2957 		tinc = PAGE_SIZE;
2958 	}
2959 	VM_OBJECT_WUNLOCK(obj);
2960 
2961 	/*
2962 	 * Step 3, fixup the KVA pmap.
2963 	 */
2964 	if (buf_mapped(bp))
2965 		bpmap_qenter(bp);
2966 	else
2967 		BUF_CHECK_UNMAPPED(bp);
2968 }
2969 
2970 /*
2971  * Check to see if a block at a particular lbn is available for a clustered
2972  * write.
2973  */
2974 static int
2975 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
2976 {
2977 	struct buf *bpa;
2978 	int match;
2979 
2980 	match = 0;
2981 
2982 	/* If the buf isn't in core skip it */
2983 	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
2984 		return (0);
2985 
2986 	/* If the buf is busy we don't want to wait for it */
2987 	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2988 		return (0);
2989 
2990 	/* Only cluster with valid clusterable delayed write buffers */
2991 	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
2992 	    (B_DELWRI | B_CLUSTEROK))
2993 		goto done;
2994 
2995 	if (bpa->b_bufsize != size)
2996 		goto done;
2997 
2998 	/*
2999 	 * Check to see if it is in the expected place on disk and that the
3000 	 * block has been mapped.
3001 	 */
3002 	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
3003 		match = 1;
3004 done:
3005 	BUF_UNLOCK(bpa);
3006 	return (match);
3007 }
3008 
3009 /*
3010  *	vfs_bio_awrite:
3011  *
3012  *	Implement clustered async writes for clearing out B_DELWRI buffers.
3013  *	This is much better then the old way of writing only one buffer at
3014  *	a time.  Note that we may not be presented with the buffers in the
3015  *	correct order, so we search for the cluster in both directions.
3016  */
3017 int
3018 vfs_bio_awrite(struct buf *bp)
3019 {
3020 	struct bufobj *bo;
3021 	int i;
3022 	int j;
3023 	daddr_t lblkno = bp->b_lblkno;
3024 	struct vnode *vp = bp->b_vp;
3025 	int ncl;
3026 	int nwritten;
3027 	int size;
3028 	int maxcl;
3029 	int gbflags;
3030 
3031 	bo = &vp->v_bufobj;
3032 	gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
3033 	/*
3034 	 * right now we support clustered writing only to regular files.  If
3035 	 * we find a clusterable block we could be in the middle of a cluster
3036 	 * rather then at the beginning.
3037 	 */
3038 	if ((vp->v_type == VREG) &&
3039 	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
3040 	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
3041 
3042 		size = vp->v_mount->mnt_stat.f_iosize;
3043 		maxcl = MAXPHYS / size;
3044 
3045 		BO_RLOCK(bo);
3046 		for (i = 1; i < maxcl; i++)
3047 			if (vfs_bio_clcheck(vp, size, lblkno + i,
3048 			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
3049 				break;
3050 
3051 		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
3052 			if (vfs_bio_clcheck(vp, size, lblkno - j,
3053 			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
3054 				break;
3055 		BO_RUNLOCK(bo);
3056 		--j;
3057 		ncl = i + j;
3058 		/*
3059 		 * this is a possible cluster write
3060 		 */
3061 		if (ncl != 1) {
3062 			BUF_UNLOCK(bp);
3063 			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
3064 			    gbflags);
3065 			return (nwritten);
3066 		}
3067 	}
3068 	bremfree(bp);
3069 	bp->b_flags |= B_ASYNC;
3070 	/*
3071 	 * default (old) behavior, writing out only one block
3072 	 *
3073 	 * XXX returns b_bufsize instead of b_bcount for nwritten?
3074 	 */
3075 	nwritten = bp->b_bufsize;
3076 	(void) bwrite(bp);
3077 
3078 	return (nwritten);
3079 }
3080 
3081 /*
3082  *	getnewbuf_kva:
3083  *
3084  *	Allocate KVA for an empty buf header according to gbflags.
3085  */
3086 static int
3087 getnewbuf_kva(struct buf *bp, int gbflags, int maxsize)
3088 {
3089 
3090 	if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_UNMAPPED) {
3091 		/*
3092 		 * In order to keep fragmentation sane we only allocate kva
3093 		 * in BKVASIZE chunks.  XXX with vmem we can do page size.
3094 		 */
3095 		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
3096 
3097 		if (maxsize != bp->b_kvasize &&
3098 		    bufkva_alloc(bp, maxsize, gbflags))
3099 			return (ENOSPC);
3100 	}
3101 	return (0);
3102 }
3103 
3104 /*
3105  *	getnewbuf:
3106  *
3107  *	Find and initialize a new buffer header, freeing up existing buffers
3108  *	in the bufqueues as necessary.  The new buffer is returned locked.
3109  *
3110  *	We block if:
3111  *		We have insufficient buffer headers
3112  *		We have insufficient buffer space
3113  *		buffer_arena is too fragmented ( space reservation fails )
3114  *		If we have to flush dirty buffers ( but we try to avoid this )
3115  *
3116  *	The caller is responsible for releasing the reserved bufspace after
3117  *	allocbuf() is called.
3118  */
3119 static struct buf *
3120 getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int maxsize, int gbflags)
3121 {
3122 	struct bufdomain *bd;
3123 	struct buf *bp;
3124 	bool metadata, reserved;
3125 
3126 	bp = NULL;
3127 	KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3128 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3129 	if (!unmapped_buf_allowed)
3130 		gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3131 
3132 	if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
3133 	    vp->v_type == VCHR)
3134 		metadata = true;
3135 	else
3136 		metadata = false;
3137 	if (vp == NULL)
3138 		bd = &bdclean[0];
3139 	else
3140 		bd = &bdclean[vp->v_bufobj.bo_domain];
3141 
3142 	counter_u64_add(getnewbufcalls, 1);
3143 	reserved = false;
3144 	do {
3145 		if (reserved == false &&
3146 		    bufspace_reserve(bd, maxsize, metadata) != 0) {
3147 			counter_u64_add(getnewbufrestarts, 1);
3148 			continue;
3149 		}
3150 		reserved = true;
3151 		if ((bp = buf_alloc(bd)) == NULL) {
3152 			counter_u64_add(getnewbufrestarts, 1);
3153 			continue;
3154 		}
3155 		if (getnewbuf_kva(bp, gbflags, maxsize) == 0)
3156 			return (bp);
3157 		break;
3158 	} while (buf_recycle(bd, false) == 0);
3159 
3160 	if (reserved)
3161 		bufspace_release(bd, maxsize);
3162 	if (bp != NULL) {
3163 		bp->b_flags |= B_INVAL;
3164 		brelse(bp);
3165 	}
3166 	bufspace_wait(bd, vp, gbflags, slpflag, slptimeo);
3167 
3168 	return (NULL);
3169 }
3170 
3171 /*
3172  *	buf_daemon:
3173  *
3174  *	buffer flushing daemon.  Buffers are normally flushed by the
3175  *	update daemon but if it cannot keep up this process starts to
3176  *	take the load in an attempt to prevent getnewbuf() from blocking.
3177  */
3178 static struct kproc_desc buf_kp = {
3179 	"bufdaemon",
3180 	buf_daemon,
3181 	&bufdaemonproc
3182 };
3183 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
3184 
3185 static int
3186 buf_flush(struct vnode *vp, int target)
3187 {
3188 	int flushed;
3189 
3190 	flushed = flushbufqueues(vp, target, 0);
3191 	if (flushed == 0) {
3192 		/*
3193 		 * Could not find any buffers without rollback
3194 		 * dependencies, so just write the first one
3195 		 * in the hopes of eventually making progress.
3196 		 */
3197 		if (vp != NULL && target > 2)
3198 			target /= 2;
3199 		flushbufqueues(vp, target, 1);
3200 	}
3201 	return (flushed);
3202 }
3203 
3204 static void
3205 buf_daemon()
3206 {
3207 	int lodirty;
3208 	int i;
3209 
3210 	/*
3211 	 * This process needs to be suspended prior to shutdown sync.
3212 	 */
3213 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
3214 	    SHUTDOWN_PRI_LAST);
3215 
3216 	/*
3217 	 * Start the buf clean daemons as children threads.
3218 	 */
3219 	for (i = 0 ; i < clean_domains; i++) {
3220 		int error;
3221 
3222 		error = kthread_add((void (*)(void *))bufspace_daemon,
3223 		    &bdclean[i], curproc, NULL, 0, 0, "bufspacedaemon-%d", i);
3224 		if (error)
3225 			panic("error %d spawning bufspace daemon", error);
3226 	}
3227 
3228 	/*
3229 	 * This process is allowed to take the buffer cache to the limit
3230 	 */
3231 	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
3232 	mtx_lock(&bdlock);
3233 	for (;;) {
3234 		bd_request = 0;
3235 		mtx_unlock(&bdlock);
3236 
3237 		kproc_suspend_check(bufdaemonproc);
3238 		lodirty = lodirtybuffers;
3239 		if (bd_speedupreq) {
3240 			lodirty = numdirtybuffers / 2;
3241 			bd_speedupreq = 0;
3242 		}
3243 		/*
3244 		 * Do the flush.  Limit the amount of in-transit I/O we
3245 		 * allow to build up, otherwise we would completely saturate
3246 		 * the I/O system.
3247 		 */
3248 		while (numdirtybuffers > lodirty) {
3249 			if (buf_flush(NULL, numdirtybuffers - lodirty) == 0)
3250 				break;
3251 			kern_yield(PRI_USER);
3252 		}
3253 
3254 		/*
3255 		 * Only clear bd_request if we have reached our low water
3256 		 * mark.  The buf_daemon normally waits 1 second and
3257 		 * then incrementally flushes any dirty buffers that have
3258 		 * built up, within reason.
3259 		 *
3260 		 * If we were unable to hit our low water mark and couldn't
3261 		 * find any flushable buffers, we sleep for a short period
3262 		 * to avoid endless loops on unlockable buffers.
3263 		 */
3264 		mtx_lock(&bdlock);
3265 		if (numdirtybuffers <= lodirtybuffers) {
3266 			/*
3267 			 * We reached our low water mark, reset the
3268 			 * request and sleep until we are needed again.
3269 			 * The sleep is just so the suspend code works.
3270 			 */
3271 			bd_request = 0;
3272 			/*
3273 			 * Do an extra wakeup in case dirty threshold
3274 			 * changed via sysctl and the explicit transition
3275 			 * out of shortfall was missed.
3276 			 */
3277 			bdirtywakeup();
3278 			if (runningbufspace <= lorunningspace)
3279 				runningwakeup();
3280 			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
3281 		} else {
3282 			/*
3283 			 * We couldn't find any flushable dirty buffers but
3284 			 * still have too many dirty buffers, we
3285 			 * have to sleep and try again.  (rare)
3286 			 */
3287 			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
3288 		}
3289 	}
3290 }
3291 
3292 /*
3293  *	flushbufqueues:
3294  *
3295  *	Try to flush a buffer in the dirty queue.  We must be careful to
3296  *	free up B_INVAL buffers instead of write them, which NFS is
3297  *	particularly sensitive to.
3298  */
3299 static int flushwithdeps = 0;
3300 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
3301     0, "Number of buffers flushed with dependecies that require rollbacks");
3302 
3303 static int
3304 flushbufqueues(struct vnode *lvp, int target, int flushdeps)
3305 {
3306 	struct bufqueue *bq;
3307 	struct buf *sentinel;
3308 	struct vnode *vp;
3309 	struct mount *mp;
3310 	struct buf *bp;
3311 	int hasdeps;
3312 	int flushed;
3313 	int error;
3314 	bool unlock;
3315 
3316 	flushed = 0;
3317 	bq = &bqdirty;
3318 	bp = NULL;
3319 	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
3320 	sentinel->b_qindex = QUEUE_SENTINEL;
3321 	BQ_LOCK(bq);
3322 	TAILQ_INSERT_HEAD(&bq->bq_queue, sentinel, b_freelist);
3323 	BQ_UNLOCK(bq);
3324 	while (flushed != target) {
3325 		maybe_yield();
3326 		BQ_LOCK(bq);
3327 		bp = TAILQ_NEXT(sentinel, b_freelist);
3328 		if (bp != NULL) {
3329 			TAILQ_REMOVE(&bq->bq_queue, sentinel, b_freelist);
3330 			TAILQ_INSERT_AFTER(&bq->bq_queue, bp, sentinel,
3331 			    b_freelist);
3332 		} else {
3333 			BQ_UNLOCK(bq);
3334 			break;
3335 		}
3336 		/*
3337 		 * Skip sentinels inserted by other invocations of the
3338 		 * flushbufqueues(), taking care to not reorder them.
3339 		 *
3340 		 * Only flush the buffers that belong to the
3341 		 * vnode locked by the curthread.
3342 		 */
3343 		if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
3344 		    bp->b_vp != lvp)) {
3345 			BQ_UNLOCK(bq);
3346 			continue;
3347 		}
3348 		error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
3349 		BQ_UNLOCK(bq);
3350 		if (error != 0)
3351 			continue;
3352 
3353 		/*
3354 		 * BKGRDINPROG can only be set with the buf and bufobj
3355 		 * locks both held.  We tolerate a race to clear it here.
3356 		 */
3357 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
3358 		    (bp->b_flags & B_DELWRI) == 0) {
3359 			BUF_UNLOCK(bp);
3360 			continue;
3361 		}
3362 		if (bp->b_flags & B_INVAL) {
3363 			bremfreef(bp);
3364 			brelse(bp);
3365 			flushed++;
3366 			continue;
3367 		}
3368 
3369 		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
3370 			if (flushdeps == 0) {
3371 				BUF_UNLOCK(bp);
3372 				continue;
3373 			}
3374 			hasdeps = 1;
3375 		} else
3376 			hasdeps = 0;
3377 		/*
3378 		 * We must hold the lock on a vnode before writing
3379 		 * one of its buffers. Otherwise we may confuse, or
3380 		 * in the case of a snapshot vnode, deadlock the
3381 		 * system.
3382 		 *
3383 		 * The lock order here is the reverse of the normal
3384 		 * of vnode followed by buf lock.  This is ok because
3385 		 * the NOWAIT will prevent deadlock.
3386 		 */
3387 		vp = bp->b_vp;
3388 		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
3389 			BUF_UNLOCK(bp);
3390 			continue;
3391 		}
3392 		if (lvp == NULL) {
3393 			unlock = true;
3394 			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
3395 		} else {
3396 			ASSERT_VOP_LOCKED(vp, "getbuf");
3397 			unlock = false;
3398 			error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
3399 			    vn_lock(vp, LK_TRYUPGRADE);
3400 		}
3401 		if (error == 0) {
3402 			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
3403 			    bp, bp->b_vp, bp->b_flags);
3404 			if (curproc == bufdaemonproc) {
3405 				vfs_bio_awrite(bp);
3406 			} else {
3407 				bremfree(bp);
3408 				bwrite(bp);
3409 				counter_u64_add(notbufdflushes, 1);
3410 			}
3411 			vn_finished_write(mp);
3412 			if (unlock)
3413 				VOP_UNLOCK(vp, 0);
3414 			flushwithdeps += hasdeps;
3415 			flushed++;
3416 
3417 			/*
3418 			 * Sleeping on runningbufspace while holding
3419 			 * vnode lock leads to deadlock.
3420 			 */
3421 			if (curproc == bufdaemonproc &&
3422 			    runningbufspace > hirunningspace)
3423 				waitrunningbufspace();
3424 			continue;
3425 		}
3426 		vn_finished_write(mp);
3427 		BUF_UNLOCK(bp);
3428 	}
3429 	BQ_LOCK(bq);
3430 	TAILQ_REMOVE(&bq->bq_queue, sentinel, b_freelist);
3431 	BQ_UNLOCK(bq);
3432 	free(sentinel, M_TEMP);
3433 	return (flushed);
3434 }
3435 
3436 /*
3437  * Check to see if a block is currently memory resident.
3438  */
3439 struct buf *
3440 incore(struct bufobj *bo, daddr_t blkno)
3441 {
3442 	struct buf *bp;
3443 
3444 	BO_RLOCK(bo);
3445 	bp = gbincore(bo, blkno);
3446 	BO_RUNLOCK(bo);
3447 	return (bp);
3448 }
3449 
3450 /*
3451  * Returns true if no I/O is needed to access the
3452  * associated VM object.  This is like incore except
3453  * it also hunts around in the VM system for the data.
3454  */
3455 
3456 static int
3457 inmem(struct vnode * vp, daddr_t blkno)
3458 {
3459 	vm_object_t obj;
3460 	vm_offset_t toff, tinc, size;
3461 	vm_page_t m;
3462 	vm_ooffset_t off;
3463 
3464 	ASSERT_VOP_LOCKED(vp, "inmem");
3465 
3466 	if (incore(&vp->v_bufobj, blkno))
3467 		return 1;
3468 	if (vp->v_mount == NULL)
3469 		return 0;
3470 	obj = vp->v_object;
3471 	if (obj == NULL)
3472 		return (0);
3473 
3474 	size = PAGE_SIZE;
3475 	if (size > vp->v_mount->mnt_stat.f_iosize)
3476 		size = vp->v_mount->mnt_stat.f_iosize;
3477 	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
3478 
3479 	VM_OBJECT_RLOCK(obj);
3480 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
3481 		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
3482 		if (!m)
3483 			goto notinmem;
3484 		tinc = size;
3485 		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
3486 			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
3487 		if (vm_page_is_valid(m,
3488 		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
3489 			goto notinmem;
3490 	}
3491 	VM_OBJECT_RUNLOCK(obj);
3492 	return 1;
3493 
3494 notinmem:
3495 	VM_OBJECT_RUNLOCK(obj);
3496 	return (0);
3497 }
3498 
3499 /*
3500  * Set the dirty range for a buffer based on the status of the dirty
3501  * bits in the pages comprising the buffer.  The range is limited
3502  * to the size of the buffer.
3503  *
3504  * Tell the VM system that the pages associated with this buffer
3505  * are clean.  This is used for delayed writes where the data is
3506  * going to go to disk eventually without additional VM intevention.
3507  *
3508  * Note that while we only really need to clean through to b_bcount, we
3509  * just go ahead and clean through to b_bufsize.
3510  */
3511 static void
3512 vfs_clean_pages_dirty_buf(struct buf *bp)
3513 {
3514 	vm_ooffset_t foff, noff, eoff;
3515 	vm_page_t m;
3516 	int i;
3517 
3518 	if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
3519 		return;
3520 
3521 	foff = bp->b_offset;
3522 	KASSERT(bp->b_offset != NOOFFSET,
3523 	    ("vfs_clean_pages_dirty_buf: no buffer offset"));
3524 
3525 	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
3526 	vfs_drain_busy_pages(bp);
3527 	vfs_setdirty_locked_object(bp);
3528 	for (i = 0; i < bp->b_npages; i++) {
3529 		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3530 		eoff = noff;
3531 		if (eoff > bp->b_offset + bp->b_bufsize)
3532 			eoff = bp->b_offset + bp->b_bufsize;
3533 		m = bp->b_pages[i];
3534 		vfs_page_set_validclean(bp, foff, m);
3535 		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
3536 		foff = noff;
3537 	}
3538 	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
3539 }
3540 
3541 static void
3542 vfs_setdirty_locked_object(struct buf *bp)
3543 {
3544 	vm_object_t object;
3545 	int i;
3546 
3547 	object = bp->b_bufobj->bo_object;
3548 	VM_OBJECT_ASSERT_WLOCKED(object);
3549 
3550 	/*
3551 	 * We qualify the scan for modified pages on whether the
3552 	 * object has been flushed yet.
3553 	 */
3554 	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) {
3555 		vm_offset_t boffset;
3556 		vm_offset_t eoffset;
3557 
3558 		/*
3559 		 * test the pages to see if they have been modified directly
3560 		 * by users through the VM system.
3561 		 */
3562 		for (i = 0; i < bp->b_npages; i++)
3563 			vm_page_test_dirty(bp->b_pages[i]);
3564 
3565 		/*
3566 		 * Calculate the encompassing dirty range, boffset and eoffset,
3567 		 * (eoffset - boffset) bytes.
3568 		 */
3569 
3570 		for (i = 0; i < bp->b_npages; i++) {
3571 			if (bp->b_pages[i]->dirty)
3572 				break;
3573 		}
3574 		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3575 
3576 		for (i = bp->b_npages - 1; i >= 0; --i) {
3577 			if (bp->b_pages[i]->dirty) {
3578 				break;
3579 			}
3580 		}
3581 		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3582 
3583 		/*
3584 		 * Fit it to the buffer.
3585 		 */
3586 
3587 		if (eoffset > bp->b_bcount)
3588 			eoffset = bp->b_bcount;
3589 
3590 		/*
3591 		 * If we have a good dirty range, merge with the existing
3592 		 * dirty range.
3593 		 */
3594 
3595 		if (boffset < eoffset) {
3596 			if (bp->b_dirtyoff > boffset)
3597 				bp->b_dirtyoff = boffset;
3598 			if (bp->b_dirtyend < eoffset)
3599 				bp->b_dirtyend = eoffset;
3600 		}
3601 	}
3602 }
3603 
3604 /*
3605  * Allocate the KVA mapping for an existing buffer.
3606  * If an unmapped buffer is provided but a mapped buffer is requested, take
3607  * also care to properly setup mappings between pages and KVA.
3608  */
3609 static void
3610 bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
3611 {
3612 	int bsize, maxsize, need_mapping, need_kva;
3613 	off_t offset;
3614 
3615 	need_mapping = bp->b_data == unmapped_buf &&
3616 	    (gbflags & GB_UNMAPPED) == 0;
3617 	need_kva = bp->b_kvabase == unmapped_buf &&
3618 	    bp->b_data == unmapped_buf &&
3619 	    (gbflags & GB_KVAALLOC) != 0;
3620 	if (!need_mapping && !need_kva)
3621 		return;
3622 
3623 	BUF_CHECK_UNMAPPED(bp);
3624 
3625 	if (need_mapping && bp->b_kvabase != unmapped_buf) {
3626 		/*
3627 		 * Buffer is not mapped, but the KVA was already
3628 		 * reserved at the time of the instantiation.  Use the
3629 		 * allocated space.
3630 		 */
3631 		goto has_addr;
3632 	}
3633 
3634 	/*
3635 	 * Calculate the amount of the address space we would reserve
3636 	 * if the buffer was mapped.
3637 	 */
3638 	bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
3639 	KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3640 	offset = blkno * bsize;
3641 	maxsize = size + (offset & PAGE_MASK);
3642 	maxsize = imax(maxsize, bsize);
3643 
3644 	while (bufkva_alloc(bp, maxsize, gbflags) != 0) {
3645 		if ((gbflags & GB_NOWAIT_BD) != 0) {
3646 			/*
3647 			 * XXXKIB: defragmentation cannot
3648 			 * succeed, not sure what else to do.
3649 			 */
3650 			panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
3651 		}
3652 		counter_u64_add(mappingrestarts, 1);
3653 		bufspace_wait(&bdclean[bp->b_domain], bp->b_vp, gbflags, 0, 0);
3654 	}
3655 has_addr:
3656 	if (need_mapping) {
3657 		/* b_offset is handled by bpmap_qenter. */
3658 		bp->b_data = bp->b_kvabase;
3659 		BUF_CHECK_MAPPED(bp);
3660 		bpmap_qenter(bp);
3661 	}
3662 }
3663 
3664 /*
3665  *	getblk:
3666  *
3667  *	Get a block given a specified block and offset into a file/device.
3668  *	The buffers B_DONE bit will be cleared on return, making it almost
3669  * 	ready for an I/O initiation.  B_INVAL may or may not be set on
3670  *	return.  The caller should clear B_INVAL prior to initiating a
3671  *	READ.
3672  *
3673  *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
3674  *	an existing buffer.
3675  *
3676  *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
3677  *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
3678  *	and then cleared based on the backing VM.  If the previous buffer is
3679  *	non-0-sized but invalid, B_CACHE will be cleared.
3680  *
3681  *	If getblk() must create a new buffer, the new buffer is returned with
3682  *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
3683  *	case it is returned with B_INVAL clear and B_CACHE set based on the
3684  *	backing VM.
3685  *
3686  *	getblk() also forces a bwrite() for any B_DELWRI buffer whos
3687  *	B_CACHE bit is clear.
3688  *
3689  *	What this means, basically, is that the caller should use B_CACHE to
3690  *	determine whether the buffer is fully valid or not and should clear
3691  *	B_INVAL prior to issuing a read.  If the caller intends to validate
3692  *	the buffer by loading its data area with something, the caller needs
3693  *	to clear B_INVAL.  If the caller does this without issuing an I/O,
3694  *	the caller should set B_CACHE ( as an optimization ), else the caller
3695  *	should issue the I/O and biodone() will set B_CACHE if the I/O was
3696  *	a write attempt or if it was a successful read.  If the caller
3697  *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
3698  *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
3699  */
3700 struct buf *
3701 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
3702     int flags)
3703 {
3704 	struct buf *bp;
3705 	struct bufobj *bo;
3706 	int bsize, error, maxsize, vmio;
3707 	off_t offset;
3708 
3709 	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
3710 	KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3711 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3712 	ASSERT_VOP_LOCKED(vp, "getblk");
3713 	if (size > maxbcachebuf)
3714 		panic("getblk: size(%d) > maxbcachebuf(%d)\n", size,
3715 		    maxbcachebuf);
3716 	if (!unmapped_buf_allowed)
3717 		flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3718 
3719 	bo = &vp->v_bufobj;
3720 loop:
3721 	BO_RLOCK(bo);
3722 	bp = gbincore(bo, blkno);
3723 	if (bp != NULL) {
3724 		int lockflags;
3725 		/*
3726 		 * Buffer is in-core.  If the buffer is not busy nor managed,
3727 		 * it must be on a queue.
3728 		 */
3729 		lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
3730 
3731 		if (flags & GB_LOCK_NOWAIT)
3732 			lockflags |= LK_NOWAIT;
3733 
3734 		error = BUF_TIMELOCK(bp, lockflags,
3735 		    BO_LOCKPTR(bo), "getblk", slpflag, slptimeo);
3736 
3737 		/*
3738 		 * If we slept and got the lock we have to restart in case
3739 		 * the buffer changed identities.
3740 		 */
3741 		if (error == ENOLCK)
3742 			goto loop;
3743 		/* We timed out or were interrupted. */
3744 		else if (error)
3745 			return (NULL);
3746 		/* If recursed, assume caller knows the rules. */
3747 		else if (BUF_LOCKRECURSED(bp))
3748 			goto end;
3749 
3750 		/*
3751 		 * The buffer is locked.  B_CACHE is cleared if the buffer is
3752 		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
3753 		 * and for a VMIO buffer B_CACHE is adjusted according to the
3754 		 * backing VM cache.
3755 		 */
3756 		if (bp->b_flags & B_INVAL)
3757 			bp->b_flags &= ~B_CACHE;
3758 		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
3759 			bp->b_flags |= B_CACHE;
3760 		if (bp->b_flags & B_MANAGED)
3761 			MPASS(bp->b_qindex == QUEUE_NONE);
3762 		else
3763 			bremfree(bp);
3764 
3765 		/*
3766 		 * check for size inconsistencies for non-VMIO case.
3767 		 */
3768 		if (bp->b_bcount != size) {
3769 			if ((bp->b_flags & B_VMIO) == 0 ||
3770 			    (size > bp->b_kvasize)) {
3771 				if (bp->b_flags & B_DELWRI) {
3772 					bp->b_flags |= B_NOCACHE;
3773 					bwrite(bp);
3774 				} else {
3775 					if (LIST_EMPTY(&bp->b_dep)) {
3776 						bp->b_flags |= B_RELBUF;
3777 						brelse(bp);
3778 					} else {
3779 						bp->b_flags |= B_NOCACHE;
3780 						bwrite(bp);
3781 					}
3782 				}
3783 				goto loop;
3784 			}
3785 		}
3786 
3787 		/*
3788 		 * Handle the case of unmapped buffer which should
3789 		 * become mapped, or the buffer for which KVA
3790 		 * reservation is requested.
3791 		 */
3792 		bp_unmapped_get_kva(bp, blkno, size, flags);
3793 
3794 		/*
3795 		 * If the size is inconsistent in the VMIO case, we can resize
3796 		 * the buffer.  This might lead to B_CACHE getting set or
3797 		 * cleared.  If the size has not changed, B_CACHE remains
3798 		 * unchanged from its previous state.
3799 		 */
3800 		allocbuf(bp, size);
3801 
3802 		KASSERT(bp->b_offset != NOOFFSET,
3803 		    ("getblk: no buffer offset"));
3804 
3805 		/*
3806 		 * A buffer with B_DELWRI set and B_CACHE clear must
3807 		 * be committed before we can return the buffer in
3808 		 * order to prevent the caller from issuing a read
3809 		 * ( due to B_CACHE not being set ) and overwriting
3810 		 * it.
3811 		 *
3812 		 * Most callers, including NFS and FFS, need this to
3813 		 * operate properly either because they assume they
3814 		 * can issue a read if B_CACHE is not set, or because
3815 		 * ( for example ) an uncached B_DELWRI might loop due
3816 		 * to softupdates re-dirtying the buffer.  In the latter
3817 		 * case, B_CACHE is set after the first write completes,
3818 		 * preventing further loops.
3819 		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
3820 		 * above while extending the buffer, we cannot allow the
3821 		 * buffer to remain with B_CACHE set after the write
3822 		 * completes or it will represent a corrupt state.  To
3823 		 * deal with this we set B_NOCACHE to scrap the buffer
3824 		 * after the write.
3825 		 *
3826 		 * We might be able to do something fancy, like setting
3827 		 * B_CACHE in bwrite() except if B_DELWRI is already set,
3828 		 * so the below call doesn't set B_CACHE, but that gets real
3829 		 * confusing.  This is much easier.
3830 		 */
3831 
3832 		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
3833 			bp->b_flags |= B_NOCACHE;
3834 			bwrite(bp);
3835 			goto loop;
3836 		}
3837 		bp->b_flags &= ~B_DONE;
3838 	} else {
3839 		/*
3840 		 * Buffer is not in-core, create new buffer.  The buffer
3841 		 * returned by getnewbuf() is locked.  Note that the returned
3842 		 * buffer is also considered valid (not marked B_INVAL).
3843 		 */
3844 		BO_RUNLOCK(bo);
3845 		/*
3846 		 * If the user does not want us to create the buffer, bail out
3847 		 * here.
3848 		 */
3849 		if (flags & GB_NOCREAT)
3850 			return NULL;
3851 		if (bdclean[bo->bo_domain].bd_freebuffers == 0 &&
3852 		    TD_IS_IDLETHREAD(curthread))
3853 			return NULL;
3854 
3855 		bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize;
3856 		KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3857 		offset = blkno * bsize;
3858 		vmio = vp->v_object != NULL;
3859 		if (vmio) {
3860 			maxsize = size + (offset & PAGE_MASK);
3861 		} else {
3862 			maxsize = size;
3863 			/* Do not allow non-VMIO notmapped buffers. */
3864 			flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3865 		}
3866 		maxsize = imax(maxsize, bsize);
3867 
3868 		bp = getnewbuf(vp, slpflag, slptimeo, maxsize, flags);
3869 		if (bp == NULL) {
3870 			if (slpflag || slptimeo)
3871 				return NULL;
3872 			/*
3873 			 * XXX This is here until the sleep path is diagnosed
3874 			 * enough to work under very low memory conditions.
3875 			 *
3876 			 * There's an issue on low memory, 4BSD+non-preempt
3877 			 * systems (eg MIPS routers with 32MB RAM) where buffer
3878 			 * exhaustion occurs without sleeping for buffer
3879 			 * reclaimation.  This just sticks in a loop and
3880 			 * constantly attempts to allocate a buffer, which
3881 			 * hits exhaustion and tries to wakeup bufdaemon.
3882 			 * This never happens because we never yield.
3883 			 *
3884 			 * The real solution is to identify and fix these cases
3885 			 * so we aren't effectively busy-waiting in a loop
3886 			 * until the reclaimation path has cycles to run.
3887 			 */
3888 			kern_yield(PRI_USER);
3889 			goto loop;
3890 		}
3891 
3892 		/*
3893 		 * This code is used to make sure that a buffer is not
3894 		 * created while the getnewbuf routine is blocked.
3895 		 * This can be a problem whether the vnode is locked or not.
3896 		 * If the buffer is created out from under us, we have to
3897 		 * throw away the one we just created.
3898 		 *
3899 		 * Note: this must occur before we associate the buffer
3900 		 * with the vp especially considering limitations in
3901 		 * the splay tree implementation when dealing with duplicate
3902 		 * lblkno's.
3903 		 */
3904 		BO_LOCK(bo);
3905 		if (gbincore(bo, blkno)) {
3906 			BO_UNLOCK(bo);
3907 			bp->b_flags |= B_INVAL;
3908 			bufspace_release(&bdclean[bp->b_domain], maxsize);
3909 			brelse(bp);
3910 			goto loop;
3911 		}
3912 
3913 		/*
3914 		 * Insert the buffer into the hash, so that it can
3915 		 * be found by incore.
3916 		 */
3917 		bp->b_blkno = bp->b_lblkno = blkno;
3918 		bp->b_offset = offset;
3919 		bgetvp(vp, bp);
3920 		BO_UNLOCK(bo);
3921 
3922 		/*
3923 		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
3924 		 * buffer size starts out as 0, B_CACHE will be set by
3925 		 * allocbuf() for the VMIO case prior to it testing the
3926 		 * backing store for validity.
3927 		 */
3928 
3929 		if (vmio) {
3930 			bp->b_flags |= B_VMIO;
3931 			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
3932 			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
3933 			    bp, vp->v_object, bp->b_bufobj->bo_object));
3934 		} else {
3935 			bp->b_flags &= ~B_VMIO;
3936 			KASSERT(bp->b_bufobj->bo_object == NULL,
3937 			    ("ARGH! has b_bufobj->bo_object %p %p\n",
3938 			    bp, bp->b_bufobj->bo_object));
3939 			BUF_CHECK_MAPPED(bp);
3940 		}
3941 
3942 		allocbuf(bp, size);
3943 		bufspace_release(&bdclean[bp->b_domain], maxsize);
3944 		bp->b_flags &= ~B_DONE;
3945 	}
3946 	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
3947 	BUF_ASSERT_HELD(bp);
3948 end:
3949 	buf_track(bp, __func__);
3950 	KASSERT(bp->b_bufobj == bo,
3951 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3952 	return (bp);
3953 }
3954 
3955 /*
3956  * Get an empty, disassociated buffer of given size.  The buffer is initially
3957  * set to B_INVAL.
3958  */
3959 struct buf *
3960 geteblk(int size, int flags)
3961 {
3962 	struct buf *bp;
3963 	int maxsize;
3964 
3965 	maxsize = (size + BKVAMASK) & ~BKVAMASK;
3966 	while ((bp = getnewbuf(NULL, 0, 0, maxsize, flags)) == NULL) {
3967 		if ((flags & GB_NOWAIT_BD) &&
3968 		    (curthread->td_pflags & TDP_BUFNEED) != 0)
3969 			return (NULL);
3970 	}
3971 	allocbuf(bp, size);
3972 	bufspace_release(&bdclean[bp->b_domain], maxsize);
3973 	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
3974 	BUF_ASSERT_HELD(bp);
3975 	return (bp);
3976 }
3977 
3978 /*
3979  * Truncate the backing store for a non-vmio buffer.
3980  */
3981 static void
3982 vfs_nonvmio_truncate(struct buf *bp, int newbsize)
3983 {
3984 
3985 	if (bp->b_flags & B_MALLOC) {
3986 		/*
3987 		 * malloced buffers are not shrunk
3988 		 */
3989 		if (newbsize == 0) {
3990 			bufmallocadjust(bp, 0);
3991 			free(bp->b_data, M_BIOBUF);
3992 			bp->b_data = bp->b_kvabase;
3993 			bp->b_flags &= ~B_MALLOC;
3994 		}
3995 		return;
3996 	}
3997 	vm_hold_free_pages(bp, newbsize);
3998 	bufspace_adjust(bp, newbsize);
3999 }
4000 
4001 /*
4002  * Extend the backing for a non-VMIO buffer.
4003  */
4004 static void
4005 vfs_nonvmio_extend(struct buf *bp, int newbsize)
4006 {
4007 	caddr_t origbuf;
4008 	int origbufsize;
4009 
4010 	/*
4011 	 * We only use malloced memory on the first allocation.
4012 	 * and revert to page-allocated memory when the buffer
4013 	 * grows.
4014 	 *
4015 	 * There is a potential smp race here that could lead
4016 	 * to bufmallocspace slightly passing the max.  It
4017 	 * is probably extremely rare and not worth worrying
4018 	 * over.
4019 	 */
4020 	if (bp->b_bufsize == 0 && newbsize <= PAGE_SIZE/2 &&
4021 	    bufmallocspace < maxbufmallocspace) {
4022 		bp->b_data = malloc(newbsize, M_BIOBUF, M_WAITOK);
4023 		bp->b_flags |= B_MALLOC;
4024 		bufmallocadjust(bp, newbsize);
4025 		return;
4026 	}
4027 
4028 	/*
4029 	 * If the buffer is growing on its other-than-first
4030 	 * allocation then we revert to the page-allocation
4031 	 * scheme.
4032 	 */
4033 	origbuf = NULL;
4034 	origbufsize = 0;
4035 	if (bp->b_flags & B_MALLOC) {
4036 		origbuf = bp->b_data;
4037 		origbufsize = bp->b_bufsize;
4038 		bp->b_data = bp->b_kvabase;
4039 		bufmallocadjust(bp, 0);
4040 		bp->b_flags &= ~B_MALLOC;
4041 		newbsize = round_page(newbsize);
4042 	}
4043 	vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize,
4044 	    (vm_offset_t) bp->b_data + newbsize);
4045 	if (origbuf != NULL) {
4046 		bcopy(origbuf, bp->b_data, origbufsize);
4047 		free(origbuf, M_BIOBUF);
4048 	}
4049 	bufspace_adjust(bp, newbsize);
4050 }
4051 
4052 /*
4053  * This code constitutes the buffer memory from either anonymous system
4054  * memory (in the case of non-VMIO operations) or from an associated
4055  * VM object (in the case of VMIO operations).  This code is able to
4056  * resize a buffer up or down.
4057  *
4058  * Note that this code is tricky, and has many complications to resolve
4059  * deadlock or inconsistent data situations.  Tread lightly!!!
4060  * There are B_CACHE and B_DELWRI interactions that must be dealt with by
4061  * the caller.  Calling this code willy nilly can result in the loss of data.
4062  *
4063  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
4064  * B_CACHE for the non-VMIO case.
4065  */
4066 int
4067 allocbuf(struct buf *bp, int size)
4068 {
4069 	int newbsize;
4070 
4071 	BUF_ASSERT_HELD(bp);
4072 
4073 	if (bp->b_bcount == size)
4074 		return (1);
4075 
4076 	if (bp->b_kvasize != 0 && bp->b_kvasize < size)
4077 		panic("allocbuf: buffer too small");
4078 
4079 	newbsize = roundup2(size, DEV_BSIZE);
4080 	if ((bp->b_flags & B_VMIO) == 0) {
4081 		if ((bp->b_flags & B_MALLOC) == 0)
4082 			newbsize = round_page(newbsize);
4083 		/*
4084 		 * Just get anonymous memory from the kernel.  Don't
4085 		 * mess with B_CACHE.
4086 		 */
4087 		if (newbsize < bp->b_bufsize)
4088 			vfs_nonvmio_truncate(bp, newbsize);
4089 		else if (newbsize > bp->b_bufsize)
4090 			vfs_nonvmio_extend(bp, newbsize);
4091 	} else {
4092 		int desiredpages;
4093 
4094 		desiredpages = (size == 0) ? 0 :
4095 		    num_pages((bp->b_offset & PAGE_MASK) + newbsize);
4096 
4097 		if (bp->b_flags & B_MALLOC)
4098 			panic("allocbuf: VMIO buffer can't be malloced");
4099 		/*
4100 		 * Set B_CACHE initially if buffer is 0 length or will become
4101 		 * 0-length.
4102 		 */
4103 		if (size == 0 || bp->b_bufsize == 0)
4104 			bp->b_flags |= B_CACHE;
4105 
4106 		if (newbsize < bp->b_bufsize)
4107 			vfs_vmio_truncate(bp, desiredpages);
4108 		/* XXX This looks as if it should be newbsize > b_bufsize */
4109 		else if (size > bp->b_bcount)
4110 			vfs_vmio_extend(bp, desiredpages, size);
4111 		bufspace_adjust(bp, newbsize);
4112 	}
4113 	bp->b_bcount = size;		/* requested buffer size. */
4114 	return (1);
4115 }
4116 
4117 extern int inflight_transient_maps;
4118 
4119 void
4120 biodone(struct bio *bp)
4121 {
4122 	struct mtx *mtxp;
4123 	void (*done)(struct bio *);
4124 	vm_offset_t start, end;
4125 
4126 	biotrack(bp, __func__);
4127 	if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
4128 		bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
4129 		bp->bio_flags |= BIO_UNMAPPED;
4130 		start = trunc_page((vm_offset_t)bp->bio_data);
4131 		end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
4132 		bp->bio_data = unmapped_buf;
4133 		pmap_qremove(start, atop(end - start));
4134 		vmem_free(transient_arena, start, end - start);
4135 		atomic_add_int(&inflight_transient_maps, -1);
4136 	}
4137 	done = bp->bio_done;
4138 	if (done == NULL) {
4139 		mtxp = mtx_pool_find(mtxpool_sleep, bp);
4140 		mtx_lock(mtxp);
4141 		bp->bio_flags |= BIO_DONE;
4142 		wakeup(bp);
4143 		mtx_unlock(mtxp);
4144 	} else
4145 		done(bp);
4146 }
4147 
4148 /*
4149  * Wait for a BIO to finish.
4150  */
4151 int
4152 biowait(struct bio *bp, const char *wchan)
4153 {
4154 	struct mtx *mtxp;
4155 
4156 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4157 	mtx_lock(mtxp);
4158 	while ((bp->bio_flags & BIO_DONE) == 0)
4159 		msleep(bp, mtxp, PRIBIO, wchan, 0);
4160 	mtx_unlock(mtxp);
4161 	if (bp->bio_error != 0)
4162 		return (bp->bio_error);
4163 	if (!(bp->bio_flags & BIO_ERROR))
4164 		return (0);
4165 	return (EIO);
4166 }
4167 
4168 void
4169 biofinish(struct bio *bp, struct devstat *stat, int error)
4170 {
4171 
4172 	if (error) {
4173 		bp->bio_error = error;
4174 		bp->bio_flags |= BIO_ERROR;
4175 	}
4176 	if (stat != NULL)
4177 		devstat_end_transaction_bio(stat, bp);
4178 	biodone(bp);
4179 }
4180 
4181 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4182 void
4183 biotrack_buf(struct bio *bp, const char *location)
4184 {
4185 
4186 	buf_track(bp->bio_track_bp, location);
4187 }
4188 #endif
4189 
4190 /*
4191  *	bufwait:
4192  *
4193  *	Wait for buffer I/O completion, returning error status.  The buffer
4194  *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
4195  *	error and cleared.
4196  */
4197 int
4198 bufwait(struct buf *bp)
4199 {
4200 	if (bp->b_iocmd == BIO_READ)
4201 		bwait(bp, PRIBIO, "biord");
4202 	else
4203 		bwait(bp, PRIBIO, "biowr");
4204 	if (bp->b_flags & B_EINTR) {
4205 		bp->b_flags &= ~B_EINTR;
4206 		return (EINTR);
4207 	}
4208 	if (bp->b_ioflags & BIO_ERROR) {
4209 		return (bp->b_error ? bp->b_error : EIO);
4210 	} else {
4211 		return (0);
4212 	}
4213 }
4214 
4215 /*
4216  *	bufdone:
4217  *
4218  *	Finish I/O on a buffer, optionally calling a completion function.
4219  *	This is usually called from an interrupt so process blocking is
4220  *	not allowed.
4221  *
4222  *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
4223  *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
4224  *	assuming B_INVAL is clear.
4225  *
4226  *	For the VMIO case, we set B_CACHE if the op was a read and no
4227  *	read error occurred, or if the op was a write.  B_CACHE is never
4228  *	set if the buffer is invalid or otherwise uncacheable.
4229  *
4230  *	biodone does not mess with B_INVAL, allowing the I/O routine or the
4231  *	initiator to leave B_INVAL set to brelse the buffer out of existence
4232  *	in the biodone routine.
4233  */
4234 void
4235 bufdone(struct buf *bp)
4236 {
4237 	struct bufobj *dropobj;
4238 	void    (*biodone)(struct buf *);
4239 
4240 	buf_track(bp, __func__);
4241 	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
4242 	dropobj = NULL;
4243 
4244 	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
4245 	BUF_ASSERT_HELD(bp);
4246 
4247 	runningbufwakeup(bp);
4248 	if (bp->b_iocmd == BIO_WRITE)
4249 		dropobj = bp->b_bufobj;
4250 	/* call optional completion function if requested */
4251 	if (bp->b_iodone != NULL) {
4252 		biodone = bp->b_iodone;
4253 		bp->b_iodone = NULL;
4254 		(*biodone) (bp);
4255 		if (dropobj)
4256 			bufobj_wdrop(dropobj);
4257 		return;
4258 	}
4259 	if (bp->b_flags & B_VMIO) {
4260 		/*
4261 		 * Set B_CACHE if the op was a normal read and no error
4262 		 * occurred.  B_CACHE is set for writes in the b*write()
4263 		 * routines.
4264 		 */
4265 		if (bp->b_iocmd == BIO_READ &&
4266 		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
4267 		    !(bp->b_ioflags & BIO_ERROR))
4268 			bp->b_flags |= B_CACHE;
4269 		vfs_vmio_iodone(bp);
4270 	}
4271 	if (!LIST_EMPTY(&bp->b_dep))
4272 		buf_complete(bp);
4273 	if ((bp->b_flags & B_CKHASH) != 0) {
4274 		KASSERT(bp->b_iocmd == BIO_READ,
4275 		    ("bufdone: b_iocmd %d not BIO_READ", bp->b_iocmd));
4276 		KASSERT(buf_mapped(bp), ("bufdone: bp %p not mapped", bp));
4277 		(*bp->b_ckhashcalc)(bp);
4278 	}
4279 	/*
4280 	 * For asynchronous completions, release the buffer now. The brelse
4281 	 * will do a wakeup there if necessary - so no need to do a wakeup
4282 	 * here in the async case. The sync case always needs to do a wakeup.
4283 	 */
4284 	if (bp->b_flags & B_ASYNC) {
4285 		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) ||
4286 		    (bp->b_ioflags & BIO_ERROR))
4287 			brelse(bp);
4288 		else
4289 			bqrelse(bp);
4290 	} else
4291 		bdone(bp);
4292 	if (dropobj)
4293 		bufobj_wdrop(dropobj);
4294 }
4295 
4296 /*
4297  * This routine is called in lieu of iodone in the case of
4298  * incomplete I/O.  This keeps the busy status for pages
4299  * consistent.
4300  */
4301 void
4302 vfs_unbusy_pages(struct buf *bp)
4303 {
4304 	int i;
4305 	vm_object_t obj;
4306 	vm_page_t m;
4307 
4308 	runningbufwakeup(bp);
4309 	if (!(bp->b_flags & B_VMIO))
4310 		return;
4311 
4312 	obj = bp->b_bufobj->bo_object;
4313 	VM_OBJECT_WLOCK(obj);
4314 	for (i = 0; i < bp->b_npages; i++) {
4315 		m = bp->b_pages[i];
4316 		if (m == bogus_page) {
4317 			m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
4318 			if (!m)
4319 				panic("vfs_unbusy_pages: page missing\n");
4320 			bp->b_pages[i] = m;
4321 			if (buf_mapped(bp)) {
4322 				BUF_CHECK_MAPPED(bp);
4323 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4324 				    bp->b_pages, bp->b_npages);
4325 			} else
4326 				BUF_CHECK_UNMAPPED(bp);
4327 		}
4328 		vm_page_sunbusy(m);
4329 	}
4330 	vm_object_pip_wakeupn(obj, bp->b_npages);
4331 	VM_OBJECT_WUNLOCK(obj);
4332 }
4333 
4334 /*
4335  * vfs_page_set_valid:
4336  *
4337  *	Set the valid bits in a page based on the supplied offset.   The
4338  *	range is restricted to the buffer's size.
4339  *
4340  *	This routine is typically called after a read completes.
4341  */
4342 static void
4343 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4344 {
4345 	vm_ooffset_t eoff;
4346 
4347 	/*
4348 	 * Compute the end offset, eoff, such that [off, eoff) does not span a
4349 	 * page boundary and eoff is not greater than the end of the buffer.
4350 	 * The end of the buffer, in this case, is our file EOF, not the
4351 	 * allocation size of the buffer.
4352 	 */
4353 	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
4354 	if (eoff > bp->b_offset + bp->b_bcount)
4355 		eoff = bp->b_offset + bp->b_bcount;
4356 
4357 	/*
4358 	 * Set valid range.  This is typically the entire buffer and thus the
4359 	 * entire page.
4360 	 */
4361 	if (eoff > off)
4362 		vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
4363 }
4364 
4365 /*
4366  * vfs_page_set_validclean:
4367  *
4368  *	Set the valid bits and clear the dirty bits in a page based on the
4369  *	supplied offset.   The range is restricted to the buffer's size.
4370  */
4371 static void
4372 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4373 {
4374 	vm_ooffset_t soff, eoff;
4375 
4376 	/*
4377 	 * Start and end offsets in buffer.  eoff - soff may not cross a
4378 	 * page boundary or cross the end of the buffer.  The end of the
4379 	 * buffer, in this case, is our file EOF, not the allocation size
4380 	 * of the buffer.
4381 	 */
4382 	soff = off;
4383 	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4384 	if (eoff > bp->b_offset + bp->b_bcount)
4385 		eoff = bp->b_offset + bp->b_bcount;
4386 
4387 	/*
4388 	 * Set valid range.  This is typically the entire buffer and thus the
4389 	 * entire page.
4390 	 */
4391 	if (eoff > soff) {
4392 		vm_page_set_validclean(
4393 		    m,
4394 		   (vm_offset_t) (soff & PAGE_MASK),
4395 		   (vm_offset_t) (eoff - soff)
4396 		);
4397 	}
4398 }
4399 
4400 /*
4401  * Ensure that all buffer pages are not exclusive busied.  If any page is
4402  * exclusive busy, drain it.
4403  */
4404 void
4405 vfs_drain_busy_pages(struct buf *bp)
4406 {
4407 	vm_page_t m;
4408 	int i, last_busied;
4409 
4410 	VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
4411 	last_busied = 0;
4412 	for (i = 0; i < bp->b_npages; i++) {
4413 		m = bp->b_pages[i];
4414 		if (vm_page_xbusied(m)) {
4415 			for (; last_busied < i; last_busied++)
4416 				vm_page_sbusy(bp->b_pages[last_busied]);
4417 			while (vm_page_xbusied(m)) {
4418 				vm_page_lock(m);
4419 				VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4420 				vm_page_busy_sleep(m, "vbpage", true);
4421 				VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4422 			}
4423 		}
4424 	}
4425 	for (i = 0; i < last_busied; i++)
4426 		vm_page_sunbusy(bp->b_pages[i]);
4427 }
4428 
4429 /*
4430  * This routine is called before a device strategy routine.
4431  * It is used to tell the VM system that paging I/O is in
4432  * progress, and treat the pages associated with the buffer
4433  * almost as being exclusive busy.  Also the object paging_in_progress
4434  * flag is handled to make sure that the object doesn't become
4435  * inconsistent.
4436  *
4437  * Since I/O has not been initiated yet, certain buffer flags
4438  * such as BIO_ERROR or B_INVAL may be in an inconsistent state
4439  * and should be ignored.
4440  */
4441 void
4442 vfs_busy_pages(struct buf *bp, int clear_modify)
4443 {
4444 	vm_object_t obj;
4445 	vm_ooffset_t foff;
4446 	vm_page_t m;
4447 	int i;
4448 	bool bogus;
4449 
4450 	if (!(bp->b_flags & B_VMIO))
4451 		return;
4452 
4453 	obj = bp->b_bufobj->bo_object;
4454 	foff = bp->b_offset;
4455 	KASSERT(bp->b_offset != NOOFFSET,
4456 	    ("vfs_busy_pages: no buffer offset"));
4457 	VM_OBJECT_WLOCK(obj);
4458 	vfs_drain_busy_pages(bp);
4459 	if (bp->b_bufsize != 0)
4460 		vfs_setdirty_locked_object(bp);
4461 	bogus = false;
4462 	for (i = 0; i < bp->b_npages; i++) {
4463 		m = bp->b_pages[i];
4464 
4465 		if ((bp->b_flags & B_CLUSTER) == 0) {
4466 			vm_object_pip_add(obj, 1);
4467 			vm_page_sbusy(m);
4468 		}
4469 		/*
4470 		 * When readying a buffer for a read ( i.e
4471 		 * clear_modify == 0 ), it is important to do
4472 		 * bogus_page replacement for valid pages in
4473 		 * partially instantiated buffers.  Partially
4474 		 * instantiated buffers can, in turn, occur when
4475 		 * reconstituting a buffer from its VM backing store
4476 		 * base.  We only have to do this if B_CACHE is
4477 		 * clear ( which causes the I/O to occur in the
4478 		 * first place ).  The replacement prevents the read
4479 		 * I/O from overwriting potentially dirty VM-backed
4480 		 * pages.  XXX bogus page replacement is, uh, bogus.
4481 		 * It may not work properly with small-block devices.
4482 		 * We need to find a better way.
4483 		 */
4484 		if (clear_modify) {
4485 			pmap_remove_write(m);
4486 			vfs_page_set_validclean(bp, foff, m);
4487 		} else if (m->valid == VM_PAGE_BITS_ALL &&
4488 		    (bp->b_flags & B_CACHE) == 0) {
4489 			bp->b_pages[i] = bogus_page;
4490 			bogus = true;
4491 		}
4492 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4493 	}
4494 	VM_OBJECT_WUNLOCK(obj);
4495 	if (bogus && buf_mapped(bp)) {
4496 		BUF_CHECK_MAPPED(bp);
4497 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4498 		    bp->b_pages, bp->b_npages);
4499 	}
4500 }
4501 
4502 /*
4503  *	vfs_bio_set_valid:
4504  *
4505  *	Set the range within the buffer to valid.  The range is
4506  *	relative to the beginning of the buffer, b_offset.  Note that
4507  *	b_offset itself may be offset from the beginning of the first
4508  *	page.
4509  */
4510 void
4511 vfs_bio_set_valid(struct buf *bp, int base, int size)
4512 {
4513 	int i, n;
4514 	vm_page_t m;
4515 
4516 	if (!(bp->b_flags & B_VMIO))
4517 		return;
4518 
4519 	/*
4520 	 * Fixup base to be relative to beginning of first page.
4521 	 * Set initial n to be the maximum number of bytes in the
4522 	 * first page that can be validated.
4523 	 */
4524 	base += (bp->b_offset & PAGE_MASK);
4525 	n = PAGE_SIZE - (base & PAGE_MASK);
4526 
4527 	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4528 	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4529 		m = bp->b_pages[i];
4530 		if (n > size)
4531 			n = size;
4532 		vm_page_set_valid_range(m, base & PAGE_MASK, n);
4533 		base += n;
4534 		size -= n;
4535 		n = PAGE_SIZE;
4536 	}
4537 	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4538 }
4539 
4540 /*
4541  *	vfs_bio_clrbuf:
4542  *
4543  *	If the specified buffer is a non-VMIO buffer, clear the entire
4544  *	buffer.  If the specified buffer is a VMIO buffer, clear and
4545  *	validate only the previously invalid portions of the buffer.
4546  *	This routine essentially fakes an I/O, so we need to clear
4547  *	BIO_ERROR and B_INVAL.
4548  *
4549  *	Note that while we only theoretically need to clear through b_bcount,
4550  *	we go ahead and clear through b_bufsize.
4551  */
4552 void
4553 vfs_bio_clrbuf(struct buf *bp)
4554 {
4555 	int i, j, mask, sa, ea, slide;
4556 
4557 	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4558 		clrbuf(bp);
4559 		return;
4560 	}
4561 	bp->b_flags &= ~B_INVAL;
4562 	bp->b_ioflags &= ~BIO_ERROR;
4563 	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4564 	if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
4565 	    (bp->b_offset & PAGE_MASK) == 0) {
4566 		if (bp->b_pages[0] == bogus_page)
4567 			goto unlock;
4568 		mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
4569 		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object);
4570 		if ((bp->b_pages[0]->valid & mask) == mask)
4571 			goto unlock;
4572 		if ((bp->b_pages[0]->valid & mask) == 0) {
4573 			pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize);
4574 			bp->b_pages[0]->valid |= mask;
4575 			goto unlock;
4576 		}
4577 	}
4578 	sa = bp->b_offset & PAGE_MASK;
4579 	slide = 0;
4580 	for (i = 0; i < bp->b_npages; i++, sa = 0) {
4581 		slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4582 		ea = slide & PAGE_MASK;
4583 		if (ea == 0)
4584 			ea = PAGE_SIZE;
4585 		if (bp->b_pages[i] == bogus_page)
4586 			continue;
4587 		j = sa / DEV_BSIZE;
4588 		mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
4589 		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
4590 		if ((bp->b_pages[i]->valid & mask) == mask)
4591 			continue;
4592 		if ((bp->b_pages[i]->valid & mask) == 0)
4593 			pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4594 		else {
4595 			for (; sa < ea; sa += DEV_BSIZE, j++) {
4596 				if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4597 					pmap_zero_page_area(bp->b_pages[i],
4598 					    sa, DEV_BSIZE);
4599 				}
4600 			}
4601 		}
4602 		bp->b_pages[i]->valid |= mask;
4603 	}
4604 unlock:
4605 	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4606 	bp->b_resid = 0;
4607 }
4608 
4609 void
4610 vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4611 {
4612 	vm_page_t m;
4613 	int i, n;
4614 
4615 	if (buf_mapped(bp)) {
4616 		BUF_CHECK_MAPPED(bp);
4617 		bzero(bp->b_data + base, size);
4618 	} else {
4619 		BUF_CHECK_UNMAPPED(bp);
4620 		n = PAGE_SIZE - (base & PAGE_MASK);
4621 		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4622 			m = bp->b_pages[i];
4623 			if (n > size)
4624 				n = size;
4625 			pmap_zero_page_area(m, base & PAGE_MASK, n);
4626 			base += n;
4627 			size -= n;
4628 			n = PAGE_SIZE;
4629 		}
4630 	}
4631 }
4632 
4633 /*
4634  * Update buffer flags based on I/O request parameters, optionally releasing the
4635  * buffer.  If it's VMIO or direct I/O, the buffer pages are released to the VM,
4636  * where they may be placed on a page queue (VMIO) or freed immediately (direct
4637  * I/O).  Otherwise the buffer is released to the cache.
4638  */
4639 static void
4640 b_io_dismiss(struct buf *bp, int ioflag, bool release)
4641 {
4642 
4643 	KASSERT((ioflag & IO_NOREUSE) == 0 || (ioflag & IO_VMIO) != 0,
4644 	    ("buf %p non-VMIO noreuse", bp));
4645 
4646 	if ((ioflag & IO_DIRECT) != 0)
4647 		bp->b_flags |= B_DIRECT;
4648 	if ((ioflag & (IO_VMIO | IO_DIRECT)) != 0 && LIST_EMPTY(&bp->b_dep)) {
4649 		bp->b_flags |= B_RELBUF;
4650 		if ((ioflag & IO_NOREUSE) != 0)
4651 			bp->b_flags |= B_NOREUSE;
4652 		if (release)
4653 			brelse(bp);
4654 	} else if (release)
4655 		bqrelse(bp);
4656 }
4657 
4658 void
4659 vfs_bio_brelse(struct buf *bp, int ioflag)
4660 {
4661 
4662 	b_io_dismiss(bp, ioflag, true);
4663 }
4664 
4665 void
4666 vfs_bio_set_flags(struct buf *bp, int ioflag)
4667 {
4668 
4669 	b_io_dismiss(bp, ioflag, false);
4670 }
4671 
4672 /*
4673  * vm_hold_load_pages and vm_hold_free_pages get pages into
4674  * a buffers address space.  The pages are anonymous and are
4675  * not associated with a file object.
4676  */
4677 static void
4678 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4679 {
4680 	vm_offset_t pg;
4681 	vm_page_t p;
4682 	int index;
4683 
4684 	BUF_CHECK_MAPPED(bp);
4685 
4686 	to = round_page(to);
4687 	from = round_page(from);
4688 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4689 
4690 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
4691 		/*
4692 		 * note: must allocate system pages since blocking here
4693 		 * could interfere with paging I/O, no matter which
4694 		 * process we are.
4695 		 */
4696 		p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
4697 		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT) |
4698 		    VM_ALLOC_WAITOK);
4699 		pmap_qenter(pg, &p, 1);
4700 		bp->b_pages[index] = p;
4701 	}
4702 	bp->b_npages = index;
4703 }
4704 
4705 /* Return pages associated with this buf to the vm system */
4706 static void
4707 vm_hold_free_pages(struct buf *bp, int newbsize)
4708 {
4709 	vm_offset_t from;
4710 	vm_page_t p;
4711 	int index, newnpages;
4712 
4713 	BUF_CHECK_MAPPED(bp);
4714 
4715 	from = round_page((vm_offset_t)bp->b_data + newbsize);
4716 	newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4717 	if (bp->b_npages > newnpages)
4718 		pmap_qremove(from, bp->b_npages - newnpages);
4719 	for (index = newnpages; index < bp->b_npages; index++) {
4720 		p = bp->b_pages[index];
4721 		bp->b_pages[index] = NULL;
4722 		p->wire_count--;
4723 		vm_page_free(p);
4724 	}
4725 	vm_wire_sub(bp->b_npages - newnpages);
4726 	bp->b_npages = newnpages;
4727 }
4728 
4729 /*
4730  * Map an IO request into kernel virtual address space.
4731  *
4732  * All requests are (re)mapped into kernel VA space.
4733  * Notice that we use b_bufsize for the size of the buffer
4734  * to be mapped.  b_bcount might be modified by the driver.
4735  *
4736  * Note that even if the caller determines that the address space should
4737  * be valid, a race or a smaller-file mapped into a larger space may
4738  * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
4739  * check the return value.
4740  *
4741  * This function only works with pager buffers.
4742  */
4743 int
4744 vmapbuf(struct buf *bp, int mapbuf)
4745 {
4746 	vm_prot_t prot;
4747 	int pidx;
4748 
4749 	if (bp->b_bufsize < 0)
4750 		return (-1);
4751 	prot = VM_PROT_READ;
4752 	if (bp->b_iocmd == BIO_READ)
4753 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
4754 	if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
4755 	    (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
4756 	    btoc(MAXPHYS))) < 0)
4757 		return (-1);
4758 	bp->b_npages = pidx;
4759 	bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
4760 	if (mapbuf || !unmapped_buf_allowed) {
4761 		pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
4762 		bp->b_data = bp->b_kvabase + bp->b_offset;
4763 	} else
4764 		bp->b_data = unmapped_buf;
4765 	return(0);
4766 }
4767 
4768 /*
4769  * Free the io map PTEs associated with this IO operation.
4770  * We also invalidate the TLB entries and restore the original b_addr.
4771  *
4772  * This function only works with pager buffers.
4773  */
4774 void
4775 vunmapbuf(struct buf *bp)
4776 {
4777 	int npages;
4778 
4779 	npages = bp->b_npages;
4780 	if (buf_mapped(bp))
4781 		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
4782 	vm_page_unhold_pages(bp->b_pages, npages);
4783 
4784 	bp->b_data = unmapped_buf;
4785 }
4786 
4787 void
4788 bdone(struct buf *bp)
4789 {
4790 	struct mtx *mtxp;
4791 
4792 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4793 	mtx_lock(mtxp);
4794 	bp->b_flags |= B_DONE;
4795 	wakeup(bp);
4796 	mtx_unlock(mtxp);
4797 }
4798 
4799 void
4800 bwait(struct buf *bp, u_char pri, const char *wchan)
4801 {
4802 	struct mtx *mtxp;
4803 
4804 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4805 	mtx_lock(mtxp);
4806 	while ((bp->b_flags & B_DONE) == 0)
4807 		msleep(bp, mtxp, pri, wchan, 0);
4808 	mtx_unlock(mtxp);
4809 }
4810 
4811 int
4812 bufsync(struct bufobj *bo, int waitfor)
4813 {
4814 
4815 	return (VOP_FSYNC(bo2vnode(bo), waitfor, curthread));
4816 }
4817 
4818 void
4819 bufstrategy(struct bufobj *bo, struct buf *bp)
4820 {
4821 	int i = 0;
4822 	struct vnode *vp;
4823 
4824 	vp = bp->b_vp;
4825 	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
4826 	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
4827 	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
4828 	i = VOP_STRATEGY(vp, bp);
4829 	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
4830 }
4831 
4832 /*
4833  * Initialize a struct bufobj before use.  Memory is assumed zero filled.
4834  */
4835 void
4836 bufobj_init(struct bufobj *bo, void *private)
4837 {
4838 	static volatile int bufobj_cleanq;
4839 
4840         bo->bo_domain =
4841             atomic_fetchadd_int(&bufobj_cleanq, 1) % clean_domains;
4842         rw_init(BO_LOCKPTR(bo), "bufobj interlock");
4843         bo->bo_private = private;
4844         TAILQ_INIT(&bo->bo_clean.bv_hd);
4845         TAILQ_INIT(&bo->bo_dirty.bv_hd);
4846 }
4847 
4848 void
4849 bufobj_wrefl(struct bufobj *bo)
4850 {
4851 
4852 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4853 	ASSERT_BO_WLOCKED(bo);
4854 	bo->bo_numoutput++;
4855 }
4856 
4857 void
4858 bufobj_wref(struct bufobj *bo)
4859 {
4860 
4861 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4862 	BO_LOCK(bo);
4863 	bo->bo_numoutput++;
4864 	BO_UNLOCK(bo);
4865 }
4866 
4867 void
4868 bufobj_wdrop(struct bufobj *bo)
4869 {
4870 
4871 	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
4872 	BO_LOCK(bo);
4873 	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
4874 	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
4875 		bo->bo_flag &= ~BO_WWAIT;
4876 		wakeup(&bo->bo_numoutput);
4877 	}
4878 	BO_UNLOCK(bo);
4879 }
4880 
4881 int
4882 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
4883 {
4884 	int error;
4885 
4886 	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
4887 	ASSERT_BO_WLOCKED(bo);
4888 	error = 0;
4889 	while (bo->bo_numoutput) {
4890 		bo->bo_flag |= BO_WWAIT;
4891 		error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo),
4892 		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
4893 		if (error)
4894 			break;
4895 	}
4896 	return (error);
4897 }
4898 
4899 /*
4900  * Set bio_data or bio_ma for struct bio from the struct buf.
4901  */
4902 void
4903 bdata2bio(struct buf *bp, struct bio *bip)
4904 {
4905 
4906 	if (!buf_mapped(bp)) {
4907 		KASSERT(unmapped_buf_allowed, ("unmapped"));
4908 		bip->bio_ma = bp->b_pages;
4909 		bip->bio_ma_n = bp->b_npages;
4910 		bip->bio_data = unmapped_buf;
4911 		bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
4912 		bip->bio_flags |= BIO_UNMAPPED;
4913 		KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
4914 		    PAGE_SIZE == bp->b_npages,
4915 		    ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
4916 		    (long long)bip->bio_length, bip->bio_ma_n));
4917 	} else {
4918 		bip->bio_data = bp->b_data;
4919 		bip->bio_ma = NULL;
4920 	}
4921 }
4922 
4923 /*
4924  * The MIPS pmap code currently doesn't handle aliased pages.
4925  * The VIPT caches may not handle page aliasing themselves, leading
4926  * to data corruption.
4927  *
4928  * As such, this code makes a system extremely unhappy if said
4929  * system doesn't support unaliasing the above situation in hardware.
4930  * Some "recent" systems (eg some mips24k/mips74k cores) don't enable
4931  * this feature at build time, so it has to be handled in software.
4932  *
4933  * Once the MIPS pmap/cache code grows to support this function on
4934  * earlier chips, it should be flipped back off.
4935  */
4936 #ifdef	__mips__
4937 static int buf_pager_relbuf = 1;
4938 #else
4939 static int buf_pager_relbuf = 0;
4940 #endif
4941 SYSCTL_INT(_vfs, OID_AUTO, buf_pager_relbuf, CTLFLAG_RWTUN,
4942     &buf_pager_relbuf, 0,
4943     "Make buffer pager release buffers after reading");
4944 
4945 /*
4946  * The buffer pager.  It uses buffer reads to validate pages.
4947  *
4948  * In contrast to the generic local pager from vm/vnode_pager.c, this
4949  * pager correctly and easily handles volumes where the underlying
4950  * device block size is greater than the machine page size.  The
4951  * buffer cache transparently extends the requested page run to be
4952  * aligned at the block boundary, and does the necessary bogus page
4953  * replacements in the addends to avoid obliterating already valid
4954  * pages.
4955  *
4956  * The only non-trivial issue is that the exclusive busy state for
4957  * pages, which is assumed by the vm_pager_getpages() interface, is
4958  * incompatible with the VMIO buffer cache's desire to share-busy the
4959  * pages.  This function performs a trivial downgrade of the pages'
4960  * state before reading buffers, and a less trivial upgrade from the
4961  * shared-busy to excl-busy state after the read.
4962  */
4963 int
4964 vfs_bio_getpages(struct vnode *vp, vm_page_t *ma, int count,
4965     int *rbehind, int *rahead, vbg_get_lblkno_t get_lblkno,
4966     vbg_get_blksize_t get_blksize)
4967 {
4968 	vm_page_t m;
4969 	vm_object_t object;
4970 	struct buf *bp;
4971 	struct mount *mp;
4972 	daddr_t lbn, lbnp;
4973 	vm_ooffset_t la, lb, poff, poffe;
4974 	long bsize;
4975 	int bo_bs, br_flags, error, i, pgsin, pgsin_a, pgsin_b;
4976 	bool redo, lpart;
4977 
4978 	object = vp->v_object;
4979 	mp = vp->v_mount;
4980 	la = IDX_TO_OFF(ma[count - 1]->pindex);
4981 	if (la >= object->un_pager.vnp.vnp_size)
4982 		return (VM_PAGER_BAD);
4983 
4984 	/*
4985 	 * Change the meaning of la from where the last requested page starts
4986 	 * to where it ends, because that's the end of the requested region
4987 	 * and the start of the potential read-ahead region.
4988 	 */
4989 	la += PAGE_SIZE;
4990 	lpart = la > object->un_pager.vnp.vnp_size;
4991 	bo_bs = get_blksize(vp, get_lblkno(vp, IDX_TO_OFF(ma[0]->pindex)));
4992 
4993 	/*
4994 	 * Calculate read-ahead, behind and total pages.
4995 	 */
4996 	pgsin = count;
4997 	lb = IDX_TO_OFF(ma[0]->pindex);
4998 	pgsin_b = OFF_TO_IDX(lb - rounddown2(lb, bo_bs));
4999 	pgsin += pgsin_b;
5000 	if (rbehind != NULL)
5001 		*rbehind = pgsin_b;
5002 	pgsin_a = OFF_TO_IDX(roundup2(la, bo_bs) - la);
5003 	if (la + IDX_TO_OFF(pgsin_a) >= object->un_pager.vnp.vnp_size)
5004 		pgsin_a = OFF_TO_IDX(roundup2(object->un_pager.vnp.vnp_size,
5005 		    PAGE_SIZE) - la);
5006 	pgsin += pgsin_a;
5007 	if (rahead != NULL)
5008 		*rahead = pgsin_a;
5009 	VM_CNT_INC(v_vnodein);
5010 	VM_CNT_ADD(v_vnodepgsin, pgsin);
5011 
5012 	br_flags = (mp != NULL && (mp->mnt_kern_flag & MNTK_UNMAPPED_BUFS)
5013 	    != 0) ? GB_UNMAPPED : 0;
5014 	VM_OBJECT_WLOCK(object);
5015 again:
5016 	for (i = 0; i < count; i++)
5017 		vm_page_busy_downgrade(ma[i]);
5018 	VM_OBJECT_WUNLOCK(object);
5019 
5020 	lbnp = -1;
5021 	for (i = 0; i < count; i++) {
5022 		m = ma[i];
5023 
5024 		/*
5025 		 * Pages are shared busy and the object lock is not
5026 		 * owned, which together allow for the pages'
5027 		 * invalidation.  The racy test for validity avoids
5028 		 * useless creation of the buffer for the most typical
5029 		 * case when invalidation is not used in redo or for
5030 		 * parallel read.  The shared->excl upgrade loop at
5031 		 * the end of the function catches the race in a
5032 		 * reliable way (protected by the object lock).
5033 		 */
5034 		if (m->valid == VM_PAGE_BITS_ALL)
5035 			continue;
5036 
5037 		poff = IDX_TO_OFF(m->pindex);
5038 		poffe = MIN(poff + PAGE_SIZE, object->un_pager.vnp.vnp_size);
5039 		for (; poff < poffe; poff += bsize) {
5040 			lbn = get_lblkno(vp, poff);
5041 			if (lbn == lbnp)
5042 				goto next_page;
5043 			lbnp = lbn;
5044 
5045 			bsize = get_blksize(vp, lbn);
5046 			error = bread_gb(vp, lbn, bsize, curthread->td_ucred,
5047 			    br_flags, &bp);
5048 			if (error != 0)
5049 				goto end_pages;
5050 			if (LIST_EMPTY(&bp->b_dep)) {
5051 				/*
5052 				 * Invalidation clears m->valid, but
5053 				 * may leave B_CACHE flag if the
5054 				 * buffer existed at the invalidation
5055 				 * time.  In this case, recycle the
5056 				 * buffer to do real read on next
5057 				 * bread() after redo.
5058 				 *
5059 				 * Otherwise B_RELBUF is not strictly
5060 				 * necessary, enable to reduce buf
5061 				 * cache pressure.
5062 				 */
5063 				if (buf_pager_relbuf ||
5064 				    m->valid != VM_PAGE_BITS_ALL)
5065 					bp->b_flags |= B_RELBUF;
5066 
5067 				bp->b_flags &= ~B_NOCACHE;
5068 				brelse(bp);
5069 			} else {
5070 				bqrelse(bp);
5071 			}
5072 		}
5073 		KASSERT(1 /* racy, enable for debugging */ ||
5074 		    m->valid == VM_PAGE_BITS_ALL || i == count - 1,
5075 		    ("buf %d %p invalid", i, m));
5076 		if (i == count - 1 && lpart) {
5077 			VM_OBJECT_WLOCK(object);
5078 			if (m->valid != 0 &&
5079 			    m->valid != VM_PAGE_BITS_ALL)
5080 				vm_page_zero_invalid(m, TRUE);
5081 			VM_OBJECT_WUNLOCK(object);
5082 		}
5083 next_page:;
5084 	}
5085 end_pages:
5086 
5087 	VM_OBJECT_WLOCK(object);
5088 	redo = false;
5089 	for (i = 0; i < count; i++) {
5090 		vm_page_sunbusy(ma[i]);
5091 		ma[i] = vm_page_grab(object, ma[i]->pindex, VM_ALLOC_NORMAL);
5092 
5093 		/*
5094 		 * Since the pages were only sbusy while neither the
5095 		 * buffer nor the object lock was held by us, or
5096 		 * reallocated while vm_page_grab() slept for busy
5097 		 * relinguish, they could have been invalidated.
5098 		 * Recheck the valid bits and re-read as needed.
5099 		 *
5100 		 * Note that the last page is made fully valid in the
5101 		 * read loop, and partial validity for the page at
5102 		 * index count - 1 could mean that the page was
5103 		 * invalidated or removed, so we must restart for
5104 		 * safety as well.
5105 		 */
5106 		if (ma[i]->valid != VM_PAGE_BITS_ALL)
5107 			redo = true;
5108 	}
5109 	if (redo && error == 0)
5110 		goto again;
5111 	VM_OBJECT_WUNLOCK(object);
5112 	return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
5113 }
5114 
5115 #include "opt_ddb.h"
5116 #ifdef DDB
5117 #include <ddb/ddb.h>
5118 
5119 /* DDB command to show buffer data */
5120 DB_SHOW_COMMAND(buffer, db_show_buffer)
5121 {
5122 	/* get args */
5123 	struct buf *bp = (struct buf *)addr;
5124 #ifdef FULL_BUF_TRACKING
5125 	uint32_t i, j;
5126 #endif
5127 
5128 	if (!have_addr) {
5129 		db_printf("usage: show buffer <addr>\n");
5130 		return;
5131 	}
5132 
5133 	db_printf("buf at %p\n", bp);
5134 	db_printf("b_flags = 0x%b, b_xflags=0x%b, b_vflags=0x%b\n",
5135 	    (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags,
5136 	    PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS);
5137 	db_printf(
5138 	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
5139 	    "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, "
5140 	    "b_dep = %p\n",
5141 	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
5142 	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
5143 	    (intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
5144 	db_printf("b_kvabase = %p, b_kvasize = %d\n",
5145 	    bp->b_kvabase, bp->b_kvasize);
5146 	if (bp->b_npages) {
5147 		int i;
5148 		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
5149 		for (i = 0; i < bp->b_npages; i++) {
5150 			vm_page_t m;
5151 			m = bp->b_pages[i];
5152 			if (m != NULL)
5153 				db_printf("(%p, 0x%lx, 0x%lx)", m->object,
5154 				    (u_long)m->pindex,
5155 				    (u_long)VM_PAGE_TO_PHYS(m));
5156 			else
5157 				db_printf("( ??? )");
5158 			if ((i + 1) < bp->b_npages)
5159 				db_printf(",");
5160 		}
5161 		db_printf("\n");
5162 	}
5163 #if defined(FULL_BUF_TRACKING)
5164 	db_printf("b_io_tracking: b_io_tcnt = %u\n", bp->b_io_tcnt);
5165 
5166 	i = bp->b_io_tcnt % BUF_TRACKING_SIZE;
5167 	for (j = 1; j <= BUF_TRACKING_SIZE; j++) {
5168 		if (bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)] == NULL)
5169 			continue;
5170 		db_printf(" %2u: %s\n", j,
5171 		    bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)]);
5172 	}
5173 #elif defined(BUF_TRACKING)
5174 	db_printf("b_io_tracking: %s\n", bp->b_io_tracking);
5175 #endif
5176 	db_printf(" ");
5177 	BUF_LOCKPRINTINFO(bp);
5178 }
5179 
5180 DB_SHOW_COMMAND(bufqueues, bufqueues)
5181 {
5182 	struct bufdomain *bd;
5183 	int i, j;
5184 
5185 	db_printf("bqempty: %d\n", bqempty.bq_len);
5186 	db_printf("bqdirty: %d\n", bqdirty.bq_len);
5187 
5188 	for (i = 0; i < clean_domains; i++) {
5189 		bd = &bdclean[i];
5190 		db_printf("Buf domain %d\n", i);
5191 		db_printf("\tfreebufs\t%d\n", bd->bd_freebuffers);
5192 		db_printf("\tlofreebufs\t%d\n", bd->bd_lofreebuffers);
5193 		db_printf("\thifreebufs\t%d\n", bd->bd_hifreebuffers);
5194 		db_printf("\n");
5195 		db_printf("\tbufspace\t%ld\n", bd->bd_bufspace);
5196 		db_printf("\tmaxbufspace\t%ld\n", bd->bd_maxbufspace);
5197 		db_printf("\thibufspace\t%ld\n", bd->bd_hibufspace);
5198 		db_printf("\tlobufspace\t%ld\n", bd->bd_lobufspace);
5199 		db_printf("\tbufspacethresh\t%ld\n", bd->bd_bufspacethresh);
5200 		db_printf("\n");
5201 		db_printf("\tcleanq count\t%d\n", bd->bd_cleanq->bq_len);
5202 		db_printf("\twakeup\t\t%d\n", bd->bd_wanted);
5203 		db_printf("\tlim\t\t%d\n", bd->bd_lim);
5204 		db_printf("\tCPU ");
5205 		for (j = 0; j < mp_maxid + 1; j++)
5206 			db_printf("%d, ", bd->bd_subq[j].bq_len);
5207 		db_printf("\n");
5208 	}
5209 }
5210 
5211 DB_SHOW_COMMAND(lockedbufs, lockedbufs)
5212 {
5213 	struct buf *bp;
5214 	int i;
5215 
5216 	for (i = 0; i < nbuf; i++) {
5217 		bp = &buf[i];
5218 		if (BUF_ISLOCKED(bp)) {
5219 			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5220 			db_printf("\n");
5221 			if (db_pager_quit)
5222 				break;
5223 		}
5224 	}
5225 }
5226 
5227 DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
5228 {
5229 	struct vnode *vp;
5230 	struct buf *bp;
5231 
5232 	if (!have_addr) {
5233 		db_printf("usage: show vnodebufs <addr>\n");
5234 		return;
5235 	}
5236 	vp = (struct vnode *)addr;
5237 	db_printf("Clean buffers:\n");
5238 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
5239 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5240 		db_printf("\n");
5241 	}
5242 	db_printf("Dirty buffers:\n");
5243 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
5244 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5245 		db_printf("\n");
5246 	}
5247 }
5248 
5249 DB_COMMAND(countfreebufs, db_coundfreebufs)
5250 {
5251 	struct buf *bp;
5252 	int i, used = 0, nfree = 0;
5253 
5254 	if (have_addr) {
5255 		db_printf("usage: countfreebufs\n");
5256 		return;
5257 	}
5258 
5259 	for (i = 0; i < nbuf; i++) {
5260 		bp = &buf[i];
5261 		if (bp->b_qindex == QUEUE_EMPTY)
5262 			nfree++;
5263 		else
5264 			used++;
5265 	}
5266 
5267 	db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
5268 	    nfree + used);
5269 	db_printf("numfreebuffers is %d\n", numfreebuffers);
5270 }
5271 #endif /* DDB */
5272