xref: /freebsd/sys/kern/vfs_bio.c (revision 177034c44ed18d99e6cc85dfddd2bb04b41d38ac)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2004 Poul-Henning Kamp
5  * Copyright (c) 1994,1997 John S. Dyson
6  * Copyright (c) 2013 The FreeBSD Foundation
7  * All rights reserved.
8  *
9  * Portions of this software were developed by Konstantin Belousov
10  * under sponsorship from the FreeBSD Foundation.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33 
34 /*
35  * This file contains a buffer I/O scheme implementing a coherent
36  * VM object and buffer cache scheme.  Pains have been taken to make
37  * sure that the performance degradation associated with schemes such
38  * as this is not realized.
39  *
40  * Author:  John S. Dyson
41  * Significant help during the development and debugging phases
42  * was provided by David Greenman, also of the FreeBSD core team.
43  *
44  * see man buf(9) for more info.
45  */
46 
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/asan.h>
53 #include <sys/bio.h>
54 #include <sys/bitset.h>
55 #include <sys/boottrace.h>
56 #include <sys/buf.h>
57 #include <sys/conf.h>
58 #include <sys/counter.h>
59 #include <sys/devicestat.h>
60 #include <sys/eventhandler.h>
61 #include <sys/fail.h>
62 #include <sys/ktr.h>
63 #include <sys/limits.h>
64 #include <sys/lock.h>
65 #include <sys/malloc.h>
66 #include <sys/mount.h>
67 #include <sys/mutex.h>
68 #include <sys/kernel.h>
69 #include <sys/kthread.h>
70 #include <sys/proc.h>
71 #include <sys/racct.h>
72 #include <sys/refcount.h>
73 #include <sys/resourcevar.h>
74 #include <sys/rwlock.h>
75 #include <sys/smp.h>
76 #include <sys/sysctl.h>
77 #include <sys/syscallsubr.h>
78 #include <sys/vmem.h>
79 #include <sys/vmmeter.h>
80 #include <sys/vnode.h>
81 #include <sys/watchdog.h>
82 #include <geom/geom.h>
83 #include <vm/vm.h>
84 #include <vm/vm_param.h>
85 #include <vm/vm_kern.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_pageout.h>
89 #include <vm/vm_pager.h>
90 #include <vm/vm_extern.h>
91 #include <vm/vm_map.h>
92 #include <vm/swap_pager.h>
93 
94 static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
95 
96 struct	bio_ops bioops;		/* I/O operation notification */
97 
98 struct	buf_ops buf_ops_bio = {
99 	.bop_name	=	"buf_ops_bio",
100 	.bop_write	=	bufwrite,
101 	.bop_strategy	=	bufstrategy,
102 	.bop_sync	=	bufsync,
103 	.bop_bdflush	=	bufbdflush,
104 };
105 
106 struct bufqueue {
107 	struct mtx_padalign	bq_lock;
108 	TAILQ_HEAD(, buf)	bq_queue;
109 	uint8_t			bq_index;
110 	uint16_t		bq_subqueue;
111 	int			bq_len;
112 } __aligned(CACHE_LINE_SIZE);
113 
114 #define	BQ_LOCKPTR(bq)		(&(bq)->bq_lock)
115 #define	BQ_LOCK(bq)		mtx_lock(BQ_LOCKPTR((bq)))
116 #define	BQ_UNLOCK(bq)		mtx_unlock(BQ_LOCKPTR((bq)))
117 #define	BQ_ASSERT_LOCKED(bq)	mtx_assert(BQ_LOCKPTR((bq)), MA_OWNED)
118 
119 struct bufdomain {
120 	struct bufqueue	bd_subq[MAXCPU + 1]; /* Per-cpu sub queues + global */
121 	struct bufqueue bd_dirtyq;
122 	struct bufqueue	*bd_cleanq;
123 	struct mtx_padalign bd_run_lock;
124 	/* Constants */
125 	long		bd_maxbufspace;
126 	long		bd_hibufspace;
127 	long 		bd_lobufspace;
128 	long 		bd_bufspacethresh;
129 	int		bd_hifreebuffers;
130 	int		bd_lofreebuffers;
131 	int		bd_hidirtybuffers;
132 	int		bd_lodirtybuffers;
133 	int		bd_dirtybufthresh;
134 	int		bd_lim;
135 	/* atomics */
136 	int		bd_wanted;
137 	bool		bd_shutdown;
138 	int __aligned(CACHE_LINE_SIZE)	bd_numdirtybuffers;
139 	int __aligned(CACHE_LINE_SIZE)	bd_running;
140 	long __aligned(CACHE_LINE_SIZE) bd_bufspace;
141 	int __aligned(CACHE_LINE_SIZE)	bd_freebuffers;
142 } __aligned(CACHE_LINE_SIZE);
143 
144 #define	BD_LOCKPTR(bd)		(&(bd)->bd_cleanq->bq_lock)
145 #define	BD_LOCK(bd)		mtx_lock(BD_LOCKPTR((bd)))
146 #define	BD_UNLOCK(bd)		mtx_unlock(BD_LOCKPTR((bd)))
147 #define	BD_ASSERT_LOCKED(bd)	mtx_assert(BD_LOCKPTR((bd)), MA_OWNED)
148 #define	BD_RUN_LOCKPTR(bd)	(&(bd)->bd_run_lock)
149 #define	BD_RUN_LOCK(bd)		mtx_lock(BD_RUN_LOCKPTR((bd)))
150 #define	BD_RUN_UNLOCK(bd)	mtx_unlock(BD_RUN_LOCKPTR((bd)))
151 #define	BD_DOMAIN(bd)		(bd - bdomain)
152 
153 static char *buf;		/* buffer header pool */
154 static struct buf *
155 nbufp(unsigned i)
156 {
157 	return ((struct buf *)(buf + (sizeof(struct buf) +
158 	    sizeof(vm_page_t) * atop(maxbcachebuf)) * i));
159 }
160 
161 caddr_t __read_mostly unmapped_buf;
162 
163 /* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
164 struct proc *bufdaemonproc;
165 
166 static void vm_hold_free_pages(struct buf *bp, int newbsize);
167 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
168 		vm_offset_t to);
169 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
170 static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
171 		vm_page_t m);
172 static void vfs_clean_pages_dirty_buf(struct buf *bp);
173 static void vfs_setdirty_range(struct buf *bp);
174 static void vfs_vmio_invalidate(struct buf *bp);
175 static void vfs_vmio_truncate(struct buf *bp, int npages);
176 static void vfs_vmio_extend(struct buf *bp, int npages, int size);
177 static int vfs_bio_clcheck(struct vnode *vp, int size,
178 		daddr_t lblkno, daddr_t blkno);
179 static void breada(struct vnode *, daddr_t *, int *, int, struct ucred *, int,
180 		void (*)(struct buf *));
181 static int buf_flush(struct vnode *vp, struct bufdomain *, int);
182 static int flushbufqueues(struct vnode *, struct bufdomain *, int, int);
183 static void buf_daemon(void);
184 static __inline void bd_wakeup(void);
185 static int sysctl_runningspace(SYSCTL_HANDLER_ARGS);
186 static void bufkva_reclaim(vmem_t *, int);
187 static void bufkva_free(struct buf *);
188 static int buf_import(void *, void **, int, int, int);
189 static void buf_release(void *, void **, int);
190 static void maxbcachebuf_adjust(void);
191 static inline struct bufdomain *bufdomain(struct buf *);
192 static void bq_remove(struct bufqueue *bq, struct buf *bp);
193 static void bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock);
194 static int buf_recycle(struct bufdomain *, bool kva);
195 static void bq_init(struct bufqueue *bq, int qindex, int cpu,
196 	    const char *lockname);
197 static void bd_init(struct bufdomain *bd);
198 static int bd_flushall(struct bufdomain *bd);
199 static int sysctl_bufdomain_long(SYSCTL_HANDLER_ARGS);
200 static int sysctl_bufdomain_int(SYSCTL_HANDLER_ARGS);
201 
202 static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
203 int vmiodirenable = TRUE;
204 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
205     "Use the VM system for directory writes");
206 long runningbufspace;
207 SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
208     "Amount of presently outstanding async buffer io");
209 SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
210     NULL, 0, sysctl_bufspace, "L", "Physical memory used for buffers");
211 static counter_u64_t bufkvaspace;
212 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace,
213     "Kernel virtual memory used for buffers");
214 static long maxbufspace;
215 SYSCTL_PROC(_vfs, OID_AUTO, maxbufspace,
216     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &maxbufspace,
217     __offsetof(struct bufdomain, bd_maxbufspace), sysctl_bufdomain_long, "L",
218     "Maximum allowed value of bufspace (including metadata)");
219 static long bufmallocspace;
220 SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
221     "Amount of malloced memory for buffers");
222 static long maxbufmallocspace;
223 SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace,
224     0, "Maximum amount of malloced memory for buffers");
225 static long lobufspace;
226 SYSCTL_PROC(_vfs, OID_AUTO, lobufspace,
227     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &lobufspace,
228     __offsetof(struct bufdomain, bd_lobufspace), sysctl_bufdomain_long, "L",
229     "Minimum amount of buffers we want to have");
230 long hibufspace;
231 SYSCTL_PROC(_vfs, OID_AUTO, hibufspace,
232     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &hibufspace,
233     __offsetof(struct bufdomain, bd_hibufspace), sysctl_bufdomain_long, "L",
234     "Maximum allowed value of bufspace (excluding metadata)");
235 long bufspacethresh;
236 SYSCTL_PROC(_vfs, OID_AUTO, bufspacethresh,
237     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &bufspacethresh,
238     __offsetof(struct bufdomain, bd_bufspacethresh), sysctl_bufdomain_long, "L",
239     "Bufspace consumed before waking the daemon to free some");
240 static counter_u64_t buffreekvacnt;
241 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt,
242     "Number of times we have freed the KVA space from some buffer");
243 static counter_u64_t bufdefragcnt;
244 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt,
245     "Number of times we have had to repeat buffer allocation to defragment");
246 static long lorunningspace;
247 SYSCTL_PROC(_vfs, OID_AUTO, lorunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
248     CTLFLAG_RW, &lorunningspace, 0, sysctl_runningspace, "L",
249     "Minimum preferred space used for in-progress I/O");
250 static long hirunningspace;
251 SYSCTL_PROC(_vfs, OID_AUTO, hirunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
252     CTLFLAG_RW, &hirunningspace, 0, sysctl_runningspace, "L",
253     "Maximum amount of space to use for in-progress I/O");
254 int dirtybufferflushes;
255 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
256     0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
257 int bdwriteskip;
258 SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
259     0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
260 int altbufferflushes;
261 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW | CTLFLAG_STATS,
262     &altbufferflushes, 0, "Number of fsync flushes to limit dirty buffers");
263 static int recursiveflushes;
264 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW | CTLFLAG_STATS,
265     &recursiveflushes, 0, "Number of flushes skipped due to being recursive");
266 static int sysctl_numdirtybuffers(SYSCTL_HANDLER_ARGS);
267 SYSCTL_PROC(_vfs, OID_AUTO, numdirtybuffers,
268     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RD, NULL, 0, sysctl_numdirtybuffers, "I",
269     "Number of buffers that are dirty (has unwritten changes) at the moment");
270 static int lodirtybuffers;
271 SYSCTL_PROC(_vfs, OID_AUTO, lodirtybuffers,
272     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &lodirtybuffers,
273     __offsetof(struct bufdomain, bd_lodirtybuffers), sysctl_bufdomain_int, "I",
274     "How many buffers we want to have free before bufdaemon can sleep");
275 static int hidirtybuffers;
276 SYSCTL_PROC(_vfs, OID_AUTO, hidirtybuffers,
277     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &hidirtybuffers,
278     __offsetof(struct bufdomain, bd_hidirtybuffers), sysctl_bufdomain_int, "I",
279     "When the number of dirty buffers is considered severe");
280 int dirtybufthresh;
281 SYSCTL_PROC(_vfs, OID_AUTO, dirtybufthresh,
282     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &dirtybufthresh,
283     __offsetof(struct bufdomain, bd_dirtybufthresh), sysctl_bufdomain_int, "I",
284     "Number of bdwrite to bawrite conversions to clear dirty buffers");
285 static int numfreebuffers;
286 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
287     "Number of free buffers");
288 static int lofreebuffers;
289 SYSCTL_PROC(_vfs, OID_AUTO, lofreebuffers,
290     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &lofreebuffers,
291     __offsetof(struct bufdomain, bd_lofreebuffers), sysctl_bufdomain_int, "I",
292    "Target number of free buffers");
293 static int hifreebuffers;
294 SYSCTL_PROC(_vfs, OID_AUTO, hifreebuffers,
295     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &hifreebuffers,
296     __offsetof(struct bufdomain, bd_hifreebuffers), sysctl_bufdomain_int, "I",
297    "Threshold for clean buffer recycling");
298 static counter_u64_t getnewbufcalls;
299 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD,
300    &getnewbufcalls, "Number of calls to getnewbuf");
301 static counter_u64_t getnewbufrestarts;
302 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RD,
303     &getnewbufrestarts,
304     "Number of times getnewbuf has had to restart a buffer acquisition");
305 static counter_u64_t mappingrestarts;
306 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RD,
307     &mappingrestarts,
308     "Number of times getblk has had to restart a buffer mapping for "
309     "unmapped buffer");
310 static counter_u64_t numbufallocfails;
311 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, numbufallocfails, CTLFLAG_RW,
312     &numbufallocfails, "Number of times buffer allocations failed");
313 static int flushbufqtarget = 100;
314 SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
315     "Amount of work to do in flushbufqueues when helping bufdaemon");
316 static counter_u64_t notbufdflushes;
317 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes,
318     "Number of dirty buffer flushes done by the bufdaemon helpers");
319 static long barrierwrites;
320 SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW | CTLFLAG_STATS,
321     &barrierwrites, 0, "Number of barrier writes");
322 SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
323     &unmapped_buf_allowed, 0,
324     "Permit the use of the unmapped i/o");
325 int maxbcachebuf = MAXBCACHEBUF;
326 SYSCTL_INT(_vfs, OID_AUTO, maxbcachebuf, CTLFLAG_RDTUN, &maxbcachebuf, 0,
327     "Maximum size of a buffer cache block");
328 
329 /*
330  * This lock synchronizes access to bd_request.
331  */
332 static struct mtx_padalign __exclusive_cache_line bdlock;
333 
334 /*
335  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
336  * waitrunningbufspace().
337  */
338 static struct mtx_padalign __exclusive_cache_line rbreqlock;
339 
340 /*
341  * Lock that protects bdirtywait.
342  */
343 static struct mtx_padalign __exclusive_cache_line bdirtylock;
344 
345 /*
346  * bufdaemon shutdown request and sleep channel.
347  */
348 static bool bd_shutdown;
349 
350 /*
351  * Wakeup point for bufdaemon, as well as an indicator of whether it is already
352  * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
353  * is idling.
354  */
355 static int bd_request;
356 
357 /*
358  * Request for the buf daemon to write more buffers than is indicated by
359  * lodirtybuffers.  This may be necessary to push out excess dependencies or
360  * defragment the address space where a simple count of the number of dirty
361  * buffers is insufficient to characterize the demand for flushing them.
362  */
363 static int bd_speedupreq;
364 
365 /*
366  * Synchronization (sleep/wakeup) variable for active buffer space requests.
367  * Set when wait starts, cleared prior to wakeup().
368  * Used in runningbufwakeup() and waitrunningbufspace().
369  */
370 static int runningbufreq;
371 
372 /*
373  * Synchronization for bwillwrite() waiters.
374  */
375 static int bdirtywait;
376 
377 /*
378  * Definitions for the buffer free lists.
379  */
380 #define QUEUE_NONE	0	/* on no queue */
381 #define QUEUE_EMPTY	1	/* empty buffer headers */
382 #define QUEUE_DIRTY	2	/* B_DELWRI buffers */
383 #define QUEUE_CLEAN	3	/* non-B_DELWRI buffers */
384 #define QUEUE_SENTINEL	4	/* not a queue index, but a sentinel marker */
385 
386 /* Maximum number of buffer domains. */
387 #define	BUF_DOMAINS	8
388 
389 struct bufdomainset bdlodirty;		/* Domains > lodirty */
390 struct bufdomainset bdhidirty;		/* Domains > hidirty */
391 
392 /* Configured number of clean queues. */
393 static int __read_mostly buf_domains;
394 
395 BITSET_DEFINE(bufdomainset, BUF_DOMAINS);
396 struct bufdomain __exclusive_cache_line bdomain[BUF_DOMAINS];
397 struct bufqueue __exclusive_cache_line bqempty;
398 
399 /*
400  * per-cpu empty buffer cache.
401  */
402 uma_zone_t buf_zone;
403 
404 static int
405 sysctl_runningspace(SYSCTL_HANDLER_ARGS)
406 {
407 	long value;
408 	int error;
409 
410 	value = *(long *)arg1;
411 	error = sysctl_handle_long(oidp, &value, 0, req);
412 	if (error != 0 || req->newptr == NULL)
413 		return (error);
414 	mtx_lock(&rbreqlock);
415 	if (arg1 == &hirunningspace) {
416 		if (value < lorunningspace)
417 			error = EINVAL;
418 		else
419 			hirunningspace = value;
420 	} else {
421 		KASSERT(arg1 == &lorunningspace,
422 		    ("%s: unknown arg1", __func__));
423 		if (value > hirunningspace)
424 			error = EINVAL;
425 		else
426 			lorunningspace = value;
427 	}
428 	mtx_unlock(&rbreqlock);
429 	return (error);
430 }
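
/*
 * Usage note (editorial): the knobs managed above are ordinary sysctls
 * constrained so that lorunningspace <= hirunningspace, e.g. from userland:
 *
 *	# sysctl vfs.hirunningspace=16777216
 *	# sysctl vfs.lorunningspace=8388608
 *
 * Attempting to set vfs.lorunningspace above vfs.hirunningspace (or the
 * reverse) is rejected with EINVAL by the handler above.
 */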
431 
432 static int
433 sysctl_bufdomain_int(SYSCTL_HANDLER_ARGS)
434 {
435 	int error;
436 	int value;
437 	int i;
438 
439 	value = *(int *)arg1;
440 	error = sysctl_handle_int(oidp, &value, 0, req);
441 	if (error != 0 || req->newptr == NULL)
442 		return (error);
443 	*(int *)arg1 = value;
444 	for (i = 0; i < buf_domains; i++)
445 		*(int *)(uintptr_t)(((uintptr_t)&bdomain[i]) + arg2) =
446 		    value / buf_domains;
447 
448 	return (error);
449 }
450 
451 static int
452 sysctl_bufdomain_long(SYSCTL_HANDLER_ARGS)
453 {
454 	long value;
455 	int error;
456 	int i;
457 
458 	value = *(long *)arg1;
459 	error = sysctl_handle_long(oidp, &value, 0, req);
460 	if (error != 0 || req->newptr == NULL)
461 		return (error);
462 	*(long *)arg1 = value;
463 	for (i = 0; i < buf_domains; i++)
464 		*(long *)(uintptr_t)(((uintptr_t)&bdomain[i]) + arg2) =
465 		    value / buf_domains;
466 
467 	return (error);
468 }
469 
470 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
471     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
472 static int
473 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
474 {
475 	long lvalue;
476 	int ivalue;
477 	int i;
478 
479 	lvalue = 0;
480 	for (i = 0; i < buf_domains; i++)
481 		lvalue += bdomain[i].bd_bufspace;
482 	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
483 		return (sysctl_handle_long(oidp, &lvalue, 0, req));
484 	if (lvalue > INT_MAX)
485 		/* On overflow, still write out a long to trigger ENOMEM. */
486 		return (sysctl_handle_long(oidp, &lvalue, 0, req));
487 	ivalue = lvalue;
488 	return (sysctl_handle_int(oidp, &ivalue, 0, req));
489 }
490 #else
491 static int
492 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
493 {
494 	long lvalue;
495 	int i;
496 
497 	lvalue = 0;
498 	for (i = 0; i < buf_domains; i++)
499 		lvalue += bdomain[i].bd_bufspace;
500 	return (sysctl_handle_long(oidp, &lvalue, 0, req));
501 }
502 #endif
503 
504 static int
505 sysctl_numdirtybuffers(SYSCTL_HANDLER_ARGS)
506 {
507 	int value;
508 	int i;
509 
510 	value = 0;
511 	for (i = 0; i < buf_domains; i++)
512 		value += bdomain[i].bd_numdirtybuffers;
513 	return (sysctl_handle_int(oidp, &value, 0, req));
514 }
515 
516 /*
517  *	bdirtywakeup:
518  *
519  *	Wakeup any bwillwrite() waiters.
520  */
521 static void
522 bdirtywakeup(void)
523 {
524 	mtx_lock(&bdirtylock);
525 	if (bdirtywait) {
526 		bdirtywait = 0;
527 		wakeup(&bdirtywait);
528 	}
529 	mtx_unlock(&bdirtylock);
530 }
531 
532 /*
533  *	bd_clear:
534  *
535  *	Clear a domain from the appropriate bitsets when dirtybuffers
536  *	is decremented.
537  */
538 static void
539 bd_clear(struct bufdomain *bd)
540 {
541 
542 	mtx_lock(&bdirtylock);
543 	if (bd->bd_numdirtybuffers <= bd->bd_lodirtybuffers)
544 		BIT_CLR(BUF_DOMAINS, BD_DOMAIN(bd), &bdlodirty);
545 	if (bd->bd_numdirtybuffers <= bd->bd_hidirtybuffers)
546 		BIT_CLR(BUF_DOMAINS, BD_DOMAIN(bd), &bdhidirty);
547 	mtx_unlock(&bdirtylock);
548 }
549 
550 /*
551  *	bd_set:
552  *
553  *	Set a domain in the appropriate bitsets when dirtybuffers
554  *	is incremented.
555  */
556 static void
557 bd_set(struct bufdomain *bd)
558 {
559 
560 	mtx_lock(&bdirtylock);
561 	if (bd->bd_numdirtybuffers > bd->bd_lodirtybuffers)
562 		BIT_SET(BUF_DOMAINS, BD_DOMAIN(bd), &bdlodirty);
563 	if (bd->bd_numdirtybuffers > bd->bd_hidirtybuffers)
564 		BIT_SET(BUF_DOMAINS, BD_DOMAIN(bd), &bdhidirty);
565 	mtx_unlock(&bdirtylock);
566 }
567 
568 /*
569  *	bdirtysub:
570  *
571  *	Decrement the numdirtybuffers count by one and wakeup any
572  *	threads blocked in bwillwrite().
573  */
574 static void
575 bdirtysub(struct buf *bp)
576 {
577 	struct bufdomain *bd;
578 	int num;
579 
580 	bd = bufdomain(bp);
581 	num = atomic_fetchadd_int(&bd->bd_numdirtybuffers, -1);
582 	if (num == (bd->bd_lodirtybuffers + bd->bd_hidirtybuffers) / 2)
583 		bdirtywakeup();
584 	if (num == bd->bd_lodirtybuffers || num == bd->bd_hidirtybuffers)
585 		bd_clear(bd);
586 }
587 
588 /*
589  *	bdirtyadd:
590  *
591  *	Increment the numdirtybuffers count by one and wakeup the buf
592  *	daemon if needed.
593  */
594 static void
595 bdirtyadd(struct buf *bp)
596 {
597 	struct bufdomain *bd;
598 	int num;
599 
600 	/*
601 	 * Only do the wakeup once as we cross the boundary.  The
602 	 * buf daemon will keep running until the condition clears.
603 	 */
604 	bd = bufdomain(bp);
605 	num = atomic_fetchadd_int(&bd->bd_numdirtybuffers, 1);
606 	if (num == (bd->bd_lodirtybuffers + bd->bd_hidirtybuffers) / 2)
607 		bd_wakeup();
608 	if (num == bd->bd_lodirtybuffers || num == bd->bd_hidirtybuffers)
609 		bd_set(bd);
610 }
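
/*
 * Worked example (editorial, illustrative only): with
 * bd_lodirtybuffers = 100 and bd_hidirtybuffers = 200, the
 * bdirtywakeup()/bd_wakeup() calls above fire only on the transition
 * across the midpoint (100 + 200) / 2 = 150, i.e. when the count moves
 * from 150 to 149 (or 151).  Because atomic_fetchadd_int() returns the
 * old value, exactly one thread observes each boundary crossing, and
 * the lo/hi bitsets are updated only when the count crosses 100 or 200.
 */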
611 
612 /*
613  *	bufspace_daemon_wakeup:
614  *
615  *	Wakeup the daemons responsible for freeing clean bufs.
616  */
617 static void
618 bufspace_daemon_wakeup(struct bufdomain *bd)
619 {
620 
621 	/*
622 	 * Avoid the lock if the daemon is already running.
623 	 */
624 	if (atomic_fetchadd_int(&bd->bd_running, 1) == 0) {
625 		BD_RUN_LOCK(bd);
626 		atomic_store_int(&bd->bd_running, 1);
627 		wakeup(&bd->bd_running);
628 		BD_RUN_UNLOCK(bd);
629 	}
630 }
631 
632 /*
633  *	bufspace_adjust:
634  *
635  *	Adjust the reported bufspace for a KVA managed buffer, possibly
636  * 	waking any waiters.
637  */
638 static void
639 bufspace_adjust(struct buf *bp, int bufsize)
640 {
641 	struct bufdomain *bd;
642 	long space;
643 	int diff;
644 
645 	KASSERT((bp->b_flags & B_MALLOC) == 0,
646 	    ("bufspace_adjust: malloc buf %p", bp));
647 	bd = bufdomain(bp);
648 	diff = bufsize - bp->b_bufsize;
649 	if (diff < 0) {
650 		atomic_subtract_long(&bd->bd_bufspace, -diff);
651 	} else if (diff > 0) {
652 		space = atomic_fetchadd_long(&bd->bd_bufspace, diff);
653 		/* Wake up the daemon on the transition. */
654 		if (space < bd->bd_bufspacethresh &&
655 		    space + diff >= bd->bd_bufspacethresh)
656 			bufspace_daemon_wakeup(bd);
657 	}
658 	bp->b_bufsize = bufsize;
659 }
660 
661 /*
662  *	bufspace_reserve:
663  *
664  *	Reserve bufspace before calling allocbuf().  metadata has a
665  *	different space limit than data.
666  */
667 static int
668 bufspace_reserve(struct bufdomain *bd, int size, bool metadata)
669 {
670 	long limit, new;
671 	long space;
672 
673 	if (metadata)
674 		limit = bd->bd_maxbufspace;
675 	else
676 		limit = bd->bd_hibufspace;
677 	space = atomic_fetchadd_long(&bd->bd_bufspace, size);
678 	new = space + size;
679 	if (new > limit) {
680 		atomic_subtract_long(&bd->bd_bufspace, size);
681 		return (ENOSPC);
682 	}
683 
684 	/* Wake up the daemon on the transition. */
685 	if (space < bd->bd_bufspacethresh && new >= bd->bd_bufspacethresh)
686 		bufspace_daemon_wakeup(bd);
687 
688 	return (0);
689 }
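
/*
 * Usage sketch (editorial; the canonical caller is getnewbuf(), which is
 * not part of this excerpt, so the control flow below is a simplified
 * approximation, not the exact code):
 *
 *	if (bufspace_reserve(bd, maxsize, metadata) != 0) {
 *		// over the limit: recycle a clean buf or sleep
 *		if (buf_recycle(bd, false) != 0)
 *			bufspace_wait(bd, vp, gbflags, slpflag, slptimeo);
 *	} else if ((bp = buf_alloc(bd)) == NULL) {
 *		// no free headers: undo the reservation and retry
 *		bufspace_release(bd, maxsize);
 *	}
 */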
690 
691 /*
692  *	bufspace_release:
693  *
694  *	Release reserved bufspace after bufspace_adjust() has consumed it.
695  */
696 static void
697 bufspace_release(struct bufdomain *bd, int size)
698 {
699 
700 	atomic_subtract_long(&bd->bd_bufspace, size);
701 }
702 
703 /*
704  *	bufspace_wait:
705  *
706  *	Wait for bufspace, acting as the buf daemon if a locked vnode is
707  *	supplied.  bd_wanted must be set prior to polling for space.  The
708  *	operation must be re-tried on return.
709  */
710 static void
711 bufspace_wait(struct bufdomain *bd, struct vnode *vp, int gbflags,
712     int slpflag, int slptimeo)
713 {
714 	struct thread *td;
715 	int error, fl, norunbuf;
716 
717 	if ((gbflags & GB_NOWAIT_BD) != 0)
718 		return;
719 
720 	td = curthread;
721 	BD_LOCK(bd);
722 	while (bd->bd_wanted) {
723 		if (vp != NULL && vp->v_type != VCHR &&
724 		    (td->td_pflags & TDP_BUFNEED) == 0) {
725 			BD_UNLOCK(bd);
726 			/*
727 			 * getblk() is called with the vnode locked, and
728 			 * a majority of the dirty buffers may well
729 			 * belong to that vnode.  Flushing the vnode's
730 			 * buffers can make progress that cannot be
731 			 * achieved by the buf daemon, which is unable
732 			 * to lock the vnode.
733 			 */
734 			norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
735 			    (td->td_pflags & TDP_NORUNNINGBUF);
736 
737 			/*
738 			 * Play bufdaemon.  The getnewbuf() function
739 			 * may be called while the thread owns lock
740 			 * for another dirty buffer for the same
741 			 * vnode, which makes it impossible to use
742 			 * VOP_FSYNC() there, due to the buffer lock
743 			 * recursion.
744 			 */
745 			td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
746 			fl = buf_flush(vp, bd, flushbufqtarget);
747 			td->td_pflags &= norunbuf;
748 			BD_LOCK(bd);
749 			if (fl != 0)
750 				continue;
751 			if (bd->bd_wanted == 0)
752 				break;
753 		}
754 		error = msleep(&bd->bd_wanted, BD_LOCKPTR(bd),
755 		    (PRIBIO + 4) | slpflag, "newbuf", slptimeo);
756 		if (error != 0)
757 			break;
758 	}
759 	BD_UNLOCK(bd);
760 }
761 
762 static void
763 bufspace_daemon_shutdown(void *arg, int howto __unused)
764 {
765 	struct bufdomain *bd = arg;
766 	int error;
767 
768 	BD_RUN_LOCK(bd);
769 	bd->bd_shutdown = true;
770 	wakeup(&bd->bd_running);
771 	error = msleep(&bd->bd_shutdown, BD_RUN_LOCKPTR(bd), 0,
772 	    "bufspace_shutdown", 60 * hz);
773 	BD_RUN_UNLOCK(bd);
774 	if (error != 0)
775 		printf("bufspacedaemon wait error: %d\n", error);
776 }
777 
778 /*
779  *	bufspace_daemon:
780  *
781  *	buffer space management daemon.  Tries to maintain some marginal
782  *	amount of free buffer space so that requesting processes neither
783  *	block nor work to reclaim buffers.
784  */
785 static void
786 bufspace_daemon(void *arg)
787 {
788 	struct bufdomain *bd = arg;
789 
790 	EVENTHANDLER_REGISTER(shutdown_pre_sync, bufspace_daemon_shutdown, bd,
791 	    SHUTDOWN_PRI_LAST + 100);
792 
793 	BD_RUN_LOCK(bd);
794 	while (!bd->bd_shutdown) {
795 		BD_RUN_UNLOCK(bd);
796 
797 		/*
798 		 * Free buffers from the clean queue until we meet our
799 		 * targets.
800 		 *
801 		 * Theory of operation:  The buffer cache is most efficient
802 		 * when some free buffer headers and space are always
803 		 * available to getnewbuf().  This daemon attempts to prevent
804 		 * the excessive blocking and synchronization associated
805 		 * with shortfall.  It goes through three phases according
806 		 * to demand:
807 		 *
808 		 * 1)	The daemon wakes up voluntarily once per second
809 		 *	during idle periods when the counters are below
810 		 *	the wakeup thresholds (bufspacethresh, lofreebuffers).
811 		 *
812 		 * 2)	The daemon wakes up as we cross the thresholds
813 		 *	ahead of any potential blocking.  This may bounce
814 		 *	slightly according to the rate of consumption and
815 		 *	release.
816 		 *
817 		 * 3)	The daemon and consumers are starved for working
818 		 *	clean buffers.  This is the 'bufspace' sleep below
819 		 *	which will inefficiently trade bufs with bqrelse
820 		 *	until we return to condition 2.
821 		 */
822 		while (bd->bd_bufspace > bd->bd_lobufspace ||
823 		    bd->bd_freebuffers < bd->bd_hifreebuffers) {
824 			if (buf_recycle(bd, false) != 0) {
825 				if (bd_flushall(bd))
826 					continue;
827 				/*
828 				 * Speedup dirty if we've run out of clean
829 				 * buffers.  This is possible in particular
830 				 * because softdep may hold many bufs locked
831 				 * pending writes to other bufs which are
832 				 * marked for delayed write, exhausting
833 				 * clean space until they are written.
834 				 */
835 				bd_speedup();
836 				BD_LOCK(bd);
837 				if (bd->bd_wanted) {
838 					msleep(&bd->bd_wanted, BD_LOCKPTR(bd),
839 					    PRIBIO|PDROP, "bufspace", hz/10);
840 				} else
841 					BD_UNLOCK(bd);
842 			}
843 			maybe_yield();
844 		}
845 
846 		/*
847 		 * Re-check our limits and sleep.  bd_running must be
848 		 * cleared prior to checking the limits to avoid missed
849 		 * wakeups.  The waker will adjust one of bufspace or
850 		 * freebuffers prior to checking bd_running.
851 		 */
852 		BD_RUN_LOCK(bd);
853 		if (bd->bd_shutdown)
854 			break;
855 		atomic_store_int(&bd->bd_running, 0);
856 		if (bd->bd_bufspace < bd->bd_bufspacethresh &&
857 		    bd->bd_freebuffers > bd->bd_lofreebuffers) {
858 			msleep(&bd->bd_running, BD_RUN_LOCKPTR(bd),
859 			    PRIBIO, "-", hz);
860 		} else {
861 			/* Avoid spurious wakeups while running. */
862 			atomic_store_int(&bd->bd_running, 1);
863 		}
864 	}
865 	wakeup(&bd->bd_shutdown);
866 	BD_RUN_UNLOCK(bd);
867 	kthread_exit();
868 }
869 
870 /*
871  *	bufmallocadjust:
872  *
873  *	Adjust the reported bufspace for a malloc managed buffer, possibly
874  *	waking any waiters.
875  */
876 static void
877 bufmallocadjust(struct buf *bp, int bufsize)
878 {
879 	int diff;
880 
881 	KASSERT((bp->b_flags & B_MALLOC) != 0,
882 	    ("bufmallocadjust: non-malloc buf %p", bp));
883 	diff = bufsize - bp->b_bufsize;
884 	if (diff < 0)
885 		atomic_subtract_long(&bufmallocspace, -diff);
886 	else
887 		atomic_add_long(&bufmallocspace, diff);
888 	bp->b_bufsize = bufsize;
889 }
890 
891 /*
892  *	runningwakeup:
893  *
894  *	Wake up processes that are waiting on asynchronous writes to fall
895  *	below lorunningspace.
896  */
897 static void
898 runningwakeup(void)
899 {
900 
901 	mtx_lock(&rbreqlock);
902 	if (runningbufreq) {
903 		runningbufreq = 0;
904 		wakeup(&runningbufreq);
905 	}
906 	mtx_unlock(&rbreqlock);
907 }
908 
909 /*
910  *	runningbufwakeup:
911  *
912  *	Decrement the outstanding write count accordingly.
913  */
914 void
915 runningbufwakeup(struct buf *bp)
916 {
917 	long space, bspace;
918 
919 	bspace = bp->b_runningbufspace;
920 	if (bspace == 0)
921 		return;
922 	space = atomic_fetchadd_long(&runningbufspace, -bspace);
923 	KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
924 	    space, bspace));
925 	bp->b_runningbufspace = 0;
926 	/*
927 	 * Only acquire the lock and wakeup on the transition from exceeding
928 	 * the threshold to falling below it.
929 	 */
930 	if (space < lorunningspace)
931 		return;
932 	if (space - bspace > lorunningspace)
933 		return;
934 	runningwakeup();
935 }
936 
937 /*
938  *	waitrunningbufspace()
939  *
940  *	runningbufspace is a measure of the amount of I/O currently
941  *	running.  This routine is used in async-write situations to
942  *	prevent creating huge backups of pending writes to a device.
943  *	Only asynchronous writes are governed by this function.
944  *
945  *	This does NOT turn an async write into a sync write.  It waits
946  *	for earlier writes to complete and generally returns before the
947  *	caller's write has reached the device.
948  */
949 void
950 waitrunningbufspace(void)
951 {
952 
953 	mtx_lock(&rbreqlock);
954 	while (runningbufspace > hirunningspace) {
955 		runningbufreq = 1;
956 		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
957 	}
958 	mtx_unlock(&rbreqlock);
959 }
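
/*
 * Protocol sketch (editorial): an async writer accounts for its I/O
 * before issuing it and then throttles itself, roughly:
 *
 *	bp->b_runningbufspace = bp->b_bufsize;
 *	atomic_add_long(&runningbufspace, bp->b_runningbufspace);
 *	...start the write...
 *	waitrunningbufspace();	// sleep on "wdrain" while the total
 *				// exceeds hirunningspace
 *
 * Completion later calls runningbufwakeup(bp), which drops the
 * accounting and wakes "wdrain" sleepers once the total falls back
 * below lorunningspace.
 */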
960 
961 /*
962  *	vfs_buf_test_cache:
963  *
964  *	Called when a buffer is extended.  This function clears the B_CACHE
965  *	bit if the newly extended portion of the buffer does not contain
966  *	valid data.
967  */
968 static __inline void
969 vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff, vm_offset_t off,
970     vm_offset_t size, vm_page_t m)
971 {
972 
973 	/*
974 	 * This function and its results are protected by higher level
975 	 * synchronization requiring vnode and buf locks to page in and
976 	 * validate pages.
977 	 */
978 	if (bp->b_flags & B_CACHE) {
979 		int base = (foff + off) & PAGE_MASK;
980 		if (vm_page_is_valid(m, base, size) == 0)
981 			bp->b_flags &= ~B_CACHE;
982 	}
983 }
984 
985 /* Wake up the buffer daemon if necessary */
986 static void
987 bd_wakeup(void)
988 {
989 
990 	mtx_lock(&bdlock);
991 	if (bd_request == 0) {
992 		bd_request = 1;
993 		wakeup(&bd_request);
994 	}
995 	mtx_unlock(&bdlock);
996 }
997 
998 /*
999  * Adjust the maxbcachebuf tunable.
1000  */
1001 static void
1002 maxbcachebuf_adjust(void)
1003 {
1004 	int i;
1005 
1006 	/*
1007 	 * maxbcachebuf must be a power of 2 >= MAXBSIZE.
1008 	 */
1009 	i = 2;
1010 	while (i * 2 <= maxbcachebuf)
1011 		i *= 2;
1012 	maxbcachebuf = i;
1013 	if (maxbcachebuf < MAXBSIZE)
1014 		maxbcachebuf = MAXBSIZE;
1015 	if (maxbcachebuf > maxphys)
1016 		maxbcachebuf = maxphys;
1017 	if (bootverbose != 0 && maxbcachebuf != MAXBCACHEBUF)
1018 		printf("maxbcachebuf=%d\n", maxbcachebuf);
1019 }
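
/*
 * Example (editorial): a loader tunable of vfs.maxbcachebuf=98304 (96k)
 * is first rounded down to the largest power of two not exceeding it
 * (65536), then clamped to the range [MAXBSIZE, maxphys] by the checks
 * above.
 */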
1020 
1021 /*
1022  * bd_speedup - speedup the buffer cache flushing code
1023  */
1024 void
1025 bd_speedup(void)
1026 {
1027 	int needwake;
1028 
1029 	mtx_lock(&bdlock);
1030 	needwake = 0;
1031 	if (bd_speedupreq == 0 || bd_request == 0)
1032 		needwake = 1;
1033 	bd_speedupreq = 1;
1034 	bd_request = 1;
1035 	if (needwake)
1036 		wakeup(&bd_request);
1037 	mtx_unlock(&bdlock);
1038 }
1039 
1040 #ifdef __i386__
1041 #define	TRANSIENT_DENOM	5
1042 #else
1043 #define	TRANSIENT_DENOM 10
1044 #endif
1045 
1046 /*
1047  * Calculate buffer cache scaling values and reserve space for buffer
1048  * headers.  This is called during low-level kernel initialization and
1049  * may be called more than once.  We CANNOT write to the memory area
1050  * being reserved at this time.
1051  */
1052 caddr_t
1053 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
1054 {
1055 	int tuned_nbuf;
1056 	long maxbuf, maxbuf_sz, buf_sz,	biotmap_sz;
1057 
1058 	/*
1059 	 * With KASAN or KMSAN enabled, the kernel map is shadowed.  Account for
1060 	 * this when sizing maps based on the amount of physical memory
1061 	 * available.
1062 	 */
1063 #if defined(KASAN)
1064 	physmem_est = (physmem_est * KASAN_SHADOW_SCALE) /
1065 	    (KASAN_SHADOW_SCALE + 1);
1066 #elif defined(KMSAN)
1067 	physmem_est /= 3;
1068 
1069 	/*
1070 	 * KMSAN cannot reliably determine whether buffer data is initialized
1071 	 * unless it is updated through a KVA mapping.
1072 	 */
1073 	unmapped_buf_allowed = 0;
1074 #endif
1075 
1076 	/*
1077 	 * physmem_est is in pages.  Convert it to kilobytes (assumes
1078 	 * PAGE_SIZE is >= 1K)
1079 	 */
1080 	physmem_est = physmem_est * (PAGE_SIZE / 1024);
1081 
1082 	maxbcachebuf_adjust();
1083 	/*
1084 	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
1085 	 * For the first 64MB of ram nominally allocate sufficient buffers to
1086 	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
1087 	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
1088 	 * the buffer cache we limit the eventual kva reservation to
1089 	 * maxbcache bytes.
1090 	 *
1091 	 * factor represents the 1/4 x ram conversion.
1092 	 */
1093 	if (nbuf == 0) {
1094 		int factor = 4 * BKVASIZE / 1024;
1095 
1096 		nbuf = 50;
1097 		if (physmem_est > 4096)
1098 			nbuf += min((physmem_est - 4096) / factor,
1099 			    65536 / factor);
1100 		if (physmem_est > 65536)
1101 			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
1102 			    32 * 1024 * 1024 / (factor * 5));
1103 
1104 		if (maxbcache && nbuf > maxbcache / BKVASIZE)
1105 			nbuf = maxbcache / BKVASIZE;
1106 		tuned_nbuf = 1;
1107 	} else
1108 		tuned_nbuf = 0;
1109 
1110 	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
1111 	maxbuf = (LONG_MAX / 3) / BKVASIZE;
1112 	if (nbuf > maxbuf) {
1113 		if (!tuned_nbuf)
1114 			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
1115 			    maxbuf);
1116 		nbuf = maxbuf;
1117 	}
1118 
1119 	/*
1120 	 * Ideal allocation size for the transient bio submap is 10%
1121 	 * of the maximal space buffer map.  This roughly corresponds
1122 	 * to the amount of the buffer mapped for typical UFS load.
1123 	 *
1124 	 * Clip the buffer map to reserve space for the transient
1125 	 * BIOs, if its extent is bigger than 90% (80% on i386) of the
1126 	 * maximum buffer map extent on the platform.
1127 	 *
1128 	 * Falling back to maxbuf when maxbcache is unset avoids
1129 	 * trimming the buffer KVA on architectures with ample KVA
1130 	 * space.
1131 	 */
1132 	if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
1133 		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
1134 		buf_sz = (long)nbuf * BKVASIZE;
1135 		if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
1136 		    (TRANSIENT_DENOM - 1)) {
1137 			/*
1138 			 * There is more KVA than memory.  Do not
1139 			 * adjust buffer map size, and assign the rest
1140 			 * of maxbuf to transient map.
1141 			 */
1142 			biotmap_sz = maxbuf_sz - buf_sz;
1143 		} else {
1144 			/*
1145 			 * Buffer map spans all KVA we could afford on
1146 			 * this platform.  Give 10% (20% on i386) of
1147 			 * the buffer map to the transient bio map.
1148 			 */
1149 			biotmap_sz = buf_sz / TRANSIENT_DENOM;
1150 			buf_sz -= biotmap_sz;
1151 		}
1152 		if (biotmap_sz / INT_MAX > maxphys)
1153 			bio_transient_maxcnt = INT_MAX;
1154 		else
1155 			bio_transient_maxcnt = biotmap_sz / maxphys;
1156 		/*
1157 		 * Artificially limit to 1024 simultaneous in-flight I/Os
1158 		 * using the transient mapping.
1159 		 */
1160 		if (bio_transient_maxcnt > 1024)
1161 			bio_transient_maxcnt = 1024;
1162 		if (tuned_nbuf)
1163 			nbuf = buf_sz / BKVASIZE;
1164 	}
1165 
1166 	if (nswbuf == 0) {
1167 		nswbuf = min(nbuf / 4, 256);
1168 		if (nswbuf < NSWBUF_MIN)
1169 			nswbuf = NSWBUF_MIN;
1170 	}
1171 
1172 	/*
1173 	 * Reserve space for the buffer cache buffers
1174 	 */
1175 	buf = (char *)v;
1176 	v = (caddr_t)buf + (sizeof(struct buf) + sizeof(vm_page_t) *
1177 	    atop(maxbcachebuf)) * nbuf;
1178 
1179 	return (v);
1180 }
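
/*
 * Worked example of the auto-tuning above (editorial, assuming the
 * common BKVASIZE of 16 KiB, so factor = 64): with physmem_est of
 * 1 GiB (1048576 KiB) and nbuf/maxbcache left untuned,
 *
 *	nbuf = 50 + min((1048576 - 4096) / 64, 65536 / 64)
 *	     + min((1048576 - 65536) * 2 / 320, 32M / 320)
 *	     = 50 + 1024 + 6144 = 7218 buffers,
 *
 * i.e. about 113 MiB of buffer KVA before the transient-map clipping.
 */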
1181 
1182 /*
1183  * Single global constant for BUF_WMESG, to avoid getting multiple
1184  * references.
1185  */
1186 static const char buf_wmesg[] = "bufwait";
1187 
1188 /* Initialize the buffer subsystem.  Called before use of any buffers. */
1189 void
1190 bufinit(void)
1191 {
1192 	struct buf *bp;
1193 	int i;
1194 
1195 	KASSERT(maxbcachebuf >= MAXBSIZE,
1196 	    ("maxbcachebuf (%d) must be >= MAXBSIZE (%d)\n", maxbcachebuf,
1197 	    MAXBSIZE));
1198 	bq_init(&bqempty, QUEUE_EMPTY, -1, "bufq empty lock");
1199 	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
1200 	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
1201 	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
1202 
1203 	unmapped_buf = (caddr_t)kva_alloc(maxphys);
1204 
1205 	/* finally, initialize each buffer header and stick on empty q */
1206 	for (i = 0; i < nbuf; i++) {
1207 		bp = nbufp(i);
1208 		bzero(bp, sizeof(*bp) + sizeof(vm_page_t) * atop(maxbcachebuf));
1209 		bp->b_flags = B_INVAL;
1210 		bp->b_rcred = NOCRED;
1211 		bp->b_wcred = NOCRED;
1212 		bp->b_qindex = QUEUE_NONE;
1213 		bp->b_domain = -1;
1214 		bp->b_subqueue = mp_maxid + 1;
1215 		bp->b_xflags = 0;
1216 		bp->b_data = bp->b_kvabase = unmapped_buf;
1217 		LIST_INIT(&bp->b_dep);
1218 		BUF_LOCKINIT(bp, buf_wmesg);
1219 		bq_insert(&bqempty, bp, false);
1220 	}
1221 
1222 	/*
1223 	 * maxbufspace is the absolute maximum amount of buffer space we are
1224 	 * allowed to reserve in KVM and in real terms.  The absolute maximum
1225 	 * is nominally used by metadata.  hibufspace is the nominal maximum
1226 	 * used by most other requests.  The differential is required to
1227 	 * ensure that metadata deadlocks don't occur.
1228 	 *
1229 	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
1230 	 * this may result in KVM fragmentation which is not handled optimally
1231 	 * by the system. XXX This is less true with vmem.  We could use
1232 	 * PAGE_SIZE.
1233 	 */
1234 	maxbufspace = (long)nbuf * BKVASIZE;
1235 	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - maxbcachebuf * 10);
1236 	lobufspace = (hibufspace / 20) * 19; /* 95% */
1237 	bufspacethresh = lobufspace + (hibufspace - lobufspace) / 2;
1238 
1239 	/*
1240 	 * Note: The 16 MiB upper limit for hirunningspace was chosen
1241 	 * arbitrarily and may need further tuning. It corresponds to
1242 	 * 128 outstanding write IO requests (if IO size is 128 KiB),
1243 	 * which fits with many RAID controllers' tagged queuing limits.
1244 	 * The lower 1 MiB limit is the historical upper limit for
1245 	 * hirunningspace.
1246 	 */
1247 	hirunningspace = lmax(lmin(roundup(hibufspace / 64, maxbcachebuf),
1248 	    16 * 1024 * 1024), 1024 * 1024);
1249 	lorunningspace = roundup((hirunningspace * 2) / 3, maxbcachebuf);
1250 
1251 	/*
1252 	 * Limit the amount of malloc memory since it is wired permanently into
1253 	 * the kernel space.  Even though this is accounted for in the buffer
1254 	 * allocation, we don't want the malloced region to grow uncontrolled.
1255 	 * The malloc scheme improves memory utilization significantly on
1256 	 * average (small) directories.
1257 	 */
1258 	maxbufmallocspace = hibufspace / 20;
1259 
1260 	/*
1261 	 * Reduce the chance of a deadlock occurring by limiting the number
1262 	 * of delayed-write dirty buffers we allow to stack up.
1263 	 */
1264 	hidirtybuffers = nbuf / 4 + 20;
1265 	dirtybufthresh = hidirtybuffers * 9 / 10;
1266 	/*
1267 	 * To support extreme low-memory systems, make sure hidirtybuffers
1268 	 * cannot eat up all available buffer space.  This occurs when our
1269 	 * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
1270 	 * buffer space assuming BKVASIZE'd buffers.
1271 	 */
1272 	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
1273 		hidirtybuffers >>= 1;
1274 	}
1275 	lodirtybuffers = hidirtybuffers / 2;
1276 
1277 	/*
1278 	 * lofreebuffers should be sufficient to avoid stalling waiting on
1279 	 * buf headers under heavy utilization.  The bufs in per-cpu caches
1280 	 * are counted as free but will be unavailable to threads executing
1281 	 * on other cpus.
1282 	 *
1283 	 * hifreebuffers is the free target for the bufspace daemon.  This
1284 	 * should be set appropriately to limit work per-iteration.
1285 	 */
1286 	lofreebuffers = MIN((nbuf / 25) + (20 * mp_ncpus), 128 * mp_ncpus);
1287 	hifreebuffers = (3 * lofreebuffers) / 2;
1288 	numfreebuffers = nbuf;
1289 
1290 	/* Setup the kva and free list allocators. */
1291 	vmem_set_reclaim(buffer_arena, bufkva_reclaim);
1292 	buf_zone = uma_zcache_create("buf free cache",
1293 	    sizeof(struct buf) + sizeof(vm_page_t) * atop(maxbcachebuf),
1294 	    NULL, NULL, NULL, NULL, buf_import, buf_release, NULL, 0);
1295 
1296 	/*
1297 	 * Size the clean queue according to the amount of buffer space.
1298 	 * One queue per 256MB up to the max.  More queues give better
1299 	 * concurrency but less accurate LRU.
1300 	 */
1301 	buf_domains = MIN(howmany(maxbufspace, 256*1024*1024), BUF_DOMAINS);
1302 	for (i = 0 ; i < buf_domains; i++) {
1303 		struct bufdomain *bd;
1304 
1305 		bd = &bdomain[i];
1306 		bd_init(bd);
1307 		bd->bd_freebuffers = nbuf / buf_domains;
1308 		bd->bd_hifreebuffers = hifreebuffers / buf_domains;
1309 		bd->bd_lofreebuffers = lofreebuffers / buf_domains;
1310 		bd->bd_bufspace = 0;
1311 		bd->bd_maxbufspace = maxbufspace / buf_domains;
1312 		bd->bd_hibufspace = hibufspace / buf_domains;
1313 		bd->bd_lobufspace = lobufspace / buf_domains;
1314 		bd->bd_bufspacethresh = bufspacethresh / buf_domains;
1315 		bd->bd_numdirtybuffers = 0;
1316 		bd->bd_hidirtybuffers = hidirtybuffers / buf_domains;
1317 		bd->bd_lodirtybuffers = lodirtybuffers / buf_domains;
1318 		bd->bd_dirtybufthresh = dirtybufthresh / buf_domains;
1319 		/* Don't allow more than 2% of bufs in the per-cpu caches. */
1320 		bd->bd_lim = nbuf / buf_domains / 50 / mp_ncpus;
1321 	}
1322 	getnewbufcalls = counter_u64_alloc(M_WAITOK);
1323 	getnewbufrestarts = counter_u64_alloc(M_WAITOK);
1324 	mappingrestarts = counter_u64_alloc(M_WAITOK);
1325 	numbufallocfails = counter_u64_alloc(M_WAITOK);
1326 	notbufdflushes = counter_u64_alloc(M_WAITOK);
1327 	buffreekvacnt = counter_u64_alloc(M_WAITOK);
1328 	bufdefragcnt = counter_u64_alloc(M_WAITOK);
1329 	bufkvaspace = counter_u64_alloc(M_WAITOK);
1330 }
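
/*
 * Example (editorial): with maxbufspace computed as 1 GiB above,
 * buf_domains = min(howmany(1G, 256M), BUF_DOMAINS) = min(4, 8) = 4,
 * and each domain receives one quarter of every global limit set in
 * the loop above.
 */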
1331 
1332 #ifdef INVARIANTS
1333 static inline void
1334 vfs_buf_check_mapped(struct buf *bp)
1335 {
1336 
1337 	KASSERT(bp->b_kvabase != unmapped_buf,
1338 	    ("mapped buf: b_kvabase was not updated %p", bp));
1339 	KASSERT(bp->b_data != unmapped_buf,
1340 	    ("mapped buf: b_data was not updated %p", bp));
1341 	KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
1342 	    maxphys, ("b_data + b_offset unmapped %p", bp));
1343 }
1344 
1345 static inline void
1346 vfs_buf_check_unmapped(struct buf *bp)
1347 {
1348 
1349 	KASSERT(bp->b_data == unmapped_buf,
1350 	    ("unmapped buf: corrupted b_data %p", bp));
1351 }
1352 
1353 #define	BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
1354 #define	BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
1355 #else
1356 #define	BUF_CHECK_MAPPED(bp) do {} while (0)
1357 #define	BUF_CHECK_UNMAPPED(bp) do {} while (0)
1358 #endif
1359 
1360 static int
1361 isbufbusy(struct buf *bp)
1362 {
1363 	if (((bp->b_flags & B_INVAL) == 0 && BUF_ISLOCKED(bp)) ||
1364 	    ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
1365 		return (1);
1366 	return (0);
1367 }
1368 
1369 /*
1370  * Shutdown the system cleanly to prepare for reboot, halt, or power off.
1371  */
1372 void
1373 bufshutdown(int show_busybufs)
1374 {
1375 	static int first_buf_printf = 1;
1376 	struct buf *bp;
1377 	int i, iter, nbusy, pbusy;
1378 #ifndef PREEMPTION
1379 	int subiter;
1380 #endif
1381 
1382 	/*
1383 	 * Sync filesystems for shutdown
1384 	 */
1385 	wdog_kern_pat(WD_LASTVAL);
1386 	kern_sync(curthread);
1387 
1388 	/*
1389 	 * With soft updates, some buffers that are
1390 	 * written will be remarked as dirty until other
1391 	 * buffers are written.
1392 	 */
1393 	for (iter = pbusy = 0; iter < 20; iter++) {
1394 		nbusy = 0;
1395 		for (i = nbuf - 1; i >= 0; i--) {
1396 			bp = nbufp(i);
1397 			if (isbufbusy(bp))
1398 				nbusy++;
1399 		}
1400 		if (nbusy == 0) {
1401 			if (first_buf_printf)
1402 				printf("All buffers synced.");
1403 			break;
1404 		}
1405 		if (first_buf_printf) {
1406 			printf("Syncing disks, buffers remaining... ");
1407 			first_buf_printf = 0;
1408 		}
1409 		printf("%d ", nbusy);
1410 		if (nbusy < pbusy)
1411 			iter = 0;
1412 		pbusy = nbusy;
1413 
1414 		wdog_kern_pat(WD_LASTVAL);
1415 		kern_sync(curthread);
1416 
1417 #ifdef PREEMPTION
1418 		/*
1419 		 * Spin for a while to allow interrupt threads to run.
1420 		 */
1421 		DELAY(50000 * iter);
1422 #else
1423 		/*
1424 		 * Context switch several times to allow interrupt
1425 		 * threads to run.
1426 		 */
1427 		for (subiter = 0; subiter < 50 * iter; subiter++) {
1428 			thread_lock(curthread);
1429 			mi_switch(SW_VOL);
1430 			DELAY(1000);
1431 		}
1432 #endif
1433 	}
1434 	printf("\n");
1435 	/*
1436 	 * Count only busy local buffers to prevent forcing
1437 	 * a fsck if we're just a client of a wedged NFS server
1438 	 */
1439 	nbusy = 0;
1440 	for (i = nbuf - 1; i >= 0; i--) {
1441 		bp = nbufp(i);
1442 		if (isbufbusy(bp)) {
1443 #if 0
1444 /* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
1445 			if (bp->b_dev == NULL) {
1446 				TAILQ_REMOVE(&mountlist,
1447 				    bp->b_vp->v_mount, mnt_list);
1448 				continue;
1449 			}
1450 #endif
1451 			nbusy++;
1452 			if (show_busybufs > 0) {
1453 				printf(
1454 	    "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
1455 				    nbusy, bp, bp->b_vp, bp->b_flags,
1456 				    (intmax_t)bp->b_blkno,
1457 				    (intmax_t)bp->b_lblkno);
1458 				BUF_LOCKPRINTINFO(bp);
1459 				if (show_busybufs > 1)
1460 					vn_printf(bp->b_vp,
1461 					    "vnode content: ");
1462 			}
1463 		}
1464 	}
1465 	if (nbusy) {
1466 		/*
1467 		 * Failed to sync all blocks. Indicate this and don't
1468 		 * unmount filesystems (thus forcing an fsck on reboot).
1469 		 */
1470 		BOOTTRACE("shutdown failed to sync buffers");
1471 		printf("Giving up on %d buffers\n", nbusy);
1472 		DELAY(5000000);	/* 5 seconds */
1473 		swapoff_all();
1474 	} else {
1475 		BOOTTRACE("shutdown sync complete");
1476 		if (!first_buf_printf)
1477 			printf("Final sync complete\n");
1478 
1479 		/*
1480 		 * Unmount filesystems and perform swapoff, to quiesce
1481 		 * the system as much as possible.  In particular, no
1482 		 * I/O should be initiated from top levels since it
1483 		 * might be abruptly terminated by reset, or otherwise
1484 	 * erroneously handled because other parts of the
1485 		 * system are disabled.
1486 		 *
1487 		 * Swapoff before unmount, because file-backed swap is
1488 		 * non-operational after unmount of the underlying
1489 		 * filesystem.
1490 		 */
1491 		if (!KERNEL_PANICKED()) {
1492 			swapoff_all();
1493 			vfs_unmountall();
1494 		}
1495 		BOOTTRACE("shutdown unmounted all filesystems");
1496 	}
1497 	DELAY(100000);		/* wait for console output to finish */
1498 }
1499 
1500 static void
1501 bpmap_qenter(struct buf *bp)
1502 {
1503 
1504 	BUF_CHECK_MAPPED(bp);
1505 
1506 	/*
1507 	 * bp->b_data is relative to bp->b_offset, but
1508 	 * bp->b_offset may be offset into the first page.
1509 	 */
1510 	bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
1511 	pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
1512 	bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
1513 	    (vm_offset_t)(bp->b_offset & PAGE_MASK));
1514 }
1515 
1516 static inline struct bufdomain *
1517 bufdomain(struct buf *bp)
1518 {
1519 
1520 	return (&bdomain[bp->b_domain]);
1521 }
1522 
1523 static struct bufqueue *
1524 bufqueue(struct buf *bp)
1525 {
1526 
1527 	switch (bp->b_qindex) {
1528 	case QUEUE_NONE:
1529 		/* FALLTHROUGH */
1530 	case QUEUE_SENTINEL:
1531 		return (NULL);
1532 	case QUEUE_EMPTY:
1533 		return (&bqempty);
1534 	case QUEUE_DIRTY:
1535 		return (&bufdomain(bp)->bd_dirtyq);
1536 	case QUEUE_CLEAN:
1537 		return (&bufdomain(bp)->bd_subq[bp->b_subqueue]);
1538 	default:
1539 		break;
1540 	}
1541 	panic("bufqueue(%p): Unhandled type %d\n", bp, bp->b_qindex);
1542 }
1543 
1544 /*
1545  * Return the locked bufqueue that bp is a member of.
1546  */
1547 static struct bufqueue *
1548 bufqueue_acquire(struct buf *bp)
1549 {
1550 	struct bufqueue *bq, *nbq;
1551 
1552 	/*
1553 	 * bp can be pushed from a per-cpu queue to the
1554 	 * cleanq while we're waiting on the lock.  Retry
1555 	 * if the queues don't match.
1556 	 */
1557 	bq = bufqueue(bp);
1558 	BQ_LOCK(bq);
1559 	for (;;) {
1560 		nbq = bufqueue(bp);
1561 		if (bq == nbq)
1562 			break;
1563 		BQ_UNLOCK(bq);
1564 		BQ_LOCK(nbq);
1565 		bq = nbq;
1566 	}
1567 	return (bq);
1568 }
1569 
1570 /*
1571  *	binsfree:
1572  *
1573  *	Insert the buffer into the appropriate free list.  Requires a
1574  *	locked buffer on entry and buffer is unlocked before return.
1575  */
1576 static void
1577 binsfree(struct buf *bp, int qindex)
1578 {
1579 	struct bufdomain *bd;
1580 	struct bufqueue *bq;
1581 
1582 	KASSERT(qindex == QUEUE_CLEAN || qindex == QUEUE_DIRTY,
1583 	    ("binsfree: Invalid qindex %d", qindex));
1584 	BUF_ASSERT_XLOCKED(bp);
1585 
1586 	/*
1587 	 * Handle delayed bremfree() processing.
1588 	 */
1589 	if (bp->b_flags & B_REMFREE) {
1590 		if (bp->b_qindex == qindex) {
1591 			bp->b_flags |= B_REUSE;
1592 			bp->b_flags &= ~B_REMFREE;
1593 			BUF_UNLOCK(bp);
1594 			return;
1595 		}
1596 		bq = bufqueue_acquire(bp);
1597 		bq_remove(bq, bp);
1598 		BQ_UNLOCK(bq);
1599 	}
1600 	bd = bufdomain(bp);
1601 	if (qindex == QUEUE_CLEAN) {
1602 		if (bd->bd_lim != 0)
1603 			bq = &bd->bd_subq[PCPU_GET(cpuid)];
1604 		else
1605 			bq = bd->bd_cleanq;
1606 	} else
1607 		bq = &bd->bd_dirtyq;
1608 	bq_insert(bq, bp, true);
1609 }
1610 
1611 /*
1612  * buf_free:
1613  *
1614  *	Free a buffer to the buf zone once it no longer has valid contents.
1615  */
1616 static void
1617 buf_free(struct buf *bp)
1618 {
1619 
1620 	if (bp->b_flags & B_REMFREE)
1621 		bremfreef(bp);
1622 	if (bp->b_vflags & BV_BKGRDINPROG)
1623 		panic("losing buffer 1");
1624 	if (bp->b_rcred != NOCRED) {
1625 		crfree(bp->b_rcred);
1626 		bp->b_rcred = NOCRED;
1627 	}
1628 	if (bp->b_wcred != NOCRED) {
1629 		crfree(bp->b_wcred);
1630 		bp->b_wcred = NOCRED;
1631 	}
1632 	if (!LIST_EMPTY(&bp->b_dep))
1633 		buf_deallocate(bp);
1634 	bufkva_free(bp);
1635 	atomic_add_int(&bufdomain(bp)->bd_freebuffers, 1);
1636 	MPASS((bp->b_flags & B_MAXPHYS) == 0);
1637 	BUF_UNLOCK(bp);
1638 	uma_zfree(buf_zone, bp);
1639 }
1640 
1641 /*
1642  * buf_import:
1643  *
1644  *	Import bufs into the uma cache from the buf list.  The system still
1645  *	expects a static array of bufs and much of the synchronization
1646  *	around bufs assumes type stable storage.  As a result, UMA is used
1647  *	only as a per-cpu cache of bufs still maintained on a global list.
1648  */
1649 static int
1650 buf_import(void *arg, void **store, int cnt, int domain, int flags)
1651 {
1652 	struct buf *bp;
1653 	int i;
1654 
1655 	BQ_LOCK(&bqempty);
1656 	for (i = 0; i < cnt; i++) {
1657 		bp = TAILQ_FIRST(&bqempty.bq_queue);
1658 		if (bp == NULL)
1659 			break;
1660 		bq_remove(&bqempty, bp);
1661 		store[i] = bp;
1662 	}
1663 	BQ_UNLOCK(&bqempty);
1664 
1665 	return (i);
1666 }
1667 
1668 /*
1669  * buf_release:
1670  *
1671  *	Release bufs from the uma cache back to the buffer queues.
1672  */
1673 static void
1674 buf_release(void *arg, void **store, int cnt)
1675 {
1676 	struct bufqueue *bq;
1677 	struct buf *bp;
1678 	int i;
1679 
1680 	bq = &bqempty;
1681 	BQ_LOCK(bq);
1682 	for (i = 0; i < cnt; i++) {
1683 		bp = store[i];
1684 		/* Inline bq_insert() to batch locking. */
1685 		TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
1686 		bp->b_flags &= ~(B_AGE | B_REUSE);
1687 		bq->bq_len++;
1688 		bp->b_qindex = bq->bq_index;
1689 	}
1690 	BQ_UNLOCK(bq);
1691 }
1692 
1693 /*
1694  * buf_alloc:
1695  *
1696  *	Allocate an empty buffer header.
1697  */
1698 static struct buf *
1699 buf_alloc(struct bufdomain *bd)
1700 {
1701 	struct buf *bp;
1702 	int freebufs, error;
1703 
1704 	/*
1705 	 * We can only run out of bufs in the buf zone if the average buf
1706 	 * is less than BKVASIZE.  In this case the actual wait/block will
1707 	 * come from buf_reycle() failing to flush one of these small bufs.
1708 	 */
1709 	bp = NULL;
1710 	freebufs = atomic_fetchadd_int(&bd->bd_freebuffers, -1);
1711 	if (freebufs > 0)
1712 		bp = uma_zalloc(buf_zone, M_NOWAIT);
1713 	if (bp == NULL) {
1714 		atomic_add_int(&bd->bd_freebuffers, 1);
1715 		bufspace_daemon_wakeup(bd);
1716 		counter_u64_add(numbufallocfails, 1);
1717 		return (NULL);
1718 	}
1719 	/*
1720 	 * Wake-up the bufspace daemon on transition below threshold.
1721 	 */
1722 	if (freebufs == bd->bd_lofreebuffers)
1723 		bufspace_daemon_wakeup(bd);
1724 
1725 	error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
1726 	KASSERT(error == 0, ("%s: BUF_LOCK on free buf %p: %d.", __func__, bp,
1727 	    error));
1728 	(void)error;
1729 
1730 	KASSERT(bp->b_vp == NULL,
1731 	    ("bp: %p still has vnode %p.", bp, bp->b_vp));
1732 	KASSERT((bp->b_flags & (B_DELWRI | B_NOREUSE)) == 0,
1733 	    ("invalid buffer %p flags %#x", bp, bp->b_flags));
1734 	KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1735 	    ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
1736 	KASSERT(bp->b_npages == 0,
1737 	    ("bp: %p still has %d vm pages\n", bp, bp->b_npages));
1738 	KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp));
1739 	KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp));
1740 	MPASS((bp->b_flags & B_MAXPHYS) == 0);
1741 
1742 	bp->b_domain = BD_DOMAIN(bd);
1743 	bp->b_flags = 0;
1744 	bp->b_ioflags = 0;
1745 	bp->b_xflags = 0;
1746 	bp->b_vflags = 0;
1747 	bp->b_vp = NULL;
1748 	bp->b_blkno = bp->b_lblkno = 0;
1749 	bp->b_offset = NOOFFSET;
1750 	bp->b_iodone = 0;
1751 	bp->b_error = 0;
1752 	bp->b_resid = 0;
1753 	bp->b_bcount = 0;
1754 	bp->b_npages = 0;
1755 	bp->b_dirtyoff = bp->b_dirtyend = 0;
1756 	bp->b_bufobj = NULL;
1757 	bp->b_data = bp->b_kvabase = unmapped_buf;
1758 	bp->b_fsprivate1 = NULL;
1759 	bp->b_fsprivate2 = NULL;
1760 	bp->b_fsprivate3 = NULL;
1761 	LIST_INIT(&bp->b_dep);
1762 
1763 	return (bp);
1764 }
1765 
1766 /*
1767  *	buf_recycle:
1768  *
1769  *	Free a buffer from the given bufqueue.  kva controls whether the
1770  *	freed buf must own some kva resources.  This is used for
1771  *	defragmenting.
1772  */
1773 static int
1774 buf_recycle(struct bufdomain *bd, bool kva)
1775 {
1776 	struct bufqueue *bq;
1777 	struct buf *bp, *nbp;
1778 
1779 	if (kva)
1780 		counter_u64_add(bufdefragcnt, 1);
1781 	nbp = NULL;
1782 	bq = bd->bd_cleanq;
1783 	BQ_LOCK(bq);
1784 	KASSERT(BQ_LOCKPTR(bq) == BD_LOCKPTR(bd),
1785 	    ("buf_recycle: Locks don't match"));
1786 	nbp = TAILQ_FIRST(&bq->bq_queue);
1787 
1788 	/*
1789 	 * Run the scan, freeing data and/or kva mappings on the fly,
1790 	 * depending on what each buffer requires.
1791 	 */
1792 	while ((bp = nbp) != NULL) {
1793 		/*
1794 		 * Calculate next bp (we can only use it if we do not
1795 		 * release the bqlock).
1796 		 */
1797 		nbp = TAILQ_NEXT(bp, b_freelist);
1798 
1799 		/*
1800 		 * If we are defragging then we need a buffer with
1801 		 * some kva to reclaim.
1802 		 */
1803 		if (kva && bp->b_kvasize == 0)
1804 			continue;
1805 
1806 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1807 			continue;
1808 
1809 		/*
1810 		 * Implement a second chance algorithm for frequently
1811 		 * accessed buffers.
1812 		 */
1813 		if ((bp->b_flags & B_REUSE) != 0) {
1814 			TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1815 			TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
1816 			bp->b_flags &= ~B_REUSE;
1817 			BUF_UNLOCK(bp);
1818 			continue;
1819 		}
1820 
1821 		/*
1822 		 * Skip buffers with background writes in progress.
1823 		 */
1824 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
1825 			BUF_UNLOCK(bp);
1826 			continue;
1827 		}
1828 
1829 		KASSERT(bp->b_qindex == QUEUE_CLEAN,
1830 		    ("buf_recycle: inconsistent queue %d bp %p",
1831 		    bp->b_qindex, bp));
1832 		KASSERT(bp->b_domain == BD_DOMAIN(bd),
1833 		    ("getnewbuf: queue domain %d doesn't match request %d",
1834 		    bp->b_domain, (int)BD_DOMAIN(bd)));
1835 		/*
1836 		 * NOTE:  nbp is now entirely invalid.  We can only restart
1837 		 * the scan from this point on.
1838 		 */
1839 		bq_remove(bq, bp);
1840 		BQ_UNLOCK(bq);
1841 
1842 		/*
1843 		 * Requeue the background write buffer with error and
1844 		 * restart the scan.
1845 		 */
1846 		if ((bp->b_vflags & BV_BKGRDERR) != 0) {
1847 			bqrelse(bp);
1848 			BQ_LOCK(bq);
1849 			nbp = TAILQ_FIRST(&bq->bq_queue);
1850 			continue;
1851 		}
1852 		bp->b_flags |= B_INVAL;
1853 		brelse(bp);
1854 		return (0);
1855 	}
1856 	bd->bd_wanted = 1;
1857 	BQ_UNLOCK(bq);
1858 
1859 	return (ENOBUFS);
1860 }
1861 
1862 /*
1863  *	bremfree:
1864  *
1865  *	Mark the buffer for removal from the appropriate free list.
1867  */
1868 void
1869 bremfree(struct buf *bp)
1870 {
1871 
1872 	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1873 	KASSERT((bp->b_flags & B_REMFREE) == 0,
1874 	    ("bremfree: buffer %p already marked for delayed removal.", bp));
1875 	KASSERT(bp->b_qindex != QUEUE_NONE,
1876 	    ("bremfree: buffer %p not on a queue.", bp));
1877 	BUF_ASSERT_XLOCKED(bp);
1878 
1879 	bp->b_flags |= B_REMFREE;
1880 }
1881 
1882 /*
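/*
 * Illustrative sketch (hypothetical caller): the usual pattern is to
 * take the buf lock, mark the buffer for removal and hand it to a
 * routine that consumes it, as bufbdflush() does below:
 *
 *	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
 *		bremfree(bp);
 *		bawrite(bp);
 *	}
 *
 * The deferred removal requested by B_REMFREE is completed later by
 * bremfreef().
 */
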
1883  *	bremfreef:
1884  *
1885  *	Force an immediate removal from a free list.  Used only by NFS when
1886  *	it abuses the b_freelist pointer.
1887  */
1888 void
1889 bremfreef(struct buf *bp)
1890 {
1891 	struct bufqueue *bq;
1892 
1893 	bq = bufqueue_acquire(bp);
1894 	bq_remove(bq, bp);
1895 	BQ_UNLOCK(bq);
1896 }
1897 
1898 static void
1899 bq_init(struct bufqueue *bq, int qindex, int subqueue, const char *lockname)
1900 {
1901 
1902 	mtx_init(&bq->bq_lock, lockname, NULL, MTX_DEF);
1903 	TAILQ_INIT(&bq->bq_queue);
1904 	bq->bq_len = 0;
1905 	bq->bq_index = qindex;
1906 	bq->bq_subqueue = subqueue;
1907 }
1908 
1909 static void
1910 bd_init(struct bufdomain *bd)
1911 {
1912 	int i;
1913 
1914 	bd->bd_cleanq = &bd->bd_subq[mp_maxid + 1];
1915 	bq_init(bd->bd_cleanq, QUEUE_CLEAN, mp_maxid + 1, "bufq clean lock");
1916 	bq_init(&bd->bd_dirtyq, QUEUE_DIRTY, -1, "bufq dirty lock");
1917 	for (i = 0; i <= mp_maxid; i++)
1918 		bq_init(&bd->bd_subq[i], QUEUE_CLEAN, i,
1919 		    "bufq clean subqueue lock");
1920 	mtx_init(&bd->bd_run_lock, "bufspace daemon run lock", NULL, MTX_DEF);
1921 }
1922 
1923 /*
1924  *	bq_remove:
1925  *
1926  *	Removes a buffer from the free list.  Must be called with the
1927  *	correct qlock held.
1928  */
1929 static void
1930 bq_remove(struct bufqueue *bq, struct buf *bp)
1931 {
1932 
1933 	CTR3(KTR_BUF, "bq_remove(%p) vp %p flags %X",
1934 	    bp, bp->b_vp, bp->b_flags);
1935 	KASSERT(bp->b_qindex != QUEUE_NONE,
1936 	    ("bq_remove: buffer %p not on a queue.", bp));
1937 	KASSERT(bufqueue(bp) == bq,
1938 	    ("bq_remove: Remove buffer %p from wrong queue.", bp));
1939 
1940 	BQ_ASSERT_LOCKED(bq);
1941 	if (bp->b_qindex != QUEUE_EMPTY) {
1942 		BUF_ASSERT_XLOCKED(bp);
1943 	}
1944 	KASSERT(bq->bq_len >= 1,
1945 	    ("queue %d underflow", bp->b_qindex));
1946 	TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1947 	bq->bq_len--;
1948 	bp->b_qindex = QUEUE_NONE;
1949 	bp->b_flags &= ~(B_REMFREE | B_REUSE);
1950 }
1951 
1952 static void
1953 bd_flush(struct bufdomain *bd, struct bufqueue *bq)
1954 {
1955 	struct buf *bp;
1956 
1957 	BQ_ASSERT_LOCKED(bq);
1958 	if (bq != bd->bd_cleanq) {
1959 		BD_LOCK(bd);
1960 		while ((bp = TAILQ_FIRST(&bq->bq_queue)) != NULL) {
1961 			TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1962 			TAILQ_INSERT_TAIL(&bd->bd_cleanq->bq_queue, bp,
1963 			    b_freelist);
1964 			bp->b_subqueue = bd->bd_cleanq->bq_subqueue;
1965 		}
1966 		bd->bd_cleanq->bq_len += bq->bq_len;
1967 		bq->bq_len = 0;
1968 	}
1969 	if (bd->bd_wanted) {
1970 		bd->bd_wanted = 0;
1971 		wakeup(&bd->bd_wanted);
1972 	}
1973 	if (bq != bd->bd_cleanq)
1974 		BD_UNLOCK(bd);
1975 }
1976 
1977 static int
1978 bd_flushall(struct bufdomain *bd)
1979 {
1980 	struct bufqueue *bq;
1981 	int flushed;
1982 	int i;
1983 
1984 	if (bd->bd_lim == 0)
1985 		return (0);
1986 	flushed = 0;
1987 	for (i = 0; i <= mp_maxid; i++) {
1988 		bq = &bd->bd_subq[i];
1989 		if (bq->bq_len == 0)
1990 			continue;
1991 		BQ_LOCK(bq);
1992 		bd_flush(bd, bq);
1993 		BQ_UNLOCK(bq);
1994 		flushed++;
1995 	}
1996 
1997 	return (flushed);
1998 }
1999 
2000 static void
2001 bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock)
2002 {
2003 	struct bufdomain *bd;
2004 
2005 	if (bp->b_qindex != QUEUE_NONE)
2006 		panic("bq_insert: free buffer %p onto another queue?", bp);
2007 
2008 	bd = bufdomain(bp);
2009 	if (bp->b_flags & B_AGE) {
2010 		/* Place this buf directly on the real queue. */
2011 		if (bq->bq_index == QUEUE_CLEAN)
2012 			bq = bd->bd_cleanq;
2013 		BQ_LOCK(bq);
2014 		TAILQ_INSERT_HEAD(&bq->bq_queue, bp, b_freelist);
2015 	} else {
2016 		BQ_LOCK(bq);
2017 		TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
2018 	}
2019 	bp->b_flags &= ~(B_AGE | B_REUSE);
2020 	bq->bq_len++;
2021 	bp->b_qindex = bq->bq_index;
2022 	bp->b_subqueue = bq->bq_subqueue;
2023 
2024 	/*
2025 	 * Unlock before we notify so that we don't wakeup a waiter that
2026 	 * fails a trylock on the buf and sleeps again.
2027 	 */
2028 	if (unlock)
2029 		BUF_UNLOCK(bp);
2030 
2031 	if (bp->b_qindex == QUEUE_CLEAN) {
2032 		/*
2033 		 * Flush the per-cpu queue and notify any waiters.
2034 		 */
2035 		if (bd->bd_wanted || (bq != bd->bd_cleanq &&
2036 		    bq->bq_len >= bd->bd_lim))
2037 			bd_flush(bd, bq);
2038 	}
2039 	BQ_UNLOCK(bq);
2040 }
2041 
2042 /*
2043  *	bufkva_free:
2044  *
2045  *	Free the kva allocation for a buffer.
2047  */
2048 static void
2049 bufkva_free(struct buf *bp)
2050 {
2051 
2052 #ifdef INVARIANTS
2053 	if (bp->b_kvasize == 0) {
2054 		KASSERT(bp->b_kvabase == unmapped_buf &&
2055 		    bp->b_data == unmapped_buf,
2056 		    ("Leaked KVA space on %p", bp));
2057 	} else if (buf_mapped(bp))
2058 		BUF_CHECK_MAPPED(bp);
2059 	else
2060 		BUF_CHECK_UNMAPPED(bp);
2061 #endif
2062 	if (bp->b_kvasize == 0)
2063 		return;
2064 
2065 	vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
2066 	counter_u64_add(bufkvaspace, -bp->b_kvasize);
2067 	counter_u64_add(buffreekvacnt, 1);
2068 	bp->b_data = bp->b_kvabase = unmapped_buf;
2069 	bp->b_kvasize = 0;
2070 }
2071 
2072 /*
2073  *	bufkva_alloc:
2074  *
2075  *	Allocate the buffer KVA and set b_kvasize and b_kvabase.
2076  */
2077 static int
2078 bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
2079 {
2080 	vm_offset_t addr;
2081 	int error;
2082 
2083 	KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
2084 	    ("Invalid gbflags 0x%x in %s", gbflags, __func__));
2085 	MPASS((bp->b_flags & B_MAXPHYS) == 0);
2086 	KASSERT(maxsize <= maxbcachebuf,
2087 	    ("bufkva_alloc kva too large %d %u", maxsize, maxbcachebuf));
2088 
2089 	bufkva_free(bp);
2090 
2091 	addr = 0;
2092 	error = vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr);
2093 	if (error != 0) {
2094 		/*
2095 		 * Buffer map is too fragmented.  Request the caller
2096 		 * to defragment the map.
2097 		 */
2098 		return (error);
2099 	}
2100 	bp->b_kvabase = (caddr_t)addr;
2101 	bp->b_kvasize = maxsize;
2102 	counter_u64_add(bufkvaspace, bp->b_kvasize);
2103 	if ((gbflags & GB_UNMAPPED) != 0) {
2104 		bp->b_data = unmapped_buf;
2105 		BUF_CHECK_UNMAPPED(bp);
2106 	} else {
2107 		bp->b_data = bp->b_kvabase;
2108 		BUF_CHECK_MAPPED(bp);
2109 	}
2110 	return (0);
2111 }
2112 
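/*
 * Illustrative sketch (hypothetical caller): a vmem_alloc() failure
 * here signals fragmentation, and the expected reaction is to recycle
 * a buffer that owns KVA and retry, e.g.:
 *
 *	while (bufkva_alloc(bp, maxsize, gbflags) != 0) {
 *		if (buf_recycle(bd, true) != 0)
 *			break;
 *	}
 *
 * The buffer_arena also triggers this indirectly through the
 * bufkva_reclaim() callback that follows.
 */
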
2113 /*
2114  *	bufkva_reclaim:
2115  *
2116  *	Reclaim buffer kva by freeing buffers holding kva.  This is a vmem
2117  *	reclamation callback, invoked before vmem fails an allocation.
2118  */
2119 static void
2120 bufkva_reclaim(vmem_t *vmem, int flags)
2121 {
2122 	bool done;
2123 	int q;
2124 	int i;
2125 
2126 	done = false;
2127 	for (i = 0; i < 5; i++) {
2128 		for (q = 0; q < buf_domains; q++)
2129 			if (buf_recycle(&bdomain[q], true) != 0)
2130 				done = true;
2131 		if (done)
2132 			break;
2133 	}
2134 	return;
2135 }
2136 
2137 /*
2138  * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
2139  * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
2140  * the buffer is valid and we do not have to do anything.
2141  */
2142 static void
2143 breada(struct vnode *vp, daddr_t *rablkno, int *rabsize, int cnt,
2144     struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *))
2145 {
2146 	struct buf *rabp;
2147 	struct thread *td;
2148 	int i;
2149 
2150 	td = curthread;
2151 
2152 	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
2153 		if (inmem(vp, *rablkno))
2154 			continue;
2155 		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
2156 		if ((rabp->b_flags & B_CACHE) != 0) {
2157 			brelse(rabp);
2158 			continue;
2159 		}
2160 #ifdef RACCT
2161 		if (racct_enable) {
2162 			PROC_LOCK(curproc);
2163 			racct_add_buf(curproc, rabp, 0);
2164 			PROC_UNLOCK(curproc);
2165 		}
2166 #endif /* RACCT */
2167 		td->td_ru.ru_inblock++;
2168 		rabp->b_flags |= B_ASYNC;
2169 		rabp->b_flags &= ~B_INVAL;
2170 		if ((flags & GB_CKHASH) != 0) {
2171 			rabp->b_flags |= B_CKHASH;
2172 			rabp->b_ckhashcalc = ckhashfunc;
2173 		}
2174 		rabp->b_ioflags &= ~BIO_ERROR;
2175 		rabp->b_iocmd = BIO_READ;
2176 		if (rabp->b_rcred == NOCRED && cred != NOCRED)
2177 			rabp->b_rcred = crhold(cred);
2178 		vfs_busy_pages(rabp, 0);
2179 		BUF_KERNPROC(rabp);
2180 		rabp->b_iooffset = dbtob(rabp->b_blkno);
2181 		bstrategy(rabp);
2182 	}
2183 }
2184 
2185 /*
2186  * Entry point for bread() and breadn() via #defines in sys/buf.h.
2187  *
2188  * Get a buffer with the specified data.  Look in the cache first.  We
2189  * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
2190  * is set, the buffer is valid and we do not have to do anything, see
2191  * getblk(). Also starts asynchronous I/O on read-ahead blocks.
2192  *
2193  * Always return a NULL buffer pointer (in bpp) when returning an error.
2194  *
2195  * The blkno parameter is the logical block being requested. Normally
2196  * the mapping of logical block number to disk block address is done
2197  * by calling VOP_BMAP(). However, if the mapping is already known, the
2198  * disk block address can be passed using the dblkno parameter. If the
2199  * disk block address is not known, then the same value should be passed
2200  * for blkno and dblkno.
2201  */
2202 int
2203 breadn_flags(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size,
2204     daddr_t *rablkno, int *rabsize, int cnt, struct ucred *cred, int flags,
2205     void (*ckhashfunc)(struct buf *), struct buf **bpp)
2206 {
2207 	struct buf *bp;
2208 	struct thread *td;
2209 	int error, readwait, rv;
2210 
2211 	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
2212 	td = curthread;
2213 	/*
2214 	 * Can only return NULL if GB_LOCK_NOWAIT or GB_NOSPARSE flags
2215 	 * are specified.
2216 	 */
2217 	error = getblkx(vp, blkno, dblkno, size, 0, 0, flags, &bp);
2218 	if (error != 0) {
2219 		*bpp = NULL;
2220 		return (error);
2221 	}
2222 	KASSERT(blkno == bp->b_lblkno,
2223 	    ("getblkx returned buffer for blkno %jd instead of blkno %jd",
2224 	    (intmax_t)bp->b_lblkno, (intmax_t)blkno));
2225 	flags &= ~GB_NOSPARSE;
2226 	*bpp = bp;
2227 
2228 	/*
2229 	 * If not found in cache, do some I/O
2230 	 */
2231 	readwait = 0;
2232 	if ((bp->b_flags & B_CACHE) == 0) {
2233 #ifdef RACCT
2234 		if (racct_enable) {
2235 			PROC_LOCK(td->td_proc);
2236 			racct_add_buf(td->td_proc, bp, 0);
2237 			PROC_UNLOCK(td->td_proc);
2238 		}
2239 #endif /* RACCT */
2240 		td->td_ru.ru_inblock++;
2241 		bp->b_iocmd = BIO_READ;
2242 		bp->b_flags &= ~B_INVAL;
2243 		if ((flags & GB_CKHASH) != 0) {
2244 			bp->b_flags |= B_CKHASH;
2245 			bp->b_ckhashcalc = ckhashfunc;
2246 		}
2247 		if ((flags & GB_CVTENXIO) != 0)
2248 			bp->b_xflags |= BX_CVTENXIO;
2249 		bp->b_ioflags &= ~BIO_ERROR;
2250 		if (bp->b_rcred == NOCRED && cred != NOCRED)
2251 			bp->b_rcred = crhold(cred);
2252 		vfs_busy_pages(bp, 0);
2253 		bp->b_iooffset = dbtob(bp->b_blkno);
2254 		bstrategy(bp);
2255 		++readwait;
2256 	}
2257 
2258 	/*
2259 	 * Attempt to initiate asynchronous I/O on read-ahead blocks.
2260 	 */
2261 	breada(vp, rablkno, rabsize, cnt, cred, flags, ckhashfunc);
2262 
2263 	rv = 0;
2264 	if (readwait) {
2265 		rv = bufwait(bp);
2266 		if (rv != 0) {
2267 			brelse(bp);
2268 			*bpp = NULL;
2269 		}
2270 	}
2271 	return (rv);
2272 }
2273 
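/*
 * Illustrative sketch (hypothetical filesystem caller): most readers
 * reach this function through the bread() wrapper in sys/buf.h, which
 * requests no read-ahead:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblkno, size, NOCRED, &bp);
 *	if (error != 0)
 *		return (error);
 *	... examine bp->b_data ...
 *	brelse(bp);
 *
 * On error *bpp is guaranteed to be NULL, so no release is needed.
 */
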
2274 /*
2275  * Write, release buffer on completion.  (Done by iodone
2276  * if async).  Do not bother writing anything if the buffer
2277  * is invalid.
2278  *
2279  * Note that we set B_CACHE here, indicating that buffer is
2280  * fully valid and thus cacheable.  This is true even of NFS
2281  * now so we set it generally.  This could be set either here
2282  * or in biodone() since the I/O is synchronous.  We put it
2283  * here.
2284  */
2285 int
2286 bufwrite(struct buf *bp)
2287 {
2288 	int oldflags;
2289 	struct vnode *vp;
2290 	long space;
2291 	int vp_md;
2292 
2293 	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2294 	if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) {
2295 		bp->b_flags |= B_INVAL | B_RELBUF;
2296 		bp->b_flags &= ~B_CACHE;
2297 		brelse(bp);
2298 		return (ENXIO);
2299 	}
2300 	if (bp->b_flags & B_INVAL) {
2301 		brelse(bp);
2302 		return (0);
2303 	}
2304 
2305 	if (bp->b_flags & B_BARRIER)
2306 		atomic_add_long(&barrierwrites, 1);
2307 
2308 	oldflags = bp->b_flags;
2309 
2310 	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
2311 	    ("FFS background buffer should not get here %p", bp));
2312 
2313 	vp = bp->b_vp;
2314 	if (vp)
2315 		vp_md = vp->v_vflag & VV_MD;
2316 	else
2317 		vp_md = 0;
2318 
2319 	/*
2320 	 * Mark the buffer clean.  Increment the bufobj write count
2321 	 * before bundirty() call, to prevent other thread from seeing
2322 	 * empty dirty list and zero counter for writes in progress,
2323 	 * falsely indicating that the bufobj is clean.
2324 	 */
2325 	bufobj_wref(bp->b_bufobj);
2326 	bundirty(bp);
2327 
2328 	bp->b_flags &= ~B_DONE;
2329 	bp->b_ioflags &= ~BIO_ERROR;
2330 	bp->b_flags |= B_CACHE;
2331 	bp->b_iocmd = BIO_WRITE;
2332 
2333 	vfs_busy_pages(bp, 1);
2334 
2335 	/*
2336 	 * Normal bwrites pipeline writes
2337 	 */
2338 	bp->b_runningbufspace = bp->b_bufsize;
2339 	space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
2340 
2341 #ifdef RACCT
2342 	if (racct_enable) {
2343 		PROC_LOCK(curproc);
2344 		racct_add_buf(curproc, bp, 1);
2345 		PROC_UNLOCK(curproc);
2346 	}
2347 #endif /* RACCT */
2348 	curthread->td_ru.ru_oublock++;
2349 	if (oldflags & B_ASYNC)
2350 		BUF_KERNPROC(bp);
2351 	bp->b_iooffset = dbtob(bp->b_blkno);
2352 	buf_track(bp, __func__);
2353 	bstrategy(bp);
2354 
2355 	if ((oldflags & B_ASYNC) == 0) {
2356 		int rtval = bufwait(bp);
2357 		brelse(bp);
2358 		return (rtval);
2359 	} else if (space > hirunningspace) {
2360 		/*
2361 		 * don't allow the async write to saturate the I/O
2362 		 * system.  We will not deadlock here because
2363 		 * we are blocking waiting for I/O that is already in-progress
2364 		 * to complete. We do not block here if it is the update
2365 		 * or syncer daemon trying to clean up as that can lead
2366 		 * to deadlock.
2367 		 */
2368 		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
2369 			waitrunningbufspace();
2370 	}
2371 
2372 	return (0);
2373 }
2374 
2375 void
2376 bufbdflush(struct bufobj *bo, struct buf *bp)
2377 {
2378 	struct buf *nbp;
2379 	struct bufdomain *bd;
2380 
2381 	bd = &bdomain[bo->bo_domain];
2382 	if (bo->bo_dirty.bv_cnt > bd->bd_dirtybufthresh + 10) {
2383 		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
2384 		altbufferflushes++;
2385 	} else if (bo->bo_dirty.bv_cnt > bd->bd_dirtybufthresh) {
2386 		BO_LOCK(bo);
2387 		/*
2388 		 * Try to find a buffer to flush.
2389 		 */
2390 		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
2391 			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
2392 			    BUF_LOCK(nbp,
2393 				     LK_EXCLUSIVE | LK_NOWAIT, NULL))
2394 				continue;
2395 			if (bp == nbp)
2396 				panic("bdwrite: found ourselves");
2397 			BO_UNLOCK(bo);
2398 			/* Don't call buf_countdeps() with the bo lock held. */
2399 			if (buf_countdeps(nbp, 0)) {
2400 				BO_LOCK(bo);
2401 				BUF_UNLOCK(nbp);
2402 				continue;
2403 			}
2404 			if (nbp->b_flags & B_CLUSTEROK) {
2405 				vfs_bio_awrite(nbp);
2406 			} else {
2407 				bremfree(nbp);
2408 				bawrite(nbp);
2409 			}
2410 			dirtybufferflushes++;
2411 			break;
2412 		}
2413 		if (nbp == NULL)
2414 			BO_UNLOCK(bo);
2415 	}
2416 }
2417 
2418 /*
2419  * Delayed write. (Buffer is marked dirty).  Do not bother writing
2420  * anything if the buffer is marked invalid.
2421  *
2422  * Note that since the buffer must be completely valid, we can safely
2423  * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
2424  * biodone() in order to prevent getblk from writing the buffer
2425  * out synchronously.
2426  */
2427 void
2428 bdwrite(struct buf *bp)
2429 {
2430 	struct thread *td = curthread;
2431 	struct vnode *vp;
2432 	struct bufobj *bo;
2433 
2434 	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2435 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2436 	KASSERT((bp->b_flags & B_BARRIER) == 0,
2437 	    ("Barrier request in delayed write %p", bp));
2438 
2439 	if (bp->b_flags & B_INVAL) {
2440 		brelse(bp);
2441 		return;
2442 	}
2443 
2444 	/*
2445 	 * If we have too many dirty buffers, don't create any more.
2446 	 * If we are wildly over our limit, then force a complete
2447 	 * cleanup. Otherwise, just keep the situation from getting
2448 	 * out of control. Note that we have to avoid a recursive
2449 	 * disaster and not try to clean up after our own cleanup!
2450 	 */
2451 	vp = bp->b_vp;
2452 	bo = bp->b_bufobj;
2453 	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
2454 		td->td_pflags |= TDP_INBDFLUSH;
2455 		BO_BDFLUSH(bo, bp);
2456 		td->td_pflags &= ~TDP_INBDFLUSH;
2457 	} else
2458 		recursiveflushes++;
2459 
2460 	bdirty(bp);
2461 	/*
2462 	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
2463 	 * true even of NFS now.
2464 	 */
2465 	bp->b_flags |= B_CACHE;
2466 
2467 	/*
2468 	 * This bmap keeps the system from needing to do the bmap later,
2469 	 * perhaps when the system is attempting to do a sync.  Since it
2470 	 * is likely that the indirect block -- or whatever other data
2471 	 * structure the filesystem needs -- is still in memory now, it is a good
2472 	 * thing to do this.  Note also, that if the pageout daemon is
2473 	 * requesting a sync -- there might not be enough memory to do
2474 	 * the bmap then...  So, this is important to do.
2475 	 */
2476 	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
2477 		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
2478 	}
2479 
2480 	buf_track(bp, __func__);
2481 
2482 	/*
2483 	 * Set the *dirty* buffer range based upon the VM system dirty
2484 	 * pages.
2485 	 *
2486 	 * Mark the buffer pages as clean.  We need to do this here to
2487 	 * satisfy the vnode_pager and the pageout daemon, so that it
2488 	 * thinks that the pages have been "cleaned".  Note that since
2489 	 * the pages are in a delayed write buffer -- the VFS layer
2490 	 * "will" see that the pages get written out on the next sync,
2491 	 * or perhaps the cluster will be completed.
2492 	 */
2493 	vfs_clean_pages_dirty_buf(bp);
2494 	bqrelse(bp);
2495 
2496 	/*
2497 	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
2498 	 * due to the softdep code.
2499 	 */
2500 }
2501 
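/*
 * Illustrative sketch (hypothetical caller): the canonical delayed
 * write sequence obtains the buffer, modifies it and lets the syncer
 * or the buf daemon push it out later:
 *
 *	bp = getblk(vp, lblkno, size, 0, 0, 0);
 *	... modify bp->b_data ...
 *	bdwrite(bp);
 *
 * Unlike bwrite(), control returns immediately and any eventual I/O
 * error cannot be reported to this caller.
 */
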
2502 /*
2503  *	bdirty:
2504  *
2505  *	Turn buffer into delayed write request.  We must clear BIO_READ and
2506  *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
2507  *	itself to properly update it in the dirty/clean lists.  We mark it
2508  *	B_DONE to ensure that any asynchronization of the buffer properly
2509  *	clears B_DONE ( else a panic will occur later ).
2510  *
2511 	 *	bdirty() is similar to bdwrite() - we have to clear B_INVAL, which
2512  *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
2513  *	should only be called if the buffer is known-good.
2514  *
2515  *	Since the buffer is not on a queue, we do not update the numfreebuffers
2516  *	count.
2517  *
2518  *	The buffer must be on QUEUE_NONE.
2519  */
2520 void
2521 bdirty(struct buf *bp)
2522 {
2523 
2524 	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
2525 	    bp, bp->b_vp, bp->b_flags);
2526 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2527 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2528 	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
2529 	bp->b_flags &= ~(B_RELBUF);
2530 	bp->b_iocmd = BIO_WRITE;
2531 
2532 	if ((bp->b_flags & B_DELWRI) == 0) {
2533 		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
2534 		reassignbuf(bp);
2535 		bdirtyadd(bp);
2536 	}
2537 }
2538 
2539 /*
2540  *	bundirty:
2541  *
2542  *	Clear B_DELWRI for buffer.
2543  *
2544  *	Since the buffer is not on a queue, we do not update the numfreebuffers
2545  *	count.
2546  *
2547  *	The buffer must be on QUEUE_NONE.
2548  */
2550 void
2551 bundirty(struct buf *bp)
2552 {
2553 
2554 	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2555 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2556 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2557 	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
2558 
2559 	if (bp->b_flags & B_DELWRI) {
2560 		bp->b_flags &= ~B_DELWRI;
2561 		reassignbuf(bp);
2562 		bdirtysub(bp);
2563 	}
2564 	/*
2565 	 * Since it is now being written, we can clear its deferred write flag.
2566 	 */
2567 	bp->b_flags &= ~B_DEFERRED;
2568 }
2569 
2570 /*
2571  *	bawrite:
2572  *
2573  *	Asynchronous write.  Start output on a buffer, but do not wait for
2574  *	it to complete.  The buffer is released when the output completes.
2575  *
2576  *	bwrite() ( or the VOP routine anyway ) is responsible for handling
2577  *	B_INVAL buffers.  Not us.
2578  */
2579 void
2580 bawrite(struct buf *bp)
2581 {
2582 
2583 	bp->b_flags |= B_ASYNC;
2584 	(void) bwrite(bp);
2585 }
2586 
2587 /*
2588  *	babarrierwrite:
2589  *
2590  *	Asynchronous barrier write.  Start output on a buffer, but do not
2591  *	wait for it to complete.  Place a write barrier after this write so
2592  *	that this buffer and all buffers written before it are committed to
2593  *	the disk before any buffers written after this write are committed
2594  *	to the disk.  The buffer is released when the output completes.
2595  */
2596 void
2597 babarrierwrite(struct buf *bp)
2598 {
2599 
2600 	bp->b_flags |= B_ASYNC | B_BARRIER;
2601 	(void) bwrite(bp);
2602 }
2603 
2604 /*
2605  *	bbarrierwrite:
2606  *
2607  *	Synchronous barrier write.  Start output on a buffer and wait for
2608  *	it to complete.  Place a write barrier after this write so that
2609  *	this buffer and all buffers written before it are committed to
2610  *	the disk before any buffers written after this write are committed
2611  *	to the disk.  The buffer is released when the output completes.
2612  */
2613 int
2614 bbarrierwrite(struct buf *bp)
2615 {
2616 
2617 	bp->b_flags |= B_BARRIER;
2618 	return (bwrite(bp));
2619 }
2620 
2621 /*
2622  *	bwillwrite:
2623  *
2624  *	Called prior to the locking of any vnodes when we are expecting to
2625  *	write.  We do not want to starve the buffer cache with too many
2626  *	dirty buffers so we block here.  By blocking prior to the locking
2627  *	of any vnodes we attempt to avoid the situation where a locked vnode
2628  *	prevents the various system daemons from flushing related buffers.
2629  */
2630 void
2631 bwillwrite(void)
2632 {
2633 
2634 	if (buf_dirty_count_severe()) {
2635 		mtx_lock(&bdirtylock);
2636 		while (buf_dirty_count_severe()) {
2637 			bdirtywait = 1;
2638 			msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
2639 			    "flswai", 0);
2640 		}
2641 		mtx_unlock(&bdirtylock);
2642 	}
2643 }
2644 
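/*
 * Illustrative sketch (hypothetical write path, modeled on
 * vn_write()): the throttle is applied before any vnode lock is
 * taken so that the flushing daemons are never blocked by us:
 *
 *	bwillwrite();
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = VOP_WRITE(vp, uio, ioflag, cred);
 *	VOP_UNLOCK(vp);
 */
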
2645 /*
2646  * Return true if we have too many dirty buffers.
2647  */
2648 int
2649 buf_dirty_count_severe(void)
2650 {
2651 
2652 	return (!BIT_EMPTY(BUF_DOMAINS, &bdhidirty));
2653 }
2654 
2655 /*
2656  *	brelse:
2657  *
2658  *	Release a busy buffer and, if requested, free its resources.  The
2659  *	buffer will be stashed in the appropriate bufqueue[] allowing it
2660  *	to be accessed later as a cache entity or reused for other purposes.
2661  */
2662 void
2663 brelse(struct buf *bp)
2664 {
2665 	struct mount *v_mnt;
2666 	int qindex;
2667 
2668 	/*
2669 	 * Many functions erroneously call brelse with a NULL bp under rare
2670 	 * error conditions. Simply return when called with a NULL bp.
2671 	 */
2672 	if (bp == NULL)
2673 		return;
2674 	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
2675 	    bp, bp->b_vp, bp->b_flags);
2676 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2677 	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2678 	KASSERT((bp->b_flags & B_VMIO) != 0 || (bp->b_flags & B_NOREUSE) == 0,
2679 	    ("brelse: non-VMIO buffer marked NOREUSE"));
2680 
2681 	if (BUF_LOCKRECURSED(bp)) {
2682 		/*
2683 		 * Do not process, in particular, do not handle the
2684 		 * B_INVAL/B_RELBUF and do not release to free list.
2685 		 */
2686 		BUF_UNLOCK(bp);
2687 		return;
2688 	}
2689 
2690 	if (bp->b_flags & B_MANAGED) {
2691 		bqrelse(bp);
2692 		return;
2693 	}
2694 
2695 	if (LIST_EMPTY(&bp->b_dep)) {
2696 		bp->b_flags &= ~B_IOSTARTED;
2697 	} else {
2698 		KASSERT((bp->b_flags & B_IOSTARTED) == 0,
2699 		    ("brelse: SU io not finished bp %p", bp));
2700 	}
2701 
2702 	if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
2703 		BO_LOCK(bp->b_bufobj);
2704 		bp->b_vflags &= ~BV_BKGRDERR;
2705 		BO_UNLOCK(bp->b_bufobj);
2706 		bdirty(bp);
2707 	}
2708 
2709 	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2710 	    (bp->b_flags & B_INVALONERR)) {
2711 		/*
2712 		 * Forced invalidation of dirty buffer contents, to be used
2713 		 * after a failed write in the rare case that the loss of the
2714 		 * contents is acceptable.  The buffer is invalidated and
2715 		 * freed.
2716 		 */
2717 		bp->b_flags |= B_INVAL | B_RELBUF | B_NOCACHE;
2718 		bp->b_flags &= ~(B_ASYNC | B_CACHE);
2719 	}
2720 
2721 	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2722 	    (bp->b_error != ENXIO || !LIST_EMPTY(&bp->b_dep)) &&
2723 	    !(bp->b_flags & B_INVAL)) {
2724 		/*
2725 		 * Failed write, redirty.  All errors except ENXIO (which
2726 		 * means the device is gone) are treated as being
2727 		 * transient.
2728 		 *
2729 		 * XXX Treating EIO as transient is not correct; the
2730 		 * contract with the local storage device drivers is that
2731 		 * they will only return EIO once the I/O is no longer
2732 		 * retriable.  Network I/O also respects this through the
2733 		 * guarantees of TCP and/or the internal retries of NFS.
2734 		 * ENOMEM might be transient, but we also have no way of
2735 		 * knowing when its ok to retry/reschedule.  In general,
2736 		 * this entire case should be made obsolete through better
2737 		 * error handling/recovery and resource scheduling.
2738 		 *
2739 		 * Do this also for buffers that failed with ENXIO, but have
2740 		 * non-empty dependencies - the soft updates code might need
2741 		 * to access the buffer to untangle them.
2742 		 *
2743 		 * Must clear BIO_ERROR to prevent pages from being scrapped.
2744 		 */
2745 		bp->b_ioflags &= ~BIO_ERROR;
2746 		bdirty(bp);
2747 	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
2748 	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
2749 		/*
2750 		 * Either a failed read I/O, or we were asked to free or not
2751 		 * cache the buffer, or we failed to write to a device that's
2752 		 * no longer present.
2753 		 */
2754 		bp->b_flags |= B_INVAL;
2755 		if (!LIST_EMPTY(&bp->b_dep))
2756 			buf_deallocate(bp);
2757 		if (bp->b_flags & B_DELWRI)
2758 			bdirtysub(bp);
2759 		bp->b_flags &= ~(B_DELWRI | B_CACHE);
2760 		if ((bp->b_flags & B_VMIO) == 0) {
2761 			allocbuf(bp, 0);
2762 			if (bp->b_vp)
2763 				brelvp(bp);
2764 		}
2765 	}
2766 
2767 	/*
2768 	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_truncate()
2769 	 * is called with B_DELWRI set, the underlying pages may wind up
2770 	 * getting freed causing a previous write (bdwrite()) to get 'lost'
2771 	 * because pages associated with a B_DELWRI bp are marked clean.
2772 	 *
2773 	 * We still allow the B_INVAL case to call vfs_vmio_truncate(), even
2774 	 * if B_DELWRI is set.
2775 	 */
2776 	if (bp->b_flags & B_DELWRI)
2777 		bp->b_flags &= ~B_RELBUF;
2778 
2779 	/*
2780 	 * VMIO buffer rundown.  It is no longer necessary to keep a VMIO buffer
2781 	 * constituted, not even NFS buffers now.  Two flags affect this.  If
2782 	 * B_INVAL, the struct buf is invalidated but the VM object is kept
2783 	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
2784 	 *
2785 	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
2786 	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
2787 	 * buffer is also B_INVAL because it hits the re-dirtying code above.
2788 	 *
2789 	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
2790 	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
2791 	 * the commit state and we cannot afford to lose the buffer. If the
2792 	 * buffer has a background write in progress, we need to keep it
2793 	 * around to prevent it from being reconstituted and starting a second
2794 	 * background write.
2795 	 */
2796 
2797 	v_mnt = bp->b_vp != NULL ? bp->b_vp->v_mount : NULL;
2798 
2799 	if ((bp->b_flags & B_VMIO) && (bp->b_flags & B_NOCACHE ||
2800 	    (bp->b_ioflags & BIO_ERROR && bp->b_iocmd == BIO_READ)) &&
2801 	    (v_mnt == NULL || (v_mnt->mnt_vfc->vfc_flags & VFCF_NETWORK) == 0 ||
2802 	    vn_isdisk(bp->b_vp) || (bp->b_flags & B_DELWRI) == 0)) {
2803 		vfs_vmio_invalidate(bp);
2804 		allocbuf(bp, 0);
2805 	}
2806 
2807 	if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0 ||
2808 	    (bp->b_flags & (B_DELWRI | B_NOREUSE)) == B_NOREUSE) {
2809 		allocbuf(bp, 0);
2810 		bp->b_flags &= ~B_NOREUSE;
2811 		if (bp->b_vp != NULL)
2812 			brelvp(bp);
2813 	}
2814 
2815 	/*
2816 	 * If the buffer has junk contents, signal it and eventually
2817 	 * clean up B_DELWRI and disassociate the vnode so that gbincore()
2818 	 * doesn't find it.
2819 	 */
2820 	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
2821 	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
2822 		bp->b_flags |= B_INVAL;
2823 	if (bp->b_flags & B_INVAL) {
2824 		if (bp->b_flags & B_DELWRI)
2825 			bundirty(bp);
2826 		if (bp->b_vp)
2827 			brelvp(bp);
2828 	}
2829 
2830 	buf_track(bp, __func__);
2831 
2832 	/* buffers with no memory */
2833 	if (bp->b_bufsize == 0) {
2834 		buf_free(bp);
2835 		return;
2836 	}
2837 	/* buffers with junk contents */
2838 	if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
2839 	    (bp->b_ioflags & BIO_ERROR)) {
2840 		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
2841 		if (bp->b_vflags & BV_BKGRDINPROG)
2842 			panic("losing buffer 2");
2843 		qindex = QUEUE_CLEAN;
2844 		bp->b_flags |= B_AGE;
2845 	/* remaining buffers */
2846 	} else if (bp->b_flags & B_DELWRI)
2847 		qindex = QUEUE_DIRTY;
2848 	else
2849 		qindex = QUEUE_CLEAN;
2850 
2851 	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
2852 		panic("brelse: not dirty");
2853 
2854 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_RELBUF | B_DIRECT);
2855 	bp->b_xflags &= ~(BX_CVTENXIO);
2856 	/* binsfree unlocks bp. */
2857 	binsfree(bp, qindex);
2858 }
2859 
2860 /*
2861  * Release a buffer back to the appropriate queue but do not try to free
2862  * it.  The buffer is expected to be used again soon.
2863  *
2864  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
2865  * biodone() to requeue an async I/O on completion.  It is also used when
2866  * known good buffers need to be requeued but we think we may need the data
2867  * again soon.
2868  *
2869  * XXX we should be able to leave the B_RELBUF hint set on completion.
2870  */
2871 void
2872 bqrelse(struct buf *bp)
2873 {
2874 	int qindex;
2875 
2876 	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2877 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2878 	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2879 
2880 	qindex = QUEUE_NONE;
2881 	if (BUF_LOCKRECURSED(bp)) {
2882 		/* do not release to free list */
2883 		BUF_UNLOCK(bp);
2884 		return;
2885 	}
2886 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
2887 	bp->b_xflags &= ~(BX_CVTENXIO);
2888 
2889 	if (LIST_EMPTY(&bp->b_dep)) {
2890 		bp->b_flags &= ~B_IOSTARTED;
2891 	} else {
2892 		KASSERT((bp->b_flags & B_IOSTARTED) == 0,
2893 		    ("bqrelse: SU io not finished bp %p", bp));
2894 	}
2895 
2896 	if (bp->b_flags & B_MANAGED) {
2897 		if (bp->b_flags & B_REMFREE)
2898 			bremfreef(bp);
2899 		goto out;
2900 	}
2901 
2902 	/* buffers with stale but valid contents */
2903 	if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
2904 	    BV_BKGRDERR)) == BV_BKGRDERR) {
2905 		BO_LOCK(bp->b_bufobj);
2906 		bp->b_vflags &= ~BV_BKGRDERR;
2907 		BO_UNLOCK(bp->b_bufobj);
2908 		qindex = QUEUE_DIRTY;
2909 	} else {
2910 		if ((bp->b_flags & B_DELWRI) == 0 &&
2911 		    (bp->b_xflags & BX_VNDIRTY))
2912 			panic("bqrelse: not dirty");
2913 		if ((bp->b_flags & B_NOREUSE) != 0) {
2914 			brelse(bp);
2915 			return;
2916 		}
2917 		qindex = QUEUE_CLEAN;
2918 	}
2919 	buf_track(bp, __func__);
2920 	/* binsfree unlocks bp. */
2921 	binsfree(bp, qindex);
2922 	return;
2923 
2924 out:
2925 	buf_track(bp, __func__);
2926 	/* unlock */
2927 	BUF_UNLOCK(bp);
2928 }
2929 
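/*
 * Illustrative sketch: the choice between the two release routines is
 * a cache hint.  A buffer whose data is likely needed again soon is
 * requeued with bqrelse(bp), while one whose contents are junk is
 * discarded via:
 *
 *	bp->b_flags |= B_INVAL | B_RELBUF;
 *	brelse(bp);
 */
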
2930 /*
2931  * Complete I/O to a VMIO backed buffer.  Validate the pages as appropriate,
2932  * restore bogus pages.
2933  */
2934 static void
2935 vfs_vmio_iodone(struct buf *bp)
2936 {
2937 	vm_ooffset_t foff;
2938 	vm_page_t m;
2939 	vm_object_t obj;
2940 	struct vnode *vp __unused;
2941 	int i, iosize, resid;
2942 	bool bogus;
2943 
2944 	obj = bp->b_bufobj->bo_object;
2945 	KASSERT(blockcount_read(&obj->paging_in_progress) >= bp->b_npages,
2946 	    ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
2947 	    blockcount_read(&obj->paging_in_progress), bp->b_npages));
2948 
2949 	vp = bp->b_vp;
2950 	VNPASS(vp->v_holdcnt > 0, vp);
2951 	VNPASS(vp->v_object != NULL, vp);
2952 
2953 	foff = bp->b_offset;
2954 	KASSERT(bp->b_offset != NOOFFSET,
2955 	    ("vfs_vmio_iodone: bp %p has no buffer offset", bp));
2956 
2957 	bogus = false;
2958 	iosize = bp->b_bcount - bp->b_resid;
2959 	for (i = 0; i < bp->b_npages; i++) {
2960 		resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
2961 		if (resid > iosize)
2962 			resid = iosize;
2963 
2964 		/*
2965 		 * cleanup bogus pages, restoring the originals
2966 		 */
2967 		m = bp->b_pages[i];
2968 		if (m == bogus_page) {
2969 			bogus = true;
2970 			m = vm_page_relookup(obj, OFF_TO_IDX(foff));
2971 			if (m == NULL)
2972 				panic("biodone: page disappeared!");
2973 			bp->b_pages[i] = m;
2974 		} else if ((bp->b_iocmd == BIO_READ) && resid > 0) {
2975 			/*
2976 			 * In the write case, the valid and clean bits are
2977 			 * already changed correctly ( see bdwrite() ), so we
2978 			 * only need to do this here in the read case.
2979 			 */
2980 			KASSERT((m->dirty & vm_page_bits(foff & PAGE_MASK,
2981 			    resid)) == 0, ("vfs_vmio_iodone: page %p "
2982 			    "has unexpected dirty bits", m));
2983 			vfs_page_set_valid(bp, foff, m);
2984 		}
2985 		KASSERT(OFF_TO_IDX(foff) == m->pindex,
2986 		    ("vfs_vmio_iodone: foff(%jd)/pindex(%ju) mismatch",
2987 		    (intmax_t)foff, (uintmax_t)m->pindex));
2988 
2989 		vm_page_sunbusy(m);
2990 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2991 		iosize -= resid;
2992 	}
2993 	vm_object_pip_wakeupn(obj, bp->b_npages);
2994 	if (bogus && buf_mapped(bp)) {
2995 		BUF_CHECK_MAPPED(bp);
2996 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
2997 		    bp->b_pages, bp->b_npages);
2998 	}
2999 }
3000 
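/*
 * Worked example for the per-page resid computation above (assuming
 * PAGE_SIZE is 4096): with foff 0x1200, the expression
 * ((foff + PAGE_SIZE) & ~PAGE_MASK) - foff yields 0x2000 - 0x1200 =
 * 0xe00 bytes, i.e. the part of the transfer that lies in the current
 * page, which is then clipped to the remaining iosize.
 */
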
3001 /*
3002  * Perform page invalidation when a buffer is released.  The fully invalid
3003  * pages will be reclaimed later in vfs_vmio_truncate().
3004  */
3005 static void
3006 vfs_vmio_invalidate(struct buf *bp)
3007 {
3008 	vm_object_t obj;
3009 	vm_page_t m;
3010 	int flags, i, resid, poffset, presid;
3011 
3012 	if (buf_mapped(bp)) {
3013 		BUF_CHECK_MAPPED(bp);
3014 		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
3015 	} else
3016 		BUF_CHECK_UNMAPPED(bp);
3017 	/*
3018 	 * Get the base offset and length of the buffer.  Note that
3019 	 * in the VMIO case if the buffer block size is not
3020 	 * page-aligned then the b_data pointer may not be page-aligned.
3021 	 * But our b_pages[] array *IS* page aligned.
3022 	 *
3023 	 * block sizes less than DEV_BSIZE (usually 512) are not
3024 	 * supported due to the page granularity bits (m->valid,
3025 	 * m->dirty, etc...).
3026 	 *
3027 	 * See man buf(9) for more information.
3028 	 */
3029 	flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
3030 	obj = bp->b_bufobj->bo_object;
3031 	resid = bp->b_bufsize;
3032 	poffset = bp->b_offset & PAGE_MASK;
3033 	VM_OBJECT_WLOCK(obj);
3034 	for (i = 0; i < bp->b_npages; i++) {
3035 		m = bp->b_pages[i];
3036 		if (m == bogus_page)
3037 			panic("vfs_vmio_invalidate: Unexpected bogus page.");
3038 		bp->b_pages[i] = NULL;
3039 
3040 		presid = resid > (PAGE_SIZE - poffset) ?
3041 		    (PAGE_SIZE - poffset) : resid;
3042 		KASSERT(presid >= 0, ("brelse: extra page"));
3043 		vm_page_busy_acquire(m, VM_ALLOC_SBUSY);
3044 		if (pmap_page_wired_mappings(m) == 0)
3045 			vm_page_set_invalid(m, poffset, presid);
3046 		vm_page_sunbusy(m);
3047 		vm_page_release_locked(m, flags);
3048 		resid -= presid;
3049 		poffset = 0;
3050 	}
3051 	VM_OBJECT_WUNLOCK(obj);
3052 	bp->b_npages = 0;
3053 }
3054 
3055 /*
3056  * Page-granular truncation of an existing VMIO buffer.
3057  */
3058 static void
3059 vfs_vmio_truncate(struct buf *bp, int desiredpages)
3060 {
3061 	vm_object_t obj;
3062 	vm_page_t m;
3063 	int flags, i;
3064 
3065 	if (bp->b_npages == desiredpages)
3066 		return;
3067 
3068 	if (buf_mapped(bp)) {
3069 		BUF_CHECK_MAPPED(bp);
3070 		pmap_qremove((vm_offset_t)trunc_page((vm_offset_t)bp->b_data) +
3071 		    (desiredpages << PAGE_SHIFT), bp->b_npages - desiredpages);
3072 	} else
3073 		BUF_CHECK_UNMAPPED(bp);
3074 
3075 	/*
3076 	 * The object lock is needed only if we will attempt to free pages.
3077 	 */
3078 	flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
3079 	if ((bp->b_flags & B_DIRECT) != 0) {
3080 		flags |= VPR_TRYFREE;
3081 		obj = bp->b_bufobj->bo_object;
3082 		VM_OBJECT_WLOCK(obj);
3083 	} else {
3084 		obj = NULL;
3085 	}
3086 	for (i = desiredpages; i < bp->b_npages; i++) {
3087 		m = bp->b_pages[i];
3088 		KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
3089 		bp->b_pages[i] = NULL;
3090 		if (obj != NULL)
3091 			vm_page_release_locked(m, flags);
3092 		else
3093 			vm_page_release(m, flags);
3094 	}
3095 	if (obj != NULL)
3096 		VM_OBJECT_WUNLOCK(obj);
3097 	bp->b_npages = desiredpages;
3098 }
3099 
3100 /*
3101  * Byte granular extension of VMIO buffers.
3102  */
3103 static void
3104 vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
3105 {
3106 	/*
3107 	 * We are growing the buffer, possibly in a
3108 	 * byte-granular fashion.
3109 	 */
3110 	vm_object_t obj;
3111 	vm_offset_t toff;
3112 	vm_offset_t tinc;
3113 	vm_page_t m;
3114 
3115 	/*
3116 	 * Step 1, bring in the VM pages from the object, allocating
3117 	 * them if necessary.  We must clear B_CACHE if these pages
3118 	 * are not valid for the range covered by the buffer.
3119 	 */
3120 	obj = bp->b_bufobj->bo_object;
3121 	if (bp->b_npages < desiredpages) {
3122 		KASSERT(desiredpages <= atop(maxbcachebuf),
3123 		    ("vfs_vmio_extend past maxbcachebuf %p %d %u",
3124 		    bp, desiredpages, maxbcachebuf));
3125 
3126 		/*
3127 		 * We must allocate system pages since blocking
3128 		 * here could interfere with paging I/O, no
3129 		 * matter which process we are.
3130 		 *
3131 		 * Only exclusive busy can be tested here.
3132 		 * Blocking on shared busy might lead to
3133 		 * deadlocks once allocbuf() is called after
3134 		 * pages are vfs_busy_pages().
3135 		 */
3136 		(void)vm_page_grab_pages_unlocked(obj,
3137 		    OFF_TO_IDX(bp->b_offset) + bp->b_npages,
3138 		    VM_ALLOC_SYSTEM | VM_ALLOC_IGN_SBUSY |
3139 		    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED,
3140 		    &bp->b_pages[bp->b_npages], desiredpages - bp->b_npages);
3141 		bp->b_npages = desiredpages;
3142 	}
3143 
3144 	/*
3145 	 * Step 2.  We've loaded the pages into the buffer,
3146 	 * we have to figure out if we can still have B_CACHE
3147 	 * set.  Note that B_CACHE is set according to the
3148 	 * byte-granular range ( bcount and size ), not the
3149 	 * aligned range ( newbsize ).
3150 	 *
3151 	 * The VM test is against m->valid, which is DEV_BSIZE
3152 	 * aligned.  Needless to say, the validity of the data
3153 	 * needs to also be DEV_BSIZE aligned.  Note that this
3154 	 * fails with NFS if the server or some other client
3155 	 * extends the file's EOF.  If our buffer is resized,
3156 	 * B_CACHE may remain set! XXX
3157 	 */
3158 	toff = bp->b_bcount;
3159 	tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
3160 	while ((bp->b_flags & B_CACHE) && toff < size) {
3161 		vm_pindex_t pi;
3162 
3163 		if (tinc > (size - toff))
3164 			tinc = size - toff;
3165 		pi = ((bp->b_offset & PAGE_MASK) + toff) >> PAGE_SHIFT;
3166 		m = bp->b_pages[pi];
3167 		vfs_buf_test_cache(bp, bp->b_offset, toff, tinc, m);
3168 		toff += tinc;
3169 		tinc = PAGE_SIZE;
3170 	}
3171 
3172 	/*
3173 	 * Step 3, fixup the KVA pmap.
3174 	 */
3175 	if (buf_mapped(bp))
3176 		bpmap_qenter(bp);
3177 	else
3178 		BUF_CHECK_UNMAPPED(bp);
3179 }
3180 
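/*
 * Worked example for step 2 above (assuming PAGE_SIZE is 4096):
 * growing a buffer with b_offset 0 from b_bcount 1024 to size 8192
 * starts the scan at toff 1024 with tinc 3072, the remainder of the
 * first page, so vfs_buf_test_cache() is consulted for the byte
 * ranges [1024, 4096) and then [4096, 8192).
 */
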
3181 /*
3182  * Check to see if a block at a particular lbn is available for a clustered
3183  * write.
3184  */
3185 static int
3186 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
3187 {
3188 	struct buf *bpa;
3189 	int match;
3190 
3191 	match = 0;
3192 
3193 	/* If the buf isn't in core skip it */
3194 	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
3195 		return (0);
3196 
3197 	/* If the buf is busy we don't want to wait for it */
3198 	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
3199 		return (0);
3200 
3201 	/* Only cluster with valid clusterable delayed write buffers */
3202 	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
3203 	    (B_DELWRI | B_CLUSTEROK))
3204 		goto done;
3205 
3206 	if (bpa->b_bufsize != size)
3207 		goto done;
3208 
3209 	/*
3210 	 * Check to see if it is in the expected place on disk and that the
3211 	 * block has been mapped.
3212 	 */
3213 	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
3214 		match = 1;
3215 done:
3216 	BUF_UNLOCK(bpa);
3217 	return (match);
3218 }
3219 
3220 /*
3221  *	vfs_bio_awrite:
3222  *
3223  *	Implement clustered async writes for clearing out B_DELWRI buffers.
3224  *	This is much better than the old way of writing only one buffer at
3225  *	a time.  Note that we may not be presented with the buffers in the
3226  *	correct order, so we search for the cluster in both directions.
3227  */
3228 int
3229 vfs_bio_awrite(struct buf *bp)
3230 {
3231 	struct bufobj *bo;
3232 	int i;
3233 	int j;
3234 	daddr_t lblkno = bp->b_lblkno;
3235 	struct vnode *vp = bp->b_vp;
3236 	int ncl;
3237 	int nwritten;
3238 	int size;
3239 	int maxcl;
3240 	int gbflags;
3241 
3242 	bo = &vp->v_bufobj;
3243 	gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
3244 	/*
3245 	 * right now we support clustered writing only to regular files.  If
3246 	 * we find a clusterable block we could be in the middle of a cluster
3247 	 * rather than at the beginning.
3248 	 */
3249 	if ((vp->v_type == VREG) &&
3250 	    (vp->v_mount != NULL) && /* Only on nodes that have the size info */
3251 	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
3252 		size = vp->v_mount->mnt_stat.f_iosize;
3253 		maxcl = maxphys / size;
3254 
3255 		BO_RLOCK(bo);
3256 		for (i = 1; i < maxcl; i++)
3257 			if (vfs_bio_clcheck(vp, size, lblkno + i,
3258 			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
3259 				break;
3260 
3261 		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
3262 			if (vfs_bio_clcheck(vp, size, lblkno - j,
3263 			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
3264 				break;
3265 		BO_RUNLOCK(bo);
3266 		--j;
3267 		ncl = i + j;
3268 		/*
3269 		 * this is a possible cluster write
3270 		 */
3271 		if (ncl != 1) {
3272 			BUF_UNLOCK(bp);
3273 			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
3274 			    gbflags);
3275 			return (nwritten);
3276 		}
3277 	}
3278 	bremfree(bp);
3279 	bp->b_flags |= B_ASYNC;
3280 	/*
3281 	 * default (old) behavior, writing out only one block
3282 	 *
3283 	 * XXX returns b_bufsize instead of b_bcount for nwritten?
3284 	 */
3285 	nwritten = bp->b_bufsize;
3286 	(void) bwrite(bp);
3287 
3288 	return (nwritten);
3289 }
3290 
3291 /*
3292  *	getnewbuf_kva:
3293  *
3294  *	Allocate KVA for an empty buf header according to gbflags.
3295  */
3296 static int
3297 getnewbuf_kva(struct buf *bp, int gbflags, int maxsize)
3298 {
3299 
3300 	if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_UNMAPPED) {
3301 		/*
3302 		 * In order to keep fragmentation sane we only allocate kva
3303 		 * in BKVASIZE chunks.  XXX with vmem we can do page size.
3304 		 */
3305 		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
3306 
3307 		if (maxsize != bp->b_kvasize &&
3308 		    bufkva_alloc(bp, maxsize, gbflags))
3309 			return (ENOSPC);
3310 	}
3311 	return (0);
3312 }
3313 
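/*
 * Worked example for the rounding above (assuming the common BKVASIZE
 * of 16384): a request for maxsize 9000 becomes
 * (9000 + BKVAMASK) & ~BKVAMASK == 16384, so mapped bufs consume KVA
 * in uniform chunks and buffer_arena fragments far more slowly.
 */
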
3314 /*
3315  *	getnewbuf:
3316  *
3317  *	Find and initialize a new buffer header, freeing up existing buffers
3318  *	in the bufqueues as necessary.  The new buffer is returned locked.
3319  *
3320  *	We block if:
3321  *		We have insufficient buffer headers
3322  *		We have insufficient buffer space
3323  *		buffer_arena is too fragmented ( space reservation fails )
3324  *		We have to flush dirty buffers ( but we try to avoid this )
3325  *
3326  *	The caller is responsible for releasing the reserved bufspace after
3327  *	allocbuf() is called.
3328  */
3329 static struct buf *
3330 getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int maxsize, int gbflags)
3331 {
3332 	struct bufdomain *bd;
3333 	struct buf *bp;
3334 	bool metadata, reserved;
3335 
3336 	bp = NULL;
3337 	KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3338 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3339 	if (!unmapped_buf_allowed)
3340 		gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3341 
3342 	if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
3343 	    vp->v_type == VCHR)
3344 		metadata = true;
3345 	else
3346 		metadata = false;
3347 	if (vp == NULL)
3348 		bd = &bdomain[0];
3349 	else
3350 		bd = &bdomain[vp->v_bufobj.bo_domain];
3351 
3352 	counter_u64_add(getnewbufcalls, 1);
3353 	reserved = false;
3354 	do {
3355 		if (reserved == false &&
3356 		    bufspace_reserve(bd, maxsize, metadata) != 0) {
3357 			counter_u64_add(getnewbufrestarts, 1);
3358 			continue;
3359 		}
3360 		reserved = true;
3361 		if ((bp = buf_alloc(bd)) == NULL) {
3362 			counter_u64_add(getnewbufrestarts, 1);
3363 			continue;
3364 		}
3365 		if (getnewbuf_kva(bp, gbflags, maxsize) == 0)
3366 			return (bp);
3367 		break;
3368 	} while (buf_recycle(bd, false) == 0);
3369 
3370 	if (reserved)
3371 		bufspace_release(bd, maxsize);
3372 	if (bp != NULL) {
3373 		bp->b_flags |= B_INVAL;
3374 		brelse(bp);
3375 	}
3376 	bufspace_wait(bd, vp, gbflags, slpflag, slptimeo);
3377 
3378 	return (NULL);
3379 }
3380 
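/*
 * Illustrative sketch (hypothetical caller): a NULL return means the
 * domain stayed exhausted even after recycling and a bufspace sleep,
 * so callers simply retry; real callers such as getblkx() add their
 * own termination conditions:
 *
 *	while ((bp = getnewbuf(vp, 0, 0, maxsize, gbflags)) == NULL)
 *		continue;
 */
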
3381 /*
3382  *	buf_daemon:
3383  *
3384  *	buffer flushing daemon.  Buffers are normally flushed by the
3385  *	update daemon but if it cannot keep up this process starts to
3386  *	take the load in an attempt to prevent getnewbuf() from blocking.
3387  */
3388 static struct kproc_desc buf_kp = {
3389 	"bufdaemon",
3390 	buf_daemon,
3391 	&bufdaemonproc
3392 };
3393 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
3394 
3395 static int
3396 buf_flush(struct vnode *vp, struct bufdomain *bd, int target)
3397 {
3398 	int flushed;
3399 
3400 	flushed = flushbufqueues(vp, bd, target, 0);
3401 	if (flushed == 0) {
3402 		/*
3403 		 * Could not find any buffers without rollback
3404 		 * dependencies, so just write the first one
3405 		 * in the hopes of eventually making progress.
3406 		 */
3407 		if (vp != NULL && target > 2)
3408 			target /= 2;
3409 		flushbufqueues(vp, bd, target, 1);
3410 	}
3411 	return (flushed);
3412 }
3413 
3414 static void
3415 buf_daemon_shutdown(void *arg __unused, int howto __unused)
3416 {
3417 	int error;
3418 
3419 	mtx_lock(&bdlock);
3420 	bd_shutdown = true;
3421 	wakeup(&bd_request);
3422 	error = msleep(&bd_shutdown, &bdlock, 0, "buf_daemon_shutdown",
3423 	    60 * hz);
3424 	mtx_unlock(&bdlock);
3425 	if (error != 0)
3426 		printf("bufdaemon wait error: %d\n", error);
3427 }
3428 
3429 static void
3430 buf_daemon(void)
3431 {
3432 	struct bufdomain *bd;
3433 	int speedupreq;
3434 	int lodirty;
3435 	int i;
3436 
3437 	/*
3438 	 * This process needs to be suspended prior to shutdown sync.
3439 	 */
3440 	EVENTHANDLER_REGISTER(shutdown_pre_sync, buf_daemon_shutdown, NULL,
3441 	    SHUTDOWN_PRI_LAST + 100);
3442 
3443 	/*
3444 	 * Start the buf clean daemons as children threads.
3445 	 */
3446 	for (i = 0 ; i < buf_domains; i++) {
3447 		int error;
3448 
3449 		error = kthread_add((void (*)(void *))bufspace_daemon,
3450 		    &bdomain[i], curproc, NULL, 0, 0, "bufspacedaemon-%d", i);
3451 		if (error)
3452 			panic("error %d spawning bufspace daemon", error);
3453 	}
3454 
3455 	/*
3456 	 * This process is allowed to take the buffer cache to the limit
3457 	 */
3458 	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
3459 	mtx_lock(&bdlock);
3460 	while (!bd_shutdown) {
3461 		bd_request = 0;
3462 		mtx_unlock(&bdlock);
3463 
3464 		/*
3465 		 * Save speedupreq for this pass and reset to capture new
3466 		 * requests.
3467 		 */
3468 		speedupreq = bd_speedupreq;
3469 		bd_speedupreq = 0;
3470 
3471 		/*
3472 		 * Flush each domain sequentially according to its level and
3473 		 * the speedup request.
3474 		 */
3475 		for (i = 0; i < buf_domains; i++) {
3476 			bd = &bdomain[i];
3477 			if (speedupreq)
3478 				lodirty = bd->bd_numdirtybuffers / 2;
3479 			else
3480 				lodirty = bd->bd_lodirtybuffers;
3481 			while (bd->bd_numdirtybuffers > lodirty) {
3482 				if (buf_flush(NULL, bd,
3483 				    bd->bd_numdirtybuffers - lodirty) == 0)
3484 					break;
3485 				kern_yield(PRI_USER);
3486 			}
3487 		}
3488 
3489 		/*
3490 		 * Only clear bd_request if we have reached our low water
3491 		 * mark.  The buf_daemon normally waits 1 second and
3492 		 * then incrementally flushes any dirty buffers that have
3493 		 * built up, within reason.
3494 		 *
3495 		 * If we were unable to hit our low water mark and couldn't
3496 		 * find any flushable buffers, we sleep for a short period
3497 		 * to avoid endless loops on unlockable buffers.
3498 		 */
3499 		mtx_lock(&bdlock);
3500 		if (bd_shutdown)
3501 			break;
3502 		if (BIT_EMPTY(BUF_DOMAINS, &bdlodirty)) {
3503 			/*
3504 			 * We reached our low water mark, reset the
3505 			 * request and sleep until we are needed again.
3506 			 * The sleep is just so the suspend code works.
3507 			 */
3508 			bd_request = 0;
3509 			/*
3510 			 * Do an extra wakeup in case dirty threshold
3511 			 * changed via sysctl and the explicit transition
3512 			 * out of shortfall was missed.
3513 			 */
3514 			bdirtywakeup();
3515 			if (runningbufspace <= lorunningspace)
3516 				runningwakeup();
3517 			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
3518 		} else {
3519 			/*
3520 			 * We couldn't find any flushable dirty buffers but
3521 			 * still have too many dirty buffers, we
3522 			 * have to sleep and try again.  (rare)
3523 			 */
3524 			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
3525 		}
3526 	}
3527 	wakeup(&bd_shutdown);
3528 	mtx_unlock(&bdlock);
3529 	kthread_exit();
3530 }
3531 
3532 /*
3533  *	flushbufqueues:
3534  *
3535  *	Try to flush a buffer in the dirty queue.  We must be careful to
3536  *	free up B_INVAL buffers instead of writing them, something NFS
3537  *	is particularly sensitive to.
3538  */
3539 static int flushwithdeps = 0;
3540 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW | CTLFLAG_STATS,
3541     &flushwithdeps, 0,
3542     "Number of buffers flushed with dependencies that require rollbacks");
3543 
3544 static int
3545 flushbufqueues(struct vnode *lvp, struct bufdomain *bd, int target,
3546     int flushdeps)
3547 {
3548 	struct bufqueue *bq;
3549 	struct buf *sentinel;
3550 	struct vnode *vp;
3551 	struct mount *mp;
3552 	struct buf *bp;
3553 	int hasdeps;
3554 	int flushed;
3555 	int error;
3556 	bool unlock;
3557 
3558 	flushed = 0;
3559 	bq = &bd->bd_dirtyq;
3560 	bp = NULL;
3561 	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
3562 	sentinel->b_qindex = QUEUE_SENTINEL;
3563 	BQ_LOCK(bq);
3564 	TAILQ_INSERT_HEAD(&bq->bq_queue, sentinel, b_freelist);
3565 	BQ_UNLOCK(bq);
3566 	while (flushed != target) {
3567 		maybe_yield();
3568 		BQ_LOCK(bq);
3569 		bp = TAILQ_NEXT(sentinel, b_freelist);
3570 		if (bp != NULL) {
3571 			TAILQ_REMOVE(&bq->bq_queue, sentinel, b_freelist);
3572 			TAILQ_INSERT_AFTER(&bq->bq_queue, bp, sentinel,
3573 			    b_freelist);
3574 		} else {
3575 			BQ_UNLOCK(bq);
3576 			break;
3577 		}
3578 		/*
3579 		 * Skip sentinels inserted by other invocations of
3580 		 * flushbufqueues(), taking care not to reorder them.
3581 		 *
3582 		 * Only flush the buffers that belong to the
3583 		 * vnode locked by the curthread.
3584 		 */
3585 		if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
3586 		    bp->b_vp != lvp)) {
3587 			BQ_UNLOCK(bq);
3588 			continue;
3589 		}
3590 		error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
3591 		BQ_UNLOCK(bq);
3592 		if (error != 0)
3593 			continue;
3594 
3595 		/*
3596 		 * BKGRDINPROG can only be set with the buf and bufobj
3597 		 * locks both held.  We tolerate a race to clear it here.
3598 		 */
3599 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
3600 		    (bp->b_flags & B_DELWRI) == 0) {
3601 			BUF_UNLOCK(bp);
3602 			continue;
3603 		}
3604 		if (bp->b_flags & B_INVAL) {
3605 			bremfreef(bp);
3606 			brelse(bp);
3607 			flushed++;
3608 			continue;
3609 		}
3610 
3611 		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
3612 			if (flushdeps == 0) {
3613 				BUF_UNLOCK(bp);
3614 				continue;
3615 			}
3616 			hasdeps = 1;
3617 		} else
3618 			hasdeps = 0;
3619 		/*
3620 		 * We must hold the lock on a vnode before writing
3621 		 * one of its buffers.  Otherwise we may confuse the
3622 		 * system or, in the case of a snapshot vnode, deadlock
3623 		 * it.
3624 		 *
3625 		 * The lock order here is the reverse of the normal
3626 		 * order of vnode lock followed by buf lock.  This is ok
3627 		 * because LK_NOWAIT prevents deadlock.
3628 		 */
3629 		vp = bp->b_vp;
3630 		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
3631 			BUF_UNLOCK(bp);
3632 			continue;
3633 		}
3634 		if (lvp == NULL) {
3635 			unlock = true;
3636 			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
3637 		} else {
3638 			ASSERT_VOP_LOCKED(vp, "getbuf");
3639 			unlock = false;
3640 			error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
3641 			    vn_lock(vp, LK_TRYUPGRADE);
3642 		}
3643 		if (error == 0) {
3644 			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
3645 			    bp, bp->b_vp, bp->b_flags);
3646 			if (curproc == bufdaemonproc) {
3647 				vfs_bio_awrite(bp);
3648 			} else {
3649 				bremfree(bp);
3650 				bwrite(bp);
3651 				counter_u64_add(notbufdflushes, 1);
3652 			}
3653 			vn_finished_write(mp);
3654 			if (unlock)
3655 				VOP_UNLOCK(vp);
3656 			flushwithdeps += hasdeps;
3657 			flushed++;
3658 
3659 			/*
3660 			 * Sleeping on runningbufspace while holding
3661 			 * vnode lock leads to deadlock.
3662 			 */
3663 			if (curproc == bufdaemonproc &&
3664 			    runningbufspace > hirunningspace)
3665 				waitrunningbufspace();
3666 			continue;
3667 		}
3668 		vn_finished_write(mp);
3669 		BUF_UNLOCK(bp);
3670 	}
3671 	BQ_LOCK(bq);
3672 	TAILQ_REMOVE(&bq->bq_queue, sentinel, b_freelist);
3673 	BQ_UNLOCK(bq);
3674 	free(sentinel, M_TEMP);
3675 	return (flushed);
3676 }
3677 
3678 /*
3679  * Check to see if a block is currently memory resident.
3680  */
3681 struct buf *
3682 incore(struct bufobj *bo, daddr_t blkno)
3683 {
3684 	return (gbincore_unlocked(bo, blkno));
3685 }
3686 
3687 /*
3688  * Returns true if no I/O is needed to access the
3689  * associated VM object.  This is like incore except
3690  * it also hunts around in the VM system for the data.
3691  */
3692 bool
3693 inmem(struct vnode *vp, daddr_t blkno)
3694 {
3695 	vm_object_t obj;
3696 	vm_offset_t toff, tinc, size;
3697 	vm_page_t m, n;
3698 	vm_ooffset_t off;
3699 	int valid;
3700 
3701 	ASSERT_VOP_LOCKED(vp, "inmem");
3702 
3703 	if (incore(&vp->v_bufobj, blkno))
3704 		return (true);
3705 	if (vp->v_mount == NULL)
3706 		return (false);
3707 	obj = vp->v_object;
3708 	if (obj == NULL)
3709 		return (false);
3710 
3711 	size = PAGE_SIZE;
3712 	if (size > vp->v_mount->mnt_stat.f_iosize)
3713 		size = vp->v_mount->mnt_stat.f_iosize;
3714 	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
3715 
3716 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
3717 		m = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
3718 recheck:
3719 		if (m == NULL)
3720 			return (false);
3721 
3722 		tinc = size;
3723 		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
3724 			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
3725 		/*
3726 		 * Consider page validity only if page mapping didn't change
3727 		 * during the check.
3728 		 */
3729 		valid = vm_page_is_valid(m,
3730 		    (vm_offset_t)((toff + off) & PAGE_MASK), tinc);
3731 		n = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
3732 		if (m != n) {
3733 			m = n;
3734 			goto recheck;
3735 		}
3736 		if (!valid)
3737 			return (false);
3738 	}
3739 	return (true);
3740 }
3741 
3742 /*
3743  * Set the dirty range for a buffer based on the status of the dirty
3744  * bits in the pages comprising the buffer.  The range is limited
3745  * to the size of the buffer.
3746  *
3747  * Tell the VM system that the pages associated with this buffer
3748  * are clean.  This is used for delayed writes where the data is
3749  * going to go to disk eventually without additional VM intervention.
3750  *
3751  * Note that while we only really need to clean through to b_bcount, we
3752  * just go ahead and clean through to b_bufsize.
3753  */
3754 static void
3755 vfs_clean_pages_dirty_buf(struct buf *bp)
3756 {
3757 	vm_ooffset_t foff, noff, eoff;
3758 	vm_page_t m;
3759 	int i;
3760 
3761 	if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
3762 		return;
3763 
3764 	foff = bp->b_offset;
3765 	KASSERT(bp->b_offset != NOOFFSET,
3766 	    ("vfs_clean_pages_dirty_buf: no buffer offset"));
3767 
3768 	vfs_busy_pages_acquire(bp);
3769 	vfs_setdirty_range(bp);
3770 	for (i = 0; i < bp->b_npages; i++) {
3771 		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3772 		eoff = noff;
3773 		if (eoff > bp->b_offset + bp->b_bufsize)
3774 			eoff = bp->b_offset + bp->b_bufsize;
3775 		m = bp->b_pages[i];
3776 		vfs_page_set_validclean(bp, foff, m);
3777 		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
3778 		foff = noff;
3779 	}
3780 	vfs_busy_pages_release(bp);
3781 }
3782 
3783 static void
3784 vfs_setdirty_range(struct buf *bp)
3785 {
3786 	vm_offset_t boffset;
3787 	vm_offset_t eoffset;
3788 	int i;
3789 
3790 	/*
3791 	 * test the pages to see if they have been modified directly
3792 	 * by users through the VM system.
3793 	 */
3794 	for (i = 0; i < bp->b_npages; i++)
3795 		vm_page_test_dirty(bp->b_pages[i]);
3796 
3797 	/*
3798 	 * Calculate the encompassing dirty range, boffset and eoffset,
3799 	 * (eoffset - boffset) bytes.
3800 	 */
3801 
3802 	for (i = 0; i < bp->b_npages; i++) {
3803 		if (bp->b_pages[i]->dirty)
3804 			break;
3805 	}
3806 	boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3807 
3808 	for (i = bp->b_npages - 1; i >= 0; --i) {
3809 		if (bp->b_pages[i]->dirty) {
3810 			break;
3811 		}
3812 	}
3813 	eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3814 
3815 	/*
3816 	 * Fit it to the buffer.
3817 	 */
3818 
3819 	if (eoffset > bp->b_bcount)
3820 		eoffset = bp->b_bcount;
3821 
3822 	/*
3823 	 * If we have a good dirty range, merge with the existing
3824 	 * dirty range.
3825 	 */
3826 
3827 	if (boffset < eoffset) {
3828 		if (bp->b_dirtyoff > boffset)
3829 			bp->b_dirtyoff = boffset;
3830 		if (bp->b_dirtyend < eoffset)
3831 			bp->b_dirtyend = eoffset;
3832 	}
3833 }
3834 
3835 /*
3836  * Allocate the KVA mapping for an existing buffer.
3837  * If an unmapped buffer is provided but a mapped buffer is requested, also
3838  * take care to properly set up the mappings between pages and KVA.
3839  */
3840 static void
3841 bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
3842 {
3843 	int bsize, maxsize, need_mapping, need_kva;
3844 	off_t offset;
3845 
3846 	need_mapping = bp->b_data == unmapped_buf &&
3847 	    (gbflags & GB_UNMAPPED) == 0;
3848 	need_kva = bp->b_kvabase == unmapped_buf &&
3849 	    bp->b_data == unmapped_buf &&
3850 	    (gbflags & GB_KVAALLOC) != 0;
3851 	if (!need_mapping && !need_kva)
3852 		return;
3853 
3854 	BUF_CHECK_UNMAPPED(bp);
3855 
3856 	if (need_mapping && bp->b_kvabase != unmapped_buf) {
3857 		/*
3858 		 * Buffer is not mapped, but the KVA was already
3859 		 * reserved at the time of the instantiation.  Use the
3860 		 * allocated space.
3861 		 */
3862 		goto has_addr;
3863 	}
3864 
3865 	/*
3866 	 * Calculate the amount of the address space we would reserve
3867 	 * if the buffer was mapped.
3868 	 */
3869 	bsize = vn_isdisk(bp->b_vp) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
3870 	KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3871 	offset = blkno * bsize;
3872 	maxsize = size + (offset & PAGE_MASK);
3873 	maxsize = imax(maxsize, bsize);
3874 
3875 	while (bufkva_alloc(bp, maxsize, gbflags) != 0) {
3876 		if ((gbflags & GB_NOWAIT_BD) != 0) {
3877 			/*
3878 			 * XXXKIB: defragmentation cannot
3879 			 * succeed, not sure what else to do.
3880 			 */
3881 			panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
3882 		}
3883 		counter_u64_add(mappingrestarts, 1);
3884 		bufspace_wait(bufdomain(bp), bp->b_vp, gbflags, 0, 0);
3885 	}
3886 has_addr:
3887 	if (need_mapping) {
3888 		/* b_offset is handled by bpmap_qenter. */
3889 		bp->b_data = bp->b_kvabase;
3890 		BUF_CHECK_MAPPED(bp);
3891 		bpmap_qenter(bp);
3892 	}
3893 }
3894 
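/*
 * getblk() is a convenience wrapper around getblkx() for callers that do
 * not need the error code; it passes blkno for both the logical and disk
 * block numbers and returns NULL on failure.
 */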
3895 struct buf *
3896 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
3897     int flags)
3898 {
3899 	struct buf *bp;
3900 	int error;
3901 
3902 	error = getblkx(vp, blkno, blkno, size, slpflag, slptimeo, flags, &bp);
3903 	if (error != 0)
3904 		return (NULL);
3905 	return (bp);
3906 }
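
/*
 * A minimal usage sketch (illustrative only, not code from this file):
 * a typical read path obtains a buffer and issues I/O only when B_CACHE
 * is clear, roughly as bread() does:
 *
 *	bp = getblk(vp, lblkno, bsize, 0, 0, 0);
 *	if (bp != NULL && (bp->b_flags & B_CACHE) == 0) {
 *		bp->b_flags &= ~B_INVAL;
 *		bp->b_ioflags &= ~BIO_ERROR;
 *		bp->b_iocmd = BIO_READ;
 *		bp->b_iooffset = dbtob(bp->b_blkno);
 *		bstrategy(bp);
 *		error = bufwait(bp);
 *	}
 */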
3907 
3908 /*
3909  *	getblkx:
3910  *
3911  *	Get a block given a specified block and offset into a file/device.
3912  *	The buffers B_DONE bit will be cleared on return, making it almost
3913  *	The buffer's B_DONE bit will be cleared on return, making it almost
3914  *	return.  The caller should clear B_INVAL prior to initiating a
3915  *	READ.
3916  *
3917  *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
3918  *	an existing buffer.
3919  *
3920  *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
3921  *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
3922  *	and then cleared based on the backing VM.  If the previous buffer is
3923  *	non-0-sized but invalid, B_CACHE will be cleared.
3924  *
3925  *	If getblk() must create a new buffer, the new buffer is returned with
3926  *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
3927  *	case it is returned with B_INVAL clear and B_CACHE set based on the
3928  *	backing VM.
3929  *
3930  *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
3931  *	B_CACHE bit is clear.
3932  *
3933  *	What this means, basically, is that the caller should use B_CACHE to
3934  *	determine whether the buffer is fully valid or not and should clear
3935  *	B_INVAL prior to issuing a read.  If the caller intends to validate
3936  *	the buffer by loading its data area with something, the caller needs
3937  *	to clear B_INVAL.  If the caller does this without issuing an I/O,
3938  *	the caller should set B_CACHE ( as an optimization ), else the caller
3939  *	should issue the I/O and biodone() will set B_CACHE if the I/O was
3940  *	a write attempt or if it was a successful read.  If the caller
3941  *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
3942  *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
3943  *
3944  *	The blkno parameter is the logical block being requested. Normally
3945  *	the mapping of logical block number to disk block address is done
3946  *	by calling VOP_BMAP(). However, if the mapping is already known, the
3947  *	disk block address can be passed using the dblkno parameter. If the
3948  *	disk block address is not known, then the same value should be passed
3949  *	for blkno and dblkno.
3950  */
3951 int
3952 getblkx(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size, int slpflag,
3953     int slptimeo, int flags, struct buf **bpp)
3954 {
3955 	struct buf *bp;
3956 	struct bufobj *bo;
3957 	daddr_t d_blkno;
3958 	int bsize, error, maxsize, vmio;
3959 	off_t offset;
3960 
3961 	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
3962 	KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3963 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3964 	if (vp->v_type != VCHR)
3965 		ASSERT_VOP_LOCKED(vp, "getblk");
3966 	if (size > maxbcachebuf)
3967 		panic("getblk: size(%d) > maxbcachebuf(%d)\n", size,
3968 		    maxbcachebuf);
3969 	if (!unmapped_buf_allowed)
3970 		flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3971 
3972 	bo = &vp->v_bufobj;
3973 	d_blkno = dblkno;
3974 
3975 	/* Attempt lockless lookup first. */
3976 	bp = gbincore_unlocked(bo, blkno);
3977 	if (bp == NULL) {
3978 		/*
3979 		 * With GB_NOCREAT we must be certain the buffer is absent,
3980 		 * as it may have been reassigned during the unlocked lookup.
3981 		 */
3982 		if ((flags & GB_NOCREAT) != 0)
3983 			goto loop;
3984 		goto newbuf_unlocked;
3985 	}
3986 
3987 	error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL, "getblku", 0,
3988 	    0);
3989 	if (error != 0)
3990 		goto loop;
3991 
3992 	/* Verify buf identity has not changed since lookup. */
3993 	if (bp->b_bufobj == bo && bp->b_lblkno == blkno)
3994 		goto foundbuf_fastpath;
3995 
3996 	/* It changed, fallback to locked lookup. */
3997 	BUF_UNLOCK_RAW(bp);
3998 
3999 loop:
4000 	BO_RLOCK(bo);
4001 	bp = gbincore(bo, blkno);
4002 	if (bp != NULL) {
4003 		int lockflags;
4004 
4005 		/*
4006 		 * Buffer is in-core.  If the buffer is neither busy nor managed,
4007 		 * it must be on a queue.
4008 		 */
4009 		lockflags = LK_EXCLUSIVE | LK_INTERLOCK |
4010 		    ((flags & GB_LOCK_NOWAIT) != 0 ? LK_NOWAIT : LK_SLEEPFAIL);
4011 #ifdef WITNESS
4012 		lockflags |= (flags & GB_NOWITNESS) != 0 ? LK_NOWITNESS : 0;
4013 #endif
4014 
4015 		error = BUF_TIMELOCK(bp, lockflags,
4016 		    BO_LOCKPTR(bo), "getblk", slpflag, slptimeo);
4017 
4018 		/*
4019 		 * If we slept and got the lock we have to restart in case
4020 		 * the buffer changed identities.
4021 		 */
4022 		if (error == ENOLCK)
4023 			goto loop;
4024 		/* We timed out or were interrupted. */
4025 		else if (error != 0)
4026 			return (error);
4027 
4028 foundbuf_fastpath:
4029 		/* If recursed, assume caller knows the rules. */
4030 		if (BUF_LOCKRECURSED(bp))
4031 			goto end;
4032 
4033 		/*
4034 		 * The buffer is locked.  B_CACHE is cleared if the buffer is
4035 		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
4036 		 * and for a VMIO buffer B_CACHE is adjusted according to the
4037 		 * backing VM cache.
4038 		 */
4039 		if (bp->b_flags & B_INVAL)
4040 			bp->b_flags &= ~B_CACHE;
4041 		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
4042 			bp->b_flags |= B_CACHE;
4043 		if (bp->b_flags & B_MANAGED)
4044 			MPASS(bp->b_qindex == QUEUE_NONE);
4045 		else
4046 			bremfree(bp);
4047 
4048 		/*
4049 		 * check for size inconsistencies for the non-VMIO case.
4050 		 */
4051 		if (bp->b_bcount != size) {
4052 			if ((bp->b_flags & B_VMIO) == 0 ||
4053 			    (size > bp->b_kvasize)) {
4054 				if (bp->b_flags & B_DELWRI) {
4055 					bp->b_flags |= B_NOCACHE;
4056 					bwrite(bp);
4057 				} else {
4058 					if (LIST_EMPTY(&bp->b_dep)) {
4059 						bp->b_flags |= B_RELBUF;
4060 						brelse(bp);
4061 					} else {
4062 						bp->b_flags |= B_NOCACHE;
4063 						bwrite(bp);
4064 					}
4065 				}
4066 				goto loop;
4067 			}
4068 		}
4069 
4070 		/*
4071 		 * Handle the case of unmapped buffer which should
4072 		 * become mapped, or the buffer for which KVA
4073 		 * reservation is requested.
4074 		 */
4075 		bp_unmapped_get_kva(bp, blkno, size, flags);
4076 
4077 		/*
4078 		 * If the size is inconsistent in the VMIO case, we can resize
4079 		 * the buffer.  This might lead to B_CACHE getting set or
4080 		 * cleared.  If the size has not changed, B_CACHE remains
4081 		 * unchanged from its previous state.
4082 		 */
4083 		allocbuf(bp, size);
4084 
4085 		KASSERT(bp->b_offset != NOOFFSET,
4086 		    ("getblk: no buffer offset"));
4087 
4088 		/*
4089 		 * A buffer with B_DELWRI set and B_CACHE clear must
4090 		 * be committed before we can return the buffer in
4091 		 * order to prevent the caller from issuing a read
4092 		 * ( due to B_CACHE not being set ) and overwriting
4093 		 * it.
4094 		 *
4095 		 * Most callers, including NFS and FFS, need this to
4096 		 * operate properly either because they assume they
4097 		 * can issue a read if B_CACHE is not set, or because
4098 		 * ( for example ) an uncached B_DELWRI might loop due
4099 		 * to softupdates re-dirtying the buffer.  In the latter
4100 		 * case, B_CACHE is set after the first write completes,
4101 		 * preventing further loops.
4102 		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
4103 		 * above while extending the buffer, we cannot allow the
4104 		 * buffer to remain with B_CACHE set after the write
4105 		 * completes or it will represent a corrupt state.  To
4106 		 * deal with this we set B_NOCACHE to scrap the buffer
4107 		 * after the write.
4108 		 *
4109 		 * We might be able to do something fancy, like setting
4110 		 * B_CACHE in bwrite() except if B_DELWRI is already set,
4111 		 * so the below call doesn't set B_CACHE, but that gets real
4112 		 * confusing.  This is much easier.
4113 		 */
4114 
4115 		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
4116 			bp->b_flags |= B_NOCACHE;
4117 			bwrite(bp);
4118 			goto loop;
4119 		}
4120 		bp->b_flags &= ~B_DONE;
4121 	} else {
4122 		/*
4123 		 * Buffer is not in-core, create new buffer.  The buffer
4124 		 * returned by getnewbuf() is locked.  Note that the returned
4125 		 * buffer is also considered valid (not marked B_INVAL).
4126 		 */
4127 		BO_RUNLOCK(bo);
4128 newbuf_unlocked:
4129 		/*
4130 		 * If the user does not want us to create the buffer, bail out
4131 		 * here.
4132 		 */
4133 		if (flags & GB_NOCREAT)
4134 			return (EEXIST);
4135 
4136 		bsize = vn_isdisk(vp) ? DEV_BSIZE : bo->bo_bsize;
4137 		KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
4138 		offset = blkno * bsize;
4139 		vmio = vp->v_object != NULL;
4140 		if (vmio) {
4141 			maxsize = size + (offset & PAGE_MASK);
4142 		} else {
4143 			maxsize = size;
4144 			/* Do not allow non-VMIO unmapped buffers. */
4145 			flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
4146 		}
4147 		maxsize = imax(maxsize, bsize);
4148 		if ((flags & GB_NOSPARSE) != 0 && vmio &&
4149 		    !vn_isdisk(vp)) {
4150 			error = VOP_BMAP(vp, blkno, NULL, &d_blkno, 0, 0);
4151 			KASSERT(error != EOPNOTSUPP,
4152 			    ("GB_NOSPARSE from fs not supporting bmap, vp %p",
4153 			    vp));
4154 			if (error != 0)
4155 				return (error);
4156 			if (d_blkno == -1)
4157 				return (EJUSTRETURN);
4158 		}
4159 
4160 		bp = getnewbuf(vp, slpflag, slptimeo, maxsize, flags);
4161 		if (bp == NULL) {
4162 			if (slpflag || slptimeo)
4163 				return (ETIMEDOUT);
4164 			/*
4165 			 * XXX This is here until the sleep path is diagnosed
4166 			 * enough to work under very low memory conditions.
4167 			 *
4168 			 * There's an issue on low memory, 4BSD+non-preempt
4169 			 * systems (eg MIPS routers with 32MB RAM) where buffer
4170 			 * exhaustion occurs without sleeping for buffer
4171 			 * reclamation.  This just sticks in a loop and
4172 			 * constantly attempts to allocate a buffer, which
4173 			 * hits exhaustion and tries to wakeup bufdaemon.
4174 			 * This never happens because we never yield.
4175 			 *
4176 			 * The real solution is to identify and fix these cases
4177 			 * so we aren't effectively busy-waiting in a loop
4178 			 * until the reclamation path has cycles to run.
4179 			 */
4180 			kern_yield(PRI_USER);
4181 			goto loop;
4182 		}
4183 
4184 		/*
4185 		 * This code is used to make sure that a buffer is not
4186 		 * created while the getnewbuf routine is blocked.
4187 		 * This can be a problem whether the vnode is locked or not.
4188 		 * If the buffer is created out from under us, we have to
4189 		 * throw away the one we just created.
4190 		 *
4191 		 * Note: this must occur before we associate the buffer
4192 		 * with the vp especially considering limitations in
4193 		 * the splay tree implementation when dealing with duplicate
4194 		 * lblkno's.
4195 		 */
4196 		BO_LOCK(bo);
4197 		if (gbincore(bo, blkno)) {
4198 			BO_UNLOCK(bo);
4199 			bp->b_flags |= B_INVAL;
4200 			bufspace_release(bufdomain(bp), maxsize);
4201 			brelse(bp);
4202 			goto loop;
4203 		}
4204 
4205 		/*
4206 		 * Insert the buffer into the hash, so that it can
4207 		 * be found by incore.
4208 		 */
4209 		bp->b_lblkno = blkno;
4210 		bp->b_blkno = d_blkno;
4211 		bp->b_offset = offset;
4212 		bgetvp(vp, bp);
4213 		BO_UNLOCK(bo);
4214 
4215 		/*
4216 		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
4217 		 * buffer size starts out as 0, B_CACHE will be set by
4218 		 * allocbuf() for the VMIO case prior to it testing the
4219 		 * backing store for validity.
4220 		 */
4221 
4222 		if (vmio) {
4223 			bp->b_flags |= B_VMIO;
4224 			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
4225 			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
4226 			    bp, vp->v_object, bp->b_bufobj->bo_object));
4227 		} else {
4228 			bp->b_flags &= ~B_VMIO;
4229 			KASSERT(bp->b_bufobj->bo_object == NULL,
4230 			    ("ARGH! has b_bufobj->bo_object %p %p\n",
4231 			    bp, bp->b_bufobj->bo_object));
4232 			BUF_CHECK_MAPPED(bp);
4233 		}
4234 
4235 		allocbuf(bp, size);
4236 		bufspace_release(bufdomain(bp), maxsize);
4237 		bp->b_flags &= ~B_DONE;
4238 	}
4239 	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
4240 end:
4241 	buf_track(bp, __func__);
4242 	KASSERT(bp->b_bufobj == bo,
4243 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
4244 	*bpp = bp;
4245 	return (0);
4246 }
4247 
4248 /*
4249  * Get an empty, disassociated buffer of given size.  The buffer is initially
4250  * set to B_INVAL.
4251  */
4252 struct buf *
4253 geteblk(int size, int flags)
4254 {
4255 	struct buf *bp;
4256 	int maxsize;
4257 
4258 	maxsize = (size + BKVAMASK) & ~BKVAMASK;
4259 	while ((bp = getnewbuf(NULL, 0, 0, maxsize, flags)) == NULL) {
4260 		if ((flags & GB_NOWAIT_BD) &&
4261 		    (curthread->td_pflags & TDP_BUFNEED) != 0)
4262 			return (NULL);
4263 	}
4264 	allocbuf(bp, size);
4265 	bufspace_release(bufdomain(bp), maxsize);
4266 	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
4267 	return (bp);
4268 }
4269 
4270 /*
4271  * Truncate the backing store for a non-vmio buffer.
4272  */
4273 static void
4274 vfs_nonvmio_truncate(struct buf *bp, int newbsize)
4275 {
4276 
4277 	if (bp->b_flags & B_MALLOC) {
4278 		/*
4279 		 * malloced buffers are not shrunk
4280 		 */
4281 		if (newbsize == 0) {
4282 			bufmallocadjust(bp, 0);
4283 			free(bp->b_data, M_BIOBUF);
4284 			bp->b_data = bp->b_kvabase;
4285 			bp->b_flags &= ~B_MALLOC;
4286 		}
4287 		return;
4288 	}
4289 	vm_hold_free_pages(bp, newbsize);
4290 	bufspace_adjust(bp, newbsize);
4291 }
4292 
4293 /*
4294  * Extend the backing for a non-VMIO buffer.
4295  */
4296 static void
4297 vfs_nonvmio_extend(struct buf *bp, int newbsize)
4298 {
4299 	caddr_t origbuf;
4300 	int origbufsize;
4301 
4302 	/*
4303 	 * We only use malloced memory on the first allocation,
4304 	 * and revert to page-allocated memory when the buffer
4305 	 * grows.
4306 	 *
4307 	 * There is a potential SMP race here that could lead
4308 	 * to bufmallocspace slightly passing the max.  It
4309 	 * is probably extremely rare and not worth worrying
4310 	 * over.
4311 	 */
4312 	if (bp->b_bufsize == 0 && newbsize <= PAGE_SIZE/2 &&
4313 	    bufmallocspace < maxbufmallocspace) {
4314 		bp->b_data = malloc(newbsize, M_BIOBUF, M_WAITOK);
4315 		bp->b_flags |= B_MALLOC;
4316 		bufmallocadjust(bp, newbsize);
4317 		return;
4318 	}
4319 
4320 	/*
4321 	 * If the buffer is growing on its other-than-first
4322 	 * allocation then we revert to the page-allocation
4323 	 * scheme.
4324 	 */
4325 	origbuf = NULL;
4326 	origbufsize = 0;
4327 	if (bp->b_flags & B_MALLOC) {
4328 		origbuf = bp->b_data;
4329 		origbufsize = bp->b_bufsize;
4330 		bp->b_data = bp->b_kvabase;
4331 		bufmallocadjust(bp, 0);
4332 		bp->b_flags &= ~B_MALLOC;
4333 		newbsize = round_page(newbsize);
4334 	}
4335 	vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize,
4336 	    (vm_offset_t) bp->b_data + newbsize);
4337 	if (origbuf != NULL) {
4338 		bcopy(origbuf, bp->b_data, origbufsize);
4339 		free(origbuf, M_BIOBUF);
4340 	}
4341 	bufspace_adjust(bp, newbsize);
4342 }
4343 
4344 /*
4345  * This code obtains the buffer memory from either anonymous system
4346  * memory (in the case of non-VMIO operations) or from an associated
4347  * VM object (in the case of VMIO operations).  This code is able to
4348  * resize a buffer up or down.
4349  *
4350  * Note that this code is tricky, and has many complications to resolve
4351  * deadlock or inconsistent data situations.  Tread lightly!!!
4352  * There are B_CACHE and B_DELWRI interactions that must be dealt with by
4353  * the caller.  Calling this code willy nilly can result in the loss of data.
4354  *
4355  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
4356  * B_CACHE for the non-VMIO case.
4357  */
4358 int
4359 allocbuf(struct buf *bp, int size)
4360 {
4361 	int newbsize;
4362 
4363 	if (bp->b_bcount == size)
4364 		return (1);
4365 
4366 	if (bp->b_kvasize != 0 && bp->b_kvasize < size)
4367 		panic("allocbuf: buffer too small");
4368 
4369 	newbsize = roundup2(size, DEV_BSIZE);
4370 	if ((bp->b_flags & B_VMIO) == 0) {
4371 		if ((bp->b_flags & B_MALLOC) == 0)
4372 			newbsize = round_page(newbsize);
4373 		/*
4374 		 * Just get anonymous memory from the kernel.  Don't
4375 		 * mess with B_CACHE.
4376 		 */
4377 		if (newbsize < bp->b_bufsize)
4378 			vfs_nonvmio_truncate(bp, newbsize);
4379 		else if (newbsize > bp->b_bufsize)
4380 			vfs_nonvmio_extend(bp, newbsize);
4381 	} else {
4382 		int desiredpages;
4383 
4384 		desiredpages = (size == 0) ? 0 :
4385 		    num_pages((bp->b_offset & PAGE_MASK) + newbsize);
4386 
4387 		if (bp->b_flags & B_MALLOC)
4388 			panic("allocbuf: VMIO buffer can't be malloced");
4389 		/*
4390 		 * Set B_CACHE initially if buffer is 0 length or will become
4391 		 * 0-length.
4392 		 */
4393 		if (size == 0 || bp->b_bufsize == 0)
4394 			bp->b_flags |= B_CACHE;
4395 
4396 		if (newbsize < bp->b_bufsize)
4397 			vfs_vmio_truncate(bp, desiredpages);
4398 		/* XXX This looks as if it should be newbsize > b_bufsize */
4399 		else if (size > bp->b_bcount)
4400 			vfs_vmio_extend(bp, desiredpages, size);
4401 		bufspace_adjust(bp, newbsize);
4402 	}
4403 	bp->b_bcount = size;		/* requested buffer size. */
4404 	return (1);
4405 }
4406 
4407 extern int inflight_transient_maps;
4408 
4409 static struct bio_queue nondump_bios;
4410 
4411 void
4412 biodone(struct bio *bp)
4413 {
4414 	struct mtx *mtxp;
4415 	void (*done)(struct bio *);
4416 	vm_offset_t start, end;
4417 
4418 	biotrack(bp, __func__);
4419 
4420 	/*
4421 	 * Avoid completing I/O when dumping after a panic since that may
4422 	 * result in a deadlock in the filesystem or pager code.  Note that
4423 	 * this doesn't affect dumps that were started manually since we aim
4424 	 * to keep the system usable after it has been resumed.
4425 	 */
4426 	if (__predict_false(dumping && SCHEDULER_STOPPED())) {
4427 		TAILQ_INSERT_HEAD(&nondump_bios, bp, bio_queue);
4428 		return;
4429 	}
4430 	if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
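		/*
		 * Undo the transient KVA mapping: mark the bio unmapped
		 * again, remove its page table entries, and return the
		 * KVA range to the transient arena.
		 */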
4431 		bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
4432 		bp->bio_flags |= BIO_UNMAPPED;
4433 		start = trunc_page((vm_offset_t)bp->bio_data);
4434 		end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
4435 		bp->bio_data = unmapped_buf;
4436 		pmap_qremove(start, atop(end - start));
4437 		vmem_free(transient_arena, start, end - start);
4438 		atomic_add_int(&inflight_transient_maps, -1);
4439 	}
4440 	done = bp->bio_done;
4441 	/*
4442 	 * The check for done == biodone is to allow biodone to be
4443 	 * used as a bio_done routine.
4444 	 */
4445 	if (done == NULL || done == biodone) {
4446 		mtxp = mtx_pool_find(mtxpool_sleep, bp);
4447 		mtx_lock(mtxp);
4448 		bp->bio_flags |= BIO_DONE;
4449 		wakeup(bp);
4450 		mtx_unlock(mtxp);
4451 	} else
4452 		done(bp);
4453 }
4454 
4455 /*
4456  * Wait for a BIO to finish.
4457  */
4458 int
4459 biowait(struct bio *bp, const char *wmesg)
4460 {
4461 	struct mtx *mtxp;
4462 
4463 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4464 	mtx_lock(mtxp);
4465 	while ((bp->bio_flags & BIO_DONE) == 0)
4466 		msleep(bp, mtxp, PRIBIO, wmesg, 0);
4467 	mtx_unlock(mtxp);
4468 	if (bp->bio_error != 0)
4469 		return (bp->bio_error);
4470 	if (!(bp->bio_flags & BIO_ERROR))
4471 		return (0);
4472 	return (EIO);
4473 }
4474 
4475 void
4476 biofinish(struct bio *bp, struct devstat *stat, int error)
4477 {
4478 
4479 	if (error) {
4480 		bp->bio_error = error;
4481 		bp->bio_flags |= BIO_ERROR;
4482 	}
4483 	if (stat != NULL)
4484 		devstat_end_transaction_bio(stat, bp);
4485 	biodone(bp);
4486 }
4487 
4488 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4489 void
4490 biotrack_buf(struct bio *bp, const char *location)
4491 {
4492 
4493 	buf_track(bp->bio_track_bp, location);
4494 }
4495 #endif
4496 
4497 /*
4498  *	bufwait:
4499  *
4500  *	Wait for buffer I/O completion, returning error status.  The buffer
4501  *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
4502  *	error and cleared.
4503  */
4504 int
4505 bufwait(struct buf *bp)
4506 {
4507 	if (bp->b_iocmd == BIO_READ)
4508 		bwait(bp, PRIBIO, "biord");
4509 	else
4510 		bwait(bp, PRIBIO, "biowr");
4511 	if (bp->b_flags & B_EINTR) {
4512 		bp->b_flags &= ~B_EINTR;
4513 		return (EINTR);
4514 	}
4515 	if (bp->b_ioflags & BIO_ERROR) {
4516 		return (bp->b_error ? bp->b_error : EIO);
4517 	} else {
4518 		return (0);
4519 	}
4520 }
4521 
4522 /*
4523  *	bufdone:
4524  *
4525  *	Finish I/O on a buffer, optionally calling a completion function.
4526  *	This is usually called from an interrupt so process blocking is
4527  *	not allowed.
4528  *
4529  *	bufdone is also responsible for setting B_CACHE in a B_VMIO bp.
4530  *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
4531  *	assuming B_INVAL is clear.
4532  *
4533  *	For the VMIO case, we set B_CACHE if the op was a read and no
4534  *	read error occurred, or if the op was a write.  B_CACHE is never
4535  *	set if the buffer is invalid or otherwise uncacheable.
4536  *
4537  *	bufdone does not mess with B_INVAL, allowing the I/O routine or the
4538  *	initiator to leave B_INVAL set to brelse the buffer out of existence
4539  *	in the biodone routine.
4540  */
4541 void
4542 bufdone(struct buf *bp)
4543 {
4544 	struct bufobj *dropobj;
4545 	void    (*biodone)(struct buf *);
4546 
4547 	buf_track(bp, __func__);
4548 	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
4549 	dropobj = NULL;
4550 
4551 	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
4552 
4553 	runningbufwakeup(bp);
4554 	if (bp->b_iocmd == BIO_WRITE)
4555 		dropobj = bp->b_bufobj;
4556 	/* call optional completion function if requested */
4557 	if (bp->b_iodone != NULL) {
4558 		biodone = bp->b_iodone;
4559 		bp->b_iodone = NULL;
4560 		(*biodone) (bp);
4561 		if (dropobj)
4562 			bufobj_wdrop(dropobj);
4563 		return;
4564 	}
4565 	if (bp->b_flags & B_VMIO) {
4566 		/*
4567 		 * Set B_CACHE if the op was a normal read and no error
4568 		 * occurred.  B_CACHE is set for writes in the b*write()
4569 		 * routines.
4570 		 */
4571 		if (bp->b_iocmd == BIO_READ &&
4572 		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
4573 		    !(bp->b_ioflags & BIO_ERROR))
4574 			bp->b_flags |= B_CACHE;
4575 		vfs_vmio_iodone(bp);
4576 	}
4577 	if (!LIST_EMPTY(&bp->b_dep))
4578 		buf_complete(bp);
4579 	if ((bp->b_flags & B_CKHASH) != 0) {
4580 		KASSERT(bp->b_iocmd == BIO_READ,
4581 		    ("bufdone: b_iocmd %d not BIO_READ", bp->b_iocmd));
4582 		KASSERT(buf_mapped(bp), ("bufdone: bp %p not mapped", bp));
4583 		(*bp->b_ckhashcalc)(bp);
4584 	}
4585 	/*
4586 	 * For asynchronous completions, release the buffer now. The brelse
4587 	 * will do a wakeup there if necessary - so no need to do a wakeup
4588 	 * here in the async case. The sync case always needs to do a wakeup.
4589 	 */
4590 	if (bp->b_flags & B_ASYNC) {
4591 		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) ||
4592 		    (bp->b_ioflags & BIO_ERROR))
4593 			brelse(bp);
4594 		else
4595 			bqrelse(bp);
4596 	} else
4597 		bdone(bp);
4598 	if (dropobj)
4599 		bufobj_wdrop(dropobj);
4600 }
4601 
4602 /*
4603  * This routine is called in lieu of iodone in the case of
4604  * incomplete I/O.  This keeps the busy status for pages
4605  * consistent.
4606  */
4607 void
4608 vfs_unbusy_pages(struct buf *bp)
4609 {
4610 	int i;
4611 	vm_object_t obj;
4612 	vm_page_t m;
4613 
4614 	runningbufwakeup(bp);
4615 	if (!(bp->b_flags & B_VMIO))
4616 		return;
4617 
4618 	obj = bp->b_bufobj->bo_object;
4619 	for (i = 0; i < bp->b_npages; i++) {
4620 		m = bp->b_pages[i];
4621 		if (m == bogus_page) {
4622 			m = vm_page_relookup(obj, OFF_TO_IDX(bp->b_offset) + i);
4623 			if (!m)
4624 				panic("vfs_unbusy_pages: page missing\n");
4625 			bp->b_pages[i] = m;
4626 			if (buf_mapped(bp)) {
4627 				BUF_CHECK_MAPPED(bp);
4628 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4629 				    bp->b_pages, bp->b_npages);
4630 			} else
4631 				BUF_CHECK_UNMAPPED(bp);
4632 		}
4633 		vm_page_sunbusy(m);
4634 	}
4635 	vm_object_pip_wakeupn(obj, bp->b_npages);
4636 }
4637 
4638 /*
4639  * vfs_page_set_valid:
4640  *
4641  *	Set the valid bits in a page based on the supplied offset.   The
4642  *	range is restricted to the buffer's size.
4643  *
4644  *	This routine is typically called after a read completes.
4645  */
4646 static void
4647 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4648 {
4649 	vm_ooffset_t eoff;
4650 
4651 	/*
4652 	 * Compute the end offset, eoff, such that [off, eoff) does not span a
4653 	 * page boundary and eoff is not greater than the end of the buffer.
4654 	 * The end of the buffer, in this case, is our file EOF, not the
4655 	 * allocation size of the buffer.
4656 	 */
4657 	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
4658 	if (eoff > bp->b_offset + bp->b_bcount)
4659 		eoff = bp->b_offset + bp->b_bcount;
4660 
4661 	/*
4662 	 * Set valid range.  This is typically the entire buffer and thus the
4663 	 * entire page.
4664 	 */
4665 	if (eoff > off)
4666 		vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
4667 }
4668 
4669 /*
4670  * vfs_page_set_validclean:
4671  *
4672  *	Set the valid bits and clear the dirty bits in a page based on the
4673  *	supplied offset.   The range is restricted to the buffer's size.
4674  */
4675 static void
4676 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4677 {
4678 	vm_ooffset_t soff, eoff;
4679 
4680 	/*
4681 	 * Start and end offsets in buffer.  eoff - soff may not cross a
4682 	 * page boundary or cross the end of the buffer.  The end of the
4683 	 * buffer, in this case, is our file EOF, not the allocation size
4684 	 * of the buffer.
4685 	 */
4686 	soff = off;
4687 	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4688 	if (eoff > bp->b_offset + bp->b_bcount)
4689 		eoff = bp->b_offset + bp->b_bcount;
4690 
4691 	/*
4692 	 * Set valid range.  This is typically the entire buffer and thus the
4693 	 * entire page.
4694 	 */
4695 	if (eoff > soff) {
4696 		vm_page_set_validclean(
4697 		    m,
4698 		   (vm_offset_t) (soff & PAGE_MASK),
4699 		   (vm_offset_t) (eoff - soff)
4700 		);
4701 	}
4702 }
4703 
4704 /*
4705  * Acquire a shared busy on all pages in the buf.
4706  */
4707 void
4708 vfs_busy_pages_acquire(struct buf *bp)
4709 {
4710 	int i;
4711 
4712 	for (i = 0; i < bp->b_npages; i++)
4713 		vm_page_busy_acquire(bp->b_pages[i], VM_ALLOC_SBUSY);
4714 }
4715 
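/*
 * Release the shared busy state acquired by vfs_busy_pages_acquire().
 */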
4716 void
4717 vfs_busy_pages_release(struct buf *bp)
4718 {
4719 	int i;
4720 
4721 	for (i = 0; i < bp->b_npages; i++)
4722 		vm_page_sunbusy(bp->b_pages[i]);
4723 }
4724 
4725 /*
4726  * This routine is called before a device strategy routine.
4727  * It is used to tell the VM system that paging I/O is in
4728  * progress, and treat the pages associated with the buffer
4729  * almost as being exclusive busy.  Also the object paging_in_progress
4730  * flag is handled to make sure that the object doesn't become
4731  * inconsistent.
4732  *
4733  * Since I/O has not been initiated yet, certain buffer flags
4734  * such as BIO_ERROR or B_INVAL may be in an inconsistent state
4735  * and should be ignored.
4736  */
4737 void
4738 vfs_busy_pages(struct buf *bp, int clear_modify)
4739 {
4740 	vm_object_t obj;
4741 	vm_ooffset_t foff;
4742 	vm_page_t m;
4743 	int i;
4744 	bool bogus;
4745 
4746 	if (!(bp->b_flags & B_VMIO))
4747 		return;
4748 
4749 	obj = bp->b_bufobj->bo_object;
4750 	foff = bp->b_offset;
4751 	KASSERT(bp->b_offset != NOOFFSET,
4752 	    ("vfs_busy_pages: no buffer offset"));
4753 	if ((bp->b_flags & B_CLUSTER) == 0) {
4754 		vm_object_pip_add(obj, bp->b_npages);
4755 		vfs_busy_pages_acquire(bp);
4756 	}
4757 	if (bp->b_bufsize != 0)
4758 		vfs_setdirty_range(bp);
4759 	bogus = false;
4760 	for (i = 0; i < bp->b_npages; i++) {
4761 		m = bp->b_pages[i];
4762 		vm_page_assert_sbusied(m);
4763 
4764 		/*
4765 		 * When readying a buffer for a read ( i.e.
4766 		 * clear_modify == 0 ), it is important to do
4767 		 * bogus_page replacement for valid pages in
4768 		 * partially instantiated buffers.  Partially
4769 		 * instantiated buffers can, in turn, occur when
4770 		 * reconstituting a buffer from its VM backing store
4771 		 * base.  We only have to do this if B_CACHE is
4772 		 * clear ( which causes the I/O to occur in the
4773 		 * first place ).  The replacement prevents the read
4774 		 * I/O from overwriting potentially dirty VM-backed
4775 		 * pages.  XXX bogus page replacement is, uh, bogus.
4776 		 * It may not work properly with small-block devices.
4777 		 * We need to find a better way.
4778 		 */
4779 		if (clear_modify) {
4780 			pmap_remove_write(m);
4781 			vfs_page_set_validclean(bp, foff, m);
4782 		} else if (vm_page_all_valid(m) &&
4783 		    (bp->b_flags & B_CACHE) == 0) {
4784 			bp->b_pages[i] = bogus_page;
4785 			bogus = true;
4786 		}
4787 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4788 	}
4789 	if (bogus && buf_mapped(bp)) {
4790 		BUF_CHECK_MAPPED(bp);
4791 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4792 		    bp->b_pages, bp->b_npages);
4793 	}
4794 }
4795 
4796 /*
4797  *	vfs_bio_set_valid:
4798  *
4799  *	Set the range within the buffer to valid.  The range is
4800  *	relative to the beginning of the buffer, b_offset.  Note that
4801  *	b_offset itself may be offset from the beginning of the first
4802  *	page.
4803  */
4804 void
4805 vfs_bio_set_valid(struct buf *bp, int base, int size)
4806 {
4807 	int i, n;
4808 	vm_page_t m;
4809 
4810 	if (!(bp->b_flags & B_VMIO))
4811 		return;
4812 
4813 	/*
4814 	 * Fixup base to be relative to beginning of first page.
4815 	 * Set initial n to be the maximum number of bytes in the
4816 	 * first page that can be validated.
4817 	 */
4818 	base += (bp->b_offset & PAGE_MASK);
4819 	n = PAGE_SIZE - (base & PAGE_MASK);
4820 
4821 	/*
4822 	 * Busy may not be strictly necessary here because the pages are
4823 	 * unlikely to be fully valid and the vnode lock will synchronize
4824 	 * their access via getpages.  It is grabbed for consistency with
4825 	 * other page validation.
4826 	 */
4827 	vfs_busy_pages_acquire(bp);
4828 	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4829 		m = bp->b_pages[i];
4830 		if (n > size)
4831 			n = size;
4832 		vm_page_set_valid_range(m, base & PAGE_MASK, n);
4833 		base += n;
4834 		size -= n;
4835 		n = PAGE_SIZE;
4836 	}
4837 	vfs_busy_pages_release(bp);
4838 }
4839 
4840 /*
4841  *	vfs_bio_clrbuf:
4842  *
4843  *	If the specified buffer is a non-VMIO buffer, clear the entire
4844  *	buffer.  If the specified buffer is a VMIO buffer, clear and
4845  *	validate only the previously invalid portions of the buffer.
4846  *	This routine essentially fakes an I/O, so we need to clear
4847  *	BIO_ERROR and B_INVAL.
4848  *
4849  *	Note that while we only theoretically need to clear through b_bcount,
4850  *	we go ahead and clear through b_bufsize.
4851  */
4852 void
4853 vfs_bio_clrbuf(struct buf *bp)
4854 {
4855 	int i, j, sa, ea, slide, zbits;
4856 	vm_page_bits_t mask;
4857 
4858 	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4859 		clrbuf(bp);
4860 		return;
4861 	}
4862 	bp->b_flags &= ~B_INVAL;
4863 	bp->b_ioflags &= ~BIO_ERROR;
4864 	vfs_busy_pages_acquire(bp);
4865 	sa = bp->b_offset & PAGE_MASK;
4866 	slide = 0;
4867 	for (i = 0; i < bp->b_npages; i++, sa = 0) {
4868 		slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4869 		ea = slide & PAGE_MASK;
4870 		if (ea == 0)
4871 			ea = PAGE_SIZE;
4872 		if (bp->b_pages[i] == bogus_page)
4873 			continue;
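		/*
		 * Build a valid-bits mask covering the DEV_BSIZE-sized
		 * blocks in [sa, ea) within this page, one bit per block.
		 */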
4874 		j = sa / DEV_BSIZE;
4875 		zbits = (sizeof(vm_page_bits_t) * NBBY) -
4876 		    (ea - sa) / DEV_BSIZE;
4877 		mask = (VM_PAGE_BITS_ALL >> zbits) << j;
4878 		if ((bp->b_pages[i]->valid & mask) == mask)
4879 			continue;
4880 		if ((bp->b_pages[i]->valid & mask) == 0)
4881 			pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4882 		else {
4883 			for (; sa < ea; sa += DEV_BSIZE, j++) {
4884 				if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4885 					pmap_zero_page_area(bp->b_pages[i],
4886 					    sa, DEV_BSIZE);
4887 				}
4888 			}
4889 		}
4890 		vm_page_set_valid_range(bp->b_pages[i], j * DEV_BSIZE,
4891 		    roundup2(ea - sa, DEV_BSIZE));
4892 	}
4893 	vfs_busy_pages_release(bp);
4894 	bp->b_resid = 0;
4895 }
4896 
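/*
 * Zero size bytes of the buffer starting at base, handling both mapped
 * and unmapped buffers.
 */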
4897 void
4898 vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4899 {
4900 	vm_page_t m;
4901 	int i, n;
4902 
4903 	if (buf_mapped(bp)) {
4904 		BUF_CHECK_MAPPED(bp);
4905 		bzero(bp->b_data + base, size);
4906 	} else {
4907 		BUF_CHECK_UNMAPPED(bp);
4908 		n = PAGE_SIZE - (base & PAGE_MASK);
4909 		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4910 			m = bp->b_pages[i];
4911 			if (n > size)
4912 				n = size;
4913 			pmap_zero_page_area(m, base & PAGE_MASK, n);
4914 			base += n;
4915 			size -= n;
4916 			n = PAGE_SIZE;
4917 		}
4918 	}
4919 }
4920 
4921 /*
4922  * Update buffer flags based on I/O request parameters, optionally releasing the
4923  * buffer.  If it's VMIO or direct I/O, the buffer pages are released to the VM,
4924  * where they may be placed on a page queue (VMIO) or freed immediately (direct
4925  * I/O).  Otherwise the buffer is released to the cache.
4926  */
4927 static void
4928 b_io_dismiss(struct buf *bp, int ioflag, bool release)
4929 {
4930 
4931 	KASSERT((ioflag & IO_NOREUSE) == 0 || (ioflag & IO_VMIO) != 0,
4932 	    ("buf %p non-VMIO noreuse", bp));
4933 
4934 	if ((ioflag & IO_DIRECT) != 0)
4935 		bp->b_flags |= B_DIRECT;
4936 	if ((ioflag & IO_EXT) != 0)
4937 		bp->b_xflags |= BX_ALTDATA;
4938 	if ((ioflag & (IO_VMIO | IO_DIRECT)) != 0 && LIST_EMPTY(&bp->b_dep)) {
4939 		bp->b_flags |= B_RELBUF;
4940 		if ((ioflag & IO_NOREUSE) != 0)
4941 			bp->b_flags |= B_NOREUSE;
4942 		if (release)
4943 			brelse(bp);
4944 	} else if (release)
4945 		bqrelse(bp);
4946 }
4947 
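/*
 * Update the buffer flags for an I/O request and release the buffer.
 */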
4948 void
4949 vfs_bio_brelse(struct buf *bp, int ioflag)
4950 {
4951 
4952 	b_io_dismiss(bp, ioflag, true);
4953 }
4954 
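/*
 * Update the buffer flags for an I/O request without releasing the buffer.
 */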
4955 void
4956 vfs_bio_set_flags(struct buf *bp, int ioflag)
4957 {
4958 
4959 	b_io_dismiss(bp, ioflag, false);
4960 }
4961 
4962 /*
4963  * vm_hold_load_pages and vm_hold_free_pages get pages into
4964  * a buffer's address space.  The pages are anonymous and are
4965  * not associated with a file object.
4966  */
4967 static void
4968 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4969 {
4970 	vm_offset_t pg;
4971 	vm_page_t p;
4972 	int index;
4973 
4974 	BUF_CHECK_MAPPED(bp);
4975 
4976 	to = round_page(to);
4977 	from = round_page(from);
4978 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4979 	MPASS((bp->b_flags & B_MAXPHYS) == 0);
4980 	KASSERT(to - from <= maxbcachebuf,
4981 	    ("vm_hold_load_pages too large %p %#jx %#jx %u",
4982 	    bp, (uintmax_t)from, (uintmax_t)to, maxbcachebuf));
4983 
4984 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
4985 		/*
4986 		 * note: must allocate system pages since blocking here
4987 		 * could interfere with paging I/O, no matter which
4988 		 * process we are.
4989 		 */
4990 		p = vm_page_alloc_noobj(VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
4991 		    VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT) | VM_ALLOC_WAITOK);
4992 		pmap_qenter(pg, &p, 1);
4993 		bp->b_pages[index] = p;
4994 	}
4995 	bp->b_npages = index;
4996 }
4997 
4998 /* Return pages associated with this buf to the vm system */
4999 static void
5000 vm_hold_free_pages(struct buf *bp, int newbsize)
5001 {
5002 	vm_offset_t from;
5003 	vm_page_t p;
5004 	int index, newnpages;
5005 
5006 	BUF_CHECK_MAPPED(bp);
5007 
5008 	from = round_page((vm_offset_t)bp->b_data + newbsize);
5009 	newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
5010 	if (bp->b_npages > newnpages)
5011 		pmap_qremove(from, bp->b_npages - newnpages);
5012 	for (index = newnpages; index < bp->b_npages; index++) {
5013 		p = bp->b_pages[index];
5014 		bp->b_pages[index] = NULL;
5015 		vm_page_unwire_noq(p);
5016 		vm_page_free(p);
5017 	}
5018 	bp->b_npages = newnpages;
5019 }
5020 
5021 /*
5022  * Map an IO request into kernel virtual address space.
5023  *
5024  * All requests are (re)mapped into kernel VA space.
5025  * Notice that we use b_bufsize for the size of the buffer
5026  * to be mapped.  b_bcount might be modified by the driver.
5027  *
5028  * Note that even if the caller determines that the address space should
5029  * be valid, a race or a smaller file mapped into a larger space may
5030  * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
5031  * check the return value.
5032  *
5033  * This function only works with pager buffers.
5034  */
5035 int
5036 vmapbuf(struct buf *bp, void *uaddr, size_t len, int mapbuf)
5037 {
5038 	vm_prot_t prot;
5039 	int pidx;
5040 
5041 	MPASS((bp->b_flags & B_MAXPHYS) != 0);
5042 	prot = VM_PROT_READ;
5043 	if (bp->b_iocmd == BIO_READ)
5044 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
5045 	pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
5046 	    (vm_offset_t)uaddr, len, prot, bp->b_pages, PBUF_PAGES);
5047 	if (pidx < 0)
5048 		return (-1);
5049 	bp->b_bufsize = len;
5050 	bp->b_npages = pidx;
5051 	bp->b_offset = ((vm_offset_t)uaddr) & PAGE_MASK;
5052 	if (mapbuf || !unmapped_buf_allowed) {
5053 		pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
5054 		bp->b_data = bp->b_kvabase + bp->b_offset;
5055 	} else
5056 		bp->b_data = unmapped_buf;
5057 	return (0);
5058 }
5059 
5060 /*
5061  * Free the io map PTEs associated with this IO operation.
5062  * We also invalidate the TLB entries and restore the original b_addr.
5063  *
5064  * This function only works with pager buffers.
5065  */
5066 void
5067 vunmapbuf(struct buf *bp)
5068 {
5069 	int npages;
5070 
5071 	npages = bp->b_npages;
5072 	if (buf_mapped(bp))
5073 		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
5074 	vm_page_unhold_pages(bp->b_pages, npages);
5075 
5076 	bp->b_data = unmapped_buf;
5077 }
5078 
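/*
 * Mark the buffer done and wake up any thread sleeping in bwait().
 */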
5079 void
5080 bdone(struct buf *bp)
5081 {
5082 	struct mtx *mtxp;
5083 
5084 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
5085 	mtx_lock(mtxp);
5086 	bp->b_flags |= B_DONE;
5087 	wakeup(bp);
5088 	mtx_unlock(mtxp);
5089 }
5090 
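/*
 * Sleep at the given priority until bdone() marks the buffer done.
 */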
5091 void
5092 bwait(struct buf *bp, u_char pri, const char *wchan)
5093 {
5094 	struct mtx *mtxp;
5095 
5096 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
5097 	mtx_lock(mtxp);
5098 	while ((bp->b_flags & B_DONE) == 0)
5099 		msleep(bp, mtxp, pri, wchan, 0);
5100 	mtx_unlock(mtxp);
5101 }
5102 
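/*
 * Sync a buffer object by fsync'ing its associated vnode.
 */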
5103 int
5104 bufsync(struct bufobj *bo, int waitfor)
5105 {
5106 
5107 	return (VOP_FSYNC(bo2vnode(bo), waitfor, curthread));
5108 }
5109 
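/*
 * Dispatch a buffer to the vnode's VOP_STRATEGY() routine.  Device
 * vnodes are not handled here, as the asserts below verify.
 */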
5110 void
5111 bufstrategy(struct bufobj *bo, struct buf *bp)
5112 {
5113 	int i __unused;
5114 	struct vnode *vp;
5115 
5116 	vp = bp->b_vp;
5117 	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
5118 	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
5119 	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
5120 	i = VOP_STRATEGY(vp, bp);
5121 	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
5122 }
5123 
5124 /*
5125  * Initialize a struct bufobj before use.  Memory is assumed zero filled.
5126  */
5127 void
5128 bufobj_init(struct bufobj *bo, void *private)
5129 {
5130 	static volatile int bufobj_cleanq;
5131 
5132 	bo->bo_domain =
5133 	    atomic_fetchadd_int(&bufobj_cleanq, 1) % buf_domains;
5134 	rw_init(BO_LOCKPTR(bo), "bufobj interlock");
5135 	bo->bo_private = private;
5136 	TAILQ_INIT(&bo->bo_clean.bv_hd);
5137 	TAILQ_INIT(&bo->bo_dirty.bv_hd);
5138 }
5139 
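/*
 * Record the start of a write on the bufobj; the caller must already
 * hold the bufobj lock.
 */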
5140 void
5141 bufobj_wrefl(struct bufobj *bo)
5142 {
5143 
5144 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
5145 	ASSERT_BO_WLOCKED(bo);
5146 	bo->bo_numoutput++;
5147 }
5148 
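/*
 * Record the start of a write on the bufobj, taking the bufobj lock.
 */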
5149 void
5150 bufobj_wref(struct bufobj *bo)
5151 {
5152 
5153 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
5154 	BO_LOCK(bo);
5155 	bo->bo_numoutput++;
5156 	BO_UNLOCK(bo);
5157 }
5158 
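/*
 * Record the completion of a write on the bufobj, waking up any thread
 * waiting in bufobj_wwait() once the output count drops to zero.
 */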
5159 void
5160 bufobj_wdrop(struct bufobj *bo)
5161 {
5162 
5163 	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
5164 	BO_LOCK(bo);
5165 	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
5166 	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
5167 		bo->bo_flag &= ~BO_WWAIT;
5168 		wakeup(&bo->bo_numoutput);
5169 	}
5170 	BO_UNLOCK(bo);
5171 }
5172 
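/*
 * Wait until all writes in progress on the bufobj have completed.
 */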
5173 int
5174 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
5175 {
5176 	int error;
5177 
5178 	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
5179 	ASSERT_BO_WLOCKED(bo);
5180 	error = 0;
5181 	while (bo->bo_numoutput) {
5182 		bo->bo_flag |= BO_WWAIT;
5183 		error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo),
5184 		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
5185 		if (error)
5186 			break;
5187 	}
5188 	return (error);
5189 }
5190 
5191 /*
5192  * Set bio_data or bio_ma for struct bio from the struct buf.
5193  */
5194 void
5195 bdata2bio(struct buf *bp, struct bio *bip)
5196 {
5197 
5198 	if (!buf_mapped(bp)) {
5199 		KASSERT(unmapped_buf_allowed, ("unmapped"));
5200 		bip->bio_ma = bp->b_pages;
5201 		bip->bio_ma_n = bp->b_npages;
5202 		bip->bio_data = unmapped_buf;
5203 		bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
5204 		bip->bio_flags |= BIO_UNMAPPED;
5205 		KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
5206 		    PAGE_SIZE == bp->b_npages,
5207 		    ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
5208 		    (long long)bip->bio_length, bip->bio_ma_n));
5209 	} else {
5210 		bip->bio_data = bp->b_data;
5211 		bip->bio_ma = NULL;
5212 	}
5213 }
5214 
5215 static int buf_pager_relbuf;
5216 SYSCTL_INT(_vfs, OID_AUTO, buf_pager_relbuf, CTLFLAG_RWTUN,
5217     &buf_pager_relbuf, 0,
5218     "Make buffer pager release buffers after reading");
5219 
5220 /*
5221  * The buffer pager.  It uses buffer reads to validate pages.
5222  *
5223  * In contrast to the generic local pager from vm/vnode_pager.c, this
5224  * pager correctly and easily handles volumes where the underlying
5225  * device block size is greater than the machine page size.  The
5226  * buffer cache transparently extends the requested page run to be
5227  * aligned at the block boundary, and does the necessary bogus page
5228  * replacements in the addends to avoid obliterating already valid
5229  * pages.
5230  *
5231  * The only non-trivial issue is that the exclusive busy state for
5232  * pages, which is assumed by the vm_pager_getpages() interface, is
5233  * incompatible with the VMIO buffer cache's desire to share-busy the
5234  * pages.  This function performs a trivial downgrade of the pages'
5235  * state before reading buffers, and a less trivial upgrade from the
5236  * shared-busy to excl-busy state after the read.
5237  */
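
/*
 * A hedged sketch of how a filesystem might hook this pager up from its
 * VOP_GETPAGES method; the xx_* helpers are hypothetical and assume a
 * fixed block size taken from the mount statistics:
 *
 *	static daddr_t
 *	xx_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
 *	{
 *		return (off / vp->v_mount->mnt_stat.f_iosize);
 *	}
 *
 *	static int
 *	xx_gbp_getblksz(struct vnode *vp, daddr_t lbn, long *sz)
 *	{
 *		*sz = vp->v_mount->mnt_stat.f_iosize;
 *		return (0);
 *	}
 *
 *	error = vfs_bio_getpages(vp, ma, count, rbehind, rahead,
 *	    xx_gbp_getblkno, xx_gbp_getblksz);
 */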
5238 int
5239 vfs_bio_getpages(struct vnode *vp, vm_page_t *ma, int count,
5240     int *rbehind, int *rahead, vbg_get_lblkno_t get_lblkno,
5241     vbg_get_blksize_t get_blksize)
5242 {
5243 	vm_page_t m;
5244 	vm_object_t object;
5245 	struct buf *bp;
5246 	struct mount *mp;
5247 	daddr_t lbn, lbnp;
5248 	vm_ooffset_t la, lb, poff, poffe;
5249 	long bo_bs, bsize;
5250 	int br_flags, error, i, pgsin, pgsin_a, pgsin_b;
5251 	bool redo, lpart;
5252 
5253 	object = vp->v_object;
5254 	mp = vp->v_mount;
5255 	error = 0;
5256 	la = IDX_TO_OFF(ma[count - 1]->pindex);
5257 	if (la >= object->un_pager.vnp.vnp_size)
5258 		return (VM_PAGER_BAD);
5259 
5260 	/*
5261 	 * Change the meaning of la from where the last requested page starts
5262 	 * to where it ends, because that's the end of the requested region
5263 	 * and the start of the potential read-ahead region.
5264 	 */
5265 	la += PAGE_SIZE;
5266 	lpart = la > object->un_pager.vnp.vnp_size;
5267 	error = get_blksize(vp, get_lblkno(vp, IDX_TO_OFF(ma[0]->pindex)),
5268 	    &bo_bs);
5269 	if (error != 0)
5270 		return (VM_PAGER_ERROR);
5271 
5272 	/*
5273 	 * Calculate read-ahead, behind and total pages.
5274 	 */
5275 	pgsin = count;
5276 	lb = IDX_TO_OFF(ma[0]->pindex);
5277 	pgsin_b = OFF_TO_IDX(lb - rounddown2(lb, bo_bs));
5278 	pgsin += pgsin_b;
5279 	if (rbehind != NULL)
5280 		*rbehind = pgsin_b;
5281 	pgsin_a = OFF_TO_IDX(roundup2(la, bo_bs) - la);
5282 	if (la + IDX_TO_OFF(pgsin_a) >= object->un_pager.vnp.vnp_size)
5283 		pgsin_a = OFF_TO_IDX(roundup2(object->un_pager.vnp.vnp_size,
5284 		    PAGE_SIZE) - la);
5285 	pgsin += pgsin_a;
5286 	if (rahead != NULL)
5287 		*rahead = pgsin_a;
5288 	VM_CNT_INC(v_vnodein);
5289 	VM_CNT_ADD(v_vnodepgsin, pgsin);
5290 
5291 	br_flags = (mp != NULL && (mp->mnt_kern_flag & MNTK_UNMAPPED_BUFS)
5292 	    != 0) ? GB_UNMAPPED : 0;
5293 again:
5294 	for (i = 0; i < count; i++) {
5295 		if (ma[i] != bogus_page)
5296 			vm_page_busy_downgrade(ma[i]);
5297 	}
5298 
5299 	lbnp = -1;
5300 	for (i = 0; i < count; i++) {
5301 		m = ma[i];
5302 		if (m == bogus_page)
5303 			continue;
5304 
5305 		/*
5306 		 * Pages are shared busy and the object lock is not
5307 		 * owned, which together allow for the pages'
5308 		 * invalidation.  The racy test for validity avoids
5309 		 * useless creation of the buffer for the most typical
5310 		 * case when invalidation is not used in redo or for
5311 		 * parallel read.  The shared->excl upgrade loop at
5312 		 * the end of the function catches the race in a
5313 		 * reliable way (protected by the object lock).
5314 		 */
5315 		if (vm_page_all_valid(m))
5316 			continue;
5317 
5318 		poff = IDX_TO_OFF(m->pindex);
5319 		poffe = MIN(poff + PAGE_SIZE, object->un_pager.vnp.vnp_size);
5320 		for (; poff < poffe; poff += bsize) {
5321 			lbn = get_lblkno(vp, poff);
5322 			if (lbn == lbnp)
5323 				goto next_page;
5324 			lbnp = lbn;
5325 
5326 			error = get_blksize(vp, lbn, &bsize);
5327 			if (error == 0)
5328 				error = bread_gb(vp, lbn, bsize,
5329 				    curthread->td_ucred, br_flags, &bp);
5330 			if (error != 0)
5331 				goto end_pages;
5332 			if (bp->b_rcred == curthread->td_ucred) {
5333 				crfree(bp->b_rcred);
5334 				bp->b_rcred = NOCRED;
5335 			}
5336 			if (LIST_EMPTY(&bp->b_dep)) {
5337 				/*
5338 				 * Invalidation clears m->valid, but
5339 				 * may leave B_CACHE flag if the
5340 				 * buffer existed at the invalidation
5341 				 * time.  In this case, recycle the
5342 				 * buffer to do real read on next
5343 				 * bread() after redo.
5344 				 *
5345 				 * Otherwise B_RELBUF is not strictly
5346 				 * necessary, enable to reduce buf
5347 				 * cache pressure.
5348 				 */
5349 				if (buf_pager_relbuf ||
5350 				    !vm_page_all_valid(m))
5351 					bp->b_flags |= B_RELBUF;
5352 
5353 				bp->b_flags &= ~B_NOCACHE;
5354 				brelse(bp);
5355 			} else {
5356 				bqrelse(bp);
5357 			}
5358 		}
5359 		KASSERT(1 /* racy, enable for debugging */ ||
5360 		    vm_page_all_valid(m) || i == count - 1,
5361 		    ("buf %d %p invalid", i, m));
5362 		if (i == count - 1 && lpart) {
5363 			if (!vm_page_none_valid(m) &&
5364 			    !vm_page_all_valid(m))
5365 				vm_page_zero_invalid(m, TRUE);
5366 		}
5367 next_page:;
5368 	}
5369 end_pages:
5370 
5371 	redo = false;
5372 	for (i = 0; i < count; i++) {
5373 		if (ma[i] == bogus_page)
5374 			continue;
5375 		if (vm_page_busy_tryupgrade(ma[i]) == 0) {
5376 			vm_page_sunbusy(ma[i]);
5377 			ma[i] = vm_page_grab_unlocked(object, ma[i]->pindex,
5378 			    VM_ALLOC_NORMAL);
5379 		}
5380 
5381 		/*
5382 		 * Since the pages were only sbusy while neither the
5383 		 * buffer nor the object lock was held by us, or
5384 		 * reallocated while vm_page_grab() slept for busy
5385 		 * relinquish, they could have been invalidated.
5386 		 * Recheck the valid bits and re-read as needed.
5387 		 *
5388 		 * Note that the last page is made fully valid in the
5389 		 * read loop, and partial validity for the page at
5390 		 * index count - 1 could mean that the page was
5391 		 * invalidated or removed, so we must restart for
5392 		 * safety as well.
5393 		 */
5394 		if (!vm_page_all_valid(ma[i]))
5395 			redo = true;
5396 	}
5397 	if (redo && error == 0)
5398 		goto again;
5399 	return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
5400 }
5401 
5402 #include "opt_ddb.h"
5403 #ifdef DDB
5404 #include <ddb/ddb.h>
5405 
5406 /* DDB command to show buffer data */
5407 DB_SHOW_COMMAND(buffer, db_show_buffer)
5408 {
5409 	/* get args */
5410 	struct buf *bp = (struct buf *)addr;
5411 #ifdef FULL_BUF_TRACKING
5412 	uint32_t i, j;
5413 #endif
5414 
5415 	if (!have_addr) {
5416 		db_printf("usage: show buffer <addr>\n");
5417 		return;
5418 	}
5419 
5420 	db_printf("buf at %p\n", bp);
5421 	db_printf("b_flags = 0x%b, b_xflags=0x%b\n",
5422 	    (u_int)bp->b_flags, PRINT_BUF_FLAGS,
5423 	    (u_int)bp->b_xflags, PRINT_BUF_XFLAGS);
5424 	db_printf("b_vflags=0x%b, b_ioflags=0x%b\n",
5425 	    (u_int)bp->b_vflags, PRINT_BUF_VFLAGS,
5426 	    (u_int)bp->b_ioflags, PRINT_BIO_FLAGS);
5427 	db_printf(
5428 	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
5429 	    "b_bufobj = (%p), b_data = %p,\nb_blkno = %jd, b_lblkno = %jd, "
5430 	    "b_vp = %p, b_dep = %p\n",
5431 	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
5432 	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
5433 	    (intmax_t)bp->b_lblkno, bp->b_vp, bp->b_dep.lh_first);
5434 	db_printf("b_kvabase = %p, b_kvasize = %d\n",
5435 	    bp->b_kvabase, bp->b_kvasize);
5436 	if (bp->b_npages) {
5437 		int i;
5438 		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
5439 		for (i = 0; i < bp->b_npages; i++) {
5440 			vm_page_t m;
5441 			m = bp->b_pages[i];
5442 			if (m != NULL)
5443 				db_printf("(%p, 0x%lx, 0x%lx)", m->object,
5444 				    (u_long)m->pindex,
5445 				    (u_long)VM_PAGE_TO_PHYS(m));
5446 			else
5447 				db_printf("( ??? )");
5448 			if ((i + 1) < bp->b_npages)
5449 				db_printf(",");
5450 		}
5451 		db_printf("\n");
5452 	}
5453 	BUF_LOCKPRINTINFO(bp);
5454 #if defined(FULL_BUF_TRACKING)
5455 	db_printf("b_io_tracking: b_io_tcnt = %u\n", bp->b_io_tcnt);
5456 
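	/*
	 * Walk the tracking ring backwards, from the most recent entry
	 * (j == 1) to the oldest.
	 */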
5457 	i = bp->b_io_tcnt % BUF_TRACKING_SIZE;
5458 	for (j = 1; j <= BUF_TRACKING_SIZE; j++) {
5459 		if (bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)] == NULL)
5460 			continue;
5461 		db_printf(" %2u: %s\n", j,
5462 		    bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)]);
5463 	}
5464 #elif defined(BUF_TRACKING)
5465 	db_printf("b_io_tracking: %s\n", bp->b_io_tracking);
5466 #endif
5467 	db_printf(" ");
5468 }
5469 
5470 DB_SHOW_COMMAND_FLAGS(bufqueues, bufqueues, DB_CMD_MEMSAFE)
5471 {
5472 	struct bufdomain *bd;
5473 	struct buf *bp;
5474 	long total;
5475 	int i, j, cnt;
5476 
5477 	db_printf("bqempty: %d\n", bqempty.bq_len);
5478 
5479 	for (i = 0; i < buf_domains; i++) {
5480 		bd = &bdomain[i];
5481 		db_printf("Buf domain %d\n", i);
5482 		db_printf("\tfreebufs\t%d\n", bd->bd_freebuffers);
5483 		db_printf("\tlofreebufs\t%d\n", bd->bd_lofreebuffers);
5484 		db_printf("\thifreebufs\t%d\n", bd->bd_hifreebuffers);
5485 		db_printf("\n");
5486 		db_printf("\tbufspace\t%ld\n", bd->bd_bufspace);
5487 		db_printf("\tmaxbufspace\t%ld\n", bd->bd_maxbufspace);
5488 		db_printf("\thibufspace\t%ld\n", bd->bd_hibufspace);
5489 		db_printf("\tlobufspace\t%ld\n", bd->bd_lobufspace);
5490 		db_printf("\tbufspacethresh\t%ld\n", bd->bd_bufspacethresh);
5491 		db_printf("\n");
5492 		db_printf("\tnumdirtybuffers\t%d\n", bd->bd_numdirtybuffers);
5493 		db_printf("\tlodirtybuffers\t%d\n", bd->bd_lodirtybuffers);
5494 		db_printf("\thidirtybuffers\t%d\n", bd->bd_hidirtybuffers);
5495 		db_printf("\tdirtybufthresh\t%d\n", bd->bd_dirtybufthresh);
5496 		db_printf("\n");
5497 		total = 0;
5498 		TAILQ_FOREACH(bp, &bd->bd_cleanq->bq_queue, b_freelist)
5499 			total += bp->b_bufsize;
5500 		db_printf("\tcleanq count\t%d (%ld)\n",
5501 		    bd->bd_cleanq->bq_len, total);
5502 		total = 0;
5503 		TAILQ_FOREACH(bp, &bd->bd_dirtyq.bq_queue, b_freelist)
5504 			total += bp->b_bufsize;
5505 		db_printf("\tdirtyq count\t%d (%ld)\n",
5506 		    bd->bd_dirtyq.bq_len, total);
5507 		db_printf("\twakeup\t\t%d\n", bd->bd_wanted);
5508 		db_printf("\tlim\t\t%d\n", bd->bd_lim);
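		/* Per-CPU clean queue lengths for this domain. */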
5509 		db_printf("\tCPU ");
5510 		for (j = 0; j <= mp_maxid; j++)
5511 			db_printf("%d, ", bd->bd_subq[j].bq_len);
5512 		db_printf("\n");
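		/* Walk all buffers to total those locked in this domain. */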
5513 		cnt = 0;
5514 		total = 0;
5515 		for (j = 0; j < nbuf; j++) {
5516 			bp = nbufp(j);
5517 			if (bp->b_domain == i && BUF_ISLOCKED(bp)) {
5518 				cnt++;
5519 				total += bp->b_bufsize;
5520 			}
5521 		}
5522 		db_printf("\tLocked buffers: %d space %ld\n", cnt, total);
5523 		cnt = 0;
5524 		total = 0;
5525 		for (j = 0; j < nbuf; j++) {
5526 			bp = nbufp(j);
5527 			if (bp->b_domain == i) {
5528 				cnt++;
5529 				total += bp->b_bufsize;
5530 			}
5531 		}
5532 		db_printf("\tTotal buffers: %d space %ld\n", cnt, total);
5533 	}
5534 }
5535 
5536 DB_SHOW_COMMAND_FLAGS(lockedbufs, lockedbufs, DB_CMD_MEMSAFE)
5537 {
5538 	struct buf *bp;
5539 	int i;
5540 
5541 	for (i = 0; i < nbuf; i++) {
5542 		bp = nbufp(i);
5543 		if (BUF_ISLOCKED(bp)) {
5544 			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5545 			db_printf("\n");
5546 			if (db_pager_quit)
5547 				break;
5548 		}
5549 	}
5550 }
5551 
5552 DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
5553 {
5554 	struct vnode *vp;
5555 	struct buf *bp;
5556 
5557 	if (!have_addr) {
5558 		db_printf("usage: show vnodebufs <addr>\n");
5559 		return;
5560 	}
5561 	vp = (struct vnode *)addr;
5562 	db_printf("Clean buffers:\n");
5563 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
5564 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5565 		db_printf("\n");
5566 	}
5567 	db_printf("Dirty buffers:\n");
5568 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
5569 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5570 		db_printf("\n");
5571 	}
5572 }
5573 
5574 DB_COMMAND_FLAGS(countfreebufs, db_countfreebufs, DB_CMD_MEMSAFE)
5575 {
5576 	struct buf *bp;
5577 	int i, used = 0, nfree = 0;
5578 
5579 	if (have_addr) {
5580 		db_printf("usage: countfreebufs\n");
5581 		return;
5582 	}
5583 
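	/*
	 * Buffers on QUEUE_EMPTY carry no identity and are counted as
	 * free; everything else is counted as used.
	 */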
5584 	for (i = 0; i < nbuf; i++) {
5585 		bp = nbufp(i);
5586 		if (bp->b_qindex == QUEUE_EMPTY)
5587 			nfree++;
5588 		else
5589 			used++;
5590 	}
5591 
5592 	db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
5593 	    nfree + used);
5594 	db_printf("numfreebuffers is %d\n", numfreebuffers);
5595 }
5596 #endif /* DDB */
5597