xref: /freebsd/sys/kern/vfs_bio.c (revision dd41de95a84d979615a2ef11df6850622bf6184e)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2004 Poul-Henning Kamp
5  * Copyright (c) 1994,1997 John S. Dyson
6  * Copyright (c) 2013 The FreeBSD Foundation
7  * All rights reserved.
8  *
9  * Portions of this software were developed by Konstantin Belousov
10  * under sponsorship from the FreeBSD Foundation.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33 
34 /*
35  * This file implements a buffer I/O scheme providing a coherent
36  * VM object and buffer cache.  Pains have been taken to make
37  * sure that the performance degradation associated with schemes such
38  * as this is not realized.
39  *
40  * Author:  John S. Dyson
41  * Significant help during the development and debugging phases
42  * was provided by David Greenman, also of the FreeBSD core team.
43  *
44  * See buf(9) for more information.
45  */
46 
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/asan.h>
53 #include <sys/bio.h>
54 #include <sys/bitset.h>
55 #include <sys/conf.h>
56 #include <sys/counter.h>
57 #include <sys/buf.h>
58 #include <sys/devicestat.h>
59 #include <sys/eventhandler.h>
60 #include <sys/fail.h>
61 #include <sys/ktr.h>
62 #include <sys/limits.h>
63 #include <sys/lock.h>
64 #include <sys/malloc.h>
65 #include <sys/mount.h>
66 #include <sys/mutex.h>
67 #include <sys/kernel.h>
68 #include <sys/kthread.h>
69 #include <sys/proc.h>
70 #include <sys/racct.h>
71 #include <sys/refcount.h>
72 #include <sys/resourcevar.h>
73 #include <sys/rwlock.h>
74 #include <sys/smp.h>
75 #include <sys/sysctl.h>
76 #include <sys/syscallsubr.h>
77 #include <sys/vmem.h>
78 #include <sys/vmmeter.h>
79 #include <sys/vnode.h>
80 #include <sys/watchdog.h>
81 #include <geom/geom.h>
82 #include <vm/vm.h>
83 #include <vm/vm_param.h>
84 #include <vm/vm_kern.h>
85 #include <vm/vm_object.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_pageout.h>
88 #include <vm/vm_pager.h>
89 #include <vm/vm_extern.h>
90 #include <vm/vm_map.h>
91 #include <vm/swap_pager.h>
92 
93 static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
94 
95 struct	bio_ops bioops;		/* I/O operation notification */
96 
97 struct	buf_ops buf_ops_bio = {
98 	.bop_name	=	"buf_ops_bio",
99 	.bop_write	=	bufwrite,
100 	.bop_strategy	=	bufstrategy,
101 	.bop_sync	=	bufsync,
102 	.bop_bdflush	=	bufbdflush,
103 };
104 
105 struct bufqueue {
106 	struct mtx_padalign	bq_lock;
107 	TAILQ_HEAD(, buf)	bq_queue;
108 	uint8_t			bq_index;
109 	uint16_t		bq_subqueue;
110 	int			bq_len;
111 } __aligned(CACHE_LINE_SIZE);
112 
113 #define	BQ_LOCKPTR(bq)		(&(bq)->bq_lock)
114 #define	BQ_LOCK(bq)		mtx_lock(BQ_LOCKPTR((bq)))
115 #define	BQ_UNLOCK(bq)		mtx_unlock(BQ_LOCKPTR((bq)))
116 #define	BQ_ASSERT_LOCKED(bq)	mtx_assert(BQ_LOCKPTR((bq)), MA_OWNED)
117 
118 struct bufdomain {
119 	struct bufqueue	bd_subq[MAXCPU + 1]; /* Per-cpu sub queues + global */
120 	struct bufqueue bd_dirtyq;
121 	struct bufqueue	*bd_cleanq;
122 	struct mtx_padalign bd_run_lock;
123 	/* Constants */
124 	long		bd_maxbufspace;
125 	long		bd_hibufspace;
126 	long 		bd_lobufspace;
127 	long 		bd_bufspacethresh;
128 	int		bd_hifreebuffers;
129 	int		bd_lofreebuffers;
130 	int		bd_hidirtybuffers;
131 	int		bd_lodirtybuffers;
132 	int		bd_dirtybufthresh;
133 	int		bd_lim;
134 	/* atomics */
135 	int		bd_wanted;
136 	int __aligned(CACHE_LINE_SIZE)	bd_numdirtybuffers;
137 	int __aligned(CACHE_LINE_SIZE)	bd_running;
138 	long __aligned(CACHE_LINE_SIZE) bd_bufspace;
139 	int __aligned(CACHE_LINE_SIZE)	bd_freebuffers;
140 } __aligned(CACHE_LINE_SIZE);
141 
142 #define	BD_LOCKPTR(bd)		(&(bd)->bd_cleanq->bq_lock)
143 #define	BD_LOCK(bd)		mtx_lock(BD_LOCKPTR((bd)))
144 #define	BD_UNLOCK(bd)		mtx_unlock(BD_LOCKPTR((bd)))
145 #define	BD_ASSERT_LOCKED(bd)	mtx_assert(BD_LOCKPTR((bd)), MA_OWNED)
146 #define	BD_RUN_LOCKPTR(bd)	(&(bd)->bd_run_lock)
147 #define	BD_RUN_LOCK(bd)		mtx_lock(BD_RUN_LOCKPTR((bd)))
148 #define	BD_RUN_UNLOCK(bd)	mtx_unlock(BD_RUN_LOCKPTR((bd)))
149 #define	BD_DOMAIN(bd)		(bd - bdomain)
150 
151 static char *buf;		/* buffer header pool */
152 static struct buf *
153 nbufp(unsigned i)
154 {
155 	return ((struct buf *)(buf + (sizeof(struct buf) +
156 	    sizeof(vm_page_t) * atop(maxbcachebuf)) * i));
157 }
158 
159 caddr_t __read_mostly unmapped_buf;
160 
161 /* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
162 struct proc *bufdaemonproc;
163 
164 static void vm_hold_free_pages(struct buf *bp, int newbsize);
165 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
166 		vm_offset_t to);
167 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
168 static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
169 		vm_page_t m);
170 static void vfs_clean_pages_dirty_buf(struct buf *bp);
171 static void vfs_setdirty_range(struct buf *bp);
172 static void vfs_vmio_invalidate(struct buf *bp);
173 static void vfs_vmio_truncate(struct buf *bp, int npages);
174 static void vfs_vmio_extend(struct buf *bp, int npages, int size);
175 static int vfs_bio_clcheck(struct vnode *vp, int size,
176 		daddr_t lblkno, daddr_t blkno);
177 static void breada(struct vnode *, daddr_t *, int *, int, struct ucred *, int,
178 		void (*)(struct buf *));
179 static int buf_flush(struct vnode *vp, struct bufdomain *, int);
180 static int flushbufqueues(struct vnode *, struct bufdomain *, int, int);
181 static void buf_daemon(void);
182 static __inline void bd_wakeup(void);
183 static int sysctl_runningspace(SYSCTL_HANDLER_ARGS);
184 static void bufkva_reclaim(vmem_t *, int);
185 static void bufkva_free(struct buf *);
186 static int buf_import(void *, void **, int, int, int);
187 static void buf_release(void *, void **, int);
188 static void maxbcachebuf_adjust(void);
189 static inline struct bufdomain *bufdomain(struct buf *);
190 static void bq_remove(struct bufqueue *bq, struct buf *bp);
191 static void bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock);
192 static int buf_recycle(struct bufdomain *, bool kva);
193 static void bq_init(struct bufqueue *bq, int qindex, int cpu,
194 	    const char *lockname);
195 static void bd_init(struct bufdomain *bd);
196 static int bd_flushall(struct bufdomain *bd);
197 static int sysctl_bufdomain_long(SYSCTL_HANDLER_ARGS);
198 static int sysctl_bufdomain_int(SYSCTL_HANDLER_ARGS);
199 
200 static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
201 int vmiodirenable = TRUE;
202 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
203     "Use the VM system for directory writes");
204 long runningbufspace;
205 SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
206     "Amount of presently outstanding async buffer io");
207 SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
208     NULL, 0, sysctl_bufspace, "L", "Physical memory used for buffers");
209 static counter_u64_t bufkvaspace;
210 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace,
211     "Kernel virtual memory used for buffers");
212 static long maxbufspace;
213 SYSCTL_PROC(_vfs, OID_AUTO, maxbufspace,
214     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &maxbufspace,
215     __offsetof(struct bufdomain, bd_maxbufspace), sysctl_bufdomain_long, "L",
216     "Maximum allowed value of bufspace (including metadata)");
217 static long bufmallocspace;
218 SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
219     "Amount of malloced memory for buffers");
220 static long maxbufmallocspace;
221 SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace,
222     0, "Maximum amount of malloced memory for buffers");
223 static long lobufspace;
224 SYSCTL_PROC(_vfs, OID_AUTO, lobufspace,
225     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &lobufspace,
226     __offsetof(struct bufdomain, bd_lobufspace), sysctl_bufdomain_long, "L",
227     "Minimum amount of buffers we want to have");
228 long hibufspace;
229 SYSCTL_PROC(_vfs, OID_AUTO, hibufspace,
230     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &hibufspace,
231     __offsetof(struct bufdomain, bd_hibufspace), sysctl_bufdomain_long, "L",
232     "Maximum allowed value of bufspace (excluding metadata)");
233 long bufspacethresh;
234 SYSCTL_PROC(_vfs, OID_AUTO, bufspacethresh,
235     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &bufspacethresh,
236     __offsetof(struct bufdomain, bd_bufspacethresh), sysctl_bufdomain_long, "L",
237     "Bufspace consumed before waking the daemon to free some");
238 static counter_u64_t buffreekvacnt;
239 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt,
240     "Number of times we have freed the KVA space from some buffer");
241 static counter_u64_t bufdefragcnt;
242 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt,
243     "Number of times we have had to repeat buffer allocation to defragment");
244 static long lorunningspace;
245 SYSCTL_PROC(_vfs, OID_AUTO, lorunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
246     CTLFLAG_RW, &lorunningspace, 0, sysctl_runningspace, "L",
247     "Minimum preferred space used for in-progress I/O");
248 static long hirunningspace;
249 SYSCTL_PROC(_vfs, OID_AUTO, hirunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
250     CTLFLAG_RW, &hirunningspace, 0, sysctl_runningspace, "L",
251     "Maximum amount of space to use for in-progress I/O");
252 int dirtybufferflushes;
253 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
254     0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
255 int bdwriteskip;
256 SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
257     0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
258 int altbufferflushes;
259 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW | CTLFLAG_STATS,
260     &altbufferflushes, 0, "Number of fsync flushes to limit dirty buffers");
261 static int recursiveflushes;
262 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW | CTLFLAG_STATS,
263     &recursiveflushes, 0, "Number of flushes skipped due to being recursive");
264 static int sysctl_numdirtybuffers(SYSCTL_HANDLER_ARGS);
265 SYSCTL_PROC(_vfs, OID_AUTO, numdirtybuffers,
266     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RD, NULL, 0, sysctl_numdirtybuffers, "I",
267     "Number of buffers that are dirty (has unwritten changes) at the moment");
268 static int lodirtybuffers;
269 SYSCTL_PROC(_vfs, OID_AUTO, lodirtybuffers,
270     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &lodirtybuffers,
271     __offsetof(struct bufdomain, bd_lodirtybuffers), sysctl_bufdomain_int, "I",
272     "How many buffers we want to have free before bufdaemon can sleep");
273 static int hidirtybuffers;
274 SYSCTL_PROC(_vfs, OID_AUTO, hidirtybuffers,
275     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &hidirtybuffers,
276     __offsetof(struct bufdomain, bd_hidirtybuffers), sysctl_bufdomain_int, "I",
277     "When the number of dirty buffers is considered severe");
278 int dirtybufthresh;
279 SYSCTL_PROC(_vfs, OID_AUTO, dirtybufthresh,
280     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &dirtybufthresh,
281     __offsetof(struct bufdomain, bd_dirtybufthresh), sysctl_bufdomain_int, "I",
282     "Number of bdwrite to bawrite conversions to clear dirty buffers");
283 static int numfreebuffers;
284 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
285     "Number of free buffers");
286 static int lofreebuffers;
287 SYSCTL_PROC(_vfs, OID_AUTO, lofreebuffers,
288     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &lofreebuffers,
289     __offsetof(struct bufdomain, bd_lofreebuffers), sysctl_bufdomain_int, "I",
290    "Target number of free buffers");
291 static int hifreebuffers;
292 SYSCTL_PROC(_vfs, OID_AUTO, hifreebuffers,
293     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &hifreebuffers,
294     __offsetof(struct bufdomain, bd_hifreebuffers), sysctl_bufdomain_int, "I",
295    "Threshold for clean buffer recycling");
296 static counter_u64_t getnewbufcalls;
297 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD,
298    &getnewbufcalls, "Number of calls to getnewbuf");
299 static counter_u64_t getnewbufrestarts;
300 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RD,
301     &getnewbufrestarts,
302     "Number of times getnewbuf has had to restart a buffer acquisition");
303 static counter_u64_t mappingrestarts;
304 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RD,
305     &mappingrestarts,
306     "Number of times getblk has had to restart a buffer mapping for "
307     "unmapped buffer");
308 static counter_u64_t numbufallocfails;
309 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, numbufallocfails, CTLFLAG_RW,
310     &numbufallocfails, "Number of times buffer allocations failed");
311 static int flushbufqtarget = 100;
312 SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
313     "Amount of work to do in flushbufqueues when helping bufdaemon");
314 static counter_u64_t notbufdflushes;
315 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes,
316     "Number of dirty buffer flushes done by the bufdaemon helpers");
317 static long barrierwrites;
318 SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW | CTLFLAG_STATS,
319     &barrierwrites, 0, "Number of barrier writes");
320 SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
321     &unmapped_buf_allowed, 0,
322     "Permit the use of the unmapped i/o");
323 int maxbcachebuf = MAXBCACHEBUF;
324 SYSCTL_INT(_vfs, OID_AUTO, maxbcachebuf, CTLFLAG_RDTUN, &maxbcachebuf, 0,
325     "Maximum size of a buffer cache block");
326 
327 /*
328  * This lock synchronizes access to bd_request.
329  */
330 static struct mtx_padalign __exclusive_cache_line bdlock;
331 
332 /*
333  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
334  * waitrunningbufspace().
335  */
336 static struct mtx_padalign __exclusive_cache_line rbreqlock;
337 
338 /*
339  * Lock that protects bdirtywait.
340  */
341 static struct mtx_padalign __exclusive_cache_line bdirtylock;
342 
343 /*
344  * Wakeup point for bufdaemon, as well as indicator of whether it is already
345  * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
346  * is idling.
347  */
348 static int bd_request;
349 
350 /*
351  * Request for the buf daemon to write more buffers than is indicated by
352  * lodirtybuffers.  This may be necessary to push out excess dependencies or
353  * defragment the address space where a simple count of the number of dirty
354  * buffers is insufficient to characterize the demand for flushing them.
355  */
356 static int bd_speedupreq;
357 
358 /*
359  * Synchronization (sleep/wakeup) variable for active buffer space requests.
360  * Set when wait starts, cleared prior to wakeup().
361  * Used in runningbufwakeup() and waitrunningbufspace().
362  */
363 static int runningbufreq;
364 
365 /*
366  * Synchronization for bwillwrite() waiters.
367  */
368 static int bdirtywait;
369 
370 /*
371  * Definitions for the buffer free lists.
372  */
373 #define QUEUE_NONE	0	/* on no queue */
374 #define QUEUE_EMPTY	1	/* empty buffer headers */
375 #define QUEUE_DIRTY	2	/* B_DELWRI buffers */
376 #define QUEUE_CLEAN	3	/* non-B_DELWRI buffers */
377 #define QUEUE_SENTINEL	4	/* not a queue index, but a sentinel marker */
378 
379 /* Maximum number of buffer domains. */
380 #define	BUF_DOMAINS	8
381 
382 struct bufdomainset bdlodirty;		/* Domains > lodirty */
383 struct bufdomainset bdhidirty;		/* Domains > hidirty */
384 
385 /* Configured number of clean queues. */
386 static int __read_mostly buf_domains;
387 
388 BITSET_DEFINE(bufdomainset, BUF_DOMAINS);
389 struct bufdomain __exclusive_cache_line bdomain[BUF_DOMAINS];
390 struct bufqueue __exclusive_cache_line bqempty;
391 
392 /*
393  * per-cpu empty buffer cache.
394  */
395 uma_zone_t buf_zone;
396 
397 /*
398  * Single global constant for BUF_WMESG, to avoid getting multiple references.
399  * buf_wmesg is referenced from macros.
400  */
401 const char *buf_wmesg = BUF_WMESG;
402 
403 static int
404 sysctl_runningspace(SYSCTL_HANDLER_ARGS)
405 {
406 	long value;
407 	int error;
408 
409 	value = *(long *)arg1;
410 	error = sysctl_handle_long(oidp, &value, 0, req);
411 	if (error != 0 || req->newptr == NULL)
412 		return (error);
413 	mtx_lock(&rbreqlock);
414 	if (arg1 == &hirunningspace) {
415 		if (value < lorunningspace)
416 			error = EINVAL;
417 		else
418 			hirunningspace = value;
419 	} else {
420 		KASSERT(arg1 == &lorunningspace,
421 		    ("%s: unknown arg1", __func__));
422 		if (value > hirunningspace)
423 			error = EINVAL;
424 		else
425 			lorunningspace = value;
426 	}
427 	mtx_unlock(&rbreqlock);
428 	return (error);
429 }
430 
431 static int
432 sysctl_bufdomain_int(SYSCTL_HANDLER_ARGS)
433 {
434 	int error;
435 	int value;
436 	int i;
437 
438 	value = *(int *)arg1;
439 	error = sysctl_handle_int(oidp, &value, 0, req);
440 	if (error != 0 || req->newptr == NULL)
441 		return (error);
442 	*(int *)arg1 = value;
443 	for (i = 0; i < buf_domains; i++)
444 		*(int *)(uintptr_t)(((uintptr_t)&bdomain[i]) + arg2) =
445 		    value / buf_domains;
446 
447 	return (error);
448 }
449 
450 static int
451 sysctl_bufdomain_long(SYSCTL_HANDLER_ARGS)
452 {
453 	long value;
454 	int error;
455 	int i;
456 
457 	value = *(long *)arg1;
458 	error = sysctl_handle_long(oidp, &value, 0, req);
459 	if (error != 0 || req->newptr == NULL)
460 		return (error);
461 	*(long *)arg1 = value;
462 	for (i = 0; i < buf_domains; i++)
463 		*(long *)(uintptr_t)(((uintptr_t)&bdomain[i]) + arg2) =
464 		    value / buf_domains;
465 
466 	return (error);
467 }
468 
469 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
470     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
471 static int
472 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
473 {
474 	long lvalue;
475 	int ivalue;
476 	int i;
477 
478 	lvalue = 0;
479 	for (i = 0; i < buf_domains; i++)
480 		lvalue += bdomain[i].bd_bufspace;
481 	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
482 		return (sysctl_handle_long(oidp, &lvalue, 0, req));
483 	if (lvalue > INT_MAX)
484 		/* On overflow, still write out a long to trigger ENOMEM. */
485 		return (sysctl_handle_long(oidp, &lvalue, 0, req));
486 	ivalue = lvalue;
487 	return (sysctl_handle_int(oidp, &ivalue, 0, req));
488 }
489 #else
490 static int
491 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
492 {
493 	long lvalue;
494 	int i;
495 
496 	lvalue = 0;
497 	for (i = 0; i < buf_domains; i++)
498 		lvalue += bdomain[i].bd_bufspace;
499 	return (sysctl_handle_long(oidp, &lvalue, 0, req));
500 }
501 #endif
502 
503 static int
504 sysctl_numdirtybuffers(SYSCTL_HANDLER_ARGS)
505 {
506 	int value;
507 	int i;
508 
509 	value = 0;
510 	for (i = 0; i < buf_domains; i++)
511 		value += bdomain[i].bd_numdirtybuffers;
512 	return (sysctl_handle_int(oidp, &value, 0, req));
513 }
514 
515 /*
516  *	bdirtywakeup:
517  *
518  *	Wakeup any bwillwrite() waiters.
519  */
520 static void
521 bdirtywakeup(void)
522 {
523 	mtx_lock(&bdirtylock);
524 	if (bdirtywait) {
525 		bdirtywait = 0;
526 		wakeup(&bdirtywait);
527 	}
528 	mtx_unlock(&bdirtylock);
529 }
530 
531 /*
532  *	bd_clear:
533  *
534  *	Clear a domain from the appropriate bitsets when dirtybuffers
535  *	is decremented.
536  */
537 static void
538 bd_clear(struct bufdomain *bd)
539 {
540 
541 	mtx_lock(&bdirtylock);
542 	if (bd->bd_numdirtybuffers <= bd->bd_lodirtybuffers)
543 		BIT_CLR(BUF_DOMAINS, BD_DOMAIN(bd), &bdlodirty);
544 	if (bd->bd_numdirtybuffers <= bd->bd_hidirtybuffers)
545 		BIT_CLR(BUF_DOMAINS, BD_DOMAIN(bd), &bdhidirty);
546 	mtx_unlock(&bdirtylock);
547 }
548 
549 /*
550  *	bd_set:
551  *
552  *	Set a domain in the appropriate bitsets when dirtybuffers
553  *	is incremented.
554  */
555 static void
556 bd_set(struct bufdomain *bd)
557 {
558 
559 	mtx_lock(&bdirtylock);
560 	if (bd->bd_numdirtybuffers > bd->bd_lodirtybuffers)
561 		BIT_SET(BUF_DOMAINS, BD_DOMAIN(bd), &bdlodirty);
562 	if (bd->bd_numdirtybuffers > bd->bd_hidirtybuffers)
563 		BIT_SET(BUF_DOMAINS, BD_DOMAIN(bd), &bdhidirty);
564 	mtx_unlock(&bdirtylock);
565 }
566 
567 /*
568  *	bdirtysub:
569  *
570  *	Decrement the numdirtybuffers count by one and wakeup any
571  *	threads blocked in bwillwrite().
572  */
573 static void
574 bdirtysub(struct buf *bp)
575 {
576 	struct bufdomain *bd;
577 	int num;
578 
579 	bd = bufdomain(bp);
580 	num = atomic_fetchadd_int(&bd->bd_numdirtybuffers, -1);
581 	if (num == (bd->bd_lodirtybuffers + bd->bd_hidirtybuffers) / 2)
582 		bdirtywakeup();
583 	if (num == bd->bd_lodirtybuffers || num == bd->bd_hidirtybuffers)
584 		bd_clear(bd);
585 }
586 
587 /*
588  *	bdirtyadd:
589  *
590  *	Increment the numdirtybuffers count by one and wakeup the buf
591  *	daemon if needed.
592  */
593 static void
594 bdirtyadd(struct buf *bp)
595 {
596 	struct bufdomain *bd;
597 	int num;
598 
599 	/*
600 	 * Only do the wakeup once as we cross the boundary.  The
601 	 * buf daemon will keep running until the condition clears.
602 	 */
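	/*
	 * For instance (illustrative values only): with bd_lodirtybuffers
	 * of 500 and bd_hidirtybuffers of 1000, the buf daemon is woken
	 * exactly as the dirty count climbs through (500 + 1000) / 2 == 750.
	 */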
603 	bd = bufdomain(bp);
604 	num = atomic_fetchadd_int(&bd->bd_numdirtybuffers, 1);
605 	if (num == (bd->bd_lodirtybuffers + bd->bd_hidirtybuffers) / 2)
606 		bd_wakeup();
607 	if (num == bd->bd_lodirtybuffers || num == bd->bd_hidirtybuffers)
608 		bd_set(bd);
609 }
610 
611 /*
612  *	bufspace_daemon_wakeup:
613  *
614  *	Wakeup the daemons responsible for freeing clean bufs.
615  */
616 static void
617 bufspace_daemon_wakeup(struct bufdomain *bd)
618 {
619 
620 	/*
621 	 * avoid the lock if the daemon is running.
622 	 */
623 	if (atomic_fetchadd_int(&bd->bd_running, 1) == 0) {
624 		BD_RUN_LOCK(bd);
625 		atomic_store_int(&bd->bd_running, 1);
626 		wakeup(&bd->bd_running);
627 		BD_RUN_UNLOCK(bd);
628 	}
629 }
630 
631 /*
632  *	bufspace_daemon_wait:
633  *
634  *	Sleep until the domain falls below a limit or one second passes.
635  */
636 static void
637 bufspace_daemon_wait(struct bufdomain *bd)
638 {
639 	/*
640 	 * Re-check our limits and sleep.  bd_running must be
641 	 * cleared prior to checking the limits to avoid missed
642 	 * wakeups.  The waker will adjust one of bufspace or
643 	 * freebuffers prior to checking bd_running.
644 	 */
645 	BD_RUN_LOCK(bd);
646 	atomic_store_int(&bd->bd_running, 0);
647 	if (bd->bd_bufspace < bd->bd_bufspacethresh &&
648 	    bd->bd_freebuffers > bd->bd_lofreebuffers) {
649 		msleep(&bd->bd_running, BD_RUN_LOCKPTR(bd), PRIBIO|PDROP,
650 		    "-", hz);
651 	} else {
652 		/* Avoid spurious wakeups while running. */
653 		atomic_store_int(&bd->bd_running, 1);
654 		BD_RUN_UNLOCK(bd);
655 	}
656 }
657 
658 /*
659  *	bufspace_adjust:
660  *
661  *	Adjust the reported bufspace for a KVA managed buffer, possibly
662  * 	waking any waiters.
663  */
664 static void
665 bufspace_adjust(struct buf *bp, int bufsize)
666 {
667 	struct bufdomain *bd;
668 	long space;
669 	int diff;
670 
671 	KASSERT((bp->b_flags & B_MALLOC) == 0,
672 	    ("bufspace_adjust: malloc buf %p", bp));
673 	bd = bufdomain(bp);
674 	diff = bufsize - bp->b_bufsize;
675 	if (diff < 0) {
676 		atomic_subtract_long(&bd->bd_bufspace, -diff);
677 	} else if (diff > 0) {
678 		space = atomic_fetchadd_long(&bd->bd_bufspace, diff);
679 		/* Wake up the daemon on the transition. */
680 		if (space < bd->bd_bufspacethresh &&
681 		    space + diff >= bd->bd_bufspacethresh)
682 			bufspace_daemon_wakeup(bd);
683 	}
684 	bp->b_bufsize = bufsize;
685 }
686 
687 /*
688  *	bufspace_reserve:
689  *
690  *	Reserve bufspace before calling allocbuf().  Metadata has a
691  *	different space limit than data.
692  */
693 static int
694 bufspace_reserve(struct bufdomain *bd, int size, bool metadata)
695 {
696 	long limit, new;
697 	long space;
698 
699 	if (metadata)
700 		limit = bd->bd_maxbufspace;
701 	else
702 		limit = bd->bd_hibufspace;
703 	space = atomic_fetchadd_long(&bd->bd_bufspace, size);
704 	new = space + size;
705 	if (new > limit) {
706 		atomic_subtract_long(&bd->bd_bufspace, size);
707 		return (ENOSPC);
708 	}
709 
710 	/* Wake up the daemon on the transition. */
711 	if (space < bd->bd_bufspacethresh && new >= bd->bd_bufspacethresh)
712 		bufspace_daemon_wakeup(bd);
713 
714 	return (0);
715 }
716 
717 /*
718  *	bufspace_release:
719  *
720  *	Release reserved bufspace after bufspace_adjust() has consumed it.
721  */
722 static void
723 bufspace_release(struct bufdomain *bd, int size)
724 {
725 
726 	atomic_subtract_long(&bd->bd_bufspace, size);
727 }
728 
729 /*
730  *	bufspace_wait:
731  *
732  *	Wait for bufspace, acting as the buf daemon if a locked vnode is
733  *	supplied.  bd_wanted must be set prior to polling for space.  The
734  *	operation must be retried on return.
735  */
736 static void
737 bufspace_wait(struct bufdomain *bd, struct vnode *vp, int gbflags,
738     int slpflag, int slptimeo)
739 {
740 	struct thread *td;
741 	int error, fl, norunbuf;
742 
743 	if ((gbflags & GB_NOWAIT_BD) != 0)
744 		return;
745 
746 	td = curthread;
747 	BD_LOCK(bd);
748 	while (bd->bd_wanted) {
749 		if (vp != NULL && vp->v_type != VCHR &&
750 		    (td->td_pflags & TDP_BUFNEED) == 0) {
751 			BD_UNLOCK(bd);
752 			/*
753 			 * getblk() is called with the vnode locked, and
754 			 * a majority of the dirty buffers may well
755 			 * belong to that vnode.  Flushing those
756 			 * buffers here can make progress that the
757 			 * buf_daemon, which cannot lock the vnode,
758 			 * is unable to achieve.
759 			 */
760 			norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
761 			    (td->td_pflags & TDP_NORUNNINGBUF);
762 
763 			/*
764 			 * Play bufdaemon.  The getnewbuf() function
765 			 * may be called while the thread owns the
766 			 * lock on another dirty buffer of the same
767 			 * vnode, which makes it impossible to use
768 			 * VOP_FSYNC() there, due to the buffer lock
769 			 * recursion.
770 			 */
771 			td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
772 			fl = buf_flush(vp, bd, flushbufqtarget);
773 			td->td_pflags &= norunbuf;
774 			BD_LOCK(bd);
775 			if (fl != 0)
776 				continue;
777 			if (bd->bd_wanted == 0)
778 				break;
779 		}
780 		error = msleep(&bd->bd_wanted, BD_LOCKPTR(bd),
781 		    (PRIBIO + 4) | slpflag, "newbuf", slptimeo);
782 		if (error != 0)
783 			break;
784 	}
785 	BD_UNLOCK(bd);
786 }
787 
788 /*
789  *	bufspace_daemon:
790  *
791  *	buffer space management daemon.  Tries to maintain some marginal
792  *	amount of free buffer space so that requesting processes neither
793  *	block nor work to reclaim buffers.
794  */
795 static void
796 bufspace_daemon(void *arg)
797 {
798 	struct bufdomain *bd;
799 
800 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kthread_shutdown, curthread,
801 	    SHUTDOWN_PRI_LAST + 100);
802 
803 	bd = arg;
804 	for (;;) {
805 		kthread_suspend_check();
806 
807 		/*
808 		 * Free buffers from the clean queue until we meet our
809 		 * targets.
810 		 *
811 		 * Theory of operation:  The buffer cache is most efficient
812 		 * when some free buffer headers and space are always
813 		 * available to getnewbuf().  This daemon attempts to prevent
814 		 * the excessive blocking and synchronization associated
815 		 * with shortfall.  It goes through three phases according
816 		 * with shortfall.  It goes through three phases according
817 		 * to demand:
818 		 * 1)	The daemon wakes up voluntarily once per-second
819 		 *	during idle periods when the counters are below
820 		 *	the wakeup thresholds (bufspacethresh, lofreebuffers).
821 		 *
822 		 * 2)	The daemon wakes up as we cross the thresholds
823 		 *	ahead of any potential blocking.  This may bounce
824 		 *	slightly according to the rate of consumption and
825 		 *	release.
826 		 *
827 		 * 3)	The daemon and consumers are starved for working
828 		 *	clean buffers.  This is the 'bufspace' sleep below
829 		 *	which will inefficiently trade bufs with bqrelse
830 		 *	until we return to condition 2.
831 		 */
832 		while (bd->bd_bufspace > bd->bd_lobufspace ||
833 		    bd->bd_freebuffers < bd->bd_hifreebuffers) {
834 			if (buf_recycle(bd, false) != 0) {
835 				if (bd_flushall(bd))
836 					continue;
837 				/*
838 				 * Speedup dirty if we've run out of clean
839 				 * buffers.  This is possible in particular
840 				 * because softdep may hold many bufs locked
841 				 * pending writes to other bufs which are
842 				 * marked for delayed write, exhausting
843 				 * clean space until they are written.
844 				 */
845 				bd_speedup();
846 				BD_LOCK(bd);
847 				if (bd->bd_wanted) {
848 					msleep(&bd->bd_wanted, BD_LOCKPTR(bd),
849 					    PRIBIO|PDROP, "bufspace", hz/10);
850 				} else
851 					BD_UNLOCK(bd);
852 			}
853 			maybe_yield();
854 		}
855 		bufspace_daemon_wait(bd);
856 	}
857 }
858 
859 /*
860  *	bufmallocadjust:
861  *
862  *	Adjust the reported bufspace for a malloc managed buffer, possibly
863  *	waking any waiters.
864  */
865 static void
866 bufmallocadjust(struct buf *bp, int bufsize)
867 {
868 	int diff;
869 
870 	KASSERT((bp->b_flags & B_MALLOC) != 0,
871 	    ("bufmallocadjust: non-malloc buf %p", bp));
872 	diff = bufsize - bp->b_bufsize;
873 	if (diff < 0)
874 		atomic_subtract_long(&bufmallocspace, -diff);
875 	else
876 		atomic_add_long(&bufmallocspace, diff);
877 	bp->b_bufsize = bufsize;
878 }
879 
880 /*
881  *	runningwakeup:
882  *
883  *	Wake up processes that are waiting on asynchronous writes to fall
884  *	below lorunningspace.
885  */
886 static void
887 runningwakeup(void)
888 {
889 
890 	mtx_lock(&rbreqlock);
891 	if (runningbufreq) {
892 		runningbufreq = 0;
893 		wakeup(&runningbufreq);
894 	}
895 	mtx_unlock(&rbreqlock);
896 }
897 
898 /*
899  *	runningbufwakeup:
900  *
901  *	Decrement the outstanding write count accordingly.
902  */
903 void
904 runningbufwakeup(struct buf *bp)
905 {
906 	long space, bspace;
907 
908 	bspace = bp->b_runningbufspace;
909 	if (bspace == 0)
910 		return;
911 	space = atomic_fetchadd_long(&runningbufspace, -bspace);
912 	KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
913 	    space, bspace));
914 	bp->b_runningbufspace = 0;
915 	/*
916 	 * Only acquire the lock and wakeup on the transition from exceeding
917 	 * the threshold to falling below it.
918 	 */
919 	if (space < lorunningspace)
920 		return;
921 	if (space - bspace > lorunningspace)
922 		return;
923 	runningwakeup();
924 }
925 
926 /*
927  *	waitrunningbufspace()
928  *
929  *	runningbufspace is a measure of the amount of I/O currently
930  *	running.  This routine is used in async-write situations to
931  *	prevent creating huge backups of pending writes to a device.
932  *	Only asynchronous writes are governed by this function.
933  *
934  *	This does NOT turn an async write into a sync write.  It waits
935  *	for earlier writes to complete and generally returns before the
936  *	caller's write has reached the device.
937  */
938 void
939 waitrunningbufspace(void)
940 {
941 
942 	mtx_lock(&rbreqlock);
943 	while (runningbufspace > hirunningspace) {
944 		runningbufreq = 1;
945 		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
946 	}
947 	mtx_unlock(&rbreqlock);
948 }
949 
950 /*
951  *	vfs_buf_test_cache:
952  *
953  *	Called when a buffer is extended.  This function clears the B_CACHE
954  *	bit if the newly extended portion of the buffer does not contain
955  *	valid data.
956  */
957 static __inline void
958 vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff, vm_offset_t off,
959     vm_offset_t size, vm_page_t m)
960 {
961 
962 	/*
963 	 * This function and its results are protected by higher level
964 	 * synchronization requiring vnode and buf locks to page in and
965 	 * validate pages.
966 	 */
967 	if (bp->b_flags & B_CACHE) {
968 		int base = (foff + off) & PAGE_MASK;
969 		if (vm_page_is_valid(m, base, size) == 0)
970 			bp->b_flags &= ~B_CACHE;
971 	}
972 }
973 
974 /* Wake up the buffer daemon if necessary */
975 static void
976 bd_wakeup(void)
977 {
978 
979 	mtx_lock(&bdlock);
980 	if (bd_request == 0) {
981 		bd_request = 1;
982 		wakeup(&bd_request);
983 	}
984 	mtx_unlock(&bdlock);
985 }
986 
987 /*
988  * Adjust the maxbcachebuf tunable.
989  */
990 static void
991 maxbcachebuf_adjust(void)
992 {
993 	int i;
994 
995 	/*
996 	 * maxbcachebuf must be a power of 2 >= MAXBSIZE.
997 	 */
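	/*
	 * A brief worked example (illustrative only): a tuned value of
	 * 96 KiB is not a power of two, so the loop below rounds it down
	 * to 64 KiB; the checks that follow then raise anything below
	 * MAXBSIZE up to MAXBSIZE and clamp anything above maxphys down
	 * to maxphys.
	 */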
998 	i = 2;
999 	while (i * 2 <= maxbcachebuf)
1000 		i *= 2;
1001 	maxbcachebuf = i;
1002 	if (maxbcachebuf < MAXBSIZE)
1003 		maxbcachebuf = MAXBSIZE;
1004 	if (maxbcachebuf > maxphys)
1005 		maxbcachebuf = maxphys;
1006 	if (bootverbose != 0 && maxbcachebuf != MAXBCACHEBUF)
1007 		printf("maxbcachebuf=%d\n", maxbcachebuf);
1008 }
1009 
1010 /*
1011  * bd_speedup - speedup the buffer cache flushing code
1012  */
1013 void
1014 bd_speedup(void)
1015 {
1016 	int needwake;
1017 
1018 	mtx_lock(&bdlock);
1019 	needwake = 0;
1020 	if (bd_speedupreq == 0 || bd_request == 0)
1021 		needwake = 1;
1022 	bd_speedupreq = 1;
1023 	bd_request = 1;
1024 	if (needwake)
1025 		wakeup(&bd_request);
1026 	mtx_unlock(&bdlock);
1027 }
1028 
1029 #ifdef __i386__
1030 #define	TRANSIENT_DENOM	5
1031 #else
1032 #define	TRANSIENT_DENOM 10
1033 #endif
1034 
1035 /*
1036  * Calculate buffer cache scaling values and reserve space for buffer
1037  * headers.  This is called during low-level kernel initialization and
1038  * may be called more than once.  We CANNOT write to the memory area
1039  * being reserved at this time.
1040  */
1041 caddr_t
1042 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
1043 {
1044 	int tuned_nbuf;
1045 	long maxbuf, maxbuf_sz, buf_sz,	biotmap_sz;
1046 
1047 #ifdef KASAN
1048 	/*
1049 	 * With KASAN enabled, the kernel map is shadowed.  Account for this
1050 	 * when sizing maps based on the amount of physical memory available.
1051 	 */
1052 	physmem_est = (physmem_est * KASAN_SHADOW_SCALE) /
1053 	    (KASAN_SHADOW_SCALE + 1);
1054 #endif
1055 
1056 	/*
1057 	 * physmem_est is in pages.  Convert it to kilobytes (assumes
1058 	 * PAGE_SIZE is >= 1K)
1059 	 */
1060 	physmem_est = physmem_est * (PAGE_SIZE / 1024);
1061 
1062 	maxbcachebuf_adjust();
1063 	/*
1064 	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
1065 	 * For the first 64MB of ram nominally allocate sufficient buffers to
1066 	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
1067 	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
1068 	 * the buffer cache we limit the eventual kva reservation to
1069 	 * maxbcache bytes.
1070 	 *
1071 	 * factor represents the 1/4 x ram conversion.
1072 	 */
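	/*
	 * Worked example (illustrative, assuming the stock BKVASIZE of
	 * 16 KiB, so factor == 64): with physmem_est of 128 MiB
	 * (131072 kB), the first clause adds min((131072 - 4096) / 64,
	 * 65536 / 64) == 1024 buffers and the second adds
	 * min((131072 - 65536) * 2 / 320, 32M / 320) == 409, giving
	 * nbuf of roughly 50 + 1024 + 409 == 1483, i.e. about 23 MiB
	 * of buffer KVA.
	 */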
1073 	if (nbuf == 0) {
1074 		int factor = 4 * BKVASIZE / 1024;
1075 
1076 		nbuf = 50;
1077 		if (physmem_est > 4096)
1078 			nbuf += min((physmem_est - 4096) / factor,
1079 			    65536 / factor);
1080 		if (physmem_est > 65536)
1081 			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
1082 			    32 * 1024 * 1024 / (factor * 5));
1083 
1084 		if (maxbcache && nbuf > maxbcache / BKVASIZE)
1085 			nbuf = maxbcache / BKVASIZE;
1086 		tuned_nbuf = 1;
1087 	} else
1088 		tuned_nbuf = 0;
1089 
1090 	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
1091 	maxbuf = (LONG_MAX / 3) / BKVASIZE;
1092 	if (nbuf > maxbuf) {
1093 		if (!tuned_nbuf)
1094 			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
1095 			    maxbuf);
1096 		nbuf = maxbuf;
1097 	}
1098 
1099 	/*
1100 	 * Ideal allocation size for the transient bio submap is 10%
1101 	 * of the maximal buffer map size.  This roughly corresponds
1102 	 * to the amount of the buffer mapped for typical UFS load.
1103 	 *
1104 	 * Clip the buffer map to reserve space for the transient
1105 	 * BIOs, if its extent is bigger than 90% (80% on i386) of the
1106 	 * maximum buffer map extent on the platform.
1107 	 *
1108 	 * The fall-back to maxbuf when maxbcache is unset makes it
1109 	 * possible not to trim the buffer KVA on architectures with
1110 	 * ample KVA space.
1111 	 */
1112 	if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
1113 		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
1114 		buf_sz = (long)nbuf * BKVASIZE;
1115 		if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
1116 		    (TRANSIENT_DENOM - 1)) {
1117 			/*
1118 			 * There is more KVA than memory.  Do not
1119 			 * adjust buffer map size, and assign the rest
1120 			 * of maxbuf to transient map.
1121 			 */
1122 			biotmap_sz = maxbuf_sz - buf_sz;
1123 		} else {
1124 			/*
1125 			 * Buffer map spans all KVA we could afford on
1126 			 * this platform.  Give 10% (20% on i386) of
1127 			 * the buffer map to the transient bio map.
1128 			 */
1129 			biotmap_sz = buf_sz / TRANSIENT_DENOM;
1130 			buf_sz -= biotmap_sz;
1131 		}
1132 		if (biotmap_sz / INT_MAX > maxphys)
1133 			bio_transient_maxcnt = INT_MAX;
1134 		else
1135 			bio_transient_maxcnt = biotmap_sz / maxphys;
1136 		/*
1137 		 * Artificially limit to 1024 simultaneous in-flight I/Os
1138 		 * using the transient mapping.
1139 		 */
1140 		if (bio_transient_maxcnt > 1024)
1141 			bio_transient_maxcnt = 1024;
1142 		if (tuned_nbuf)
1143 			nbuf = buf_sz / BKVASIZE;
1144 	}
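	/*
	 * Sketch of the two outcomes above (illustrative, assuming
	 * maxbcache is left unset): on a 64-bit platform maxbuf_sz is
	 * derived from the enormous maxbuf, buf_sz is far below 90% of
	 * it, so the transient map receives the large remainder and
	 * bio_transient_maxcnt simply clamps to 1024.  On a KVA-starved
	 * platform such as i386 the else branch instead carves
	 * 1/TRANSIENT_DENOM (20%) out of the buffer map itself.
	 */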
1145 
1146 	if (nswbuf == 0) {
1147 		nswbuf = min(nbuf / 4, 256);
1148 		if (nswbuf < NSWBUF_MIN)
1149 			nswbuf = NSWBUF_MIN;
1150 	}
1151 
1152 	/*
1153 	 * Reserve space for the buffer cache buffers
1154 	 */
1155 	buf = (char *)v;
1156 	v = (caddr_t)buf + (sizeof(struct buf) + sizeof(vm_page_t) *
1157 	    atop(maxbcachebuf)) * nbuf;
1158 
1159 	return (v);
1160 }
1161 
1162 /* Initialize the buffer subsystem.  Called before use of any buffers. */
1163 void
1164 bufinit(void)
1165 {
1166 	struct buf *bp;
1167 	int i;
1168 
1169 	KASSERT(maxbcachebuf >= MAXBSIZE,
1170 	    ("maxbcachebuf (%d) must be >= MAXBSIZE (%d)\n", maxbcachebuf,
1171 	    MAXBSIZE));
1172 	bq_init(&bqempty, QUEUE_EMPTY, -1, "bufq empty lock");
1173 	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
1174 	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
1175 	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
1176 
1177 	unmapped_buf = (caddr_t)kva_alloc(maxphys);
1178 
1179 	/* finally, initialize each buffer header and stick on empty q */
1180 	for (i = 0; i < nbuf; i++) {
1181 		bp = nbufp(i);
1182 		bzero(bp, sizeof(*bp) + sizeof(vm_page_t) * atop(maxbcachebuf));
1183 		bp->b_flags = B_INVAL;
1184 		bp->b_rcred = NOCRED;
1185 		bp->b_wcred = NOCRED;
1186 		bp->b_qindex = QUEUE_NONE;
1187 		bp->b_domain = -1;
1188 		bp->b_subqueue = mp_maxid + 1;
1189 		bp->b_xflags = 0;
1190 		bp->b_data = bp->b_kvabase = unmapped_buf;
1191 		LIST_INIT(&bp->b_dep);
1192 		BUF_LOCKINIT(bp);
1193 		bq_insert(&bqempty, bp, false);
1194 	}
1195 
1196 	/*
1197 	 * maxbufspace is the absolute maximum amount of buffer space we are
1198 	 * allowed to reserve in KVM and in real terms.  The absolute maximum
1199 	 * is nominally used by metadata.  hibufspace is the nominal maximum
1200 	 * used by most other requests.  The differential is required to
1201 	 * ensure that metadata deadlocks don't occur.
1202 	 *
1203 	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
1204 	 * this may result in KVM fragmentation which is not handled optimally
1205 	 * by the system. XXX This is less true with vmem.  We could use
1206 	 * PAGE_SIZE.
1207 	 */
1208 	maxbufspace = (long)nbuf * BKVASIZE;
1209 	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - maxbcachebuf * 10);
1210 	lobufspace = (hibufspace / 20) * 19; /* 95% */
1211 	bufspacethresh = lobufspace + (hibufspace - lobufspace) / 2;
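	/*
	 * Continuing the illustrative sizing above (nbuf ~ 1483, BKVASIZE
	 * 16 KiB, maxbcachebuf 64 KiB): maxbufspace is about 23.2 MiB,
	 * hibufspace about 22.5 MiB (here the "maxbufspace - 10 *
	 * maxbcachebuf" term wins over the 3/4 rule), lobufspace about
	 * 21.4 MiB, and bufspacethresh sits halfway between the two.
	 */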
1212 
1213 	/*
1214 	 * Note: The 16 MiB upper limit for hirunningspace was chosen
1215 	 * arbitrarily and may need further tuning. It corresponds to
1216 	 * 128 outstanding write IO requests (if IO size is 128 KiB),
1217 	 * which fits with many RAID controllers' tagged queuing limits.
1218 	 * The lower 1 MiB limit is the historical upper limit for
1219 	 * hirunningspace.
1220 	 */
1221 	hirunningspace = lmax(lmin(roundup(hibufspace / 64, maxbcachebuf),
1222 	    16 * 1024 * 1024), 1024 * 1024);
1223 	lorunningspace = roundup((hirunningspace * 2) / 3, maxbcachebuf);
1224 
1225 	/*
1226 	 * Limit the amount of malloc memory since it is wired permanently into
1227 	 * the kernel space.  Even though this is accounted for in the buffer
1228 	 * allocation, we don't want the malloced region to grow uncontrolled.
1229 	 * The malloc scheme improves memory utilization significantly for
1230 	 * average (small) directories.
1231 	 */
1232 	maxbufmallocspace = hibufspace / 20;
1233 
1234 	/*
1235 	 * Reduce the chance of a deadlock occurring by limiting the number
1236 	 * of delayed-write dirty buffers we allow to stack up.
1237 	 */
1238 	hidirtybuffers = nbuf / 4 + 20;
1239 	dirtybufthresh = hidirtybuffers * 9 / 10;
1240 	/*
1241 	 * To support extreme low-memory systems, make sure hidirtybuffers
1242 	 * cannot eat up all available buffer space.  This occurs when our
1243 	 * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
1244 	 * buffer space assuming BKVASIZE'd buffers.
1245 	 */
1246 	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
1247 		hidirtybuffers >>= 1;
1248 	}
1249 	lodirtybuffers = hidirtybuffers / 2;
1250 
1251 	/*
1252 	 * lofreebuffers should be sufficient to avoid stalling waiting on
1253 	 * buf headers under heavy utilization.  The bufs in per-cpu caches
1254 	 * are counted as free but will be unavailable to threads executing
1255 	 * on other cpus.
1256 	 *
1257 	 * hifreebuffers is the free target for the bufspace daemon.  This
1258 	 * should be set appropriately to limit work per-iteration.
1259 	 */
1260 	lofreebuffers = MIN((nbuf / 25) + (20 * mp_ncpus), 128 * mp_ncpus);
1261 	hifreebuffers = (3 * lofreebuffers) / 2;
1262 	numfreebuffers = nbuf;
1263 
1264 	/* Setup the kva and free list allocators. */
1265 	vmem_set_reclaim(buffer_arena, bufkva_reclaim);
1266 	buf_zone = uma_zcache_create("buf free cache",
1267 	    sizeof(struct buf) + sizeof(vm_page_t) * atop(maxbcachebuf),
1268 	    NULL, NULL, NULL, NULL, buf_import, buf_release, NULL, 0);
1269 
1270 	/*
1271 	 * Size the clean queue according to the amount of buffer space.
1272 	 * One queue per 256 MB, up to the max.  More queues give better
1273 	 * concurrency but less accurate LRU.
1274 	 */
1275 	buf_domains = MIN(howmany(maxbufspace, 256*1024*1024), BUF_DOMAINS);
1276 	for (i = 0 ; i < buf_domains; i++) {
1277 		struct bufdomain *bd;
1278 
1279 		bd = &bdomain[i];
1280 		bd_init(bd);
1281 		bd->bd_freebuffers = nbuf / buf_domains;
1282 		bd->bd_hifreebuffers = hifreebuffers / buf_domains;
1283 		bd->bd_lofreebuffers = lofreebuffers / buf_domains;
1284 		bd->bd_bufspace = 0;
1285 		bd->bd_maxbufspace = maxbufspace / buf_domains;
1286 		bd->bd_hibufspace = hibufspace / buf_domains;
1287 		bd->bd_lobufspace = lobufspace / buf_domains;
1288 		bd->bd_bufspacethresh = bufspacethresh / buf_domains;
1289 		bd->bd_numdirtybuffers = 0;
1290 		bd->bd_hidirtybuffers = hidirtybuffers / buf_domains;
1291 		bd->bd_lodirtybuffers = lodirtybuffers / buf_domains;
1292 		bd->bd_dirtybufthresh = dirtybufthresh / buf_domains;
1293 		/* Don't allow more than 2% of bufs in the per-cpu caches. */
1294 		bd->bd_lim = nbuf / buf_domains / 50 / mp_ncpus;
1295 	}
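	/*
	 * Illustrative numbers: with nbuf of 8000, a single buffer domain
	 * and 4 CPUs, the domain gets all 8000 bufs and every per-cpu
	 * clean queue is capped at 8000 / 1 / 50 / 4 == 40 bufs, so at
	 * most 2% of the domain's bufs can sit in per-cpu caches.
	 */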
1296 	getnewbufcalls = counter_u64_alloc(M_WAITOK);
1297 	getnewbufrestarts = counter_u64_alloc(M_WAITOK);
1298 	mappingrestarts = counter_u64_alloc(M_WAITOK);
1299 	numbufallocfails = counter_u64_alloc(M_WAITOK);
1300 	notbufdflushes = counter_u64_alloc(M_WAITOK);
1301 	buffreekvacnt = counter_u64_alloc(M_WAITOK);
1302 	bufdefragcnt = counter_u64_alloc(M_WAITOK);
1303 	bufkvaspace = counter_u64_alloc(M_WAITOK);
1304 }
1305 
1306 #ifdef INVARIANTS
1307 static inline void
1308 vfs_buf_check_mapped(struct buf *bp)
1309 {
1310 
1311 	KASSERT(bp->b_kvabase != unmapped_buf,
1312 	    ("mapped buf: b_kvabase was not updated %p", bp));
1313 	KASSERT(bp->b_data != unmapped_buf,
1314 	    ("mapped buf: b_data was not updated %p", bp));
1315 	KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
1316 	    maxphys, ("b_data + b_offset unmapped %p", bp));
1317 }
1318 
1319 static inline void
1320 vfs_buf_check_unmapped(struct buf *bp)
1321 {
1322 
1323 	KASSERT(bp->b_data == unmapped_buf,
1324 	    ("unmapped buf: corrupted b_data %p", bp));
1325 }
1326 
1327 #define	BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
1328 #define	BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
1329 #else
1330 #define	BUF_CHECK_MAPPED(bp) do {} while (0)
1331 #define	BUF_CHECK_UNMAPPED(bp) do {} while (0)
1332 #endif
1333 
1334 static int
1335 isbufbusy(struct buf *bp)
1336 {
1337 	if (((bp->b_flags & B_INVAL) == 0 && BUF_ISLOCKED(bp)) ||
1338 	    ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
1339 		return (1);
1340 	return (0);
1341 }
1342 
1343 /*
1344  * Shutdown the system cleanly to prepare for reboot, halt, or power off.
1345  */
1346 void
1347 bufshutdown(int show_busybufs)
1348 {
1349 	static int first_buf_printf = 1;
1350 	struct buf *bp;
1351 	int i, iter, nbusy, pbusy;
1352 #ifndef PREEMPTION
1353 	int subiter;
1354 #endif
1355 
1356 	/*
1357 	 * Sync filesystems for shutdown
1358 	 */
1359 	wdog_kern_pat(WD_LASTVAL);
1360 	kern_sync(curthread);
1361 
1362 	/*
1363 	 * With soft updates, some buffers that are
1364 	 * written will be remarked as dirty until other
1365 	 * buffers are written.
1366 	 */
1367 	for (iter = pbusy = 0; iter < 20; iter++) {
1368 		nbusy = 0;
1369 		for (i = nbuf - 1; i >= 0; i--) {
1370 			bp = nbufp(i);
1371 			if (isbufbusy(bp))
1372 				nbusy++;
1373 		}
1374 		if (nbusy == 0) {
1375 			if (first_buf_printf)
1376 				printf("All buffers synced.");
1377 			break;
1378 		}
1379 		if (first_buf_printf) {
1380 			printf("Syncing disks, buffers remaining... ");
1381 			first_buf_printf = 0;
1382 		}
1383 		printf("%d ", nbusy);
1384 		if (nbusy < pbusy)
1385 			iter = 0;
1386 		pbusy = nbusy;
1387 
1388 		wdog_kern_pat(WD_LASTVAL);
1389 		kern_sync(curthread);
1390 
1391 #ifdef PREEMPTION
1392 		/*
1393 		 * Spin for a while to allow interrupt threads to run.
1394 		 */
1395 		DELAY(50000 * iter);
1396 #else
1397 		/*
1398 		 * Context switch several times to allow interrupt
1399 		 * threads to run.
1400 		 */
1401 		for (subiter = 0; subiter < 50 * iter; subiter++) {
1402 			thread_lock(curthread);
1403 			mi_switch(SW_VOL);
1404 			DELAY(1000);
1405 		}
1406 #endif
1407 	}
1408 	printf("\n");
1409 	/*
1410 	 * Count only busy local buffers to prevent forcing
1411 	 * a fsck if we're just a client of a wedged NFS server
1412 	 */
1413 	nbusy = 0;
1414 	for (i = nbuf - 1; i >= 0; i--) {
1415 		bp = nbufp(i);
1416 		if (isbufbusy(bp)) {
1417 #if 0
1418 /* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
1419 			if (bp->b_dev == NULL) {
1420 				TAILQ_REMOVE(&mountlist,
1421 				    bp->b_vp->v_mount, mnt_list);
1422 				continue;
1423 			}
1424 #endif
1425 			nbusy++;
1426 			if (show_busybufs > 0) {
1427 				printf(
1428 	    "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
1429 				    nbusy, bp, bp->b_vp, bp->b_flags,
1430 				    (intmax_t)bp->b_blkno,
1431 				    (intmax_t)bp->b_lblkno);
1432 				BUF_LOCKPRINTINFO(bp);
1433 				if (show_busybufs > 1)
1434 					vn_printf(bp->b_vp,
1435 					    "vnode content: ");
1436 			}
1437 		}
1438 	}
1439 	if (nbusy) {
1440 		/*
1441 		 * Failed to sync all blocks. Indicate this and don't
1442 		 * unmount filesystems (thus forcing an fsck on reboot).
1443 		 */
1444 		printf("Giving up on %d buffers\n", nbusy);
1445 		DELAY(5000000);	/* 5 seconds */
1446 	} else {
1447 		if (!first_buf_printf)
1448 			printf("Final sync complete\n");
1449 		/*
1450 		 * Unmount filesystems
1451 		 */
1452 		if (!KERNEL_PANICKED())
1453 			vfs_unmountall();
1454 	}
1455 	swapoff_all();
1456 	DELAY(100000);		/* wait for console output to finish */
1457 }
1458 
1459 static void
1460 bpmap_qenter(struct buf *bp)
1461 {
1462 
1463 	BUF_CHECK_MAPPED(bp);
1464 
1465 	/*
1466 	 * bp->b_data is relative to bp->b_offset, but
1467 	 * bp->b_offset may be offset into the first page.
1468 	 */
1469 	bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
1470 	pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
1471 	bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
1472 	    (vm_offset_t)(bp->b_offset & PAGE_MASK));
1473 }
1474 
1475 static inline struct bufdomain *
1476 bufdomain(struct buf *bp)
1477 {
1478 
1479 	return (&bdomain[bp->b_domain]);
1480 }
1481 
1482 static struct bufqueue *
1483 bufqueue(struct buf *bp)
1484 {
1485 
1486 	switch (bp->b_qindex) {
1487 	case QUEUE_NONE:
1488 		/* FALLTHROUGH */
1489 	case QUEUE_SENTINEL:
1490 		return (NULL);
1491 	case QUEUE_EMPTY:
1492 		return (&bqempty);
1493 	case QUEUE_DIRTY:
1494 		return (&bufdomain(bp)->bd_dirtyq);
1495 	case QUEUE_CLEAN:
1496 		return (&bufdomain(bp)->bd_subq[bp->b_subqueue]);
1497 	default:
1498 		break;
1499 	}
1500 	panic("bufqueue(%p): Unhandled type %d\n", bp, bp->b_qindex);
1501 }
1502 
1503 /*
1504  * Return the locked bufqueue that bp is a member of.
1505  */
1506 static struct bufqueue *
1507 bufqueue_acquire(struct buf *bp)
1508 {
1509 	struct bufqueue *bq, *nbq;
1510 
1511 	/*
1512 	 * bp can be pushed from a per-cpu queue to the
1513 	 * cleanq while we're waiting on the lock.  Retry
1514 	 * if the queues don't match.
1515 	 */
1516 	bq = bufqueue(bp);
1517 	BQ_LOCK(bq);
1518 	for (;;) {
1519 		nbq = bufqueue(bp);
1520 		if (bq == nbq)
1521 			break;
1522 		BQ_UNLOCK(bq);
1523 		BQ_LOCK(nbq);
1524 		bq = nbq;
1525 	}
1526 	return (bq);
1527 }
1528 
1529 /*
1530  *	binsfree:
1531  *
1532  *	Insert the buffer into the appropriate free list.  Requires a
1533  *	locked buffer on entry; the buffer is unlocked before return.
1534  */
1535 static void
1536 binsfree(struct buf *bp, int qindex)
1537 {
1538 	struct bufdomain *bd;
1539 	struct bufqueue *bq;
1540 
1541 	KASSERT(qindex == QUEUE_CLEAN || qindex == QUEUE_DIRTY,
1542 	    ("binsfree: Invalid qindex %d", qindex));
1543 	BUF_ASSERT_XLOCKED(bp);
1544 
1545 	/*
1546 	 * Handle delayed bremfree() processing.
1547 	 */
1548 	if (bp->b_flags & B_REMFREE) {
1549 		if (bp->b_qindex == qindex) {
1550 			bp->b_flags |= B_REUSE;
1551 			bp->b_flags &= ~B_REMFREE;
1552 			BUF_UNLOCK(bp);
1553 			return;
1554 		}
1555 		bq = bufqueue_acquire(bp);
1556 		bq_remove(bq, bp);
1557 		BQ_UNLOCK(bq);
1558 	}
1559 	bd = bufdomain(bp);
1560 	if (qindex == QUEUE_CLEAN) {
1561 		if (bd->bd_lim != 0)
1562 			bq = &bd->bd_subq[PCPU_GET(cpuid)];
1563 		else
1564 			bq = bd->bd_cleanq;
1565 	} else
1566 		bq = &bd->bd_dirtyq;
1567 	bq_insert(bq, bp, true);
1568 }
1569 
1570 /*
1571  * buf_free:
1572  *
1573  *	Free a buffer to the buf zone once it no longer has valid contents.
1574  */
1575 static void
1576 buf_free(struct buf *bp)
1577 {
1578 
1579 	if (bp->b_flags & B_REMFREE)
1580 		bremfreef(bp);
1581 	if (bp->b_vflags & BV_BKGRDINPROG)
1582 		panic("losing buffer 1");
1583 	if (bp->b_rcred != NOCRED) {
1584 		crfree(bp->b_rcred);
1585 		bp->b_rcred = NOCRED;
1586 	}
1587 	if (bp->b_wcred != NOCRED) {
1588 		crfree(bp->b_wcred);
1589 		bp->b_wcred = NOCRED;
1590 	}
1591 	if (!LIST_EMPTY(&bp->b_dep))
1592 		buf_deallocate(bp);
1593 	bufkva_free(bp);
1594 	atomic_add_int(&bufdomain(bp)->bd_freebuffers, 1);
1595 	MPASS((bp->b_flags & B_MAXPHYS) == 0);
1596 	BUF_UNLOCK(bp);
1597 	uma_zfree(buf_zone, bp);
1598 }
1599 
1600 /*
1601  * buf_import:
1602  *
1603  *	Import bufs into the uma cache from the buf list.  The system still
1604  *	expects a static array of bufs and much of the synchronization
1605  *	around bufs assumes type stable storage.  As a result, UMA is used
1606  *	only as a per-cpu cache of bufs still maintained on a global list.
1607  */
1608 static int
1609 buf_import(void *arg, void **store, int cnt, int domain, int flags)
1610 {
1611 	struct buf *bp;
1612 	int i;
1613 
1614 	BQ_LOCK(&bqempty);
1615 	for (i = 0; i < cnt; i++) {
1616 		bp = TAILQ_FIRST(&bqempty.bq_queue);
1617 		if (bp == NULL)
1618 			break;
1619 		bq_remove(&bqempty, bp);
1620 		store[i] = bp;
1621 	}
1622 	BQ_UNLOCK(&bqempty);
1623 
1624 	return (i);
1625 }
1626 
1627 /*
1628  * buf_release:
1629  *
1630  *	Release bufs from the uma cache back to the buffer queues.
1631  */
1632 static void
1633 buf_release(void *arg, void **store, int cnt)
1634 {
1635 	struct bufqueue *bq;
1636 	struct buf *bp;
1637 	int i;
1638 
1639 	bq = &bqempty;
1640 	BQ_LOCK(bq);
1641 	for (i = 0; i < cnt; i++) {
1642 		bp = store[i];
1643 		/* Inline bq_insert() to batch locking. */
1644 		TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
1645 		bp->b_flags &= ~(B_AGE | B_REUSE);
1646 		bq->bq_len++;
1647 		bp->b_qindex = bq->bq_index;
1648 	}
1649 	BQ_UNLOCK(bq);
1650 }
1651 
1652 /*
1653  * buf_alloc:
1654  *
1655  *	Allocate an empty buffer header.
1656  */
1657 static struct buf *
1658 buf_alloc(struct bufdomain *bd)
1659 {
1660 	struct buf *bp;
1661 	int freebufs, error;
1662 
1663 	/*
1664 	 * We can only run out of bufs in the buf zone if the average buf
1665 	 * is less than BKVASIZE.  In this case the actual wait/block will
1666 	 * come from buf_reycle() failing to flush one of these small bufs.
1667 	 * come from buf_recycle() failing to flush one of these small bufs.
1668 	bp = NULL;
1669 	freebufs = atomic_fetchadd_int(&bd->bd_freebuffers, -1);
1670 	if (freebufs > 0)
1671 		bp = uma_zalloc(buf_zone, M_NOWAIT);
1672 	if (bp == NULL) {
1673 		atomic_add_int(&bd->bd_freebuffers, 1);
1674 		bufspace_daemon_wakeup(bd);
1675 		counter_u64_add(numbufallocfails, 1);
1676 		return (NULL);
1677 	}
1678 	/*
1679 	 * Wake-up the bufspace daemon on transition below threshold.
1680 	 */
1681 	if (freebufs == bd->bd_lofreebuffers)
1682 		bufspace_daemon_wakeup(bd);
1683 
1684 	error = BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
1685 	KASSERT(error == 0, ("%s: BUF_LOCK on free buf %p: %d.", __func__, bp,
1686 	    error));
1687 	(void)error;
1688 
1689 	KASSERT(bp->b_vp == NULL,
1690 	    ("bp: %p still has vnode %p.", bp, bp->b_vp));
1691 	KASSERT((bp->b_flags & (B_DELWRI | B_NOREUSE)) == 0,
1692 	    ("invalid buffer %p flags %#x", bp, bp->b_flags));
1693 	KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1694 	    ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
1695 	KASSERT(bp->b_npages == 0,
1696 	    ("bp: %p still has %d vm pages\n", bp, bp->b_npages));
1697 	KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp));
1698 	KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp));
1699 	MPASS((bp->b_flags & B_MAXPHYS) == 0);
1700 
1701 	bp->b_domain = BD_DOMAIN(bd);
1702 	bp->b_flags = 0;
1703 	bp->b_ioflags = 0;
1704 	bp->b_xflags = 0;
1705 	bp->b_vflags = 0;
1706 	bp->b_vp = NULL;
1707 	bp->b_blkno = bp->b_lblkno = 0;
1708 	bp->b_offset = NOOFFSET;
1709 	bp->b_iodone = 0;
1710 	bp->b_error = 0;
1711 	bp->b_resid = 0;
1712 	bp->b_bcount = 0;
1713 	bp->b_npages = 0;
1714 	bp->b_dirtyoff = bp->b_dirtyend = 0;
1715 	bp->b_bufobj = NULL;
1716 	bp->b_data = bp->b_kvabase = unmapped_buf;
1717 	bp->b_fsprivate1 = NULL;
1718 	bp->b_fsprivate2 = NULL;
1719 	bp->b_fsprivate3 = NULL;
1720 	LIST_INIT(&bp->b_dep);
1721 
1722 	return (bp);
1723 }
1724 
1725 /*
1726  *	buf_recycle:
1727  *
1728  *	Free a buffer from the given bufqueue.  kva controls whether the
1729  *	freed buf must own some kva resources.  This is used for
1730  *	defragmenting.
1731  */
1732 static int
1733 buf_recycle(struct bufdomain *bd, bool kva)
1734 {
1735 	struct bufqueue *bq;
1736 	struct buf *bp, *nbp;
1737 
1738 	if (kva)
1739 		counter_u64_add(bufdefragcnt, 1);
1740 	nbp = NULL;
1741 	bq = bd->bd_cleanq;
1742 	BQ_LOCK(bq);
1743 	KASSERT(BQ_LOCKPTR(bq) == BD_LOCKPTR(bd),
1744 	    ("buf_recycle: Locks don't match"));
1745 	nbp = TAILQ_FIRST(&bq->bq_queue);
1746 
1747 	/*
1748 	 * Scan the queue, possibly freeing data and/or kva mappings on the
1749 	 * fly, depending on the kva argument.
1750 	 */
1751 	while ((bp = nbp) != NULL) {
1752 		/*
1753 		 * Calculate next bp (we can only use it if we do not
1754 		 * release the bqlock).
1755 		 */
1756 		nbp = TAILQ_NEXT(bp, b_freelist);
1757 
1758 		/*
1759 		 * If we are defragging then we need a buffer with
1760 		 * some kva to reclaim.
1761 		 */
1762 		if (kva && bp->b_kvasize == 0)
1763 			continue;
1764 
1765 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1766 			continue;
1767 
1768 		/*
1769 		 * Implement a second chance algorithm for frequently
1770 		 * accessed buffers.
1771 		 */
1772 		if ((bp->b_flags & B_REUSE) != 0) {
1773 			TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1774 			TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
1775 			bp->b_flags &= ~B_REUSE;
1776 			BUF_UNLOCK(bp);
1777 			continue;
1778 		}
1779 
1780 		/*
1781 		 * Skip buffers with background writes in progress.
1782 		 */
1783 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
1784 			BUF_UNLOCK(bp);
1785 			continue;
1786 		}
1787 
1788 		KASSERT(bp->b_qindex == QUEUE_CLEAN,
1789 		    ("buf_recycle: inconsistent queue %d bp %p",
1790 		    bp->b_qindex, bp));
1791 		KASSERT(bp->b_domain == BD_DOMAIN(bd),
1792 		    ("getnewbuf: queue domain %d doesn't match request %d",
1793 		    bp->b_domain, (int)BD_DOMAIN(bd)));
1794 		/*
1795 		 * NOTE:  nbp is now entirely invalid.  We can only restart
1796 		 * the scan from this point on.
1797 		 */
1798 		bq_remove(bq, bp);
1799 		BQ_UNLOCK(bq);
1800 
1801 		/*
1802 		 * Requeue the background write buffer with error and
1803 		 * restart the scan.
1804 		 */
1805 		if ((bp->b_vflags & BV_BKGRDERR) != 0) {
1806 			bqrelse(bp);
1807 			BQ_LOCK(bq);
1808 			nbp = TAILQ_FIRST(&bq->bq_queue);
1809 			continue;
1810 		}
1811 		bp->b_flags |= B_INVAL;
1812 		brelse(bp);
1813 		return (0);
1814 	}
1815 	bd->bd_wanted = 1;
1816 	BQ_UNLOCK(bq);
1817 
1818 	return (ENOBUFS);
1819 }
1820 
1821 /*
1822  *	bremfree:
1823  *
1824  *	Mark the buffer for removal from the appropriate free list.
1825  *
1826  */
1827 void
1828 bremfree(struct buf *bp)
1829 {
1830 
1831 	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1832 	KASSERT((bp->b_flags & B_REMFREE) == 0,
1833 	    ("bremfree: buffer %p already marked for delayed removal.", bp));
1834 	KASSERT(bp->b_qindex != QUEUE_NONE,
1835 	    ("bremfree: buffer %p not on a queue.", bp));
1836 	BUF_ASSERT_XLOCKED(bp);
1837 
1838 	bp->b_flags |= B_REMFREE;
1839 }
1840 
1841 /*
1842  *	bremfreef:
1843  *
1844  *	Force an immediate removal from a free list.  Used only in nfs when
1845  *	it abuses the b_freelist pointer.
1846  */
1847 void
1848 bremfreef(struct buf *bp)
1849 {
1850 	struct bufqueue *bq;
1851 
1852 	bq = bufqueue_acquire(bp);
1853 	bq_remove(bq, bp);
1854 	BQ_UNLOCK(bq);
1855 }
1856 
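/*
 *	bq_init:
 *
 *	Initialize a buffer queue: set up its lock, empty list, length,
 *	queue index and (per-CPU) subqueue index.
 */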
1857 static void
1858 bq_init(struct bufqueue *bq, int qindex, int subqueue, const char *lockname)
1859 {
1860 
1861 	mtx_init(&bq->bq_lock, lockname, NULL, MTX_DEF);
1862 	TAILQ_INIT(&bq->bq_queue);
1863 	bq->bq_len = 0;
1864 	bq->bq_index = qindex;
1865 	bq->bq_subqueue = subqueue;
1866 }
1867 
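/*
 *	bd_init:
 *
 *	Initialize a buffer domain: one clean subqueue per CPU plus the
 *	shared clean queue at index mp_maxid + 1, the dirty queue, and
 *	the bufspace daemon run lock.
 */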
1868 static void
1869 bd_init(struct bufdomain *bd)
1870 {
1871 	int i;
1872 
1873 	bd->bd_cleanq = &bd->bd_subq[mp_maxid + 1];
1874 	bq_init(bd->bd_cleanq, QUEUE_CLEAN, mp_maxid + 1, "bufq clean lock");
1875 	bq_init(&bd->bd_dirtyq, QUEUE_DIRTY, -1, "bufq dirty lock");
1876 	for (i = 0; i <= mp_maxid; i++)
1877 		bq_init(&bd->bd_subq[i], QUEUE_CLEAN, i,
1878 		    "bufq clean subqueue lock");
1879 	mtx_init(&bd->bd_run_lock, "bufspace daemon run lock", NULL, MTX_DEF);
1880 }
1881 
1882 /*
1883  *	bq_remove:
1884  *
1885  *	Removes a buffer from the free list, must be called with the
1886  *	correct qlock held.
1887  */
1888 static void
1889 bq_remove(struct bufqueue *bq, struct buf *bp)
1890 {
1891 
1892 	CTR3(KTR_BUF, "bq_remove(%p) vp %p flags %X",
1893 	    bp, bp->b_vp, bp->b_flags);
1894 	KASSERT(bp->b_qindex != QUEUE_NONE,
1895 	    ("bq_remove: buffer %p not on a queue.", bp));
1896 	KASSERT(bufqueue(bp) == bq,
1897 	    ("bq_remove: Remove buffer %p from wrong queue.", bp));
1898 
1899 	BQ_ASSERT_LOCKED(bq);
1900 	if (bp->b_qindex != QUEUE_EMPTY) {
1901 		BUF_ASSERT_XLOCKED(bp);
1902 	}
1903 	KASSERT(bq->bq_len >= 1,
1904 	    ("queue %d underflow", bp->b_qindex));
1905 	TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1906 	bq->bq_len--;
1907 	bp->b_qindex = QUEUE_NONE;
1908 	bp->b_flags &= ~(B_REMFREE | B_REUSE);
1909 }
1910 
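/*
 *	bd_flush:
 *
 *	Move the buffers on a per-CPU clean subqueue to the domain's
 *	shared clean queue and wake up any threads waiting for buffers
 *	in this domain.  Called with the subqueue lock held.
 */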
1911 static void
1912 bd_flush(struct bufdomain *bd, struct bufqueue *bq)
1913 {
1914 	struct buf *bp;
1915 
1916 	BQ_ASSERT_LOCKED(bq);
1917 	if (bq != bd->bd_cleanq) {
1918 		BD_LOCK(bd);
1919 		while ((bp = TAILQ_FIRST(&bq->bq_queue)) != NULL) {
1920 			TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1921 			TAILQ_INSERT_TAIL(&bd->bd_cleanq->bq_queue, bp,
1922 			    b_freelist);
1923 			bp->b_subqueue = bd->bd_cleanq->bq_subqueue;
1924 		}
1925 		bd->bd_cleanq->bq_len += bq->bq_len;
1926 		bq->bq_len = 0;
1927 	}
1928 	if (bd->bd_wanted) {
1929 		bd->bd_wanted = 0;
1930 		wakeup(&bd->bd_wanted);
1931 	}
1932 	if (bq != bd->bd_cleanq)
1933 		BD_UNLOCK(bd);
1934 }
1935 
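/*
 *	bd_flushall:
 *
 *	Flush every non-empty per-CPU clean subqueue into the domain's
 *	shared clean queue.  Returns the number of subqueues flushed, or
 *	0 if the domain does not use per-CPU queues.
 */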
1936 static int
1937 bd_flushall(struct bufdomain *bd)
1938 {
1939 	struct bufqueue *bq;
1940 	int flushed;
1941 	int i;
1942 
1943 	if (bd->bd_lim == 0)
1944 		return (0);
1945 	flushed = 0;
1946 	for (i = 0; i <= mp_maxid; i++) {
1947 		bq = &bd->bd_subq[i];
1948 		if (bq->bq_len == 0)
1949 			continue;
1950 		BQ_LOCK(bq);
1951 		bd_flush(bd, bq);
1952 		BQ_UNLOCK(bq);
1953 		flushed++;
1954 	}
1955 
1956 	return (flushed);
1957 }
1958 
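/*
 *	bq_insert:
 *
 *	Insert a buffer onto a free queue: at the head if B_AGE is set,
 *	at the tail otherwise.  If requested, the buffer lock is dropped
 *	before waiters are notified.  Per-CPU clean subqueues are flushed
 *	to the shared clean queue when waiters exist or the subqueue
 *	grows past its limit.
 */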
1959 static void
1960 bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock)
1961 {
1962 	struct bufdomain *bd;
1963 
1964 	if (bp->b_qindex != QUEUE_NONE)
1965 		panic("bq_insert: free buffer %p onto another queue?", bp);
1966 
1967 	bd = bufdomain(bp);
1968 	if (bp->b_flags & B_AGE) {
1969 		/* Place this buf directly on the real queue. */
1970 		if (bq->bq_index == QUEUE_CLEAN)
1971 			bq = bd->bd_cleanq;
1972 		BQ_LOCK(bq);
1973 		TAILQ_INSERT_HEAD(&bq->bq_queue, bp, b_freelist);
1974 	} else {
1975 		BQ_LOCK(bq);
1976 		TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
1977 	}
1978 	bp->b_flags &= ~(B_AGE | B_REUSE);
1979 	bq->bq_len++;
1980 	bp->b_qindex = bq->bq_index;
1981 	bp->b_subqueue = bq->bq_subqueue;
1982 
1983 	/*
1984 	 * Unlock before we notify so that we don't wakeup a waiter that
1985 	 * fails a trylock on the buf and sleeps again.
1986 	 */
1987 	if (unlock)
1988 		BUF_UNLOCK(bp);
1989 
1990 	if (bp->b_qindex == QUEUE_CLEAN) {
1991 		/*
1992 		 * Flush the per-cpu queue and notify any waiters.
1993 		 */
1994 		if (bd->bd_wanted || (bq != bd->bd_cleanq &&
1995 		    bq->bq_len >= bd->bd_lim))
1996 			bd_flush(bd, bq);
1997 	}
1998 	BQ_UNLOCK(bq);
1999 }
2000 
2001 /*
2002  *	bufkva_free:
2003  *
2004  *	Free the kva allocation for a buffer.
2005  *
2006  */
2007 static void
2008 bufkva_free(struct buf *bp)
2009 {
2010 
2011 #ifdef INVARIANTS
2012 	if (bp->b_kvasize == 0) {
2013 		KASSERT(bp->b_kvabase == unmapped_buf &&
2014 		    bp->b_data == unmapped_buf,
2015 		    ("Leaked KVA space on %p", bp));
2016 	} else if (buf_mapped(bp))
2017 		BUF_CHECK_MAPPED(bp);
2018 	else
2019 		BUF_CHECK_UNMAPPED(bp);
2020 #endif
2021 	if (bp->b_kvasize == 0)
2022 		return;
2023 
2024 	vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
2025 	counter_u64_add(bufkvaspace, -bp->b_kvasize);
2026 	counter_u64_add(buffreekvacnt, 1);
2027 	bp->b_data = bp->b_kvabase = unmapped_buf;
2028 	bp->b_kvasize = 0;
2029 }
2030 
2031 /*
2032  *	bufkva_alloc:
2033  *
2034  *	Allocate the buffer KVA and set b_kvasize and b_kvabase.
2035  */
2036 static int
2037 bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
2038 {
2039 	vm_offset_t addr;
2040 	int error;
2041 
2042 	KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
2043 	    ("Invalid gbflags 0x%x in %s", gbflags, __func__));
2044 	MPASS((bp->b_flags & B_MAXPHYS) == 0);
2045 	KASSERT(maxsize <= maxbcachebuf,
2046 	    ("bufkva_alloc kva too large %d %u", maxsize, maxbcachebuf));
2047 
2048 	bufkva_free(bp);
2049 
2050 	addr = 0;
2051 	error = vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr);
2052 	if (error != 0) {
2053 		/*
2054 		 * Buffer map is too fragmented.  Request the caller
2055 		 * to defragment the map.
2056 		 */
2057 		return (error);
2058 	}
2059 	bp->b_kvabase = (caddr_t)addr;
2060 	bp->b_kvasize = maxsize;
2061 	counter_u64_add(bufkvaspace, bp->b_kvasize);
2062 	if ((gbflags & GB_UNMAPPED) != 0) {
2063 		bp->b_data = unmapped_buf;
2064 		BUF_CHECK_UNMAPPED(bp);
2065 	} else {
2066 		bp->b_data = bp->b_kvabase;
2067 		BUF_CHECK_MAPPED(bp);
2068 	}
2069 	return (0);
2070 }
2071 
2072 /*
2073  *	bufkva_reclaim:
2074  *
2075  *	Reclaim buffer kva by freeing buffers holding kva.  This is a
2076  *	vmem reclamation callback, invoked before an allocation would fail.
2077  */
2078 static void
2079 bufkva_reclaim(vmem_t *vmem, int flags)
2080 {
2081 	bool done;
2082 	int q;
2083 	int i;
2084 
2085 	done = false;
2086 	for (i = 0; i < 5; i++) {
2087 		for (q = 0; q < buf_domains; q++)
2088 			if (buf_recycle(&bdomain[q], true) != 0)
2089 				done = true;
2090 		if (done)
2091 			break;
2092 	}
2093 	return;
2094 }
2095 
2096 /*
2097  * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
2098  * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
2099  * the buffer is valid and we do not have to do anything.
2100  */
2101 static void
2102 breada(struct vnode *vp, daddr_t *rablkno, int *rabsize, int cnt,
2103     struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *))
2104 {
2105 	struct buf *rabp;
2106 	struct thread *td;
2107 	int i;
2108 
2109 	td = curthread;
2110 
2111 	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
2112 		if (inmem(vp, *rablkno))
2113 			continue;
2114 		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
2115 		if ((rabp->b_flags & B_CACHE) != 0) {
2116 			brelse(rabp);
2117 			continue;
2118 		}
2119 #ifdef RACCT
2120 		if (racct_enable) {
2121 			PROC_LOCK(curproc);
2122 			racct_add_buf(curproc, rabp, 0);
2123 			PROC_UNLOCK(curproc);
2124 		}
2125 #endif /* RACCT */
2126 		td->td_ru.ru_inblock++;
2127 		rabp->b_flags |= B_ASYNC;
2128 		rabp->b_flags &= ~B_INVAL;
2129 		if ((flags & GB_CKHASH) != 0) {
2130 			rabp->b_flags |= B_CKHASH;
2131 			rabp->b_ckhashcalc = ckhashfunc;
2132 		}
2133 		rabp->b_ioflags &= ~BIO_ERROR;
2134 		rabp->b_iocmd = BIO_READ;
2135 		if (rabp->b_rcred == NOCRED && cred != NOCRED)
2136 			rabp->b_rcred = crhold(cred);
2137 		vfs_busy_pages(rabp, 0);
2138 		BUF_KERNPROC(rabp);
2139 		rabp->b_iooffset = dbtob(rabp->b_blkno);
2140 		bstrategy(rabp);
2141 	}
2142 }
2143 
2144 /*
2145  * Entry point for bread() and breadn() via #defines in sys/buf.h.
2146  *
2147  * Get a buffer with the specified data.  Look in the cache first.  We
2148  * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
2149  * is set, the buffer is valid and we do not have to do anything, see
2150  * getblk(). Also starts asynchronous I/O on read-ahead blocks.
2151  *
2152  * Always return a NULL buffer pointer (in bpp) when returning an error.
2153  *
2154  * The blkno parameter is the logical block being requested. Normally
2155  * the mapping of logical block number to disk block address is done
2156  * by calling VOP_BMAP(). However, if the mapping is already known, the
2157  * disk block address can be passed using the dblkno parameter. If the
2158  * disk block address is not known, then the same value should be passed
2159  * for blkno and dblkno.
2160  */
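/*
 * Example (illustrative only, not code from this file): a filesystem
 * performing a synchronous metadata read via the bread() wrapper does
 * roughly the following; the variable names are placeholders.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, size, NOCRED, &bp);
 *	if (error != 0)
 *		return (error);
 *	...use bp->b_data...
 *	brelse(bp);
 */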
2161 int
2162 breadn_flags(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size,
2163     daddr_t *rablkno, int *rabsize, int cnt, struct ucred *cred, int flags,
2164     void (*ckhashfunc)(struct buf *), struct buf **bpp)
2165 {
2166 	struct buf *bp;
2167 	struct thread *td;
2168 	int error, readwait, rv;
2169 
2170 	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
2171 	td = curthread;
2172 	/*
2173 	 * Can only return NULL if GB_LOCK_NOWAIT or GB_NOSPARSE flags
2174 	 * are specified.
2175 	 */
2176 	error = getblkx(vp, blkno, dblkno, size, 0, 0, flags, &bp);
2177 	if (error != 0) {
2178 		*bpp = NULL;
2179 		return (error);
2180 	}
2181 	KASSERT(blkno == bp->b_lblkno,
2182 	    ("getblkx returned buffer for blkno %jd instead of blkno %jd",
2183 	    (intmax_t)bp->b_lblkno, (intmax_t)blkno));
2184 	flags &= ~GB_NOSPARSE;
2185 	*bpp = bp;
2186 
2187 	/*
2188 	 * If not found in cache, do some I/O
2189 	 */
2190 	readwait = 0;
2191 	if ((bp->b_flags & B_CACHE) == 0) {
2192 #ifdef RACCT
2193 		if (racct_enable) {
2194 			PROC_LOCK(td->td_proc);
2195 			racct_add_buf(td->td_proc, bp, 0);
2196 			PROC_UNLOCK(td->td_proc);
2197 		}
2198 #endif /* RACCT */
2199 		td->td_ru.ru_inblock++;
2200 		bp->b_iocmd = BIO_READ;
2201 		bp->b_flags &= ~B_INVAL;
2202 		if ((flags & GB_CKHASH) != 0) {
2203 			bp->b_flags |= B_CKHASH;
2204 			bp->b_ckhashcalc = ckhashfunc;
2205 		}
2206 		if ((flags & GB_CVTENXIO) != 0)
2207 			bp->b_xflags |= BX_CVTENXIO;
2208 		bp->b_ioflags &= ~BIO_ERROR;
2209 		if (bp->b_rcred == NOCRED && cred != NOCRED)
2210 			bp->b_rcred = crhold(cred);
2211 		vfs_busy_pages(bp, 0);
2212 		bp->b_iooffset = dbtob(bp->b_blkno);
2213 		bstrategy(bp);
2214 		++readwait;
2215 	}
2216 
2217 	/*
2218 	 * Attempt to initiate asynchronous I/O on read-ahead blocks.
2219 	 */
2220 	breada(vp, rablkno, rabsize, cnt, cred, flags, ckhashfunc);
2221 
2222 	rv = 0;
2223 	if (readwait) {
2224 		rv = bufwait(bp);
2225 		if (rv != 0) {
2226 			brelse(bp);
2227 			*bpp = NULL;
2228 		}
2229 	}
2230 	return (rv);
2231 }
2232 
2233 /*
2234  * Write, release buffer on completion.  (Done by iodone
2235  * if async).  Do not bother writing anything if the buffer
2236  * is invalid.
2237  *
2238  * Note that we set B_CACHE here, indicating that buffer is
2239  * Note that we set B_CACHE here, indicating that the buffer is
2240  * now so we set it generally.  This could be set either here
2241  * or in biodone() since the I/O is synchronous.  We put it
2242  * here.
2243  */
2244 int
2245 bufwrite(struct buf *bp)
2246 {
2247 	int oldflags;
2248 	struct vnode *vp;
2249 	long space;
2250 	int vp_md;
2251 
2252 	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2253 	if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) {
2254 		bp->b_flags |= B_INVAL | B_RELBUF;
2255 		bp->b_flags &= ~B_CACHE;
2256 		brelse(bp);
2257 		return (ENXIO);
2258 	}
2259 	if (bp->b_flags & B_INVAL) {
2260 		brelse(bp);
2261 		return (0);
2262 	}
2263 
2264 	if (bp->b_flags & B_BARRIER)
2265 		atomic_add_long(&barrierwrites, 1);
2266 
2267 	oldflags = bp->b_flags;
2268 
2269 	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
2270 	    ("FFS background buffer should not get here %p", bp));
2271 
2272 	vp = bp->b_vp;
2273 	if (vp)
2274 		vp_md = vp->v_vflag & VV_MD;
2275 	else
2276 		vp_md = 0;
2277 
2278 	/*
2279 	 * Mark the buffer clean.  Increment the bufobj write count
2280 	 * before bundirty() call, to prevent other thread from seeing
2281 	 * empty dirty list and zero counter for writes in progress,
2282 	 * falsely indicating that the bufobj is clean.
2283 	 */
2284 	bufobj_wref(bp->b_bufobj);
2285 	bundirty(bp);
2286 
2287 	bp->b_flags &= ~B_DONE;
2288 	bp->b_ioflags &= ~BIO_ERROR;
2289 	bp->b_flags |= B_CACHE;
2290 	bp->b_iocmd = BIO_WRITE;
2291 
2292 	vfs_busy_pages(bp, 1);
2293 
2294 	/*
2295 	 * Normal bwrites pipeline writes
2296 	 */
2297 	bp->b_runningbufspace = bp->b_bufsize;
2298 	space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
2299 
2300 #ifdef RACCT
2301 	if (racct_enable) {
2302 		PROC_LOCK(curproc);
2303 		racct_add_buf(curproc, bp, 1);
2304 		PROC_UNLOCK(curproc);
2305 	}
2306 #endif /* RACCT */
2307 	curthread->td_ru.ru_oublock++;
2308 	if (oldflags & B_ASYNC)
2309 		BUF_KERNPROC(bp);
2310 	bp->b_iooffset = dbtob(bp->b_blkno);
2311 	buf_track(bp, __func__);
2312 	bstrategy(bp);
2313 
2314 	if ((oldflags & B_ASYNC) == 0) {
2315 		int rtval = bufwait(bp);
2316 		brelse(bp);
2317 		return (rtval);
2318 	} else if (space > hirunningspace) {
2319 		/*
2320 		 * don't allow the async write to saturate the I/O
2321 		 * system.  We will not deadlock here because
2322 		 * we are blocking waiting for I/O that is already in-progress
2323 		 * to complete. We do not block here if it is the update
2324 		 * or syncer daemon trying to clean up as that can lead
2325 		 * to deadlock.
2326 		 */
2327 		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
2328 			waitrunningbufspace();
2329 	}
2330 
2331 	return (0);
2332 }
2333 
2334 void
2335 bufbdflush(struct bufobj *bo, struct buf *bp)
2336 {
2337 	struct buf *nbp;
2338 	struct bufdomain *bd;
2339 
2340 	bd = &bdomain[bo->bo_domain];
2341 	if (bo->bo_dirty.bv_cnt > bd->bd_dirtybufthresh + 10) {
2342 		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
2343 		altbufferflushes++;
2344 	} else if (bo->bo_dirty.bv_cnt > bd->bd_dirtybufthresh) {
2345 		BO_LOCK(bo);
2346 		/*
2347 		 * Try to find a buffer to flush.
2348 		 */
2349 		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
2350 			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
2351 			    BUF_LOCK(nbp,
2352 				     LK_EXCLUSIVE | LK_NOWAIT, NULL))
2353 				continue;
2354 			if (bp == nbp)
2355 				panic("bdwrite: found ourselves");
2356 			BO_UNLOCK(bo);
2357 			/* Don't call buf_countdeps() with the bo lock held. */
2358 			if (buf_countdeps(nbp, 0)) {
2359 				BO_LOCK(bo);
2360 				BUF_UNLOCK(nbp);
2361 				continue;
2362 			}
2363 			if (nbp->b_flags & B_CLUSTEROK) {
2364 				vfs_bio_awrite(nbp);
2365 			} else {
2366 				bremfree(nbp);
2367 				bawrite(nbp);
2368 			}
2369 			dirtybufferflushes++;
2370 			break;
2371 		}
2372 		if (nbp == NULL)
2373 			BO_UNLOCK(bo);
2374 	}
2375 }
2376 
2377 /*
2378  * Delayed write. (Buffer is marked dirty).  Do not bother writing
2379  * anything if the buffer is marked invalid.
2380  *
2381  * Note that since the buffer must be completely valid, we can safely
2382  * set B_CACHE.  In fact, we have to set B_CACHE here rather then in
2383  * biodone() in order to prevent getblk from writing the buffer
2384  * out synchronously.
2385  */
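/*
 * Example (illustrative only): the usual read-modify-delayed-write cycle
 * in a filesystem looks roughly like this; names are placeholders.
 *
 *	error = bread(vp, lbn, size, NOCRED, &bp);
 *	if (error != 0)
 *		return (error);
 *	...modify bp->b_data...
 *	bdwrite(bp);	after this call bp is no longer owned by the caller
 */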
2386 void
2387 bdwrite(struct buf *bp)
2388 {
2389 	struct thread *td = curthread;
2390 	struct vnode *vp;
2391 	struct bufobj *bo;
2392 
2393 	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2394 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2395 	KASSERT((bp->b_flags & B_BARRIER) == 0,
2396 	    ("Barrier request in delayed write %p", bp));
2397 
2398 	if (bp->b_flags & B_INVAL) {
2399 		brelse(bp);
2400 		return;
2401 	}
2402 
2403 	/*
2404 	 * If we have too many dirty buffers, don't create any more.
2405 	 * If we are wildly over our limit, then force a complete
2406 	 * cleanup. Otherwise, just keep the situation from getting
2407 	 * out of control. Note that we have to avoid a recursive
2408 	 * disaster and not try to clean up after our own cleanup!
2409 	 */
2410 	vp = bp->b_vp;
2411 	bo = bp->b_bufobj;
2412 	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
2413 		td->td_pflags |= TDP_INBDFLUSH;
2414 		BO_BDFLUSH(bo, bp);
2415 		td->td_pflags &= ~TDP_INBDFLUSH;
2416 	} else
2417 		recursiveflushes++;
2418 
2419 	bdirty(bp);
2420 	/*
2421 	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
2422 	 * true even of NFS now.
2423 	 */
2424 	bp->b_flags |= B_CACHE;
2425 
2426 	/*
2427 	 * This bmap keeps the system from needing to do the bmap later,
2428 	 * perhaps when the system is attempting to do a sync.  Since it
2429 	 * is likely that the indirect block -- or whatever other data structure
2430 	 * the filesystem needs -- is still in memory now, it is a good
2431 	 * thing to do this.  Note also, that if the pageout daemon is
2432 	 * requesting a sync -- there might not be enough memory to do
2433 	 * the bmap then...  So, this is important to do.
2434 	 */
2435 	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
2436 		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
2437 	}
2438 
2439 	buf_track(bp, __func__);
2440 
2441 	/*
2442 	 * Set the *dirty* buffer range based upon the VM system dirty
2443 	 * pages.
2444 	 *
2445 	 * Mark the buffer pages as clean.  We need to do this here to
2446 	 * satisfy the vnode_pager and the pageout daemon, so that it
2447 	 * thinks that the pages have been "cleaned".  Note that since
2448 	 * the pages are in a delayed write buffer -- the VFS layer
2449 	 * "will" see that the pages get written out on the next sync,
2450 	 * or perhaps the cluster will be completed.
2451 	 */
2452 	vfs_clean_pages_dirty_buf(bp);
2453 	bqrelse(bp);
2454 
2455 	/*
2456 	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
2457 	 * due to the softdep code.
2458 	 */
2459 }
2460 
2461 /*
2462  *	bdirty:
2463  *
2464  *	Turn buffer into delayed write request.  We must clear BIO_READ and
2465  *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
2466  *	itself to properly update it in the dirty/clean lists.  We mark it
2467  *	B_DONE to ensure that any asynchronization of the buffer properly
2468  *	clears B_DONE ( else a panic will occur later ).
2469  *
2470  *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
2471  *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
2472  *	should only be called if the buffer is known-good.
2473  *
2474  *	Since the buffer is not on a queue, we do not update the numfreebuffers
2475  *	count.
2476  *
2477  *	The buffer must be on QUEUE_NONE.
2478  */
2479 void
2480 bdirty(struct buf *bp)
2481 {
2482 
2483 	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
2484 	    bp, bp->b_vp, bp->b_flags);
2485 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2486 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2487 	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
2488 	bp->b_flags &= ~(B_RELBUF);
2489 	bp->b_iocmd = BIO_WRITE;
2490 
2491 	if ((bp->b_flags & B_DELWRI) == 0) {
2492 		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
2493 		reassignbuf(bp);
2494 		bdirtyadd(bp);
2495 	}
2496 }
2497 
2498 /*
2499  *	bundirty:
2500  *
2501  *	Clear B_DELWRI for buffer.
2502  *
2503  *	Since the buffer is not on a queue, we do not update the numfreebuffers
2504  *	count.
2505  *
2506  *	The buffer must be on QUEUE_NONE.
2507  */
2508 
2509 void
2510 bundirty(struct buf *bp)
2511 {
2512 
2513 	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2514 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2515 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2516 	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
2517 
2518 	if (bp->b_flags & B_DELWRI) {
2519 		bp->b_flags &= ~B_DELWRI;
2520 		reassignbuf(bp);
2521 		bdirtysub(bp);
2522 	}
2523 	/*
2524 	 * Since it is now being written, we can clear its deferred write flag.
2525 	 */
2526 	bp->b_flags &= ~B_DEFERRED;
2527 }
2528 
2529 /*
2530  *	bawrite:
2531  *
2532  *	Asynchronous write.  Start output on a buffer, but do not wait for
2533  *	it to complete.  The buffer is released when the output completes.
2534  *
2535  *	bwrite() ( or the VOP routine anyway ) is responsible for handling
2536  *	B_INVAL buffers.  Not us.
2537  */
2538 void
2539 bawrite(struct buf *bp)
2540 {
2541 
2542 	bp->b_flags |= B_ASYNC;
2543 	(void) bwrite(bp);
2544 }
2545 
2546 /*
2547  *	babarrierwrite:
2548  *
2549  *	Asynchronous barrier write.  Start output on a buffer, but do not
2550  *	wait for it to complete.  Place a write barrier after this write so
2551  *	that this buffer and all buffers written before it are committed to
2552  *	the disk before any buffers written after this write are committed
2553  *	to the disk.  The buffer is released when the output completes.
2554  */
2555 void
2556 babarrierwrite(struct buf *bp)
2557 {
2558 
2559 	bp->b_flags |= B_ASYNC | B_BARRIER;
2560 	(void) bwrite(bp);
2561 }
2562 
2563 /*
2564  *	bbarrierwrite:
2565  *
2566  *	Synchronous barrier write.  Start output on a buffer and wait for
2567  *	it to complete.  Place a write barrier after this write so that
2568  *	this buffer and all buffers written before it are committed to
2569  *	the disk before any buffers written after this write are committed
2570  *	to the disk.  The buffer is released when the output completes.
2571  */
2572 int
2573 bbarrierwrite(struct buf *bp)
2574 {
2575 
2576 	bp->b_flags |= B_BARRIER;
2577 	return (bwrite(bp));
2578 }
2579 
2580 /*
2581  *	bwillwrite:
2582  *
2583  *	Called prior to the locking of any vnodes when we are expecting to
2584  *	write.  We do not want to starve the buffer cache with too many
2585  *	dirty buffers so we block here.  By blocking prior to the locking
2586  *	of any vnodes we attempt to avoid the situation where a locked vnode
2587  *	prevents the various system daemons from flushing related buffers.
2588  */
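/*
 * Example (illustrative only): write paths typically throttle themselves
 * before taking any vnode locks, roughly:
 *
 *	bwillwrite();
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	...dirty some buffers...
 *	VOP_UNLOCK(vp);
 */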
2589 void
2590 bwillwrite(void)
2591 {
2592 
2593 	if (buf_dirty_count_severe()) {
2594 		mtx_lock(&bdirtylock);
2595 		while (buf_dirty_count_severe()) {
2596 			bdirtywait = 1;
2597 			msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
2598 			    "flswai", 0);
2599 		}
2600 		mtx_unlock(&bdirtylock);
2601 	}
2602 }
2603 
2604 /*
2605  * Return true if we have too many dirty buffers.
2606  */
2607 int
2608 buf_dirty_count_severe(void)
2609 {
2610 
2611 	return (!BIT_EMPTY(BUF_DOMAINS, &bdhidirty));
2612 }
2613 
2614 /*
2615  *	brelse:
2616  *
2617  *	Release a busy buffer and, if requested, free its resources.  The
2618  *	buffer will be stashed in the appropriate bufqueue[] allowing it
2619  *	to be accessed later as a cache entity or reused for other purposes.
2620  */
2621 void
2622 brelse(struct buf *bp)
2623 {
2624 	struct mount *v_mnt;
2625 	int qindex;
2626 
2627 	/*
2628 	 * Many functions erroneously call brelse with a NULL bp under rare
2629 	 * error conditions. Simply return when called with a NULL bp.
2630 	 */
2631 	if (bp == NULL)
2632 		return;
2633 	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
2634 	    bp, bp->b_vp, bp->b_flags);
2635 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2636 	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2637 	KASSERT((bp->b_flags & B_VMIO) != 0 || (bp->b_flags & B_NOREUSE) == 0,
2638 	    ("brelse: non-VMIO buffer marked NOREUSE"));
2639 
2640 	if (BUF_LOCKRECURSED(bp)) {
2641 		/*
2642 		 * Do not process, in particular, do not handle the
2643 		 * B_INVAL/B_RELBUF and do not release to free list.
2644 		 */
2645 		BUF_UNLOCK(bp);
2646 		return;
2647 	}
2648 
2649 	if (bp->b_flags & B_MANAGED) {
2650 		bqrelse(bp);
2651 		return;
2652 	}
2653 
2654 	if (LIST_EMPTY(&bp->b_dep)) {
2655 		bp->b_flags &= ~B_IOSTARTED;
2656 	} else {
2657 		KASSERT((bp->b_flags & B_IOSTARTED) == 0,
2658 		    ("brelse: SU io not finished bp %p", bp));
2659 	}
2660 
2661 	if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
2662 		BO_LOCK(bp->b_bufobj);
2663 		bp->b_vflags &= ~BV_BKGRDERR;
2664 		BO_UNLOCK(bp->b_bufobj);
2665 		bdirty(bp);
2666 	}
2667 
2668 	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2669 	    (bp->b_flags & B_INVALONERR)) {
2670 		/*
2671 		 * Forced invalidation of dirty buffer contents, to be used
2672 		 * after a failed write in the rare case that the loss of the
2673 		 * contents is acceptable.  The buffer is invalidated and
2674 		 * freed.
2675 		 */
2676 		bp->b_flags |= B_INVAL | B_RELBUF | B_NOCACHE;
2677 		bp->b_flags &= ~(B_ASYNC | B_CACHE);
2678 	}
2679 
2680 	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2681 	    (bp->b_error != ENXIO || !LIST_EMPTY(&bp->b_dep)) &&
2682 	    !(bp->b_flags & B_INVAL)) {
2683 		/*
2684 		 * Failed write, redirty.  All errors except ENXIO (which
2685 		 * means the device is gone) are treated as being
2686 		 * transient.
2687 		 *
2688 		 * XXX Treating EIO as transient is not correct; the
2689 		 * contract with the local storage device drivers is that
2690 		 * they will only return EIO once the I/O is no longer
2691 		 * retriable.  Network I/O also respects this through the
2692 		 * guarantees of TCP and/or the internal retries of NFS.
2693 		 * ENOMEM might be transient, but we also have no way of
2694 		 * knowing when its ok to retry/reschedule.  In general,
2695 		 * this entire case should be made obsolete through better
2696 		 * error handling/recovery and resource scheduling.
2697 		 *
2698 		 * Do this also for buffers that failed with ENXIO, but have
2699 		 * non-empty dependencies - the soft updates code might need
2700 		 * to access the buffer to untangle them.
2701 		 *
2702 		 * Must clear BIO_ERROR to prevent pages from being scrapped.
2703 		 */
2704 		bp->b_ioflags &= ~BIO_ERROR;
2705 		bdirty(bp);
2706 	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
2707 	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
2708 		/*
2709 		 * Either a failed read I/O, or we were asked to free or not
2710 		 * cache the buffer, or we failed to write to a device that's
2711 		 * no longer present.
2712 		 */
2713 		bp->b_flags |= B_INVAL;
2714 		if (!LIST_EMPTY(&bp->b_dep))
2715 			buf_deallocate(bp);
2716 		if (bp->b_flags & B_DELWRI)
2717 			bdirtysub(bp);
2718 		bp->b_flags &= ~(B_DELWRI | B_CACHE);
2719 		if ((bp->b_flags & B_VMIO) == 0) {
2720 			allocbuf(bp, 0);
2721 			if (bp->b_vp)
2722 				brelvp(bp);
2723 		}
2724 	}
2725 
2726 	/*
2727 	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_truncate()
2728 	 * is called with B_DELWRI set, the underlying pages may wind up
2729 	 * getting freed causing a previous write (bdwrite()) to get 'lost'
2730 	 * because pages associated with a B_DELWRI bp are marked clean.
2731 	 *
2732 	 * We still allow the B_INVAL case to call vfs_vmio_truncate(), even
2733 	 * if B_DELWRI is set.
2734 	 */
2735 	if (bp->b_flags & B_DELWRI)
2736 		bp->b_flags &= ~B_RELBUF;
2737 
2738 	/*
2739 	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
2740 	 * constituted, not even NFS buffers now.  Two flags affect this.  If
2741 	 * B_INVAL, the struct buf is invalidated but the VM object is kept
2742 	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
2743 	 *
2744 	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
2745 	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
2746 	 * buffer is also B_INVAL because it hits the re-dirtying code above.
2747 	 *
2748 	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
2749 	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
2750 	 * the commit state and we cannot afford to lose the buffer. If the
2751 	 * buffer has a background write in progress, we need to keep it
2752 	 * around to prevent it from being reconstituted and starting a second
2753 	 * background write.
2754 	 */
2755 
2756 	v_mnt = bp->b_vp != NULL ? bp->b_vp->v_mount : NULL;
2757 
2758 	if ((bp->b_flags & B_VMIO) && (bp->b_flags & B_NOCACHE ||
2759 	    (bp->b_ioflags & BIO_ERROR && bp->b_iocmd == BIO_READ)) &&
2760 	    (v_mnt == NULL || (v_mnt->mnt_vfc->vfc_flags & VFCF_NETWORK) == 0 ||
2761 	    vn_isdisk(bp->b_vp) || (bp->b_flags & B_DELWRI) == 0)) {
2762 		vfs_vmio_invalidate(bp);
2763 		allocbuf(bp, 0);
2764 	}
2765 
2766 	if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0 ||
2767 	    (bp->b_flags & (B_DELWRI | B_NOREUSE)) == B_NOREUSE) {
2768 		allocbuf(bp, 0);
2769 		bp->b_flags &= ~B_NOREUSE;
2770 		if (bp->b_vp != NULL)
2771 			brelvp(bp);
2772 	}
2773 
2774 	/*
2775 	 * If the buffer has junk contents, signal it and eventually
2776 	 * clean up B_DELWRI and disassociate the vnode so that gbincore()
2777 	 * doesn't find it.
2778 	 */
2779 	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
2780 	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
2781 		bp->b_flags |= B_INVAL;
2782 	if (bp->b_flags & B_INVAL) {
2783 		if (bp->b_flags & B_DELWRI)
2784 			bundirty(bp);
2785 		if (bp->b_vp)
2786 			brelvp(bp);
2787 	}
2788 
2789 	buf_track(bp, __func__);
2790 
2791 	/* buffers with no memory */
2792 	if (bp->b_bufsize == 0) {
2793 		buf_free(bp);
2794 		return;
2795 	}
2796 	/* buffers with junk contents */
2797 	if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
2798 	    (bp->b_ioflags & BIO_ERROR)) {
2799 		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
2800 		if (bp->b_vflags & BV_BKGRDINPROG)
2801 			panic("losing buffer 2");
2802 		qindex = QUEUE_CLEAN;
2803 		bp->b_flags |= B_AGE;
2804 	/* remaining buffers */
2805 	} else if (bp->b_flags & B_DELWRI)
2806 		qindex = QUEUE_DIRTY;
2807 	else
2808 		qindex = QUEUE_CLEAN;
2809 
2810 	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
2811 		panic("brelse: not dirty");
2812 
2813 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_RELBUF | B_DIRECT);
2814 	bp->b_xflags &= ~(BX_CVTENXIO);
2815 	/* binsfree unlocks bp. */
2816 	binsfree(bp, qindex);
2817 }
2818 
2819 /*
2820  * Release a buffer back to the appropriate queue but do not try to free
2821  * it.  The buffer is expected to be used again soon.
2822  *
2823  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
2824  * biodone() to requeue an async I/O on completion.  It is also used when
2825  * known good buffers need to be requeued but we think we may need the data
2826  * again soon.
2827  *
2828  * XXX we should be able to leave the B_RELBUF hint set on completion.
2829  */
2830 void
2831 bqrelse(struct buf *bp)
2832 {
2833 	int qindex;
2834 
2835 	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2836 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2837 	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2838 
2839 	qindex = QUEUE_NONE;
2840 	if (BUF_LOCKRECURSED(bp)) {
2841 		/* do not release to free list */
2842 		BUF_UNLOCK(bp);
2843 		return;
2844 	}
2845 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
2846 	bp->b_xflags &= ~(BX_CVTENXIO);
2847 
2848 	if (LIST_EMPTY(&bp->b_dep)) {
2849 		bp->b_flags &= ~B_IOSTARTED;
2850 	} else {
2851 		KASSERT((bp->b_flags & B_IOSTARTED) == 0,
2852 		    ("bqrelse: SU io not finished bp %p", bp));
2853 	}
2854 
2855 	if (bp->b_flags & B_MANAGED) {
2856 		if (bp->b_flags & B_REMFREE)
2857 			bremfreef(bp);
2858 		goto out;
2859 	}
2860 
2861 	/* buffers with stale but valid contents */
2862 	if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
2863 	    BV_BKGRDERR)) == BV_BKGRDERR) {
2864 		BO_LOCK(bp->b_bufobj);
2865 		bp->b_vflags &= ~BV_BKGRDERR;
2866 		BO_UNLOCK(bp->b_bufobj);
2867 		qindex = QUEUE_DIRTY;
2868 	} else {
2869 		if ((bp->b_flags & B_DELWRI) == 0 &&
2870 		    (bp->b_xflags & BX_VNDIRTY))
2871 			panic("bqrelse: not dirty");
2872 		if ((bp->b_flags & B_NOREUSE) != 0) {
2873 			brelse(bp);
2874 			return;
2875 		}
2876 		qindex = QUEUE_CLEAN;
2877 	}
2878 	buf_track(bp, __func__);
2879 	/* binsfree unlocks bp. */
2880 	binsfree(bp, qindex);
2881 	return;
2882 
2883 out:
2884 	buf_track(bp, __func__);
2885 	/* unlock */
2886 	BUF_UNLOCK(bp);
2887 }
2888 
2889 /*
2890  * Complete I/O to a VMIO backed page.  Validate the pages as appropriate,
2891  * restore bogus pages.
2892  */
2893 static void
2894 vfs_vmio_iodone(struct buf *bp)
2895 {
2896 	vm_ooffset_t foff;
2897 	vm_page_t m;
2898 	vm_object_t obj;
2899 	struct vnode *vp __unused;
2900 	int i, iosize, resid;
2901 	bool bogus;
2902 
2903 	obj = bp->b_bufobj->bo_object;
2904 	KASSERT(blockcount_read(&obj->paging_in_progress) >= bp->b_npages,
2905 	    ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
2906 	    blockcount_read(&obj->paging_in_progress), bp->b_npages));
2907 
2908 	vp = bp->b_vp;
2909 	VNPASS(vp->v_holdcnt > 0, vp);
2910 	VNPASS(vp->v_object != NULL, vp);
2911 
2912 	foff = bp->b_offset;
2913 	KASSERT(bp->b_offset != NOOFFSET,
2914 	    ("vfs_vmio_iodone: bp %p has no buffer offset", bp));
2915 
2916 	bogus = false;
2917 	iosize = bp->b_bcount - bp->b_resid;
2918 	for (i = 0; i < bp->b_npages; i++) {
2919 		resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
2920 		if (resid > iosize)
2921 			resid = iosize;
2922 
2923 		/*
2924 		 * cleanup bogus pages, restoring the originals
2925 		 */
2926 		m = bp->b_pages[i];
2927 		if (m == bogus_page) {
2928 			bogus = true;
2929 			m = vm_page_relookup(obj, OFF_TO_IDX(foff));
2930 			if (m == NULL)
2931 				panic("biodone: page disappeared!");
2932 			bp->b_pages[i] = m;
2933 		} else if ((bp->b_iocmd == BIO_READ) && resid > 0) {
2934 			/*
2935 			 * In the write case, the valid and clean bits are
2936 			 * already changed correctly ( see bdwrite() ), so we
2937 			 * only need to do this here in the read case.
2938 			 */
2939 			KASSERT((m->dirty & vm_page_bits(foff & PAGE_MASK,
2940 			    resid)) == 0, ("vfs_vmio_iodone: page %p "
2941 			    "has unexpected dirty bits", m));
2942 			vfs_page_set_valid(bp, foff, m);
2943 		}
2944 		KASSERT(OFF_TO_IDX(foff) == m->pindex,
2945 		    ("vfs_vmio_iodone: foff(%jd)/pindex(%ju) mismatch",
2946 		    (intmax_t)foff, (uintmax_t)m->pindex));
2947 
2948 		vm_page_sunbusy(m);
2949 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2950 		iosize -= resid;
2951 	}
2952 	vm_object_pip_wakeupn(obj, bp->b_npages);
2953 	if (bogus && buf_mapped(bp)) {
2954 		BUF_CHECK_MAPPED(bp);
2955 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
2956 		    bp->b_pages, bp->b_npages);
2957 	}
2958 }
2959 
2960 /*
2961  * Perform page invalidation when a buffer is released.  The fully invalid
2962  * pages will be reclaimed later in vfs_vmio_truncate().
2963  */
2964 static void
2965 vfs_vmio_invalidate(struct buf *bp)
2966 {
2967 	vm_object_t obj;
2968 	vm_page_t m;
2969 	int flags, i, resid, poffset, presid;
2970 
2971 	if (buf_mapped(bp)) {
2972 		BUF_CHECK_MAPPED(bp);
2973 		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
2974 	} else
2975 		BUF_CHECK_UNMAPPED(bp);
2976 	/*
2977 	 * Get the base offset and length of the buffer.  Note that
2978 	 * in the VMIO case if the buffer block size is not
2979 	 * page-aligned then b_data pointer may not be page-aligned.
2980 	 * But our b_pages[] array *IS* page aligned.
2981 	 *
2982 	 * Block sizes less than DEV_BSIZE (usually 512) are not
2983 	 * supported due to the page granularity bits (m->valid,
2984 	 * m->dirty, etc...).
2985 	 *
2986 	 * See man buf(9) for more information
2987 	 */
2988 	flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
2989 	obj = bp->b_bufobj->bo_object;
2990 	resid = bp->b_bufsize;
2991 	poffset = bp->b_offset & PAGE_MASK;
2992 	VM_OBJECT_WLOCK(obj);
2993 	for (i = 0; i < bp->b_npages; i++) {
2994 		m = bp->b_pages[i];
2995 		if (m == bogus_page)
2996 			panic("vfs_vmio_invalidate: Unexpected bogus page.");
2997 		bp->b_pages[i] = NULL;
2998 
2999 		presid = resid > (PAGE_SIZE - poffset) ?
3000 		    (PAGE_SIZE - poffset) : resid;
3001 		KASSERT(presid >= 0, ("brelse: extra page"));
3002 		vm_page_busy_acquire(m, VM_ALLOC_SBUSY);
3003 		if (pmap_page_wired_mappings(m) == 0)
3004 			vm_page_set_invalid(m, poffset, presid);
3005 		vm_page_sunbusy(m);
3006 		vm_page_release_locked(m, flags);
3007 		resid -= presid;
3008 		poffset = 0;
3009 	}
3010 	VM_OBJECT_WUNLOCK(obj);
3011 	bp->b_npages = 0;
3012 }
3013 
3014 /*
3015  * Page-granular truncation of an existing VMIO buffer.
3016  */
3017 static void
3018 vfs_vmio_truncate(struct buf *bp, int desiredpages)
3019 {
3020 	vm_object_t obj;
3021 	vm_page_t m;
3022 	int flags, i;
3023 
3024 	if (bp->b_npages == desiredpages)
3025 		return;
3026 
3027 	if (buf_mapped(bp)) {
3028 		BUF_CHECK_MAPPED(bp);
3029 		pmap_qremove((vm_offset_t)trunc_page((vm_offset_t)bp->b_data) +
3030 		    (desiredpages << PAGE_SHIFT), bp->b_npages - desiredpages);
3031 	} else
3032 		BUF_CHECK_UNMAPPED(bp);
3033 
3034 	/*
3035 	 * The object lock is needed only if we will attempt to free pages.
3036 	 */
3037 	flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
3038 	if ((bp->b_flags & B_DIRECT) != 0) {
3039 		flags |= VPR_TRYFREE;
3040 		obj = bp->b_bufobj->bo_object;
3041 		VM_OBJECT_WLOCK(obj);
3042 	} else {
3043 		obj = NULL;
3044 	}
3045 	for (i = desiredpages; i < bp->b_npages; i++) {
3046 		m = bp->b_pages[i];
3047 		KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
3048 		bp->b_pages[i] = NULL;
3049 		if (obj != NULL)
3050 			vm_page_release_locked(m, flags);
3051 		else
3052 			vm_page_release(m, flags);
3053 	}
3054 	if (obj != NULL)
3055 		VM_OBJECT_WUNLOCK(obj);
3056 	bp->b_npages = desiredpages;
3057 }
3058 
3059 /*
3060  * Byte granular extension of VMIO buffers.
3061  */
3062 static void
3063 vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
3064 {
3065 	/*
3066 	 * We are growing the buffer, possibly in a
3067 	 * byte-granular fashion.
3068 	 */
3069 	vm_object_t obj;
3070 	vm_offset_t toff;
3071 	vm_offset_t tinc;
3072 	vm_page_t m;
3073 
3074 	/*
3075 	 * Step 1, bring in the VM pages from the object, allocating
3076 	 * them if necessary.  We must clear B_CACHE if these pages
3077 	 * are not valid for the range covered by the buffer.
3078 	 */
3079 	obj = bp->b_bufobj->bo_object;
3080 	if (bp->b_npages < desiredpages) {
3081 		KASSERT(desiredpages <= atop(maxbcachebuf),
3082 		    ("vfs_vmio_extend past maxbcachebuf %p %d %u",
3083 		    bp, desiredpages, maxbcachebuf));
3084 
3085 		/*
3086 		 * We must allocate system pages since blocking
3087 		 * here could interfere with paging I/O, no
3088 		 * matter which process we are.
3089 		 *
3090 		 * Only exclusive busy can be tested here.
3091 		 * Blocking on shared busy might lead to
3092 		 * deadlocks once allocbuf() is called after
3093 		 * pages are vfs_busy_pages().
3094 		 */
3095 		(void)vm_page_grab_pages_unlocked(obj,
3096 		    OFF_TO_IDX(bp->b_offset) + bp->b_npages,
3097 		    VM_ALLOC_SYSTEM | VM_ALLOC_IGN_SBUSY |
3098 		    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED,
3099 		    &bp->b_pages[bp->b_npages], desiredpages - bp->b_npages);
3100 		bp->b_npages = desiredpages;
3101 	}
3102 
3103 	/*
3104 	 * Step 2.  We've loaded the pages into the buffer,
3105 	 * we have to figure out if we can still have B_CACHE
3106 	 * set.  Note that B_CACHE is set according to the
3107 	 * byte-granular range ( bcount and size ), not the
3108 	 * aligned range ( newbsize ).
3109 	 *
3110 	 * The VM test is against m->valid, which is DEV_BSIZE
3111 	 * aligned.  Needless to say, the validity of the data
3112 	 * needs to also be DEV_BSIZE aligned.  Note that this
3113 	 * fails with NFS if the server or some other client
3114 	 * extends the file's EOF.  If our buffer is resized,
3115 	 * B_CACHE may remain set! XXX
3116 	 */
3117 	toff = bp->b_bcount;
3118 	tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
3119 	while ((bp->b_flags & B_CACHE) && toff < size) {
3120 		vm_pindex_t pi;
3121 
3122 		if (tinc > (size - toff))
3123 			tinc = size - toff;
3124 		pi = ((bp->b_offset & PAGE_MASK) + toff) >> PAGE_SHIFT;
3125 		m = bp->b_pages[pi];
3126 		vfs_buf_test_cache(bp, bp->b_offset, toff, tinc, m);
3127 		toff += tinc;
3128 		tinc = PAGE_SIZE;
3129 	}
3130 
3131 	/*
3132 	 * Step 3, fixup the KVA pmap.
3133 	 */
3134 	if (buf_mapped(bp))
3135 		bpmap_qenter(bp);
3136 	else
3137 		BUF_CHECK_UNMAPPED(bp);
3138 }
3139 
3140 /*
3141  * Check to see if a block at a particular lbn is available for a clustered
3142  * write.
3143  */
3144 static int
3145 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
3146 {
3147 	struct buf *bpa;
3148 	int match;
3149 
3150 	match = 0;
3151 
3152 	/* If the buf isn't in core skip it */
3153 	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
3154 		return (0);
3155 
3156 	/* If the buf is busy we don't want to wait for it */
3157 	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
3158 		return (0);
3159 
3160 	/* Only cluster with valid clusterable delayed write buffers */
3161 	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
3162 	    (B_DELWRI | B_CLUSTEROK))
3163 		goto done;
3164 
3165 	if (bpa->b_bufsize != size)
3166 		goto done;
3167 
3168 	/*
3169 	 * Check to see if it is in the expected place on disk and that the
3170 	 * block has been mapped.
3171 	 */
3172 	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
3173 		match = 1;
3174 done:
3175 	BUF_UNLOCK(bpa);
3176 	return (match);
3177 }
3178 
3179 /*
3180  *	vfs_bio_awrite:
3181  *
3182  *	Implement clustered async writes for clearing out B_DELWRI buffers.
3183  *	This is much better than the old way of writing only one buffer at
3184  *	a time.  Note that we may not be presented with the buffers in the
3185  *	correct order, so we search for the cluster in both directions.
3186  */
3187 int
3188 vfs_bio_awrite(struct buf *bp)
3189 {
3190 	struct bufobj *bo;
3191 	int i;
3192 	int j;
3193 	daddr_t lblkno = bp->b_lblkno;
3194 	struct vnode *vp = bp->b_vp;
3195 	int ncl;
3196 	int nwritten;
3197 	int size;
3198 	int maxcl;
3199 	int gbflags;
3200 
3201 	bo = &vp->v_bufobj;
3202 	gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
3203 	/*
3204 	 * Right now we support clustered writing only to regular files.  If
3205 	 * we find a clusterable block we could be in the middle of a cluster
3206 	 * rather than at the beginning.
3207 	 */
3208 	if ((vp->v_type == VREG) &&
3209 	    (vp->v_mount != NULL) && /* Only on nodes that have the size info */
3210 	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
3211 		size = vp->v_mount->mnt_stat.f_iosize;
3212 		maxcl = maxphys / size;
3213 
3214 		BO_RLOCK(bo);
3215 		for (i = 1; i < maxcl; i++)
3216 			if (vfs_bio_clcheck(vp, size, lblkno + i,
3217 			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
3218 				break;
3219 
3220 		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
3221 			if (vfs_bio_clcheck(vp, size, lblkno - j,
3222 			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
3223 				break;
3224 		BO_RUNLOCK(bo);
3225 		--j;
3226 		ncl = i + j;
3227 		/*
3228 		 * this is a possible cluster write
3229 		 */
3230 		if (ncl != 1) {
3231 			BUF_UNLOCK(bp);
3232 			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
3233 			    gbflags);
3234 			return (nwritten);
3235 		}
3236 	}
3237 	bremfree(bp);
3238 	bp->b_flags |= B_ASYNC;
3239 	/*
3240 	 * default (old) behavior, writing out only one block
3241 	 *
3242 	 * XXX returns b_bufsize instead of b_bcount for nwritten?
3243 	 */
3244 	nwritten = bp->b_bufsize;
3245 	(void) bwrite(bp);
3246 
3247 	return (nwritten);
3248 }
3249 
3250 /*
3251  *	getnewbuf_kva:
3252  *
3253  *	Allocate KVA for an empty buf header according to gbflags.
3254  */
3255 static int
3256 getnewbuf_kva(struct buf *bp, int gbflags, int maxsize)
3257 {
3258 
3259 	if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_UNMAPPED) {
3260 		/*
3261 		 * In order to keep fragmentation sane we only allocate kva
3262 		 * in BKVASIZE chunks.  XXX with vmem we can do page size.
3263 		 */
3264 		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
3265 
3266 		if (maxsize != bp->b_kvasize &&
3267 		    bufkva_alloc(bp, maxsize, gbflags))
3268 			return (ENOSPC);
3269 	}
3270 	return (0);
3271 }
3272 
3273 /*
3274  *	getnewbuf:
3275  *
3276  *	Find and initialize a new buffer header, freeing up existing buffers
3277  *	in the bufqueues as necessary.  The new buffer is returned locked.
3278  *
3279  *	We block if:
3280  *		We have insufficient buffer headers
3281  *		We have insufficient buffer space
3282  *		buffer_arena is too fragmented ( space reservation fails )
3283  *		If we have to flush dirty buffers ( but we try to avoid this )
3284  *
3285  *	The caller is responsible for releasing the reserved bufspace after
3286  *	allocbuf() is called.
3287  */
3288 static struct buf *
3289 getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int maxsize, int gbflags)
3290 {
3291 	struct bufdomain *bd;
3292 	struct buf *bp;
3293 	bool metadata, reserved;
3294 
3295 	bp = NULL;
3296 	KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3297 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3298 	if (!unmapped_buf_allowed)
3299 		gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3300 
3301 	if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
3302 	    vp->v_type == VCHR)
3303 		metadata = true;
3304 	else
3305 		metadata = false;
3306 	if (vp == NULL)
3307 		bd = &bdomain[0];
3308 	else
3309 		bd = &bdomain[vp->v_bufobj.bo_domain];
3310 
3311 	counter_u64_add(getnewbufcalls, 1);
3312 	reserved = false;
3313 	do {
3314 		if (reserved == false &&
3315 		    bufspace_reserve(bd, maxsize, metadata) != 0) {
3316 			counter_u64_add(getnewbufrestarts, 1);
3317 			continue;
3318 		}
3319 		reserved = true;
3320 		if ((bp = buf_alloc(bd)) == NULL) {
3321 			counter_u64_add(getnewbufrestarts, 1);
3322 			continue;
3323 		}
3324 		if (getnewbuf_kva(bp, gbflags, maxsize) == 0)
3325 			return (bp);
3326 		break;
3327 	} while (buf_recycle(bd, false) == 0);
3328 
3329 	if (reserved)
3330 		bufspace_release(bd, maxsize);
3331 	if (bp != NULL) {
3332 		bp->b_flags |= B_INVAL;
3333 		brelse(bp);
3334 	}
3335 	bufspace_wait(bd, vp, gbflags, slpflag, slptimeo);
3336 
3337 	return (NULL);
3338 }
3339 
3340 /*
3341  *	buf_daemon:
3342  *
3343  *	buffer flushing daemon.  Buffers are normally flushed by the
3344  *	update daemon but if it cannot keep up this process starts to
3345  *	take the load in an attempt to prevent getnewbuf() from blocking.
3346  */
3347 static struct kproc_desc buf_kp = {
3348 	"bufdaemon",
3349 	buf_daemon,
3350 	&bufdaemonproc
3351 };
3352 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
3353 
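/*
 *	buf_flush:
 *
 *	Flush up to 'target' dirty buffers from the given domain,
 *	optionally restricted to a single vnode.  If no buffer could be
 *	flushed without a rollback, retry once allowing buffers with
 *	rollback dependencies.
 */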
3354 static int
3355 buf_flush(struct vnode *vp, struct bufdomain *bd, int target)
3356 {
3357 	int flushed;
3358 
3359 	flushed = flushbufqueues(vp, bd, target, 0);
3360 	if (flushed == 0) {
3361 		/*
3362 		 * Could not find any buffers without rollback
3363 		 * dependencies, so just write the first one
3364 		 * in the hopes of eventually making progress.
3365 		 */
3366 		if (vp != NULL && target > 2)
3367 			target /= 2;
3368 		flushbufqueues(vp, bd, target, 1);
3369 	}
3370 	return (flushed);
3371 }
3372 
3373 static void
3374 buf_daemon(void)
3375 {
3376 	struct bufdomain *bd;
3377 	int speedupreq;
3378 	int lodirty;
3379 	int i;
3380 
3381 	/*
3382 	 * This process needs to be suspended prior to shutdown sync.
3383 	 */
3384 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kthread_shutdown, curthread,
3385 	    SHUTDOWN_PRI_LAST + 100);
3386 
3387 	/*
3388 	 * Start the buf clean daemons as children threads.
3389 	 */
3390 	for (i = 0 ; i < buf_domains; i++) {
3391 		int error;
3392 
3393 		error = kthread_add((void (*)(void *))bufspace_daemon,
3394 		    &bdomain[i], curproc, NULL, 0, 0, "bufspacedaemon-%d", i);
3395 		if (error)
3396 			panic("error %d spawning bufspace daemon", error);
3397 	}
3398 
3399 	/*
3400 	 * This process is allowed to take the buffer cache to the limit
3401 	 */
3402 	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
3403 	mtx_lock(&bdlock);
3404 	for (;;) {
3405 		bd_request = 0;
3406 		mtx_unlock(&bdlock);
3407 
3408 		kthread_suspend_check();
3409 
3410 		/*
3411 		 * Save speedupreq for this pass and reset to capture new
3412 		 * requests.
3413 		 */
3414 		speedupreq = bd_speedupreq;
3415 		bd_speedupreq = 0;
3416 
3417 		/*
3418 		 * Flush each domain sequentially according to its level and
3419 		 * the speedup request.
3420 		 */
3421 		for (i = 0; i < buf_domains; i++) {
3422 			bd = &bdomain[i];
3423 			if (speedupreq)
3424 				lodirty = bd->bd_numdirtybuffers / 2;
3425 			else
3426 				lodirty = bd->bd_lodirtybuffers;
3427 			while (bd->bd_numdirtybuffers > lodirty) {
3428 				if (buf_flush(NULL, bd,
3429 				    bd->bd_numdirtybuffers - lodirty) == 0)
3430 					break;
3431 				kern_yield(PRI_USER);
3432 			}
3433 		}
3434 
3435 		/*
3436 		 * Only clear bd_request if we have reached our low water
3437 		 * mark.  The buf_daemon normally waits 1 second and
3438 		 * then incrementally flushes any dirty buffers that have
3439 		 * built up, within reason.
3440 		 *
3441 		 * If we were unable to hit our low water mark and couldn't
3442 		 * find any flushable buffers, we sleep for a short period
3443 		 * to avoid endless loops on unlockable buffers.
3444 		 */
3445 		mtx_lock(&bdlock);
3446 		if (!BIT_EMPTY(BUF_DOMAINS, &bdlodirty)) {
3447 			/*
3448 			 * We reached our low water mark, reset the
3449 			 * request and sleep until we are needed again.
3450 			 * The sleep is just so the suspend code works.
3451 			 */
3452 			bd_request = 0;
3453 			/*
3454 			 * Do an extra wakeup in case dirty threshold
3455 			 * changed via sysctl and the explicit transition
3456 			 * out of shortfall was missed.
3457 			 */
3458 			bdirtywakeup();
3459 			if (runningbufspace <= lorunningspace)
3460 				runningwakeup();
3461 			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
3462 		} else {
3463 			/*
3464 			 * We couldn't find any flushable dirty buffers but
3465 			 * still have too many dirty buffers, we
3466 			 * have to sleep and try again.  (rare)
3467 			 */
3468 			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
3469 		}
3470 	}
3471 }
3472 
3473 /*
3474  *	flushbufqueues:
3475  *
3476  *	Try to flush a buffer in the dirty queue.  We must be careful to
3477  *	free up B_INVAL buffers instead of write them, which NFS is
3478  *	particularly sensitive to.
3479  */
3480 static int flushwithdeps = 0;
3481 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW | CTLFLAG_STATS,
3482     &flushwithdeps, 0,
3483     "Number of buffers flushed with dependencies that require rollbacks");
3484 
3485 static int
3486 flushbufqueues(struct vnode *lvp, struct bufdomain *bd, int target,
3487     int flushdeps)
3488 {
3489 	struct bufqueue *bq;
3490 	struct buf *sentinel;
3491 	struct vnode *vp;
3492 	struct mount *mp;
3493 	struct buf *bp;
3494 	int hasdeps;
3495 	int flushed;
3496 	int error;
3497 	bool unlock;
3498 
3499 	flushed = 0;
3500 	bq = &bd->bd_dirtyq;
3501 	bp = NULL;
3502 	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
3503 	sentinel->b_qindex = QUEUE_SENTINEL;
3504 	BQ_LOCK(bq);
3505 	TAILQ_INSERT_HEAD(&bq->bq_queue, sentinel, b_freelist);
3506 	BQ_UNLOCK(bq);
3507 	while (flushed != target) {
3508 		maybe_yield();
3509 		BQ_LOCK(bq);
3510 		bp = TAILQ_NEXT(sentinel, b_freelist);
3511 		if (bp != NULL) {
3512 			TAILQ_REMOVE(&bq->bq_queue, sentinel, b_freelist);
3513 			TAILQ_INSERT_AFTER(&bq->bq_queue, bp, sentinel,
3514 			    b_freelist);
3515 		} else {
3516 			BQ_UNLOCK(bq);
3517 			break;
3518 		}
3519 		/*
3520 		 * Skip sentinels inserted by other invocations of the
3521 		 * flushbufqueues(), taking care to not reorder them.
3522 		 *
3523 		 * Only flush the buffers that belong to the
3524 		 * vnode locked by the curthread.
3525 		 */
3526 		if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
3527 		    bp->b_vp != lvp)) {
3528 			BQ_UNLOCK(bq);
3529 			continue;
3530 		}
3531 		error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
3532 		BQ_UNLOCK(bq);
3533 		if (error != 0)
3534 			continue;
3535 
3536 		/*
3537 		 * BKGRDINPROG can only be set with the buf and bufobj
3538 		 * locks both held.  We tolerate a race to clear it here.
3539 		 */
3540 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
3541 		    (bp->b_flags & B_DELWRI) == 0) {
3542 			BUF_UNLOCK(bp);
3543 			continue;
3544 		}
3545 		if (bp->b_flags & B_INVAL) {
3546 			bremfreef(bp);
3547 			brelse(bp);
3548 			flushed++;
3549 			continue;
3550 		}
3551 
3552 		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
3553 			if (flushdeps == 0) {
3554 				BUF_UNLOCK(bp);
3555 				continue;
3556 			}
3557 			hasdeps = 1;
3558 		} else
3559 			hasdeps = 0;
3560 		/*
3561 		 * We must hold the lock on a vnode before writing
3562 		 * one of its buffers. Otherwise we may cause confusion or,
3563 		 * in the case of a snapshot vnode, deadlock the
3564 		 * system.
3565 		 *
3566 		 * The lock order here is the reverse of the normal order
3567 		 * of vnode lock followed by buf lock.  This is ok because
3568 		 * the NOWAIT will prevent deadlock.
3569 		 */
3570 		vp = bp->b_vp;
3571 		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
3572 			BUF_UNLOCK(bp);
3573 			continue;
3574 		}
3575 		if (lvp == NULL) {
3576 			unlock = true;
3577 			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
3578 		} else {
3579 			ASSERT_VOP_LOCKED(vp, "getbuf");
3580 			unlock = false;
3581 			error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
3582 			    vn_lock(vp, LK_TRYUPGRADE);
3583 		}
3584 		if (error == 0) {
3585 			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
3586 			    bp, bp->b_vp, bp->b_flags);
3587 			if (curproc == bufdaemonproc) {
3588 				vfs_bio_awrite(bp);
3589 			} else {
3590 				bremfree(bp);
3591 				bwrite(bp);
3592 				counter_u64_add(notbufdflushes, 1);
3593 			}
3594 			vn_finished_write(mp);
3595 			if (unlock)
3596 				VOP_UNLOCK(vp);
3597 			flushwithdeps += hasdeps;
3598 			flushed++;
3599 
3600 			/*
3601 			 * Sleeping on runningbufspace while holding
3602 			 * vnode lock leads to deadlock.
3603 			 */
3604 			if (curproc == bufdaemonproc &&
3605 			    runningbufspace > hirunningspace)
3606 				waitrunningbufspace();
3607 			continue;
3608 		}
3609 		vn_finished_write(mp);
3610 		BUF_UNLOCK(bp);
3611 	}
3612 	BQ_LOCK(bq);
3613 	TAILQ_REMOVE(&bq->bq_queue, sentinel, b_freelist);
3614 	BQ_UNLOCK(bq);
3615 	free(sentinel, M_TEMP);
3616 	return (flushed);
3617 }
3618 
3619 /*
3620  * Check to see if a block is currently memory resident.
3621  */
3622 struct buf *
3623 incore(struct bufobj *bo, daddr_t blkno)
3624 {
3625 	return (gbincore_unlocked(bo, blkno));
3626 }
3627 
3628 /*
3629  * Returns true if no I/O is needed to access the
3630  * associated VM object.  This is like incore except
3631  * it also hunts around in the VM system for the data.
3632  */
3633 bool
3634 inmem(struct vnode * vp, daddr_t blkno)
3635 {
3636 	vm_object_t obj;
3637 	vm_offset_t toff, tinc, size;
3638 	vm_page_t m, n;
3639 	vm_ooffset_t off;
3640 	int valid;
3641 
3642 	ASSERT_VOP_LOCKED(vp, "inmem");
3643 
3644 	if (incore(&vp->v_bufobj, blkno))
3645 		return (true);
3646 	if (vp->v_mount == NULL)
3647 		return (false);
3648 	obj = vp->v_object;
3649 	if (obj == NULL)
3650 		return (false);
3651 
3652 	size = PAGE_SIZE;
3653 	if (size > vp->v_mount->mnt_stat.f_iosize)
3654 		size = vp->v_mount->mnt_stat.f_iosize;
3655 	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
3656 
3657 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
3658 		m = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
3659 recheck:
3660 		if (m == NULL)
3661 			return (false);
3662 
3663 		tinc = size;
3664 		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
3665 			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
3666 		/*
3667 		 * Consider page validity only if page mapping didn't change
3668 		 * during the check.
3669 		 */
3670 		valid = vm_page_is_valid(m,
3671 		    (vm_offset_t)((toff + off) & PAGE_MASK), tinc);
3672 		n = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
3673 		if (m != n) {
3674 			m = n;
3675 			goto recheck;
3676 		}
3677 		if (!valid)
3678 			return (false);
3679 	}
3680 	return (true);
3681 }
3682 
3683 /*
3684  * Set the dirty range for a buffer based on the status of the dirty
3685  * bits in the pages comprising the buffer.  The range is limited
3686  * to the size of the buffer.
3687  *
3688  * Tell the VM system that the pages associated with this buffer
3689  * are clean.  This is used for delayed writes where the data is
3690  * going to go to disk eventually without additional VM intervention.
3691  *
3692  * Note that while we only really need to clean through to b_bcount, we
3693  * just go ahead and clean through to b_bufsize.
3694  */
3695 static void
3696 vfs_clean_pages_dirty_buf(struct buf *bp)
3697 {
3698 	vm_ooffset_t foff, noff, eoff;
3699 	vm_page_t m;
3700 	int i;
3701 
3702 	if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
3703 		return;
3704 
3705 	foff = bp->b_offset;
3706 	KASSERT(bp->b_offset != NOOFFSET,
3707 	    ("vfs_clean_pages_dirty_buf: no buffer offset"));
3708 
3709 	vfs_busy_pages_acquire(bp);
3710 	vfs_setdirty_range(bp);
3711 	for (i = 0; i < bp->b_npages; i++) {
3712 		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3713 		eoff = noff;
3714 		if (eoff > bp->b_offset + bp->b_bufsize)
3715 			eoff = bp->b_offset + bp->b_bufsize;
3716 		m = bp->b_pages[i];
3717 		vfs_page_set_validclean(bp, foff, m);
3718 		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
3719 		foff = noff;
3720 	}
3721 	vfs_busy_pages_release(bp);
3722 }
3723 
3724 static void
3725 vfs_setdirty_range(struct buf *bp)
3726 {
3727 	vm_offset_t boffset;
3728 	vm_offset_t eoffset;
3729 	int i;
3730 
3731 	/*
3732 	 * test the pages to see if they have been modified directly
3733 	 * by users through the VM system.
3734 	 */
3735 	for (i = 0; i < bp->b_npages; i++)
3736 		vm_page_test_dirty(bp->b_pages[i]);
3737 
3738 	/*
3739 	 * Calculate the encompassing dirty range, boffset and eoffset,
3740 	 * (eoffset - boffset) bytes.
3741 	 */
3742 
3743 	for (i = 0; i < bp->b_npages; i++) {
3744 		if (bp->b_pages[i]->dirty)
3745 			break;
3746 	}
3747 	boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3748 
3749 	for (i = bp->b_npages - 1; i >= 0; --i) {
3750 		if (bp->b_pages[i]->dirty) {
3751 			break;
3752 		}
3753 	}
3754 	eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3755 
3756 	/*
3757 	 * Fit it to the buffer.
3758 	 */
3759 
3760 	if (eoffset > bp->b_bcount)
3761 		eoffset = bp->b_bcount;
3762 
3763 	/*
3764 	 * If we have a good dirty range, merge with the existing
3765 	 * dirty range.
3766 	 */
3767 
3768 	if (boffset < eoffset) {
3769 		if (bp->b_dirtyoff > boffset)
3770 			bp->b_dirtyoff = boffset;
3771 		if (bp->b_dirtyend < eoffset)
3772 			bp->b_dirtyend = eoffset;
3773 	}
3774 }
3775 
3776 /*
3777  * Allocate the KVA mapping for an existing buffer.
3778  * If an unmapped buffer is provided but a mapped buffer is requested, also
3779  * take care to properly set up mappings between pages and KVA.
3780  */
3781 static void
3782 bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
3783 {
3784 	int bsize, maxsize, need_mapping, need_kva;
3785 	off_t offset;
3786 
3787 	need_mapping = bp->b_data == unmapped_buf &&
3788 	    (gbflags & GB_UNMAPPED) == 0;
3789 	need_kva = bp->b_kvabase == unmapped_buf &&
3790 	    bp->b_data == unmapped_buf &&
3791 	    (gbflags & GB_KVAALLOC) != 0;
3792 	if (!need_mapping && !need_kva)
3793 		return;
3794 
3795 	BUF_CHECK_UNMAPPED(bp);
3796 
3797 	if (need_mapping && bp->b_kvabase != unmapped_buf) {
3798 		/*
3799 		 * Buffer is not mapped, but the KVA was already
3800 		 * reserved at the time of the instantiation.  Use the
3801 		 * allocated space.
3802 		 */
3803 		goto has_addr;
3804 	}
3805 
3806 	/*
3807 	 * Calculate the amount of the address space we would reserve
3808 	 * if the buffer was mapped.
3809 	 */
3810 	bsize = vn_isdisk(bp->b_vp) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
3811 	KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3812 	offset = blkno * bsize;
3813 	maxsize = size + (offset & PAGE_MASK);
3814 	maxsize = imax(maxsize, bsize);
3815 
3816 	while (bufkva_alloc(bp, maxsize, gbflags) != 0) {
3817 		if ((gbflags & GB_NOWAIT_BD) != 0) {
3818 			/*
3819 			 * XXXKIB: defragmentation cannot
3820 			 * succeed, not sure what else to do.
3821 			 */
3822 			panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
3823 		}
3824 		counter_u64_add(mappingrestarts, 1);
3825 		bufspace_wait(bufdomain(bp), bp->b_vp, gbflags, 0, 0);
3826 	}
3827 has_addr:
3828 	if (need_mapping) {
3829 		/* b_offset is handled by bpmap_qenter. */
3830 		bp->b_data = bp->b_kvabase;
3831 		BUF_CHECK_MAPPED(bp);
3832 		bpmap_qenter(bp);
3833 	}
3834 }
3835 
3836 struct buf *
3837 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
3838     int flags)
3839 {
3840 	struct buf *bp;
3841 	int error;
3842 
3843 	error = getblkx(vp, blkno, blkno, size, slpflag, slptimeo, flags, &bp);
3844 	if (error != 0)
3845 		return (NULL);
3846 	return (bp);
3847 }
3848 
3849 /*
3850  *	getblkx:
3851  *
3852  *	Get a block given a specified block and offset into a file/device.
3853  *	The buffers B_DONE bit will be cleared on return, making it almost
3854  *	The buffer's B_DONE bit will be cleared on return, making it almost
3855  *	return.  The caller should clear B_INVAL prior to initiating a
3856  *	READ.
3857  *
3858  *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
3859  *	an existing buffer.
3860  *
3861  *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
3862  *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
3863  *	and then cleared based on the backing VM.  If the previous buffer is
3864  *	non-0-sized but invalid, B_CACHE will be cleared.
3865  *
3866  *	If getblk() must create a new buffer, the new buffer is returned with
3867  *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
3868  *	case it is returned with B_INVAL clear and B_CACHE set based on the
3869  *	backing VM.
3870  *
3871  *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
3872  *	B_CACHE bit is clear.
3873  *
3874  *	What this means, basically, is that the caller should use B_CACHE to
3875  *	determine whether the buffer is fully valid or not and should clear
3876  *	B_INVAL prior to issuing a read.  If the caller intends to validate
3877  *	the buffer by loading its data area with something, the caller needs
3878  *	to clear B_INVAL.  If the caller does this without issuing an I/O,
3879  *	the caller should set B_CACHE ( as an optimization ), else the caller
3880  *	should issue the I/O and biodone() will set B_CACHE if the I/O was
3881  *	a write attempt or if it was a successful read.  If the caller
3882  *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
3883  *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
3884  *
3885  *	The blkno parameter is the logical block being requested. Normally
3886  *	the mapping of logical block number to disk block address is done
3887  *	by calling VOP_BMAP(). However, if the mapping is already known, the
3888  *	disk block address can be passed using the dblkno parameter. If the
3889  *	disk block address is not known, then the same value should be passed
3890  *	for blkno and dblkno.
3891  */
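/*
 *	An illustrative sketch (not part of this file's control flow) of the
 *	B_CACHE contract described above, following the bread()-style pattern
 *	used elsewhere in this file: a caller that wants valid data checks
 *	B_CACHE after getblk() and issues the read itself when it is clear.
 *
 *		bp = getblk(vp, blkno, size, 0, 0, 0);
 *		if ((bp->b_flags & B_CACHE) == 0) {
 *			bp->b_iocmd = BIO_READ;
 *			bp->b_flags &= ~B_INVAL;
 *			bp->b_ioflags &= ~BIO_ERROR;
 *			vfs_busy_pages(bp, 0);
 *			bp->b_iooffset = dbtob(bp->b_blkno);
 *			bstrategy(bp);
 *			error = bufwait(bp);
 *		}
 */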
3892 int
3893 getblkx(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size, int slpflag,
3894     int slptimeo, int flags, struct buf **bpp)
3895 {
3896 	struct buf *bp;
3897 	struct bufobj *bo;
3898 	daddr_t d_blkno;
3899 	int bsize, error, maxsize, vmio;
3900 	off_t offset;
3901 
3902 	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
3903 	KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3904 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3905 	ASSERT_VOP_LOCKED(vp, "getblk");
3906 	if (size > maxbcachebuf)
3907 		panic("getblk: size(%d) > maxbcachebuf(%d)\n", size,
3908 		    maxbcachebuf);
3909 	if (!unmapped_buf_allowed)
3910 		flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3911 
3912 	bo = &vp->v_bufobj;
3913 	d_blkno = dblkno;
3914 
3915 	/* Attempt lockless lookup first. */
3916 	bp = gbincore_unlocked(bo, blkno);
3917 	if (bp == NULL) {
3918 		/*
3919 		 * With GB_NOCREAT we must be sure about not finding the buffer
3920 		 * as it may have been reassigned during unlocked lookup.
3921 		 */
3922 		if ((flags & GB_NOCREAT) != 0)
3923 			goto loop;
3924 		goto newbuf_unlocked;
3925 	}
3926 
3927 	error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL, "getblku", 0,
3928 	    0);
3929 	if (error != 0)
3930 		goto loop;
3931 
3932 	/* Verify buf identity has not changed since lookup. */
3933 	if (bp->b_bufobj == bo && bp->b_lblkno == blkno)
3934 		goto foundbuf_fastpath;
3935 
3936 	/* It changed, fall back to locked lookup. */
3937 	BUF_UNLOCK_RAW(bp);
3938 
3939 loop:
3940 	BO_RLOCK(bo);
3941 	bp = gbincore(bo, blkno);
3942 	if (bp != NULL) {
3943 		int lockflags;
3944 
3945 		/*
3946 		 * Buffer is in-core.  If the buffer is neither busy nor managed,
3947 		 * it must be on a queue.
3948 		 */
3949 		lockflags = LK_EXCLUSIVE | LK_INTERLOCK |
3950 		    ((flags & GB_LOCK_NOWAIT) ? LK_NOWAIT : LK_SLEEPFAIL);
3951 
3952 		error = BUF_TIMELOCK(bp, lockflags,
3953 		    BO_LOCKPTR(bo), "getblk", slpflag, slptimeo);
3954 
3955 		/*
3956 		 * If we slept and got the lock we have to restart in case
3957 		 * the buffer changed identities.
3958 		 */
3959 		if (error == ENOLCK)
3960 			goto loop;
3961 		/* We timed out or were interrupted. */
3962 		else if (error != 0)
3963 			return (error);
3964 
3965 foundbuf_fastpath:
3966 		/* If recursed, assume caller knows the rules. */
3967 		if (BUF_LOCKRECURSED(bp))
3968 			goto end;
3969 
3970 		/*
3971 		 * The buffer is locked.  B_CACHE is cleared if the buffer is
3972 		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
3973 		 * and for a VMIO buffer B_CACHE is adjusted according to the
3974 		 * backing VM cache.
3975 		 */
3976 		if (bp->b_flags & B_INVAL)
3977 			bp->b_flags &= ~B_CACHE;
3978 		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
3979 			bp->b_flags |= B_CACHE;
3980 		if (bp->b_flags & B_MANAGED)
3981 			MPASS(bp->b_qindex == QUEUE_NONE);
3982 		else
3983 			bremfree(bp);
3984 
3985 		/*
3986 		 * check for size inconsistencies for non-VMIO case.
3987 		 */
3988 		if (bp->b_bcount != size) {
3989 			if ((bp->b_flags & B_VMIO) == 0 ||
3990 			    (size > bp->b_kvasize)) {
3991 				if (bp->b_flags & B_DELWRI) {
3992 					bp->b_flags |= B_NOCACHE;
3993 					bwrite(bp);
3994 				} else {
3995 					if (LIST_EMPTY(&bp->b_dep)) {
3996 						bp->b_flags |= B_RELBUF;
3997 						brelse(bp);
3998 					} else {
3999 						bp->b_flags |= B_NOCACHE;
4000 						bwrite(bp);
4001 					}
4002 				}
4003 				goto loop;
4004 			}
4005 		}
4006 
4007 		/*
4008 		 * Handle the case of unmapped buffer which should
4009 		 * become mapped, or the buffer for which KVA
4010 		 * reservation is requested.
4011 		 */
4012 		bp_unmapped_get_kva(bp, blkno, size, flags);
4013 
4014 		/*
4015 		 * If the size is inconsistent in the VMIO case, we can resize
4016 		 * the buffer.  This might lead to B_CACHE getting set or
4017 		 * cleared.  If the size has not changed, B_CACHE remains
4018 		 * unchanged from its previous state.
4019 		 */
4020 		allocbuf(bp, size);
4021 
4022 		KASSERT(bp->b_offset != NOOFFSET,
4023 		    ("getblk: no buffer offset"));
4024 
4025 		/*
4026 		 * A buffer with B_DELWRI set and B_CACHE clear must
4027 		 * be committed before we can return the buffer in
4028 		 * order to prevent the caller from issuing a read
4029 		 * ( due to B_CACHE not being set ) and overwriting
4030 		 * it.
4031 		 *
4032 		 * Most callers, including NFS and FFS, need this to
4033 		 * operate properly either because they assume they
4034 		 * can issue a read if B_CACHE is not set, or because
4035 		 * ( for example ) an uncached B_DELWRI might loop due
4036 		 * to softupdates re-dirtying the buffer.  In the latter
4037 		 * case, B_CACHE is set after the first write completes,
4038 		 * preventing further loops.
4039 		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
4040 		 * above while extending the buffer, we cannot allow the
4041 		 * buffer to remain with B_CACHE set after the write
4042 		 * completes or it will represent a corrupt state.  To
4043 		 * deal with this we set B_NOCACHE to scrap the buffer
4044 		 * after the write.
4045 		 *
4046 		 * We might be able to do something fancy, like setting
4047 		 * B_CACHE in bwrite() except if B_DELWRI is already set,
4048 		 * so the below call doesn't set B_CACHE, but that gets real
4049 		 * confusing.  This is much easier.
4050 		 */
4051 
4052 		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
4053 			bp->b_flags |= B_NOCACHE;
4054 			bwrite(bp);
4055 			goto loop;
4056 		}
4057 		bp->b_flags &= ~B_DONE;
4058 	} else {
4059 		/*
4060 		 * Buffer is not in-core, create new buffer.  The buffer
4061 		 * returned by getnewbuf() is locked.  Note that the returned
4062 		 * buffer is also considered valid (not marked B_INVAL).
4063 		 */
4064 		BO_RUNLOCK(bo);
4065 newbuf_unlocked:
4066 		/*
4067 		 * If the user does not want us to create the buffer, bail out
4068 		 * here.
4069 		 */
4070 		if (flags & GB_NOCREAT)
4071 			return (EEXIST);
4072 
4073 		bsize = vn_isdisk(vp) ? DEV_BSIZE : bo->bo_bsize;
4074 		KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
4075 		offset = blkno * bsize;
4076 		vmio = vp->v_object != NULL;
4077 		if (vmio) {
4078 			maxsize = size + (offset & PAGE_MASK);
4079 		} else {
4080 			maxsize = size;
4081 			/* Do not allow non-VMIO unmapped buffers. */
4082 			flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
4083 		}
4084 		maxsize = imax(maxsize, bsize);
4085 		if ((flags & GB_NOSPARSE) != 0 && vmio &&
4086 		    !vn_isdisk(vp)) {
4087 			error = VOP_BMAP(vp, blkno, NULL, &d_blkno, 0, 0);
4088 			KASSERT(error != EOPNOTSUPP,
4089 			    ("GB_NOSPARSE from fs not supporting bmap, vp %p",
4090 			    vp));
4091 			if (error != 0)
4092 				return (error);
4093 			if (d_blkno == -1)
4094 				return (EJUSTRETURN);
4095 		}
4096 
4097 		bp = getnewbuf(vp, slpflag, slptimeo, maxsize, flags);
4098 		if (bp == NULL) {
4099 			if (slpflag || slptimeo)
4100 				return (ETIMEDOUT);
4101 			/*
4102 			 * XXX This is here until the sleep path is diagnosed
4103 			 * enough to work under very low memory conditions.
4104 			 *
4105 			 * There's an issue on low memory, 4BSD+non-preempt
4106 			 * systems (eg MIPS routers with 32MB RAM) where buffer
4107 			 * exhaustion occurs without sleeping for buffer
4108 			 * reclamation.  This just sticks in a loop and
4109 			 * constantly attempts to allocate a buffer, which
4110 			 * hits exhaustion and tries to wakeup bufdaemon.
4111 			 * This never happens because we never yield.
4112 			 *
4113 			 * The real solution is to identify and fix these cases
4114 			 * so we aren't effectively busy-waiting in a loop
4115 			 * until the reclamation path has cycles to run.
4116 			 */
4117 			kern_yield(PRI_USER);
4118 			goto loop;
4119 		}
4120 
4121 		/*
4122 		 * This code is used to make sure that a buffer is not
4123 		 * created while the getnewbuf routine is blocked.
4124 		 * This can be a problem whether the vnode is locked or not.
4125 		 * If the buffer is created out from under us, we have to
4126 		 * throw away the one we just created.
4127 		 *
4128 		 * Note: this must occur before we associate the buffer
4129 		 * with the vp especially considering limitations in
4130 		 * the splay tree implementation when dealing with duplicate
4131 		 * lblkno's.
4132 		 */
4133 		BO_LOCK(bo);
4134 		if (gbincore(bo, blkno)) {
4135 			BO_UNLOCK(bo);
4136 			bp->b_flags |= B_INVAL;
4137 			bufspace_release(bufdomain(bp), maxsize);
4138 			brelse(bp);
4139 			goto loop;
4140 		}
4141 
4142 		/*
4143 		 * Insert the buffer into the hash, so that it can
4144 		 * be found by incore.
4145 		 */
4146 		bp->b_lblkno = blkno;
4147 		bp->b_blkno = d_blkno;
4148 		bp->b_offset = offset;
4149 		bgetvp(vp, bp);
4150 		BO_UNLOCK(bo);
4151 
4152 		/*
4153 		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
4154 		 * buffer size starts out as 0, B_CACHE will be set by
4155 		 * allocbuf() for the VMIO case prior to it testing the
4156 		 * backing store for validity.
4157 		 */
4158 
4159 		if (vmio) {
4160 			bp->b_flags |= B_VMIO;
4161 			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
4162 			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
4163 			    bp, vp->v_object, bp->b_bufobj->bo_object));
4164 		} else {
4165 			bp->b_flags &= ~B_VMIO;
4166 			KASSERT(bp->b_bufobj->bo_object == NULL,
4167 			    ("ARGH! has b_bufobj->bo_object %p %p\n",
4168 			    bp, bp->b_bufobj->bo_object));
4169 			BUF_CHECK_MAPPED(bp);
4170 		}
4171 
4172 		allocbuf(bp, size);
4173 		bufspace_release(bufdomain(bp), maxsize);
4174 		bp->b_flags &= ~B_DONE;
4175 	}
4176 	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
4177 end:
4178 	buf_track(bp, __func__);
4179 	KASSERT(bp->b_bufobj == bo,
4180 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
4181 	*bpp = bp;
4182 	return (0);
4183 }
4184 
4185 /*
4186  * Get an empty, disassociated buffer of given size.  The buffer is initially
4187  * set to B_INVAL.
4188  */
4189 struct buf *
4190 geteblk(int size, int flags)
4191 {
4192 	struct buf *bp;
4193 	int maxsize;
4194 
4195 	maxsize = (size + BKVAMASK) & ~BKVAMASK;
4196 	while ((bp = getnewbuf(NULL, 0, 0, maxsize, flags)) == NULL) {
4197 		if ((flags & GB_NOWAIT_BD) &&
4198 		    (curthread->td_pflags & TDP_BUFNEED) != 0)
4199 			return (NULL);
4200 	}
4201 	allocbuf(bp, size);
4202 	bufspace_release(bufdomain(bp), maxsize);
4203 	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
4204 	return (bp);
4205 }
4206 
4207 /*
4208  * Truncate the backing store for a non-vmio buffer.
4209  */
4210 static void
4211 vfs_nonvmio_truncate(struct buf *bp, int newbsize)
4212 {
4213 
4214 	if (bp->b_flags & B_MALLOC) {
4215 		/*
4216 		 * malloced buffers are not shrunk
4217 		 */
4218 		if (newbsize == 0) {
4219 			bufmallocadjust(bp, 0);
4220 			free(bp->b_data, M_BIOBUF);
4221 			bp->b_data = bp->b_kvabase;
4222 			bp->b_flags &= ~B_MALLOC;
4223 		}
4224 		return;
4225 	}
4226 	vm_hold_free_pages(bp, newbsize);
4227 	bufspace_adjust(bp, newbsize);
4228 }
4229 
4230 /*
4231  * Extend the backing for a non-VMIO buffer.
4232  */
4233 static void
4234 vfs_nonvmio_extend(struct buf *bp, int newbsize)
4235 {
4236 	caddr_t origbuf;
4237 	int origbufsize;
4238 
4239 	/*
4240 	 * We only use malloced memory on the first allocation,
4241 	 * and revert to page-allocated memory when the buffer
4242 	 * grows.
4243 	 *
4244 	 * There is a potential smp race here that could lead
4245 	 * to bufmallocspace slightly exceeding the max.  It
4246 	 * is probably extremely rare and not worth worrying
4247 	 * over.
4248 	 */
4249 	if (bp->b_bufsize == 0 && newbsize <= PAGE_SIZE/2 &&
4250 	    bufmallocspace < maxbufmallocspace) {
4251 		bp->b_data = malloc(newbsize, M_BIOBUF, M_WAITOK);
4252 		bp->b_flags |= B_MALLOC;
4253 		bufmallocadjust(bp, newbsize);
4254 		return;
4255 	}
4256 
4257 	/*
4258 	 * If the buffer is growing on its other-than-first
4259 	 * allocation then we revert to the page-allocation
4260 	 * scheme.
4261 	 */
4262 	origbuf = NULL;
4263 	origbufsize = 0;
4264 	if (bp->b_flags & B_MALLOC) {
4265 		origbuf = bp->b_data;
4266 		origbufsize = bp->b_bufsize;
4267 		bp->b_data = bp->b_kvabase;
4268 		bufmallocadjust(bp, 0);
4269 		bp->b_flags &= ~B_MALLOC;
4270 		newbsize = round_page(newbsize);
4271 	}
4272 	vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize,
4273 	    (vm_offset_t) bp->b_data + newbsize);
4274 	if (origbuf != NULL) {
4275 		bcopy(origbuf, bp->b_data, origbufsize);
4276 		free(origbuf, M_BIOBUF);
4277 	}
4278 	bufspace_adjust(bp, newbsize);
4279 }
4280 
4281 /*
4282  * This code constructs the buffer memory from either anonymous system
4283  * memory (in the case of non-VMIO operations) or from an associated
4284  * VM object (in the case of VMIO operations).  This code is able to
4285  * resize a buffer up or down.
4286  *
4287  * Note that this code is tricky, and has many complications to resolve
4288  * deadlock or inconsistent data situations.  Tread lightly!!!
4289  * There are B_CACHE and B_DELWRI interactions that must be dealt with by
4290  * the caller.  Calling this code willy nilly can result in the loss of data.
4291  *
4292  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
4293  * B_CACHE for the non-VMIO case.
4294  */
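/*
 * A minimal usage sketch (illustrative only): a filesystem that already
 * holds a locked buffer and wants to grow it in place, e.g. when extending
 * the last block of a file, can call allocbuf() directly and must then
 * honor the resulting B_CACHE state as described above.
 *
 *	bp = getblk(vp, lbn, osize, 0, 0, 0);
 *	...
 *	allocbuf(bp, nsize);
 *	if ((bp->b_flags & B_CACHE) == 0)
 *		... the extended range must be read or fully overwritten ...
 */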
4295 int
4296 allocbuf(struct buf *bp, int size)
4297 {
4298 	int newbsize;
4299 
4300 	if (bp->b_bcount == size)
4301 		return (1);
4302 
4303 	if (bp->b_kvasize != 0 && bp->b_kvasize < size)
4304 		panic("allocbuf: buffer too small");
4305 
4306 	newbsize = roundup2(size, DEV_BSIZE);
4307 	if ((bp->b_flags & B_VMIO) == 0) {
4308 		if ((bp->b_flags & B_MALLOC) == 0)
4309 			newbsize = round_page(newbsize);
4310 		/*
4311 		 * Just get anonymous memory from the kernel.  Don't
4312 		 * mess with B_CACHE.
4313 		 */
4314 		if (newbsize < bp->b_bufsize)
4315 			vfs_nonvmio_truncate(bp, newbsize);
4316 		else if (newbsize > bp->b_bufsize)
4317 			vfs_nonvmio_extend(bp, newbsize);
4318 	} else {
4319 		int desiredpages;
4320 
4321 		desiredpages = (size == 0) ? 0 :
4322 		    num_pages((bp->b_offset & PAGE_MASK) + newbsize);
4323 
4324 		if (bp->b_flags & B_MALLOC)
4325 			panic("allocbuf: VMIO buffer can't be malloced");
4326 		/*
4327 		 * Set B_CACHE initially if buffer is 0 length or will become
4328 		 * 0-length.
4329 		 */
4330 		if (size == 0 || bp->b_bufsize == 0)
4331 			bp->b_flags |= B_CACHE;
4332 
4333 		if (newbsize < bp->b_bufsize)
4334 			vfs_vmio_truncate(bp, desiredpages);
4335 		/* XXX This looks as if it should be newbsize > b_bufsize */
4336 		else if (size > bp->b_bcount)
4337 			vfs_vmio_extend(bp, desiredpages, size);
4338 		bufspace_adjust(bp, newbsize);
4339 	}
4340 	bp->b_bcount = size;		/* requested buffer size. */
4341 	return (1);
4342 }
4343 
4344 extern int inflight_transient_maps;
4345 
4346 static struct bio_queue nondump_bios;
4347 
4348 void
4349 biodone(struct bio *bp)
4350 {
4351 	struct mtx *mtxp;
4352 	void (*done)(struct bio *);
4353 	vm_offset_t start, end;
4354 
4355 	biotrack(bp, __func__);
4356 
4357 	/*
4358 	 * Avoid completing I/O when dumping after a panic since that may
4359 	 * result in a deadlock in the filesystem or pager code.  Note that
4360 	 * this doesn't affect dumps that were started manually since we aim
4361 	 * to keep the system usable after it has been resumed.
4362 	 */
4363 	if (__predict_false(dumping && SCHEDULER_STOPPED())) {
4364 		TAILQ_INSERT_HEAD(&nondump_bios, bp, bio_queue);
4365 		return;
4366 	}
4367 	if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
4368 		bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
4369 		bp->bio_flags |= BIO_UNMAPPED;
4370 		start = trunc_page((vm_offset_t)bp->bio_data);
4371 		end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
4372 		bp->bio_data = unmapped_buf;
4373 		pmap_qremove(start, atop(end - start));
4374 		vmem_free(transient_arena, start, end - start);
4375 		atomic_add_int(&inflight_transient_maps, -1);
4376 	}
4377 	done = bp->bio_done;
4378 	if (done == NULL) {
4379 		mtxp = mtx_pool_find(mtxpool_sleep, bp);
4380 		mtx_lock(mtxp);
4381 		bp->bio_flags |= BIO_DONE;
4382 		wakeup(bp);
4383 		mtx_unlock(mtxp);
4384 	} else
4385 		done(bp);
4386 }
4387 
4388 /*
4389  * Wait for a BIO to finish.
4390  */
4391 int
4392 biowait(struct bio *bp, const char *wchan)
4393 {
4394 	struct mtx *mtxp;
4395 
4396 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4397 	mtx_lock(mtxp);
4398 	while ((bp->bio_flags & BIO_DONE) == 0)
4399 		msleep(bp, mtxp, PRIBIO, wchan, 0);
4400 	mtx_unlock(mtxp);
4401 	if (bp->bio_error != 0)
4402 		return (bp->bio_error);
4403 	if (!(bp->bio_flags & BIO_ERROR))
4404 		return (0);
4405 	return (EIO);
4406 }
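/*
 * Illustrative sketch of the bio completion protocol implemented by
 * biodone() and biowait() above: a caller that wants synchronous completion
 * leaves bio_done NULL, so biodone() performs the wakeup, and then sleeps
 * in biowait().  The submission step is elided and driver-specific; offset,
 * length and buffer stand for caller-provided values.
 *
 *	bip = g_alloc_bio();
 *	bip->bio_cmd = BIO_READ;
 *	bip->bio_offset = offset;
 *	bip->bio_length = length;
 *	bip->bio_data = buffer;
 *	bip->bio_done = NULL;
 *	... hand bip to the provider or driver ...
 *	error = biowait(bip, "exbio");
 *	g_destroy_bio(bip);
 */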
4407 
4408 void
4409 biofinish(struct bio *bp, struct devstat *stat, int error)
4410 {
4411 
4412 	if (error) {
4413 		bp->bio_error = error;
4414 		bp->bio_flags |= BIO_ERROR;
4415 	}
4416 	if (stat != NULL)
4417 		devstat_end_transaction_bio(stat, bp);
4418 	biodone(bp);
4419 }
4420 
4421 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4422 void
4423 biotrack_buf(struct bio *bp, const char *location)
4424 {
4425 
4426 	buf_track(bp->bio_track_bp, location);
4427 }
4428 #endif
4429 
4430 /*
4431  *	bufwait:
4432  *
4433  *	Wait for buffer I/O completion, returning error status.  The buffer
4434  *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
4435  *	error and cleared.
4436  */
4437 int
4438 bufwait(struct buf *bp)
4439 {
4440 	if (bp->b_iocmd == BIO_READ)
4441 		bwait(bp, PRIBIO, "biord");
4442 	else
4443 		bwait(bp, PRIBIO, "biowr");
4444 	if (bp->b_flags & B_EINTR) {
4445 		bp->b_flags &= ~B_EINTR;
4446 		return (EINTR);
4447 	}
4448 	if (bp->b_ioflags & BIO_ERROR) {
4449 		return (bp->b_error ? bp->b_error : EIO);
4450 	} else {
4451 		return (0);
4452 	}
4453 }
4454 
4455 /*
4456  *	bufdone:
4457  *
4458  *	Finish I/O on a buffer, optionally calling a completion function.
4459  *	This is usually called from an interrupt so process blocking is
4460  *	not allowed.
4461  *
4462  *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
4463  *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
4464  *	assuming B_INVAL is clear.
4465  *
4466  *	For the VMIO case, we set B_CACHE if the op was a read and no
4467  *	read error occurred, or if the op was a write.  B_CACHE is never
4468  *	set if the buffer is invalid or otherwise uncacheable.
4469  *
4470  *	bufdone does not mess with B_INVAL, allowing the I/O routine or the
4471  *	initiator to leave B_INVAL set to brelse the buffer out of existence
4472  *	in the biodone routine.
4473  */
4474 void
4475 bufdone(struct buf *bp)
4476 {
4477 	struct bufobj *dropobj;
4478 	void    (*biodone)(struct buf *);
4479 
4480 	buf_track(bp, __func__);
4481 	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
4482 	dropobj = NULL;
4483 
4484 	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
4485 
4486 	runningbufwakeup(bp);
4487 	if (bp->b_iocmd == BIO_WRITE)
4488 		dropobj = bp->b_bufobj;
4489 	/* call optional completion function if requested */
4490 	if (bp->b_iodone != NULL) {
4491 		biodone = bp->b_iodone;
4492 		bp->b_iodone = NULL;
4493 		(*biodone) (bp);
4494 		if (dropobj)
4495 			bufobj_wdrop(dropobj);
4496 		return;
4497 	}
4498 	if (bp->b_flags & B_VMIO) {
4499 		/*
4500 		 * Set B_CACHE if the op was a normal read and no error
4501 		 * occurred.  B_CACHE is set for writes in the b*write()
4502 		 * routines.
4503 		 */
4504 		if (bp->b_iocmd == BIO_READ &&
4505 		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
4506 		    !(bp->b_ioflags & BIO_ERROR))
4507 			bp->b_flags |= B_CACHE;
4508 		vfs_vmio_iodone(bp);
4509 	}
4510 	if (!LIST_EMPTY(&bp->b_dep))
4511 		buf_complete(bp);
4512 	if ((bp->b_flags & B_CKHASH) != 0) {
4513 		KASSERT(bp->b_iocmd == BIO_READ,
4514 		    ("bufdone: b_iocmd %d not BIO_READ", bp->b_iocmd));
4515 		KASSERT(buf_mapped(bp), ("bufdone: bp %p not mapped", bp));
4516 		(*bp->b_ckhashcalc)(bp);
4517 	}
4518 	/*
4519 	 * For asynchronous completions, release the buffer now. The brelse
4520 	 * will do a wakeup there if necessary - so no need to do a wakeup
4521 	 * here in the async case. The sync case always needs to do a wakeup.
4522 	 */
4523 	if (bp->b_flags & B_ASYNC) {
4524 		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) ||
4525 		    (bp->b_ioflags & BIO_ERROR))
4526 			brelse(bp);
4527 		else
4528 			bqrelse(bp);
4529 	} else
4530 		bdone(bp);
4531 	if (dropobj)
4532 		bufobj_wdrop(dropobj);
4533 }
4534 
4535 /*
4536  * This routine is called in lieu of iodone in the case of
4537  * incomplete I/O.  This keeps the busy status for pages
4538  * consistent.
4539  */
4540 void
4541 vfs_unbusy_pages(struct buf *bp)
4542 {
4543 	int i;
4544 	vm_object_t obj;
4545 	vm_page_t m;
4546 
4547 	runningbufwakeup(bp);
4548 	if (!(bp->b_flags & B_VMIO))
4549 		return;
4550 
4551 	obj = bp->b_bufobj->bo_object;
4552 	for (i = 0; i < bp->b_npages; i++) {
4553 		m = bp->b_pages[i];
4554 		if (m == bogus_page) {
4555 			m = vm_page_relookup(obj, OFF_TO_IDX(bp->b_offset) + i);
4556 			if (!m)
4557 				panic("vfs_unbusy_pages: page missing\n");
4558 			bp->b_pages[i] = m;
4559 			if (buf_mapped(bp)) {
4560 				BUF_CHECK_MAPPED(bp);
4561 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4562 				    bp->b_pages, bp->b_npages);
4563 			} else
4564 				BUF_CHECK_UNMAPPED(bp);
4565 		}
4566 		vm_page_sunbusy(m);
4567 	}
4568 	vm_object_pip_wakeupn(obj, bp->b_npages);
4569 }
4570 
4571 /*
4572  * vfs_page_set_valid:
4573  *
4574  *	Set the valid bits in a page based on the supplied offset.   The
4575  *	range is restricted to the buffer's size.
4576  *
4577  *	This routine is typically called after a read completes.
4578  */
4579 static void
4580 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4581 {
4582 	vm_ooffset_t eoff;
4583 
4584 	/*
4585 	 * Compute the end offset, eoff, such that [off, eoff) does not span a
4586 	 * page boundary and eoff is not greater than the end of the buffer.
4587 	 * The end of the buffer, in this case, is our file EOF, not the
4588 	 * allocation size of the buffer.
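	 *
	 * A worked example (illustrative, assuming 4 KB pages): for
	 * off == 0x11f00, (off + PAGE_SIZE) & ~PAGE_MASK == 0x12000, so
	 * eoff == 0x12000 unless b_offset + b_bcount is smaller, in which
	 * case eoff is clamped to that end-of-buffer offset.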
4589 	 */
4590 	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
4591 	if (eoff > bp->b_offset + bp->b_bcount)
4592 		eoff = bp->b_offset + bp->b_bcount;
4593 
4594 	/*
4595 	 * Set valid range.  This is typically the entire buffer and thus the
4596 	 * entire page.
4597 	 */
4598 	if (eoff > off)
4599 		vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
4600 }
4601 
4602 /*
4603  * vfs_page_set_validclean:
4604  *
4605  *	Set the valid bits and clear the dirty bits in a page based on the
4606  *	supplied offset.   The range is restricted to the buffer's size.
4607  */
4608 static void
4609 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4610 {
4611 	vm_ooffset_t soff, eoff;
4612 
4613 	/*
4614 	 * Start and end offsets in buffer.  eoff - soff may not cross a
4615 	 * page boundary or cross the end of the buffer.  The end of the
4616 	 * buffer, in this case, is our file EOF, not the allocation size
4617 	 * of the buffer.
4618 	 */
4619 	soff = off;
4620 	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4621 	if (eoff > bp->b_offset + bp->b_bcount)
4622 		eoff = bp->b_offset + bp->b_bcount;
4623 
4624 	/*
4625 	 * Set valid range.  This is typically the entire buffer and thus the
4626 	 * entire page.
4627 	 */
4628 	if (eoff > soff) {
4629 		vm_page_set_validclean(
4630 		    m,
4631 		   (vm_offset_t) (soff & PAGE_MASK),
4632 		   (vm_offset_t) (eoff - soff)
4633 		);
4634 	}
4635 }
4636 
4637 /*
4638  * Acquire a shared busy on all pages in the buf.
4639  */
4640 void
4641 vfs_busy_pages_acquire(struct buf *bp)
4642 {
4643 	int i;
4644 
4645 	for (i = 0; i < bp->b_npages; i++)
4646 		vm_page_busy_acquire(bp->b_pages[i], VM_ALLOC_SBUSY);
4647 }
4648 
4649 void
4650 vfs_busy_pages_release(struct buf *bp)
4651 {
4652 	int i;
4653 
4654 	for (i = 0; i < bp->b_npages; i++)
4655 		vm_page_sunbusy(bp->b_pages[i]);
4656 }
4657 
4658 /*
4659  * This routine is called before a device strategy routine.
4660  * It is used to tell the VM system that paging I/O is in
4661  * progress, and treat the pages associated with the buffer
4662  * almost as if they were exclusively busied.  Also the object paging_in_progress
4663  * flag is handled to make sure that the object doesn't become
4664  * inconsistent.
4665  *
4666  * Since I/O has not been initiated yet, certain buffer flags
4667  * such as BIO_ERROR or B_INVAL may be in an inconsistent state
4668  * and should be ignored.
4669  */
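/*
 * Illustrative sketch (mirroring the bufwrite() path) of how a write is
 * staged with vfs_busy_pages() before being handed to the disk driver;
 * clear_modify is non-zero so the pages are marked clean as the write is
 * prepared.
 *
 *	bp->b_flags &= ~B_DONE;
 *	bp->b_ioflags &= ~BIO_ERROR;
 *	bp->b_iocmd = BIO_WRITE;
 *	bufobj_wref(bp->b_bufobj);
 *	vfs_busy_pages(bp, 1);
 *	bp->b_iooffset = dbtob(bp->b_blkno);
 *	bstrategy(bp);
 */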
4670 void
4671 vfs_busy_pages(struct buf *bp, int clear_modify)
4672 {
4673 	vm_object_t obj;
4674 	vm_ooffset_t foff;
4675 	vm_page_t m;
4676 	int i;
4677 	bool bogus;
4678 
4679 	if (!(bp->b_flags & B_VMIO))
4680 		return;
4681 
4682 	obj = bp->b_bufobj->bo_object;
4683 	foff = bp->b_offset;
4684 	KASSERT(bp->b_offset != NOOFFSET,
4685 	    ("vfs_busy_pages: no buffer offset"));
4686 	if ((bp->b_flags & B_CLUSTER) == 0) {
4687 		vm_object_pip_add(obj, bp->b_npages);
4688 		vfs_busy_pages_acquire(bp);
4689 	}
4690 	if (bp->b_bufsize != 0)
4691 		vfs_setdirty_range(bp);
4692 	bogus = false;
4693 	for (i = 0; i < bp->b_npages; i++) {
4694 		m = bp->b_pages[i];
4695 		vm_page_assert_sbusied(m);
4696 
4697 		/*
4698 		 * When readying a buffer for a read ( i.e.
4699 		 * clear_modify == 0 ), it is important to do
4700 		 * bogus_page replacement for valid pages in
4701 		 * partially instantiated buffers.  Partially
4702 		 * instantiated buffers can, in turn, occur when
4703 		 * reconstituting a buffer from its VM backing store
4704 		 * base.  We only have to do this if B_CACHE is
4705 		 * clear ( which causes the I/O to occur in the
4706 		 * first place ).  The replacement prevents the read
4707 		 * I/O from overwriting potentially dirty VM-backed
4708 		 * pages.  XXX bogus page replacement is, uh, bogus.
4709 		 * It may not work properly with small-block devices.
4710 		 * We need to find a better way.
4711 		 */
4712 		if (clear_modify) {
4713 			pmap_remove_write(m);
4714 			vfs_page_set_validclean(bp, foff, m);
4715 		} else if (vm_page_all_valid(m) &&
4716 		    (bp->b_flags & B_CACHE) == 0) {
4717 			bp->b_pages[i] = bogus_page;
4718 			bogus = true;
4719 		}
4720 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4721 	}
4722 	if (bogus && buf_mapped(bp)) {
4723 		BUF_CHECK_MAPPED(bp);
4724 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4725 		    bp->b_pages, bp->b_npages);
4726 	}
4727 }
4728 
4729 /*
4730  *	vfs_bio_set_valid:
4731  *
4732  *	Set the range within the buffer to valid.  The range is
4733  *	relative to the beginning of the buffer, b_offset.  Note that
4734  *	b_offset itself may be offset from the beginning of the first
4735  *	page.
4736  */
4737 void
4738 vfs_bio_set_valid(struct buf *bp, int base, int size)
4739 {
4740 	int i, n;
4741 	vm_page_t m;
4742 
4743 	if (!(bp->b_flags & B_VMIO))
4744 		return;
4745 
4746 	/*
4747 	 * Fixup base to be relative to beginning of first page.
4748 	 * Set initial n to be the maximum number of bytes in the
4749 	 * first page that can be validated.
4750 	 */
4751 	base += (bp->b_offset & PAGE_MASK);
4752 	n = PAGE_SIZE - (base & PAGE_MASK);
4753 
4754 	/*
4755 	 * Busy may not be strictly necessary here because the pages are
4756 	 * unlikely to be fully valid and the vnode lock will synchronize
4757 	 * their access via getpages.  It is grabbed for consistency with
4758 	 * other page validation.
4759 	 */
4760 	vfs_busy_pages_acquire(bp);
4761 	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4762 		m = bp->b_pages[i];
4763 		if (n > size)
4764 			n = size;
4765 		vm_page_set_valid_range(m, base & PAGE_MASK, n);
4766 		base += n;
4767 		size -= n;
4768 		n = PAGE_SIZE;
4769 	}
4770 	vfs_busy_pages_release(bp);
4771 }
4772 
4773 /*
4774  *	vfs_bio_clrbuf:
4775  *
4776  *	If the specified buffer is a non-VMIO buffer, clear the entire
4777  *	buffer.  If the specified buffer is a VMIO buffer, clear and
4778  *	validate only the previously invalid portions of the buffer.
4779  *	This routine essentially fakes an I/O, so we need to clear
4780  *	BIO_ERROR and B_INVAL.
4781  *
4782  *	Note that while we only theoretically need to clear through b_bcount,
4783  *	we go ahead and clear through b_bufsize.
4784  */
4785 void
4786 vfs_bio_clrbuf(struct buf *bp)
4787 {
4788 	int i, j, mask, sa, ea, slide;
4789 
4790 	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4791 		clrbuf(bp);
4792 		return;
4793 	}
4794 	bp->b_flags &= ~B_INVAL;
4795 	bp->b_ioflags &= ~BIO_ERROR;
4796 	vfs_busy_pages_acquire(bp);
4797 	sa = bp->b_offset & PAGE_MASK;
4798 	slide = 0;
4799 	for (i = 0; i < bp->b_npages; i++, sa = 0) {
4800 		slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4801 		ea = slide & PAGE_MASK;
4802 		if (ea == 0)
4803 			ea = PAGE_SIZE;
4804 		if (bp->b_pages[i] == bogus_page)
4805 			continue;
4806 		j = sa / DEV_BSIZE;
4807 		mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
4808 		if ((bp->b_pages[i]->valid & mask) == mask)
4809 			continue;
4810 		if ((bp->b_pages[i]->valid & mask) == 0)
4811 			pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4812 		else {
4813 			for (; sa < ea; sa += DEV_BSIZE, j++) {
4814 				if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4815 					pmap_zero_page_area(bp->b_pages[i],
4816 					    sa, DEV_BSIZE);
4817 				}
4818 			}
4819 		}
4820 		vm_page_set_valid_range(bp->b_pages[i], j * DEV_BSIZE,
4821 		    roundup2(ea - sa, DEV_BSIZE));
4822 	}
4823 	vfs_busy_pages_release(bp);
4824 	bp->b_resid = 0;
4825 }
4826 
4827 void
4828 vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4829 {
4830 	vm_page_t m;
4831 	int i, n;
4832 
4833 	if (buf_mapped(bp)) {
4834 		BUF_CHECK_MAPPED(bp);
4835 		bzero(bp->b_data + base, size);
4836 	} else {
4837 		BUF_CHECK_UNMAPPED(bp);
4838 		n = PAGE_SIZE - (base & PAGE_MASK);
4839 		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4840 			m = bp->b_pages[i];
4841 			if (n > size)
4842 				n = size;
4843 			pmap_zero_page_area(m, base & PAGE_MASK, n);
4844 			base += n;
4845 			size -= n;
4846 			n = PAGE_SIZE;
4847 		}
4848 	}
4849 }
4850 
4851 /*
4852  * Update buffer flags based on I/O request parameters, optionally releasing the
4853  * buffer.  If it's VMIO or direct I/O, the buffer pages are released to the VM,
4854  * where they may be placed on a page queue (VMIO) or freed immediately (direct
4855  * I/O).  Otherwise the buffer is released to the cache.
4856  */
4857 static void
4858 b_io_dismiss(struct buf *bp, int ioflag, bool release)
4859 {
4860 
4861 	KASSERT((ioflag & IO_NOREUSE) == 0 || (ioflag & IO_VMIO) != 0,
4862 	    ("buf %p non-VMIO noreuse", bp));
4863 
4864 	if ((ioflag & IO_DIRECT) != 0)
4865 		bp->b_flags |= B_DIRECT;
4866 	if ((ioflag & IO_EXT) != 0)
4867 		bp->b_xflags |= BX_ALTDATA;
4868 	if ((ioflag & (IO_VMIO | IO_DIRECT)) != 0 && LIST_EMPTY(&bp->b_dep)) {
4869 		bp->b_flags |= B_RELBUF;
4870 		if ((ioflag & IO_NOREUSE) != 0)
4871 			bp->b_flags |= B_NOREUSE;
4872 		if (release)
4873 			brelse(bp);
4874 	} else if (release)
4875 		bqrelse(bp);
4876 }
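/*
 * Illustrative sketch: a filesystem read path typically copies data out of
 * the buffer and then lets vfs_bio_brelse() interpret the I/O flags,
 * releasing the buffer (and possibly its pages) as described above.
 * blkoffset, xfersize and uio stand for the caller's own state.
 *
 *	error = uiomove(bp->b_data + blkoffset, xfersize, uio);
 *	vfs_bio_brelse(bp, ioflag);
 */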
4877 
4878 void
4879 vfs_bio_brelse(struct buf *bp, int ioflag)
4880 {
4881 
4882 	b_io_dismiss(bp, ioflag, true);
4883 }
4884 
4885 void
4886 vfs_bio_set_flags(struct buf *bp, int ioflag)
4887 {
4888 
4889 	b_io_dismiss(bp, ioflag, false);
4890 }
4891 
4892 /*
4893  * vm_hold_load_pages and vm_hold_free_pages get pages into
4894  * a buffer's address space.  The pages are anonymous and are
4895  * not associated with a file object.
4896  */
4897 static void
4898 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4899 {
4900 	vm_offset_t pg;
4901 	vm_page_t p;
4902 	int index;
4903 
4904 	BUF_CHECK_MAPPED(bp);
4905 
4906 	to = round_page(to);
4907 	from = round_page(from);
4908 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4909 	MPASS((bp->b_flags & B_MAXPHYS) == 0);
4910 	KASSERT(to - from <= maxbcachebuf,
4911 	    ("vm_hold_load_pages too large %p %#jx %#jx %u",
4912 	    bp, (uintmax_t)from, (uintmax_t)to, maxbcachebuf));
4913 
4914 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
4915 		/*
4916 		 * note: must allocate system pages since blocking here
4917 		 * could interfere with paging I/O, no matter which
4918 		 * process we are.
4919 		 */
4920 		p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
4921 		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT) |
4922 		    VM_ALLOC_WAITOK);
4923 		pmap_qenter(pg, &p, 1);
4924 		bp->b_pages[index] = p;
4925 	}
4926 	bp->b_npages = index;
4927 }
4928 
4929 /* Return pages associated with this buf to the vm system */
4930 static void
4931 vm_hold_free_pages(struct buf *bp, int newbsize)
4932 {
4933 	vm_offset_t from;
4934 	vm_page_t p;
4935 	int index, newnpages;
4936 
4937 	BUF_CHECK_MAPPED(bp);
4938 
4939 	from = round_page((vm_offset_t)bp->b_data + newbsize);
4940 	newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4941 	if (bp->b_npages > newnpages)
4942 		pmap_qremove(from, bp->b_npages - newnpages);
4943 	for (index = newnpages; index < bp->b_npages; index++) {
4944 		p = bp->b_pages[index];
4945 		bp->b_pages[index] = NULL;
4946 		vm_page_unwire_noq(p);
4947 		vm_page_free(p);
4948 	}
4949 	bp->b_npages = newnpages;
4950 }
4951 
4952 /*
4953  * Map an IO request into kernel virtual address space.
4954  *
4955  * All requests are (re)mapped into kernel VA space.
4956  * Notice that we use b_bufsize for the size of the buffer
4957  * to be mapped.  b_bcount might be modified by the driver.
4958  *
4959  * Note that even if the caller determines that the address space should
4960  * be valid, a race or a smaller file mapped into a larger space may
4961  * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
4962  * check the return value.
4963  *
4964  * This function only works with pager buffers.
4965  */
4966 int
4967 vmapbuf(struct buf *bp, void *uaddr, size_t len, int mapbuf)
4968 {
4969 	vm_prot_t prot;
4970 	int pidx;
4971 
4972 	MPASS((bp->b_flags & B_MAXPHYS) != 0);
4973 	prot = VM_PROT_READ;
4974 	if (bp->b_iocmd == BIO_READ)
4975 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
4976 	pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
4977 	    (vm_offset_t)uaddr, len, prot, bp->b_pages, PBUF_PAGES);
4978 	if (pidx < 0)
4979 		return (-1);
4980 	bp->b_bufsize = len;
4981 	bp->b_npages = pidx;
4982 	bp->b_offset = ((vm_offset_t)uaddr) & PAGE_MASK;
4983 	if (mapbuf || !unmapped_buf_allowed) {
4984 		pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
4985 		bp->b_data = bp->b_kvabase + bp->b_offset;
4986 	} else
4987 		bp->b_data = unmapped_buf;
4988 	return (0);
4989 }
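/*
 * Illustrative physio-style usage sketch of vmapbuf()/vunmapbuf() on a
 * pager buffer: wire and map the user pages, perform the I/O, then unmap
 * and unwire them.  uaddr and len stand for the caller's user address and
 * transfer length.
 *
 *	if (vmapbuf(bp, uaddr, len, 1) < 0) {
 *		error = EFAULT;
 *		goto done;
 *	}
 *	... issue the I/O and wait for it ...
 *	vunmapbuf(bp);
 */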
4990 
4991 /*
4992  * Free the io map PTEs associated with this IO operation.
4993  * We also invalidate the TLB entries and restore the original b_addr.
4994  *
4995  * This function only works with pager buffers.
4996  */
4997 void
4998 vunmapbuf(struct buf *bp)
4999 {
5000 	int npages;
5001 
5002 	npages = bp->b_npages;
5003 	if (buf_mapped(bp))
5004 		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
5005 	vm_page_unhold_pages(bp->b_pages, npages);
5006 
5007 	bp->b_data = unmapped_buf;
5008 }
5009 
5010 void
5011 bdone(struct buf *bp)
5012 {
5013 	struct mtx *mtxp;
5014 
5015 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
5016 	mtx_lock(mtxp);
5017 	bp->b_flags |= B_DONE;
5018 	wakeup(bp);
5019 	mtx_unlock(mtxp);
5020 }
5021 
5022 void
5023 bwait(struct buf *bp, u_char pri, const char *wchan)
5024 {
5025 	struct mtx *mtxp;
5026 
5027 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
5028 	mtx_lock(mtxp);
5029 	while ((bp->b_flags & B_DONE) == 0)
5030 		msleep(bp, mtxp, pri, wchan, 0);
5031 	mtx_unlock(mtxp);
5032 }
5033 
5034 int
5035 bufsync(struct bufobj *bo, int waitfor)
5036 {
5037 
5038 	return (VOP_FSYNC(bo2vnode(bo), waitfor, curthread));
5039 }
5040 
5041 void
5042 bufstrategy(struct bufobj *bo, struct buf *bp)
5043 {
5044 	int i __unused;
5045 	struct vnode *vp;
5046 
5047 	vp = bp->b_vp;
5048 	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
5049 	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
5050 	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
5051 	i = VOP_STRATEGY(vp, bp);
5052 	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
5053 }
5054 
5055 /*
5056  * Initialize a struct bufobj before use.  Memory is assumed zero filled.
5057  */
5058 void
5059 bufobj_init(struct bufobj *bo, void *private)
5060 {
5061 	static volatile int bufobj_cleanq;
5062 
5063 	bo->bo_domain =
5064 	    atomic_fetchadd_int(&bufobj_cleanq, 1) % buf_domains;
5065 	rw_init(BO_LOCKPTR(bo), "bufobj interlock");
5066 	bo->bo_private = private;
5067 	TAILQ_INIT(&bo->bo_clean.bv_hd);
5068 	TAILQ_INIT(&bo->bo_dirty.bv_hd);
5069 }
5070 
5071 void
5072 bufobj_wrefl(struct bufobj *bo)
5073 {
5074 
5075 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
5076 	ASSERT_BO_WLOCKED(bo);
5077 	bo->bo_numoutput++;
5078 }
5079 
5080 void
5081 bufobj_wref(struct bufobj *bo)
5082 {
5083 
5084 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
5085 	BO_LOCK(bo);
5086 	bo->bo_numoutput++;
5087 	BO_UNLOCK(bo);
5088 }
5089 
5090 void
5091 bufobj_wdrop(struct bufobj *bo)
5092 {
5093 
5094 	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
5095 	BO_LOCK(bo);
5096 	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
5097 	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
5098 		bo->bo_flag &= ~BO_WWAIT;
5099 		wakeup(&bo->bo_numoutput);
5100 	}
5101 	BO_UNLOCK(bo);
5102 }
5103 
5104 int
5105 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
5106 {
5107 	int error;
5108 
5109 	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
5110 	ASSERT_BO_WLOCKED(bo);
5111 	error = 0;
5112 	while (bo->bo_numoutput) {
5113 		bo->bo_flag |= BO_WWAIT;
5114 		error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo),
5115 		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
5116 		if (error)
5117 			break;
5118 	}
5119 	return (error);
5120 }
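/*
 * Illustrative sketch of the bufobj write-count protocol implemented by
 * bufobj_wref(), bufobj_wdrop() and bufobj_wwait() above: each write in
 * flight holds a reference, bufdone() drops it for BIO_WRITE buffers, and
 * a syncer-style caller drains outstanding writes like this:
 *
 *	BO_LOCK(bo);
 *	error = bufobj_wwait(bo, 0, 0);
 *	BO_UNLOCK(bo);
 */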
5121 
5122 /*
5123  * Set bio_data or bio_ma for struct bio from the struct buf.
5124  */
5125 void
5126 bdata2bio(struct buf *bp, struct bio *bip)
5127 {
5128 
5129 	if (!buf_mapped(bp)) {
5130 		KASSERT(unmapped_buf_allowed, ("unmapped"));
5131 		bip->bio_ma = bp->b_pages;
5132 		bip->bio_ma_n = bp->b_npages;
5133 		bip->bio_data = unmapped_buf;
5134 		bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
5135 		bip->bio_flags |= BIO_UNMAPPED;
5136 		KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
5137 		    PAGE_SIZE == bp->b_npages,
5138 		    ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
5139 		    (long long)bip->bio_length, bip->bio_ma_n));
5140 	} else {
5141 		bip->bio_data = bp->b_data;
5142 		bip->bio_ma = NULL;
5143 	}
5144 }
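/*
 * Illustrative sketch (modeled on the geom_vfs strategy path) of how a
 * buffer is translated into a bio with bdata2bio() before being passed
 * down to GEOM; my_done and cp stand for a caller-supplied completion
 * routine and consumer.
 *
 *	bip = g_alloc_bio();
 *	bip->bio_cmd = bp->b_iocmd;
 *	bip->bio_offset = bp->b_iooffset;
 *	bip->bio_length = bp->b_bcount;
 *	bdata2bio(bp, bip);
 *	bip->bio_done = my_done;
 *	bip->bio_caller2 = bp;
 *	g_io_request(bip, cp);
 */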
5145 
5146 /*
5147  * The MIPS pmap code currently doesn't handle aliased pages.
5148  * The VIPT caches may not handle page aliasing themselves, leading
5149  * to data corruption.
5150  *
5151  * As such, this code makes a system extremely unhappy if said
5152  * system doesn't support unaliasing the above situation in hardware.
5153  * Some "recent" systems (eg some mips24k/mips74k cores) don't enable
5154  * this feature at build time, so it has to be handled in software.
5155  *
5156  * Once the MIPS pmap/cache code grows to support this function on
5157  * earlier chips, it should be flipped back off.
5158  */
5159 #ifdef	__mips__
5160 static int buf_pager_relbuf = 1;
5161 #else
5162 static int buf_pager_relbuf = 0;
5163 #endif
5164 SYSCTL_INT(_vfs, OID_AUTO, buf_pager_relbuf, CTLFLAG_RWTUN,
5165     &buf_pager_relbuf, 0,
5166     "Make buffer pager release buffers after reading");
5167 
5168 /*
5169  * The buffer pager.  It uses buffer reads to validate pages.
5170  *
5171  * In contrast to the generic local pager from vm/vnode_pager.c, this
5172  * pager correctly and easily handles volumes where the underlying
5173  * device block size is greater than the machine page size.  The
5174  * buffer cache transparently extends the requested page run to be
5175  * aligned at the block boundary, and does the necessary bogus page
5176  * replacements in the addends to avoid obliterating already valid
5177  * pages.
5178  *
5179  * The only non-trivial issue is that the exclusive busy state for
5180  * pages, which is assumed by the vm_pager_getpages() interface, is
5181  * incompatible with the VMIO buffer cache's desire to share-busy the
5182  * pages.  This function performs a trivial downgrade of the pages'
5183  * state before reading buffers, and a less trivial upgrade from the
5184  * shared-busy to excl-busy state after the read.
5185  */
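/*
 * Illustrative wiring sketch: a filesystem's VOP_GETPAGES handler can
 * delegate to vfs_bio_getpages() by supplying its own logical-block and
 * block-size callbacks (the myfs_* names are hypothetical).
 *
 *	static int
 *	myfs_getpages(struct vop_getpages_args *ap)
 *	{
 *		return (vfs_bio_getpages(ap->a_vp, ap->a_m, ap->a_count,
 *		    ap->a_rbehind, ap->a_rahead, myfs_gbp_getblkno,
 *		    myfs_gbp_getblksz));
 *	}
 */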
5186 int
5187 vfs_bio_getpages(struct vnode *vp, vm_page_t *ma, int count,
5188     int *rbehind, int *rahead, vbg_get_lblkno_t get_lblkno,
5189     vbg_get_blksize_t get_blksize)
5190 {
5191 	vm_page_t m;
5192 	vm_object_t object;
5193 	struct buf *bp;
5194 	struct mount *mp;
5195 	daddr_t lbn, lbnp;
5196 	vm_ooffset_t la, lb, poff, poffe;
5197 	long bsize;
5198 	int bo_bs, br_flags, error, i, pgsin, pgsin_a, pgsin_b;
5199 	bool redo, lpart;
5200 
5201 	object = vp->v_object;
5202 	mp = vp->v_mount;
5203 	error = 0;
5204 	la = IDX_TO_OFF(ma[count - 1]->pindex);
5205 	if (la >= object->un_pager.vnp.vnp_size)
5206 		return (VM_PAGER_BAD);
5207 
5208 	/*
5209 	 * Change the meaning of la from where the last requested page starts
5210 	 * to where it ends, because that's the end of the requested region
5211 	 * and the start of the potential read-ahead region.
5212 	 */
5213 	la += PAGE_SIZE;
5214 	lpart = la > object->un_pager.vnp.vnp_size;
5215 	bo_bs = get_blksize(vp, get_lblkno(vp, IDX_TO_OFF(ma[0]->pindex)));
5216 
5217 	/*
5218 	 * Calculate read-ahead, behind and total pages.
5219 	 */
5220 	pgsin = count;
5221 	lb = IDX_TO_OFF(ma[0]->pindex);
5222 	pgsin_b = OFF_TO_IDX(lb - rounddown2(lb, bo_bs));
5223 	pgsin += pgsin_b;
5224 	if (rbehind != NULL)
5225 		*rbehind = pgsin_b;
5226 	pgsin_a = OFF_TO_IDX(roundup2(la, bo_bs) - la);
5227 	if (la + IDX_TO_OFF(pgsin_a) >= object->un_pager.vnp.vnp_size)
5228 		pgsin_a = OFF_TO_IDX(roundup2(object->un_pager.vnp.vnp_size,
5229 		    PAGE_SIZE) - la);
5230 	pgsin += pgsin_a;
5231 	if (rahead != NULL)
5232 		*rahead = pgsin_a;
5233 	VM_CNT_INC(v_vnodein);
5234 	VM_CNT_ADD(v_vnodepgsin, pgsin);
5235 
5236 	br_flags = (mp != NULL && (mp->mnt_kern_flag & MNTK_UNMAPPED_BUFS)
5237 	    != 0) ? GB_UNMAPPED : 0;
5238 again:
5239 	for (i = 0; i < count; i++) {
5240 		if (ma[i] != bogus_page)
5241 			vm_page_busy_downgrade(ma[i]);
5242 	}
5243 
5244 	lbnp = -1;
5245 	for (i = 0; i < count; i++) {
5246 		m = ma[i];
5247 		if (m == bogus_page)
5248 			continue;
5249 
5250 		/*
5251 		 * Pages are shared busy and the object lock is not
5252 		 * owned, which together allow for the pages'
5253 		 * invalidation.  The racy test for validity avoids
5254 		 * useless creation of the buffer for the most typical
5255 		 * case when invalidation is not used in redo or for
5256 		 * parallel read.  The shared->excl upgrade loop at
5257 		 * the end of the function catches the race in a
5258 		 * reliable way (protected by the object lock).
5259 		 */
5260 		if (vm_page_all_valid(m))
5261 			continue;
5262 
5263 		poff = IDX_TO_OFF(m->pindex);
5264 		poffe = MIN(poff + PAGE_SIZE, object->un_pager.vnp.vnp_size);
5265 		for (; poff < poffe; poff += bsize) {
5266 			lbn = get_lblkno(vp, poff);
5267 			if (lbn == lbnp)
5268 				goto next_page;
5269 			lbnp = lbn;
5270 
5271 			bsize = get_blksize(vp, lbn);
5272 			error = bread_gb(vp, lbn, bsize, curthread->td_ucred,
5273 			    br_flags, &bp);
5274 			if (error != 0)
5275 				goto end_pages;
5276 			if (bp->b_rcred == curthread->td_ucred) {
5277 				crfree(bp->b_rcred);
5278 				bp->b_rcred = NOCRED;
5279 			}
5280 			if (LIST_EMPTY(&bp->b_dep)) {
5281 				/*
5282 				 * Invalidation clears m->valid, but
5283 				 * may leave B_CACHE flag if the
5284 				 * buffer existed at the invalidation
5285 				 * time.  In this case, recycle the
5286 				 * buffer to do real read on next
5287 				 * bread() after redo.
5288 				 *
5289 				 * Otherwise B_RELBUF is not strictly
5290 				 * necessary, enable to reduce buf
5291 				 * cache pressure.
5292 				 */
5293 				if (buf_pager_relbuf ||
5294 				    !vm_page_all_valid(m))
5295 					bp->b_flags |= B_RELBUF;
5296 
5297 				bp->b_flags &= ~B_NOCACHE;
5298 				brelse(bp);
5299 			} else {
5300 				bqrelse(bp);
5301 			}
5302 		}
5303 		KASSERT(1 /* racy, enable for debugging */ ||
5304 		    vm_page_all_valid(m) || i == count - 1,
5305 		    ("buf %d %p invalid", i, m));
5306 		if (i == count - 1 && lpart) {
5307 			if (!vm_page_none_valid(m) &&
5308 			    !vm_page_all_valid(m))
5309 				vm_page_zero_invalid(m, TRUE);
5310 		}
5311 next_page:;
5312 	}
5313 end_pages:
5314 
5315 	redo = false;
5316 	for (i = 0; i < count; i++) {
5317 		if (ma[i] == bogus_page)
5318 			continue;
5319 		if (vm_page_busy_tryupgrade(ma[i]) == 0) {
5320 			vm_page_sunbusy(ma[i]);
5321 			ma[i] = vm_page_grab_unlocked(object, ma[i]->pindex,
5322 			    VM_ALLOC_NORMAL);
5323 		}
5324 
5325 		/*
5326 		 * Since the pages were only sbusy while neither the
5327 		 * buffer nor the object lock was held by us, or
5328 		 * reallocated while vm_page_grab() slept for busy
5329 		 * relinquish, they could have been invalidated.
5330 		 * Recheck the valid bits and re-read as needed.
5331 		 *
5332 		 * Note that the last page is made fully valid in the
5333 		 * read loop, and partial validity for the page at
5334 		 * index count - 1 could mean that the page was
5335 		 * invalidated or removed, so we must restart for
5336 		 * safety as well.
5337 		 */
5338 		if (!vm_page_all_valid(ma[i]))
5339 			redo = true;
5340 	}
5341 	if (redo && error == 0)
5342 		goto again;
5343 	return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
5344 }
5345 
5346 #include "opt_ddb.h"
5347 #ifdef DDB
5348 #include <ddb/ddb.h>
5349 
5350 /* DDB command to show buffer data */
5351 DB_SHOW_COMMAND(buffer, db_show_buffer)
5352 {
5353 	/* get args */
5354 	struct buf *bp = (struct buf *)addr;
5355 #ifdef FULL_BUF_TRACKING
5356 	uint32_t i, j;
5357 #endif
5358 
5359 	if (!have_addr) {
5360 		db_printf("usage: show buffer <addr>\n");
5361 		return;
5362 	}
5363 
5364 	db_printf("buf at %p\n", bp);
5365 	db_printf("b_flags = 0x%b, b_xflags=0x%b\n",
5366 	    (u_int)bp->b_flags, PRINT_BUF_FLAGS,
5367 	    (u_int)bp->b_xflags, PRINT_BUF_XFLAGS);
5368 	    db_printf("b_vflags=0x%b b_ioflags=0x%b\n",
5369 	    (u_int)bp->b_vflags, PRINT_BUF_VFLAGS,
5370 	    (u_int)bp->b_ioflags, PRINT_BIO_FLAGS);
5371 	db_printf(
5372 	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
5373 	    "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd,\n"
5374 	    "b_vp = %p, b_dep = %p\n",
5375 	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
5376 	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
5377 	    (intmax_t)bp->b_lblkno, bp->b_vp, bp->b_dep.lh_first);
5378 	db_printf("b_kvabase = %p, b_kvasize = %d\n",
5379 	    bp->b_kvabase, bp->b_kvasize);
5380 	if (bp->b_npages) {
5381 		int i;
5382 		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
5383 		for (i = 0; i < bp->b_npages; i++) {
5384 			vm_page_t m;
5385 			m = bp->b_pages[i];
5386 			if (m != NULL)
5387 				db_printf("(%p, 0x%lx, 0x%lx)", m->object,
5388 				    (u_long)m->pindex,
5389 				    (u_long)VM_PAGE_TO_PHYS(m));
5390 			else
5391 				db_printf("( ??? )");
5392 			if ((i + 1) < bp->b_npages)
5393 				db_printf(",");
5394 		}
5395 		db_printf("\n");
5396 	}
5397 	BUF_LOCKPRINTINFO(bp);
5398 #if defined(FULL_BUF_TRACKING)
5399 	db_printf("b_io_tracking: b_io_tcnt = %u\n", bp->b_io_tcnt);
5400 
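	/*
	 * Walk the tracking ring from the most recently recorded entry
	 * back to the oldest, skipping slots that were never filled.
	 */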
5401 	i = bp->b_io_tcnt % BUF_TRACKING_SIZE;
5402 	for (j = 1; j <= BUF_TRACKING_SIZE; j++) {
5403 		if (bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)] == NULL)
5404 			continue;
5405 		db_printf(" %2u: %s\n", j,
5406 		    bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)]);
5407 	}
5408 #elif defined(BUF_TRACKING)
5409 	db_printf("b_io_tracking: %s\n", bp->b_io_tracking);
5410 #endif
5411 	db_printf(" ");
5412 }
5413 
5414 DB_SHOW_COMMAND(bufqueues, bufqueues)
5415 {
5416 	struct bufdomain *bd;
5417 	struct buf *bp;
5418 	long total;
5419 	int i, j, cnt;
5420 
5421 	db_printf("bqempty: %d\n", bqempty.bq_len);
5422 
5423 	for (i = 0; i < buf_domains; i++) {
5424 		bd = &bdomain[i];
5425 		db_printf("Buf domain %d\n", i);
5426 		db_printf("\tfreebufs\t%d\n", bd->bd_freebuffers);
5427 		db_printf("\tlofreebufs\t%d\n", bd->bd_lofreebuffers);
5428 		db_printf("\thifreebufs\t%d\n", bd->bd_hifreebuffers);
5429 		db_printf("\n");
5430 		db_printf("\tbufspace\t%ld\n", bd->bd_bufspace);
5431 		db_printf("\tmaxbufspace\t%ld\n", bd->bd_maxbufspace);
5432 		db_printf("\thibufspace\t%ld\n", bd->bd_hibufspace);
5433 		db_printf("\tlobufspace\t%ld\n", bd->bd_lobufspace);
5434 		db_printf("\tbufspacethresh\t%ld\n", bd->bd_bufspacethresh);
5435 		db_printf("\n");
5436 		db_printf("\tnumdirtybuffers\t%d\n", bd->bd_numdirtybuffers);
5437 		db_printf("\tlodirtybuffers\t%d\n", bd->bd_lodirtybuffers);
5438 		db_printf("\thidirtybuffers\t%d\n", bd->bd_hidirtybuffers);
5439 		db_printf("\tdirtybufthresh\t%d\n", bd->bd_dirtybufthresh);
5440 		db_printf("\n");
5441 		total = 0;
5442 		TAILQ_FOREACH(bp, &bd->bd_cleanq->bq_queue, b_freelist)
5443 			total += bp->b_bufsize;
5444 		db_printf("\tcleanq count\t%d (%ld)\n",
5445 		    bd->bd_cleanq->bq_len, total);
5446 		total = 0;
5447 		TAILQ_FOREACH(bp, &bd->bd_dirtyq.bq_queue, b_freelist)
5448 			total += bp->b_bufsize;
5449 		db_printf("\tdirtyq count\t%d (%ld)\n",
5450 		    bd->bd_dirtyq.bq_len, total);
5451 		db_printf("\twakeup\t\t%d\n", bd->bd_wanted);
5452 		db_printf("\tlim\t\t%d\n", bd->bd_lim);
5453 		db_printf("\tCPU ");
5454 		for (j = 0; j <= mp_maxid; j++)
5455 			db_printf("%d, ", bd->bd_subq[j].bq_len);
5456 		db_printf("\n");
5457 		cnt = 0;
5458 		total = 0;
5459 		for (j = 0; j < nbuf; j++) {
5460 			bp = nbufp(j);
5461 			if (bp->b_domain == i && BUF_ISLOCKED(bp)) {
5462 				cnt++;
5463 				total += bp->b_bufsize;
5464 			}
5465 		}
5466 		db_printf("\tLocked buffers: %d space %ld\n", cnt, total);
5467 		cnt = 0;
5468 		total = 0;
5469 		for (j = 0; j < nbuf; j++) {
5470 			bp = nbufp(j);
5471 			if (bp->b_domain == i) {
5472 				cnt++;
5473 				total += bp->b_bufsize;
5474 			}
5475 		}
5476 		db_printf("\tTotal buffers: %d space %ld\n", cnt, total);
5477 	}
5478 }
5479 
5480 DB_SHOW_COMMAND(lockedbufs, lockedbufs)
5481 {
5482 	struct buf *bp;
5483 	int i;
5484 
5485 	for (i = 0; i < nbuf; i++) {
5486 		bp = nbufp(i);
5487 		if (BUF_ISLOCKED(bp)) {
5488 			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5489 			db_printf("\n");
5490 			if (db_pager_quit)
5491 				break;
5492 		}
5493 	}
5494 }
5495 
5496 DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
5497 {
5498 	struct vnode *vp;
5499 	struct buf *bp;
5500 
5501 	if (!have_addr) {
5502 		db_printf("usage: show vnodebufs <addr>\n");
5503 		return;
5504 	}
5505 	vp = (struct vnode *)addr;
5506 	db_printf("Clean buffers:\n");
5507 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
5508 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5509 		db_printf("\n");
5510 	}
5511 	db_printf("Dirty buffers:\n");
5512 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
5513 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5514 		db_printf("\n");
5515 	}
5516 }
5517 
5518 DB_COMMAND(countfreebufs, db_countfreebufs)
5519 {
5520 	struct buf *bp;
5521 	int i, used = 0, nfree = 0;
5522 
5523 	if (have_addr) {
5524 		db_printf("usage: countfreebufs\n");
5525 		return;
5526 	}
5527 
5528 	for (i = 0; i < nbuf; i++) {
5529 		bp = nbufp(i);
5530 		if (bp->b_qindex == QUEUE_EMPTY)
5531 			nfree++;
5532 		else
5533 			used++;
5534 	}
5535 
5536 	db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
5537 	    nfree + used);
5538 	db_printf("numfreebuffers is %d\n", numfreebuffers);
5539 }
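
/*
 * Example invocations of the commands above from the DDB prompt
 * (the addresses are illustrative):
 *
 *	db> show buffer 0xfffff80012345678
 *	db> show bufqueues
 *	db> show lockedbufs
 *	db> show vnodebufs 0xfffff800abcdef00
 *	db> countfreebufs
 */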
5540 #endif /* DDB */
5541